From d3f96bdd76dc2a936582dff5ea295c1614e002cc Mon Sep 17 00:00:00 2001 From: guillaumemichel Date: Tue, 26 Nov 2024 16:18:08 +0100 Subject: [PATCH 01/23] updated Makefile with default aws profile --- Makefile | 2 +- ...00001_create_agent_versions_table.down.sql | 5 -- .../000001_create_agent_versions_table.up.sql | 24 ------- .../000001_create_requests_table.down.sql | 1 + .../000001_create_requests_table.up.sql | 21 ++++++ ...te_upsert_agent_versions_function.down.sql | 5 -- ...eate_upsert_agent_versions_function.up.sql | 32 --------- ...0003_create_multi_addresses_table.down.sql | 5 -- ...000003_create_multi_addresses_table.up.sql | 70 ------------------ ...e_upsert_multi_addresses_function.down.sql | 1 - ...ate_upsert_multi_addresses_function.up.sql | 32 --------- .../000005_create_peers_table.down.sql | 6 -- .../000005_create_peers_table.up.sql | 71 ------------------- .../000006_create_keys_table.down.sql | 5 -- db/migrations/000006_create_keys_table.up.sql | 17 ----- .../000007_create_peer_logs_table.down.sql | 5 -- .../000007_create_peer_logs_table.up.sql | 43 ----------- .../000008_create_requests_table.down.sql | 6 -- .../000008_create_requests_table.up.sql | 42 ----------- ...00009_create_upsert_peer_function.down.sql | 1 - .../000009_create_upsert_peer_function.up.sql | 30 -------- ...ate_peers_x_multi_addresses_table.down.sql | 5 -- ...reate_peers_x_multi_addresses_table.up.sql | 24 ------- ...11_create_insert_request_function.down.sql | 5 -- ...0011_create_insert_request_function.up.sql | 60 ---------------- ...000012_create_insert_key_function.down.sql | 5 -- .../000012_create_insert_key_function.up.sql | 36 ---------- .../000013_create_protocols_table.down.sql | 5 -- .../000013_create_protocols_table.up.sql | 25 ------- ..._create_upsert_protocols_function.down.sql | 7 -- ...14_create_upsert_protocols_function.up.sql | 57 --------------- ...015_create_upsert_protocol_set_id.down.sql | 5 -- ...00015_create_upsert_protocol_set_id.up.sql | 25 ------- 
.../000016_create_ip_addresses_table.down.sql | 3 - .../000016_create_ip_addresses_table.up.sql | 48 ------------- .../000017_create_requests_dnf_table.down.sql | 5 -- .../000017_create_requests_dnf_table.up.sql | 29 -------- ...d_protocols_to_requests_dnf_table.down.sql | 2 - ...add_protocols_to_requests_dnf_table.up.sql | 5 -- ...9_add_protocols_to_requests_table.down.sql | 2 - ...019_add_protocols_to_requests_table.up.sql | 7 -- ...20_alter_insert_requests_function.down.sql | 5 -- ...0020_alter_insert_requests_function.up.sql | 59 --------------- ...e_index_idx_normalized_at_is_null.down.sql | 1 - ...ate_index_idx_normalized_at_is_null.up.sql | 3 - 45 files changed, 23 insertions(+), 829 deletions(-) delete mode 100644 db/migrations/000001_create_agent_versions_table.down.sql delete mode 100644 db/migrations/000001_create_agent_versions_table.up.sql create mode 100644 db/migrations/000001_create_requests_table.down.sql create mode 100644 db/migrations/000001_create_requests_table.up.sql delete mode 100644 db/migrations/000002_create_upsert_agent_versions_function.down.sql delete mode 100644 db/migrations/000002_create_upsert_agent_versions_function.up.sql delete mode 100644 db/migrations/000003_create_multi_addresses_table.down.sql delete mode 100644 db/migrations/000003_create_multi_addresses_table.up.sql delete mode 100644 db/migrations/000004_create_upsert_multi_addresses_function.down.sql delete mode 100644 db/migrations/000004_create_upsert_multi_addresses_function.up.sql delete mode 100644 db/migrations/000005_create_peers_table.down.sql delete mode 100644 db/migrations/000005_create_peers_table.up.sql delete mode 100644 db/migrations/000006_create_keys_table.down.sql delete mode 100644 db/migrations/000006_create_keys_table.up.sql delete mode 100644 db/migrations/000007_create_peer_logs_table.down.sql delete mode 100644 db/migrations/000007_create_peer_logs_table.up.sql delete mode 100644 db/migrations/000008_create_requests_table.down.sql delete mode 
100644 db/migrations/000008_create_requests_table.up.sql delete mode 100644 db/migrations/000009_create_upsert_peer_function.down.sql delete mode 100644 db/migrations/000009_create_upsert_peer_function.up.sql delete mode 100644 db/migrations/000010_create_peers_x_multi_addresses_table.down.sql delete mode 100644 db/migrations/000010_create_peers_x_multi_addresses_table.up.sql delete mode 100644 db/migrations/000011_create_insert_request_function.down.sql delete mode 100644 db/migrations/000011_create_insert_request_function.up.sql delete mode 100644 db/migrations/000012_create_insert_key_function.down.sql delete mode 100644 db/migrations/000012_create_insert_key_function.up.sql delete mode 100644 db/migrations/000013_create_protocols_table.down.sql delete mode 100644 db/migrations/000013_create_protocols_table.up.sql delete mode 100644 db/migrations/000014_create_upsert_protocols_function.down.sql delete mode 100644 db/migrations/000014_create_upsert_protocols_function.up.sql delete mode 100644 db/migrations/000015_create_upsert_protocol_set_id.down.sql delete mode 100644 db/migrations/000015_create_upsert_protocol_set_id.up.sql delete mode 100644 db/migrations/000016_create_ip_addresses_table.down.sql delete mode 100644 db/migrations/000016_create_ip_addresses_table.up.sql delete mode 100644 db/migrations/000017_create_requests_dnf_table.down.sql delete mode 100644 db/migrations/000017_create_requests_dnf_table.up.sql delete mode 100644 db/migrations/000018_add_protocols_to_requests_dnf_table.down.sql delete mode 100644 db/migrations/000018_add_protocols_to_requests_dnf_table.up.sql delete mode 100644 db/migrations/000019_add_protocols_to_requests_table.down.sql delete mode 100644 db/migrations/000019_add_protocols_to_requests_table.up.sql delete mode 100644 db/migrations/000020_alter_insert_requests_function.down.sql delete mode 100644 db/migrations/000020_alter_insert_requests_function.up.sql delete mode 100644 
db/migrations/000021_create_index_idx_normalized_at_is_null.down.sql delete mode 100644 db/migrations/000021_create_index_idx_normalized_at_is_null.up.sql diff --git a/Makefile b/Makefile index 1f55ba7..005f75f 100644 --- a/Makefile +++ b/Makefile @@ -32,6 +32,6 @@ build: .PHONY: push push: - aws ecr get-login-password --profile probelab --region ${REPO_REGION} | docker login --username ${REPO_USER} --password-stdin ${REPO} + aws ecr get-login-password --region ${REPO_REGION} | docker login --username ${REPO_USER} --password-stdin ${REPO} docker tag ${IMAGE_NAME} ${REPO}/${IMAGE_NAME} docker push ${REPO}/${IMAGE_NAME} diff --git a/db/migrations/000001_create_agent_versions_table.down.sql b/db/migrations/000001_create_agent_versions_table.down.sql deleted file mode 100644 index 5c4e953..0000000 --- a/db/migrations/000001_create_agent_versions_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS agent_versions; - -COMMIT; diff --git a/db/migrations/000001_create_agent_versions_table.up.sql b/db/migrations/000001_create_agent_versions_table.up.sql deleted file mode 100644 index fcf4710..0000000 --- a/db/migrations/000001_create_agent_versions_table.up.sql +++ /dev/null @@ -1,24 +0,0 @@ -BEGIN; - --- Holds all discovered agent_versions -CREATE TABLE agent_versions -( - -- A unique id that identifies a agent version. - id INT GENERATED ALWAYS AS IDENTITY, - -- Timestamp of when this agent version was seen the last time. - created_at TIMESTAMPTZ NOT NULL, - -- Agent version string as reported from the remote peer. - agent_version TEXT NOT NULL CHECK ( TRIM(agent_version) != '' ), - - -- There should only be unique agent version strings in this table. 
- CONSTRAINT uq_agent_versions_agent_version UNIQUE (agent_version), - - PRIMARY KEY (id) -); - -COMMENT ON TABLE agent_versions IS 'Holds all discovered agent_versions'; -COMMENT ON COLUMN agent_versions.id IS 'A unique id that identifies a agent version.'; -COMMENT ON COLUMN agent_versions.created_at IS 'Timestamp of when this agent version was seen the last time.'; -COMMENT ON COLUMN agent_versions.agent_version IS 'Agent version string as reported from the remote peer.'; - -COMMIT; diff --git a/db/migrations/000001_create_requests_table.down.sql b/db/migrations/000001_create_requests_table.down.sql new file mode 100644 index 0000000..e2deaea --- /dev/null +++ b/db/migrations/000001_create_requests_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS requests; \ No newline at end of file diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql new file mode 100644 index 0000000..64cd9d0 --- /dev/null +++ b/db/migrations/000001_create_requests_table.up.sql @@ -0,0 +1,21 @@ +CREATE TABLE requests +( + id UUID, + ant_multihash String, + remote_multihash String, + agent_version Nullable(String), + protocols Nullable(Array(String)), + started_at DateTime64(3), + request_type Enum8( + 'PUT_VALUE', + 'GET_VALUE', + 'ADD_PROVIDER', + 'GET_PROVIDERS', + 'FIND_NODE', + 'PING' + ), + key_multihash String, + multi_addresses Array(String) +) ENGINE = MergeTree() + PRIMARY KEY (started_at) +TTL started_at + INTERVAL 1 DAY; diff --git a/db/migrations/000002_create_upsert_agent_versions_function.down.sql b/db/migrations/000002_create_upsert_agent_versions_function.down.sql deleted file mode 100644 index 66f4587..0000000 --- a/db/migrations/000002_create_upsert_agent_versions_function.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP FUNCTION IF EXISTS upsert_agent_version; - -COMMIT; diff --git a/db/migrations/000002_create_upsert_agent_versions_function.up.sql 
b/db/migrations/000002_create_upsert_agent_versions_function.up.sql deleted file mode 100644 index f3f9e9b..0000000 --- a/db/migrations/000002_create_upsert_agent_versions_function.up.sql +++ /dev/null @@ -1,32 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION upsert_agent_version( - new_agent_version TEXT, - new_created_at TIMESTAMPTZ DEFAULT NOW() -) RETURNS INT AS -$upsert_agent_version$ - WITH sel AS ( - SELECT id, agent_version - FROM agent_versions - WHERE agent_version = new_agent_version - ), ups AS ( - INSERT INTO agent_versions (agent_version, created_at) - SELECT new_agent_version, new_created_at - WHERE NOT EXISTS (SELECT NULL FROM sel) AND new_agent_version IS NOT NULL - ON CONFLICT ON CONSTRAINT uq_agent_versions_agent_version DO UPDATE - SET agent_version = new_agent_version - RETURNING id, agent_version - ) - SELECT id FROM sel - UNION ALL - SELECT id FROM ups; -$upsert_agent_version$ LANGUAGE sql; - -COMMENT ON FUNCTION upsert_agent_version IS - 'Takes an agent version string and inserts it into the database if it does not exist.' - 'Returns its ID. The function tries to minimize the insert operations and only does' - 'them if the agent version does not already exist in the database. If there was an' - 'insert from another transaction in between it resorts to an upsert by overwriting' - 'the existing agent version with the same value. 
This is done so that the ID is returned.'; - -COMMIT; diff --git a/db/migrations/000003_create_multi_addresses_table.down.sql b/db/migrations/000003_create_multi_addresses_table.down.sql deleted file mode 100644 index b187aa6..0000000 --- a/db/migrations/000003_create_multi_addresses_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS multi_addresses; - -COMMIT; diff --git a/db/migrations/000003_create_multi_addresses_table.up.sql b/db/migrations/000003_create_multi_addresses_table.up.sql deleted file mode 100644 index 913a03c..0000000 --- a/db/migrations/000003_create_multi_addresses_table.up.sql +++ /dev/null @@ -1,70 +0,0 @@ -BEGIN; - --- The `multi_addresses` table keeps track of all ever encountered multi addresses. --- Some of these multi addresses can be associated with additional information. -CREATE TABLE multi_addresses -( - -- An internal unique id that identifies this multi address. - id INT GENERATED ALWAYS AS IDENTITY, - -- The autonomous system number that this multi address belongs to. - asn INT, - -- If NULL this multi address could not be associated with a cloud provider. - -- If not NULL the integer corresponds to the UdgerDB datacenter ID. - is_cloud INT, - -- A boolean value that indicates whether this multi address is a relay address. - is_relay BOOLEAN, - -- A boolean value that indicates whether this multi address is a publicly reachable one. - is_public BOOLEAN, - -- The derived IPv4 or IPv6 address that was used to determine the country etc. - addr INET, - -- Indicates if the multi_address has multiple IP addresses. Could happen for dnsaddr multi addresses. - -- We moved the above IP address properties back to this table because of these numbers from a couple of months long - -- crawler deployment: - -- multi_address_count ip_address_count - -- 896879 1 - -- 133 2 - -- 2 3 - -- 1 14 - -- This means the vast minority is only linked to multiple IP addresses. 
- -- If this flag is true there are corresponding IP addresses. - has_many_addrs BOOLEAN, - -- Indicates whether the resolver went over this multi address and tried to derived information from it - resolved BOOLEAN NOT NULL DEFAULT FALSE, - -- The country that this multi address belongs to in the form of a two letter country code. - country CHAR(2) CHECK ( TRIM(country) != '' ), - -- The continent that this multi address belongs to in the form of a two letter code. - continent CHAR(2) CHECK ( TRIM(continent) != '' ), - -- The multi address in the form of `/ip4/123.456.789.123/tcp/4001`. - maddr TEXT NOT NULL CHECK ( TRIM(maddr) != '' ), - - -- When was this multi address updated the last time - updated_at TIMESTAMPTZ NOT NULL CHECK ( updated_at >= created_at ), - -- When was this multi address created - created_at TIMESTAMPTZ NOT NULL, - - -- There should only ever be distinct multi addresses here - CONSTRAINT uq_multi_addresses_address UNIQUE (maddr), - - PRIMARY KEY (id) -); - --- Create an index for all rows that have has_many_addrs set to NULL. This means this row wasn't resolved yet. -CREATE INDEX idx_multi_addresses_unresolved ON multi_addresses (created_at) WHERE (resolved IS NULL); - -COMMENT ON TABLE multi_addresses IS '' - 'The `multi_addresses` table keeps track of all ever encountered multi addresses.' - 'Some of these multi addresses can be associated with additional information.'; -COMMENT ON COLUMN multi_addresses.id IS 'An internal unique id that identifies this multi address.'; -COMMENT ON COLUMN multi_addresses.asn IS 'The autonomous system number that this multi address belongs to.'; -COMMENT ON COLUMN multi_addresses.is_cloud IS 'If NULL this multi address could not be associated with a cloud provider. 
If not NULL the integer corresponds to the UdgerDB datacenter ID.'; -COMMENT ON COLUMN multi_addresses.is_relay IS 'A boolean value that indicates whether this multi address is a relay address.'; -COMMENT ON COLUMN multi_addresses.is_public IS 'A boolean value that indicates whether this multi address is a publicly reachable one.'; -COMMENT ON COLUMN multi_addresses.addr IS 'The derived IPv4 or IPv6 address of this multi address.'; -COMMENT ON COLUMN multi_addresses.has_many_addrs IS 'Indicates if the multi_address has multiple IP addresses. Could happen for dnsaddr multi addresses.'; -COMMENT ON COLUMN multi_addresses.country IS 'The country that this multi address belongs to in the form of a two letter country code.'; -COMMENT ON COLUMN multi_addresses.continent IS 'The continent that this multi address belongs to in the form of a two letter code.'; -COMMENT ON COLUMN multi_addresses.maddr IS 'The multi address in the form of `/ip4/123.456.789.123/tcp/4001`.'; -COMMENT ON COLUMN multi_addresses.updated_at IS 'Timestamp of when this multi address was updated.'; -COMMENT ON COLUMN multi_addresses.created_at IS 'Timestamp of when this multi address was created.'; - -COMMIT; diff --git a/db/migrations/000004_create_upsert_multi_addresses_function.down.sql b/db/migrations/000004_create_upsert_multi_addresses_function.down.sql deleted file mode 100644 index 467646e..0000000 --- a/db/migrations/000004_create_upsert_multi_addresses_function.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP FUNCTION IF EXISTS upsert_multi_addresses; diff --git a/db/migrations/000004_create_upsert_multi_addresses_function.up.sql b/db/migrations/000004_create_upsert_multi_addresses_function.up.sql deleted file mode 100644 index 2680048..0000000 --- a/db/migrations/000004_create_upsert_multi_addresses_function.up.sql +++ /dev/null @@ -1,32 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION upsert_multi_addresses( - new_multi_addresses TEXT[], - new_created_at TIMESTAMPTZ DEFAULT NOW() -) RETURNS TABLE 
(id INT) AS -$upsert_multi_addresses$ - WITH input AS ( - SELECT DISTINCT unnest AS maddr - FROM UNNEST(new_multi_addresses) unnest - ), sel AS (-- select all existing multi_addresses - SELECT multi_addresses.id, multi_addresses.maddr - FROM input - INNER JOIN multi_addresses USING (maddr) - ), ups AS (-- upsert all multi_addresses that don't exist yet - INSERT INTO multi_addresses (maddr, updated_at, created_at) - SELECT input.maddr, new_created_at, new_created_at - FROM input - LEFT JOIN sel USING (maddr) - WHERE sel.maddr IS NULL - ORDER BY input.maddr - ON CONFLICT ON CONSTRAINT uq_multi_addresses_address DO UPDATE - SET maddr = multi_addresses.maddr - RETURNING id, maddr - ) - SELECT id FROM sel - UNION - SELECT id FROM ups - ORDER BY id; -$upsert_multi_addresses$ LANGUAGE sql; - -COMMIT; diff --git a/db/migrations/000005_create_peers_table.down.sql b/db/migrations/000005_create_peers_table.down.sql deleted file mode 100644 index 1bf814e..0000000 --- a/db/migrations/000005_create_peers_table.down.sql +++ /dev/null @@ -1,6 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS peers; -DROP TABLE IF EXISTS protocols_sets; - -COMMIT; diff --git a/db/migrations/000005_create_peers_table.up.sql b/db/migrations/000005_create_peers_table.up.sql deleted file mode 100644 index ce24c1b..0000000 --- a/db/migrations/000005_create_peers_table.up.sql +++ /dev/null @@ -1,71 +0,0 @@ -BEGIN; - - --- Activate intarray extension for efficient array operations -CREATE EXTENSION IF NOT EXISTS intarray; - --- Since the set of protocols for a particular peer does not change very often in between crawls, --- this table holds particular sets of protocols which other tables can reference and save space. -CREATE TABLE protocols_sets -( - -- An internal unique id that identifies a unique set of protocols. - -- We could also just use the hash below but since protocol sets are - -- referenced many times having just a 4 byte instead of 32 byte ID - -- can make huge storage difference. 
- id INT GENERATED ALWAYS AS IDENTITY, - -- The protocol IDs of this protocol set. The IDs reference the protocols table (no foreign key checks). - -- Note: there's an invariant regarding the INT type. Don't increase it to BIGINT without changing protocolsSetHash. - protocol_ids INT[] NOT NULL CHECK ( array_length(protocol_ids, 1) IS NOT NULL ), - -- The hash digest of the sorted protocol ids to allow a unique constraint - hash BYTEA NOT NULL, - - CONSTRAINT uq_protocols_sets_hash UNIQUE (hash), - - PRIMARY KEY (id) -); - -CREATE INDEX idx_protocols_sets_protocol_ids on protocols_sets USING GIST (protocol_ids); - -COMMENT ON TABLE protocols_sets IS '' - 'Since the set of protocols for a particular peer does not change very often in between crawls,' - 'this table holds particular sets of protocols which other tables can reference and save space.'; -COMMENT ON COLUMN protocols_sets.id IS 'An internal unique id that identifies a unique set of protocols.'; -COMMENT ON COLUMN protocols_sets.protocol_ids IS 'The protocol IDs of this protocol set. The IDs reference the protocols table (no foreign key checks).'; -COMMENT ON COLUMN protocols_sets.hash IS 'The hash digest of the sorted protocol ids to allow a unique constraint.'; - --- The `peers` table keeps track of all peers ever found in the DHT -CREATE TABLE peers -( - -- The peer ID as a database-friendly integer - id BIGINT GENERATED ALWAYS AS IDENTITY, - --- The current agent version of the peer (updated if changed). - agent_version_id INT, - - -- The set of protocols that this peer currently supports (updated if changed). - protocols_set_id INT, - - -- The peer ID in the form of Qm... or 12D3... - multi_hash TEXT NOT NULL CHECK ( TRIM(multi_hash) != '' ), - - -- When was the peer updated the last time. - updated_at TIMESTAMPTZ NOT NULL CHECK ( updated_at >= created_at ), - - -- When was this peer instance created. - -- This gives a pretty accurate idea of - -- when this peer was seen the first time. 
- created_at TIMESTAMPTZ NOT NULL, - - -- When was the peer seen for the last time - last_seen_at TIMESTAMPTZ NOT NULL CHECK ( last_seen_at >= created_at ), - - CONSTRAINT fk_peers_agent_version_id FOREIGN KEY (agent_version_id) REFERENCES agent_versions (id) ON DELETE SET NULL, - CONSTRAINT fk_peers_protocols_set_id FOREIGN KEY (protocols_set_id) REFERENCES protocols_sets (id) ON DELETE SET NULL, - - -- There should only ever be distinct peer multi hash here - CONSTRAINT uq_peers_multi_hash UNIQUE (multi_hash), - - PRIMARY KEY (id) -); - -COMMIT; diff --git a/db/migrations/000006_create_keys_table.down.sql b/db/migrations/000006_create_keys_table.down.sql deleted file mode 100644 index fa7a0f4..0000000 --- a/db/migrations/000006_create_keys_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS keys; - -COMMIT; diff --git a/db/migrations/000006_create_keys_table.up.sql b/db/migrations/000006_create_keys_table.up.sql deleted file mode 100644 index 30285dc..0000000 --- a/db/migrations/000006_create_keys_table.up.sql +++ /dev/null @@ -1,17 +0,0 @@ -BEGIN; - -CREATE TABLE keys -( - id INT GENERATED ALWAYS AS IDENTITY, - -- Use peer ID for keys that are also peers - peer_id BIGINT, - -- The peer ID in the form of Qm... or 12D3... 
- multi_hash TEXT UNIQUE, - - PRIMARY KEY (id), - - CONSTRAINT fk_keys_peer_id FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE SET NULL, - CONSTRAINT chk_keys_multi_hash_or_peer_id CHECK (peer_id IS NOT NULL OR (multi_hash IS NOT NULL AND TRIM(multi_hash) != '')) -); - -COMMIT; diff --git a/db/migrations/000007_create_peer_logs_table.down.sql b/db/migrations/000007_create_peer_logs_table.down.sql deleted file mode 100644 index c84d924..0000000 --- a/db/migrations/000007_create_peer_logs_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; -DROP TRIGGER IF EXISTS on_peer_update ON peers; -DROP FUNCTION IF EXISTS insert_peer_log; -DROP TABLE IF EXISTS peer_logs; -COMMIT; diff --git a/db/migrations/000007_create_peer_logs_table.up.sql b/db/migrations/000007_create_peer_logs_table.up.sql deleted file mode 100644 index dbaf222..0000000 --- a/db/migrations/000007_create_peer_logs_table.up.sql +++ /dev/null @@ -1,43 +0,0 @@ -BEGIN; - -CREATE TABLE peer_logs -( - id INT GENERATED ALWAYS AS IDENTITY, - peer_id BIGINT NOT NULL, - field TEXT NOT NULL, - old TEXT NOT NULL, - new TEXT NOT NULL, - created_at TIMESTAMPTZ NOT NULL, - - CONSTRAINT fk_peer_logs_peer_id FOREIGN KEY (peer_id) REFERENCES peers (id), - - PRIMARY KEY (id, created_at) -) PARTITION BY RANGE (created_at); - -CREATE INDEX idx_peer_logs_peer_id_created_at ON peer_logs (peer_id, created_at); - -CREATE OR REPLACE FUNCTION insert_peer_log() - RETURNS TRIGGER AS -$$ -BEGIN - IF OLD.agent_version_id != NEW.agent_version_id THEN - INSERT INTO peer_logs (peer_id, field, old, new, created_at) - VALUES (NEW.id, 'agent_version_id', OLD.agent_version_id, NEW.agent_version_id, NOW()); - END IF; - - IF OLD.protocols_set_id != NEW.protocols_set_id THEN - INSERT INTO peer_logs (peer_id, field, old, new, created_at) - VALUES (NEW.id, 'protocols_set_id', OLD.protocols_set_id, NEW.protocols_set_id, NOW()); - END IF; - - RETURN NEW; -END; -$$ LANGUAGE 'plpgsql'; - -CREATE TRIGGER on_peer_update - BEFORE UPDATE - ON peers 
- FOR EACH ROW -EXECUTE PROCEDURE insert_peer_log(); - -COMMIT; diff --git a/db/migrations/000008_create_requests_table.down.sql b/db/migrations/000008_create_requests_table.down.sql deleted file mode 100644 index 8ba465c..0000000 --- a/db/migrations/000008_create_requests_table.down.sql +++ /dev/null @@ -1,6 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS requests; -DROP TYPE IF EXISTS message_type; - -COMMIT; diff --git a/db/migrations/000008_create_requests_table.up.sql b/db/migrations/000008_create_requests_table.up.sql deleted file mode 100644 index b1b5638..0000000 --- a/db/migrations/000008_create_requests_table.up.sql +++ /dev/null @@ -1,42 +0,0 @@ -BEGIN; - - -CREATE TYPE message_type AS ENUM ( - 'PUT_VALUE', - 'GET_VALUE', - 'ADD_PROVIDER', - 'GET_PROVIDERS', - 'FIND_NODE', - 'PING' -); - -COMMENT ON TYPE message_type IS '' - 'The different types of messages from https://github.com/libp2p/go-libp2p-kad-dht/blob/master/pb/dht.proto#L15-L21.'; - -CREATE TABLE requests -( - -- An internal unique id that identifies a crawl. - id INT GENERATED ALWAYS AS IDENTITY, - -- Timestamp of when this request started. - timestamp TIMESTAMPTZ NOT NULL, - -- The message type of this request - request_type message_type NOT NULL, - -- Peer ID of the ant doing the request, - ant_id BIGINT NOT NULL, - -- The peer related to this request - peer_id BIGINT NOT NULL, - -- The key ID of this request (?) - key_id INT NOT NULL, - -- An array of all multi address IDs of the remote peer. 
- multi_address_ids INT[], - - CONSTRAINT fk_requests_ant_id FOREIGN KEY (ant_id) REFERENCES peers (id) ON DELETE SET NULL, - CONSTRAINT fk_requests_peer_id FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE SET NULL, - CONSTRAINT fk_requests_key_id FOREIGN KEY (key_id) REFERENCES keys (id) ON DELETE SET NULL, - - PRIMARY KEY (id, timestamp) -) PARTITION BY RANGE (timestamp); - -CREATE INDEX idx_requests_timestamp ON requests (timestamp); - -COMMIT; diff --git a/db/migrations/000009_create_upsert_peer_function.down.sql b/db/migrations/000009_create_upsert_peer_function.down.sql deleted file mode 100644 index 5c18d52..0000000 --- a/db/migrations/000009_create_upsert_peer_function.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP FUNCTION upsert_peer; diff --git a/db/migrations/000009_create_upsert_peer_function.up.sql b/db/migrations/000009_create_upsert_peer_function.up.sql deleted file mode 100644 index 778b8d4..0000000 --- a/db/migrations/000009_create_upsert_peer_function.up.sql +++ /dev/null @@ -1,30 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION upsert_peer( - new_multi_hash TEXT, - new_agent_version_id INT DEFAULT NULL, - new_protocols_set_id INT DEFAULT NULL, - new_created_at TIMESTAMPTZ DEFAULT NOW(), - new_last_seen_at TIMESTAMPTZ DEFAULT NOW() -) RETURNS INT AS -$upsert_peer$ - WITH ups AS ( - INSERT INTO peers AS p (multi_hash, agent_version_id, protocols_set_id, created_at, updated_at, last_seen_at) - VALUES (new_multi_hash, new_agent_version_id, new_protocols_set_id, new_created_at, new_created_at, new_last_seen_at) - ON CONFLICT ON CONSTRAINT uq_peers_multi_hash DO UPDATE - SET multi_hash = EXCLUDED.multi_hash, - agent_version_id = COALESCE(EXCLUDED.agent_version_id, p.agent_version_id), - protocols_set_id = COALESCE(EXCLUDED.protocols_set_id, p.protocols_set_id), - updated_at = CASE - WHEN EXCLUDED.updated_at >= p.created_at - THEN EXCLUDED.updated_at - ELSE p.updated_at - END, - last_seen_at = EXCLUDED.last_seen_at - RETURNING id, multi_hash - ) - SELECT 
id FROM ups; - -$upsert_peer$ LANGUAGE sql; - -COMMIT; diff --git a/db/migrations/000010_create_peers_x_multi_addresses_table.down.sql b/db/migrations/000010_create_peers_x_multi_addresses_table.down.sql deleted file mode 100644 index 5f0a13a..0000000 --- a/db/migrations/000010_create_peers_x_multi_addresses_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS peers_x_multi_addresses; - -COMMIT; diff --git a/db/migrations/000010_create_peers_x_multi_addresses_table.up.sql b/db/migrations/000010_create_peers_x_multi_addresses_table.up.sql deleted file mode 100644 index 115d9ec..0000000 --- a/db/migrations/000010_create_peers_x_multi_addresses_table.up.sql +++ /dev/null @@ -1,24 +0,0 @@ --- Begin the transaction -BEGIN; - --- The `peers_x_multi_addresses` table keeps track of --- the association of multi addresses to peers. -CREATE TABLE peers_x_multi_addresses -( - -- The peer ID of which we want to track the multi address - peer_id BIGINT NOT NULL, - -- The ID of the multi address that has been seen for the above peer - multi_address_id INT NOT NULL, - - -- The peer ID should always point to an existing peer in the DB - CONSTRAINT fk_peers_x_multi_addresses_peer_id FOREIGN KEY (peer_id) REFERENCES peers (id) ON DELETE CASCADE, - -- The maddr ID should always point to an existing multi address in the DB - CONSTRAINT fk_peers_x_multi_addresses_multi_address_id FOREIGN KEY (multi_address_id) REFERENCES multi_addresses (id) ON DELETE CASCADE, - - PRIMARY KEY (peer_id, multi_address_id) -); - -CREATE INDEX idx_peers_x_multi_addresses_peer_id ON peers_x_multi_addresses (peer_id); - --- End the transaction -COMMIT; diff --git a/db/migrations/000011_create_insert_request_function.down.sql b/db/migrations/000011_create_insert_request_function.down.sql deleted file mode 100644 index 6bd8cfe..0000000 --- a/db/migrations/000011_create_insert_request_function.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP FUNCTION IF EXISTS insert_request; - 
-COMMIT; diff --git a/db/migrations/000011_create_insert_request_function.up.sql b/db/migrations/000011_create_insert_request_function.up.sql deleted file mode 100644 index 7b645b1..0000000 --- a/db/migrations/000011_create_insert_request_function.up.sql +++ /dev/null @@ -1,60 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION insert_request( - new_timestamp TIMESTAMPTZ, - new_request_type message_type, - new_ant TEXT, - new_multi_hash TEXT, -- for peer - new_key_multi_hash TEXT, - new_multi_addresses TEXT[], - new_agent_version_id INT, - new_protocols_set_id INT -) RETURNS RECORD AS -$insert_request$ -DECLARE - new_multi_addresses_ids INT[]; - new_request_id INT; - new_peer_id INT; - new_ant_id INT; - new_key_id INT; -BEGIN - SELECT upsert_peer( - new_multi_hash, - new_agent_version_id, - new_protocols_set_id, - new_timestamp - ) INTO new_peer_id; - - SELECT id INTO new_ant_id - FROM peers - WHERE multi_hash = new_ant; - - SELECT insert_key(new_key_multi_hash) INTO new_key_id; - - SELECT array_agg(id) FROM upsert_multi_addresses(new_multi_addresses) INTO new_multi_addresses_ids; - - DELETE - FROM peers_x_multi_addresses pxma - WHERE peer_id = new_peer_id; - - INSERT INTO peers_x_multi_addresses (peer_id, multi_address_id) - SELECT new_peer_id, new_multi_address_id - FROM unnest(new_multi_addresses_ids) new_multi_address_id - ON CONFLICT DO NOTHING; - - INSERT INTO requests (timestamp, request_type, ant_id, peer_id, key_id, multi_address_ids) - SELECT new_timestamp, - new_request_type, - new_ant_id, - new_peer_id, - new_key_id, - new_multi_addresses_ids - RETURNING id INTO new_request_id; - - RETURN ROW(new_peer_id, new_request_id, new_key_id); -END; -$insert_request$ LANGUAGE plpgsql; - -COMMIT; - - diff --git a/db/migrations/000012_create_insert_key_function.down.sql b/db/migrations/000012_create_insert_key_function.down.sql deleted file mode 100644 index 8f95783..0000000 --- a/db/migrations/000012_create_insert_key_function.down.sql +++ /dev/null @@ -1,5 +0,0 @@ 
-BEGIN; - -DROP FUNCTION IF EXISTS insert_key; - -COMMIT; diff --git a/db/migrations/000012_create_insert_key_function.up.sql b/db/migrations/000012_create_insert_key_function.up.sql deleted file mode 100644 index f14786b..0000000 --- a/db/migrations/000012_create_insert_key_function.up.sql +++ /dev/null @@ -1,36 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION insert_key(key_multi_hash TEXT) -RETURNS INT AS -$insert_key$ -DECLARE - new_id INT; - key_peer_id INT; - key_model_id INT; -BEGIN - SELECT id INTO key_model_id FROM keys k WHERE k.multi_hash = key_multi_hash; - - IF key_model_id IS NULL THEN - SELECT id INTO key_peer_id FROM peers p WHERE p.multi_hash = key_multi_hash; - - IF key_peer_id IS NOT NULL THEN - INSERT INTO keys (peer_id, multi_hash) - VALUES (key_peer_id, NULL) - ON CONFLICT DO NOTHING - RETURNING id INTO new_id; - ELSE - INSERT INTO keys (peer_id, multi_hash) - VALUES (NULL, key_multi_hash) - ON CONFLICT DO NOTHING - RETURNING id INTO new_id; - END IF; - ELSE - - new_id := key_model_id; - END IF; - - RETURN new_id; -END; -$insert_key$ LANGUAGE plpgsql; - -COMMIT; diff --git a/db/migrations/000013_create_protocols_table.down.sql b/db/migrations/000013_create_protocols_table.down.sql deleted file mode 100644 index ced7f7d..0000000 --- a/db/migrations/000013_create_protocols_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS protocols; - -COMMIT; diff --git a/db/migrations/000013_create_protocols_table.up.sql b/db/migrations/000013_create_protocols_table.up.sql deleted file mode 100644 index 168b636..0000000 --- a/db/migrations/000013_create_protocols_table.up.sql +++ /dev/null @@ -1,25 +0,0 @@ -BEGIN; - --- Holds all the different protocols that the crawler came across -CREATE TABLE protocols -( - -- A unique id that identifies a agent version. - -- Note: there's an invariant regarding the INT type. Don't increase it to BIGINT without changing protocolsSetHash. 
- id INT GENERATED ALWAYS AS IDENTITY, - -- Timestamp of when this protocol was seen the last time. - created_at TIMESTAMPTZ NOT NULL, - -- The full protocol string. - protocol TEXT NOT NULL CHECK ( TRIM(protocol) != '' ), - - -- There should only be unique protocol strings in this table - CONSTRAINT uq_protocols_protocol UNIQUE (protocol), - - PRIMARY KEY (id) -); - -COMMENT ON TABLE protocols IS 'Holds all the different protocols that the crawler came across.'; -COMMENT ON COLUMN protocols.id IS 'A unique id that identifies a agent version.'; -COMMENT ON COLUMN protocols.created_at IS 'Timestamp of when this protocol was seen the last time.'; -COMMENT ON COLUMN protocols.protocol IS 'The full protocol string.'; - -COMMIT; diff --git a/db/migrations/000014_create_upsert_protocols_function.down.sql b/db/migrations/000014_create_upsert_protocols_function.down.sql deleted file mode 100644 index 574ac5d..0000000 --- a/db/migrations/000014_create_upsert_protocols_function.down.sql +++ /dev/null @@ -1,7 +0,0 @@ -BEGIN; - -DROP FUNCTION IF EXISTS upsert_protocol; -DROP FUNCTION IF EXISTS upsert_protocols; -DROP TYPE IF EXISTS id_type; - -COMMIT; diff --git a/db/migrations/000014_create_upsert_protocols_function.up.sql b/db/migrations/000014_create_upsert_protocols_function.up.sql deleted file mode 100644 index 334aff9..0000000 --- a/db/migrations/000014_create_upsert_protocols_function.up.sql +++ /dev/null @@ -1,57 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION upsert_protocols( - new_protocols TEXT[], - new_created_at TIMESTAMPTZ DEFAULT NOW() -) RETURNS TABLE (id INT) AS -$upsert_protocols$ - WITH input AS ( - SELECT DISTINCT unnest AS protocol - FROM UNNEST(new_protocols) unnest - WHERE new_protocols IS NOT NULL - ), sel AS (-- select all existing protocols - SELECT protocols.id, protocols.protocol - FROM input - INNER JOIN protocols USING (protocol) - ), ups AS (-- upsert all protocols that don't exist yet - INSERT INTO protocols (protocol, created_at) - SELECT 
input.protocol, new_created_at - FROM input - LEFT JOIN sel USING (protocol) - WHERE sel.protocol IS NULL - ORDER BY input.protocol - ON CONFLICT ON CONSTRAINT uq_protocols_protocol DO UPDATE - SET protocol = protocols.protocol - RETURNING id, protocol - ) - SELECT id FROM sel - UNION ALL - SELECT id FROM ups - ORDER BY id; -$upsert_protocols$ LANGUAGE sql; - - -CREATE OR REPLACE FUNCTION upsert_protocol( - new_protocol TEXT, - new_created_at TIMESTAMPTZ DEFAULT NOW() -) RETURNS INT AS -$upsert_protocol$ - WITH sel AS ( - SELECT id, protocol - FROM protocols - WHERE protocol = new_protocol - ), ups AS ( - INSERT INTO protocols (protocol, created_at) - SELECT new_protocol, new_created_at - WHERE NOT EXISTS (SELECT NULL FROM sel) AND new_protocol IS NOT NULL - ON CONFLICT ON CONSTRAINT uq_protocols_protocol DO UPDATE - SET protocol = new_protocol - RETURNING id, protocol - ) - SELECT id FROM sel - UNION ALL - SELECT id FROM ups; -$upsert_protocol$ LANGUAGE sql; - - -COMMIT; diff --git a/db/migrations/000015_create_upsert_protocol_set_id.down.sql b/db/migrations/000015_create_upsert_protocol_set_id.down.sql deleted file mode 100644 index faf9b25..0000000 --- a/db/migrations/000015_create_upsert_protocol_set_id.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP FUNCTION IF EXISTS upsert_protocol_set_id; - -COMMIT; diff --git a/db/migrations/000015_create_upsert_protocol_set_id.up.sql b/db/migrations/000015_create_upsert_protocol_set_id.up.sql deleted file mode 100644 index 4c6d5a3..0000000 --- a/db/migrations/000015_create_upsert_protocol_set_id.up.sql +++ /dev/null @@ -1,25 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION upsert_protocol_set_id( - new_protocol_ids INT[] -) RETURNS INT AS -$upsert_protocol_set_id$ - WITH sel AS ( - SELECT id, protocol_ids - FROM protocols_sets - WHERE protocol_ids = new_protocol_ids - ), ups AS ( - INSERT INTO protocols_sets (protocol_ids, hash) - SELECT new_protocol_ids, sha256(new_protocol_ids::TEXT::BYTEA) - WHERE NOT EXISTS 
(SELECT NULL FROM sel) - ON CONFLICT (hash) DO UPDATE - SET protocol_ids = new_protocol_ids - RETURNING id, protocol_ids - ) - SELECT id FROM sel - UNION - SELECT id FROM ups; - -$upsert_protocol_set_id$ LANGUAGE sql; - -COMMIT; diff --git a/db/migrations/000016_create_ip_addresses_table.down.sql b/db/migrations/000016_create_ip_addresses_table.down.sql deleted file mode 100644 index 69d55e0..0000000 --- a/db/migrations/000016_create_ip_addresses_table.down.sql +++ /dev/null @@ -1,3 +0,0 @@ -BEGIN; -DROP TABLE IF EXISTS ip_addresses; -COMMIT; diff --git a/db/migrations/000016_create_ip_addresses_table.up.sql b/db/migrations/000016_create_ip_addresses_table.up.sql deleted file mode 100644 index f88c538..0000000 --- a/db/migrations/000016_create_ip_addresses_table.up.sql +++ /dev/null @@ -1,48 +0,0 @@ -BEGIN; - --- Rows in the `ip_addresses` capture information for a particular IP address --- that were derived from a multi address -CREATE TABLE ip_addresses -( - -- A unique id that identifies this ip address. - id INT GENERATED ALWAYS AS IDENTITY, - -- The multi address that this ip address belongs to. - multi_address_id INT NOT NULL, - -- The autonomous system number that this ip address belongs to. - asn INT, - -- If NULL this address could not be associated with a cloud provider. - -- If not NULL the integer corresponds to the UdgerDB datacenter ID. - is_cloud INT, - -- When was this IP address updated - updated_at TIMESTAMPTZ NOT NULL, - -- When was this IP address created - created_at TIMESTAMPTZ NOT NULL, - -- The country that this address belongs to in the form of a two to three letter country code - country CHAR(2) CHECK ( TRIM(country) != '' ), -- make it not null so that the unique constraint applies IPs without country. - -- The continent that this address belongs to in the form of a two letter code. - continent CHAR(2) CHECK ( TRIM(continent) != '' ), - -- The IP address derived from the reference multi address. 
- address INET NOT NULL, - - - -- Only one address/multi_address_id combination should be allowed. - CONSTRAINT uq_ip_addresses_multi_address_id_address UNIQUE (multi_address_id, address), - - -- The multi_address_id should reference the proper table row. - CONSTRAINT fk_ip_addresses_multi_address_id FOREIGN KEY (multi_address_id) REFERENCES multi_addresses (id) ON DELETE CASCADE, - - PRIMARY KEY (id) -); - -COMMENT ON TABLE ip_addresses IS 'Rows in the `ip_addresses` capture information for a particular IP address that were derived from a multi address'; -COMMENT ON COLUMN ip_addresses.id IS 'An internal unique id that identifies this ip address.'; -COMMENT ON COLUMN ip_addresses.multi_address_id IS 'The multi address that this ip address belongs to.'; -COMMENT ON COLUMN ip_addresses.asn IS 'The autonomous system number that this ip address belongs to.'; -COMMENT ON COLUMN ip_addresses.is_cloud IS 'If NULL this address could not be associated with a cloud provider. If not NULL the integer corresponds to the UdgerDB datacenter ID.'; -COMMENT ON COLUMN ip_addresses.updated_at IS 'Timestamp of when this IP address was updated.'; -COMMENT ON COLUMN ip_addresses.created_at IS 'Timestamp of when this IP address was created.'; -COMMENT ON COLUMN ip_addresses.country IS 'The country that this address belongs to in the form of a two to three letter country code'; -COMMENT ON COLUMN ip_addresses.continent IS 'The continent that this address belongs to in the form of a two letter code.'; -COMMENT ON COLUMN ip_addresses.address IS 'The IP address derived from the reference multi address.'; - -COMMIT; diff --git a/db/migrations/000017_create_requests_dnf_table.down.sql b/db/migrations/000017_create_requests_dnf_table.down.sql deleted file mode 100644 index 24710df..0000000 --- a/db/migrations/000017_create_requests_dnf_table.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP TABLE IF EXISTS requests_denormalized; - -COMMIT; diff --git 
a/db/migrations/000017_create_requests_dnf_table.up.sql b/db/migrations/000017_create_requests_dnf_table.up.sql deleted file mode 100644 index 9880a29..0000000 --- a/db/migrations/000017_create_requests_dnf_table.up.sql +++ /dev/null @@ -1,29 +0,0 @@ -BEGIN; - -CREATE TABLE requests_denormalized -( - -- An internal unique id that identifies a crawl. - id BIGINT GENERATED ALWAYS AS IDENTITY, - -- Timestamp of when this request started. - request_started_at TIMESTAMPTZ NOT NULL, - -- The message type of this request - request_type message_type NOT NULL, - -- Peer ID of the ant doing the request, - ant_multihash TEXT NOT NULL, - -- The peer related to this request - peer_multihash TEXT NOT NULL, - -- The key of this request - key_multihash TEXT NOT NULL, - -- An array of all multi addresses of the remote peer. - multi_addresses TEXT[], - - agent_version TEXT, - - normalized_at TIMESTAMPTZ, - - PRIMARY KEY (id, request_started_at) -) PARTITION BY RANGE (request_started_at); - -CREATE INDEX idx_requests_dnf_timestamp ON requests_denormalized (request_started_at); - -COMMIT; diff --git a/db/migrations/000018_add_protocols_to_requests_dnf_table.down.sql b/db/migrations/000018_add_protocols_to_requests_dnf_table.down.sql deleted file mode 100644 index 8654232..0000000 --- a/db/migrations/000018_add_protocols_to_requests_dnf_table.down.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE requests_denormalized - DROP COLUMN IF EXISTS protocols; diff --git a/db/migrations/000018_add_protocols_to_requests_dnf_table.up.sql b/db/migrations/000018_add_protocols_to_requests_dnf_table.up.sql deleted file mode 100644 index b947dd9..0000000 --- a/db/migrations/000018_add_protocols_to_requests_dnf_table.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -ALTER TABLE requests_denormalized ADD COLUMN protocols TEXT[]; - -COMMIT; diff --git a/db/migrations/000019_add_protocols_to_requests_table.down.sql b/db/migrations/000019_add_protocols_to_requests_table.down.sql deleted file mode 100644 index 
708341f..0000000 --- a/db/migrations/000019_add_protocols_to_requests_table.down.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE requests - DROP COLUMN IF EXISTS protocols_set_id; diff --git a/db/migrations/000019_add_protocols_to_requests_table.up.sql b/db/migrations/000019_add_protocols_to_requests_table.up.sql deleted file mode 100644 index 1b9ec24..0000000 --- a/db/migrations/000019_add_protocols_to_requests_table.up.sql +++ /dev/null @@ -1,7 +0,0 @@ -BEGIN; - -ALTER TABLE requests - ADD COLUMN protocols_set_id INT, - ADD CONSTRAINT fk_requests_protocols_set_id FOREIGN KEY (protocols_set_id) REFERENCES protocols_sets (id) ON DELETE SET NULL; - -COMMIT; diff --git a/db/migrations/000020_alter_insert_requests_function.down.sql b/db/migrations/000020_alter_insert_requests_function.down.sql deleted file mode 100644 index 6bd8cfe..0000000 --- a/db/migrations/000020_alter_insert_requests_function.down.sql +++ /dev/null @@ -1,5 +0,0 @@ -BEGIN; - -DROP FUNCTION IF EXISTS insert_request; - -COMMIT; diff --git a/db/migrations/000020_alter_insert_requests_function.up.sql b/db/migrations/000020_alter_insert_requests_function.up.sql deleted file mode 100644 index d807cf8..0000000 --- a/db/migrations/000020_alter_insert_requests_function.up.sql +++ /dev/null @@ -1,59 +0,0 @@ -BEGIN; - -CREATE OR REPLACE FUNCTION insert_request( - new_timestamp TIMESTAMPTZ, - new_request_type message_type, - new_ant TEXT, - new_multi_hash TEXT, - new_key_multi_hash TEXT, - new_multi_addresses TEXT[], - new_agent_version_id INT, - new_protocols_set_id INT -) RETURNS RECORD AS -$insert_request$ -DECLARE - new_multi_addresses_ids INT[]; - new_request_id INT; - new_peer_id INT; - new_ant_id INT; - new_key_id INT; -BEGIN - SELECT upsert_peer( - new_multi_hash, - new_agent_version_id, - new_protocols_set_id, - new_timestamp - ) INTO new_peer_id; - - SELECT id INTO new_ant_id - FROM peers - WHERE multi_hash = new_ant; - - SELECT insert_key(new_key_multi_hash) INTO new_key_id; - - SELECT array_agg(id) 
FROM upsert_multi_addresses(new_multi_addresses) INTO new_multi_addresses_ids; - - DELETE - FROM peers_x_multi_addresses pxma - WHERE peer_id = new_peer_id; - - INSERT INTO peers_x_multi_addresses (peer_id, multi_address_id) - SELECT new_peer_id, new_multi_address_id - FROM unnest(new_multi_addresses_ids) new_multi_address_id - ON CONFLICT DO NOTHING; - - INSERT INTO requests (timestamp, request_type, ant_id, peer_id, key_id, multi_address_ids, protocols_set_id) - SELECT new_timestamp, - new_request_type, - new_ant_id, - new_peer_id, - new_key_id, - new_multi_addresses_ids, - new_protocols_set_id - RETURNING id INTO new_request_id; - - RETURN ROW(new_peer_id, new_request_id, new_key_id); -END; -$insert_request$ LANGUAGE plpgsql; - -COMMIT; diff --git a/db/migrations/000021_create_index_idx_normalized_at_is_null.down.sql b/db/migrations/000021_create_index_idx_normalized_at_is_null.down.sql deleted file mode 100644 index 3401050..0000000 --- a/db/migrations/000021_create_index_idx_normalized_at_is_null.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP INDEX IF EXISTS idx_normalized_at_is_null; diff --git a/db/migrations/000021_create_index_idx_normalized_at_is_null.up.sql b/db/migrations/000021_create_index_idx_normalized_at_is_null.up.sql deleted file mode 100644 index 48920fa..0000000 --- a/db/migrations/000021_create_index_idx_normalized_at_is_null.up.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE INDEX idx_normalized_at_is_null -ON requests_denormalized (normalized_at) -WHERE normalized_at IS NULL; From 81b993386b742c46e6aa7cff9dfe2b2331c3c29f Mon Sep 17 00:00:00 2001 From: Steph Samson Date: Thu, 28 Nov 2024 17:02:45 +0900 Subject: [PATCH 02/23] prep clickhouse integration --- cmd/honeypot/main.go | 175 +++++++++++++++++++++++++++---------------- db/client.go | 46 ++++++++++++ go.mod | 27 ++++--- go.sum | 67 ++++++++++++----- queen.go | 5 +- 5 files changed, 229 insertions(+), 91 deletions(-) create mode 100644 db/client.go diff --git a/cmd/honeypot/main.go 
b/cmd/honeypot/main.go index 309c112..3d5d389 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -2,22 +2,21 @@ package main import ( "context" - "flag" "fmt" "os" "os/signal" - "strconv" "syscall" "time" logging "github.com/ipfs/go-log/v2" "github.com/probe-lab/ants-watch" - "github.com/probe-lab/ants-watch/metrics" + "github.com/probe-lab/ants-watch/db" + "github.com/urfave/cli/v2" ) var logger = logging.Logger("ants-queen") -func runQueen(ctx context.Context, nebulaPostgresStr string, nPorts, firstPort int, upnp bool) error { +func runQueen(ctx context.Context, nebulaPostgresStr string, nPorts, firstPort int, upnp bool, clickhouseClient *db.Client) error { var queen *ants.Queen var err error @@ -27,9 +26,9 @@ func runQueen(ctx context.Context, nebulaPostgresStr string, nPorts, firstPort i } if upnp { - queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, 0, 0) + queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, 0, 0, clickhouseClient) } else { - queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, uint16(nPorts), uint16(firstPort)) + queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, uint16(nPorts), uint16(firstPort), clickhouseClient) } if err != nil { return fmt.Errorf("failed to create queen: %w", err) @@ -65,77 +64,127 @@ func main() { logging.SetLogLevel("dht", "error") logging.SetLogLevel("basichost", "info") - queenCmd := flag.NewFlagSet("queen", flag.ExitOnError) - nebulaPostgresStr := *queenCmd.String("postgres", "", "Postgres connection string, postgres://user:password@host:port/dbname") - if len(nebulaPostgresStr) == 0 { - nebulaPostgresStr = os.Getenv("NEBULA_POSTGRES_CONNURL") + app := &cli.App{ + Name: "ants-watch", + Usage: "Get DHT clients in your p2p network using a honeypot", + Commands: []*cli.Command{ + { + Name: "queen", + Usage: "Starts the queen service", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "clickhouseAddress", + Usage: "ClickHouse address containing the host and port, 
127.0.0.1:9000", + EnvVars: []string{"CLICKHOUSE_ADDRESS"}, + }, + &cli.StringFlag{ + Name: "clickhouseDatabase", + Usage: "The ClickHouse database where ants requests will be recorded", + EnvVars: []string{"CLICKHOUSE_DATABASE"}, + }, + &cli.StringFlag{ + Name: "clickhouseUsername", + Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", + EnvVars: []string{"CLICKHOUSE_USERNAME"}, + }, + &cli.StringFlag{ + Name: "clickhousePassword", + Usage: "The password for the ClickHouse user", + EnvVars: []string{"CLICKHOUSE_PASSWORD"}, + }, + &cli.StringFlag{ + Name: "nebulaDatabaseConnString", + Usage: "The connection string for the Postgres Nebula database", + EnvVars: []string{"NEBULA_DB_CONNSTRING"}, + }, + &cli.IntFlag{ + Name: "nPorts", + Value: 128, + Usage: "Number of ports ants can listen on", + }, + &cli.IntFlag{ + Name: "firstPort", + Value: 6000, + Usage: "First port ants can listen on", + }, + &cli.BoolFlag{ + Name: "upnp", + Value: false, + Usage: "Enable UPnP", + }, + }, + Action: func(c *cli.Context) error { + return runQueenCommand(c) + }, + }, + { + Name: "health", + Usage: "Checks the health of the service", + Action: func(c *cli.Context) error { + return healthCheckCommand() + }, + }, + }, } - nPorts := queenCmd.Int("nPorts", 128, "Number of ports ants can listen on") - firstPort := queenCmd.Int("firstPort", 6000, "First port ants can listen on") - upnp := queenCmd.Bool("upnp", false, "Enable UPnP") - - - healthCmd := flag.NewFlagSet("health", flag.ExitOnError) - - if len(os.Args) < 2 { - fmt.Println("Expected 'queen' or 'health' subcommands") + if err := app.Run(os.Args); err != nil { + logger.Warnf("Error running app: %v\n", err) os.Exit(1) } - if os.Args[1] != "health" { - metricsHost := os.Getenv("METRICS_HOST") - metricsPort := os.Getenv("METRICS_PORT") + logger.Debugln("Work is done") +} - p, err := strconv.Atoi(metricsPort) - if err != nil { - logger.Errorf("Port should be an int %v\n", metricsPort) - } - 
logger.Infoln("Serving metrics endpoint") - go metrics.ListenAndServe(metricsHost, p) - } +func runQueenCommand(c *cli.Context) error { + nebulaPostgresStr := c.String("nebulaDatabaseConnString") + nPorts := c.Int("nPorts") + firstPort := c.Int("firstPort") + upnp := c.Bool("upnp") - switch os.Args[1] { - case "queen": - queenCmd.Parse(os.Args[2:]) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + address := c.String("clickhouseAddress") + database := c.String("clickhouseDatabase") + username := c.String("clickhouseUsername") + password := c.String("clickhousePassword") - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + client, err := db.NewDatabaseClient( + ctx, address, database, username, password, + ) + if err != nil { + logger.Errorln(err) + } - errChan := make(chan error, 1) - go func() { - errChan <- runQueen(ctx, nebulaPostgresStr, *nPorts, *firstPort, *upnp) - }() + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - select { - case err := <-errChan: - if err != nil { - logger.Error(err) - os.Exit(1) - } - case sig := <-sigChan: - logger.Infof("Received signal: %v, initiating shutdown...", sig) - cancel() - <-errChan - } + errChan := make(chan error, 1) - case "health": - healthCmd.Parse(os.Args[2:]) + go func() { + errChan <- runQueen(ctx, nebulaPostgresStr, nPorts, firstPort, upnp, client) + }() - ctx := context.Background() - if err := HealthCheck(&ctx); err != nil { - fmt.Printf("Health check failed: %v\n", err) - os.Exit(1) + select { + case err := <-errChan: + if err != nil { + logger.Error(err) + return err } - fmt.Println("Health check passed") - - default: - fmt.Printf("Unknown command: %s\n", os.Args[1]) - os.Exit(1) + case sig := <-sigChan: + logger.Infof("Received signal: %v, initiating shutdown...", sig) + cancel() + <-errChan } + return nil +} - 
logger.Debugln("Work is done") +func healthCheckCommand() error { + ctx := context.Background() + if err := HealthCheck(&ctx); err != nil { + logger.Infof("Health check failed: %v\n", err) + return err + } + logger.Infoln("Health check passed") + return nil } diff --git a/db/client.go b/db/client.go new file mode 100644 index 0000000..bdd04a2 --- /dev/null +++ b/db/client.go @@ -0,0 +1,46 @@ +package db + +import ( + "context" + + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + // "github.com/dennis-tra/nebula-crawler/config" + lru "github.com/hashicorp/golang-lru" + mt "github.com/probe-lab/ants-watch/metrics" + // log "github.com/ipfs/go-log/v2" +) + +type Client struct { + ctx context.Context + conn driver.Conn + + agentVersion *lru.Cache + protocols *lru.Cache + protocolsSets *lru.Cache + + telemetry *mt.Telemetry +} + +func NewDatabaseClient(ctx context.Context, address, database, username, password string) (*Client, error) { + logger.Infoln("Creating new database client...") + + conn, err := clickhouse.Open(&clickhouse.Options{ + Addr: []string{address}, + Auth: clickhouse.Auth{ + Database: database, + Username: username, + Password: password, + }, + Debug: true, + }) + + if err != nil { + return nil, err + } + + return &Client{ + ctx: ctx, + conn: conn, + }, nil +} diff --git a/go.mod b/go.mod index aad5242..310ffa0 100644 --- a/go.mod +++ b/go.mod @@ -33,16 +33,22 @@ require ( ) require ( - github.com/dennis-tra/nebula-crawler v0.0.0-20241010113859-38e4489a8fa7 + github.com/ClickHouse/clickhouse-go/v2 v2.30.0 + github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43 github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/urfave/cli/v2 v2.27.5 ) require ( + github.com/ClickHouse/ch-go v0.63.1 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cpuguy83/go-md2man/v2 
v2.0.5 // indirect github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731 // indirect github.com/ethereum/go-ethereum v1.14.11 // indirect + github.com/go-faster/city v1.0.1 // indirect + github.com/go-faster/errors v0.7.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect @@ -50,13 +56,16 @@ require ( github.com/holiman/uint256 v1.3.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oschwald/maxminddb-golang v1.13.1 // indirect + github.com/paulmach/orb v0.11.1 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/urfave/cli/v2 v2.27.4 // indirect github.com/volatiletech/inflect v0.0.1 // indirect github.com/volatiletech/randomize v0.0.1 // indirect github.com/wlynxg/anet v0.0.5 // indirect @@ -102,7 +111,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/klauspost/compress v1.17.10 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -168,20 +177,20 @@ require ( github.com/stretchr/testify v1.9.0 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect 
go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.30.0 - go.opentelemetry.io/otel/metric v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + go.opentelemetry.io/otel v1.32.0 + go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 + go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/tools v0.26.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect diff --git a/go.sum b/go.sum index e428fc9..3e8d92c 100644 --- a/go.sum +++ b/go.sum @@ -69,6 +69,10 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6 github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM= +github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0= +github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= +github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go 
v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -83,6 +87,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apmckinlay/gsuneido v0.0.0-20190404155041-0b6cd442a18f/go.mod h1:JU2DOj5Fc6rol0yaT79Csr47QR0vONGwJtBNGRD7jmc= @@ -155,16 +161,16 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dennis-tra/nebula-crawler v0.0.0-20241010113859-38e4489a8fa7 h1:8gnxwUfRJ21iNrU/DotVBXgs7CYo42u7R5pFo825ojM= -github.com/dennis-tra/nebula-crawler v0.0.0-20241010113859-38e4489a8fa7/go.mod h1:hk34teSCN+dviOIY3bfup6cGFwfIoCTtJb+gjNh9jZg= +github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43 h1:WaLad0gLikVTLgltYg61Iv7MpWiMEVzkisO4yvXBHa0= +github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43/go.mod h1:cU29FznX+nakuMKTg+adDtEzzFGC3oOKDf32c4Ps5+M= github.com/dhui/dktest v0.4.3 
h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0= github.com/dhui/dktest v0.4.3/go.mod h1:zNK8IwktWzQRm6I/l2Wjp7MakiyaFWv4G1hjmodmMTs= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= -github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.0+incompatible h1:BNb1QY6o4JdKpqwi9IB+HUYcRRrVN4aGFUTvDmWYK1A= +github.com/docker/docker v27.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -212,6 +218,10 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -282,6 +292,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -429,8 +440,8 @@ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -455,8 +466,9 
@@ github.com/kat-co/vala v0.0.0-20170210184112-42e1d8b61f12/go.mod h1:u9MdXq/QageO github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= -github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -570,6 +582,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -634,11 +647,16 @@ github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= github.com/pion/datachannel v1.5.9/go.mod h1:kDUuk4CU4Uxp82NH4LQZbISULkX/HtzKa4P7ldf9izE= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= @@ -747,10 +765,14 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm 
v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -823,13 +845,14 @@ github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNG github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c= github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ= github.com/urfave/cli v1.22.2/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= -github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/volatiletech/inflect v0.0.1 h1:2a6FcMQyhmPZcLa+uet3VJ8gLn/9svWhJxJYwvE8KsU= @@ -850,8 +873,14 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -862,6 +891,7 @@ go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dY go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -874,22 +904,22 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= @@ -932,6 +962,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= @@ -1182,8 +1213,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/queen.go b/queen.go index 4ff3f01..e91417c 100644 --- a/queen.go +++ b/queen.go @@ -57,11 +57,13 @@ type Queen struct { mmc *maxmind.Client uclient *udger.Client + clickhouseClient *db.Client + resolveBatchSize int resolveBatchTime int // in sec } -func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPorts, firstPort uint16) (*Queen, error) { +func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPorts, firstPort uint16, clickhouseClient 
*db.Client) (*Queen, error) { nebulaDB := NewNebulaDB(dbConnString) keysDB := NewKeysDB(keysDbPath) peerstore, err := pstoremem.NewPeerstore() @@ -88,6 +90,7 @@ func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPort uclient: getUdgerClient(), resolveBatchSize: getBatchSize(), resolveBatchTime: getBatchTime(), + clickhouseClient: clickhouseClient, } if nPorts != 0 { From a6db66b8de94897ef7b2869c57ad282ce123f04b Mon Sep 17 00:00:00 2001 From: Steph Samson Date: Sat, 30 Nov 2024 00:03:42 +0900 Subject: [PATCH 03/23] address comments from dennis --- cmd/honeypot/config.go | 27 ++++++++++ cmd/honeypot/health.go | 16 ++++-- cmd/honeypot/main.go | 111 +++++++++++++++++------------------------ 3 files changed, 86 insertions(+), 68 deletions(-) create mode 100644 cmd/honeypot/config.go diff --git a/cmd/honeypot/config.go b/cmd/honeypot/config.go new file mode 100644 index 0000000..ffde150 --- /dev/null +++ b/cmd/honeypot/config.go @@ -0,0 +1,27 @@ +package main + +var rootConfig = struct { + AntsClickhouseAddress string + AntsClickhouseDatabase string + AntsClickhouseUsername string + AntsClickhousePassword string + + NebulaDBConnString string + KeyDBPath string + + NumPorts int + FirstPort int + UPnp bool +}{ + AntsClickhouseAddress: "", + AntsClickhouseDatabase: "", + AntsClickhouseUsername: "", + AntsClickhousePassword: "", + + NebulaDBConnString: "", + KeyDBPath: "keys.db", + + NumPorts: 128, + FirstPort: 6000, + UPnp: false, +} diff --git a/cmd/honeypot/health.go b/cmd/honeypot/health.go index 46ba5e0..5469afa 100644 --- a/cmd/honeypot/health.go +++ b/cmd/honeypot/health.go @@ -1,25 +1,33 @@ package main import ( - "context" "fmt" "net/http" "os" + + "github.com/urfave/cli/v2" ) -func HealthCheck(ctx *context.Context) error { +func HealthCheck(c *cli.Context) error { endpoint := fmt.Sprintf( "http://%s:%s/health", os.Getenv("METRICS_HOST"), os.Getenv("METRICS_PORT"), ) - resp, err := http.Get(endpoint) + req, err := 
http.NewRequestWithContext(c, http.MethodGet, endpoint, nil) + if err != nil { + return err + } + + resp, err := http.DefaultClient.Do(req) if err != nil { return err } + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { return nil } - return fmt.Errorf("unhealthy") + return fmt.Errorf("unhealthy: status code %d", resp.StatusCode) } diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index 3d5d389..46c2461 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -16,19 +16,14 @@ import ( var logger = logging.Logger("ants-queen") -func runQueen(ctx context.Context, nebulaPostgresStr string, nPorts, firstPort int, upnp bool, clickhouseClient *db.Client) error { +func runQueen(ctx context.Context, clickhouseClient *db.Client) error { var queen *ants.Queen var err error - keyDBPath := os.Getenv("KEY_DB_PATH") - if len(keyDBPath) == 0 { - keyDBPath = "keys.db" - } - - if upnp { - queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, 0, 0, clickhouseClient) + if rootConfig.UPnp { + queen, err = ants.NewQueen(ctx, rootConfig.NebulaDBConnString, rootConfig.KeyDBPath, 0, 0, clickhouseClient) } else { - queen, err = ants.NewQueen(ctx, nebulaPostgresStr, keyDBPath, uint16(nPorts), uint16(firstPort), clickhouseClient) + queen, err = ants.NewQueen(ctx, rootConfig.NebulaDBConnString, rootConfig.KeyDBPath, uint16(rootConfig.NumPorts), uint16(rootConfig.FirstPort), clickhouseClient) } if err != nil { return fmt.Errorf("failed to create queen: %w", err) @@ -73,37 +68,48 @@ func main() { Usage: "Starts the queen service", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "clickhouseAddress", - Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", - EnvVars: []string{"CLICKHOUSE_ADDRESS"}, + Name: "ants.clickhouse.address", + Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", + EnvVars: []string{"ANTS_CLICKHOUSE_ADDRESS"}, + Destination: &rootConfig.AntsClickhouseAddress, + Value: rootConfig.AntsClickhouseAddress, 
}, &cli.StringFlag{ - Name: "clickhouseDatabase", - Usage: "The ClickHouse database where ants requests will be recorded", - EnvVars: []string{"CLICKHOUSE_DATABASE"}, + Name: "ants.clickhouse.database", + Usage: "The ClickHouse database where ants requests will be recorded", + EnvVars: []string{"ANTS_CLICKHOUSE_DATABASE"}, + Destination: &rootConfig.AntsClickhouseDatabase, + Value: rootConfig.AntsClickhouseDatabase, }, &cli.StringFlag{ - Name: "clickhouseUsername", - Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", - EnvVars: []string{"CLICKHOUSE_USERNAME"}, + Name: "ants.clickhouse.username", + Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", + EnvVars: []string{"ANTS_CLICKHOUSE_USERNAME"}, + Destination: &rootConfig.AntsClickhouseUsername, + Value: rootConfig.AntsClickhouseUsername, }, &cli.StringFlag{ - Name: "clickhousePassword", + Name: "ants.clickhouse.password", Usage: "The password for the ClickHouse user", - EnvVars: []string{"CLICKHOUSE_PASSWORD"}, + EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, }, &cli.StringFlag{ - Name: "nebulaDatabaseConnString", + Name: "nebula.db.connstring", Usage: "The connection string for the Postgres Nebula database", EnvVars: []string{"NEBULA_DB_CONNSTRING"}, }, + &cli.PathFlag{ + Name: "key.db_path", + Usage: "The path to the data store containing the keys", + EnvVars: []string{"KEY_DB_PATH"}, + }, &cli.IntFlag{ - Name: "nPorts", + Name: "num_ports", Value: 128, Usage: "Number of ports ants can listen on", }, &cli.IntFlag{ - Name: "firstPort", + Name: "first_port", Value: 6000, Usage: "First port ants can listen on", }, @@ -113,21 +119,23 @@ func main() { Usage: "Enable UPnP", }, }, - Action: func(c *cli.Context) error { - return runQueenCommand(c) - }, + Action: runQueenCommand, }, { - Name: "health", - Usage: "Checks the health of the service", - Action: func(c *cli.Context) error { - return healthCheckCommand() - }, + Name: "health", + 
Usage: "Checks the health of the service", + Action: HealthCheck, }, }, } - if err := app.Run(os.Args); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sctx, stop := signal.NotifyContext(ctx, syscall.SIGINT) + defer stop() + + if err := app.RunContext(sctx, os.Args); err != nil { logger.Warnf("Error running app: %v\n", err) os.Exit(1) } @@ -136,33 +144,22 @@ func main() { } func runQueenCommand(c *cli.Context) error { - nebulaPostgresStr := c.String("nebulaDatabaseConnString") - nPorts := c.Int("nPorts") - firstPort := c.Int("firstPort") - upnp := c.Bool("upnp") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - address := c.String("clickhouseAddress") - database := c.String("clickhouseDatabase") - username := c.String("clickhouseUsername") - password := c.String("clickhousePassword") - client, err := db.NewDatabaseClient( - ctx, address, database, username, password, + c.Context, + rootConfig.AntsClickhouseAddress, + rootConfig.AntsClickhouseDatabase, + rootConfig.AntsClickhouseUsername, + rootConfig.AntsClickhousePassword, ) + if err != nil { logger.Errorln(err) } - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - errChan := make(chan error, 1) go func() { - errChan <- runQueen(ctx, nebulaPostgresStr, nPorts, firstPort, upnp, client) + errChan <- runQueen(c.Context, client) }() select { @@ -171,20 +168,6 @@ func runQueenCommand(c *cli.Context) error { logger.Error(err) return err } - case sig := <-sigChan: - logger.Infof("Received signal: %v, initiating shutdown...", sig) - cancel() - <-errChan - } - return nil -} - -func healthCheckCommand() error { - ctx := context.Background() - if err := HealthCheck(&ctx); err != nil { - logger.Infof("Health check failed: %v\n", err) - return err } - logger.Infoln("Health check passed") return nil } From c1aff601c048013c66da740db6f137bce175bc56 Mon Sep 17 00:00:00 2001 From: Steph Samson Date: Mon, 2 Dec 
2024 22:09:36 +0900 Subject: [PATCH 04/23] fix go errors --- cmd/honeypot/config.go | 2 +- cmd/honeypot/health.go | 2 +- cmd/honeypot/main.go | 42 ++++--- db/client.go | 51 ++++++-- .../000001_create_requests_table.up.sql | 4 +- queen.go | 110 +++--------------- util.go | 2 +- 7 files changed, 91 insertions(+), 122 deletions(-) diff --git a/cmd/honeypot/config.go b/cmd/honeypot/config.go index ffde150..a77924e 100644 --- a/cmd/honeypot/config.go +++ b/cmd/honeypot/config.go @@ -1,6 +1,6 @@ package main -var rootConfig = struct { +var RootConfig = struct { AntsClickhouseAddress string AntsClickhouseDatabase string AntsClickhouseUsername string diff --git a/cmd/honeypot/health.go b/cmd/honeypot/health.go index 5469afa..c475e7d 100644 --- a/cmd/honeypot/health.go +++ b/cmd/honeypot/health.go @@ -14,7 +14,7 @@ func HealthCheck(c *cli.Context) error { os.Getenv("METRICS_HOST"), os.Getenv("METRICS_PORT"), ) - req, err := http.NewRequestWithContext(c, http.MethodGet, endpoint, nil) + req, err := http.NewRequestWithContext(c.Context, http.MethodGet, endpoint, nil) if err != nil { return err } diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index 46c2461..ffa33dd 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -20,10 +20,10 @@ func runQueen(ctx context.Context, clickhouseClient *db.Client) error { var queen *ants.Queen var err error - if rootConfig.UPnp { - queen, err = ants.NewQueen(ctx, rootConfig.NebulaDBConnString, rootConfig.KeyDBPath, 0, 0, clickhouseClient) + if RootConfig.UPnp { + queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, 0, 0, clickhouseClient) } else { - queen, err = ants.NewQueen(ctx, rootConfig.NebulaDBConnString, rootConfig.KeyDBPath, uint16(rootConfig.NumPorts), uint16(rootConfig.FirstPort), clickhouseClient) + queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, uint16(RootConfig.NumPorts), uint16(RootConfig.FirstPort), clickhouseClient) } if err != nil { 
return fmt.Errorf("failed to create queen: %w", err) @@ -71,32 +71,36 @@ func main() { Name: "ants.clickhouse.address", Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", EnvVars: []string{"ANTS_CLICKHOUSE_ADDRESS"}, - Destination: &rootConfig.AntsClickhouseAddress, - Value: rootConfig.AntsClickhouseAddress, + Destination: &RootConfig.AntsClickhouseAddress, + Value: RootConfig.AntsClickhouseAddress, }, &cli.StringFlag{ Name: "ants.clickhouse.database", Usage: "The ClickHouse database where ants requests will be recorded", EnvVars: []string{"ANTS_CLICKHOUSE_DATABASE"}, - Destination: &rootConfig.AntsClickhouseDatabase, - Value: rootConfig.AntsClickhouseDatabase, + Destination: &RootConfig.AntsClickhouseDatabase, + Value: RootConfig.AntsClickhouseDatabase, }, &cli.StringFlag{ Name: "ants.clickhouse.username", Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", EnvVars: []string{"ANTS_CLICKHOUSE_USERNAME"}, - Destination: &rootConfig.AntsClickhouseUsername, - Value: rootConfig.AntsClickhouseUsername, + Destination: &RootConfig.AntsClickhouseUsername, + Value: RootConfig.AntsClickhouseUsername, }, &cli.StringFlag{ - Name: "ants.clickhouse.password", - Usage: "The password for the ClickHouse user", - EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, + Name: "ants.clickhouse.password", + Usage: "The password for the ClickHouse user", + EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, + Destination: &RootConfig.AntsClickhousePassword, + Value: RootConfig.AntsClickhousePassword, }, &cli.StringFlag{ - Name: "nebula.db.connstring", - Usage: "The connection string for the Postgres Nebula database", - EnvVars: []string{"NEBULA_DB_CONNSTRING"}, + Name: "nebula.db.connstring", + Usage: "The connection string for the Postgres Nebula database", + EnvVars: []string{"NEBULA_DB_CONNSTRING"}, + Destination: &RootConfig.NebulaDBConnString, + Value: RootConfig.NebulaDBConnString, }, &cli.PathFlag{ Name: "key.db_path", @@ -146,10 
+150,10 @@ func main() { func runQueenCommand(c *cli.Context) error { client, err := db.NewDatabaseClient( c.Context, - rootConfig.AntsClickhouseAddress, - rootConfig.AntsClickhouseDatabase, - rootConfig.AntsClickhouseUsername, - rootConfig.AntsClickhousePassword, + RootConfig.AntsClickhouseAddress, + RootConfig.AntsClickhouseDatabase, + RootConfig.AntsClickhouseUsername, + RootConfig.AntsClickhousePassword, ) if err != nil { diff --git a/db/client.go b/db/client.go index bdd04a2..cc9c5f4 100644 --- a/db/client.go +++ b/db/client.go @@ -2,23 +2,19 @@ package db import ( "context" + "time" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - // "github.com/dennis-tra/nebula-crawler/config" - lru "github.com/hashicorp/golang-lru" + "github.com/golang-migrate/migrate/v4/database/multistmt" + "github.com/google/uuid" mt "github.com/probe-lab/ants-watch/metrics" - // log "github.com/ipfs/go-log/v2" ) type Client struct { ctx context.Context conn driver.Conn - agentVersion *lru.Cache - protocols *lru.Cache - protocolsSets *lru.Cache - telemetry *mt.Telemetry } @@ -39,8 +35,49 @@ func NewDatabaseClient(ctx context.Context, address, database, username, passwor return nil, err } + if err := conn.Ping(ctx); err != nil { + return nil, err + } + return &Client{ ctx: ctx, conn: conn, }, nil } + +type BatchRequest struct { + ctx context.Context + + insertStatement string + + conn driver.Conn + batch driver.Batch +} + +func NewBatch(ctx context.Context, conn driver.Conn, insertStatement string) (*BatchRequest, error) { + batch, err := conn.PrepareBatch(ctx, insertStatement, driver.WithReleaseConnection()) + if err != nil { + return nil, err + } + + return &BatchRequest{ + ctx: ctx, + insertStatement: insertStatement, + conn: conn, + batch: batch, + }, nil +} + +func (b *BatchRequest) Append(id, antMultihash, remoteMultihash, agentVersion string, protocols []string, startedAt time.Time, requestType, keyMultihash string, multiAddresses 
[]string) error { + return b.batch.Append( + id, + antMultihash, + remoteMultihash, + agentVersion, + protocols, + startedAt, + requestType, + keyMultihash, + multiAddresses, + ) +} diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index 64cd9d0..53b2cdd 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -4,8 +4,8 @@ CREATE TABLE requests ant_multihash String, remote_multihash String, agent_version Nullable(String), - protocols Nullable(Array(String)), - started_at DateTime64(3), + protocols Array(Nullable(String)), + started_at DateTime, request_type Enum8( 'PUT_VALUE', 'GET_VALUE', diff --git a/queen.go b/queen.go index e91417c..d8a2f9f 100644 --- a/queen.go +++ b/queen.go @@ -7,7 +7,6 @@ import ( "strconv" "time" - "github.com/dennis-tra/nebula-crawler/config" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" "github.com/ipfs/go-log/v2" @@ -25,12 +24,9 @@ import ( "github.com/probe-lab/go-libdht/kad/trie" "github.com/volatiletech/null/v8" - "github.com/dennis-tra/nebula-crawler/maxmind" - "github.com/dennis-tra/nebula-crawler/udger" "github.com/patrickmn/go-cache" "github.com/probe-lab/ants-watch/db" "github.com/probe-lab/ants-watch/db/models" - tele "github.com/probe-lab/ants-watch/metrics" ) var logger = log.Logger("ants-queen") @@ -53,10 +49,6 @@ type Queen struct { portsOccupancy []bool firstPort uint16 - dbc *db.DBClient - mmc *maxmind.Client - uclient *udger.Client - clickhouseClient *db.Client resolveBatchSize int @@ -71,11 +63,6 @@ func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPort return nil, err } - mmc, err := maxmind.NewClient(os.Getenv("MAXMIND_ASN_DB"), os.Getenv("MAXMIND_COUNTRY_DB")) - if err != nil { - logger.Errorf("Failed to initialized Maxmind client: %v\n", err) - } - queen := &Queen{ nebulaDB: nebulaDB, keysDB: keysDB, @@ -85,9 +72,6 @@ func 
NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPort antsLogs: make(chan antslog.RequestLog, 1024), agentsCache: cache.New(4*24*time.Hour, time.Hour), // 4 days of cache, clean every hour upnp: true, - dbc: getDbClient(ctx), - mmc: mmc, - uclient: getUdgerClient(), resolveBatchSize: getBatchSize(), resolveBatchTime: getBatchTime(), clickhouseClient: clickhouseClient, @@ -104,63 +88,6 @@ func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPort return queen, nil } -func getDbClient(ctx context.Context) *db.DBClient { - dbPort, err := getEnvInt("DB_PORT", 5432) - if err != nil { - logger.Errorf("Port must be an integer: %w", err) - } - mP, _ := tele.NewMeterProvider() - - tracesHost, tracesHostSet := os.LookupEnv("TRACES_HOST") - if !tracesHostSet { - tracesHost = "" - } - tracesPort, err := getEnvInt("TRACES_PORT", 0) - if err != nil { - logger.Errorf("Port must be an integer: %w", err) - } - tP, err := tele.NewTracerProvider( - ctx, - tracesHost, - tracesPort, - ) - if err != nil { - logger.Errorf("new tracer provider: %w", err) - } - - dbc, err := db.InitDBClient(ctx, &config.Database{ - DatabaseHost: os.Getenv("DB_HOST"), - DatabasePort: dbPort, - DatabaseName: os.Getenv("DB_DATABASE"), - DatabaseUser: os.Getenv("DB_USER"), - DatabasePassword: os.Getenv("DB_PASSWORD"), - MeterProvider: mP, - TracerProvider: tP, - ProtocolsCacheSize: 100, - ProtocolsSetCacheSize: 200, - AgentVersionsCacheSize: 200, - DatabaseSSLMode: os.Getenv("DB_SSLMODE"), - }) - if err != nil { - logger.Errorf("Failed to initialize DB client: %v\n", err) - } - return dbc -} - -func getUdgerClient() *udger.Client { - filePathUdger := os.Getenv("UDGER_FILEPATH") - if filePathUdger != "" { - uclient, err := udger.NewClient(filePathUdger) - if err != nil { - logger.Errorf("Failed to initialize Udger client with %s: %v\n", filePathUdger, err) - return nil - } - return uclient - } - logger.Warn("Missing UDGER_FILEPATH: skipping udger") - return nil -} - 
func getBatchSize() int { batchSizeEnvVal := os.Getenv("BATCH_SIZE") if len(batchSizeEnvVal) == 0 { @@ -239,12 +166,12 @@ func (q *Queen) consumeAntsLogs(ctx context.Context) { case <-ctx.Done(): logger.Debugln("Gracefully shutting down ants...") logger.Debugln("Number of requests remaining to be inserted:", len(requests)) - if len(requests) > 0 { - err := db.BulkInsertRequests(context.Background(), q.dbc.Handler, requests) - if err != nil { - logger.Fatalf("Error inserting remaining requests: %v", err) - } - } + // if len(requests) > 0 { + // err := db.BulkInsertRequests(context.Background(), q.dbc.Handler, requests) + // if err != nil { + // logger.Fatalf("Error inserting remaining requests: %v", err) + // } + // } return case log := <-q.antsLogs: @@ -278,20 +205,20 @@ func (q *Queen) consumeAntsLogs(ctx context.Context) { } requests = append(requests, request) if len(requests) >= q.resolveBatchSize { - err = db.BulkInsertRequests(ctx, q.dbc.Handler, requests) - if err != nil { - logger.Errorf("Error inserting requests: %v", err) - } - requests = requests[:0] + // err = db.BulkInsertRequests(ctx, q.dbc.Handler, requests) + // if err != nil { + // logger.Errorf("Error inserting requests: %v", err) + // } + // requests = requests[:0] } case <-ticker.C: if len(requests) > 0 { - err := db.BulkInsertRequests(ctx, q.dbc.Handler, requests) - if err != nil { - logger.Fatalf("Error inserting requests: %v", err) - } - requests = requests[:0] + // err := db.BulkInsertRequests(ctx, q.dbc.Handler, requests) + // if err != nil { + // logger.Fatalf("Error inserting requests: %v", err) + // } + // requests = requests[:0] } default: @@ -383,9 +310,10 @@ func (q *Queen) routine(ctx context.Context) { for _, ant := range q.ants { logger.Debugf("Upserting ant: %v\n", ant.Host.ID().String()) - antID, err := q.dbc.UpsertPeer(ctx, ant.Host.ID().String(), null.StringFrom(ant.UserAgent), nil, time.Now()) + // antID, err := q.dbc.UpsertPeer(ctx, ant.Host.ID().String(), 
null.StringFrom(ant.UserAgent), nil, time.Now()) if err != nil { - logger.Errorf("antID: %d could not be inserted because of %v", antID, err) + logger.Errorf("Couldn't upsert") + // logger.Errorf("antID: %d could not be inserted because of %v", antID, err) } } diff --git a/util.go b/util.go index 9b7cff8..a154e2c 100644 --- a/util.go +++ b/util.go @@ -17,7 +17,7 @@ import ( ) const ( - CRAWL_INTERVAL = 30 * time.Minute + CRAWL_INTERVAL = 120 * time.Minute BUCKET_SIZE = 20 ) From 337a12fda24380715b91795c4b7ed3bb916ab975 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 15:43:33 +0100 Subject: [PATCH 05/23] add: clickhouse SSL option --- cmd/honeypot/config.go | 2 ++ cmd/honeypot/main.go | 8 ++++++++ db/client.go | 17 ++++++++++++++--- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/cmd/honeypot/config.go b/cmd/honeypot/config.go index a77924e..aadd591 100644 --- a/cmd/honeypot/config.go +++ b/cmd/honeypot/config.go @@ -5,6 +5,7 @@ var RootConfig = struct { AntsClickhouseDatabase string AntsClickhouseUsername string AntsClickhousePassword string + AntsClickhouseSSL bool NebulaDBConnString string KeyDBPath string @@ -17,6 +18,7 @@ var RootConfig = struct { AntsClickhouseDatabase: "", AntsClickhouseUsername: "", AntsClickhousePassword: "", + AntsClickhouseSSL: true, NebulaDBConnString: "", KeyDBPath: "keys.db", diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index ffa33dd..a7ea0d0 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -95,6 +95,13 @@ func main() { Destination: &RootConfig.AntsClickhousePassword, Value: RootConfig.AntsClickhousePassword, }, + &cli.BoolFlag{ + Name: "ants.clickhouse.ssl", + Usage: "Whether to use SSL for the ClickHouse connection", + EnvVars: []string{"ANTS_CLICKHOUSE_SSL"}, + Destination: &RootConfig.AntsClickhouseSSL, + Value: RootConfig.AntsClickhouseSSL, + }, &cli.StringFlag{ Name: "nebula.db.connstring", Usage: "The connection string for the Postgres Nebula database", @@ 
-154,6 +161,7 @@ func runQueenCommand(c *cli.Context) error { RootConfig.AntsClickhouseDatabase, RootConfig.AntsClickhouseUsername, RootConfig.AntsClickhousePassword, + RootConfig.AntsClickhouseSSL, ) if err != nil { diff --git a/db/client.go b/db/client.go index cc9c5f4..cda2b0c 100644 --- a/db/client.go +++ b/db/client.go @@ -2,12 +2,13 @@ package db import ( "context" + "crypto/tls" + "golang.org/x/net/proxy" + "net" "time" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - "github.com/golang-migrate/migrate/v4/database/multistmt" - "github.com/google/uuid" mt "github.com/probe-lab/ants-watch/metrics" ) @@ -18,7 +19,7 @@ type Client struct { telemetry *mt.Telemetry } -func NewDatabaseClient(ctx context.Context, address, database, username, password string) (*Client, error) { +func NewDatabaseClient(ctx context.Context, address, database, username, password string, ssl bool) (*Client, error) { logger.Infoln("Creating new database client...") conn, err := clickhouse.Open(&clickhouse.Options{ @@ -29,6 +30,16 @@ func NewDatabaseClient(ctx context.Context, address, database, username, passwor Password: password, }, Debug: true, + DialContext: func(ctx context.Context, addr string) (net.Conn, error) { + var d proxy.ContextDialer + if ssl { + d = &tls.Dialer{} + } else { + d = &net.Dialer{} + } + + return d.DialContext(ctx, "tcp", addr) + }, }) if err != nil { From 6f15e72689952bcfc3bfe7185267e7efa9cb67eb Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 15:43:57 +0100 Subject: [PATCH 06/23] stop in case of bogus connection to database --- cmd/honeypot/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index a7ea0d0..345ca80 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -166,6 +166,7 @@ func runQueenCommand(c *cli.Context) error { if err != nil { logger.Errorln(err) + return fmt.Errorf("init database client: %w", err) } errChan := 
make(chan error, 1) From fdf2e79ac5216c7b072083c74941de7d5469bdaf Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 15:44:10 +0100 Subject: [PATCH 07/23] consolidate queen initialization --- cmd/honeypot/main.go | 67 +++++++++++++++++--------------------------- go.mod | 2 +- 2 files changed, 26 insertions(+), 43 deletions(-) diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index 345ca80..7c58155 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -16,44 +16,6 @@ import ( var logger = logging.Logger("ants-queen") -func runQueen(ctx context.Context, clickhouseClient *db.Client) error { - var queen *ants.Queen - var err error - - if RootConfig.UPnp { - queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, 0, 0, clickhouseClient) - } else { - queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, uint16(RootConfig.NumPorts), uint16(RootConfig.FirstPort), clickhouseClient) - } - if err != nil { - return fmt.Errorf("failed to create queen: %w", err) - } - - errChan := make(chan error, 1) - go func() { - logger.Debugln("Starting Queen.Run") - errChan <- queen.Run(ctx) - logger.Debugln("Queen.Run completed") - }() - - select { - case err := <-errChan: - if err != nil { - return fmt.Errorf("queen.Run returned an error: %w", err) - } - logger.Debugln("Queen.Run completed successfully") - case <-ctx.Done(): - select { - case <-errChan: - logger.Debugln("Queen.Run stopped after context cancellation") - case <-time.After(30 * time.Second): - logger.Warnln("Timeout waiting for Queen.Run to stop") - } - } - - return nil -} - func main() { logging.SetLogLevel("ants-queen", "debug") logging.SetLogLevel("dht", "error") @@ -155,6 +117,8 @@ func main() { } func runQueenCommand(c *cli.Context) error { + ctx := c.Context + client, err := db.NewDatabaseClient( c.Context, RootConfig.AntsClickhouseAddress, @@ -169,18 +133,37 @@ func runQueenCommand(c *cli.Context) error { return 
fmt.Errorf("init database client: %w", err) } - errChan := make(chan error, 1) + var queen *ants.Queen + if RootConfig.UPnp { + queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, 0, 0, client) + } else { + queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, uint16(RootConfig.NumPorts), uint16(RootConfig.FirstPort), client) + } + if err != nil { + return fmt.Errorf("failed to create queen: %w", err) + } + errChan := make(chan error, 1) go func() { - errChan <- runQueen(c.Context, client) + logger.Debugln("Starting Queen.Run") + errChan <- queen.Run(ctx) + logger.Debugln("Queen.Run completed") }() select { case err := <-errChan: if err != nil { - logger.Error(err) - return err + return fmt.Errorf("queen.Run returned an error: %w", err) + } + logger.Debugln("Queen.Run completed successfully") + case <-ctx.Done(): + select { + case <-errChan: + logger.Debugln("Queen.Run stopped after context cancellation") + case <-time.After(30 * time.Second): + logger.Warnln("Timeout waiting for Queen.Run to stop") } } + return nil } diff --git a/go.mod b/go.mod index 310ffa0..03d6c17 100644 --- a/go.mod +++ b/go.mod @@ -97,7 +97,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect - github.com/google/uuid v1.6.0 // indirect + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect From a291a24eab62979edab61994dd0246a4618f43ae Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 17:37:22 +0100 Subject: [PATCH 08/23] remove obsolete postgres models --- db/models/agent_versions.go | 1251 ------------------ db/models/boil_queries.go | 33 - db/models/boil_table_names.go | 30 - db/models/boil_types.go | 73 - db/models/boil_view_names.go | 7 - 
db/models/ip_addresses.go | 1242 ------------------ db/models/keys.go | 1167 ---------------- db/models/multi_addresses.go | 1534 ---------------------- db/models/peer_logs.go | 960 -------------- db/models/peers.go | 1972 ---------------------------- db/models/protocols.go | 918 ------------- db/models/protocols_sets.go | 1201 ----------------- db/models/psql_upsert.go | 61 - db/models/requests.go | 942 ------------- db/models/requests_denormalized.go | 1003 -------------- 15 files changed, 12394 deletions(-) delete mode 100644 db/models/agent_versions.go delete mode 100644 db/models/boil_queries.go delete mode 100644 db/models/boil_table_names.go delete mode 100644 db/models/boil_types.go delete mode 100644 db/models/boil_view_names.go delete mode 100644 db/models/ip_addresses.go delete mode 100644 db/models/keys.go delete mode 100644 db/models/multi_addresses.go delete mode 100644 db/models/peer_logs.go delete mode 100644 db/models/peers.go delete mode 100644 db/models/protocols.go delete mode 100644 db/models/protocols_sets.go delete mode 100644 db/models/psql_upsert.go delete mode 100644 db/models/requests.go delete mode 100644 db/models/requests_denormalized.go diff --git a/db/models/agent_versions.go b/db/models/agent_versions.go deleted file mode 100644 index bf273a4..0000000 --- a/db/models/agent_versions.go +++ /dev/null @@ -1,1251 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// AgentVersion is an object representing the database table. -type AgentVersion struct { // A unique id that identifies a agent version. - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - // Timestamp of when this agent version was seen the last time. - CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - // Agent version string as reported from the remote peer. - AgentVersion string `boil:"agent_version" json:"agent_version" toml:"agent_version" yaml:"agent_version"` - - R *agentVersionR `boil:"-" json:"-" toml:"-" yaml:"-"` - L agentVersionL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var AgentVersionColumns = struct { - ID string - CreatedAt string - AgentVersion string -}{ - ID: "id", - CreatedAt: "created_at", - AgentVersion: "agent_version", -} - -var AgentVersionTableColumns = struct { - ID string - CreatedAt string - AgentVersion string -}{ - ID: "agent_versions.id", - CreatedAt: "agent_versions.created_at", - AgentVersion: "agent_versions.agent_version", -} - -// Generated where - -type whereHelperint struct{ field string } - -func (w whereHelperint) EQ(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperint) NEQ(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperint) LT(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperint) LTE(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperint) GT(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } 
-func (w whereHelperint) GTE(x int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } -func (w whereHelperint) IN(slice []int) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelperint) NIN(slice []int) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) -} - -type whereHelpertime_Time struct{ field string } - -func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.EQ, x) -} -func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.NEQ, x) -} -func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - -type whereHelperstring struct{ field string } - -func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } -func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, 
x) } -func (w whereHelperstring) IN(slice []string) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelperstring) NIN(slice []string) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) -} - -var AgentVersionWhere = struct { - ID whereHelperint - CreatedAt whereHelpertime_Time - AgentVersion whereHelperstring -}{ - ID: whereHelperint{field: "\"agent_versions\".\"id\""}, - CreatedAt: whereHelpertime_Time{field: "\"agent_versions\".\"created_at\""}, - AgentVersion: whereHelperstring{field: "\"agent_versions\".\"agent_version\""}, -} - -// AgentVersionRels is where relationship names are stored. -var AgentVersionRels = struct { - Peers string -}{ - Peers: "Peers", -} - -// agentVersionR is where relationships are stored. -type agentVersionR struct { - Peers PeerSlice `boil:"Peers" json:"Peers" toml:"Peers" yaml:"Peers"` -} - -// NewStruct creates a new relationship struct -func (*agentVersionR) NewStruct() *agentVersionR { - return &agentVersionR{} -} - -func (r *agentVersionR) GetPeers() PeerSlice { - if r == nil { - return nil - } - return r.Peers -} - -// agentVersionL is where Load methods for each relationship are stored. -type agentVersionL struct{} - -var ( - agentVersionAllColumns = []string{"id", "created_at", "agent_version"} - agentVersionColumnsWithoutDefault = []string{"created_at", "agent_version"} - agentVersionColumnsWithDefault = []string{"id"} - agentVersionPrimaryKeyColumns = []string{"id"} - agentVersionGeneratedColumns = []string{"id"} -) - -type ( - // AgentVersionSlice is an alias for a slice of pointers to AgentVersion. - // This should almost always be used instead of []AgentVersion. 
- AgentVersionSlice []*AgentVersion - // AgentVersionHook is the signature for custom AgentVersion hook methods - AgentVersionHook func(context.Context, boil.ContextExecutor, *AgentVersion) error - - agentVersionQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - agentVersionType = reflect.TypeOf(&AgentVersion{}) - agentVersionMapping = queries.MakeStructMapping(agentVersionType) - agentVersionPrimaryKeyMapping, _ = queries.BindMapping(agentVersionType, agentVersionMapping, agentVersionPrimaryKeyColumns) - agentVersionInsertCacheMut sync.RWMutex - agentVersionInsertCache = make(map[string]insertCache) - agentVersionUpdateCacheMut sync.RWMutex - agentVersionUpdateCache = make(map[string]updateCache) - agentVersionUpsertCacheMut sync.RWMutex - agentVersionUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var agentVersionAfterSelectHooks []AgentVersionHook - -var agentVersionBeforeInsertHooks []AgentVersionHook -var agentVersionAfterInsertHooks []AgentVersionHook - -var agentVersionBeforeUpdateHooks []AgentVersionHook -var agentVersionAfterUpdateHooks []AgentVersionHook - -var agentVersionBeforeDeleteHooks []AgentVersionHook -var agentVersionAfterDeleteHooks []AgentVersionHook - -var agentVersionBeforeUpsertHooks []AgentVersionHook -var agentVersionAfterUpsertHooks []AgentVersionHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *AgentVersion) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. 
-func (o *AgentVersion) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *AgentVersion) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *AgentVersion) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *AgentVersion) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *AgentVersion) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. 
-func (o *AgentVersion) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *AgentVersion) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *AgentVersion) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range agentVersionAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddAgentVersionHook registers your hook function for all future operations. 
-func AddAgentVersionHook(hookPoint boil.HookPoint, agentVersionHook AgentVersionHook) { - switch hookPoint { - case boil.AfterSelectHook: - agentVersionAfterSelectHooks = append(agentVersionAfterSelectHooks, agentVersionHook) - case boil.BeforeInsertHook: - agentVersionBeforeInsertHooks = append(agentVersionBeforeInsertHooks, agentVersionHook) - case boil.AfterInsertHook: - agentVersionAfterInsertHooks = append(agentVersionAfterInsertHooks, agentVersionHook) - case boil.BeforeUpdateHook: - agentVersionBeforeUpdateHooks = append(agentVersionBeforeUpdateHooks, agentVersionHook) - case boil.AfterUpdateHook: - agentVersionAfterUpdateHooks = append(agentVersionAfterUpdateHooks, agentVersionHook) - case boil.BeforeDeleteHook: - agentVersionBeforeDeleteHooks = append(agentVersionBeforeDeleteHooks, agentVersionHook) - case boil.AfterDeleteHook: - agentVersionAfterDeleteHooks = append(agentVersionAfterDeleteHooks, agentVersionHook) - case boil.BeforeUpsertHook: - agentVersionBeforeUpsertHooks = append(agentVersionBeforeUpsertHooks, agentVersionHook) - case boil.AfterUpsertHook: - agentVersionAfterUpsertHooks = append(agentVersionAfterUpsertHooks, agentVersionHook) - } -} - -// One returns a single agentVersion record from the query. -func (q agentVersionQuery) One(ctx context.Context, exec boil.ContextExecutor) (*AgentVersion, error) { - o := &AgentVersion{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for agent_versions") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all AgentVersion records from the query. 
-func (q agentVersionQuery) All(ctx context.Context, exec boil.ContextExecutor) (AgentVersionSlice, error) { - var o []*AgentVersion - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to AgentVersion slice") - } - - if len(agentVersionAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all AgentVersion records in the query. -func (q agentVersionQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count agent_versions rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q agentVersionQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if agent_versions exists") - } - - return count > 0, nil -} - -// Peers retrieves all the peer's Peers with an executor. -func (o *AgentVersion) Peers(mods ...qm.QueryMod) peerQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) - } - - queryMods = append(queryMods, - qm.Where("\"peers\".\"agent_version_id\"=?", o.ID), - ) - - return Peers(queryMods...) -} - -// LoadPeers allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. 
-func (agentVersionL) LoadPeers(ctx context.Context, e boil.ContextExecutor, singular bool, maybeAgentVersion interface{}, mods queries.Applicator) error { - var slice []*AgentVersion - var object *AgentVersion - - if singular { - var ok bool - object, ok = maybeAgentVersion.(*AgentVersion) - if !ok { - object = new(AgentVersion) - ok = queries.SetFromEmbeddedStruct(&object, &maybeAgentVersion) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeAgentVersion)) - } - } - } else { - s, ok := maybeAgentVersion.(*[]*AgentVersion) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeAgentVersion) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeAgentVersion)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &agentVersionR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &agentVersionR{} - } - - for _, a := range args { - if queries.Equal(a, obj.ID) { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`peers`), - qm.WhereIn(`peers.agent_version_id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load peers") - } - - var resultSlice []*Peer - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed to bind eager loaded slice peers") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on peers") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for peers") - } - - if len(peerAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := 
obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.Peers = resultSlice - for _, foreign := range resultSlice { - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.AgentVersion = object - } - return nil - } - - for _, foreign := range resultSlice { - for _, local := range slice { - if queries.Equal(local.ID, foreign.AgentVersionID) { - local.R.Peers = append(local.R.Peers, foreign) - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.AgentVersion = local - break - } - } - } - - return nil -} - -// AddPeers adds the given related objects to the existing relationships -// of the agent_version, optionally inserting them as new records. -// Appends related to o.R.Peers. -// Sets related.R.AgentVersion appropriately. -func (o *AgentVersion) AddPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - var err error - for _, rel := range related { - if insert { - queries.Assign(&rel.AgentVersionID, o.ID) - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } else { - updateQuery := fmt.Sprintf( - "UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"agent_version_id"}), - strmangle.WhereClause("\"", "\"", 2, peerPrimaryKeyColumns), - ) - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update foreign table") - } - - queries.Assign(&rel.AgentVersionID, o.ID) - } - } - - if o.R == nil { - o.R = &agentVersionR{ - Peers: related, - } - } else { - o.R.Peers = append(o.R.Peers, related...) 
- } - - for _, rel := range related { - if rel.R == nil { - rel.R = &peerR{ - AgentVersion: o, - } - } else { - rel.R.AgentVersion = o - } - } - return nil -} - -// SetPeers removes all previously related items of the -// agent_version replacing them completely with the passed -// in related items, optionally inserting them as new records. -// Sets o.R.AgentVersion's Peers accordingly. -// Replaces o.R.Peers with related. -// Sets related.R.AgentVersion's Peers accordingly. -func (o *AgentVersion) SetPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - query := "update \"peers\" set \"agent_version_id\" = null where \"agent_version_id\" = $1" - values := []interface{}{o.ID} - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err := exec.ExecContext(ctx, query, values...) - if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - - if o.R != nil { - for _, rel := range o.R.Peers { - queries.SetScanner(&rel.AgentVersionID, nil) - if rel.R == nil { - continue - } - - rel.R.AgentVersion = nil - } - o.R.Peers = nil - } - - return o.AddPeers(ctx, exec, insert, related...) -} - -// RemovePeers relationships from objects passed in. -// Removes related items from R.Peers (uses pointer comparison, removal does not keep order) -// Sets related.R.AgentVersion. 
-func (o *AgentVersion) RemovePeers(ctx context.Context, exec boil.ContextExecutor, related ...*Peer) error { - if len(related) == 0 { - return nil - } - - var err error - for _, rel := range related { - queries.SetScanner(&rel.AgentVersionID, nil) - if rel.R != nil { - rel.R.AgentVersion = nil - } - if _, err = rel.Update(ctx, exec, boil.Whitelist("agent_version_id")); err != nil { - return err - } - } - if o.R == nil { - return nil - } - - for _, rel := range related { - for i, ri := range o.R.Peers { - if rel != ri { - continue - } - - ln := len(o.R.Peers) - if ln > 1 && i < ln-1 { - o.R.Peers[i] = o.R.Peers[ln-1] - } - o.R.Peers = o.R.Peers[:ln-1] - break - } - } - - return nil -} - -// AgentVersions retrieves all the records using an executor. -func AgentVersions(mods ...qm.QueryMod) agentVersionQuery { - mods = append(mods, qm.From("\"agent_versions\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"agent_versions\".*"}) - } - - return agentVersionQuery{q} -} - -// FindAgentVersion retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. -func FindAgentVersion(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*AgentVersion, error) { - agentVersionObj := &AgentVersion{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"agent_versions\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, agentVersionObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from agent_versions") - } - - if err = agentVersionObj.doAfterSelectHooks(ctx, exec); err != nil { - return agentVersionObj, err - } - - return agentVersionObj, nil -} - -// Insert a single record using an executor. 
-// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *AgentVersion) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no agent_versions provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(agentVersionColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - agentVersionInsertCacheMut.RLock() - cache, cached := agentVersionInsertCache[key] - agentVersionInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - agentVersionAllColumns, - agentVersionColumnsWithDefault, - agentVersionColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, agentVersionGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(agentVersionType, agentVersionMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(agentVersionType, agentVersionMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"agent_versions\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"agent_versions\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := 
boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into agent_versions") - } - - if !cached { - agentVersionInsertCacheMut.Lock() - agentVersionInsertCache[key] = cache - agentVersionInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the AgentVersion. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. -func (o *AgentVersion) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - agentVersionUpdateCacheMut.RLock() - cache, cached := agentVersionUpdateCache[key] - agentVersionUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - agentVersionAllColumns, - agentVersionPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, agentVersionGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update agent_versions, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"agent_versions\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, agentVersionPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(agentVersionType, agentVersionMapping, append(wl, agentVersionPrimaryKeyColumns...)) - if err != nil { - return 0, 
err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update agent_versions row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for agent_versions") - } - - if !cached { - agentVersionUpdateCacheMut.Lock() - agentVersionUpdateCache[key] = cache - agentVersionUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. -func (q agentVersionQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for agent_versions") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for agent_versions") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. 
-func (o AgentVersionSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), agentVersionPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"agent_versions\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, agentVersionPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in agentVersion slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all agentVersion") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. 
-func (o *AgentVersion) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no agent_versions provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(agentVersionColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - agentVersionUpsertCacheMut.RLock() - cache, cached := agentVersionUpsertCache[key] - agentVersionUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - agentVersionAllColumns, - agentVersionColumnsWithDefault, - agentVersionColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - agentVersionAllColumns, - agentVersionPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, agentVersionGeneratedColumns) - update = strmangle.SetComplement(update, agentVersionGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert agent_versions, could not build update column list") - } - - conflict := 
conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(agentVersionPrimaryKeyColumns)) - copy(conflict, agentVersionPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"agent_versions\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(agentVersionType, agentVersionMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(agentVersionType, agentVersionMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert agent_versions") - } - - if !cached { - agentVersionUpsertCacheMut.Lock() - agentVersionUpsertCache[key] = cache - agentVersionUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single AgentVersion record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *AgentVersion) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no AgentVersion provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), agentVersionPrimaryKeyMapping) - sql := "DELETE FROM \"agent_versions\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from agent_versions") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for agent_versions") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q agentVersionQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no agentVersionQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from agent_versions") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for agent_versions") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o AgentVersionSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(agentVersionBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), agentVersionPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"agent_versions\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, agentVersionPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from agentVersion slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for agent_versions") - } - - if len(agentVersionAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *AgentVersion) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindAgentVersion(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *AgentVersionSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := AgentVersionSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), agentVersionPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"agent_versions\".* FROM \"agent_versions\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, agentVersionPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in AgentVersionSlice") - } - - *o = slice - - return nil -} - -// AgentVersionExists checks if the AgentVersion row exists. -func AgentVersionExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"agent_versions\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if agent_versions exists") - } - - return exists, nil -} diff --git a/db/models/boil_queries.go b/db/models/boil_queries.go deleted file mode 100644 index 2bb0c0f..0000000 --- a/db/models/boil_queries.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "github.com/volatiletech/sqlboiler/v4/drivers" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" -) - -var dialect = drivers.Dialect{ - LQ: 0x22, - RQ: 0x22, - - UseIndexPlaceholders: true, - UseLastInsertID: false, - UseSchema: false, - UseDefaultKeyword: true, - UseAutoColumns: false, - UseTopClause: false, - UseOutputClause: false, - UseCaseWhenExistsClause: false, -} - -// NewQuery initializes a new Query using the passed in QueryMods -func NewQuery(mods ...qm.QueryMod) *queries.Query { - q := &queries.Query{} - queries.SetDialect(q, &dialect) - qm.Apply(q, mods...) - - return q -} diff --git a/db/models/boil_table_names.go b/db/models/boil_table_names.go deleted file mode 100644 index 253145b..0000000 --- a/db/models/boil_table_names.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -var TableNames = struct { - AgentVersions string - IPAddresses string - Keys string - MultiAddresses string - PeerLogs string - Peers string - PeersXMultiAddresses string - Protocols string - ProtocolsSets string - Requests string - RequestsDenormalized string -}{ - AgentVersions: "agent_versions", - IPAddresses: "ip_addresses", - Keys: "keys", - MultiAddresses: "multi_addresses", - PeerLogs: "peer_logs", - Peers: "peers", - PeersXMultiAddresses: "peers_x_multi_addresses", - Protocols: "protocols", - ProtocolsSets: "protocols_sets", - Requests: "requests", - RequestsDenormalized: "requests_denormalized", -} diff --git a/db/models/boil_types.go b/db/models/boil_types.go deleted file mode 100644 index a79a56b..0000000 --- a/db/models/boil_types.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 
-// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -import ( - "strconv" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/strmangle" -) - -// M type is for providing columns and column values to UpdateAll. -type M map[string]interface{} - -// ErrSyncFail occurs during insert when the record could not be retrieved in -// order to populate default value information. This usually happens when LastInsertId -// fails or there was a primary key configuration that was not resolvable. -var ErrSyncFail = errors.New("models: failed to synchronize data after insert") - -type insertCache struct { - query string - retQuery string - valueMapping []uint64 - retMapping []uint64 -} - -type updateCache struct { - query string - valueMapping []uint64 -} - -func makeCacheKey(cols boil.Columns, nzDefaults []string) string { - buf := strmangle.GetBuffer() - - buf.WriteString(strconv.Itoa(cols.Kind)) - for _, w := range cols.Cols { - buf.WriteString(w) - } - - if len(nzDefaults) != 0 { - buf.WriteByte('.') - } - for _, nz := range nzDefaults { - buf.WriteString(nz) - } - - str := buf.String() - strmangle.PutBuffer(buf) - return str -} - -// Enum values for MessageType -const ( - MessageTypePUT_VALUE string = "PUT_VALUE" - MessageTypeGET_VALUE string = "GET_VALUE" - MessageTypeADD_PROVIDER string = "ADD_PROVIDER" - MessageTypeGET_PROVIDERS string = "GET_PROVIDERS" - MessageTypeFIND_NODE string = "FIND_NODE" - MessageTypePING string = "PING" -) - -func AllMessageType() []string { - return []string{ - MessageTypePUT_VALUE, - MessageTypeGET_VALUE, - MessageTypeADD_PROVIDER, - MessageTypeGET_PROVIDERS, - MessageTypeFIND_NODE, - MessageTypePING, - } -} diff --git a/db/models/boil_view_names.go b/db/models/boil_view_names.go deleted file mode 100644 index cca9483..0000000 --- a/db/models/boil_view_names.go +++ /dev/null @@ -1,7 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 
(https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -var ViewNames = struct { -}{} diff --git a/db/models/ip_addresses.go b/db/models/ip_addresses.go deleted file mode 100644 index 6acbc3d..0000000 --- a/db/models/ip_addresses.go +++ /dev/null @@ -1,1242 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// IPAddress is an object representing the database table. -type IPAddress struct { // An internal unique id that identifies this ip address. - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - // The multi address that this ip address belongs to. - MultiAddressID int `boil:"multi_address_id" json:"multi_address_id" toml:"multi_address_id" yaml:"multi_address_id"` - // The autonomous system number that this ip address belongs to. - Asn null.Int `boil:"asn" json:"asn,omitempty" toml:"asn" yaml:"asn,omitempty"` - // If NULL this address could not be associated with a cloud provider. If not NULL the integer corresponds to the UdgerDB datacenter ID. - IsCloud null.Int `boil:"is_cloud" json:"is_cloud,omitempty" toml:"is_cloud" yaml:"is_cloud,omitempty"` - // Timestamp of when this IP address was updated. - UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"` - // Timestamp of when this IP address was created. 
- CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - // The country that this address belongs to in the form of a two to three letter country code - Country null.String `boil:"country" json:"country,omitempty" toml:"country" yaml:"country,omitempty"` - // The continent that this address belongs to in the form of a two letter code. - Continent null.String `boil:"continent" json:"continent,omitempty" toml:"continent" yaml:"continent,omitempty"` - // The IP address derived from the reference multi address. - Address string `boil:"address" json:"address" toml:"address" yaml:"address"` - - R *ipAddressR `boil:"-" json:"-" toml:"-" yaml:"-"` - L ipAddressL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var IPAddressColumns = struct { - ID string - MultiAddressID string - Asn string - IsCloud string - UpdatedAt string - CreatedAt string - Country string - Continent string - Address string -}{ - ID: "id", - MultiAddressID: "multi_address_id", - Asn: "asn", - IsCloud: "is_cloud", - UpdatedAt: "updated_at", - CreatedAt: "created_at", - Country: "country", - Continent: "continent", - Address: "address", -} - -var IPAddressTableColumns = struct { - ID string - MultiAddressID string - Asn string - IsCloud string - UpdatedAt string - CreatedAt string - Country string - Continent string - Address string -}{ - ID: "ip_addresses.id", - MultiAddressID: "ip_addresses.multi_address_id", - Asn: "ip_addresses.asn", - IsCloud: "ip_addresses.is_cloud", - UpdatedAt: "ip_addresses.updated_at", - CreatedAt: "ip_addresses.created_at", - Country: "ip_addresses.country", - Continent: "ip_addresses.continent", - Address: "ip_addresses.address", -} - -// Generated where - -type whereHelpernull_Int struct{ field string } - -func (w whereHelpernull_Int) EQ(x null.Int) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpernull_Int) NEQ(x null.Int) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w 
whereHelpernull_Int) LT(x null.Int) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpernull_Int) LTE(x null.Int) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpernull_Int) GT(x null.Int) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpernull_Int) GTE(x null.Int) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} -func (w whereHelpernull_Int) IN(slice []int) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelpernull_Int) NIN(slice []int) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) -} - -func (w whereHelpernull_Int) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpernull_Int) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -type whereHelpernull_String struct{ field string } - -func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w whereHelpernull_String) LT(x null.String) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpernull_String) GT(x null.String) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} -func (w whereHelpernull_String) IN(slice []string) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) 
- for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelpernull_String) NIN(slice []string) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) -} - -func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -var IPAddressWhere = struct { - ID whereHelperint - MultiAddressID whereHelperint - Asn whereHelpernull_Int - IsCloud whereHelpernull_Int - UpdatedAt whereHelpertime_Time - CreatedAt whereHelpertime_Time - Country whereHelpernull_String - Continent whereHelpernull_String - Address whereHelperstring -}{ - ID: whereHelperint{field: "\"ip_addresses\".\"id\""}, - MultiAddressID: whereHelperint{field: "\"ip_addresses\".\"multi_address_id\""}, - Asn: whereHelpernull_Int{field: "\"ip_addresses\".\"asn\""}, - IsCloud: whereHelpernull_Int{field: "\"ip_addresses\".\"is_cloud\""}, - UpdatedAt: whereHelpertime_Time{field: "\"ip_addresses\".\"updated_at\""}, - CreatedAt: whereHelpertime_Time{field: "\"ip_addresses\".\"created_at\""}, - Country: whereHelpernull_String{field: "\"ip_addresses\".\"country\""}, - Continent: whereHelpernull_String{field: "\"ip_addresses\".\"continent\""}, - Address: whereHelperstring{field: "\"ip_addresses\".\"address\""}, -} - -// IPAddressRels is where relationship names are stored. -var IPAddressRels = struct { - MultiAddress string -}{ - MultiAddress: "MultiAddress", -} - -// ipAddressR is where relationships are stored. 
-type ipAddressR struct { - MultiAddress *MultiAddress `boil:"MultiAddress" json:"MultiAddress" toml:"MultiAddress" yaml:"MultiAddress"` -} - -// NewStruct creates a new relationship struct -func (*ipAddressR) NewStruct() *ipAddressR { - return &ipAddressR{} -} - -func (r *ipAddressR) GetMultiAddress() *MultiAddress { - if r == nil { - return nil - } - return r.MultiAddress -} - -// ipAddressL is where Load methods for each relationship are stored. -type ipAddressL struct{} - -var ( - ipAddressAllColumns = []string{"id", "multi_address_id", "asn", "is_cloud", "updated_at", "created_at", "country", "continent", "address"} - ipAddressColumnsWithoutDefault = []string{"multi_address_id", "updated_at", "created_at", "address"} - ipAddressColumnsWithDefault = []string{"id", "asn", "is_cloud", "country", "continent"} - ipAddressPrimaryKeyColumns = []string{"id"} - ipAddressGeneratedColumns = []string{"id"} -) - -type ( - // IPAddressSlice is an alias for a slice of pointers to IPAddress. - // This should almost always be used instead of []IPAddress. - IPAddressSlice []*IPAddress - // IPAddressHook is the signature for custom IPAddress hook methods - IPAddressHook func(context.Context, boil.ContextExecutor, *IPAddress) error - - ipAddressQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - ipAddressType = reflect.TypeOf(&IPAddress{}) - ipAddressMapping = queries.MakeStructMapping(ipAddressType) - ipAddressPrimaryKeyMapping, _ = queries.BindMapping(ipAddressType, ipAddressMapping, ipAddressPrimaryKeyColumns) - ipAddressInsertCacheMut sync.RWMutex - ipAddressInsertCache = make(map[string]insertCache) - ipAddressUpdateCacheMut sync.RWMutex - ipAddressUpdateCache = make(map[string]updateCache) - ipAddressUpsertCacheMut sync.RWMutex - ipAddressUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. 
- _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var ipAddressAfterSelectHooks []IPAddressHook - -var ipAddressBeforeInsertHooks []IPAddressHook -var ipAddressAfterInsertHooks []IPAddressHook - -var ipAddressBeforeUpdateHooks []IPAddressHook -var ipAddressAfterUpdateHooks []IPAddressHook - -var ipAddressBeforeDeleteHooks []IPAddressHook -var ipAddressAfterDeleteHooks []IPAddressHook - -var ipAddressBeforeUpsertHooks []IPAddressHook -var ipAddressAfterUpsertHooks []IPAddressHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *IPAddress) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *IPAddress) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *IPAddress) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. 
-func (o *IPAddress) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *IPAddress) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *IPAddress) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *IPAddress) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *IPAddress) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. 
-func (o *IPAddress) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range ipAddressAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddIPAddressHook registers your hook function for all future operations. -func AddIPAddressHook(hookPoint boil.HookPoint, ipAddressHook IPAddressHook) { - switch hookPoint { - case boil.AfterSelectHook: - ipAddressAfterSelectHooks = append(ipAddressAfterSelectHooks, ipAddressHook) - case boil.BeforeInsertHook: - ipAddressBeforeInsertHooks = append(ipAddressBeforeInsertHooks, ipAddressHook) - case boil.AfterInsertHook: - ipAddressAfterInsertHooks = append(ipAddressAfterInsertHooks, ipAddressHook) - case boil.BeforeUpdateHook: - ipAddressBeforeUpdateHooks = append(ipAddressBeforeUpdateHooks, ipAddressHook) - case boil.AfterUpdateHook: - ipAddressAfterUpdateHooks = append(ipAddressAfterUpdateHooks, ipAddressHook) - case boil.BeforeDeleteHook: - ipAddressBeforeDeleteHooks = append(ipAddressBeforeDeleteHooks, ipAddressHook) - case boil.AfterDeleteHook: - ipAddressAfterDeleteHooks = append(ipAddressAfterDeleteHooks, ipAddressHook) - case boil.BeforeUpsertHook: - ipAddressBeforeUpsertHooks = append(ipAddressBeforeUpsertHooks, ipAddressHook) - case boil.AfterUpsertHook: - ipAddressAfterUpsertHooks = append(ipAddressAfterUpsertHooks, ipAddressHook) - } -} - -// One returns a single ipAddress record from the query. 
-func (q ipAddressQuery) One(ctx context.Context, exec boil.ContextExecutor) (*IPAddress, error) { - o := &IPAddress{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for ip_addresses") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all IPAddress records from the query. -func (q ipAddressQuery) All(ctx context.Context, exec boil.ContextExecutor) (IPAddressSlice, error) { - var o []*IPAddress - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to IPAddress slice") - } - - if len(ipAddressAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all IPAddress records in the query. -func (q ipAddressQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count ip_addresses rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q ipAddressQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if ip_addresses exists") - } - - return count > 0, nil -} - -// MultiAddress pointed to by the foreign key. 
-func (o *IPAddress) MultiAddress(mods ...qm.QueryMod) multiAddressQuery { - queryMods := []qm.QueryMod{ - qm.Where("\"id\" = ?", o.MultiAddressID), - } - - queryMods = append(queryMods, mods...) - - return MultiAddresses(queryMods...) -} - -// LoadMultiAddress allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for an N-1 relationship. -func (ipAddressL) LoadMultiAddress(ctx context.Context, e boil.ContextExecutor, singular bool, maybeIPAddress interface{}, mods queries.Applicator) error { - var slice []*IPAddress - var object *IPAddress - - if singular { - var ok bool - object, ok = maybeIPAddress.(*IPAddress) - if !ok { - object = new(IPAddress) - ok = queries.SetFromEmbeddedStruct(&object, &maybeIPAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeIPAddress)) - } - } - } else { - s, ok := maybeIPAddress.(*[]*IPAddress) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeIPAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeIPAddress)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &ipAddressR{} - } - args = append(args, object.MultiAddressID) - - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &ipAddressR{} - } - - for _, a := range args { - if a == obj.MultiAddressID { - continue Outer - } - } - - args = append(args, obj.MultiAddressID) - - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`multi_addresses`), - qm.WhereIn(`multi_addresses.id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load MultiAddress") - } - - var resultSlice []*MultiAddress - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed 
to bind eager loaded slice MultiAddress") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results of eager load for multi_addresses") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for multi_addresses") - } - - if len(ipAddressAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - - if len(resultSlice) == 0 { - return nil - } - - if singular { - foreign := resultSlice[0] - object.R.MultiAddress = foreign - if foreign.R == nil { - foreign.R = &multiAddressR{} - } - foreign.R.IPAddresses = append(foreign.R.IPAddresses, object) - return nil - } - - for _, local := range slice { - for _, foreign := range resultSlice { - if local.MultiAddressID == foreign.ID { - local.R.MultiAddress = foreign - if foreign.R == nil { - foreign.R = &multiAddressR{} - } - foreign.R.IPAddresses = append(foreign.R.IPAddresses, local) - break - } - } - } - - return nil -} - -// SetMultiAddress of the ipAddress to the related item. -// Sets o.R.MultiAddress to related. -// Adds o to related.R.IPAddresses. 
-func (o *IPAddress) SetMultiAddress(ctx context.Context, exec boil.ContextExecutor, insert bool, related *MultiAddress) error { - var err error - if insert { - if err = related.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - - updateQuery := fmt.Sprintf( - "UPDATE \"ip_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"multi_address_id"}), - strmangle.WhereClause("\"", "\"", 2, ipAddressPrimaryKeyColumns), - ) - values := []interface{}{related.ID, o.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - o.MultiAddressID = related.ID - if o.R == nil { - o.R = &ipAddressR{ - MultiAddress: related, - } - } else { - o.R.MultiAddress = related - } - - if related.R == nil { - related.R = &multiAddressR{ - IPAddresses: IPAddressSlice{o}, - } - } else { - related.R.IPAddresses = append(related.R.IPAddresses, o) - } - - return nil -} - -// IPAddresses retrieves all the records using an executor. -func IPAddresses(mods ...qm.QueryMod) ipAddressQuery { - mods = append(mods, qm.From("\"ip_addresses\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"ip_addresses\".*"}) - } - - return ipAddressQuery{q} -} - -// FindIPAddress retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindIPAddress(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*IPAddress, error) { - ipAddressObj := &IPAddress{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"ip_addresses\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, ipAddressObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from ip_addresses") - } - - if err = ipAddressObj.doAfterSelectHooks(ctx, exec); err != nil { - return ipAddressObj, err - } - - return ipAddressObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *IPAddress) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no ip_addresses provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.UpdatedAt.IsZero() { - o.UpdatedAt = currTime - } - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(ipAddressColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - ipAddressInsertCacheMut.RLock() - cache, cached := ipAddressInsertCache[key] - ipAddressInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - ipAddressAllColumns, - ipAddressColumnsWithDefault, - ipAddressColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, ipAddressGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(ipAddressType, ipAddressMapping, wl) - if err != nil { - return err - } - 
cache.retMapping, err = queries.BindMapping(ipAddressType, ipAddressMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"ip_addresses\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"ip_addresses\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into ip_addresses") - } - - if !cached { - ipAddressInsertCacheMut.Lock() - ipAddressInsertCache[key] = cache - ipAddressInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the IPAddress. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *IPAddress) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - } - - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - ipAddressUpdateCacheMut.RLock() - cache, cached := ipAddressUpdateCache[key] - ipAddressUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - ipAddressAllColumns, - ipAddressPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, ipAddressGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update ip_addresses, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"ip_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, ipAddressPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(ipAddressType, ipAddressMapping, append(wl, ipAddressPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update ip_addresses row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for ip_addresses") - } - - if !cached { - ipAddressUpdateCacheMut.Lock() - ipAddressUpdateCache[key] = cache - ipAddressUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. -func (q ipAddressQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for ip_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for ip_addresses") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o IPAddressSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), ipAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) 
- } - - sql := fmt.Sprintf("UPDATE \"ip_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, ipAddressPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in ipAddress slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all ipAddress") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *IPAddress) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no ip_addresses provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(ipAddressColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range 
insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - ipAddressUpsertCacheMut.RLock() - cache, cached := ipAddressUpsertCache[key] - ipAddressUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - ipAddressAllColumns, - ipAddressColumnsWithDefault, - ipAddressColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - ipAddressAllColumns, - ipAddressPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, ipAddressGeneratedColumns) - update = strmangle.SetComplement(update, ipAddressGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert ip_addresses, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(ipAddressPrimaryKeyColumns)) - copy(conflict, ipAddressPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"ip_addresses\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(ipAddressType, ipAddressMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(ipAddressType, ipAddressMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) 
- if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert ip_addresses") - } - - if !cached { - ipAddressUpsertCacheMut.Lock() - ipAddressUpsertCache[key] = cache - ipAddressUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single IPAddress record with an executor. -// Delete will match against the primary key column to find the record to delete. -func (o *IPAddress) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no IPAddress provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), ipAddressPrimaryKeyMapping) - sql := "DELETE FROM \"ip_addresses\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from ip_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for ip_addresses") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. 
-func (q ipAddressQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no ipAddressQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from ip_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for ip_addresses") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. -func (o IPAddressSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(ipAddressBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), ipAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"ip_addresses\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, ipAddressPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from ipAddress slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for ip_addresses") - } - - if len(ipAddressAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *IPAddress) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindIPAddress(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. -func (o *IPAddressSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := IPAddressSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), ipAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"ip_addresses\".* FROM \"ip_addresses\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, ipAddressPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in IPAddressSlice") - } - - *o = slice - - return nil -} - -// IPAddressExists checks if the IPAddress row exists. 
-func IPAddressExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"ip_addresses\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if ip_addresses exists") - } - - return exists, nil -} diff --git a/db/models/keys.go b/db/models/keys.go deleted file mode 100644 index a8dea10..0000000 --- a/db/models/keys.go +++ /dev/null @@ -1,1167 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// Key is an object representing the database table. 
-type Key struct { - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - PeerID null.Int64 `boil:"peer_id" json:"peer_id,omitempty" toml:"peer_id" yaml:"peer_id,omitempty"` - MultiHash null.String `boil:"multi_hash" json:"multi_hash,omitempty" toml:"multi_hash" yaml:"multi_hash,omitempty"` - - R *keyR `boil:"-" json:"-" toml:"-" yaml:"-"` - L keyL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var KeyColumns = struct { - ID string - PeerID string - MultiHash string -}{ - ID: "id", - PeerID: "peer_id", - MultiHash: "multi_hash", -} - -var KeyTableColumns = struct { - ID string - PeerID string - MultiHash string -}{ - ID: "keys.id", - PeerID: "keys.peer_id", - MultiHash: "keys.multi_hash", -} - -// Generated where - -type whereHelpernull_Int64 struct{ field string } - -func (w whereHelpernull_Int64) EQ(x null.Int64) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpernull_Int64) NEQ(x null.Int64) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w whereHelpernull_Int64) LT(x null.Int64) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpernull_Int64) LTE(x null.Int64) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpernull_Int64) GT(x null.Int64) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpernull_Int64) GTE(x null.Int64) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} -func (w whereHelpernull_Int64) IN(slice []int64) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelpernull_Int64) NIN(slice []int64) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) 
-} - -func (w whereHelpernull_Int64) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpernull_Int64) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -var KeyWhere = struct { - ID whereHelperint - PeerID whereHelpernull_Int64 - MultiHash whereHelpernull_String -}{ - ID: whereHelperint{field: "\"keys\".\"id\""}, - PeerID: whereHelpernull_Int64{field: "\"keys\".\"peer_id\""}, - MultiHash: whereHelpernull_String{field: "\"keys\".\"multi_hash\""}, -} - -// KeyRels is where relationship names are stored. -var KeyRels = struct { - Peer string -}{ - Peer: "Peer", -} - -// keyR is where relationships are stored. -type keyR struct { - Peer *Peer `boil:"Peer" json:"Peer" toml:"Peer" yaml:"Peer"` -} - -// NewStruct creates a new relationship struct -func (*keyR) NewStruct() *keyR { - return &keyR{} -} - -func (r *keyR) GetPeer() *Peer { - if r == nil { - return nil - } - return r.Peer -} - -// keyL is where Load methods for each relationship are stored. -type keyL struct{} - -var ( - keyAllColumns = []string{"id", "peer_id", "multi_hash"} - keyColumnsWithoutDefault = []string{} - keyColumnsWithDefault = []string{"id", "peer_id", "multi_hash"} - keyPrimaryKeyColumns = []string{"id"} - keyGeneratedColumns = []string{"id"} -) - -type ( - // KeySlice is an alias for a slice of pointers to Key. - // This should almost always be used instead of []Key. 
- KeySlice []*Key - // KeyHook is the signature for custom Key hook methods - KeyHook func(context.Context, boil.ContextExecutor, *Key) error - - keyQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - keyType = reflect.TypeOf(&Key{}) - keyMapping = queries.MakeStructMapping(keyType) - keyPrimaryKeyMapping, _ = queries.BindMapping(keyType, keyMapping, keyPrimaryKeyColumns) - keyInsertCacheMut sync.RWMutex - keyInsertCache = make(map[string]insertCache) - keyUpdateCacheMut sync.RWMutex - keyUpdateCache = make(map[string]updateCache) - keyUpsertCacheMut sync.RWMutex - keyUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var keyAfterSelectHooks []KeyHook - -var keyBeforeInsertHooks []KeyHook -var keyAfterInsertHooks []KeyHook - -var keyBeforeUpdateHooks []KeyHook -var keyAfterUpdateHooks []KeyHook - -var keyBeforeDeleteHooks []KeyHook -var keyAfterDeleteHooks []KeyHook - -var keyBeforeUpsertHooks []KeyHook -var keyAfterUpsertHooks []KeyHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *Key) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *Key) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. 
-func (o *Key) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *Key) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *Key) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *Key) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *Key) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. 
-func (o *Key) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *Key) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range keyAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddKeyHook registers your hook function for all future operations. -func AddKeyHook(hookPoint boil.HookPoint, keyHook KeyHook) { - switch hookPoint { - case boil.AfterSelectHook: - keyAfterSelectHooks = append(keyAfterSelectHooks, keyHook) - case boil.BeforeInsertHook: - keyBeforeInsertHooks = append(keyBeforeInsertHooks, keyHook) - case boil.AfterInsertHook: - keyAfterInsertHooks = append(keyAfterInsertHooks, keyHook) - case boil.BeforeUpdateHook: - keyBeforeUpdateHooks = append(keyBeforeUpdateHooks, keyHook) - case boil.AfterUpdateHook: - keyAfterUpdateHooks = append(keyAfterUpdateHooks, keyHook) - case boil.BeforeDeleteHook: - keyBeforeDeleteHooks = append(keyBeforeDeleteHooks, keyHook) - case boil.AfterDeleteHook: - keyAfterDeleteHooks = append(keyAfterDeleteHooks, keyHook) - case boil.BeforeUpsertHook: - keyBeforeUpsertHooks = append(keyBeforeUpsertHooks, keyHook) - case boil.AfterUpsertHook: - keyAfterUpsertHooks = append(keyAfterUpsertHooks, keyHook) - } -} - -// One returns a single key record from the query. 
-func (q keyQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Key, error) { - o := &Key{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for keys") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all Key records from the query. -func (q keyQuery) All(ctx context.Context, exec boil.ContextExecutor) (KeySlice, error) { - var o []*Key - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to Key slice") - } - - if len(keyAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all Key records in the query. -func (q keyQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count keys rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q keyQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if keys exists") - } - - return count > 0, nil -} - -// Peer pointed to by the foreign key. -func (o *Key) Peer(mods ...qm.QueryMod) peerQuery { - queryMods := []qm.QueryMod{ - qm.Where("\"id\" = ?", o.PeerID), - } - - queryMods = append(queryMods, mods...) - - return Peers(queryMods...) 
-} - -// LoadPeer allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for an N-1 relationship. -func (keyL) LoadPeer(ctx context.Context, e boil.ContextExecutor, singular bool, maybeKey interface{}, mods queries.Applicator) error { - var slice []*Key - var object *Key - - if singular { - var ok bool - object, ok = maybeKey.(*Key) - if !ok { - object = new(Key) - ok = queries.SetFromEmbeddedStruct(&object, &maybeKey) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeKey)) - } - } - } else { - s, ok := maybeKey.(*[]*Key) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeKey) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeKey)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &keyR{} - } - if !queries.IsNil(object.PeerID) { - args = append(args, object.PeerID) - } - - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &keyR{} - } - - for _, a := range args { - if queries.Equal(a, obj.PeerID) { - continue Outer - } - } - - if !queries.IsNil(obj.PeerID) { - args = append(args, obj.PeerID) - } - - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`peers`), - qm.WhereIn(`peers.id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load Peer") - } - - var resultSlice []*Peer - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed to bind eager loaded slice Peer") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results of eager load for peers") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for peers") - } - - if 
len(keyAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - - if len(resultSlice) == 0 { - return nil - } - - if singular { - foreign := resultSlice[0] - object.R.Peer = foreign - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.Keys = append(foreign.R.Keys, object) - return nil - } - - for _, local := range slice { - for _, foreign := range resultSlice { - if queries.Equal(local.PeerID, foreign.ID) { - local.R.Peer = foreign - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.Keys = append(foreign.R.Keys, local) - break - } - } - } - - return nil -} - -// SetPeer of the key to the related item. -// Sets o.R.Peer to related. -// Adds o to related.R.Keys. -func (o *Key) SetPeer(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Peer) error { - var err error - if insert { - if err = related.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - - updateQuery := fmt.Sprintf( - "UPDATE \"keys\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"peer_id"}), - strmangle.WhereClause("\"", "\"", 2, keyPrimaryKeyColumns), - ) - values := []interface{}{related.ID, o.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - queries.Assign(&o.PeerID, related.ID) - if o.R == nil { - o.R = &keyR{ - Peer: related, - } - } else { - o.R.Peer = related - } - - if related.R == nil { - related.R = &peerR{ - Keys: KeySlice{o}, - } - } else { - related.R.Keys = append(related.R.Keys, o) - } - - return nil -} - -// RemovePeer relationship. -// Sets o.R.Peer to nil. -// Removes o from all passed in related items' relationships struct. 
-func (o *Key) RemovePeer(ctx context.Context, exec boil.ContextExecutor, related *Peer) error { - var err error - - queries.SetScanner(&o.PeerID, nil) - if _, err = o.Update(ctx, exec, boil.Whitelist("peer_id")); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - if o.R != nil { - o.R.Peer = nil - } - if related == nil || related.R == nil { - return nil - } - - for i, ri := range related.R.Keys { - if queries.Equal(o.PeerID, ri.PeerID) { - continue - } - - ln := len(related.R.Keys) - if ln > 1 && i < ln-1 { - related.R.Keys[i] = related.R.Keys[ln-1] - } - related.R.Keys = related.R.Keys[:ln-1] - break - } - return nil -} - -// Keys retrieves all the records using an executor. -func Keys(mods ...qm.QueryMod) keyQuery { - mods = append(mods, qm.From("\"keys\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"keys\".*"}) - } - - return keyQuery{q} -} - -// FindKey retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. -func FindKey(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*Key, error) { - keyObj := &Key{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"keys\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, keyObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from keys") - } - - if err = keyObj.doAfterSelectHooks(ctx, exec); err != nil { - return keyObj, err - } - - return keyObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. 
-func (o *Key) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no keys provided for insertion") - } - - var err error - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(keyColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - keyInsertCacheMut.RLock() - cache, cached := keyInsertCache[key] - keyInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - keyAllColumns, - keyColumnsWithDefault, - keyColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, keyGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(keyType, keyMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(keyType, keyMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"keys\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"keys\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) 
- } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into keys") - } - - if !cached { - keyInsertCacheMut.Lock() - keyInsertCache[key] = cache - keyInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the Key. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. -func (o *Key) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - keyUpdateCacheMut.RLock() - cache, cached := keyUpdateCache[key] - keyUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - keyAllColumns, - keyPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, keyGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update keys, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"keys\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, keyPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(keyType, keyMapping, append(wl, keyPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update keys row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for keys") - } - - if !cached { - keyUpdateCacheMut.Lock() - keyUpdateCache[key] = cache - keyUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. -func (q keyQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for keys") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for keys") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o KeySlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), keyPrimaryKeyMapping) - args = append(args, pkeyArgs...) 
- } - - sql := fmt.Sprintf("UPDATE \"keys\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, keyPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in key slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all key") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *Key) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no keys provided for upsert") - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(keyColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - keyUpsertCacheMut.RLock() - cache, cached := 
keyUpsertCache[key] - keyUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - keyAllColumns, - keyColumnsWithDefault, - keyColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - keyAllColumns, - keyPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, keyGeneratedColumns) - update = strmangle.SetComplement(update, keyGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert keys, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(keyPrimaryKeyColumns)) - copy(conflict, keyPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"keys\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(keyType, keyMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(keyType, keyMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) 
- } - if err != nil { - return errors.Wrap(err, "models: unable to upsert keys") - } - - if !cached { - keyUpsertCacheMut.Lock() - keyUpsertCache[key] = cache - keyUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single Key record with an executor. -// Delete will match against the primary key column to find the record to delete. -func (o *Key) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no Key provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), keyPrimaryKeyMapping) - sql := "DELETE FROM \"keys\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from keys") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for keys") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q keyQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no keyQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from keys") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for keys") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o KeySlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(keyBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), keyPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"keys\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, keyPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from key slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for keys") - } - - if len(keyAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *Key) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindKey(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *KeySlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := KeySlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), keyPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"keys\".* FROM \"keys\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, keyPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in KeySlice") - } - - *o = slice - - return nil -} - -// KeyExists checks if the Key row exists. -func KeyExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"keys\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if keys exists") - } - - return exists, nil -} diff --git a/db/models/multi_addresses.go b/db/models/multi_addresses.go deleted file mode 100644 index 04f7c56..0000000 --- a/db/models/multi_addresses.go +++ /dev/null @@ -1,1534 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// MultiAddress is an object representing the database table. -type MultiAddress struct { // An internal unique id that identifies this multi address. - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - // The autonomous system number that this multi address belongs to. - Asn null.Int `boil:"asn" json:"asn,omitempty" toml:"asn" yaml:"asn,omitempty"` - // If NULL this multi address could not be associated with a cloud provider. If not NULL the integer corresponds to the UdgerDB datacenter ID. - IsCloud null.Int `boil:"is_cloud" json:"is_cloud,omitempty" toml:"is_cloud" yaml:"is_cloud,omitempty"` - // A boolean value that indicates whether this multi address is a relay address. - IsRelay null.Bool `boil:"is_relay" json:"is_relay,omitempty" toml:"is_relay" yaml:"is_relay,omitempty"` - // A boolean value that indicates whether this multi address is a publicly reachable one. - IsPublic null.Bool `boil:"is_public" json:"is_public,omitempty" toml:"is_public" yaml:"is_public,omitempty"` - // The derived IPv4 or IPv6 address of this multi address. - Addr null.String `boil:"addr" json:"addr,omitempty" toml:"addr" yaml:"addr,omitempty"` - // Indicates if the multi_address has multiple IP addresses. Could happen for dnsaddr multi addresses. 
- HasManyAddrs null.Bool `boil:"has_many_addrs" json:"has_many_addrs,omitempty" toml:"has_many_addrs" yaml:"has_many_addrs,omitempty"` - Resolved bool `boil:"resolved" json:"resolved" toml:"resolved" yaml:"resolved"` - // The country that this multi address belongs to in the form of a two letter country code. - Country null.String `boil:"country" json:"country,omitempty" toml:"country" yaml:"country,omitempty"` - // The continent that this multi address belongs to in the form of a two letter code. - Continent null.String `boil:"continent" json:"continent,omitempty" toml:"continent" yaml:"continent,omitempty"` - // The multi address in the form of `/ip4/123.456.789.123/tcp/4001`. - Maddr string `boil:"maddr" json:"maddr" toml:"maddr" yaml:"maddr"` - // Timestamp of when this multi address was updated. - UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"` - // Timestamp of when this multi address was created. - CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - - R *multiAddressR `boil:"-" json:"-" toml:"-" yaml:"-"` - L multiAddressL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var MultiAddressColumns = struct { - ID string - Asn string - IsCloud string - IsRelay string - IsPublic string - Addr string - HasManyAddrs string - Resolved string - Country string - Continent string - Maddr string - UpdatedAt string - CreatedAt string -}{ - ID: "id", - Asn: "asn", - IsCloud: "is_cloud", - IsRelay: "is_relay", - IsPublic: "is_public", - Addr: "addr", - HasManyAddrs: "has_many_addrs", - Resolved: "resolved", - Country: "country", - Continent: "continent", - Maddr: "maddr", - UpdatedAt: "updated_at", - CreatedAt: "created_at", -} - -var MultiAddressTableColumns = struct { - ID string - Asn string - IsCloud string - IsRelay string - IsPublic string - Addr string - HasManyAddrs string - Resolved string - Country string - Continent string - Maddr string - UpdatedAt string - CreatedAt string -}{ 
- ID: "multi_addresses.id", - Asn: "multi_addresses.asn", - IsCloud: "multi_addresses.is_cloud", - IsRelay: "multi_addresses.is_relay", - IsPublic: "multi_addresses.is_public", - Addr: "multi_addresses.addr", - HasManyAddrs: "multi_addresses.has_many_addrs", - Resolved: "multi_addresses.resolved", - Country: "multi_addresses.country", - Continent: "multi_addresses.continent", - Maddr: "multi_addresses.maddr", - UpdatedAt: "multi_addresses.updated_at", - CreatedAt: "multi_addresses.created_at", -} - -// Generated where - -type whereHelpernull_Bool struct{ field string } - -func (w whereHelpernull_Bool) EQ(x null.Bool) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpernull_Bool) NEQ(x null.Bool) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w whereHelpernull_Bool) LT(x null.Bool) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpernull_Bool) LTE(x null.Bool) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpernull_Bool) GT(x null.Bool) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpernull_Bool) GTE(x null.Bool) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - -func (w whereHelpernull_Bool) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpernull_Bool) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -type whereHelperbool struct{ field string } - -func (w whereHelperbool) EQ(x bool) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperbool) NEQ(x bool) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperbool) LT(x bool) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperbool) LTE(x bool) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperbool) GT(x bool) qm.QueryMod { return qmhelper.Where(w.field, 
qmhelper.GT, x) } -func (w whereHelperbool) GTE(x bool) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } - -var MultiAddressWhere = struct { - ID whereHelperint - Asn whereHelpernull_Int - IsCloud whereHelpernull_Int - IsRelay whereHelpernull_Bool - IsPublic whereHelpernull_Bool - Addr whereHelpernull_String - HasManyAddrs whereHelpernull_Bool - Resolved whereHelperbool - Country whereHelpernull_String - Continent whereHelpernull_String - Maddr whereHelperstring - UpdatedAt whereHelpertime_Time - CreatedAt whereHelpertime_Time -}{ - ID: whereHelperint{field: "\"multi_addresses\".\"id\""}, - Asn: whereHelpernull_Int{field: "\"multi_addresses\".\"asn\""}, - IsCloud: whereHelpernull_Int{field: "\"multi_addresses\".\"is_cloud\""}, - IsRelay: whereHelpernull_Bool{field: "\"multi_addresses\".\"is_relay\""}, - IsPublic: whereHelpernull_Bool{field: "\"multi_addresses\".\"is_public\""}, - Addr: whereHelpernull_String{field: "\"multi_addresses\".\"addr\""}, - HasManyAddrs: whereHelpernull_Bool{field: "\"multi_addresses\".\"has_many_addrs\""}, - Resolved: whereHelperbool{field: "\"multi_addresses\".\"resolved\""}, - Country: whereHelpernull_String{field: "\"multi_addresses\".\"country\""}, - Continent: whereHelpernull_String{field: "\"multi_addresses\".\"continent\""}, - Maddr: whereHelperstring{field: "\"multi_addresses\".\"maddr\""}, - UpdatedAt: whereHelpertime_Time{field: "\"multi_addresses\".\"updated_at\""}, - CreatedAt: whereHelpertime_Time{field: "\"multi_addresses\".\"created_at\""}, -} - -// MultiAddressRels is where relationship names are stored. -var MultiAddressRels = struct { - IPAddresses string - Peers string -}{ - IPAddresses: "IPAddresses", - Peers: "Peers", -} - -// multiAddressR is where relationships are stored. 
-type multiAddressR struct { - IPAddresses IPAddressSlice `boil:"IPAddresses" json:"IPAddresses" toml:"IPAddresses" yaml:"IPAddresses"` - Peers PeerSlice `boil:"Peers" json:"Peers" toml:"Peers" yaml:"Peers"` -} - -// NewStruct creates a new relationship struct -func (*multiAddressR) NewStruct() *multiAddressR { - return &multiAddressR{} -} - -func (r *multiAddressR) GetIPAddresses() IPAddressSlice { - if r == nil { - return nil - } - return r.IPAddresses -} - -func (r *multiAddressR) GetPeers() PeerSlice { - if r == nil { - return nil - } - return r.Peers -} - -// multiAddressL is where Load methods for each relationship are stored. -type multiAddressL struct{} - -var ( - multiAddressAllColumns = []string{"id", "asn", "is_cloud", "is_relay", "is_public", "addr", "has_many_addrs", "resolved", "country", "continent", "maddr", "updated_at", "created_at"} - multiAddressColumnsWithoutDefault = []string{"maddr", "updated_at", "created_at"} - multiAddressColumnsWithDefault = []string{"id", "asn", "is_cloud", "is_relay", "is_public", "addr", "has_many_addrs", "resolved", "country", "continent"} - multiAddressPrimaryKeyColumns = []string{"id"} - multiAddressGeneratedColumns = []string{"id"} -) - -type ( - // MultiAddressSlice is an alias for a slice of pointers to MultiAddress. - // This should almost always be used instead of []MultiAddress. 
- MultiAddressSlice []*MultiAddress - // MultiAddressHook is the signature for custom MultiAddress hook methods - MultiAddressHook func(context.Context, boil.ContextExecutor, *MultiAddress) error - - multiAddressQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - multiAddressType = reflect.TypeOf(&MultiAddress{}) - multiAddressMapping = queries.MakeStructMapping(multiAddressType) - multiAddressPrimaryKeyMapping, _ = queries.BindMapping(multiAddressType, multiAddressMapping, multiAddressPrimaryKeyColumns) - multiAddressInsertCacheMut sync.RWMutex - multiAddressInsertCache = make(map[string]insertCache) - multiAddressUpdateCacheMut sync.RWMutex - multiAddressUpdateCache = make(map[string]updateCache) - multiAddressUpsertCacheMut sync.RWMutex - multiAddressUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var multiAddressAfterSelectHooks []MultiAddressHook - -var multiAddressBeforeInsertHooks []MultiAddressHook -var multiAddressAfterInsertHooks []MultiAddressHook - -var multiAddressBeforeUpdateHooks []MultiAddressHook -var multiAddressAfterUpdateHooks []MultiAddressHook - -var multiAddressBeforeDeleteHooks []MultiAddressHook -var multiAddressAfterDeleteHooks []MultiAddressHook - -var multiAddressBeforeUpsertHooks []MultiAddressHook -var multiAddressAfterUpsertHooks []MultiAddressHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *MultiAddress) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. 
-func (o *MultiAddress) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *MultiAddress) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *MultiAddress) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *MultiAddress) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *MultiAddress) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. 
-func (o *MultiAddress) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *MultiAddress) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *MultiAddress) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range multiAddressAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddMultiAddressHook registers your hook function for all future operations. 
-func AddMultiAddressHook(hookPoint boil.HookPoint, multiAddressHook MultiAddressHook) { - switch hookPoint { - case boil.AfterSelectHook: - multiAddressAfterSelectHooks = append(multiAddressAfterSelectHooks, multiAddressHook) - case boil.BeforeInsertHook: - multiAddressBeforeInsertHooks = append(multiAddressBeforeInsertHooks, multiAddressHook) - case boil.AfterInsertHook: - multiAddressAfterInsertHooks = append(multiAddressAfterInsertHooks, multiAddressHook) - case boil.BeforeUpdateHook: - multiAddressBeforeUpdateHooks = append(multiAddressBeforeUpdateHooks, multiAddressHook) - case boil.AfterUpdateHook: - multiAddressAfterUpdateHooks = append(multiAddressAfterUpdateHooks, multiAddressHook) - case boil.BeforeDeleteHook: - multiAddressBeforeDeleteHooks = append(multiAddressBeforeDeleteHooks, multiAddressHook) - case boil.AfterDeleteHook: - multiAddressAfterDeleteHooks = append(multiAddressAfterDeleteHooks, multiAddressHook) - case boil.BeforeUpsertHook: - multiAddressBeforeUpsertHooks = append(multiAddressBeforeUpsertHooks, multiAddressHook) - case boil.AfterUpsertHook: - multiAddressAfterUpsertHooks = append(multiAddressAfterUpsertHooks, multiAddressHook) - } -} - -// One returns a single multiAddress record from the query. -func (q multiAddressQuery) One(ctx context.Context, exec boil.ContextExecutor) (*MultiAddress, error) { - o := &MultiAddress{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for multi_addresses") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all MultiAddress records from the query. 
-func (q multiAddressQuery) All(ctx context.Context, exec boil.ContextExecutor) (MultiAddressSlice, error) { - var o []*MultiAddress - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to MultiAddress slice") - } - - if len(multiAddressAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all MultiAddress records in the query. -func (q multiAddressQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count multi_addresses rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q multiAddressQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if multi_addresses exists") - } - - return count > 0, nil -} - -// IPAddresses retrieves all the ip_address's IPAddresses with an executor. -func (o *MultiAddress) IPAddresses(mods ...qm.QueryMod) ipAddressQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) - } - - queryMods = append(queryMods, - qm.Where("\"ip_addresses\".\"multi_address_id\"=?", o.ID), - ) - - return IPAddresses(queryMods...) -} - -// Peers retrieves all the peer's Peers with an executor. -func (o *MultiAddress) Peers(mods ...qm.QueryMod) peerQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) 
- } - - queryMods = append(queryMods, - qm.InnerJoin("\"peers_x_multi_addresses\" on \"peers\".\"id\" = \"peers_x_multi_addresses\".\"peer_id\""), - qm.Where("\"peers_x_multi_addresses\".\"multi_address_id\"=?", o.ID), - ) - - return Peers(queryMods...) -} - -// LoadIPAddresses allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. -func (multiAddressL) LoadIPAddresses(ctx context.Context, e boil.ContextExecutor, singular bool, maybeMultiAddress interface{}, mods queries.Applicator) error { - var slice []*MultiAddress - var object *MultiAddress - - if singular { - var ok bool - object, ok = maybeMultiAddress.(*MultiAddress) - if !ok { - object = new(MultiAddress) - ok = queries.SetFromEmbeddedStruct(&object, &maybeMultiAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeMultiAddress)) - } - } - } else { - s, ok := maybeMultiAddress.(*[]*MultiAddress) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeMultiAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeMultiAddress)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &multiAddressR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &multiAddressR{} - } - - for _, a := range args { - if a == obj.ID { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`ip_addresses`), - qm.WhereIn(`ip_addresses.multi_address_id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load ip_addresses") - } - - var resultSlice []*IPAddress - if err = queries.Bind(results, &resultSlice); err != nil { - 
return errors.Wrap(err, "failed to bind eager loaded slice ip_addresses") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on ip_addresses") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for ip_addresses") - } - - if len(ipAddressAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.IPAddresses = resultSlice - for _, foreign := range resultSlice { - if foreign.R == nil { - foreign.R = &ipAddressR{} - } - foreign.R.MultiAddress = object - } - return nil - } - - for _, foreign := range resultSlice { - for _, local := range slice { - if local.ID == foreign.MultiAddressID { - local.R.IPAddresses = append(local.R.IPAddresses, foreign) - if foreign.R == nil { - foreign.R = &ipAddressR{} - } - foreign.R.MultiAddress = local - break - } - } - } - - return nil -} - -// LoadPeers allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. 
-func (multiAddressL) LoadPeers(ctx context.Context, e boil.ContextExecutor, singular bool, maybeMultiAddress interface{}, mods queries.Applicator) error { - var slice []*MultiAddress - var object *MultiAddress - - if singular { - var ok bool - object, ok = maybeMultiAddress.(*MultiAddress) - if !ok { - object = new(MultiAddress) - ok = queries.SetFromEmbeddedStruct(&object, &maybeMultiAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeMultiAddress)) - } - } - } else { - s, ok := maybeMultiAddress.(*[]*MultiAddress) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeMultiAddress) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeMultiAddress)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &multiAddressR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &multiAddressR{} - } - - for _, a := range args { - if a == obj.ID { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.Select("\"peers\".\"id\", \"peers\".\"agent_version_id\", \"peers\".\"protocols_set_id\", \"peers\".\"multi_hash\", \"peers\".\"updated_at\", \"peers\".\"created_at\", \"peers\".\"last_seen_at\", \"a\".\"multi_address_id\""), - qm.From("\"peers\""), - qm.InnerJoin("\"peers_x_multi_addresses\" as \"a\" on \"peers\".\"id\" = \"a\".\"peer_id\""), - qm.WhereIn("\"a\".\"multi_address_id\" in ?", args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load peers") - } - - var resultSlice []*Peer - - var localJoinCols []int - for results.Next() { - one := new(Peer) - var localJoinCol int - - err = results.Scan(&one.ID, &one.AgentVersionID, 
&one.ProtocolsSetID, &one.MultiHash, &one.UpdatedAt, &one.CreatedAt, &one.LastSeenAt, &localJoinCol) - if err != nil { - return errors.Wrap(err, "failed to scan eager loaded results for peers") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "failed to plebian-bind eager loaded slice peers") - } - - resultSlice = append(resultSlice, one) - localJoinCols = append(localJoinCols, localJoinCol) - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on peers") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for peers") - } - - if len(peerAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.Peers = resultSlice - for _, foreign := range resultSlice { - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.MultiAddresses = append(foreign.R.MultiAddresses, object) - } - return nil - } - - for i, foreign := range resultSlice { - localJoinCol := localJoinCols[i] - for _, local := range slice { - if local.ID == localJoinCol { - local.R.Peers = append(local.R.Peers, foreign) - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.MultiAddresses = append(foreign.R.MultiAddresses, local) - break - } - } - } - - return nil -} - -// AddIPAddresses adds the given related objects to the existing relationships -// of the multi_address, optionally inserting them as new records. -// Appends related to o.R.IPAddresses. -// Sets related.R.MultiAddress appropriately. 
-func (o *MultiAddress) AddIPAddresses(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*IPAddress) error { - var err error - for _, rel := range related { - if insert { - rel.MultiAddressID = o.ID - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } else { - updateQuery := fmt.Sprintf( - "UPDATE \"ip_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"multi_address_id"}), - strmangle.WhereClause("\"", "\"", 2, ipAddressPrimaryKeyColumns), - ) - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update foreign table") - } - - rel.MultiAddressID = o.ID - } - } - - if o.R == nil { - o.R = &multiAddressR{ - IPAddresses: related, - } - } else { - o.R.IPAddresses = append(o.R.IPAddresses, related...) - } - - for _, rel := range related { - if rel.R == nil { - rel.R = &ipAddressR{ - MultiAddress: o, - } - } else { - rel.R.MultiAddress = o - } - } - return nil -} - -// AddPeers adds the given related objects to the existing relationships -// of the multi_address, optionally inserting them as new records. -// Appends related to o.R.Peers. -// Sets related.R.MultiAddresses appropriately. 
-func (o *MultiAddress) AddPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - var err error - for _, rel := range related { - if insert { - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - } - - for _, rel := range related { - query := "insert into \"peers_x_multi_addresses\" (\"multi_address_id\", \"peer_id\") values ($1, $2)" - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err = exec.ExecContext(ctx, query, values...) - if err != nil { - return errors.Wrap(err, "failed to insert into join table") - } - } - if o.R == nil { - o.R = &multiAddressR{ - Peers: related, - } - } else { - o.R.Peers = append(o.R.Peers, related...) - } - - for _, rel := range related { - if rel.R == nil { - rel.R = &peerR{ - MultiAddresses: MultiAddressSlice{o}, - } - } else { - rel.R.MultiAddresses = append(rel.R.MultiAddresses, o) - } - } - return nil -} - -// SetPeers removes all previously related items of the -// multi_address replacing them completely with the passed -// in related items, optionally inserting them as new records. -// Sets o.R.MultiAddresses's Peers accordingly. -// Replaces o.R.Peers with related. -// Sets related.R.MultiAddresses's Peers accordingly. -func (o *MultiAddress) SetPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - query := "delete from \"peers_x_multi_addresses\" where \"multi_address_id\" = $1" - values := []interface{}{o.ID} - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err := exec.ExecContext(ctx, query, values...) 
- if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - - removePeersFromMultiAddressesSlice(o, related) - if o.R != nil { - o.R.Peers = nil - } - - return o.AddPeers(ctx, exec, insert, related...) -} - -// RemovePeers relationships from objects passed in. -// Removes related items from R.Peers (uses pointer comparison, removal does not keep order) -// Sets related.R.MultiAddresses. -func (o *MultiAddress) RemovePeers(ctx context.Context, exec boil.ContextExecutor, related ...*Peer) error { - if len(related) == 0 { - return nil - } - - var err error - query := fmt.Sprintf( - "delete from \"peers_x_multi_addresses\" where \"multi_address_id\" = $1 and \"peer_id\" in (%s)", - strmangle.Placeholders(dialect.UseIndexPlaceholders, len(related), 2, 1), - ) - values := []interface{}{o.ID} - for _, rel := range related { - values = append(values, rel.ID) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err = exec.ExecContext(ctx, query, values...) - if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - removePeersFromMultiAddressesSlice(o, related) - if o.R == nil { - return nil - } - - for _, rel := range related { - for i, ri := range o.R.Peers { - if rel != ri { - continue - } - - ln := len(o.R.Peers) - if ln > 1 && i < ln-1 { - o.R.Peers[i] = o.R.Peers[ln-1] - } - o.R.Peers = o.R.Peers[:ln-1] - break - } - } - - return nil -} - -func removePeersFromMultiAddressesSlice(o *MultiAddress, related []*Peer) { - for _, rel := range related { - if rel.R == nil { - continue - } - for i, ri := range rel.R.MultiAddresses { - if o.ID != ri.ID { - continue - } - - ln := len(rel.R.MultiAddresses) - if ln > 1 && i < ln-1 { - rel.R.MultiAddresses[i] = rel.R.MultiAddresses[ln-1] - } - rel.R.MultiAddresses = rel.R.MultiAddresses[:ln-1] - break - } - } -} - -// MultiAddresses retrieves all the records using an executor. 
-func MultiAddresses(mods ...qm.QueryMod) multiAddressQuery { - mods = append(mods, qm.From("\"multi_addresses\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"multi_addresses\".*"}) - } - - return multiAddressQuery{q} -} - -// FindMultiAddress retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. -func FindMultiAddress(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*MultiAddress, error) { - multiAddressObj := &MultiAddress{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"multi_addresses\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, multiAddressObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from multi_addresses") - } - - if err = multiAddressObj.doAfterSelectHooks(ctx, exec); err != nil { - return multiAddressObj, err - } - - return multiAddressObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. 
-func (o *MultiAddress) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no multi_addresses provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.UpdatedAt.IsZero() { - o.UpdatedAt = currTime - } - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(multiAddressColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - multiAddressInsertCacheMut.RLock() - cache, cached := multiAddressInsertCache[key] - multiAddressInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - multiAddressAllColumns, - multiAddressColumnsWithDefault, - multiAddressColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, multiAddressGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(multiAddressType, multiAddressMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(multiAddressType, multiAddressMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"multi_addresses\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"multi_addresses\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - 
fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into multi_addresses") - } - - if !cached { - multiAddressInsertCacheMut.Lock() - multiAddressInsertCache[key] = cache - multiAddressInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the MultiAddress. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. -func (o *MultiAddress) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - } - - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - multiAddressUpdateCacheMut.RLock() - cache, cached := multiAddressUpdateCache[key] - multiAddressUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - multiAddressAllColumns, - multiAddressPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, multiAddressGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update multi_addresses, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"multi_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, multiAddressPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(multiAddressType, 
multiAddressMapping, append(wl, multiAddressPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update multi_addresses row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for multi_addresses") - } - - if !cached { - multiAddressUpdateCacheMut.Lock() - multiAddressUpdateCache[key] = cache - multiAddressUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. -func (q multiAddressQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for multi_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for multi_addresses") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. 
-func (o MultiAddressSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), multiAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"multi_addresses\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, multiAddressPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in multiAddress slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all multiAddress") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. 
-func (o *MultiAddress) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no multi_addresses provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(multiAddressColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - multiAddressUpsertCacheMut.RLock() - cache, cached := multiAddressUpsertCache[key] - multiAddressUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - multiAddressAllColumns, - multiAddressColumnsWithDefault, - multiAddressColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - multiAddressAllColumns, - multiAddressPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, multiAddressGeneratedColumns) - update = strmangle.SetComplement(update, multiAddressGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert multi_addresses, could not build update column list") - } - - 
conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(multiAddressPrimaryKeyColumns)) - copy(conflict, multiAddressPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"multi_addresses\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(multiAddressType, multiAddressMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(multiAddressType, multiAddressMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert multi_addresses") - } - - if !cached { - multiAddressUpsertCacheMut.Lock() - multiAddressUpsertCache[key] = cache - multiAddressUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single MultiAddress record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *MultiAddress) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no MultiAddress provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), multiAddressPrimaryKeyMapping) - sql := "DELETE FROM \"multi_addresses\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from multi_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for multi_addresses") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q multiAddressQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no multiAddressQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from multi_addresses") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for multi_addresses") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o MultiAddressSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(multiAddressBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), multiAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"multi_addresses\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, multiAddressPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from multiAddress slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for multi_addresses") - } - - if len(multiAddressAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *MultiAddress) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindMultiAddress(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *MultiAddressSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := MultiAddressSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), multiAddressPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"multi_addresses\".* FROM \"multi_addresses\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, multiAddressPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in MultiAddressSlice") - } - - *o = slice - - return nil -} - -// MultiAddressExists checks if the MultiAddress row exists. -func MultiAddressExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"multi_addresses\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if multi_addresses exists") - } - - return exists, nil -} diff --git a/db/models/peer_logs.go b/db/models/peer_logs.go deleted file mode 100644 index 85e7cf7..0000000 --- a/db/models/peer_logs.go +++ /dev/null @@ -1,960 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// PeerLog is an object representing the database table. -type PeerLog struct { - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - PeerID int64 `boil:"peer_id" json:"peer_id" toml:"peer_id" yaml:"peer_id"` - Field string `boil:"field" json:"field" toml:"field" yaml:"field"` - Old string `boil:"old" json:"old" toml:"old" yaml:"old"` - New string `boil:"new" json:"new" toml:"new" yaml:"new"` - CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - - R *peerLogR `boil:"-" json:"-" toml:"-" yaml:"-"` - L peerLogL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var PeerLogColumns = struct { - ID string - PeerID string - Field string - Old string - New string - CreatedAt string -}{ - ID: "id", - PeerID: "peer_id", - Field: "field", - Old: "old", - New: "new", - CreatedAt: "created_at", -} - -var PeerLogTableColumns = struct { - ID string - PeerID string - Field string - Old string - New string - CreatedAt string -}{ - ID: "peer_logs.id", - PeerID: "peer_logs.peer_id", - Field: "peer_logs.field", - Old: "peer_logs.old", - New: "peer_logs.new", - CreatedAt: "peer_logs.created_at", -} - -// Generated where - -type whereHelperint64 struct{ field string } - -func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperint64) LTE(x int64) qm.QueryMod { return 
qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } -func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } -func (w whereHelperint64) IN(slice []int64) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...) -} -func (w whereHelperint64) NIN(slice []int64) qm.QueryMod { - values := make([]interface{}, 0, len(slice)) - for _, value := range slice { - values = append(values, value) - } - return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...) -} - -var PeerLogWhere = struct { - ID whereHelperint - PeerID whereHelperint64 - Field whereHelperstring - Old whereHelperstring - New whereHelperstring - CreatedAt whereHelpertime_Time -}{ - ID: whereHelperint{field: "\"peer_logs\".\"id\""}, - PeerID: whereHelperint64{field: "\"peer_logs\".\"peer_id\""}, - Field: whereHelperstring{field: "\"peer_logs\".\"field\""}, - Old: whereHelperstring{field: "\"peer_logs\".\"old\""}, - New: whereHelperstring{field: "\"peer_logs\".\"new\""}, - CreatedAt: whereHelpertime_Time{field: "\"peer_logs\".\"created_at\""}, -} - -// PeerLogRels is where relationship names are stored. -var PeerLogRels = struct { -}{} - -// peerLogR is where relationships are stored. -type peerLogR struct { -} - -// NewStruct creates a new relationship struct -func (*peerLogR) NewStruct() *peerLogR { - return &peerLogR{} -} - -// peerLogL is where Load methods for each relationship are stored. 
-type peerLogL struct{} - -var ( - peerLogAllColumns = []string{"id", "peer_id", "field", "old", "new", "created_at"} - peerLogColumnsWithoutDefault = []string{"peer_id", "field", "old", "new", "created_at"} - peerLogColumnsWithDefault = []string{"id"} - peerLogPrimaryKeyColumns = []string{"id", "created_at"} - peerLogGeneratedColumns = []string{"id"} -) - -type ( - // PeerLogSlice is an alias for a slice of pointers to PeerLog. - // This should almost always be used instead of []PeerLog. - PeerLogSlice []*PeerLog - // PeerLogHook is the signature for custom PeerLog hook methods - PeerLogHook func(context.Context, boil.ContextExecutor, *PeerLog) error - - peerLogQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - peerLogType = reflect.TypeOf(&PeerLog{}) - peerLogMapping = queries.MakeStructMapping(peerLogType) - peerLogPrimaryKeyMapping, _ = queries.BindMapping(peerLogType, peerLogMapping, peerLogPrimaryKeyColumns) - peerLogInsertCacheMut sync.RWMutex - peerLogInsertCache = make(map[string]insertCache) - peerLogUpdateCacheMut sync.RWMutex - peerLogUpdateCache = make(map[string]updateCache) - peerLogUpsertCacheMut sync.RWMutex - peerLogUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var peerLogAfterSelectHooks []PeerLogHook - -var peerLogBeforeInsertHooks []PeerLogHook -var peerLogAfterInsertHooks []PeerLogHook - -var peerLogBeforeUpdateHooks []PeerLogHook -var peerLogAfterUpdateHooks []PeerLogHook - -var peerLogBeforeDeleteHooks []PeerLogHook -var peerLogAfterDeleteHooks []PeerLogHook - -var peerLogBeforeUpsertHooks []PeerLogHook -var peerLogAfterUpsertHooks []PeerLogHook - -// doAfterSelectHooks executes all "after Select" hooks. 
-func (o *PeerLog) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *PeerLog) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *PeerLog) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *PeerLog) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *PeerLog) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. 
-func (o *PeerLog) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *PeerLog) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *PeerLog) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *PeerLog) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerLogAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddPeerLogHook registers your hook function for all future operations. 
-func AddPeerLogHook(hookPoint boil.HookPoint, peerLogHook PeerLogHook) { - switch hookPoint { - case boil.AfterSelectHook: - peerLogAfterSelectHooks = append(peerLogAfterSelectHooks, peerLogHook) - case boil.BeforeInsertHook: - peerLogBeforeInsertHooks = append(peerLogBeforeInsertHooks, peerLogHook) - case boil.AfterInsertHook: - peerLogAfterInsertHooks = append(peerLogAfterInsertHooks, peerLogHook) - case boil.BeforeUpdateHook: - peerLogBeforeUpdateHooks = append(peerLogBeforeUpdateHooks, peerLogHook) - case boil.AfterUpdateHook: - peerLogAfterUpdateHooks = append(peerLogAfterUpdateHooks, peerLogHook) - case boil.BeforeDeleteHook: - peerLogBeforeDeleteHooks = append(peerLogBeforeDeleteHooks, peerLogHook) - case boil.AfterDeleteHook: - peerLogAfterDeleteHooks = append(peerLogAfterDeleteHooks, peerLogHook) - case boil.BeforeUpsertHook: - peerLogBeforeUpsertHooks = append(peerLogBeforeUpsertHooks, peerLogHook) - case boil.AfterUpsertHook: - peerLogAfterUpsertHooks = append(peerLogAfterUpsertHooks, peerLogHook) - } -} - -// One returns a single peerLog record from the query. -func (q peerLogQuery) One(ctx context.Context, exec boil.ContextExecutor) (*PeerLog, error) { - o := &PeerLog{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for peer_logs") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all PeerLog records from the query. 
-func (q peerLogQuery) All(ctx context.Context, exec boil.ContextExecutor) (PeerLogSlice, error) { - var o []*PeerLog - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to PeerLog slice") - } - - if len(peerLogAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all PeerLog records in the query. -func (q peerLogQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count peer_logs rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q peerLogQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if peer_logs exists") - } - - return count > 0, nil -} - -// PeerLogs retrieves all the records using an executor. -func PeerLogs(mods ...qm.QueryMod) peerLogQuery { - mods = append(mods, qm.From("\"peer_logs\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"peer_logs\".*"}) - } - - return peerLogQuery{q} -} - -// FindPeerLog retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindPeerLog(ctx context.Context, exec boil.ContextExecutor, iD int, createdAt time.Time, selectCols ...string) (*PeerLog, error) { - peerLogObj := &PeerLog{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"peer_logs\" where \"id\"=$1 AND \"created_at\"=$2", sel, - ) - - q := queries.Raw(query, iD, createdAt) - - err := q.Bind(ctx, exec, peerLogObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from peer_logs") - } - - if err = peerLogObj.doAfterSelectHooks(ctx, exec); err != nil { - return peerLogObj, err - } - - return peerLogObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *PeerLog) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no peer_logs provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(peerLogColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - peerLogInsertCacheMut.RLock() - cache, cached := peerLogInsertCache[key] - peerLogInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - peerLogAllColumns, - peerLogColumnsWithDefault, - peerLogColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, peerLogGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(peerLogType, peerLogMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(peerLogType, 
peerLogMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"peer_logs\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"peer_logs\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into peer_logs") - } - - if !cached { - peerLogInsertCacheMut.Lock() - peerLogInsertCache[key] = cache - peerLogInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the PeerLog. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *PeerLog) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - peerLogUpdateCacheMut.RLock() - cache, cached := peerLogUpdateCache[key] - peerLogUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - peerLogAllColumns, - peerLogPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, peerLogGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update peer_logs, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"peer_logs\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, peerLogPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(peerLogType, peerLogMapping, append(wl, peerLogPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update peer_logs row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for peer_logs") - } - - if !cached { - peerLogUpdateCacheMut.Lock() - peerLogUpdateCache[key] = cache - peerLogUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. 
-func (q peerLogQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for peer_logs") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for peer_logs") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o PeerLogSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerLogPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"peer_logs\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, peerLogPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in peerLog slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all peerLog") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *PeerLog) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no peer_logs provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(peerLogColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - peerLogUpsertCacheMut.RLock() - cache, cached := peerLogUpsertCache[key] - peerLogUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - peerLogAllColumns, - peerLogColumnsWithDefault, - peerLogColumnsWithoutDefault, - nzDefaults, - ) - - 
update := updateColumns.UpdateColumnSet( - peerLogAllColumns, - peerLogPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, peerLogGeneratedColumns) - update = strmangle.SetComplement(update, peerLogGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert peer_logs, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(peerLogPrimaryKeyColumns)) - copy(conflict, peerLogPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"peer_logs\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(peerLogType, peerLogMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(peerLogType, peerLogMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert peer_logs") - } - - if !cached { - peerLogUpsertCacheMut.Lock() - peerLogUpsertCache[key] = cache - peerLogUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single PeerLog record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *PeerLog) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no PeerLog provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), peerLogPrimaryKeyMapping) - sql := "DELETE FROM \"peer_logs\" WHERE \"id\"=$1 AND \"created_at\"=$2" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from peer_logs") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for peer_logs") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q peerLogQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no peerLogQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from peer_logs") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for peer_logs") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o PeerLogSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(peerLogBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerLogPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"peer_logs\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, peerLogPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from peerLog slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for peer_logs") - } - - if len(peerLogAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *PeerLog) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindPeerLog(ctx, exec, o.ID, o.CreatedAt) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *PeerLogSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := PeerLogSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerLogPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"peer_logs\".* FROM \"peer_logs\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, peerLogPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in PeerLogSlice") - } - - *o = slice - - return nil -} - -// PeerLogExists checks if the PeerLog row exists. -func PeerLogExists(ctx context.Context, exec boil.ContextExecutor, iD int, createdAt time.Time) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"peer_logs\" where \"id\"=$1 AND \"created_at\"=$2 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD, createdAt) - } - row := exec.QueryRowContext(ctx, sql, iD, createdAt) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if peer_logs exists") - } - - return exists, nil -} diff --git a/db/models/peers.go b/db/models/peers.go deleted file mode 100644 index f1cd34f..0000000 --- a/db/models/peers.go +++ /dev/null @@ -1,1972 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// Peer is an object representing the database table. -type Peer struct { - ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"` - AgentVersionID null.Int `boil:"agent_version_id" json:"agent_version_id,omitempty" toml:"agent_version_id" yaml:"agent_version_id,omitempty"` - ProtocolsSetID null.Int `boil:"protocols_set_id" json:"protocols_set_id,omitempty" toml:"protocols_set_id" yaml:"protocols_set_id,omitempty"` - MultiHash string `boil:"multi_hash" json:"multi_hash" toml:"multi_hash" yaml:"multi_hash"` - UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"` - CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - LastSeenAt time.Time `boil:"last_seen_at" json:"last_seen_at" toml:"last_seen_at" yaml:"last_seen_at"` - - R *peerR `boil:"-" json:"-" toml:"-" yaml:"-"` - L peerL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var PeerColumns = struct { - ID string - AgentVersionID string - ProtocolsSetID string - MultiHash string - UpdatedAt string - CreatedAt string - LastSeenAt string -}{ - ID: "id", - AgentVersionID: "agent_version_id", - ProtocolsSetID: "protocols_set_id", - MultiHash: "multi_hash", - UpdatedAt: "updated_at", - CreatedAt: "created_at", - LastSeenAt: "last_seen_at", -} - -var PeerTableColumns = struct { - ID string - AgentVersionID string - ProtocolsSetID string - MultiHash string - UpdatedAt string - CreatedAt string - LastSeenAt string -}{ - ID: "peers.id", - AgentVersionID: "peers.agent_version_id", - ProtocolsSetID: 
"peers.protocols_set_id", - MultiHash: "peers.multi_hash", - UpdatedAt: "peers.updated_at", - CreatedAt: "peers.created_at", - LastSeenAt: "peers.last_seen_at", -} - -// Generated where - -var PeerWhere = struct { - ID whereHelperint64 - AgentVersionID whereHelpernull_Int - ProtocolsSetID whereHelpernull_Int - MultiHash whereHelperstring - UpdatedAt whereHelpertime_Time - CreatedAt whereHelpertime_Time - LastSeenAt whereHelpertime_Time -}{ - ID: whereHelperint64{field: "\"peers\".\"id\""}, - AgentVersionID: whereHelpernull_Int{field: "\"peers\".\"agent_version_id\""}, - ProtocolsSetID: whereHelpernull_Int{field: "\"peers\".\"protocols_set_id\""}, - MultiHash: whereHelperstring{field: "\"peers\".\"multi_hash\""}, - UpdatedAt: whereHelpertime_Time{field: "\"peers\".\"updated_at\""}, - CreatedAt: whereHelpertime_Time{field: "\"peers\".\"created_at\""}, - LastSeenAt: whereHelpertime_Time{field: "\"peers\".\"last_seen_at\""}, -} - -// PeerRels is where relationship names are stored. -var PeerRels = struct { - AgentVersion string - ProtocolsSet string - Keys string - MultiAddresses string -}{ - AgentVersion: "AgentVersion", - ProtocolsSet: "ProtocolsSet", - Keys: "Keys", - MultiAddresses: "MultiAddresses", -} - -// peerR is where relationships are stored. 
-type peerR struct { - AgentVersion *AgentVersion `boil:"AgentVersion" json:"AgentVersion" toml:"AgentVersion" yaml:"AgentVersion"` - ProtocolsSet *ProtocolsSet `boil:"ProtocolsSet" json:"ProtocolsSet" toml:"ProtocolsSet" yaml:"ProtocolsSet"` - Keys KeySlice `boil:"Keys" json:"Keys" toml:"Keys" yaml:"Keys"` - MultiAddresses MultiAddressSlice `boil:"MultiAddresses" json:"MultiAddresses" toml:"MultiAddresses" yaml:"MultiAddresses"` -} - -// NewStruct creates a new relationship struct -func (*peerR) NewStruct() *peerR { - return &peerR{} -} - -func (r *peerR) GetAgentVersion() *AgentVersion { - if r == nil { - return nil - } - return r.AgentVersion -} - -func (r *peerR) GetProtocolsSet() *ProtocolsSet { - if r == nil { - return nil - } - return r.ProtocolsSet -} - -func (r *peerR) GetKeys() KeySlice { - if r == nil { - return nil - } - return r.Keys -} - -func (r *peerR) GetMultiAddresses() MultiAddressSlice { - if r == nil { - return nil - } - return r.MultiAddresses -} - -// peerL is where Load methods for each relationship are stored. -type peerL struct{} - -var ( - peerAllColumns = []string{"id", "agent_version_id", "protocols_set_id", "multi_hash", "updated_at", "created_at", "last_seen_at"} - peerColumnsWithoutDefault = []string{"multi_hash", "updated_at", "created_at", "last_seen_at"} - peerColumnsWithDefault = []string{"id", "agent_version_id", "protocols_set_id"} - peerPrimaryKeyColumns = []string{"id"} - peerGeneratedColumns = []string{"id"} -) - -type ( - // PeerSlice is an alias for a slice of pointers to Peer. - // This should almost always be used instead of []Peer. 
- PeerSlice []*Peer - // PeerHook is the signature for custom Peer hook methods - PeerHook func(context.Context, boil.ContextExecutor, *Peer) error - - peerQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - peerType = reflect.TypeOf(&Peer{}) - peerMapping = queries.MakeStructMapping(peerType) - peerPrimaryKeyMapping, _ = queries.BindMapping(peerType, peerMapping, peerPrimaryKeyColumns) - peerInsertCacheMut sync.RWMutex - peerInsertCache = make(map[string]insertCache) - peerUpdateCacheMut sync.RWMutex - peerUpdateCache = make(map[string]updateCache) - peerUpsertCacheMut sync.RWMutex - peerUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var peerAfterSelectHooks []PeerHook - -var peerBeforeInsertHooks []PeerHook -var peerAfterInsertHooks []PeerHook - -var peerBeforeUpdateHooks []PeerHook -var peerAfterUpdateHooks []PeerHook - -var peerBeforeDeleteHooks []PeerHook -var peerAfterDeleteHooks []PeerHook - -var peerBeforeUpsertHooks []PeerHook -var peerAfterUpsertHooks []PeerHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *Peer) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *Peer) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. 
-func (o *Peer) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *Peer) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *Peer) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *Peer) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *Peer) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. 
-func (o *Peer) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *Peer) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range peerAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddPeerHook registers your hook function for all future operations. -func AddPeerHook(hookPoint boil.HookPoint, peerHook PeerHook) { - switch hookPoint { - case boil.AfterSelectHook: - peerAfterSelectHooks = append(peerAfterSelectHooks, peerHook) - case boil.BeforeInsertHook: - peerBeforeInsertHooks = append(peerBeforeInsertHooks, peerHook) - case boil.AfterInsertHook: - peerAfterInsertHooks = append(peerAfterInsertHooks, peerHook) - case boil.BeforeUpdateHook: - peerBeforeUpdateHooks = append(peerBeforeUpdateHooks, peerHook) - case boil.AfterUpdateHook: - peerAfterUpdateHooks = append(peerAfterUpdateHooks, peerHook) - case boil.BeforeDeleteHook: - peerBeforeDeleteHooks = append(peerBeforeDeleteHooks, peerHook) - case boil.AfterDeleteHook: - peerAfterDeleteHooks = append(peerAfterDeleteHooks, peerHook) - case boil.BeforeUpsertHook: - peerBeforeUpsertHooks = append(peerBeforeUpsertHooks, peerHook) - case boil.AfterUpsertHook: - peerAfterUpsertHooks = append(peerAfterUpsertHooks, peerHook) - } -} - -// One returns a single peer record from the query. 
-func (q peerQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Peer, error) { - o := &Peer{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for peers") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all Peer records from the query. -func (q peerQuery) All(ctx context.Context, exec boil.ContextExecutor) (PeerSlice, error) { - var o []*Peer - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to Peer slice") - } - - if len(peerAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all Peer records in the query. -func (q peerQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count peers rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q peerQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if peers exists") - } - - return count > 0, nil -} - -// AgentVersion pointed to by the foreign key. 
-func (o *Peer) AgentVersion(mods ...qm.QueryMod) agentVersionQuery { - queryMods := []qm.QueryMod{ - qm.Where("\"id\" = ?", o.AgentVersionID), - } - - queryMods = append(queryMods, mods...) - - return AgentVersions(queryMods...) -} - -// ProtocolsSet pointed to by the foreign key. -func (o *Peer) ProtocolsSet(mods ...qm.QueryMod) protocolsSetQuery { - queryMods := []qm.QueryMod{ - qm.Where("\"id\" = ?", o.ProtocolsSetID), - } - - queryMods = append(queryMods, mods...) - - return ProtocolsSets(queryMods...) -} - -// Keys retrieves all the key's Keys with an executor. -func (o *Peer) Keys(mods ...qm.QueryMod) keyQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) - } - - queryMods = append(queryMods, - qm.Where("\"keys\".\"peer_id\"=?", o.ID), - ) - - return Keys(queryMods...) -} - -// MultiAddresses retrieves all the multi_address's MultiAddresses with an executor. -func (o *Peer) MultiAddresses(mods ...qm.QueryMod) multiAddressQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) - } - - queryMods = append(queryMods, - qm.InnerJoin("\"peers_x_multi_addresses\" on \"multi_addresses\".\"id\" = \"peers_x_multi_addresses\".\"multi_address_id\""), - qm.Where("\"peers_x_multi_addresses\".\"peer_id\"=?", o.ID), - ) - - return MultiAddresses(queryMods...) -} - -// LoadAgentVersion allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for an N-1 relationship. 
-func (peerL) LoadAgentVersion(ctx context.Context, e boil.ContextExecutor, singular bool, maybePeer interface{}, mods queries.Applicator) error { - var slice []*Peer - var object *Peer - - if singular { - var ok bool - object, ok = maybePeer.(*Peer) - if !ok { - object = new(Peer) - ok = queries.SetFromEmbeddedStruct(&object, &maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybePeer)) - } - } - } else { - s, ok := maybePeer.(*[]*Peer) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybePeer)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &peerR{} - } - if !queries.IsNil(object.AgentVersionID) { - args = append(args, object.AgentVersionID) - } - - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &peerR{} - } - - for _, a := range args { - if queries.Equal(a, obj.AgentVersionID) { - continue Outer - } - } - - if !queries.IsNil(obj.AgentVersionID) { - args = append(args, obj.AgentVersionID) - } - - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`agent_versions`), - qm.WhereIn(`agent_versions.id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load AgentVersion") - } - - var resultSlice []*AgentVersion - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed to bind eager loaded slice AgentVersion") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results of eager load for agent_versions") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for agent_versions") - } - - if 
len(peerAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - - if len(resultSlice) == 0 { - return nil - } - - if singular { - foreign := resultSlice[0] - object.R.AgentVersion = foreign - if foreign.R == nil { - foreign.R = &agentVersionR{} - } - foreign.R.Peers = append(foreign.R.Peers, object) - return nil - } - - for _, local := range slice { - for _, foreign := range resultSlice { - if queries.Equal(local.AgentVersionID, foreign.ID) { - local.R.AgentVersion = foreign - if foreign.R == nil { - foreign.R = &agentVersionR{} - } - foreign.R.Peers = append(foreign.R.Peers, local) - break - } - } - } - - return nil -} - -// LoadProtocolsSet allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for an N-1 relationship. -func (peerL) LoadProtocolsSet(ctx context.Context, e boil.ContextExecutor, singular bool, maybePeer interface{}, mods queries.Applicator) error { - var slice []*Peer - var object *Peer - - if singular { - var ok bool - object, ok = maybePeer.(*Peer) - if !ok { - object = new(Peer) - ok = queries.SetFromEmbeddedStruct(&object, &maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybePeer)) - } - } - } else { - s, ok := maybePeer.(*[]*Peer) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybePeer)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &peerR{} - } - if !queries.IsNil(object.ProtocolsSetID) { - args = append(args, object.ProtocolsSetID) - } - - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &peerR{} - } - - for _, a := range args { - if queries.Equal(a, obj.ProtocolsSetID) { - continue Outer - } - } - - if !queries.IsNil(obj.ProtocolsSetID) { 
- args = append(args, obj.ProtocolsSetID) - } - - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`protocols_sets`), - qm.WhereIn(`protocols_sets.id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load ProtocolsSet") - } - - var resultSlice []*ProtocolsSet - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed to bind eager loaded slice ProtocolsSet") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results of eager load for protocols_sets") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for protocols_sets") - } - - if len(peerAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - - if len(resultSlice) == 0 { - return nil - } - - if singular { - foreign := resultSlice[0] - object.R.ProtocolsSet = foreign - if foreign.R == nil { - foreign.R = &protocolsSetR{} - } - foreign.R.Peers = append(foreign.R.Peers, object) - return nil - } - - for _, local := range slice { - for _, foreign := range resultSlice { - if queries.Equal(local.ProtocolsSetID, foreign.ID) { - local.R.ProtocolsSet = foreign - if foreign.R == nil { - foreign.R = &protocolsSetR{} - } - foreign.R.Peers = append(foreign.R.Peers, local) - break - } - } - } - - return nil -} - -// LoadKeys allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. 
-func (peerL) LoadKeys(ctx context.Context, e boil.ContextExecutor, singular bool, maybePeer interface{}, mods queries.Applicator) error { - var slice []*Peer - var object *Peer - - if singular { - var ok bool - object, ok = maybePeer.(*Peer) - if !ok { - object = new(Peer) - ok = queries.SetFromEmbeddedStruct(&object, &maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybePeer)) - } - } - } else { - s, ok := maybePeer.(*[]*Peer) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybePeer)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &peerR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &peerR{} - } - - for _, a := range args { - if queries.Equal(a, obj.ID) { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`keys`), - qm.WhereIn(`keys.peer_id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load keys") - } - - var resultSlice []*Key - if err = queries.Bind(results, &resultSlice); err != nil { - return errors.Wrap(err, "failed to bind eager loaded slice keys") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on keys") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for keys") - } - - if len(keyAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.Keys = resultSlice - for _, foreign := 
range resultSlice { - if foreign.R == nil { - foreign.R = &keyR{} - } - foreign.R.Peer = object - } - return nil - } - - for _, foreign := range resultSlice { - for _, local := range slice { - if queries.Equal(local.ID, foreign.PeerID) { - local.R.Keys = append(local.R.Keys, foreign) - if foreign.R == nil { - foreign.R = &keyR{} - } - foreign.R.Peer = local - break - } - } - } - - return nil -} - -// LoadMultiAddresses allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. -func (peerL) LoadMultiAddresses(ctx context.Context, e boil.ContextExecutor, singular bool, maybePeer interface{}, mods queries.Applicator) error { - var slice []*Peer - var object *Peer - - if singular { - var ok bool - object, ok = maybePeer.(*Peer) - if !ok { - object = new(Peer) - ok = queries.SetFromEmbeddedStruct(&object, &maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybePeer)) - } - } - } else { - s, ok := maybePeer.(*[]*Peer) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybePeer) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybePeer)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &peerR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &peerR{} - } - - for _, a := range args { - if a == obj.ID { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.Select("\"multi_addresses\".\"id\", \"multi_addresses\".\"asn\", \"multi_addresses\".\"is_cloud\", \"multi_addresses\".\"is_relay\", \"multi_addresses\".\"is_public\", \"multi_addresses\".\"addr\", \"multi_addresses\".\"has_many_addrs\", \"multi_addresses\".\"resolved\", \"multi_addresses\".\"country\", 
\"multi_addresses\".\"continent\", \"multi_addresses\".\"maddr\", \"multi_addresses\".\"updated_at\", \"multi_addresses\".\"created_at\", \"a\".\"peer_id\""), - qm.From("\"multi_addresses\""), - qm.InnerJoin("\"peers_x_multi_addresses\" as \"a\" on \"multi_addresses\".\"id\" = \"a\".\"multi_address_id\""), - qm.WhereIn("\"a\".\"peer_id\" in ?", args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load multi_addresses") - } - - var resultSlice []*MultiAddress - - var localJoinCols []int64 - for results.Next() { - one := new(MultiAddress) - var localJoinCol int64 - - err = results.Scan(&one.ID, &one.Asn, &one.IsCloud, &one.IsRelay, &one.IsPublic, &one.Addr, &one.HasManyAddrs, &one.Resolved, &one.Country, &one.Continent, &one.Maddr, &one.UpdatedAt, &one.CreatedAt, &localJoinCol) - if err != nil { - return errors.Wrap(err, "failed to scan eager loaded results for multi_addresses") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "failed to plebian-bind eager loaded slice multi_addresses") - } - - resultSlice = append(resultSlice, one) - localJoinCols = append(localJoinCols, localJoinCol) - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on multi_addresses") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for multi_addresses") - } - - if len(multiAddressAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.MultiAddresses = resultSlice - for _, foreign := range resultSlice { - if foreign.R == nil { - foreign.R = &multiAddressR{} - } - foreign.R.Peers = append(foreign.R.Peers, object) - } - return nil - } - - for i, foreign := range resultSlice { - localJoinCol := localJoinCols[i] - for _, 
local := range slice { - if local.ID == localJoinCol { - local.R.MultiAddresses = append(local.R.MultiAddresses, foreign) - if foreign.R == nil { - foreign.R = &multiAddressR{} - } - foreign.R.Peers = append(foreign.R.Peers, local) - break - } - } - } - - return nil -} - -// SetAgentVersion of the peer to the related item. -// Sets o.R.AgentVersion to related. -// Adds o to related.R.Peers. -func (o *Peer) SetAgentVersion(ctx context.Context, exec boil.ContextExecutor, insert bool, related *AgentVersion) error { - var err error - if insert { - if err = related.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - - updateQuery := fmt.Sprintf( - "UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"agent_version_id"}), - strmangle.WhereClause("\"", "\"", 2, peerPrimaryKeyColumns), - ) - values := []interface{}{related.ID, o.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - queries.Assign(&o.AgentVersionID, related.ID) - if o.R == nil { - o.R = &peerR{ - AgentVersion: related, - } - } else { - o.R.AgentVersion = related - } - - if related.R == nil { - related.R = &agentVersionR{ - Peers: PeerSlice{o}, - } - } else { - related.R.Peers = append(related.R.Peers, o) - } - - return nil -} - -// RemoveAgentVersion relationship. -// Sets o.R.AgentVersion to nil. -// Removes o from all passed in related items' relationships struct. 
-func (o *Peer) RemoveAgentVersion(ctx context.Context, exec boil.ContextExecutor, related *AgentVersion) error { - var err error - - queries.SetScanner(&o.AgentVersionID, nil) - if _, err = o.Update(ctx, exec, boil.Whitelist("agent_version_id")); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - if o.R != nil { - o.R.AgentVersion = nil - } - if related == nil || related.R == nil { - return nil - } - - for i, ri := range related.R.Peers { - if queries.Equal(o.AgentVersionID, ri.AgentVersionID) { - continue - } - - ln := len(related.R.Peers) - if ln > 1 && i < ln-1 { - related.R.Peers[i] = related.R.Peers[ln-1] - } - related.R.Peers = related.R.Peers[:ln-1] - break - } - return nil -} - -// SetProtocolsSet of the peer to the related item. -// Sets o.R.ProtocolsSet to related. -// Adds o to related.R.Peers. -func (o *Peer) SetProtocolsSet(ctx context.Context, exec boil.ContextExecutor, insert bool, related *ProtocolsSet) error { - var err error - if insert { - if err = related.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - - updateQuery := fmt.Sprintf( - "UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"protocols_set_id"}), - strmangle.WhereClause("\"", "\"", 2, peerPrimaryKeyColumns), - ) - values := []interface{}{related.ID, o.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - queries.Assign(&o.ProtocolsSetID, related.ID) - if o.R == nil { - o.R = &peerR{ - ProtocolsSet: related, - } - } else { - o.R.ProtocolsSet = related - } - - if related.R == nil { - related.R = &protocolsSetR{ - Peers: PeerSlice{o}, - } - } else { - related.R.Peers = append(related.R.Peers, o) - } - - return nil -} - -// 
RemoveProtocolsSet relationship. -// Sets o.R.ProtocolsSet to nil. -// Removes o from all passed in related items' relationships struct. -func (o *Peer) RemoveProtocolsSet(ctx context.Context, exec boil.ContextExecutor, related *ProtocolsSet) error { - var err error - - queries.SetScanner(&o.ProtocolsSetID, nil) - if _, err = o.Update(ctx, exec, boil.Whitelist("protocols_set_id")); err != nil { - return errors.Wrap(err, "failed to update local table") - } - - if o.R != nil { - o.R.ProtocolsSet = nil - } - if related == nil || related.R == nil { - return nil - } - - for i, ri := range related.R.Peers { - if queries.Equal(o.ProtocolsSetID, ri.ProtocolsSetID) { - continue - } - - ln := len(related.R.Peers) - if ln > 1 && i < ln-1 { - related.R.Peers[i] = related.R.Peers[ln-1] - } - related.R.Peers = related.R.Peers[:ln-1] - break - } - return nil -} - -// AddKeys adds the given related objects to the existing relationships -// of the peer, optionally inserting them as new records. -// Appends related to o.R.Keys. -// Sets related.R.Peer appropriately. 
-func (o *Peer) AddKeys(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Key) error { - var err error - for _, rel := range related { - if insert { - queries.Assign(&rel.PeerID, o.ID) - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } else { - updateQuery := fmt.Sprintf( - "UPDATE \"keys\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"peer_id"}), - strmangle.WhereClause("\"", "\"", 2, keyPrimaryKeyColumns), - ) - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update foreign table") - } - - queries.Assign(&rel.PeerID, o.ID) - } - } - - if o.R == nil { - o.R = &peerR{ - Keys: related, - } - } else { - o.R.Keys = append(o.R.Keys, related...) - } - - for _, rel := range related { - if rel.R == nil { - rel.R = &keyR{ - Peer: o, - } - } else { - rel.R.Peer = o - } - } - return nil -} - -// SetKeys removes all previously related items of the -// peer replacing them completely with the passed -// in related items, optionally inserting them as new records. -// Sets o.R.Peer's Keys accordingly. -// Replaces o.R.Keys with related. -// Sets related.R.Peer's Keys accordingly. -func (o *Peer) SetKeys(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Key) error { - query := "update \"keys\" set \"peer_id\" = null where \"peer_id\" = $1" - values := []interface{}{o.ID} - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err := exec.ExecContext(ctx, query, values...) 
- if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - - if o.R != nil { - for _, rel := range o.R.Keys { - queries.SetScanner(&rel.PeerID, nil) - if rel.R == nil { - continue - } - - rel.R.Peer = nil - } - o.R.Keys = nil - } - - return o.AddKeys(ctx, exec, insert, related...) -} - -// RemoveKeys relationships from objects passed in. -// Removes related items from R.Keys (uses pointer comparison, removal does not keep order) -// Sets related.R.Peer. -func (o *Peer) RemoveKeys(ctx context.Context, exec boil.ContextExecutor, related ...*Key) error { - if len(related) == 0 { - return nil - } - - var err error - for _, rel := range related { - queries.SetScanner(&rel.PeerID, nil) - if rel.R != nil { - rel.R.Peer = nil - } - if _, err = rel.Update(ctx, exec, boil.Whitelist("peer_id")); err != nil { - return err - } - } - if o.R == nil { - return nil - } - - for _, rel := range related { - for i, ri := range o.R.Keys { - if rel != ri { - continue - } - - ln := len(o.R.Keys) - if ln > 1 && i < ln-1 { - o.R.Keys[i] = o.R.Keys[ln-1] - } - o.R.Keys = o.R.Keys[:ln-1] - break - } - } - - return nil -} - -// AddMultiAddresses adds the given related objects to the existing relationships -// of the peer, optionally inserting them as new records. -// Appends related to o.R.MultiAddresses. -// Sets related.R.Peers appropriately. 
-func (o *Peer) AddMultiAddresses(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*MultiAddress) error { - var err error - for _, rel := range related { - if insert { - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } - } - - for _, rel := range related { - query := "insert into \"peers_x_multi_addresses\" (\"peer_id\", \"multi_address_id\") values ($1, $2)" - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err = exec.ExecContext(ctx, query, values...) - if err != nil { - return errors.Wrap(err, "failed to insert into join table") - } - } - if o.R == nil { - o.R = &peerR{ - MultiAddresses: related, - } - } else { - o.R.MultiAddresses = append(o.R.MultiAddresses, related...) - } - - for _, rel := range related { - if rel.R == nil { - rel.R = &multiAddressR{ - Peers: PeerSlice{o}, - } - } else { - rel.R.Peers = append(rel.R.Peers, o) - } - } - return nil -} - -// SetMultiAddresses removes all previously related items of the -// peer replacing them completely with the passed -// in related items, optionally inserting them as new records. -// Sets o.R.Peers's MultiAddresses accordingly. -// Replaces o.R.MultiAddresses with related. -// Sets related.R.Peers's MultiAddresses accordingly. -func (o *Peer) SetMultiAddresses(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*MultiAddress) error { - query := "delete from \"peers_x_multi_addresses\" where \"peer_id\" = $1" - values := []interface{}{o.ID} - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err := exec.ExecContext(ctx, query, values...) 
- if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - - removeMultiAddressesFromPeersSlice(o, related) - if o.R != nil { - o.R.MultiAddresses = nil - } - - return o.AddMultiAddresses(ctx, exec, insert, related...) -} - -// RemoveMultiAddresses relationships from objects passed in. -// Removes related items from R.MultiAddresses (uses pointer comparison, removal does not keep order) -// Sets related.R.Peers. -func (o *Peer) RemoveMultiAddresses(ctx context.Context, exec boil.ContextExecutor, related ...*MultiAddress) error { - if len(related) == 0 { - return nil - } - - var err error - query := fmt.Sprintf( - "delete from \"peers_x_multi_addresses\" where \"peer_id\" = $1 and \"multi_address_id\" in (%s)", - strmangle.Placeholders(dialect.UseIndexPlaceholders, len(related), 2, 1), - ) - values := []interface{}{o.ID} - for _, rel := range related { - values = append(values, rel.ID) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err = exec.ExecContext(ctx, query, values...) 
- if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - removeMultiAddressesFromPeersSlice(o, related) - if o.R == nil { - return nil - } - - for _, rel := range related { - for i, ri := range o.R.MultiAddresses { - if rel != ri { - continue - } - - ln := len(o.R.MultiAddresses) - if ln > 1 && i < ln-1 { - o.R.MultiAddresses[i] = o.R.MultiAddresses[ln-1] - } - o.R.MultiAddresses = o.R.MultiAddresses[:ln-1] - break - } - } - - return nil -} - -func removeMultiAddressesFromPeersSlice(o *Peer, related []*MultiAddress) { - for _, rel := range related { - if rel.R == nil { - continue - } - for i, ri := range rel.R.Peers { - if o.ID != ri.ID { - continue - } - - ln := len(rel.R.Peers) - if ln > 1 && i < ln-1 { - rel.R.Peers[i] = rel.R.Peers[ln-1] - } - rel.R.Peers = rel.R.Peers[:ln-1] - break - } - } -} - -// Peers retrieves all the records using an executor. -func Peers(mods ...qm.QueryMod) peerQuery { - mods = append(mods, qm.From("\"peers\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"peers\".*"}) - } - - return peerQuery{q} -} - -// FindPeer retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindPeer(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Peer, error) { - peerObj := &Peer{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"peers\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, peerObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from peers") - } - - if err = peerObj.doAfterSelectHooks(ctx, exec); err != nil { - return peerObj, err - } - - return peerObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *Peer) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no peers provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.UpdatedAt.IsZero() { - o.UpdatedAt = currTime - } - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(peerColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - peerInsertCacheMut.RLock() - cache, cached := peerInsertCache[key] - peerInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - peerAllColumns, - peerColumnsWithDefault, - peerColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, peerGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(peerType, peerMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(peerType, peerMapping, returnColumns) - if err != nil { - return err - } - if 
len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"peers\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"peers\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into peers") - } - - if !cached { - peerInsertCacheMut.Lock() - peerInsertCache[key] = cache - peerInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the Peer. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *Peer) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - } - - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - peerUpdateCacheMut.RLock() - cache, cached := peerUpdateCache[key] - peerUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - peerAllColumns, - peerPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, peerGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update peers, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, peerPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(peerType, peerMapping, append(wl, peerPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update peers row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for peers") - } - - if !cached { - peerUpdateCacheMut.Lock() - peerUpdateCache[key] = cache - peerUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. 
-func (q peerQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for peers") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for peers") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o PeerSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, peerPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in peer slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all peer") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *Peer) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no peers provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - o.UpdatedAt = currTime - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(peerColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - peerUpsertCacheMut.RLock() - cache, cached := peerUpsertCache[key] - peerUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - peerAllColumns, - peerColumnsWithDefault, - peerColumnsWithoutDefault, - nzDefaults, - ) - - update := 
updateColumns.UpdateColumnSet( - peerAllColumns, - peerPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, peerGeneratedColumns) - update = strmangle.SetComplement(update, peerGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert peers, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(peerPrimaryKeyColumns)) - copy(conflict, peerPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"peers\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(peerType, peerMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(peerType, peerMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert peers") - } - - if !cached { - peerUpsertCacheMut.Lock() - peerUpsertCache[key] = cache - peerUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single Peer record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *Peer) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no Peer provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), peerPrimaryKeyMapping) - sql := "DELETE FROM \"peers\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from peers") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for peers") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q peerQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no peerQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from peers") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for peers") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o PeerSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(peerBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"peers\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, peerPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from peer slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for peers") - } - - if len(peerAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *Peer) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindPeer(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *PeerSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := PeerSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), peerPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"peers\".* FROM \"peers\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, peerPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in PeerSlice") - } - - *o = slice - - return nil -} - -// PeerExists checks if the Peer row exists. -func PeerExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"peers\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if peers exists") - } - - return exists, nil -} diff --git a/db/models/protocols.go b/db/models/protocols.go deleted file mode 100644 index 2fd9780..0000000 --- a/db/models/protocols.go +++ /dev/null @@ -1,918 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/strmangle" -) - -// Protocol is an object representing the database table. -type Protocol struct { // A unique id that identifies a agent version. - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - // Timestamp of when this protocol was seen the last time. - CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` - // The full protocol string. - Protocol string `boil:"protocol" json:"protocol" toml:"protocol" yaml:"protocol"` - - R *protocolR `boil:"-" json:"-" toml:"-" yaml:"-"` - L protocolL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var ProtocolColumns = struct { - ID string - CreatedAt string - Protocol string -}{ - ID: "id", - CreatedAt: "created_at", - Protocol: "protocol", -} - -var ProtocolTableColumns = struct { - ID string - CreatedAt string - Protocol string -}{ - ID: "protocols.id", - CreatedAt: "protocols.created_at", - Protocol: "protocols.protocol", -} - -// Generated where - -var ProtocolWhere = struct { - ID whereHelperint - CreatedAt whereHelpertime_Time - Protocol whereHelperstring -}{ - ID: whereHelperint{field: "\"protocols\".\"id\""}, - CreatedAt: whereHelpertime_Time{field: "\"protocols\".\"created_at\""}, - Protocol: whereHelperstring{field: "\"protocols\".\"protocol\""}, -} - -// ProtocolRels is where relationship names are stored. -var ProtocolRels = struct { -}{} - -// protocolR is where relationships are stored. 
-type protocolR struct { -} - -// NewStruct creates a new relationship struct -func (*protocolR) NewStruct() *protocolR { - return &protocolR{} -} - -// protocolL is where Load methods for each relationship are stored. -type protocolL struct{} - -var ( - protocolAllColumns = []string{"id", "created_at", "protocol"} - protocolColumnsWithoutDefault = []string{"created_at", "protocol"} - protocolColumnsWithDefault = []string{"id"} - protocolPrimaryKeyColumns = []string{"id"} - protocolGeneratedColumns = []string{"id"} -) - -type ( - // ProtocolSlice is an alias for a slice of pointers to Protocol. - // This should almost always be used instead of []Protocol. - ProtocolSlice []*Protocol - // ProtocolHook is the signature for custom Protocol hook methods - ProtocolHook func(context.Context, boil.ContextExecutor, *Protocol) error - - protocolQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - protocolType = reflect.TypeOf(&Protocol{}) - protocolMapping = queries.MakeStructMapping(protocolType) - protocolPrimaryKeyMapping, _ = queries.BindMapping(protocolType, protocolMapping, protocolPrimaryKeyColumns) - protocolInsertCacheMut sync.RWMutex - protocolInsertCache = make(map[string]insertCache) - protocolUpdateCacheMut sync.RWMutex - protocolUpdateCache = make(map[string]updateCache) - protocolUpsertCacheMut sync.RWMutex - protocolUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. 
- _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var protocolAfterSelectHooks []ProtocolHook - -var protocolBeforeInsertHooks []ProtocolHook -var protocolAfterInsertHooks []ProtocolHook - -var protocolBeforeUpdateHooks []ProtocolHook -var protocolAfterUpdateHooks []ProtocolHook - -var protocolBeforeDeleteHooks []ProtocolHook -var protocolAfterDeleteHooks []ProtocolHook - -var protocolBeforeUpsertHooks []ProtocolHook -var protocolAfterUpsertHooks []ProtocolHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *Protocol) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *Protocol) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *Protocol) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. 
-func (o *Protocol) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *Protocol) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *Protocol) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *Protocol) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *Protocol) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. 
-func (o *Protocol) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddProtocolHook registers your hook function for all future operations. -func AddProtocolHook(hookPoint boil.HookPoint, protocolHook ProtocolHook) { - switch hookPoint { - case boil.AfterSelectHook: - protocolAfterSelectHooks = append(protocolAfterSelectHooks, protocolHook) - case boil.BeforeInsertHook: - protocolBeforeInsertHooks = append(protocolBeforeInsertHooks, protocolHook) - case boil.AfterInsertHook: - protocolAfterInsertHooks = append(protocolAfterInsertHooks, protocolHook) - case boil.BeforeUpdateHook: - protocolBeforeUpdateHooks = append(protocolBeforeUpdateHooks, protocolHook) - case boil.AfterUpdateHook: - protocolAfterUpdateHooks = append(protocolAfterUpdateHooks, protocolHook) - case boil.BeforeDeleteHook: - protocolBeforeDeleteHooks = append(protocolBeforeDeleteHooks, protocolHook) - case boil.AfterDeleteHook: - protocolAfterDeleteHooks = append(protocolAfterDeleteHooks, protocolHook) - case boil.BeforeUpsertHook: - protocolBeforeUpsertHooks = append(protocolBeforeUpsertHooks, protocolHook) - case boil.AfterUpsertHook: - protocolAfterUpsertHooks = append(protocolAfterUpsertHooks, protocolHook) - } -} - -// One returns a single protocol record from the query. -func (q protocolQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Protocol, error) { - o := &Protocol{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for protocols") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all Protocol records from the query. 
-func (q protocolQuery) All(ctx context.Context, exec boil.ContextExecutor) (ProtocolSlice, error) { - var o []*Protocol - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to Protocol slice") - } - - if len(protocolAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all Protocol records in the query. -func (q protocolQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count protocols rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q protocolQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if protocols exists") - } - - return count > 0, nil -} - -// Protocols retrieves all the records using an executor. -func Protocols(mods ...qm.QueryMod) protocolQuery { - mods = append(mods, qm.From("\"protocols\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"protocols\".*"}) - } - - return protocolQuery{q} -} - -// FindProtocol retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindProtocol(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*Protocol, error) { - protocolObj := &Protocol{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"protocols\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, protocolObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from protocols") - } - - if err = protocolObj.doAfterSelectHooks(ctx, exec); err != nil { - return protocolObj, err - } - - return protocolObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *Protocol) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no protocols provided for insertion") - } - - var err error - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(protocolColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - protocolInsertCacheMut.RLock() - cache, cached := protocolInsertCache[key] - protocolInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - protocolAllColumns, - protocolColumnsWithDefault, - protocolColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, protocolGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(protocolType, protocolMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(protocolType, protocolMapping, returnColumns) - if 
err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"protocols\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"protocols\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into protocols") - } - - if !cached { - protocolInsertCacheMut.Lock() - protocolInsertCache[key] = cache - protocolInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the Protocol. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *Protocol) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - protocolUpdateCacheMut.RLock() - cache, cached := protocolUpdateCache[key] - protocolUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - protocolAllColumns, - protocolPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, protocolGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update protocols, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"protocols\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, protocolPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(protocolType, protocolMapping, append(wl, protocolPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update protocols row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for protocols") - } - - if !cached { - protocolUpdateCacheMut.Lock() - protocolUpdateCache[key] = cache - protocolUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. 
-func (q protocolQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for protocols") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for protocols") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o ProtocolSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"protocols\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, protocolPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in protocol slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all protocol") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *Protocol) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no protocols provided for upsert") - } - if !boil.TimestampsAreSkipped(ctx) { - currTime := time.Now().In(boil.GetLocation()) - - if o.CreatedAt.IsZero() { - o.CreatedAt = currTime - } - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(protocolColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - protocolUpsertCacheMut.RLock() - cache, cached := protocolUpsertCache[key] - protocolUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - protocolAllColumns, - protocolColumnsWithDefault, - protocolColumnsWithoutDefault, - nzDefaults, - 
) - - update := updateColumns.UpdateColumnSet( - protocolAllColumns, - protocolPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, protocolGeneratedColumns) - update = strmangle.SetComplement(update, protocolGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert protocols, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(protocolPrimaryKeyColumns)) - copy(conflict, protocolPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"protocols\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(protocolType, protocolMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(protocolType, protocolMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert protocols") - } - - if !cached { - protocolUpsertCacheMut.Lock() - protocolUpsertCache[key] = cache - protocolUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single Protocol record with an executor. 
-// Delete will match against the primary key column to find the record to delete. -func (o *Protocol) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no Protocol provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), protocolPrimaryKeyMapping) - sql := "DELETE FROM \"protocols\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from protocols") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for protocols") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q protocolQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no protocolQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from protocols") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for protocols") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o ProtocolSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(protocolBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"protocols\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, protocolPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from protocol slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for protocols") - } - - if len(protocolAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *Protocol) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindProtocol(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *ProtocolSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := ProtocolSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"protocols\".* FROM \"protocols\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, protocolPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in ProtocolSlice") - } - - *o = slice - - return nil -} - -// ProtocolExists checks if the Protocol row exists. -func ProtocolExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"protocols\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if protocols exists") - } - - return exists, nil -} diff --git a/db/models/protocols_sets.go b/db/models/protocols_sets.go deleted file mode 100644 index 7b4253f..0000000 --- a/db/models/protocols_sets.go +++ /dev/null @@ -1,1201 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/sqlboiler/v4/types" - "github.com/volatiletech/strmangle" -) - -// ProtocolsSet is an object representing the database table. -type ProtocolsSet struct { // An internal unique id that identifies a unique set of protocols. - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - // The protocol IDs of this protocol set. The IDs reference the protocols table (no foreign key checks). - ProtocolIds types.Int64Array `boil:"protocol_ids" json:"protocol_ids" toml:"protocol_ids" yaml:"protocol_ids"` - // The hash digest of the sorted protocol ids to allow a unique constraint. - Hash []byte `boil:"hash" json:"hash" toml:"hash" yaml:"hash"` - - R *protocolsSetR `boil:"-" json:"-" toml:"-" yaml:"-"` - L protocolsSetL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var ProtocolsSetColumns = struct { - ID string - ProtocolIds string - Hash string -}{ - ID: "id", - ProtocolIds: "protocol_ids", - Hash: "hash", -} - -var ProtocolsSetTableColumns = struct { - ID string - ProtocolIds string - Hash string -}{ - ID: "protocols_sets.id", - ProtocolIds: "protocols_sets.protocol_ids", - Hash: "protocols_sets.hash", -} - -// Generated where - -type whereHelpertypes_Int64Array struct{ field string } - -func (w whereHelpertypes_Int64Array) EQ(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.EQ, x) -} -func (w whereHelpertypes_Int64Array) NEQ(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.NEQ, x) -} -func (w whereHelpertypes_Int64Array) LT(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpertypes_Int64Array) 
LTE(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpertypes_Int64Array) GT(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpertypes_Int64Array) GTE(x types.Int64Array) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - -type whereHelper__byte struct{ field string } - -func (w whereHelper__byte) EQ(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelper__byte) NEQ(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelper__byte) LT(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelper__byte) LTE(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelper__byte) GT(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } -func (w whereHelper__byte) GTE(x []byte) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } - -var ProtocolsSetWhere = struct { - ID whereHelperint - ProtocolIds whereHelpertypes_Int64Array - Hash whereHelper__byte -}{ - ID: whereHelperint{field: "\"protocols_sets\".\"id\""}, - ProtocolIds: whereHelpertypes_Int64Array{field: "\"protocols_sets\".\"protocol_ids\""}, - Hash: whereHelper__byte{field: "\"protocols_sets\".\"hash\""}, -} - -// ProtocolsSetRels is where relationship names are stored. -var ProtocolsSetRels = struct { - Peers string -}{ - Peers: "Peers", -} - -// protocolsSetR is where relationships are stored. -type protocolsSetR struct { - Peers PeerSlice `boil:"Peers" json:"Peers" toml:"Peers" yaml:"Peers"` -} - -// NewStruct creates a new relationship struct -func (*protocolsSetR) NewStruct() *protocolsSetR { - return &protocolsSetR{} -} - -func (r *protocolsSetR) GetPeers() PeerSlice { - if r == nil { - return nil - } - return r.Peers -} - -// protocolsSetL is where Load methods for each relationship are stored. 
-type protocolsSetL struct{} - -var ( - protocolsSetAllColumns = []string{"id", "protocol_ids", "hash"} - protocolsSetColumnsWithoutDefault = []string{"protocol_ids", "hash"} - protocolsSetColumnsWithDefault = []string{"id"} - protocolsSetPrimaryKeyColumns = []string{"id"} - protocolsSetGeneratedColumns = []string{"id"} -) - -type ( - // ProtocolsSetSlice is an alias for a slice of pointers to ProtocolsSet. - // This should almost always be used instead of []ProtocolsSet. - ProtocolsSetSlice []*ProtocolsSet - // ProtocolsSetHook is the signature for custom ProtocolsSet hook methods - ProtocolsSetHook func(context.Context, boil.ContextExecutor, *ProtocolsSet) error - - protocolsSetQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - protocolsSetType = reflect.TypeOf(&ProtocolsSet{}) - protocolsSetMapping = queries.MakeStructMapping(protocolsSetType) - protocolsSetPrimaryKeyMapping, _ = queries.BindMapping(protocolsSetType, protocolsSetMapping, protocolsSetPrimaryKeyColumns) - protocolsSetInsertCacheMut sync.RWMutex - protocolsSetInsertCache = make(map[string]insertCache) - protocolsSetUpdateCacheMut sync.RWMutex - protocolsSetUpdateCache = make(map[string]updateCache) - protocolsSetUpsertCacheMut sync.RWMutex - protocolsSetUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. 
- _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var protocolsSetAfterSelectHooks []ProtocolsSetHook - -var protocolsSetBeforeInsertHooks []ProtocolsSetHook -var protocolsSetAfterInsertHooks []ProtocolsSetHook - -var protocolsSetBeforeUpdateHooks []ProtocolsSetHook -var protocolsSetAfterUpdateHooks []ProtocolsSetHook - -var protocolsSetBeforeDeleteHooks []ProtocolsSetHook -var protocolsSetAfterDeleteHooks []ProtocolsSetHook - -var protocolsSetBeforeUpsertHooks []ProtocolsSetHook -var protocolsSetAfterUpsertHooks []ProtocolsSetHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *ProtocolsSet) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *ProtocolsSet) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *ProtocolsSet) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. 
-func (o *ProtocolsSet) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *ProtocolsSet) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *ProtocolsSet) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *ProtocolsSet) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *ProtocolsSet) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. 
-func (o *ProtocolsSet) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range protocolsSetAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddProtocolsSetHook registers your hook function for all future operations. -func AddProtocolsSetHook(hookPoint boil.HookPoint, protocolsSetHook ProtocolsSetHook) { - switch hookPoint { - case boil.AfterSelectHook: - protocolsSetAfterSelectHooks = append(protocolsSetAfterSelectHooks, protocolsSetHook) - case boil.BeforeInsertHook: - protocolsSetBeforeInsertHooks = append(protocolsSetBeforeInsertHooks, protocolsSetHook) - case boil.AfterInsertHook: - protocolsSetAfterInsertHooks = append(protocolsSetAfterInsertHooks, protocolsSetHook) - case boil.BeforeUpdateHook: - protocolsSetBeforeUpdateHooks = append(protocolsSetBeforeUpdateHooks, protocolsSetHook) - case boil.AfterUpdateHook: - protocolsSetAfterUpdateHooks = append(protocolsSetAfterUpdateHooks, protocolsSetHook) - case boil.BeforeDeleteHook: - protocolsSetBeforeDeleteHooks = append(protocolsSetBeforeDeleteHooks, protocolsSetHook) - case boil.AfterDeleteHook: - protocolsSetAfterDeleteHooks = append(protocolsSetAfterDeleteHooks, protocolsSetHook) - case boil.BeforeUpsertHook: - protocolsSetBeforeUpsertHooks = append(protocolsSetBeforeUpsertHooks, protocolsSetHook) - case boil.AfterUpsertHook: - protocolsSetAfterUpsertHooks = append(protocolsSetAfterUpsertHooks, protocolsSetHook) - } -} - -// One returns a single protocolsSet record from the query. 
-func (q protocolsSetQuery) One(ctx context.Context, exec boil.ContextExecutor) (*ProtocolsSet, error) { - o := &ProtocolsSet{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for protocols_sets") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all ProtocolsSet records from the query. -func (q protocolsSetQuery) All(ctx context.Context, exec boil.ContextExecutor) (ProtocolsSetSlice, error) { - var o []*ProtocolsSet - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to ProtocolsSet slice") - } - - if len(protocolsSetAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all ProtocolsSet records in the query. -func (q protocolsSetQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count protocols_sets rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q protocolsSetQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if protocols_sets exists") - } - - return count > 0, nil -} - -// Peers retrieves all the peer's Peers with an executor. 
-func (o *ProtocolsSet) Peers(mods ...qm.QueryMod) peerQuery { - var queryMods []qm.QueryMod - if len(mods) != 0 { - queryMods = append(queryMods, mods...) - } - - queryMods = append(queryMods, - qm.Where("\"peers\".\"protocols_set_id\"=?", o.ID), - ) - - return Peers(queryMods...) -} - -// LoadPeers allows an eager lookup of values, cached into the -// loaded structs of the objects. This is for a 1-M or N-M relationship. -func (protocolsSetL) LoadPeers(ctx context.Context, e boil.ContextExecutor, singular bool, maybeProtocolsSet interface{}, mods queries.Applicator) error { - var slice []*ProtocolsSet - var object *ProtocolsSet - - if singular { - var ok bool - object, ok = maybeProtocolsSet.(*ProtocolsSet) - if !ok { - object = new(ProtocolsSet) - ok = queries.SetFromEmbeddedStruct(&object, &maybeProtocolsSet) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeProtocolsSet)) - } - } - } else { - s, ok := maybeProtocolsSet.(*[]*ProtocolsSet) - if ok { - slice = *s - } else { - ok = queries.SetFromEmbeddedStruct(&slice, maybeProtocolsSet) - if !ok { - return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeProtocolsSet)) - } - } - } - - args := make([]interface{}, 0, 1) - if singular { - if object.R == nil { - object.R = &protocolsSetR{} - } - args = append(args, object.ID) - } else { - Outer: - for _, obj := range slice { - if obj.R == nil { - obj.R = &protocolsSetR{} - } - - for _, a := range args { - if queries.Equal(a, obj.ID) { - continue Outer - } - } - - args = append(args, obj.ID) - } - } - - if len(args) == 0 { - return nil - } - - query := NewQuery( - qm.From(`peers`), - qm.WhereIn(`peers.protocols_set_id in ?`, args...), - ) - if mods != nil { - mods.Apply(query) - } - - results, err := query.QueryContext(ctx, e) - if err != nil { - return errors.Wrap(err, "failed to eager load peers") - } - - var resultSlice []*Peer - if err = queries.Bind(results, &resultSlice); err != nil { 
- return errors.Wrap(err, "failed to bind eager loaded slice peers") - } - - if err = results.Close(); err != nil { - return errors.Wrap(err, "failed to close results in eager load on peers") - } - if err = results.Err(); err != nil { - return errors.Wrap(err, "error occurred during iteration of eager loaded relations for peers") - } - - if len(peerAfterSelectHooks) != 0 { - for _, obj := range resultSlice { - if err := obj.doAfterSelectHooks(ctx, e); err != nil { - return err - } - } - } - if singular { - object.R.Peers = resultSlice - for _, foreign := range resultSlice { - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.ProtocolsSet = object - } - return nil - } - - for _, foreign := range resultSlice { - for _, local := range slice { - if queries.Equal(local.ID, foreign.ProtocolsSetID) { - local.R.Peers = append(local.R.Peers, foreign) - if foreign.R == nil { - foreign.R = &peerR{} - } - foreign.R.ProtocolsSet = local - break - } - } - } - - return nil -} - -// AddPeers adds the given related objects to the existing relationships -// of the protocols_set, optionally inserting them as new records. -// Appends related to o.R.Peers. -// Sets related.R.ProtocolsSet appropriately. 
-func (o *ProtocolsSet) AddPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - var err error - for _, rel := range related { - if insert { - queries.Assign(&rel.ProtocolsSetID, o.ID) - if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { - return errors.Wrap(err, "failed to insert into foreign table") - } - } else { - updateQuery := fmt.Sprintf( - "UPDATE \"peers\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, []string{"protocols_set_id"}), - strmangle.WhereClause("\"", "\"", 2, peerPrimaryKeyColumns), - ) - values := []interface{}{o.ID, rel.ID} - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, updateQuery) - fmt.Fprintln(writer, values) - } - if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { - return errors.Wrap(err, "failed to update foreign table") - } - - queries.Assign(&rel.ProtocolsSetID, o.ID) - } - } - - if o.R == nil { - o.R = &protocolsSetR{ - Peers: related, - } - } else { - o.R.Peers = append(o.R.Peers, related...) - } - - for _, rel := range related { - if rel.R == nil { - rel.R = &peerR{ - ProtocolsSet: o, - } - } else { - rel.R.ProtocolsSet = o - } - } - return nil -} - -// SetPeers removes all previously related items of the -// protocols_set replacing them completely with the passed -// in related items, optionally inserting them as new records. -// Sets o.R.ProtocolsSet's Peers accordingly. -// Replaces o.R.Peers with related. -// Sets related.R.ProtocolsSet's Peers accordingly. -func (o *ProtocolsSet) SetPeers(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Peer) error { - query := "update \"peers\" set \"protocols_set_id\" = null where \"protocols_set_id\" = $1" - values := []interface{}{o.ID} - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, query) - fmt.Fprintln(writer, values) - } - _, err := exec.ExecContext(ctx, query, values...) 
- if err != nil { - return errors.Wrap(err, "failed to remove relationships before set") - } - - if o.R != nil { - for _, rel := range o.R.Peers { - queries.SetScanner(&rel.ProtocolsSetID, nil) - if rel.R == nil { - continue - } - - rel.R.ProtocolsSet = nil - } - o.R.Peers = nil - } - - return o.AddPeers(ctx, exec, insert, related...) -} - -// RemovePeers relationships from objects passed in. -// Removes related items from R.Peers (uses pointer comparison, removal does not keep order) -// Sets related.R.ProtocolsSet. -func (o *ProtocolsSet) RemovePeers(ctx context.Context, exec boil.ContextExecutor, related ...*Peer) error { - if len(related) == 0 { - return nil - } - - var err error - for _, rel := range related { - queries.SetScanner(&rel.ProtocolsSetID, nil) - if rel.R != nil { - rel.R.ProtocolsSet = nil - } - if _, err = rel.Update(ctx, exec, boil.Whitelist("protocols_set_id")); err != nil { - return err - } - } - if o.R == nil { - return nil - } - - for _, rel := range related { - for i, ri := range o.R.Peers { - if rel != ri { - continue - } - - ln := len(o.R.Peers) - if ln > 1 && i < ln-1 { - o.R.Peers[i] = o.R.Peers[ln-1] - } - o.R.Peers = o.R.Peers[:ln-1] - break - } - } - - return nil -} - -// ProtocolsSets retrieves all the records using an executor. -func ProtocolsSets(mods ...qm.QueryMod) protocolsSetQuery { - mods = append(mods, qm.From("\"protocols_sets\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"protocols_sets\".*"}) - } - - return protocolsSetQuery{q} -} - -// FindProtocolsSet retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindProtocolsSet(ctx context.Context, exec boil.ContextExecutor, iD int, selectCols ...string) (*ProtocolsSet, error) { - protocolsSetObj := &ProtocolsSet{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"protocols_sets\" where \"id\"=$1", sel, - ) - - q := queries.Raw(query, iD) - - err := q.Bind(ctx, exec, protocolsSetObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from protocols_sets") - } - - if err = protocolsSetObj.doAfterSelectHooks(ctx, exec); err != nil { - return protocolsSetObj, err - } - - return protocolsSetObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *ProtocolsSet) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no protocols_sets provided for insertion") - } - - var err error - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(protocolsSetColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - protocolsSetInsertCacheMut.RLock() - cache, cached := protocolsSetInsertCache[key] - protocolsSetInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - protocolsSetAllColumns, - protocolsSetColumnsWithDefault, - protocolsSetColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, protocolsSetGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(protocolsSetType, protocolsSetMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(protocolsSetType, protocolsSetMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - 
cache.query = fmt.Sprintf("INSERT INTO \"protocols_sets\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"protocols_sets\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into protocols_sets") - } - - if !cached { - protocolsSetInsertCacheMut.Lock() - protocolsSetInsertCache[key] = cache - protocolsSetInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the ProtocolsSet. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *ProtocolsSet) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - protocolsSetUpdateCacheMut.RLock() - cache, cached := protocolsSetUpdateCache[key] - protocolsSetUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - protocolsSetAllColumns, - protocolsSetPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, protocolsSetGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update protocols_sets, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"protocols_sets\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, protocolsSetPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(protocolsSetType, protocolsSetMapping, append(wl, protocolsSetPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update protocols_sets row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for protocols_sets") - } - - if !cached { - protocolsSetUpdateCacheMut.Lock() - protocolsSetUpdateCache[key] = cache - protocolsSetUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. 
-func (q protocolsSetQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for protocols_sets") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for protocols_sets") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o ProtocolsSetSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolsSetPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"protocols_sets\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, protocolsSetPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in protocolsSet slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all protocolsSet") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *ProtocolsSet) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no protocols_sets provided for upsert") - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(protocolsSetColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - protocolsSetUpsertCacheMut.RLock() - cache, cached := protocolsSetUpsertCache[key] - protocolsSetUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - protocolsSetAllColumns, - protocolsSetColumnsWithDefault, - protocolsSetColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - protocolsSetAllColumns, - 
protocolsSetPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, protocolsSetGeneratedColumns) - update = strmangle.SetComplement(update, protocolsSetGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert protocols_sets, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(protocolsSetPrimaryKeyColumns)) - copy(conflict, protocolsSetPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"protocols_sets\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(protocolsSetType, protocolsSetMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(protocolsSetType, protocolsSetMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert protocols_sets") - } - - if !cached { - protocolsSetUpsertCacheMut.Lock() - protocolsSetUpsertCache[key] = cache - protocolsSetUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single ProtocolsSet record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *ProtocolsSet) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no ProtocolsSet provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), protocolsSetPrimaryKeyMapping) - sql := "DELETE FROM \"protocols_sets\" WHERE \"id\"=$1" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from protocols_sets") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for protocols_sets") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q protocolsSetQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no protocolsSetQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from protocols_sets") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for protocols_sets") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o ProtocolsSetSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(protocolsSetBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolsSetPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"protocols_sets\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, protocolsSetPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from protocolsSet slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for protocols_sets") - } - - if len(protocolsSetAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *ProtocolsSet) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindProtocolsSet(ctx, exec, o.ID) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *ProtocolsSetSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := ProtocolsSetSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), protocolsSetPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"protocols_sets\".* FROM \"protocols_sets\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, protocolsSetPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in ProtocolsSetSlice") - } - - *o = slice - - return nil -} - -// ProtocolsSetExists checks if the ProtocolsSet row exists. -func ProtocolsSetExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"protocols_sets\" where \"id\"=$1 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD) - } - row := exec.QueryRowContext(ctx, sql, iD) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if protocols_sets exists") - } - - return exists, nil -} diff --git a/db/models/psql_upsert.go b/db/models/psql_upsert.go deleted file mode 100644 index 07b122a..0000000 --- a/db/models/psql_upsert.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. - -package models - -import ( - "fmt" - "strings" - - "github.com/volatiletech/sqlboiler/v4/drivers" - "github.com/volatiletech/strmangle" -) - -// buildUpsertQueryPostgres builds a SQL statement string using the upsertData provided. 
-func buildUpsertQueryPostgres(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string { - conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict) - whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist) - ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret) - - buf := strmangle.GetBuffer() - defer strmangle.PutBuffer(buf) - - columns := "DEFAULT VALUES" - if len(whitelist) != 0 { - columns = fmt.Sprintf("(%s) VALUES (%s)", - strings.Join(whitelist, ", "), - strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1)) - } - - fmt.Fprintf( - buf, - "INSERT INTO %s %s ON CONFLICT ", - tableName, - columns, - ) - - if !updateOnConflict || len(update) == 0 { - buf.WriteString("DO NOTHING") - } else { - buf.WriteByte('(') - buf.WriteString(strings.Join(conflict, ", ")) - buf.WriteString(") DO UPDATE SET ") - - for i, v := range update { - if i != 0 { - buf.WriteByte(',') - } - quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v) - buf.WriteString(quoted) - buf.WriteString(" = EXCLUDED.") - buf.WriteString(quoted) - } - } - - if len(ret) != 0 { - buf.WriteString(" RETURNING ") - buf.WriteString(strings.Join(ret, ", ")) - } - - return buf.String() -} diff --git a/db/models/requests.go b/db/models/requests.go deleted file mode 100644 index f5f9128..0000000 --- a/db/models/requests.go +++ /dev/null @@ -1,942 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/sqlboiler/v4/types" - "github.com/volatiletech/strmangle" -) - -// Request is an object representing the database table. -type Request struct { - ID int `boil:"id" json:"id" toml:"id" yaml:"id"` - Timestamp time.Time `boil:"timestamp" json:"timestamp" toml:"timestamp" yaml:"timestamp"` - RequestType string `boil:"request_type" json:"request_type" toml:"request_type" yaml:"request_type"` - AntID int64 `boil:"ant_id" json:"ant_id" toml:"ant_id" yaml:"ant_id"` - PeerID int64 `boil:"peer_id" json:"peer_id" toml:"peer_id" yaml:"peer_id"` - KeyID int `boil:"key_id" json:"key_id" toml:"key_id" yaml:"key_id"` - MultiAddressIds types.Int64Array `boil:"multi_address_ids" json:"multi_address_ids,omitempty" toml:"multi_address_ids" yaml:"multi_address_ids,omitempty"` - ProtocolsSetID null.Int `boil:"protocols_set_id" json:"protocols_set_id,omitempty" toml:"protocols_set_id" yaml:"protocols_set_id,omitempty"` - - R *requestR `boil:"-" json:"-" toml:"-" yaml:"-"` - L requestL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var RequestColumns = struct { - ID string - Timestamp string - RequestType string - AntID string - PeerID string - KeyID string - MultiAddressIds string - ProtocolsSetID string -}{ - ID: "id", - Timestamp: "timestamp", - RequestType: "request_type", - AntID: "ant_id", - PeerID: "peer_id", - KeyID: "key_id", - MultiAddressIds: "multi_address_ids", - ProtocolsSetID: "protocols_set_id", -} - -var RequestTableColumns = struct { - ID string - Timestamp string - RequestType string - AntID string - PeerID string - KeyID string - MultiAddressIds string - 
ProtocolsSetID string -}{ - ID: "requests.id", - Timestamp: "requests.timestamp", - RequestType: "requests.request_type", - AntID: "requests.ant_id", - PeerID: "requests.peer_id", - KeyID: "requests.key_id", - MultiAddressIds: "requests.multi_address_ids", - ProtocolsSetID: "requests.protocols_set_id", -} - -// Generated where - -func (w whereHelpertypes_Int64Array) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpertypes_Int64Array) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -var RequestWhere = struct { - ID whereHelperint - Timestamp whereHelpertime_Time - RequestType whereHelperstring - AntID whereHelperint64 - PeerID whereHelperint64 - KeyID whereHelperint - MultiAddressIds whereHelpertypes_Int64Array - ProtocolsSetID whereHelpernull_Int -}{ - ID: whereHelperint{field: "\"requests\".\"id\""}, - Timestamp: whereHelpertime_Time{field: "\"requests\".\"timestamp\""}, - RequestType: whereHelperstring{field: "\"requests\".\"request_type\""}, - AntID: whereHelperint64{field: "\"requests\".\"ant_id\""}, - PeerID: whereHelperint64{field: "\"requests\".\"peer_id\""}, - KeyID: whereHelperint{field: "\"requests\".\"key_id\""}, - MultiAddressIds: whereHelpertypes_Int64Array{field: "\"requests\".\"multi_address_ids\""}, - ProtocolsSetID: whereHelpernull_Int{field: "\"requests\".\"protocols_set_id\""}, -} - -// RequestRels is where relationship names are stored. -var RequestRels = struct { -}{} - -// requestR is where relationships are stored. -type requestR struct { -} - -// NewStruct creates a new relationship struct -func (*requestR) NewStruct() *requestR { - return &requestR{} -} - -// requestL is where Load methods for each relationship are stored. 
-type requestL struct{} - -var ( - requestAllColumns = []string{"id", "timestamp", "request_type", "ant_id", "peer_id", "key_id", "multi_address_ids", "protocols_set_id"} - requestColumnsWithoutDefault = []string{"timestamp", "request_type", "ant_id", "peer_id", "key_id"} - requestColumnsWithDefault = []string{"id", "multi_address_ids", "protocols_set_id"} - requestPrimaryKeyColumns = []string{"id", "timestamp"} - requestGeneratedColumns = []string{"id"} -) - -type ( - // RequestSlice is an alias for a slice of pointers to Request. - // This should almost always be used instead of []Request. - RequestSlice []*Request - // RequestHook is the signature for custom Request hook methods - RequestHook func(context.Context, boil.ContextExecutor, *Request) error - - requestQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - requestType = reflect.TypeOf(&Request{}) - requestMapping = queries.MakeStructMapping(requestType) - requestPrimaryKeyMapping, _ = queries.BindMapping(requestType, requestMapping, requestPrimaryKeyColumns) - requestInsertCacheMut sync.RWMutex - requestInsertCache = make(map[string]insertCache) - requestUpdateCacheMut sync.RWMutex - requestUpdateCache = make(map[string]updateCache) - requestUpsertCacheMut sync.RWMutex - requestUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. 
- _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var requestAfterSelectHooks []RequestHook - -var requestBeforeInsertHooks []RequestHook -var requestAfterInsertHooks []RequestHook - -var requestBeforeUpdateHooks []RequestHook -var requestAfterUpdateHooks []RequestHook - -var requestBeforeDeleteHooks []RequestHook -var requestAfterDeleteHooks []RequestHook - -var requestBeforeUpsertHooks []RequestHook -var requestAfterUpsertHooks []RequestHook - -// doAfterSelectHooks executes all "after Select" hooks. -func (o *Request) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *Request) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *Request) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *Request) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. 
-func (o *Request) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. -func (o *Request) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *Request) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *Request) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *Request) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddRequestHook registers your hook function for all future operations. 
-func AddRequestHook(hookPoint boil.HookPoint, requestHook RequestHook) { - switch hookPoint { - case boil.AfterSelectHook: - requestAfterSelectHooks = append(requestAfterSelectHooks, requestHook) - case boil.BeforeInsertHook: - requestBeforeInsertHooks = append(requestBeforeInsertHooks, requestHook) - case boil.AfterInsertHook: - requestAfterInsertHooks = append(requestAfterInsertHooks, requestHook) - case boil.BeforeUpdateHook: - requestBeforeUpdateHooks = append(requestBeforeUpdateHooks, requestHook) - case boil.AfterUpdateHook: - requestAfterUpdateHooks = append(requestAfterUpdateHooks, requestHook) - case boil.BeforeDeleteHook: - requestBeforeDeleteHooks = append(requestBeforeDeleteHooks, requestHook) - case boil.AfterDeleteHook: - requestAfterDeleteHooks = append(requestAfterDeleteHooks, requestHook) - case boil.BeforeUpsertHook: - requestBeforeUpsertHooks = append(requestBeforeUpsertHooks, requestHook) - case boil.AfterUpsertHook: - requestAfterUpsertHooks = append(requestAfterUpsertHooks, requestHook) - } -} - -// One returns a single request record from the query. -func (q requestQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Request, error) { - o := &Request{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for requests") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all Request records from the query. 
-func (q requestQuery) All(ctx context.Context, exec boil.ContextExecutor) (RequestSlice, error) { - var o []*Request - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to Request slice") - } - - if len(requestAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all Request records in the query. -func (q requestQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count requests rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. -func (q requestQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if requests exists") - } - - return count > 0, nil -} - -// Requests retrieves all the records using an executor. -func Requests(mods ...qm.QueryMod) requestQuery { - mods = append(mods, qm.From("\"requests\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"requests\".*"}) - } - - return requestQuery{q} -} - -// FindRequest retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. 
-func FindRequest(ctx context.Context, exec boil.ContextExecutor, iD int, timestamp time.Time, selectCols ...string) (*Request, error) { - requestObj := &Request{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"requests\" where \"id\"=$1 AND \"timestamp\"=$2", sel, - ) - - q := queries.Raw(query, iD, timestamp) - - err := q.Bind(ctx, exec, requestObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from requests") - } - - if err = requestObj.doAfterSelectHooks(ctx, exec); err != nil { - return requestObj, err - } - - return requestObj, nil -} - -// Insert a single record using an executor. -// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *Request) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no requests provided for insertion") - } - - var err error - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(requestColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - requestInsertCacheMut.RLock() - cache, cached := requestInsertCache[key] - requestInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - requestAllColumns, - requestColumnsWithDefault, - requestColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, requestGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(requestType, requestMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(requestType, requestMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"requests\" (\"%s\") %%sVALUES 
(%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"requests\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into requests") - } - - if !cached { - requestInsertCacheMut.Lock() - requestInsertCache[key] = cache - requestInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the Request. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
-func (o *Request) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - requestUpdateCacheMut.RLock() - cache, cached := requestUpdateCache[key] - requestUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - requestAllColumns, - requestPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, requestGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update requests, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"requests\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, requestPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(requestType, requestMapping, append(wl, requestPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update requests row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for requests") - } - - if !cached { - requestUpdateCacheMut.Lock() - requestUpdateCache[key] = cache - requestUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. 
-func (q requestQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for requests") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for requests") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. -func (o RequestSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"requests\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, requestPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) 
- if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in request slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all request") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. -func (o *Request) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no requests provided for upsert") - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(requestColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - requestUpsertCacheMut.RLock() - cache, cached := requestUpsertCache[key] - requestUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - requestAllColumns, - requestColumnsWithDefault, - requestColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - requestAllColumns, - requestPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, 
requestGeneratedColumns) - update = strmangle.SetComplement(update, requestGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert requests, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = make([]string, len(requestPrimaryKeyColumns)) - copy(conflict, requestPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"requests\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(requestType, requestMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(requestType, requestMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert requests") - } - - if !cached { - requestUpsertCacheMut.Lock() - requestUpsertCache[key] = cache - requestUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single Request record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *Request) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no Request provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), requestPrimaryKeyMapping) - sql := "DELETE FROM \"requests\" WHERE \"id\"=$1 AND \"timestamp\"=$2" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from requests") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for requests") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q requestQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no requestQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from requests") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for requests") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o RequestSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(requestBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"requests\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, requestPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from request slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for requests") - } - - if len(requestAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *Request) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindRequest(ctx, exec, o.ID, o.Timestamp) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *RequestSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := RequestSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"requests\".* FROM \"requests\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, requestPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in RequestSlice") - } - - *o = slice - - return nil -} - -// RequestExists checks if the Request row exists. -func RequestExists(ctx context.Context, exec boil.ContextExecutor, iD int, timestamp time.Time) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"requests\" where \"id\"=$1 AND \"timestamp\"=$2 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD, timestamp) - } - row := exec.QueryRowContext(ctx, sql, iD, timestamp) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if requests exists") - } - - return exists, nil -} diff --git a/db/models/requests_denormalized.go b/db/models/requests_denormalized.go deleted file mode 100644 index 7c0424f..0000000 --- a/db/models/requests_denormalized.go +++ /dev/null @@ -1,1003 +0,0 @@ -// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. -// This file is meant to be re-generated in place and/or deleted at any time. 
- -package models - -import ( - "context" - "database/sql" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/friendsofgo/errors" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/queries/qmhelper" - "github.com/volatiletech/sqlboiler/v4/types" - "github.com/volatiletech/strmangle" -) - -// RequestsDenormalized is an object representing the database table. -type RequestsDenormalized struct { - ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"` - RequestStartedAt time.Time `boil:"request_started_at" json:"request_started_at" toml:"request_started_at" yaml:"request_started_at"` - RequestType string `boil:"request_type" json:"request_type" toml:"request_type" yaml:"request_type"` - AntMultihash string `boil:"ant_multihash" json:"ant_multihash" toml:"ant_multihash" yaml:"ant_multihash"` - PeerMultihash string `boil:"peer_multihash" json:"peer_multihash" toml:"peer_multihash" yaml:"peer_multihash"` - KeyMultihash string `boil:"key_multihash" json:"key_multihash" toml:"key_multihash" yaml:"key_multihash"` - MultiAddresses types.StringArray `boil:"multi_addresses" json:"multi_addresses,omitempty" toml:"multi_addresses" yaml:"multi_addresses,omitempty"` - AgentVersion null.String `boil:"agent_version" json:"agent_version,omitempty" toml:"agent_version" yaml:"agent_version,omitempty"` - NormalizedAt null.Time `boil:"normalized_at" json:"normalized_at,omitempty" toml:"normalized_at" yaml:"normalized_at,omitempty"` - Protocols types.StringArray `boil:"protocols" json:"protocols,omitempty" toml:"protocols" yaml:"protocols,omitempty"` - - R *requestsDenormalizedR `boil:"-" json:"-" toml:"-" yaml:"-"` - L requestsDenormalizedL `boil:"-" json:"-" toml:"-" yaml:"-"` -} - -var RequestsDenormalizedColumns = struct { - ID string - RequestStartedAt string - RequestType string - 
AntMultihash string - PeerMultihash string - KeyMultihash string - MultiAddresses string - AgentVersion string - NormalizedAt string - Protocols string -}{ - ID: "id", - RequestStartedAt: "request_started_at", - RequestType: "request_type", - AntMultihash: "ant_multihash", - PeerMultihash: "peer_multihash", - KeyMultihash: "key_multihash", - MultiAddresses: "multi_addresses", - AgentVersion: "agent_version", - NormalizedAt: "normalized_at", - Protocols: "protocols", -} - -var RequestsDenormalizedTableColumns = struct { - ID string - RequestStartedAt string - RequestType string - AntMultihash string - PeerMultihash string - KeyMultihash string - MultiAddresses string - AgentVersion string - NormalizedAt string - Protocols string -}{ - ID: "requests_denormalized.id", - RequestStartedAt: "requests_denormalized.request_started_at", - RequestType: "requests_denormalized.request_type", - AntMultihash: "requests_denormalized.ant_multihash", - PeerMultihash: "requests_denormalized.peer_multihash", - KeyMultihash: "requests_denormalized.key_multihash", - MultiAddresses: "requests_denormalized.multi_addresses", - AgentVersion: "requests_denormalized.agent_version", - NormalizedAt: "requests_denormalized.normalized_at", - Protocols: "requests_denormalized.protocols", -} - -// Generated where - -type whereHelpertypes_StringArray struct{ field string } - -func (w whereHelpertypes_StringArray) EQ(x types.StringArray) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpertypes_StringArray) NEQ(x types.StringArray) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w whereHelpertypes_StringArray) LT(x types.StringArray) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpertypes_StringArray) LTE(x types.StringArray) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpertypes_StringArray) GT(x types.StringArray) qm.QueryMod { - return qmhelper.Where(w.field, 
qmhelper.GT, x) -} -func (w whereHelpertypes_StringArray) GTE(x types.StringArray) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - -func (w whereHelpertypes_StringArray) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpertypes_StringArray) IsNotNull() qm.QueryMod { - return qmhelper.WhereIsNotNull(w.field) -} - -type whereHelpernull_Time struct{ field string } - -func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, false, x) -} -func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod { - return qmhelper.WhereNullEQ(w.field, true, x) -} -func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - -func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } -func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } - -var RequestsDenormalizedWhere = struct { - ID whereHelperint64 - RequestStartedAt whereHelpertime_Time - RequestType whereHelperstring - AntMultihash whereHelperstring - PeerMultihash whereHelperstring - KeyMultihash whereHelperstring - MultiAddresses whereHelpertypes_StringArray - AgentVersion whereHelpernull_String - NormalizedAt whereHelpernull_Time - Protocols whereHelpertypes_StringArray -}{ - ID: whereHelperint64{field: "\"requests_denormalized\".\"id\""}, - RequestStartedAt: whereHelpertime_Time{field: "\"requests_denormalized\".\"request_started_at\""}, - RequestType: whereHelperstring{field: "\"requests_denormalized\".\"request_type\""}, - AntMultihash: whereHelperstring{field: 
"\"requests_denormalized\".\"ant_multihash\""}, - PeerMultihash: whereHelperstring{field: "\"requests_denormalized\".\"peer_multihash\""}, - KeyMultihash: whereHelperstring{field: "\"requests_denormalized\".\"key_multihash\""}, - MultiAddresses: whereHelpertypes_StringArray{field: "\"requests_denormalized\".\"multi_addresses\""}, - AgentVersion: whereHelpernull_String{field: "\"requests_denormalized\".\"agent_version\""}, - NormalizedAt: whereHelpernull_Time{field: "\"requests_denormalized\".\"normalized_at\""}, - Protocols: whereHelpertypes_StringArray{field: "\"requests_denormalized\".\"protocols\""}, -} - -// RequestsDenormalizedRels is where relationship names are stored. -var RequestsDenormalizedRels = struct { -}{} - -// requestsDenormalizedR is where relationships are stored. -type requestsDenormalizedR struct { -} - -// NewStruct creates a new relationship struct -func (*requestsDenormalizedR) NewStruct() *requestsDenormalizedR { - return &requestsDenormalizedR{} -} - -// requestsDenormalizedL is where Load methods for each relationship are stored. -type requestsDenormalizedL struct{} - -var ( - requestsDenormalizedAllColumns = []string{"id", "request_started_at", "request_type", "ant_multihash", "peer_multihash", "key_multihash", "multi_addresses", "agent_version", "normalized_at", "protocols"} - requestsDenormalizedColumnsWithoutDefault = []string{"request_started_at", "request_type", "ant_multihash", "peer_multihash", "key_multihash"} - requestsDenormalizedColumnsWithDefault = []string{"id", "multi_addresses", "agent_version", "normalized_at", "protocols"} - requestsDenormalizedPrimaryKeyColumns = []string{"id", "request_started_at"} - requestsDenormalizedGeneratedColumns = []string{"id"} -) - -type ( - // RequestsDenormalizedSlice is an alias for a slice of pointers to RequestsDenormalized. - // This should almost always be used instead of []RequestsDenormalized. 
- RequestsDenormalizedSlice []*RequestsDenormalized - // RequestsDenormalizedHook is the signature for custom RequestsDenormalized hook methods - RequestsDenormalizedHook func(context.Context, boil.ContextExecutor, *RequestsDenormalized) error - - requestsDenormalizedQuery struct { - *queries.Query - } -) - -// Cache for insert, update and upsert -var ( - requestsDenormalizedType = reflect.TypeOf(&RequestsDenormalized{}) - requestsDenormalizedMapping = queries.MakeStructMapping(requestsDenormalizedType) - requestsDenormalizedPrimaryKeyMapping, _ = queries.BindMapping(requestsDenormalizedType, requestsDenormalizedMapping, requestsDenormalizedPrimaryKeyColumns) - requestsDenormalizedInsertCacheMut sync.RWMutex - requestsDenormalizedInsertCache = make(map[string]insertCache) - requestsDenormalizedUpdateCacheMut sync.RWMutex - requestsDenormalizedUpdateCache = make(map[string]updateCache) - requestsDenormalizedUpsertCacheMut sync.RWMutex - requestsDenormalizedUpsertCache = make(map[string]insertCache) -) - -var ( - // Force time package dependency for automated UpdatedAt/CreatedAt. - _ = time.Second - // Force qmhelper dependency for where clause generation (which doesn't - // always happen) - _ = qmhelper.Where -) - -var requestsDenormalizedAfterSelectHooks []RequestsDenormalizedHook - -var requestsDenormalizedBeforeInsertHooks []RequestsDenormalizedHook -var requestsDenormalizedAfterInsertHooks []RequestsDenormalizedHook - -var requestsDenormalizedBeforeUpdateHooks []RequestsDenormalizedHook -var requestsDenormalizedAfterUpdateHooks []RequestsDenormalizedHook - -var requestsDenormalizedBeforeDeleteHooks []RequestsDenormalizedHook -var requestsDenormalizedAfterDeleteHooks []RequestsDenormalizedHook - -var requestsDenormalizedBeforeUpsertHooks []RequestsDenormalizedHook -var requestsDenormalizedAfterUpsertHooks []RequestsDenormalizedHook - -// doAfterSelectHooks executes all "after Select" hooks. 
-func (o *RequestsDenormalized) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedAfterSelectHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeInsertHooks executes all "before insert" hooks. -func (o *RequestsDenormalized) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedBeforeInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterInsertHooks executes all "after Insert" hooks. -func (o *RequestsDenormalized) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedAfterInsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpdateHooks executes all "before Update" hooks. -func (o *RequestsDenormalized) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedBeforeUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpdateHooks executes all "after Update" hooks. -func (o *RequestsDenormalized) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedAfterUpdateHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeDeleteHooks executes all "before Delete" hooks. 
-func (o *RequestsDenormalized) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedBeforeDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterDeleteHooks executes all "after Delete" hooks. -func (o *RequestsDenormalized) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedAfterDeleteHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doBeforeUpsertHooks executes all "before Upsert" hooks. -func (o *RequestsDenormalized) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedBeforeUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// doAfterUpsertHooks executes all "after Upsert" hooks. -func (o *RequestsDenormalized) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { - if boil.HooksAreSkipped(ctx) { - return nil - } - - for _, hook := range requestsDenormalizedAfterUpsertHooks { - if err := hook(ctx, exec, o); err != nil { - return err - } - } - - return nil -} - -// AddRequestsDenormalizedHook registers your hook function for all future operations. 
-func AddRequestsDenormalizedHook(hookPoint boil.HookPoint, requestsDenormalizedHook RequestsDenormalizedHook) { - switch hookPoint { - case boil.AfterSelectHook: - requestsDenormalizedAfterSelectHooks = append(requestsDenormalizedAfterSelectHooks, requestsDenormalizedHook) - case boil.BeforeInsertHook: - requestsDenormalizedBeforeInsertHooks = append(requestsDenormalizedBeforeInsertHooks, requestsDenormalizedHook) - case boil.AfterInsertHook: - requestsDenormalizedAfterInsertHooks = append(requestsDenormalizedAfterInsertHooks, requestsDenormalizedHook) - case boil.BeforeUpdateHook: - requestsDenormalizedBeforeUpdateHooks = append(requestsDenormalizedBeforeUpdateHooks, requestsDenormalizedHook) - case boil.AfterUpdateHook: - requestsDenormalizedAfterUpdateHooks = append(requestsDenormalizedAfterUpdateHooks, requestsDenormalizedHook) - case boil.BeforeDeleteHook: - requestsDenormalizedBeforeDeleteHooks = append(requestsDenormalizedBeforeDeleteHooks, requestsDenormalizedHook) - case boil.AfterDeleteHook: - requestsDenormalizedAfterDeleteHooks = append(requestsDenormalizedAfterDeleteHooks, requestsDenormalizedHook) - case boil.BeforeUpsertHook: - requestsDenormalizedBeforeUpsertHooks = append(requestsDenormalizedBeforeUpsertHooks, requestsDenormalizedHook) - case boil.AfterUpsertHook: - requestsDenormalizedAfterUpsertHooks = append(requestsDenormalizedAfterUpsertHooks, requestsDenormalizedHook) - } -} - -// One returns a single requestsDenormalized record from the query. 
-func (q requestsDenormalizedQuery) One(ctx context.Context, exec boil.ContextExecutor) (*RequestsDenormalized, error) { - o := &RequestsDenormalized{} - - queries.SetLimit(q.Query, 1) - - err := q.Bind(ctx, exec, o) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: failed to execute a one query for requests_denormalized") - } - - if err := o.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - - return o, nil -} - -// All returns all RequestsDenormalized records from the query. -func (q requestsDenormalizedQuery) All(ctx context.Context, exec boil.ContextExecutor) (RequestsDenormalizedSlice, error) { - var o []*RequestsDenormalized - - err := q.Bind(ctx, exec, &o) - if err != nil { - return nil, errors.Wrap(err, "models: failed to assign all query results to RequestsDenormalized slice") - } - - if len(requestsDenormalizedAfterSelectHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterSelectHooks(ctx, exec); err != nil { - return o, err - } - } - } - - return o, nil -} - -// Count returns the count of all RequestsDenormalized records in the query. -func (q requestsDenormalizedQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return 0, errors.Wrap(err, "models: failed to count requests_denormalized rows") - } - - return count, nil -} - -// Exists checks if the row exists in the table. 
-func (q requestsDenormalizedQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { - var count int64 - - queries.SetSelect(q.Query, nil) - queries.SetCount(q.Query) - queries.SetLimit(q.Query, 1) - - err := q.Query.QueryRowContext(ctx, exec).Scan(&count) - if err != nil { - return false, errors.Wrap(err, "models: failed to check if requests_denormalized exists") - } - - return count > 0, nil -} - -// RequestsDenormalizeds retrieves all the records using an executor. -func RequestsDenormalizeds(mods ...qm.QueryMod) requestsDenormalizedQuery { - mods = append(mods, qm.From("\"requests_denormalized\"")) - q := NewQuery(mods...) - if len(queries.GetSelect(q)) == 0 { - queries.SetSelect(q, []string{"\"requests_denormalized\".*"}) - } - - return requestsDenormalizedQuery{q} -} - -// FindRequestsDenormalized retrieves a single record by ID with an executor. -// If selectCols is empty Find will return all columns. -func FindRequestsDenormalized(ctx context.Context, exec boil.ContextExecutor, iD int64, requestStartedAt time.Time, selectCols ...string) (*RequestsDenormalized, error) { - requestsDenormalizedObj := &RequestsDenormalized{} - - sel := "*" - if len(selectCols) > 0 { - sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") - } - query := fmt.Sprintf( - "select %s from \"requests_denormalized\" where \"id\"=$1 AND \"request_started_at\"=$2", sel, - ) - - q := queries.Raw(query, iD, requestStartedAt) - - err := q.Bind(ctx, exec, requestsDenormalizedObj) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, sql.ErrNoRows - } - return nil, errors.Wrap(err, "models: unable to select from requests_denormalized") - } - - if err = requestsDenormalizedObj.doAfterSelectHooks(ctx, exec); err != nil { - return requestsDenormalizedObj, err - } - - return requestsDenormalizedObj, nil -} - -// Insert a single record using an executor. 
-// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. -func (o *RequestsDenormalized) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { - if o == nil { - return errors.New("models: no requests_denormalized provided for insertion") - } - - var err error - - if err := o.doBeforeInsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(requestsDenormalizedColumnsWithDefault, o) - - key := makeCacheKey(columns, nzDefaults) - requestsDenormalizedInsertCacheMut.RLock() - cache, cached := requestsDenormalizedInsertCache[key] - requestsDenormalizedInsertCacheMut.RUnlock() - - if !cached { - wl, returnColumns := columns.InsertColumnSet( - requestsDenormalizedAllColumns, - requestsDenormalizedColumnsWithDefault, - requestsDenormalizedColumnsWithoutDefault, - nzDefaults, - ) - wl = strmangle.SetComplement(wl, requestsDenormalizedGeneratedColumns) - - cache.valueMapping, err = queries.BindMapping(requestsDenormalizedType, requestsDenormalizedMapping, wl) - if err != nil { - return err - } - cache.retMapping, err = queries.BindMapping(requestsDenormalizedType, requestsDenormalizedMapping, returnColumns) - if err != nil { - return err - } - if len(wl) != 0 { - cache.query = fmt.Sprintf("INSERT INTO \"requests_denormalized\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) - } else { - cache.query = "INSERT INTO \"requests_denormalized\" %sDEFAULT VALUES%s" - } - - var queryOutput, queryReturning string - - if len(cache.retMapping) != 0 { - queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) - } - - cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - 
fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - - if err != nil { - return errors.Wrap(err, "models: unable to insert into requests_denormalized") - } - - if !cached { - requestsDenormalizedInsertCacheMut.Lock() - requestsDenormalizedInsertCache[key] = cache - requestsDenormalizedInsertCacheMut.Unlock() - } - - return o.doAfterInsertHooks(ctx, exec) -} - -// Update uses an executor to update the RequestsDenormalized. -// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. -// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. -func (o *RequestsDenormalized) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { - var err error - if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { - return 0, err - } - key := makeCacheKey(columns, nil) - requestsDenormalizedUpdateCacheMut.RLock() - cache, cached := requestsDenormalizedUpdateCache[key] - requestsDenormalizedUpdateCacheMut.RUnlock() - - if !cached { - wl := columns.UpdateColumnSet( - requestsDenormalizedAllColumns, - requestsDenormalizedPrimaryKeyColumns, - ) - wl = strmangle.SetComplement(wl, requestsDenormalizedGeneratedColumns) - - if !columns.IsWhitelist() { - wl = strmangle.SetComplement(wl, []string{"created_at"}) - } - if len(wl) == 0 { - return 0, errors.New("models: unable to update requests_denormalized, could not build whitelist") - } - - cache.query = fmt.Sprintf("UPDATE \"requests_denormalized\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, wl), - strmangle.WhereClause("\"", "\"", len(wl)+1, requestsDenormalizedPrimaryKeyColumns), - ) - cache.valueMapping, err = queries.BindMapping(requestsDenormalizedType, 
requestsDenormalizedMapping, append(wl, requestsDenormalizedPrimaryKeyColumns...)) - if err != nil { - return 0, err - } - } - - values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, values) - } - var result sql.Result - result, err = exec.ExecContext(ctx, cache.query, values...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update requests_denormalized row") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by update for requests_denormalized") - } - - if !cached { - requestsDenormalizedUpdateCacheMut.Lock() - requestsDenormalizedUpdateCache[key] = cache - requestsDenormalizedUpdateCacheMut.Unlock() - } - - return rowsAff, o.doAfterUpdateHooks(ctx, exec) -} - -// UpdateAll updates all rows with the specified column values. -func (q requestsDenormalizedQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - queries.SetUpdate(q.Query, cols) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all for requests_denormalized") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected for requests_denormalized") - } - - return rowsAff, nil -} - -// UpdateAll updates all rows with the specified column values, using an executor. 
-func (o RequestsDenormalizedSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { - ln := int64(len(o)) - if ln == 0 { - return 0, nil - } - - if len(cols) == 0 { - return 0, errors.New("models: update all requires at least one column argument") - } - - colNames := make([]string, len(cols)) - args := make([]interface{}, len(cols)) - - i := 0 - for name, value := range cols { - colNames[i] = name - args[i] = value - i++ - } - - // Append all of the primary key values for each column - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestsDenormalizedPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := fmt.Sprintf("UPDATE \"requests_denormalized\" SET %s WHERE %s", - strmangle.SetParamNames("\"", "\"", 1, colNames), - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, requestsDenormalizedPrimaryKeyColumns, len(o))) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to update all in requestsDenormalized slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all requestsDenormalized") - } - return rowsAff, nil -} - -// Upsert attempts an insert using an executor, and does an update or ignore on conflict. -// See boil.Columns documentation for how to properly use updateColumns and insertColumns. 
-func (o *RequestsDenormalized) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { - if o == nil { - return errors.New("models: no requests_denormalized provided for upsert") - } - - if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { - return err - } - - nzDefaults := queries.NonZeroDefaultSet(requestsDenormalizedColumnsWithDefault, o) - - // Build cache key in-line uglily - mysql vs psql problems - buf := strmangle.GetBuffer() - if updateOnConflict { - buf.WriteByte('t') - } else { - buf.WriteByte('f') - } - buf.WriteByte('.') - for _, c := range conflictColumns { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(updateColumns.Kind)) - for _, c := range updateColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(insertColumns.Kind)) - for _, c := range insertColumns.Cols { - buf.WriteString(c) - } - buf.WriteByte('.') - for _, c := range nzDefaults { - buf.WriteString(c) - } - key := buf.String() - strmangle.PutBuffer(buf) - - requestsDenormalizedUpsertCacheMut.RLock() - cache, cached := requestsDenormalizedUpsertCache[key] - requestsDenormalizedUpsertCacheMut.RUnlock() - - var err error - - if !cached { - insert, ret := insertColumns.InsertColumnSet( - requestsDenormalizedAllColumns, - requestsDenormalizedColumnsWithDefault, - requestsDenormalizedColumnsWithoutDefault, - nzDefaults, - ) - - update := updateColumns.UpdateColumnSet( - requestsDenormalizedAllColumns, - requestsDenormalizedPrimaryKeyColumns, - ) - - insert = strmangle.SetComplement(insert, requestsDenormalizedGeneratedColumns) - update = strmangle.SetComplement(update, requestsDenormalizedGeneratedColumns) - - if updateOnConflict && len(update) == 0 { - return errors.New("models: unable to upsert requests_denormalized, could not build update column list") - } - - conflict := conflictColumns - if len(conflict) == 0 { - conflict = 
make([]string, len(requestsDenormalizedPrimaryKeyColumns)) - copy(conflict, requestsDenormalizedPrimaryKeyColumns) - } - cache.query = buildUpsertQueryPostgres(dialect, "\"requests_denormalized\"", updateOnConflict, ret, update, conflict, insert) - - cache.valueMapping, err = queries.BindMapping(requestsDenormalizedType, requestsDenormalizedMapping, insert) - if err != nil { - return err - } - if len(ret) != 0 { - cache.retMapping, err = queries.BindMapping(requestsDenormalizedType, requestsDenormalizedMapping, ret) - if err != nil { - return err - } - } - } - - value := reflect.Indirect(reflect.ValueOf(o)) - vals := queries.ValuesFromMapping(value, cache.valueMapping) - var returns []interface{} - if len(cache.retMapping) != 0 { - returns = queries.PtrsFromMapping(value, cache.retMapping) - } - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, cache.query) - fmt.Fprintln(writer, vals) - } - if len(cache.retMapping) != 0 { - err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) - if errors.Is(err, sql.ErrNoRows) { - err = nil // Postgres doesn't return anything when there's no update - } - } else { - _, err = exec.ExecContext(ctx, cache.query, vals...) - } - if err != nil { - return errors.Wrap(err, "models: unable to upsert requests_denormalized") - } - - if !cached { - requestsDenormalizedUpsertCacheMut.Lock() - requestsDenormalizedUpsertCache[key] = cache - requestsDenormalizedUpsertCacheMut.Unlock() - } - - return o.doAfterUpsertHooks(ctx, exec) -} - -// Delete deletes a single RequestsDenormalized record with an executor. -// Delete will match against the primary key column to find the record to delete. 
-func (o *RequestsDenormalized) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if o == nil { - return 0, errors.New("models: no RequestsDenormalized provided for delete") - } - - if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), requestsDenormalizedPrimaryKeyMapping) - sql := "DELETE FROM \"requests_denormalized\" WHERE \"id\"=$1 AND \"request_started_at\"=$2" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args...) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete from requests_denormalized") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by delete for requests_denormalized") - } - - if err := o.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - - return rowsAff, nil -} - -// DeleteAll deletes all matching rows. -func (q requestsDenormalizedQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if q.Query == nil { - return 0, errors.New("models: no requestsDenormalizedQuery provided for delete all") - } - - queries.SetDelete(q.Query) - - result, err := q.Query.ExecContext(ctx, exec) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from requests_denormalized") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for requests_denormalized") - } - - return rowsAff, nil -} - -// DeleteAll deletes all rows in the slice, using an executor. 
-func (o RequestsDenormalizedSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { - if len(o) == 0 { - return 0, nil - } - - if len(requestsDenormalizedBeforeDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - var args []interface{} - for _, obj := range o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestsDenormalizedPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "DELETE FROM \"requests_denormalized\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, requestsDenormalizedPrimaryKeyColumns, len(o)) - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, args) - } - result, err := exec.ExecContext(ctx, sql, args...) - if err != nil { - return 0, errors.Wrap(err, "models: unable to delete all from requestsDenormalized slice") - } - - rowsAff, err := result.RowsAffected() - if err != nil { - return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for requests_denormalized") - } - - if len(requestsDenormalizedAfterDeleteHooks) != 0 { - for _, obj := range o { - if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { - return 0, err - } - } - } - - return rowsAff, nil -} - -// Reload refetches the object from the database -// using the primary keys with an executor. -func (o *RequestsDenormalized) Reload(ctx context.Context, exec boil.ContextExecutor) error { - ret, err := FindRequestsDenormalized(ctx, exec, o.ID, o.RequestStartedAt) - if err != nil { - return err - } - - *o = *ret - return nil -} - -// ReloadAll refetches every row with matching primary key column values -// and overwrites the original object slice with the newly updated slice. 
-func (o *RequestsDenormalizedSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { - if o == nil || len(*o) == 0 { - return nil - } - - slice := RequestsDenormalizedSlice{} - var args []interface{} - for _, obj := range *o { - pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), requestsDenormalizedPrimaryKeyMapping) - args = append(args, pkeyArgs...) - } - - sql := "SELECT \"requests_denormalized\".* FROM \"requests_denormalized\" WHERE " + - strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, requestsDenormalizedPrimaryKeyColumns, len(*o)) - - q := queries.Raw(sql, args...) - - err := q.Bind(ctx, exec, &slice) - if err != nil { - return errors.Wrap(err, "models: unable to reload all in RequestsDenormalizedSlice") - } - - *o = slice - - return nil -} - -// RequestsDenormalizedExists checks if the RequestsDenormalized row exists. -func RequestsDenormalizedExists(ctx context.Context, exec boil.ContextExecutor, iD int64, requestStartedAt time.Time) (bool, error) { - var exists bool - sql := "select exists(select 1 from \"requests_denormalized\" where \"id\"=$1 AND \"request_started_at\"=$2 limit 1)" - - if boil.IsDebug(ctx) { - writer := boil.DebugWriterFrom(ctx) - fmt.Fprintln(writer, sql) - fmt.Fprintln(writer, iD, requestStartedAt) - } - row := exec.QueryRowContext(ctx, sql, iD, requestStartedAt) - - err := row.Scan(&exists) - if err != nil { - return false, errors.Wrap(err, "models: unable to check if requests_denormalized exists") - } - - return exists, nil -} From b454338facc5ec78fdeccdf1757e6334d78b9d17 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 17:37:28 +0100 Subject: [PATCH 09/23] remove sqlboiler --- sqlboiler.toml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 sqlboiler.toml diff --git a/sqlboiler.toml b/sqlboiler.toml deleted file mode 100644 index cb0685b..0000000 --- a/sqlboiler.toml +++ /dev/null @@ -1,11 +0,0 @@ -output = "db/models" -wipe 
= true - -[psql] -dbname = "ants_watch" -host = "localhost" -port = 5432 -user = "ants_watch" -pass = "password" -sslmode = "disable" -blacklist = ["schema_migrations"] From b048086d309cd202566ff77a8124f25dde16425c Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 17:38:39 +0100 Subject: [PATCH 10/23] refactor ants to use clickhouse --- Makefile | 13 +- ant.go | 6 +- cmd/honeypot/config.go | 29 - cmd/honeypot/main.go | 126 ++- db/client.go | 87 +- db/client_db.go | 663 -------------- .../000001_create_requests_table.up.sql | 15 +- db/models.go | 21 + db/resolver.go | 136 --- go-libp2p-kad-dht | 2 +- go.mod | 31 +- go.sum | 832 +----------------- keys.go | 5 +- nebuladb.go | 34 +- queen.go | 262 +++--- util.go | 25 +- 16 files changed, 360 insertions(+), 1927 deletions(-) delete mode 100644 cmd/honeypot/config.go delete mode 100644 db/client_db.go create mode 100644 db/models.go delete mode 100644 db/resolver.go diff --git a/Makefile b/Makefile index 005f75f..40828f5 100644 --- a/Makefile +++ b/Makefile @@ -10,20 +10,17 @@ REPO_USER?=AWS REPO_REGION?=us-east-1 - tools: go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@v4.15.2 - go install github.com/volatiletech/sqlboiler/v4@v4.13.0 - go install github.com/volatiletech/sqlboiler/v4/drivers/sqlboiler-psql@v4.13.0 - -models: - sqlboiler --no-tests psql migrate-up: - migrate -database 'postgres://ants_watch:password@localhost:5432/ants_watch?sslmode=disable' -path db/migrations up + migrate -database 'clickhouse://localhost:9000?username=default&secure=false' -path db/migrations up migrate-down: - migrate -database 'postgres://ants_watch:password@localhost:5432/ants_watch?sslmode=disable' -path db/migrations down + migrate -database 'clickhouse://localhost:9000?username=default&secure=false' -path db/migrations down + +local-clickhouse: + docker run --name ants-clickhouse --rm -p 9000:9000 clickhouse/clickhouse-server .PHONY: build build: diff --git a/ant.go b/ant.go index 
ba11c50..f102f13 100644 --- a/ant.go +++ b/ant.go @@ -7,7 +7,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" kad "github.com/libp2p/go-libp2p-kad-dht" - antslog "github.com/libp2p/go-libp2p-kad-dht/antslog" + "github.com/libp2p/go-libp2p-kad-dht/ants" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" @@ -23,7 +23,7 @@ const ( ) type Ant struct { - port uint16 + port int dht *kad.IpfsDHT privKey crypto.PrivKey @@ -32,7 +32,7 @@ type Ant struct { UserAgent string } -func SpawnAnt(ctx context.Context, privKey crypto.PrivKey, peerstore peerstore.Peerstore, datastore ds.Batching, port uint16, logsChan chan antslog.RequestLog) (*Ant, error) { +func SpawnAnt(ctx context.Context, privKey crypto.PrivKey, peerstore peerstore.Peerstore, datastore ds.Batching, port int, logsChan chan ants.RequestEvent) (*Ant, error) { pid, _ := peer.IDFromPrivateKey(privKey) logger.Debugf("spawning ant. kadid: %s, peerid: %s", PeeridToKadid(pid).HexString(), pid) diff --git a/cmd/honeypot/config.go b/cmd/honeypot/config.go deleted file mode 100644 index aadd591..0000000 --- a/cmd/honeypot/config.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -var RootConfig = struct { - AntsClickhouseAddress string - AntsClickhouseDatabase string - AntsClickhouseUsername string - AntsClickhousePassword string - AntsClickhouseSSL bool - - NebulaDBConnString string - KeyDBPath string - - NumPorts int - FirstPort int - UPnp bool -}{ - AntsClickhouseAddress: "", - AntsClickhouseDatabase: "", - AntsClickhouseUsername: "", - AntsClickhousePassword: "", - AntsClickhouseSSL: true, - - NebulaDBConnString: "", - KeyDBPath: "keys.db", - - NumPorts: 128, - FirstPort: 6000, - UPnp: false, -} diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index 7c58155..5a5fb1d 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -16,6 +16,38 @@ import ( var logger = logging.Logger("ants-queen") +var rootConfig = 
struct { + AntsClickhouseAddress string + AntsClickhouseDatabase string + AntsClickhouseUsername string + AntsClickhousePassword string + AntsClickhouseSSL bool + NebulaDBConnString string + KeyDBPath string + NumPorts int + FirstPort int + UPnp bool + BatchSize int + BatchTime time.Duration + CrawlInterval time.Duration + CacheSize int +}{ + AntsClickhouseAddress: "", + AntsClickhouseDatabase: "", + AntsClickhouseUsername: "", + AntsClickhousePassword: "", + AntsClickhouseSSL: true, + NebulaDBConnString: "", + KeyDBPath: "keys.db", + NumPorts: 128, + FirstPort: 6000, + UPnp: false, + BatchSize: 1000, + BatchTime: time.Second, + CrawlInterval: 120 * time.Minute, + CacheSize: 10_000, +} + func main() { logging.SetLogLevel("ants-queen", "debug") logging.SetLogLevel("dht", "error") @@ -33,43 +65,71 @@ func main() { Name: "ants.clickhouse.address", Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", EnvVars: []string{"ANTS_CLICKHOUSE_ADDRESS"}, - Destination: &RootConfig.AntsClickhouseAddress, - Value: RootConfig.AntsClickhouseAddress, + Destination: &rootConfig.AntsClickhouseAddress, + Value: rootConfig.AntsClickhouseAddress, }, &cli.StringFlag{ Name: "ants.clickhouse.database", Usage: "The ClickHouse database where ants requests will be recorded", EnvVars: []string{"ANTS_CLICKHOUSE_DATABASE"}, - Destination: &RootConfig.AntsClickhouseDatabase, - Value: RootConfig.AntsClickhouseDatabase, + Destination: &rootConfig.AntsClickhouseDatabase, + Value: rootConfig.AntsClickhouseDatabase, }, &cli.StringFlag{ Name: "ants.clickhouse.username", Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", EnvVars: []string{"ANTS_CLICKHOUSE_USERNAME"}, - Destination: &RootConfig.AntsClickhouseUsername, - Value: RootConfig.AntsClickhouseUsername, + Destination: &rootConfig.AntsClickhouseUsername, + Value: rootConfig.AntsClickhouseUsername, }, &cli.StringFlag{ Name: "ants.clickhouse.password", Usage: "The password for the ClickHouse 
user", EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, - Destination: &RootConfig.AntsClickhousePassword, - Value: RootConfig.AntsClickhousePassword, + Destination: &rootConfig.AntsClickhousePassword, + Value: rootConfig.AntsClickhousePassword, }, &cli.BoolFlag{ - Name: "ants.clickhouse.password", + Name: "ants.clickhouse.ssl", Usage: "Whether to use SSL for the ClickHouse connection", EnvVars: []string{"ANTS_CLICKHOUSE_SSL"}, - Destination: &RootConfig.AntsClickhouseSSL, - Value: RootConfig.AntsClickhouseSSL, + Destination: &rootConfig.AntsClickhouseSSL, + Value: rootConfig.AntsClickhouseSSL, }, &cli.StringFlag{ Name: "nebula.db.connstring", Usage: "The connection string for the Postgres Nebula database", EnvVars: []string{"NEBULA_DB_CONNSTRING"}, - Destination: &RootConfig.NebulaDBConnString, - Value: RootConfig.NebulaDBConnString, + Destination: &rootConfig.NebulaDBConnString, + Value: rootConfig.NebulaDBConnString, + }, + &cli.IntFlag{ + Name: "batch.size", + Usage: "The number of ants to request to store at a time", + EnvVars: []string{"ANTS_BATCH_SIZE"}, + Destination: &rootConfig.BatchSize, + Value: rootConfig.BatchSize, + }, + &cli.DurationFlag{ + Name: "batch.time", + Usage: "The time to wait between batches", + EnvVars: []string{"ANTS_BATCH_TIME"}, + Destination: &rootConfig.BatchTime, + Value: rootConfig.BatchTime, + }, + &cli.DurationFlag{ + Name: "crawl.interval", + Usage: "The time between two crawls", + EnvVars: []string{"ANTS_CRAWL_INTERVAL"}, + Destination: &rootConfig.CrawlInterval, + Value: rootConfig.CrawlInterval, + }, + &cli.IntFlag{ + Name: "cache.size", + Usage: "How many agent versions and protocols should be cached in memory", + EnvVars: []string{"ANTS_CACHE_SIZE"}, + Destination: &rootConfig.CacheSize, + Value: rootConfig.CacheSize, }, &cli.PathFlag{ Name: "key.db_path", @@ -119,26 +179,40 @@ func main() { func runQueenCommand(c *cli.Context) error { ctx := c.Context - client, err := db.NewDatabaseClient( - c.Context, - 
RootConfig.AntsClickhouseAddress, - RootConfig.AntsClickhouseDatabase, - RootConfig.AntsClickhouseUsername, - RootConfig.AntsClickhousePassword, - RootConfig.AntsClickhouseSSL, + // initializing new clickhouse client + client, err := db.NewClient( + rootConfig.AntsClickhouseAddress, + rootConfig.AntsClickhouseDatabase, + rootConfig.AntsClickhouseUsername, + rootConfig.AntsClickhousePassword, + rootConfig.AntsClickhouseSSL, ) - if err != nil { logger.Errorln(err) return fmt.Errorf("init database client: %w", err) } - var queen *ants.Queen - if RootConfig.UPnp { - queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, 0, 0, client) - } else { - queen, err = ants.NewQueen(ctx, RootConfig.NebulaDBConnString, RootConfig.KeyDBPath, uint16(RootConfig.NumPorts), uint16(RootConfig.FirstPort), client) + // pinging database to check availability + pingCtx, pingCancel := context.WithTimeout(ctx, 5*time.Second) + defer pingCancel() + if err = client.Ping(pingCtx); err != nil { + return fmt.Errorf("ping clickhouse: %w", err) } + + queenCfg := &ants.QueenConfig{ + KeysDBPath: rootConfig.KeyDBPath, + NPorts: rootConfig.NumPorts, + FirstPort: rootConfig.FirstPort, + UPnP: rootConfig.UPnp, + BatchSize: rootConfig.BatchSize, + BatchTime: rootConfig.BatchTime, + CrawlInterval: rootConfig.CrawlInterval, + CacheSize: rootConfig.CacheSize, + NebulaDBConnString: rootConfig.NebulaDBConnString, + } + + // initializting queen + queen, err := ants.NewQueen(client, queenCfg) if err != nil { return fmt.Errorf("failed to create queen: %w", err) } diff --git a/db/client.go b/db/client.go index cda2b0c..de0f4a8 100644 --- a/db/client.go +++ b/db/client.go @@ -3,24 +3,30 @@ package db import ( "context" "crypto/tls" - "golang.org/x/net/proxy" + "fmt" "net" - "time" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - mt "github.com/probe-lab/ants-watch/metrics" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/net/proxy" ) 
-type Client struct { - ctx context.Context - conn driver.Conn +var logger = logging.Logger("db") + +type Client interface { + Ping(ctx context.Context) error + BulkInsertRequests(ctx context.Context, requests []*Request) error +} - telemetry *mt.Telemetry +type ClickhouseClient struct { + driver.Conn } -func NewDatabaseClient(ctx context.Context, address, database, username, password string, ssl bool) (*Client, error) { - logger.Infoln("Creating new database client...") +var _ Client = (*ClickhouseClient)(nil) + +func NewClient(address, database, username, password string, ssl bool) (*ClickhouseClient, error) { + logger.Infoln("Creating new clickhouse client...") conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{address}, @@ -29,7 +35,6 @@ func NewDatabaseClient(ctx context.Context, address, database, username, passwor Username: username, Password: password, }, - Debug: true, DialContext: func(ctx context.Context, addr string) (net.Conn, error) { var d proxy.ContextDialer if ssl { @@ -41,54 +46,38 @@ func NewDatabaseClient(ctx context.Context, address, database, username, passwor return d.DialContext(ctx, "tcp", addr) }, }) - if err != nil { return nil, err } - if err := conn.Ping(ctx); err != nil { - return nil, err + client := &ClickhouseClient{ + Conn: conn, } - return &Client{ - ctx: ctx, - conn: conn, - }, nil + return client, nil } -type BatchRequest struct { - ctx context.Context - - insertStatement string - - conn driver.Conn - batch driver.Batch -} - -func NewBatch(ctx context.Context, conn driver.Conn, insertStatement string) (*BatchRequest, error) { - batch, err := conn.PrepareBatch(ctx, insertStatement, driver.WithReleaseConnection()) +func (c *ClickhouseClient) BulkInsertRequests(ctx context.Context, requests []*Request) error { + batch, err := c.Conn.PrepareBatch(ctx, "INSERT INTO requests", driver.WithReleaseConnection()) if err != nil { - return nil, err + return fmt.Errorf("prepare batch: %w", err) } - return &BatchRequest{ - ctx: 
ctx, - insertStatement: insertStatement, - conn: conn, - batch: batch, - }, nil -} - -func (b *BatchRequest) Append(id, antMultihash, remoteMultihash, agentVersion string, protocols []string, startedAt time.Time, requestType, keyMultihash string, multiAddresses []string) error { - return b.batch.Append( - id, - antMultihash, - remoteMultihash, - agentVersion, - protocols, - startedAt, - requestType, - keyMultihash, - multiAddresses, - ) + for _, r := range requests { + err = batch.Append( + r.UUID.String(), + r.AntID.String(), + r.RemoteID.String(), + r.AgentVersion, + r.Protocols, + r.StartedAt, + r.Type, + r.KeyID, + r.MultiAddresses, + ) + if err != nil { + return fmt.Errorf("append request to batch: %w", err) + } + } + return batch.Send() } diff --git a/db/client_db.go b/db/client_db.go deleted file mode 100644 index fd986e3..0000000 --- a/db/client_db.go +++ /dev/null @@ -1,663 +0,0 @@ -// mostly copy-pastad from https://github.com/dennis-tra/nebula/blob/main/db/client_db.go -// except the `insertRequest` function - -package db - -import ( - "context" - "crypto/sha256" - "database/sql" - "embed" - "encoding/hex" - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/dennis-tra/nebula-crawler/config" - "github.com/dennis-tra/nebula-crawler/db" - "github.com/golang-migrate/migrate/v4" - "github.com/golang-migrate/migrate/v4/database/postgres" - _ "github.com/golang-migrate/migrate/v4/source/file" - lru "github.com/hashicorp/golang-lru" - glog "github.com/ipfs/go-log/v2" - _ "github.com/lib/pq" - ma "github.com/multiformats/go-multiaddr" - - log "github.com/sirupsen/logrus" - "github.com/uptrace/opentelemetry-go-extra/otelsql" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - "github.com/volatiletech/sqlboiler/v4/queries" - "github.com/volatiletech/sqlboiler/v4/queries/qm" - "github.com/volatiletech/sqlboiler/v4/types" - "go.opentelemetry.io/otel/attribute" - 
"go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.21.0" - - "github.com/probe-lab/ants-watch/db/models" - mt "github.com/probe-lab/ants-watch/metrics" -) - -//go:embed migrations -var migrations embed.FS - -var logger = glog.Logger("client-db") - -type DBClient struct { - ctx context.Context - cfg config.Database - - // Database handler - Handler *sql.DB - - // protocols cache - agentVersions *lru.Cache - - // protocols cache - protocols *lru.Cache - - // protocols set cache - protocolsSets *lru.Cache - - // Database telemetry - telemetry *mt.Telemetry -} - -func InitDBClient(ctx context.Context, cfg *config.Database) (*DBClient, error) { - log.WithFields(log.Fields{ - "host": cfg.DatabaseHost, - "port": cfg.DatabasePort, - "name": cfg.DatabaseName, - "user": cfg.DatabaseUser, - "ssl": cfg.DatabaseSSLMode, - }).Infoln("Initializing database client") - - connString := cfg.DatabaseSourceName() - dbh, err := otelsql.Open("postgres", connString, - otelsql.WithAttributes(semconv.DBSystemPostgreSQL), - otelsql.WithMeterProvider(cfg.MeterProvider), - otelsql.WithTracerProvider(cfg.TracerProvider), - ) - if err != nil { - return nil, fmt.Errorf("opening database: %w", err) - } - - // Set to match the writer worker - dbh.SetMaxIdleConns(cfg.MaxIdleConns) // default is 2 which leads to many connection open/closings - - otelsql.ReportDBStatsMetrics(dbh, otelsql.WithMeterProvider(cfg.MeterProvider)) - - // Ping database to verify connection. 
- if err = dbh.Ping(); err != nil { - return nil, fmt.Errorf("pinging database: %w", err) - } - - telemetry, err := mt.NewTelemetry(cfg.TracerProvider, cfg.MeterProvider) - if err != nil { - return nil, fmt.Errorf("new telemetry: %w", err) - } - - client := &DBClient{ - ctx: ctx, - cfg: *cfg, - telemetry: telemetry, - Handler: dbh, - } - client.applyMigrations(cfg, dbh) - - client.agentVersions, err = lru.New(cfg.AgentVersionsCacheSize) - if err != nil { - return nil, fmt.Errorf("new agent versions lru cache: %w", err) - } - - client.protocols, err = lru.New(cfg.ProtocolsCacheSize) - if err != nil { - return nil, fmt.Errorf("new protocol lru cache: %w", err) - } - - client.protocolsSets, err = lru.New(cfg.ProtocolsSetCacheSize) - if err != nil { - return nil, fmt.Errorf("new protocols set lru cache: %w", err) - } - - if err = client.fillAgentVersionsCache(ctx); err != nil { - return nil, fmt.Errorf("fill agent versions cache: %w", err) - } - - if err = client.fillProtocolsCache(ctx); err != nil { - return nil, fmt.Errorf("fill protocols cache: %w", err) - } - - if err = client.fillProtocolsSetCache(ctx); err != nil { - return nil, fmt.Errorf("fill protocols set cache: %w", err) - } - - client.ensurePartitions(ctx, time.Now()) - client.ensurePartitions(ctx, time.Now().Add(24*time.Hour)) - - go func() { - for range time.NewTicker(24 * time.Hour).C { - client.ensurePartitions(ctx, time.Now().Add(12*time.Hour)) - } - }() - - return client, nil -} - -func (c *DBClient) ensurePartitions(ctx context.Context, baseDate time.Time) { - lowerBound := time.Date(baseDate.Year(), baseDate.Month(), 1, 0, 0, 0, 0, baseDate.Location()) - upperBound := lowerBound.AddDate(0, 1, 0) - - query := partitionQuery(models.TableNames.PeerLogs, lowerBound, upperBound) - if _, err := c.Handler.ExecContext(ctx, query); err != nil { - log.WithError(err).WithField("query", query).Warnln("could not create peer_logs partition") - } - - query = partitionQuery(models.TableNames.Requests, lowerBound, 
upperBound) - if _, err := c.Handler.ExecContext(ctx, query); err != nil { - log.WithError(err).WithField("query", query).Warnln("could not create requests partition") - } - - query = partitionQuery(models.TableNames.RequestsDenormalized, lowerBound, upperBound) - if _, err := c.Handler.ExecContext(ctx, query); err != nil { - log.WithError(err).WithField("query", query).Warnln("could not create requests partition") - } - -} - -func partitionQuery(table string, lower time.Time, upper time.Time) string { - return fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s_%s_%s PARTITION OF %s FOR VALUES FROM ('%s') TO ('%s')", - table, - lower.Format("2006"), - lower.Format("01"), - table, - lower.Format("2006-01-02"), - upper.Format("2006-01-02"), - ) -} - -func (c *DBClient) applyMigrations(cfg *config.Database, Handler *sql.DB) { - tmpDir, err := os.MkdirTemp("", "ants-watch") - if err != nil { - log.WithError(err).WithField("pattern", "ants-watch").Warnln("Could not create tmp directory for migrations") - return - } - defer func() { - if err = os.RemoveAll(tmpDir); err != nil { - log.WithError(err).WithField("tmpDir", tmpDir).Warnln("Could not clean up tmp directory") - } - }() - log.WithField("dir", tmpDir).Debugln("Created temporary directory") - - err = fs.WalkDir(migrations, ".", func(path string, d fs.DirEntry, err error) error { - join := filepath.Join(tmpDir, path) - if d.IsDir() { - return os.MkdirAll(join, 0o755) - } - - data, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("read file: %w", err) - } - - return os.WriteFile(join, data, 0o644) - }) - if err != nil { - log.WithError(err).Warnln("Could not create migrations files") - return - } - - // Apply migrations - driver, err := postgres.WithInstance(Handler, &postgres.Config{}) - if err != nil { - log.WithError(err).Warnln("Could not create driver instance") - return - } - - m, err := migrate.NewWithDatabaseInstance("file://"+filepath.Join(tmpDir, "migrations"), cfg.DatabaseName, driver) - if 
err != nil { - log.WithError(err).Warnln("Could not create migrate instance") - return - } - - if err = m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) { - log.WithError(err).Warnln("Couldn't apply migrations") - return - } -} - -func (c *DBClient) insertRequest( - ctx context.Context, - timestamp time.Time, - requestType string, - antID string, - peerID string, - keyID string, - maddrs []string, - protocolsSetID null.Int, - agentVersionsID null.Int, -) (string, error) { - start := time.Now() - - rows, err := queries.Raw("SELECT insert_request($1, $2, $3, $4, $5, $6, $7, $8)", - timestamp, - requestType, - antID, - peerID, - keyID, - types.StringArray(maddrs), - agentVersionsID, - protocolsSetID, - ).QueryContext(ctx, c.Handler) - if err != nil { - return "", err - } - - c.telemetry.InsertRequestHistogram.Record(ctx, time.Since(start).Milliseconds(), metric.WithAttributes( - attribute.String("type", requestType), - attribute.Bool("success", err == nil), - )) - - defer func() { - if err := rows.Close(); err != nil { - log.WithError(err).Warnln("Could not close rows") - } - }() - - if !rows.Next() { - return peerID, nil - } - - if err = rows.Scan(&peerID); err != nil { - return "", err - } - - return peerID, nil -} - -func MaddrsToAddrs(maddrs []ma.Multiaddr) []string { - addrs := make([]string, len(maddrs)) - for i, maddr := range maddrs { - addrs[i] = maddr.String() - } - return addrs -} - -// protocolsSetHash returns a unique hash digest for this set of protocol IDs as it's also generated by the database. -// It expects the list of protocolIDs to be sorted in ascending order. -func (c *DBClient) protocolsSetHash(protocolIDs []int64) string { - protocolStrs := make([]string, len(protocolIDs)) - for i, id := range protocolIDs { - protocolStrs[i] = strconv.Itoa(int(id)) // safe because protocol IDs are just integers in the database. 
- } - dat := []byte("{" + strings.Join(protocolStrs, ",") + "}") - - h := sha256.New() - h.Write(dat) - return string(h.Sum(nil)) -} - -func (c *DBClient) GetOrCreateProtocolsSetID(ctx context.Context, exec boil.ContextExecutor, protocols []string) (*int, error) { - if len(protocols) == 0 { - return nil, db.ErrEmptyProtocolsSet - } - - protocolIDs := make([]int64, len(protocols)) - for i, protocol := range protocols { - protocolID, err := c.GetOrCreateProtocol(ctx, exec, protocol) - if errors.Is(err, db.ErrEmptyProtocol) { - continue - } else if err != nil { - return nil, fmt.Errorf("get or create protocol: %w", err) - } - protocolIDs[i] = int64(*protocolID) - } - - sort.Slice(protocolIDs, func(i, j int) bool { return protocolIDs[i] < protocolIDs[j] }) - - key := c.protocolsSetHash(protocolIDs) - if id, found := c.protocolsSets.Get(key); found { - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "protocol_set"), - attribute.Bool("hit", true), - )) - return id.(*int), nil - } - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "protocol_set"), - attribute.Bool("hit", false), - )) - - log.WithField("key", hex.EncodeToString([]byte(key))).Infoln("Upsert protocols set") - row := exec.QueryRowContext(ctx, "SELECT upsert_protocol_set_id($1)", types.Int64Array(protocolIDs)) - if row.Err() != nil { - return nil, fmt.Errorf("unable to upsert protocols set: %w", row.Err()) - } - - var protocolsSetID *int - if err := row.Scan(&protocolsSetID); err != nil { - return nil, fmt.Errorf("unable to scan result from upsert protocol set id: %w", err) - } - - if protocolsSetID == nil { - return nil, fmt.Errorf("protocols set not created") - } - - c.protocolsSets.Add(key, protocolsSetID) - - return protocolsSetID, nil -} - -func (c *DBClient) GetOrCreateProtocol(ctx context.Context, exec boil.ContextExecutor, protocol string) (*int, error) { - if protocol == "" { - return nil, db.ErrEmptyProtocol - 
} - - if id, found := c.protocols.Get(protocol); found { - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "protocol"), - attribute.Bool("hit", true), - )) - return id.(*int), nil - } - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "protocol"), - attribute.Bool("hit", false), - )) - - log.WithField("protocol", protocol).Infoln("Upsert protocol") - row := exec.QueryRowContext(ctx, "SELECT upsert_protocol($1)", protocol) - if row.Err() != nil { - return nil, fmt.Errorf("unable to upsert protocol: %w", row.Err()) - } - - var protocolID *int - if err := row.Scan(&protocolID); err != nil { - return nil, fmt.Errorf("unable to scan result from upsert protocol: %w", err) - } - - if protocolID == nil { - return nil, fmt.Errorf("protocol not created") - } - - c.protocols.Add(protocol, protocolID) - - return protocolID, nil -} - -func (c *DBClient) PersistRequest( - ctx context.Context, - timestamp time.Time, - requestType string, - antID string, - peerID string, - keyID string, - maddrs []string, - agentVersion null.String, - protocols []string, -) (string, error) { - var agentVersionID, protocolsSetID *int - var avidErr, psidErr error - - agentVersionString := agentVersion.String - - var wg sync.WaitGroup - wg.Add(2) - go func() { - agentVersionID, avidErr = c.GetOrCreateAgentVersionID(ctx, c.Handler, agentVersionString) - if avidErr != nil && !errors.Is(avidErr, db.ErrEmptyAgentVersion) && !errors.Is(psidErr, context.Canceled) { - log.WithError(avidErr).WithField("agentVersion", agentVersion).Warnln("Error getting or creating agent version id") - } - wg.Done() - }() - go func() { - protocolsSetID, psidErr = c.GetOrCreateProtocolsSetID(ctx, c.Handler, protocols) - if psidErr != nil && !errors.Is(psidErr, db.ErrEmptyProtocolsSet) && !errors.Is(psidErr, context.Canceled) { - log.WithError(psidErr).WithField("protocols", protocols).Warnln("Error getting or creating protocols set id") 
- } - wg.Done() - }() - wg.Wait() - - return c.insertRequest( - ctx, - timestamp, - requestType, - antID, - peerID, - keyID, - maddrs, - null.IntFromPtr(protocolsSetID), - null.IntFromPtr(agentVersionID), - ) -} - -func (c *DBClient) GetOrCreateAgentVersionID(ctx context.Context, exec boil.ContextExecutor, agentVersion string) (*int, error) { - if agentVersion == "" { - return nil, db.ErrEmptyAgentVersion - } - - if id, found := c.agentVersions.Get(agentVersion); found { - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "agent_version"), - attribute.Bool("hit", true), - )) - return id.(*int), nil - } - c.telemetry.CacheQueriesCount.Add(ctx, 1, metric.WithAttributes( - attribute.String("entity", "agent_version"), - attribute.Bool("hit", false), - )) - - log.WithField("agentVersion", agentVersion).Infoln("Upsert agent version") - row := exec.QueryRowContext(ctx, "SELECT upsert_agent_version($1)", agentVersion) - if row.Err() != nil { - return nil, fmt.Errorf("unable to upsert agent version: %w", row.Err()) - } - - var agentVersionID *int - if err := row.Scan(&agentVersionID); err != nil { - return nil, fmt.Errorf("unable to scan result from upsert agent version: %w", err) - } - - if agentVersionID == nil { - return nil, fmt.Errorf("agentVersion not created") - } - - c.agentVersions.Add(agentVersion, agentVersionID) - - return agentVersionID, nil -} - -// fillAgentVersionsCache fetches all rows until agent version cache size from the agent_versions table and -// initializes the DB clients agent version cache. 
-func (c *DBClient) fillAgentVersionsCache(ctx context.Context) error { - if c.cfg.AgentVersionsCacheSize == 0 { - return nil - } - - avs, err := models.AgentVersions(qm.Limit(c.cfg.AgentVersionsCacheSize)).All(ctx, c.Handler) - if err != nil { - return err - } - - for _, av := range avs { - c.agentVersions.Add(av.AgentVersion, &av.ID) - } - - return nil -} - -// fillProtocolsSetCache fetches all rows until protocolSet cache size from the protocolsSets table and -// initializes the DB clients protocolsSets cache. -func (c *DBClient) fillProtocolsSetCache(ctx context.Context) error { - if c.cfg.ProtocolsSetCacheSize == 0 { - return nil - } - - protSets, err := models.ProtocolsSets(qm.Limit(c.cfg.ProtocolsSetCacheSize)).All(ctx, c.Handler) - if err != nil { - return err - } - - for _, ps := range protSets { - c.protocolsSets.Add(string(ps.Hash), &ps.ID) - } - - return nil -} - -// fillProtocolsCache fetches all rows until protocol cache size from the protocols table and -// initializes the DB clients protocols cache. 
-func (c *DBClient) fillProtocolsCache(ctx context.Context) error { - if c.cfg.ProtocolsCacheSize == 0 { - return nil - } - - prots, err := models.Protocols(qm.Limit(c.cfg.ProtocolsCacheSize)).All(ctx, c.Handler) - if err != nil { - return err - } - - for _, p := range prots { - c.protocols.Add(p.Protocol, &p.ID) - } - - return nil -} - -func (c *DBClient) UpsertPeer(ctx context.Context, mh string, agentVersion null.String, protocols []string, timestamp time.Time) (int, error) { - var agentVersionID, protocolsSetID *int - var avidErr, psidErr error - - var wg sync.WaitGroup - wg.Add(2) - go func() { - agentVersionID, avidErr = c.GetOrCreateAgentVersionID(ctx, c.Handler, agentVersion.String) - if avidErr != nil && !errors.Is(avidErr, db.ErrEmptyAgentVersion) && !errors.Is(psidErr, context.Canceled) { - log.WithError(avidErr).WithField("agentVersion", agentVersion).Warnln("Error getting or creating agent version id") - } - wg.Done() - }() - go func() { - protocolsSetID, psidErr = c.GetOrCreateProtocolsSetID(ctx, c.Handler, protocols) - if psidErr != nil && !errors.Is(psidErr, db.ErrEmptyProtocolsSet) && !errors.Is(psidErr, context.Canceled) { - log.WithError(psidErr).WithField("protocols", protocols).Warnln("Error getting or creating protocols set id") - } - wg.Done() - }() - wg.Wait() - - rows, err := queries.Raw("SELECT upsert_peer($1, $2, $3, $4)", - mh, agentVersionID, protocolsSetID, timestamp, - ).QueryContext(ctx, c.Handler) - if err != nil { - return 0, err - } - - defer func() { - if err := rows.Close(); err != nil { - log.WithError(err).Warnln("Could not close rows") - } - }() - - id := 0 - if !rows.Next() { - return id, nil - } - - if err = rows.Scan(&id); err != nil { - return 0, err - } - - return id, nil -} - -// FetchUnresolvedMultiAddresses fetches all multi addresses that were not resolved yet. 
-func (c *DBClient) FetchUnresolvedMultiAddresses(ctx context.Context, limit int) (models.MultiAddressSlice, error) { - return models.MultiAddresses( - models.MultiAddressWhere.Resolved.EQ(false), - qm.OrderBy(models.MultiAddressColumns.CreatedAt), - qm.Limit(limit), - ).All(ctx, c.Handler) -} - -func BulkInsertRequests(ctx context.Context, db *sql.DB, requests []models.RequestsDenormalized) error { - valueStrings := []string{} - valueArgs := []interface{}{} - i := 1 - - for _, request := range requests { - valueStrings = append(valueStrings, fmt.Sprintf("($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d)", i, i+1, i+2, i+3, i+4, i+5, i+6, i+7)) - valueArgs = append(valueArgs, request.RequestStartedAt, request.RequestType, request.AntMultihash, request.PeerMultihash, request.KeyMultihash, request.MultiAddresses, request.AgentVersion, request.Protocols) - i += 8 - } - - stmt := fmt.Sprintf("INSERT INTO requests_denormalized (request_started_at, request_type, ant_multihash, peer_multihash, key_multihash, multi_addresses, agent_version, protocols) VALUES %s RETURNING id;", - strings.Join(valueStrings, ", ")) - - rows, err := queries.Raw(stmt, valueArgs...).QueryContext(ctx, db) - if err != nil { - return err - } - defer rows.Close() - - return nil -} - -func NormalizeRequests(ctx context.Context, db *sql.DB, dbClient *DBClient) error { - rows, err := db.QueryContext(ctx, "SELECT id, request_started_at, request_type, ant_multihash, peer_multihash, key_multihash, multi_addresses, agent_version, protocols FROM requests_denormalized WHERE normalized_at IS NULL LIMIT 1000") - if err != nil { - return err - } - defer rows.Close() - - for rows.Next() { - var request models.RequestsDenormalized - if err := rows.Scan(&request.ID, &request.RequestStartedAt, &request.RequestType, &request.AntMultihash, &request.PeerMultihash, &request.KeyMultihash, &request.MultiAddresses, &request.AgentVersion, &request.Protocols); err != nil { - return err - } - - _, err = dbClient.PersistRequest( - 
ctx, - request.RequestStartedAt, - request.RequestType, - request.AntMultihash, - request.PeerMultihash, - request.KeyMultihash, - request.MultiAddresses, - request.AgentVersion, - request.Protocols, - ) - if err != nil { - return fmt.Errorf("failed to normalize request ID %d: %w, timestamp: %v", request.ID, err, request.RequestStartedAt) - } - - _, err = db.ExecContext(ctx, "UPDATE requests_denormalized SET normalized_at = NOW() WHERE id = $1", request.ID) - if err != nil { - return fmt.Errorf("failed to update normalized_at for request ID %d: %w", request.ID, err) - } - } - - return nil -} - -func AddrsToMaddrs(addrs []string) ([]ma.Multiaddr, error) { - maddrs := make([]ma.Multiaddr, len(addrs)) - for i, addr := range addrs { - maddr, err := ma.NewMultiaddr(addr) - if err != nil { - return nil, err - } - maddrs[i] = maddr - } - - return maddrs, nil -} diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index 53b2cdd..5de5507 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -3,19 +3,12 @@ CREATE TABLE requests id UUID, ant_multihash String, remote_multihash String, - agent_version Nullable(String), - protocols Array(Nullable(String)), + agent_version String, + protocols Array(String), started_at DateTime, - request_type Enum8( - 'PUT_VALUE', - 'GET_VALUE', - 'ADD_PROVIDER', - 'GET_PROVIDERS', - 'FIND_NODE', - 'PING' - ), + request_type String, key_multihash String, multi_addresses Array(String) -) ENGINE = MergeTree() +) ENGINE = ReplicatedMergeTree() PRIMARY KEY (started_at) TTL started_at + INTERVAL 1 DAY; diff --git a/db/models.go b/db/models.go new file mode 100644 index 0000000..5b887e8 --- /dev/null +++ b/db/models.go @@ -0,0 +1,21 @@ +package db + +import ( + "time" + + "github.com/google/uuid" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" + "github.com/libp2p/go-libp2p/core/peer" +) + +type Request struct { + UUID 
uuid.UUID + AntID peer.ID + RemoteID peer.ID + Type pb.Message_MessageType + AgentVersion string + Protocols []string + StartedAt time.Time + KeyID string + MultiAddresses []string +} diff --git a/db/resolver.go b/db/resolver.go deleted file mode 100644 index fe7d9f5..0000000 --- a/db/resolver.go +++ /dev/null @@ -1,136 +0,0 @@ -package db - -import ( - "context" - "database/sql" - "errors" - "fmt" - - ma "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" - log "github.com/sirupsen/logrus" - "github.com/volatiletech/null/v8" - "github.com/volatiletech/sqlboiler/v4/boil" - - "github.com/dennis-tra/nebula-crawler/db" - "github.com/dennis-tra/nebula-crawler/maxmind" - "github.com/dennis-tra/nebula-crawler/udger" - "github.com/probe-lab/ants-watch/db/models" -) - -func Resolve(ctx context.Context, dbh *sql.DB, mmc *maxmind.Client, uclient *udger.Client, dbmaddrs models.MultiAddressSlice) error { - log.WithField("size", len(dbmaddrs)).Infoln("Resolving batch of multi addresses...") - - for _, dbmaddr := range dbmaddrs { - if err := resolveAddr(ctx, dbh, mmc, uclient, dbmaddr); err != nil { - log.WithField("maddr", dbmaddr.Maddr).WithError(err).Warnln("Error resolving multi address") - } - } - - return nil -} - -func resolveAddr(ctx context.Context, dbh *sql.DB, mmc *maxmind.Client, uclient *udger.Client, dbmaddr *models.MultiAddress) error { - logEntry := log.WithField("maddr", dbmaddr.Maddr) - txn, err := dbh.BeginTx(ctx, nil) - if err != nil { - return fmt.Errorf("begin txn: %w", err) - } - defer db.Rollback(txn) - - maddr, err := ma.NewMultiaddr(dbmaddr.Maddr) - if err != nil { - logEntry.WithError(err).Warnln("Error parsing multi address - deleting row") - if _, delErr := dbmaddr.Delete(ctx, txn); err != nil { - logEntry.WithError(delErr).Warnln("Error deleting multi address") - return fmt.Errorf("parse multi address: %w", err) - } else { - return txn.Commit() - } - } - - dbmaddr.Resolved = true - dbmaddr.IsPublic = 
null.BoolFrom(manet.IsPublicAddr(maddr)) - dbmaddr.IsRelay = null.BoolFrom(isRelayedMaddr(maddr)) - - addrInfos, err := mmc.MaddrInfo(ctx, maddr) - if err != nil { - logEntry.WithError(err).Warnln("Error deriving address information from maddr ", maddr) - } - - if len(addrInfos) == 0 { - dbmaddr.HasManyAddrs = null.BoolFrom(false) - } else if len(addrInfos) == 1 { - dbmaddr.HasManyAddrs = null.BoolFrom(false) - - // we only have one addrInfo, extract it from the map - var addr string - var addrInfo *maxmind.AddrInfo - for k, v := range addrInfos { - addr, addrInfo = k, v - break - } - - dbmaddr.Asn = null.NewInt(int(addrInfo.ASN), addrInfo.ASN != 0) - dbmaddr.Country = null.NewString(addrInfo.Country, addrInfo.Country != "") - dbmaddr.Continent = null.NewString(addrInfo.Continent, addrInfo.Continent != "") - dbmaddr.Addr = null.NewString(addr, addr != "") - - if uclient != nil { - datacenterID, err := uclient.Datacenter(addr) - if err != nil && !errors.Is(err, sql.ErrNoRows) { - logEntry.WithError(err).WithField("addr", addr).Warnln("Error resolving ip address to datacenter") - } - dbmaddr.IsCloud = null.NewInt(datacenterID, datacenterID != 0) - } - - } else if len(addrInfos) > 1 { // not "else" because the MaddrInfo could have failed and we still want to update the maddr - dbmaddr.HasManyAddrs = null.BoolFrom(true) - - // Due to dnsaddr protocols each multi address can point to multiple - // IP addresses each in a different country. 
- for addr, addrInfo := range addrInfos { - datacenterID := 0 - if uclient != nil { - datacenterID, err = uclient.Datacenter(addr) - if err != nil && !errors.Is(err, sql.ErrNoRows) { - logEntry.WithError(err).WithField("addr", addr).Warnln("Error resolving ip address to datacenter") - } else if datacenterID > 0 { - dbmaddr.IsCloud = null.IntFrom(datacenterID) - } - } - - // Save the IP address + country information + asn information - ipaddr := &models.IPAddress{ - Asn: null.NewInt(int(addrInfo.ASN), addrInfo.ASN != 0), - IsCloud: null.NewInt(datacenterID, datacenterID != 0), - Country: null.NewString(addrInfo.Country, addrInfo.Country != ""), - Continent: null.NewString(addrInfo.Continent, addrInfo.Continent != ""), - Address: addr, - } - if err := dbmaddr.AddIPAddresses(ctx, txn, true, ipaddr); err != nil { - logEntry.WithError(err).WithField("addr", ipaddr.Address).Warnln("Could not insert ip address") - return fmt.Errorf("add ip addresses: %w", err) - } - } - } - - if _, err = dbmaddr.Update(ctx, txn, boil.Infer()); err != nil { - logEntry.WithError(err).Warnln("Could not update multi address") - return fmt.Errorf("update multi address: %w", err) - } - - return txn.Commit() -} - -func isRelayedMaddr(maddr ma.Multiaddr) bool { - _, err := maddr.ValueForProtocol(ma.P_CIRCUIT) - if err == nil { - return true - } else if errors.Is(err, ma.ErrProtocolNotFound) { - return false - } else { - log.WithError(err).WithField("maddr", maddr).Warnln("Unexpected error while parsing multi address") - return false - } -} diff --git a/go-libp2p-kad-dht b/go-libp2p-kad-dht index 845a8aa..628767c 160000 --- a/go-libp2p-kad-dht +++ b/go-libp2p-kad-dht @@ -1 +1 @@ -Subproject commit 845a8aaaad0064c8d6d73c31a9ff0c100c74f149 +Subproject commit 628767c5bb901a8f6669a5773e2c1bf6fc575aa8 diff --git a/go.mod b/go.mod index 03d6c17..cca1a96 100644 --- a/go.mod +++ b/go.mod @@ -10,20 +10,11 @@ go 1.23.1 toolchain go1.23.2 require ( - github.com/friendsofgo/errors v0.9.2 - 
github.com/golang-migrate/migrate/v4 v4.18.1 github.com/jackc/pgx/v5 v5.5.4 - github.com/lib/pq v1.10.9 github.com/libp2p/go-libp2p v0.36.5 github.com/libp2p/go-libp2p-kad-dht v0.27.0 - github.com/mattn/go-sqlite3 v1.14.24 // indirect - github.com/oschwald/geoip2-golang v1.11.0 // indirect github.com/probe-lab/go-libdht v0.1.2-0.20240821100354-770d7b4b2e71 github.com/sirupsen/logrus v1.9.3 - github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 - github.com/volatiletech/null/v8 v8.1.2 - github.com/volatiletech/sqlboiler/v4 v4.16.2 - github.com/volatiletech/strmangle v0.0.6 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 go.opentelemetry.io/otel/exporters/prometheus v0.52.0 go.opentelemetry.io/otel/sdk v1.30.0 @@ -34,45 +25,37 @@ require ( require ( github.com/ClickHouse/clickhouse-go/v2 v2.30.0 - github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43 - github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/ipfs/go-ds-leveldb v0.5.0 github.com/urfave/cli/v2 v2.27.5 ) require ( github.com/ClickHouse/ch-go v0.63.1 // indirect github.com/andybalholm/brotli v1.1.1 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect - github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731 // indirect - github.com/ethereum/go-ethereum v1.14.11 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect - github.com/holiman/uint256 v1.3.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile 
v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/oschwald/maxminddb-golang v1.13.1 // indirect github.com/paulmach/orb v0.11.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect - github.com/spf13/cast v1.7.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/volatiletech/inflect v0.0.1 // indirect - github.com/volatiletech/randomize v0.0.1 // indirect github.com/wlynxg/anet v0.0.5 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect google.golang.org/grpc v1.67.1 // indirect @@ -101,7 +84,7 @@ require ( github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/golang-lru v1.0.2 + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/boxo v0.24.0 // indirect github.com/ipfs/go-cid v0.4.1 @@ -135,7 +118,7 @@ require ( github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.13.0 + github.com/multiformats/go-multiaddr v0.13.0 // indirect github.com/multiformats/go-multiaddr-dns v0.4.0 // 
indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect @@ -188,7 +171,7 @@ require ( golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/net v0.30.0 golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.19.0 // indirect diff --git a/go.sum b/go.sum index 3e8d92c..161f7a8 100644 --- a/go.sum +++ b/go.sum @@ -2,155 +2,47 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= 
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= 
-cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod 
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM= github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0= github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= -github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= -github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1/go.mod 
h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apmckinlay/gsuneido v0.0.0-20190404155041-0b6cd442a18f/go.mod h1:JU2DOj5Fc6rol0yaT79Csr47QR0vONGwJtBNGRD7jmc= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix 
v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= -github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0/go.mod 
h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -161,18 +53,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5il github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod 
h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43 h1:WaLad0gLikVTLgltYg61Iv7MpWiMEVzkisO4yvXBHa0= -github.com/dennis-tra/nebula-crawler v0.0.0-20241105123054-bbd84dcd5b43/go.mod h1:cU29FznX+nakuMKTg+adDtEzzFGC3oOKDf32c4Ps5+M= -github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0= -github.com/dhui/dktest v0.4.3/go.mod h1:zNK8IwktWzQRm6I/l2Wjp7MakiyaFWv4G1hjmodmMTs= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.3.0+incompatible h1:BNb1QY6o4JdKpqwi9IB+HUYcRRrVN4aGFUTvDmWYK1A= -github.com/docker/docker v27.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -183,36 +63,16 @@ github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ericlagergren/decimal v0.0.0-20190420051523-6335edbaa640/go.mod h1:mdYyfAkzn9kyJ/kMk/7WE9ufl9lflh+2NvecQ5mAghs= -github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731 h1:R/ZjJpjQKsZ6L/+Gf9WHbt31GG8NMVcpRqUE+1mMIyo= -github.com/ericlagergren/decimal v0.0.0-20240411145413-00de7ca16731/go.mod h1:M9R1FoZ3y//hwwnJtO51ypFGwm8ZfpxPT/ZLtO1mcgQ= -github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= -github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/friendsofgo/errors v0.9.2 h1:X6NYxef4efCBdwI7BgS820zFaN7Cphrmb+Pljdzjtgk= -github.com/friendsofgo/errors v0.9.2/go.mod h1:yCvFW5AkDIL9qn7suHVLiI/gH228n7PC4Pn44IGoTOI= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -222,22 +82,11 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= @@ -245,42 +94,18 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= -github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= -github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -290,142 +115,73 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA= github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= -github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1/go.mod 
h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/boxo v0.24.0 h1:D9gTU3QdxyjPMlJ6QfqhHTG3TIJPplKzjXLO2J30h9U= github.com/ipfs/boxo v0.24.0/go.mod h1:iP7xUPpHq2QAmVAjwtQvsNBTxTwLpFuy6ZpiRFwmzDA= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb 
v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= @@ -450,20 +206,10 @@ github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPw github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= 
-github.com/kat-co/vala v0.0.0-20170210184112-42e1d8b61f12/go.mod h1:u9MdXq/QageOOSGp7qG4XAQsYUMP+V5zEel/Vrl6OOc= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= @@ -471,16 +217,11 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -490,10 +231,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= -github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= @@ -523,32 +260,14 @@ github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8S github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= -github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= 
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -561,31 +280,9 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -613,48 +310,33 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite 
v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runtime-spec v1.0.2/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w= -github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo= -github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE= -github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= 
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= @@ -699,49 +381,27 @@ github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/webrtc/v3 v3.3.4 h1:v2heQVnXTSqNRXcaFQVOhIOYkLMxOu1iJG8uy1djvkk= github.com/pion/webrtc/v3 v3.3.4/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/probe-lab/go-libdht v0.1.2-0.20240821100354-770d7b4b2e71 h1:REjguBnnfbMAVMTQg1tjUtgKWiTTO9b+hmJLxQK2Ook= github.com/probe-lab/go-libdht 
v0.1.2-0.20240821100354-770d7b4b2e71/go.mod h1:DtQ0H+YdJQlBiyayohV8fKusnjqk88+yuwDKltan8ks= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -752,25 +412,16 @@ github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal 
v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= @@ -796,9 +447,6 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -808,64 +456,35 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cast v1.7.0 
h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c= -github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/volatiletech/inflect v0.0.1 h1:2a6FcMQyhmPZcLa+uet3VJ8gLn/9svWhJxJYwvE8KsU= -github.com/volatiletech/inflect v0.0.1/go.mod h1:IBti31tG6phkHitLlr5j7shC5SOo//x0AjDzaJU1PLA= -github.com/volatiletech/null/v8 v8.1.2 
h1:kiTiX1PpwvuugKwfvUNX/SU/5A2KGZMXfGD0DUHdKEI= -github.com/volatiletech/null/v8 v8.1.2/go.mod h1:98DbwNoKEpRrYtGjWFctievIfm4n4MxG0A6EBUcoS5g= -github.com/volatiletech/randomize v0.0.1 h1:eE5yajattWqTB2/eN8df4dw+8jwAzBtbdo5sbWC4nMk= -github.com/volatiletech/randomize v0.0.1/go.mod h1:GN3U0QYqfZ9FOJ67bzax1cqZ5q2xuj2mXrXBjWaRTlY= -github.com/volatiletech/sqlboiler/v4 v4.16.2 h1:PcV2bxjE+S+GwPKCyX7/AjlY3aiTKsOEjciLhpWQImc= -github.com/volatiletech/sqlboiler/v4 v4.16.2/go.mod h1:B14BPBGTrJ2X6l7lwnvV/iXgYR48+ozGSlzHI3frl6U= -github.com/volatiletech/strmangle v0.0.1/go.mod h1:F6RA6IkB5vq0yTG4GQ0UsbbRcl3ni9P76i+JrTBKFFg= -github.com/volatiletech/strmangle v0.0.6 h1:AdOYE3B2ygRDq4rXDij/MMwq6KVK/pWAYxpC7CLrkKQ= -github.com/volatiletech/strmangle v0.0.6/go.mod h1:ycDvbDkjDvhC0NUU8w3fWwl5JEMTV56vTKXzR3GeR+0= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -881,29 +500,14 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBi github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= @@ -920,9 +524,9 @@ go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792j go.opentelemetry.io/otel/sdk/metric 
v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -935,77 +539,45 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod 
h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp 
v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -1016,55 +588,21 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= @@ -1077,24 +615,6 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1102,110 +622,39 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1222,13 +671,10 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod 
h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1239,8 +685,6 @@ golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1249,58 +693,15 @@ golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools 
v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= @@ -1310,146 +711,22 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod 
h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto 
v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 
h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= @@ -1458,38 +735,12 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc 
v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1498,15 +749,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1515,73 +762,26 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc 
v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/keys.go b/keys.go index 07c866e..2d019d9 100644 --- a/keys.go +++ b/keys.go @@ 
-27,7 +27,7 @@ func (db *KeysDB) readKeysFromFile() *trie.Trie[bit256.Key, crypto.PrivKey] { keysTrie := trie.New[bit256.Key, crypto.PrivKey]() // load file - file, err := os.OpenFile(db.filepath, os.O_RDONLY, 0600) + file, err := os.OpenFile(db.filepath, os.O_RDONLY, 0o600) if err != nil { logger.Warn("Couldn't open file", db.filepath, ":", err) return keysTrie @@ -66,7 +66,7 @@ func (db *KeysDB) readKeysFromFile() *trie.Trie[bit256.Key, crypto.PrivKey] { } func (db *KeysDB) writeKeysToFile(keysTrie *trie.Trie[bit256.Key, crypto.PrivKey]) { - file, err := os.OpenFile(db.filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + file, err := os.OpenFile(db.filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) if err != nil { logger.Warn("Couldn't open file", db.filepath, ":", err) return @@ -99,7 +99,6 @@ func integrateKeysIntoTrie(keysTrie *trie.Trie[bit256.Key, crypto.PrivKey], keys } pid, err := peer.IDFromPrivateKey(key) - if err != nil { logger.Warnf("Error getting peer ID: %v", err) continue diff --git a/nebuladb.go b/nebuladb.go index 155ae75..fb4aca3 100644 --- a/nebuladb.go +++ b/nebuladb.go @@ -8,15 +8,23 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) +type NebulaProvider interface { + GetLatestPeerIds(ctx context.Context) ([]peer.ID, error) +} + type NebulaDB struct { - ConnString string + ConnString string + CrawlInterval time.Duration connPool *pgxpool.Pool } -func NewNebulaDB(connString string) *NebulaDB { +var _ NebulaProvider = (*NebulaDB)(nil) + +func NewNebulaDB(connString string, crawlInterval time.Duration) *NebulaDB { return &NebulaDB{ - ConnString: connString, + ConnString: connString, + CrawlInterval: crawlInterval, } } @@ -59,7 +67,7 @@ func (db *NebulaDB) GetLatestPeerIds(ctx context.Context) ([]peer.ID, error) { LIMIT 1 ` - crawlIntervalAgo := time.Now().Add(-CRAWL_INTERVAL) + crawlIntervalAgo := time.Now().Add(-db.CrawlInterval) var crawlId uint64 err := db.connPool.QueryRow(ctx, crawlIdQuery, crawlIntervalAgo).Scan(&crawlId) if err != 
nil { @@ -76,7 +84,7 @@ func (db *NebulaDB) GetLatestPeerIds(ctx context.Context) ([]peer.ID, error) { AND v.connect_error IS NULL ` - beforeLastCrawlStarted := crawlIntervalAgo.Add(-CRAWL_INTERVAL) + beforeLastCrawlStarted := crawlIntervalAgo.Add(-db.CrawlInterval) rows, err := db.connPool.Query(ctx, peersQuery, beforeLastCrawlStarted, crawlId) if err != nil { logger.Warn("unable to get peers from Nebula DB: ", err) @@ -101,3 +109,19 @@ func (db *NebulaDB) GetLatestPeerIds(ctx context.Context) ([]peer.ID, error) { return peerIds, nil } + +type NebulaServiceProvider struct { + address string +} + +var _ NebulaProvider = (*NebulaServiceProvider)(nil) + +func NewNebulaServiceProvider() *NebulaServiceProvider { + // TODO: init gRPC service client + return &NebulaServiceProvider{} +} + +func (n NebulaServiceProvider) GetLatestPeerIds(ctx context.Context) ([]peer.ID, error) { + // TODO: request peers from Nebula service + return nil, nil +} diff --git a/queen.go b/queen.go index d8a2f9f..5309fed 100644 --- a/queen.go +++ b/queen.go @@ -3,131 +3,119 @@ package ants import ( "context" "fmt" - "os" - "strconv" "time" + "github.com/google/uuid" + "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" + leveldb "github.com/ipfs/go-ds-leveldb" "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-kad-dht/antslog" - kadpb "github.com/libp2p/go-libp2p-kad-dht/pb" + "github.com/libp2p/go-libp2p-kad-dht/ants" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "github.com/probe-lab/ants-watch/db" "github.com/probe-lab/go-libdht/kad" "github.com/probe-lab/go-libdht/kad/key" "github.com/probe-lab/go-libdht/kad/key/bit256" "github.com/probe-lab/go-libdht/kad/key/bitstr" "github.com/probe-lab/go-libdht/kad/trie" - 
"github.com/volatiletech/null/v8" - - "github.com/patrickmn/go-cache" - "github.com/probe-lab/ants-watch/db" - "github.com/probe-lab/ants-watch/db/models" ) var logger = log.Logger("ants-queen") +type QueenConfig struct { + KeysDBPath string + NPorts int + FirstPort int + UPnP bool + BatchSize int + BatchTime time.Duration + CrawlInterval time.Duration + CacheSize int + NebulaDBConnString string +} + type Queen struct { + cfg *QueenConfig + nebulaDB *NebulaDB keysDB *KeysDB - peerstore peerstore.Peerstore - datastore ds.Batching - agentsCache *cache.Cache + peerstore peerstore.Peerstore + datastore ds.Batching + agentsCache *lru.Cache[string, string] + protocolsCache *lru.Cache[string, []protocol.ID] - ants []*Ant - antsLogs chan antslog.RequestLog + ants []*Ant + antsEvents chan ants.RequestEvent - upnp bool // portsOccupancy is a slice of bools that represent the occupancy of the ports // false corresponds to an available port, true to an occupied port // the first item of the slice corresponds to the firstPort - portsOccupancy []bool - firstPort uint16 - - clickhouseClient *db.Client - - resolveBatchSize int - resolveBatchTime int // in sec + portsOccupancy []bool + clickhouseClient db.Client } -func NewQueen(ctx context.Context, dbConnString string, keysDbPath string, nPorts, firstPort uint16, clickhouseClient *db.Client) (*Queen, error) { - nebulaDB := NewNebulaDB(dbConnString) - keysDB := NewKeysDB(keysDbPath) - peerstore, err := pstoremem.NewPeerstore() +func NewQueen(clickhouseClient db.Client, cfg *QueenConfig) (*Queen, error) { + ps, err := pstoremem.NewPeerstore() if err != nil { - return nil, err + return nil, fmt.Errorf("creating peerstore: %w", err) } - queen := &Queen{ - nebulaDB: nebulaDB, - keysDB: keysDB, - peerstore: peerstore, - datastore: dssync.MutexWrap(ds.NewMapDatastore()), - ants: []*Ant{}, - antsLogs: make(chan antslog.RequestLog, 1024), - agentsCache: cache.New(4*24*time.Hour, time.Hour), // 4 days of cache, clean every hour - upnp: 
true, - resolveBatchSize: getBatchSize(), - resolveBatchTime: getBatchTime(), - clickhouseClient: clickhouseClient, + ldb, err := leveldb.NewDatastore("", nil) // empty string means in-memory + if err != nil { + return nil, fmt.Errorf("creating in-memory leveldb: %w", err) } - if nPorts != 0 { - queen.upnp = false - queen.firstPort = firstPort - queen.portsOccupancy = make([]bool, nPorts) + agentsCache, err := lru.New[string, string](cfg.CacheSize) + if err != nil { + return nil, fmt.Errorf("init agents cache: %w", err) } - logger.Info("queen created") - - return queen, nil -} - -func getBatchSize() int { - batchSizeEnvVal := os.Getenv("BATCH_SIZE") - if len(batchSizeEnvVal) == 0 { - batchSizeEnvVal = "1000" - } - batchSize, err := strconv.Atoi(batchSizeEnvVal) + protocolsCache, err := lru.New[string, []protocol.ID](cfg.CacheSize) if err != nil { - logger.Errorln("BATCH_SIZE should be an integer") + return nil, fmt.Errorf("init agents cache: %w", err) } - return batchSize -} -func getBatchTime() int { - batchTimeEnvVal := os.Getenv("BATCH_TIME") - if len(batchTimeEnvVal) == 0 { - batchTimeEnvVal = "30" - } - batchTime, err := strconv.Atoi(batchTimeEnvVal) - if err != nil { - logger.Errorln("BATCH_TIME should be an integer") + queen := &Queen{ + cfg: cfg, + nebulaDB: NewNebulaDB(cfg.NebulaDBConnString, cfg.CrawlInterval), + keysDB: NewKeysDB(cfg.KeysDBPath), + peerstore: ps, + datastore: ldb, + ants: []*Ant{}, + antsEvents: make(chan ants.RequestEvent, 1024), + agentsCache: agentsCache, + protocolsCache: protocolsCache, + clickhouseClient: clickhouseClient, + portsOccupancy: make([]bool, cfg.NPorts), } - return batchTime + + return queen, nil } -func (q *Queen) takeAvailablePort() (uint16, error) { - if q.upnp { +func (q *Queen) takeAvailablePort() (int, error) { + if q.cfg.UPnP { return 0, nil } + for i, occupied := range q.portsOccupancy { - if !occupied { - q.portsOccupancy[i] = true - return q.firstPort + uint16(i), nil + if occupied { + continue } + 
q.portsOccupancy[i] = true + return q.cfg.FirstPort + i, nil } + return 0, fmt.Errorf("no available port") } -func (q *Queen) freePort(port uint16) { - if !q.upnp { - q.portsOccupancy[port-q.firstPort] = false +func (q *Queen) freePort(port int) { + if !q.cfg.UPnP { + q.portsOccupancy[port-q.cfg.FirstPort] = false } } @@ -135,9 +123,13 @@ func (q *Queen) Run(ctx context.Context) error { logger.Debugln("Queen.Run started") defer logger.Debugln("Queen.Run completing") - go q.consumeAntsLogs(ctx) + if err := q.nebulaDB.Open(ctx); err != nil { + return fmt.Errorf("opening nebula db: %w", err) + } + + go q.consumeAntsEvents(ctx) - crawlTime := time.NewTicker(CRAWL_INTERVAL) + crawlTime := time.NewTicker(q.cfg.CrawlInterval) defer crawlTime.Stop() q.routine(ctx) @@ -154,76 +146,88 @@ func (q *Queen) Run(ctx context.Context) error { } } -func (q *Queen) consumeAntsLogs(ctx context.Context) { - requests := make([]models.RequestsDenormalized, 0, q.resolveBatchSize) +func (q *Queen) consumeAntsEvents(ctx context.Context) { + requests := make([]*db.Request, 0, q.cfg.BatchSize) + // bulk insert for every batch size or N seconds, whichever comes first - ticker := time.NewTicker(time.Duration(q.resolveBatchTime) * time.Second) + ticker := time.NewTicker(q.cfg.BatchTime) defer ticker.Stop() for { select { - case <-ctx.Done(): logger.Debugln("Gracefully shutting down ants...") logger.Debugln("Number of requests remaining to be inserted:", len(requests)) - // if len(requests) > 0 { - // err := db.BulkInsertRequests(context.Background(), q.dbc.Handler, requests) - // if err != nil { - // logger.Fatalf("Error inserting remaining requests: %v", err) - // } - // } - return - case log := <-q.antsLogs: - reqType := kadpb.Message_MessageType(log.Type).String() - maddrs := q.peerstore.Addrs(log.Requester) - var agent string - peerstoreAgent, err := q.peerstore.Get(log.Requester, "AgentVersion") - if err != nil { - if peerstoreAgent, ok := q.agentsCache.Get(log.Requester.String()); ok { - 
agent = peerstoreAgent.(string) - } else { - agent = "" + if len(requests) > 0 { + if err := q.clickhouseClient.BulkInsertRequests(ctx, requests); err != nil { + logger.Errorf("Error inserting requests: %v", err) } + requests = requests[:0] + } + return + + case evt := <-q.antsEvents: + + // transform multi addresses + maddrStrs := make([]string, len(evt.Maddrs)) + for i, maddr := range evt.Maddrs { + maddrStrs[i] = maddr.String() + } + + // cache agent version + if evt.AgentVersion == "" { + evt.AgentVersion, _ = q.agentsCache.Get(evt.Remote.String()) } else { - agent = peerstoreAgent.(string) - q.agentsCache.Set(log.Requester.String(), agent, 0) + q.agentsCache.Add(evt.Remote.String(), evt.AgentVersion) } - protocols, _ := q.peerstore.GetProtocols(log.Requester) - protocolsAsStr := protocol.ConvertToStrings(protocols) - - request := models.RequestsDenormalized{ - RequestStartedAt: log.Timestamp, - RequestType: reqType, - AntMultihash: log.Self.String(), - PeerMultihash: log.Requester.String(), - KeyMultihash: log.Target.B58String(), - MultiAddresses: db.MaddrsToAddrs(maddrs), - AgentVersion: null.StringFrom(agent), - Protocols: protocolsAsStr, + // cache protocols + var protocols []protocol.ID + if len(evt.Protocols) == 0 { + protocols, _ = q.protocolsCache.Get(evt.Remote.String()) + } else { + protocols = evt.Protocols + q.protocolsCache.Add(evt.Remote.String(), evt.Protocols) } + protocolStrs := protocol.ConvertToStrings(protocols) + + uuidv7, err := uuid.NewV7() + if err != nil { + logger.Warn("Error generating uuid") + continue + } + + request := &db.Request{ + UUID: uuidv7, + AntID: evt.Self, + RemoteID: evt.Remote, + Type: evt.Type, + AgentVersion: evt.AgentVersion, + Protocols: protocolStrs, + StartedAt: evt.Timestamp, + KeyID: evt.Target.B58String(), + MultiAddresses: maddrStrs, + } + requests = append(requests, request) - if len(requests) >= q.resolveBatchSize { - // err = db.BulkInsertRequests(ctx, q.dbc.Handler, requests) - // if err != nil { - // 
logger.Errorf("Error inserting requests: %v", err) - // } - // requests = requests[:0] + + if len(requests) >= q.cfg.BatchSize { + if err = q.clickhouseClient.BulkInsertRequests(ctx, requests); err != nil { + logger.Errorf("Error inserting requests: %v", err) + } + requests = requests[:0] } case <-ticker.C: - if len(requests) > 0 { - // err := db.BulkInsertRequests(ctx, q.dbc.Handler, requests) - // if err != nil { - // logger.Fatalf("Error inserting requests: %v", err) - // } - // requests = requests[:0] + if len(requests) == 0 { + continue } - default: - // against busy-looping since <-q.antsLogs is a busy chan - time.Sleep(10 * time.Millisecond) + if err := q.clickhouseClient.BulkInsertRequests(ctx, requests); err != nil { + logger.Errorf("Error inserting requests: %v", err) + } + requests = requests[:0] } } } @@ -251,7 +255,7 @@ func (q *Queen) routine(ctx context.Context) { } // zones correspond to the prefixes of the tries that must be covered by an ant - zones := trieZones(networkTrie, BUCKET_SIZE) + zones := trieZones(networkTrie, bucketSize) logger.Debugf("%d zones must be covered by ants", len(zones)) // convert string zone to bitstr.Key @@ -301,7 +305,7 @@ func (q *Queen) routine(ctx context.Context) { logger.Error("trying to spawn new ant: ") continue } - ant, err := SpawnAnt(ctx, key, q.peerstore, q.datastore, port, q.antsLogs) + ant, err := SpawnAnt(ctx, key, q.peerstore, q.datastore, port, q.antsEvents) if err != nil { logger.Warn("error creating ant", err) } diff --git a/util.go b/util.go index a154e2c..610b1c8 100644 --- a/util.go +++ b/util.go @@ -1,14 +1,8 @@ package ants import ( - "fmt" - "os" - "strconv" - "time" - "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" - mh "github.com/multiformats/go-multihash" mhreg "github.com/multiformats/go-multihash/core" @@ -16,10 +10,7 @@ import ( "github.com/probe-lab/go-libdht/kad/key/bitstr" ) -const ( - CRAWL_INTERVAL = 120 * time.Minute - BUCKET_SIZE = 20 -) +const bucketSize = 20 func 
PeeridToKadid(pid peer.ID) bit256.Key { hasher, _ := mhreg.GetHasher(mh.SHA2_256) @@ -55,17 +46,3 @@ func bitstrToBit256(strKey bitstr.Key, padding []byte) bit256.Key { } return bit256.NewKey(bit256Key) } - -func getEnvInt(key string, defaultValue int) (int, error) { - value, exists := os.LookupEnv(key) - if !exists { - return defaultValue, nil - } - - result, err := strconv.Atoi(value) - if err != nil { - return 0, fmt.Errorf("%s must be an int: %w", key, err) - } - - return result, nil -} From 238205e2ddcdc5135c4c5c473ce9f8bc39c74d71 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 2 Dec 2024 17:49:47 +0100 Subject: [PATCH 11/23] map all cli flags --- cmd/honeypot/main.go | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/cmd/honeypot/main.go b/cmd/honeypot/main.go index 5a5fb1d..d275d63 100644 --- a/cmd/honeypot/main.go +++ b/cmd/honeypot/main.go @@ -132,24 +132,32 @@ func main() { Value: rootConfig.CacheSize, }, &cli.PathFlag{ - Name: "key.db_path", - Usage: "The path to the data store containing the keys", - EnvVars: []string{"KEY_DB_PATH"}, + Name: "key.db_path", + Usage: "The path to the data store containing the keys", + EnvVars: []string{"KEY_DB_PATH"}, + Destination: &rootConfig.KeyDBPath, + Value: rootConfig.KeyDBPath, }, &cli.IntFlag{ - Name: "num_ports", - Value: 128, - Usage: "Number of ports ants can listen on", + Name: "num_ports", + Usage: "Number of ports ants can listen on", + EnvVars: []string{"ANTS_NUM_PORTS"}, + Destination: &rootConfig.NumPorts, + Value: rootConfig.NumPorts, }, &cli.IntFlag{ - Name: "first_port", - Value: 6000, - Usage: "First port ants can listen on", + Name: "first_port", + Usage: "First port ants can listen on", + EnvVars: []string{"ANTS_FIRST_PORT"}, + Destination: &rootConfig.FirstPort, + Value: rootConfig.FirstPort, }, &cli.BoolFlag{ - Name: "upnp", - Value: false, - Usage: "Enable UPnP", + Name: "upnp", + Usage: "Enable UPnP", + EnvVars: []string{"ANTS_UPNP"}, 
+ Destination: &rootConfig.UPnp, + Value: rootConfig.UPnp, }, }, Action: runQueenCommand, From 9b13cdcc5069488b51bb812e4d8573bcc47aa314 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 14:23:52 +0100 Subject: [PATCH 12/23] add queen ID to differentiate deployments --- db/client.go | 1 + db/migrations/000001_create_requests_table.up.sql | 1 + db/models.go | 1 + queen.go | 3 +++ 4 files changed, 6 insertions(+) diff --git a/db/client.go b/db/client.go index de0f4a8..f188e07 100644 --- a/db/client.go +++ b/db/client.go @@ -66,6 +66,7 @@ func (c *ClickhouseClient) BulkInsertRequests(ctx context.Context, requests []*R for _, r := range requests { err = batch.Append( r.UUID.String(), + r.QueenID, r.AntID.String(), r.RemoteID.String(), r.AgentVersion, diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index 5de5507..7fe8529 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -1,6 +1,7 @@ CREATE TABLE requests ( id UUID, + queen_id UUID, ant_multihash String, remote_multihash String, agent_version String, diff --git a/db/models.go b/db/models.go index 5b887e8..513b2fc 100644 --- a/db/models.go +++ b/db/models.go @@ -10,6 +10,7 @@ import ( type Request struct { UUID uuid.UUID + QueenID string AntID peer.ID RemoteID peer.ID Type pb.Message_MessageType diff --git a/queen.go b/queen.go index 5309fed..870ba2d 100644 --- a/queen.go +++ b/queen.go @@ -41,6 +41,7 @@ type QueenConfig struct { type Queen struct { cfg *QueenConfig + id string nebulaDB *NebulaDB keysDB *KeysDB @@ -82,6 +83,7 @@ func NewQueen(clickhouseClient db.Client, cfg *QueenConfig) (*Queen, error) { queen := &Queen{ cfg: cfg, + id: uuid.NewString(), nebulaDB: NewNebulaDB(cfg.NebulaDBConnString, cfg.CrawlInterval), keysDB: NewKeysDB(cfg.KeysDBPath), peerstore: ps, @@ -200,6 +202,7 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { request := 
&db.Request{ UUID: uuidv7, + QueenID: q.id, AntID: evt.Self, RemoteID: evt.Remote, Type: evt.Type, From 0ed1af9b03bce5bec06e1de02eab884f594c3d68 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 14:24:27 +0100 Subject: [PATCH 13/23] rename: honeypot package to ants --- cmd/{honeypot => ants}/health.go | 0 cmd/{honeypot => ants}/main.go | 142 +++++++++++++++++++------------ 2 files changed, 86 insertions(+), 56 deletions(-) rename cmd/{honeypot => ants}/health.go (100%) rename cmd/{honeypot => ants}/main.go (64%) diff --git a/cmd/honeypot/health.go b/cmd/ants/health.go similarity index 100% rename from cmd/honeypot/health.go rename to cmd/ants/health.go diff --git a/cmd/honeypot/main.go b/cmd/ants/main.go similarity index 64% rename from cmd/honeypot/main.go rename to cmd/ants/main.go index d275d63..0afe484 100644 --- a/cmd/honeypot/main.go +++ b/cmd/ants/main.go @@ -17,35 +17,42 @@ import ( var logger = logging.Logger("ants-queen") var rootConfig = struct { - AntsClickhouseAddress string - AntsClickhouseDatabase string - AntsClickhouseUsername string - AntsClickhousePassword string - AntsClickhouseSSL bool - NebulaDBConnString string - KeyDBPath string - NumPorts int - FirstPort int - UPnp bool - BatchSize int - BatchTime time.Duration - CrawlInterval time.Duration - CacheSize int + ClickhouseAddress string + ClickhouseDatabase string + ClickhouseUsername string + ClickhousePassword string + ClickhouseSSL bool + NebulaDBConnString string + KeyDBPath string + NumPorts int + FirstPort int + UPnp bool + BatchSize int + BatchTime time.Duration + CrawlInterval time.Duration + CacheSize int + BucketSize int + UserAgent string + ProtocolPrefix string + QueenID string }{ - AntsClickhouseAddress: "", - AntsClickhouseDatabase: "", - AntsClickhouseUsername: "", - AntsClickhousePassword: "", - AntsClickhouseSSL: true, - NebulaDBConnString: "", - KeyDBPath: "keys.db", - NumPorts: 128, - FirstPort: 6000, - UPnp: false, - BatchSize: 1000, - BatchTime: 
time.Second, - CrawlInterval: 120 * time.Minute, - CacheSize: 10_000, + ClickhouseAddress: "", + ClickhouseDatabase: "", + ClickhouseUsername: "", + ClickhousePassword: "", + ClickhouseSSL: true, + NebulaDBConnString: "", + KeyDBPath: "keys.db", + NumPorts: 128, + FirstPort: 6000, + UPnp: false, + BatchSize: 1000, + BatchTime: time.Second, + CrawlInterval: 120 * time.Minute, + CacheSize: 10_000, + BucketSize: 20, + UserAgent: "celestiant", + QueenID: "", } func main() { @@ -62,44 +69,44 @@ func main() { Usage: "Starts the queen service", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "ants.clickhouse.address", + Name: "clickhouse.address", Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", EnvVars: []string{"ANTS_CLICKHOUSE_ADDRESS"}, - Destination: &rootConfig.AntsClickhouseAddress, - Value: rootConfig.AntsClickhouseAddress, + Destination: &rootConfig.ClickhouseAddress, + Value: rootConfig.ClickhouseAddress, }, &cli.StringFlag{ - Name: "ants.clickhouse.database", + Name: "clickhouse.database", Usage: "The ClickHouse database where ants requests will be recorded", EnvVars: []string{"ANTS_CLICKHOUSE_DATABASE"}, - Destination: &rootConfig.AntsClickhouseDatabase, - Value: rootConfig.AntsClickhouseDatabase, + Destination: &rootConfig.ClickhouseDatabase, + Value: rootConfig.ClickhouseDatabase, }, &cli.StringFlag{ - Name: "ants.clickhouse.username", + Name: "clickhouse.username", Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", EnvVars: []string{"ANTS_CLICKHOUSE_USERNAME"}, - Destination: &rootConfig.AntsClickhouseUsername, - Value: rootConfig.AntsClickhouseUsername, + Destination: &rootConfig.ClickhouseUsername, + Value: rootConfig.ClickhouseUsername, }, &cli.StringFlag{ - Name: "ants.clickhouse.password", + Name: "clickhouse.password", Usage: "The password for the ClickHouse user", EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, - Destination: &rootConfig.AntsClickhousePassword, - Value: 
rootConfig.AntsClickhousePassword, + Destination: &rootConfig.ClickhousePassword, + Value: rootConfig.ClickhousePassword, }, &cli.BoolFlag{ - Name: "ants.clickhouse.ssl", + Name: "clickhouse.ssl", Usage: "Whether to use SSL for the ClickHouse connection", EnvVars: []string{"ANTS_CLICKHOUSE_SSL"}, - Destination: &rootConfig.AntsClickhouseSSL, - Value: rootConfig.AntsClickhouseSSL, + Destination: &rootConfig.ClickhouseSSL, + Value: rootConfig.ClickhouseSSL, }, &cli.StringFlag{ - Name: "nebula.db.connstring", + Name: "nebula.connstring", Usage: "The connection string for the Postgres Nebula database", - EnvVars: []string{"NEBULA_DB_CONNSTRING"}, + EnvVars: []string{"ANTS_NEBULA_CONNSTRING"}, Destination: &rootConfig.NebulaDBConnString, Value: rootConfig.NebulaDBConnString, }, @@ -132,9 +139,9 @@ func main() { Value: rootConfig.CacheSize, }, &cli.PathFlag{ - Name: "key.db_path", + Name: "key.path", Usage: "The path to the data store containing the keys", - EnvVars: []string{"KEY_DB_PATH"}, + EnvVars: []string{"ANTS_KEY_PATH"}, Destination: &rootConfig.KeyDBPath, Value: rootConfig.KeyDBPath, }, @@ -159,6 +166,28 @@ func main() { Destination: &rootConfig.UPnp, Value: rootConfig.UPnp, }, + &cli.IntFlag{ + Name: "bucket.size", + Usage: "The bucket size for the ants DHT", + EnvVars: []string{"ANTS_BUCKET_SIZE"}, + Destination: &rootConfig.BucketSize, + Value: rootConfig.BucketSize, + }, + &cli.StringFlag{ + Name: "user.agent", + Usage: "The user agent to use for the ants hosts", + EnvVars: []string{"ANTS_USER_AGENT"}, + Destination: &rootConfig.UserAgent, + Value: rootConfig.UserAgent, + }, + &cli.StringFlag{ + Name: "queen.id", + Usage: "The ID for the queen that's orchestrating the ants", + EnvVars: []string{"ANTS_QUEEN_ID"}, + Destination: &rootConfig.QueenID, + Value: rootConfig.QueenID, + DefaultText: "generated", + }, }, Action: runQueenCommand, }, @@ -187,16 +216,15 @@ func main() { func runQueenCommand(c *cli.Context) error { ctx := c.Context - // initializing new 
clickhouse client + // initializing a new clickhouse client client, err := db.NewClient( - rootConfig.AntsClickhouseAddress, - rootConfig.AntsClickhouseDatabase, - rootConfig.AntsClickhouseUsername, - rootConfig.AntsClickhousePassword, - rootConfig.AntsClickhouseSSL, + rootConfig.ClickhouseAddress, + rootConfig.ClickhouseDatabase, + rootConfig.ClickhouseUsername, + rootConfig.ClickhousePassword, + rootConfig.ClickhouseSSL, ) if err != nil { - logger.Errorln(err) return fmt.Errorf("init database client: %w", err) } @@ -217,12 +245,14 @@ func runQueenCommand(c *cli.Context) error { CrawlInterval: rootConfig.CrawlInterval, CacheSize: rootConfig.CacheSize, NebulaDBConnString: rootConfig.NebulaDBConnString, + BucketSize: rootConfig.BucketSize, + UserAgent: rootConfig.UserAgent, } - // initializting queen + // initializing queen queen, err := ants.NewQueen(client, queenCfg) if err != nil { - return fmt.Errorf("failed to create queen: %w", err) + return fmt.Errorf("create queen: %w", err) } errChan := make(chan error, 1) From 8acd1a1ef63549899d7631e4fba248485eaa02bd Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 14:24:57 +0100 Subject: [PATCH 14/23] adjust ants initialization --- ant.go | 90 +++++++++++++++++++++++++++++++++++++------------------- queen.go | 23 ++++++++------- 2 files changed, 73 insertions(+), 40 deletions(-) diff --git a/ant.go b/ant.go index f102f13..e0b04e6 100644 --- a/ant.go +++ b/ant.go @@ -22,21 +22,54 @@ const ( userAgent = "celestiant" ) -type Ant struct { - port int - dht *kad.IpfsDHT - privKey crypto.PrivKey +type AntConfig struct { + PrivateKey crypto.PrivKey + UserAgent string + Port int + ProtocolPrefix string + BootstrapPeers []peer.AddrInfo + EventsChan chan ants.RequestEvent +} + +func (cfg *AntConfig) Validate() error { + if cfg.PrivateKey == nil { + return fmt.Errorf("no ant private key given") + } + + if cfg.UserAgent == "" { + return fmt.Errorf("user agent is not set") + } + + if cfg.ProtocolPrefix == "" { + 
return fmt.Errorf("protocol prefix is not set") + } + + if len(cfg.BootstrapPeers) == 0 { + return fmt.Errorf("bootstrap peers are not set") + } + + if cfg.EventsChan == nil { + return fmt.Errorf("events channel is not set") + } - Host host.Host - KadId bit256.Key - UserAgent string + return nil } -func SpawnAnt(ctx context.Context, privKey crypto.PrivKey, peerstore peerstore.Peerstore, datastore ds.Batching, port int, logsChan chan ants.RequestEvent) (*Ant, error) { - pid, _ := peer.IDFromPrivateKey(privKey) - logger.Debugf("spawning ant. kadid: %s, peerid: %s", PeeridToKadid(pid).HexString(), pid) +type Ant struct { + cfg *AntConfig + host host.Host + dht *kad.IpfsDHT + kadID bit256.Key +} - portStr := fmt.Sprint(port) +func SpawnAnt(ctx context.Context, ps peerstore.Peerstore, ds ds.Batching, cfg *AntConfig) (*Ant, error) { + if cfg == nil { + return nil, fmt.Errorf("no config given") + } else if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + portStr := fmt.Sprint(cfg.Port) // taken from github.com/celestiaorg/celestia-node/nodebuilder/p2p/config.go // ports are assigned automatically @@ -51,43 +84,40 @@ func SpawnAnt(ctx context.Context, privKey crypto.PrivKey, peerstore peerstore.P opts := []libp2p.Option{ libp2p.UserAgent(userAgent), - libp2p.Identity(privKey), - libp2p.Peerstore(peerstore), + libp2p.Identity(cfg.PrivateKey), + libp2p.Peerstore(ps), libp2p.DisableRelay(), - libp2p.ListenAddrStrings(listenAddrs...), } - if port == 0 { + if cfg.Port == 0 { opts = append(opts, libp2p.NATPortMap()) // enable NAT port mapping if no port is specified } h, err := libp2p.New(opts...) 
if err != nil { - logger.Warn("unable to create libp2p host: ", err) - return nil, err + return nil, fmt.Errorf("new libp2p host: %w", err) } dhtOpts := []kad.Option{ kad.Mode(kad.ModeServer), - kad.BootstrapPeers(BootstrapPeers(celestiaNet)...), - kad.ProtocolPrefix(protocol.ID(fmt.Sprintf("/celestia/%s", celestiaNet))), - kad.Datastore(datastore), - kad.RequestsLogChan(logsChan), + kad.BootstrapPeers(cfg.BootstrapPeers...), + kad.ProtocolPrefix(protocol.ID(cfg.ProtocolPrefix)), + kad.Datastore(ds), + kad.RequestsLogChan(cfg.EventsChan), } dht, err := kad.New(ctx, h, dhtOpts...) if err != nil { - logger.Warn("unable to create libp2p dht: ", err) - return nil, err + return nil, fmt.Errorf("new libp2p dht: %w", err) } + logger.Debugf("spawned ant. kadid: %s, peerid: %s", PeerIDToKadID(h.ID()).HexString(), h.ID()) + ant := &Ant{ - Host: h, - dht: dht, - privKey: privKey, - KadId: PeeridToKadid(h.ID()), - port: port, - UserAgent: userAgent, + cfg: cfg, + host: h, + dht: dht, + kadID: PeerIDToKadID(h.ID()), } go dht.Bootstrap(ctx) @@ -100,5 +130,5 @@ func (a *Ant) Close() error { if err != nil { return err } - return a.Host.Close() + return a.host.Close() } diff --git a/queen.go b/queen.go index 870ba2d..f949847 100644 --- a/queen.go +++ b/queen.go @@ -308,20 +308,23 @@ func (q *Queen) routine(ctx context.Context) { logger.Error("trying to spawn new ant: ") continue } - ant, err := SpawnAnt(ctx, key, q.peerstore, q.datastore, port, q.antsEvents) - if err != nil { - logger.Warn("error creating ant", err) + + antCfg := &AntConfig{ + PrivateKey: key, + UserAgent: q.cfg.UserAgent, + Port: port, + ProtocolPrefix: fmt.Sprintf("/celestia/%s", celestiaNet), // TODO: parameterize + BootstrapPeers: BootstrapPeers(celestiaNet), // TODO: parameterize + EventsChan: q.antsEvents, } - q.ants = append(q.ants, ant) - } - for _, ant := range q.ants { - logger.Debugf("Upserting ant: %v\n", ant.Host.ID().String()) - // antID, err := q.dbc.UpsertPeer(ctx, ant.Host.ID().String(), 
null.StringFrom(ant.UserAgent), nil, time.Now()) + ant, err := SpawnAnt(ctx, q.peerstore, q.datastore, antCfg) if err != nil { - logger.Errorf("Couldn't upsert") - // logger.Errorf("antID: %d could not be inserted because of %v", antID, err) + logger.Warn("error creating ant", err) + continue } + + q.ants = append(q.ants, ant) } logger.Debugf("ants count: %d", len(q.ants)) From a46bcd437694ce88bb5da270ae232f837b11925c Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 14:25:22 +0100 Subject: [PATCH 15/23] style: use go idomatic ID function name --- keys.go | 8 ++++---- keys_test.go | 6 +++--- util.go | 9 +++++---- util_test.go | 4 ++-- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/keys.go b/keys.go index 2d019d9..4e8e05a 100644 --- a/keys.go +++ b/keys.go @@ -60,7 +60,7 @@ func (db *KeysDB) readKeysFromFile() *trie.Trie[bit256.Key, crypto.PrivKey] { } // Add to your keysTrie or equivalent data structure - keysTrie.Add(PeeridToKadid(pid), privKey) + keysTrie.Add(PeerIDToKadID(pid), privKey) } return keysTrie } @@ -103,7 +103,7 @@ func integrateKeysIntoTrie(keysTrie *trie.Trie[bit256.Key, crypto.PrivKey], keys logger.Warnf("Error getting peer ID: %v", err) continue } - keysTrie.Add(PeeridToKadid(pid), key) + keysTrie.Add(PeerIDToKadID(pid), key) } } @@ -144,12 +144,12 @@ func getMatchingKeys(prefixes []bitstr.Key, keysTrie *trie.Trie[bit256.Key, cryp continue } // check if the new key matches the prefix - if key.CommonPrefixLength(PeeridToKadid(pid), prefix) == prefix.BitLen() { + if key.CommonPrefixLength(PeerIDToKadID(pid), prefix) == prefix.BitLen() { keys[i] = newKey break } // add to keysTrie if not matching prefix - keysTrie.Add(PeeridToKadid(pid), newKey) + keysTrie.Add(PeerIDToKadID(pid), newKey) } } } diff --git a/keys_test.go b/keys_test.go index bc71514..fc8989f 100644 --- a/keys_test.go +++ b/keys_test.go @@ -24,7 +24,7 @@ func TestWriteReadKeys(t *testing.T) { require.NoError(t, err) pid, err := 
peer.IDFromPrivateKey(priv) require.NoError(t, err) - keysTrie.Add(PeeridToKadid(pid), priv) + keysTrie.Add(PeerIDToKadID(pid), priv) } db := NewKeysDB(filename) @@ -77,7 +77,7 @@ func TestKeysDB(t *testing.T) { for i, prefix := range prefixes { pid, err := peer.IDFromPrivateKey(privKeys[i]) require.NoError(t, err) - require.Equal(t, prefix.BitLen(), key.CommonPrefixLength(PeeridToKadid(pid), prefix)) + require.Equal(t, prefix.BitLen(), key.CommonPrefixLength(PeerIDToKadID(pid), prefix)) } // check that the keys are not reused @@ -102,7 +102,7 @@ func TestReturnKeysToEmptyTrie(t *testing.T) { pid, err := peer.IDFromPrivateKey(key) require.NoError(t, err) - found, foundKey := trie.Find(keysTrie, PeeridToKadid(pid)) + found, foundKey := trie.Find(keysTrie, PeerIDToKadID(pid)) require.True(t, found) require.Equal(t, key, foundKey) } diff --git a/util.go b/util.go index 610b1c8..cd6bef9 100644 --- a/util.go +++ b/util.go @@ -10,10 +10,11 @@ import ( "github.com/probe-lab/go-libdht/kad/key/bitstr" ) -const bucketSize = 20 - -func PeeridToKadid(pid peer.ID) bit256.Key { - hasher, _ := mhreg.GetHasher(mh.SHA2_256) +func PeerIDToKadID(pid peer.ID) bit256.Key { + hasher, err := mhreg.GetHasher(mh.SHA2_256) + if err != nil { + panic(err) + } hasher.Write([]byte(pid)) return bit256.NewKey(hasher.Sum(nil)) } diff --git a/util_test.go b/util_test.go index 57fa14a..7eeacaf 100644 --- a/util_test.go +++ b/util_test.go @@ -18,11 +18,11 @@ func TestBitstrToBit256(t *testing.T) { want := bit256.NewKey(target[:]) require.Equal(t, want, got) - strKey = bitstr.Key("000011110000") // 0x0f0 + strKey = "000011110000" // 0x0f0 got = bitstrToBit256(strKey, padding[:]) require.Equal(t, want, got) - strKey = bitstr.Key("111") // 0xe + strKey = "111" // 0xe padding[0] = 0x0f got = bitstrToBit256(strKey, padding[:]) target[0] = 0xef From 930417270ff413d6c149781694763b53d5b7079e Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 14:25:43 +0100 Subject: [PATCH 16/23] add: is 
self lookup to data --- db/client.go | 1 + .../000001_create_requests_table.up.sql | 5 ++-- db/models.go | 1 + queen.go | 28 ++++++++++++------- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/db/client.go b/db/client.go index f188e07..edc3eac 100644 --- a/db/client.go +++ b/db/client.go @@ -75,6 +75,7 @@ func (c *ClickhouseClient) BulkInsertRequests(ctx context.Context, requests []*R r.Type, r.KeyID, r.MultiAddresses, + r.IsSelfLookup, ) if err != nil { return fmt.Errorf("append request to batch: %w", err) diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index 7fe8529..8743a4a 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -9,7 +9,8 @@ CREATE TABLE requests started_at DateTime, request_type String, key_multihash String, - multi_addresses Array(String) -) ENGINE = ReplicatedMergeTree() + multi_addresses Array(String), + is_self_lookup bool +) ENGINE = ReplicatedMergeTree PRIMARY KEY (started_at) TTL started_at + INTERVAL 1 DAY; diff --git a/db/models.go b/db/models.go index 513b2fc..c18042a 100644 --- a/db/models.go +++ b/db/models.go @@ -19,4 +19,5 @@ type Request struct { StartedAt time.Time KeyID string MultiAddresses []string + IsSelfLookup bool } diff --git a/queen.go b/queen.go index f949847..d789b78 100644 --- a/queen.go +++ b/queen.go @@ -11,6 +11,7 @@ import ( leveldb "github.com/ipfs/go-ds-leveldb" "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-kad-dht/ants" + pb "github.com/libp2p/go-libp2p-kad-dht/pb" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" @@ -36,6 +37,8 @@ type QueenConfig struct { CrawlInterval time.Duration CacheSize int NebulaDBConnString string + BucketSize int + UserAgent string } type Queen struct { @@ -122,8 +125,8 @@ func (q *Queen) freePort(port int) { } func (q *Queen) Run(ctx 
context.Context) error { - logger.Debugln("Queen.Run started") - defer logger.Debugln("Queen.Run completing") + logger.Infoln("Queen.Run started") + defer logger.Infoln("Queen.Run completing") if err := q.nebulaDB.Open(ctx); err != nil { return fmt.Errorf("opening nebula db: %w", err) @@ -211,6 +214,7 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { StartedAt: evt.Timestamp, KeyID: evt.Target.B58String(), MultiAddresses: maddrStrs, + IsSelfLookup: peer.ID(evt.Target) == evt.Remote && evt.Type == pb.Message_FIND_NODE, } requests = append(requests, request) @@ -239,7 +243,7 @@ func (q *Queen) persistLiveAntsKeys() { logger.Debugln("Persisting live ants keys") antsKeys := make([]crypto.PrivKey, 0, len(q.ants)) for _, ant := range q.ants { - antsKeys = append(antsKeys, ant.Host.Peerstore().PrivKey(ant.Host.ID())) + antsKeys = append(antsKeys, ant.cfg.PrivateKey) } q.keysDB.MatchingKeys(nil, antsKeys) logger.Debugf("Number of antsKeys persisted: %d", len(antsKeys)) @@ -254,11 +258,11 @@ func (q *Queen) routine(ctx context.Context) { networkTrie := trie.New[bit256.Key, peer.ID]() for _, peerId := range networkPeers { - networkTrie.Add(PeeridToKadid(peerId), peerId) + networkTrie.Add(PeerIDToKadID(peerId), peerId) } // zones correspond to the prefixes of the tries that must be covered by an ant - zones := trieZones(networkTrie, bucketSize) + zones := trieZones(networkTrie, q.cfg.BucketSize) logger.Debugf("%d zones must be covered by ants", len(zones)) // convert string zone to bitstr.Key @@ -272,7 +276,7 @@ func (q *Queen) routine(ctx context.Context) { for index, ant := range q.ants { matchedKey := false for i, missingKey := range missingKeys { - if key.CommonPrefixLength(ant.KadId, missingKey) == missingKey.BitLen() { + if key.CommonPrefixLength(ant.kadID, missingKey) == missingKey.BitLen() { // remove key from missingKeys since covered by current ant missingKeys = append(missingKeys[:i], missingKeys[i+1:]...) 
matchedKey = true @@ -293,9 +297,13 @@ func (q *Queen) routine(ctx context.Context) { returnedKeys := make([]crypto.PrivKey, len(excessAntsIndices)) for i, index := range excessAntsIndices { ant := q.ants[index] - returnedKeys[i] = ant.privKey - port := ant.port - ant.Close() + returnedKeys[i] = ant.cfg.PrivateKey + port := ant.cfg.Port + + if err := ant.Close(); err != nil { + logger.Warn("error closing ant", err) + } + q.ants = append(q.ants[:index], q.ants[index+1:]...) q.freePort(port) } @@ -305,7 +313,7 @@ func (q *Queen) routine(ctx context.Context) { for _, key := range privKeys { port, err := q.takeAvailablePort() if err != nil { - logger.Error("trying to spawn new ant: ") + logger.Error("trying to spawn new ant: ", err) continue } From a66c4ee6f92610a3961ff9fa4a1be3a41d01f550 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 15:17:17 +0100 Subject: [PATCH 17/23] add prometheus operational metrics --- ant.go | 1 + cmd/ants/main.go | 38 ++++++++++++- db/client.go | 21 +++++-- .../000001_create_requests_table.up.sql | 9 ++- db/models.go | 1 - metrics/telemetry.go | 56 +++++++++++++------ queen.go | 28 ++++++++-- 7 files changed, 121 insertions(+), 33 deletions(-) diff --git a/ant.go b/ant.go index e0b04e6..d859ed4 100644 --- a/ant.go +++ b/ant.go @@ -88,6 +88,7 @@ func SpawnAnt(ctx context.Context, ps peerstore.Peerstore, ds ds.Batching, cfg * libp2p.Peerstore(ps), libp2p.DisableRelay(), libp2p.ListenAddrStrings(listenAddrs...), + libp2p.DisableMetrics(), } if cfg.Port == 0 { diff --git a/cmd/ants/main.go b/cmd/ants/main.go index 0afe484..665824c 100644 --- a/cmd/ants/main.go +++ b/cmd/ants/main.go @@ -9,14 +9,19 @@ import ( "time" logging "github.com/ipfs/go-log/v2" + "github.com/urfave/cli/v2" + "go.opentelemetry.io/otel/trace/noop" + "github.com/probe-lab/ants-watch" "github.com/probe-lab/ants-watch/db" - "github.com/urfave/cli/v2" + "github.com/probe-lab/ants-watch/metrics" ) var logger = logging.Logger("ants-queen") var rootConfig = 
struct { + MetricsHost string + MetricsPort int ClickhouseAddress string ClickhouseDatabase string ClickhouseUsername string @@ -36,6 +41,8 @@ var rootConfig = struct { ProtocolPrefix string QueenID string }{ + MetricsHost: "127.0.0.1", + MetricsPort: 5999, // one below the FirstPort to not accidentally override it ClickhouseAddress: "", ClickhouseDatabase: "", ClickhouseUsername: "", @@ -68,6 +75,20 @@ func main() { Name: "queen", Usage: "Starts the queen service", Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "metrics.host", + Usage: "On which host to expose the metrics", + EnvVars: []string{"ANTS_METRICS_HOST"}, + Destination: &rootConfig.MetricsHost, + Value: rootConfig.MetricsHost, + }, + &cli.IntFlag{ + Name: "metrics.port", + Usage: "On which port to expose the metrics", + EnvVars: []string{"ANTS_METRICS_PORT"}, + Destination: &rootConfig.MetricsPort, + Value: rootConfig.MetricsPort, + }, &cli.StringFlag{ Name: "clickhouse.address", Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", @@ -216,6 +237,19 @@ func main() { func runQueenCommand(c *cli.Context) error { ctx := c.Context + meterProvider, err := metrics.NewMeterProvider() + if err != nil { + return fmt.Errorf("init meter provider: %w", err) + } + + telemetry, err := metrics.NewTelemetry(noop.NewTracerProvider(), meterProvider) + if err != nil { + return fmt.Errorf("init telemetry: %w", err) + } + + logger.Debugln("Starting metrics server", "host", rootConfig.MetricsHost, "port", rootConfig.MetricsPort) + go metrics.ListenAndServe(rootConfig.MetricsHost, rootConfig.MetricsPort) + // initializing a new clickhouse client client, err := db.NewClient( rootConfig.ClickhouseAddress, @@ -223,6 +257,7 @@ func runQueenCommand(c *cli.Context) error { rootConfig.ClickhouseUsername, rootConfig.ClickhousePassword, rootConfig.ClickhouseSSL, + telemetry, ) if err != nil { return fmt.Errorf("init database client: %w", err) @@ -247,6 +282,7 @@ func runQueenCommand(c *cli.Context) error { 
NebulaDBConnString: rootConfig.NebulaDBConnString, BucketSize: rootConfig.BucketSize, UserAgent: rootConfig.UserAgent, + Telemetry: telemetry, } // initializing queen diff --git a/db/client.go b/db/client.go index edc3eac..4edf17e 100644 --- a/db/client.go +++ b/db/client.go @@ -5,6 +5,12 @@ import ( "crypto/tls" "fmt" "net" + "strconv" + "time" + + "github.com/probe-lab/ants-watch/metrics" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" @@ -21,11 +27,12 @@ type Client interface { type ClickhouseClient struct { driver.Conn + telemetry *metrics.Telemetry } var _ Client = (*ClickhouseClient)(nil) -func NewClient(address, database, username, password string, ssl bool) (*ClickhouseClient, error) { +func NewClient(address, database, username, password string, ssl bool, telemetry *metrics.Telemetry) (*ClickhouseClient, error) { logger.Infoln("Creating new clickhouse client...") conn, err := clickhouse.Open(&clickhouse.Options{ @@ -51,13 +58,20 @@ func NewClient(address, database, username, password string, ssl bool) (*Clickho } client := &ClickhouseClient{ - Conn: conn, + Conn: conn, + telemetry: telemetry, } return client, nil } -func (c *ClickhouseClient) BulkInsertRequests(ctx context.Context, requests []*Request) error { +func (c *ClickhouseClient) BulkInsertRequests(ctx context.Context, requests []*Request) (err error) { + start := time.Now() + defer func() { + c.telemetry.BulkInsertCounter.Add(ctx, 1, metric.WithAttributes(attribute.String("success", strconv.FormatBool(err == nil)))) + c.telemetry.BulkInsertSizeHist.Record(ctx, int64(len(requests))) + c.telemetry.BulkInsertLatencyMsHist.Record(ctx, time.Since(start).Milliseconds()) + }() batch, err := c.Conn.PrepareBatch(ctx, "INSERT INTO requests", driver.WithReleaseConnection()) if err != nil { return fmt.Errorf("prepare batch: %w", err) @@ -75,7 +89,6 @@ func (c *ClickhouseClient) 
BulkInsertRequests(ctx context.Context, requests []*R r.Type, r.KeyID, r.MultiAddresses, - r.IsSelfLookup, ) if err != nil { return fmt.Errorf("append request to batch: %w", err) diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index 8743a4a..f12da9d 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -6,11 +6,10 @@ CREATE TABLE requests remote_multihash String, agent_version String, protocols Array(String), - started_at DateTime, + started_at DateTime64(3), request_type String, key_multihash String, - multi_addresses Array(String), - is_self_lookup bool -) ENGINE = ReplicatedMergeTree + multi_addresses Array(String) +) ENGINE = MergeTree PRIMARY KEY (started_at) -TTL started_at + INTERVAL 1 DAY; +TTL toDateTime(started_at) + INTERVAL 1 DAY; diff --git a/db/models.go b/db/models.go index c18042a..513b2fc 100644 --- a/db/models.go +++ b/db/models.go @@ -19,5 +19,4 @@ type Request struct { StartedAt time.Time KeyID string MultiAddresses []string - IsSelfLookup bool } diff --git a/metrics/telemetry.go b/metrics/telemetry.go index 2e1ad38..9353b6d 100644 --- a/metrics/telemetry.go +++ b/metrics/telemetry.go @@ -8,10 +8,9 @@ import ( "fmt" "net/http" _ "net/http/pprof" - "runtime" + "github.com/ipfs/go-log/v2" "github.com/prometheus/client_golang/prometheus/promhttp" - log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/metric" @@ -24,34 +23,57 @@ import ( "go.uber.org/atomic" ) +var logger = log.Logger("telemetry") + const ( MeterName = "github.com/probe-lab/ants-watch" TracerName = "github.com/probe-lab/ants-watch" ) type Telemetry struct { - Tracer trace.Tracer - CacheQueriesCount metric.Int64Counter - InsertRequestHistogram metric.Int64Histogram + Tracer trace.Tracer + AntsCountGauge metric.Int64Gauge + 
BulkInsertCounter metric.Int64Counter + BulkInsertSizeHist metric.Int64Histogram + BulkInsertLatencyMsHist metric.Int64Histogram + CacheHitCounter metric.Int64Counter } func NewTelemetry(tp trace.TracerProvider, mp metric.MeterProvider) (*Telemetry, error) { meter := mp.Meter(MeterName) - cacheQueriesCount, err := meter.Int64Counter("cache_queries", metric.WithDescription("Number of queries to the LRU caches")) + antsCountGauge, err := meter.Int64Gauge("ants_count", metric.WithDescription("Number of running ants")) + if err != nil { + return nil, fmt.Errorf("ants_count gauge: %w", err) + } + + bulkInsertCounter, err := meter.Int64Counter("bulk_insert_count", metric.WithDescription("Number of bulk inserts")) if err != nil { - return nil, fmt.Errorf("cache_queries counter: %w", err) + return nil, fmt.Errorf("bulk_insert_count gauge: %w", err) } - insertRequestHistogram, err := meter.Int64Histogram("insert_request_timing", metric.WithDescription("Histogram of database query times for request insertions"), metric.WithUnit("milliseconds")) + bulkInsertSizeHist, err := meter.Int64Histogram("bulk_insert_size", metric.WithDescription("Size of bulk inserts"), metric.WithExplicitBucketBoundaries(0, 10, 50, 100, 500, 1000)) if err != nil { - return nil, fmt.Errorf("cache_queries counter: %w", err) + return nil, fmt.Errorf("bulk_insert_size histogram: %w", err) + } + + bulkInsertLatencyMsHist, err := meter.Int64Histogram("bulk_insert_latency", metric.WithDescription("Latency of bulk inserts (ms)"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("bulk_insert_latency histogram: %w", err) + } + + cacheHitCounter, err := meter.Int64Counter("cache_hit_count", metric.WithDescription("Number of cache hits")) + if err != nil { + return nil, fmt.Errorf("cache_hit_counter gauge: %w", err) } return &Telemetry{ - Tracer: tp.Tracer(TracerName), - CacheQueriesCount: cacheQueriesCount, - InsertRequestHistogram: insertRequestHistogram, + Tracer: tp.Tracer(TracerName), + 
BulkInsertCounter: bulkInsertCounter, + BulkInsertSizeHist: bulkInsertSizeHist, + BulkInsertLatencyMsHist: bulkInsertLatencyMsHist, + CacheHitCounter: cacheHitCounter, + AntsCountGauge: antsCountGauge, }, nil } @@ -103,14 +125,11 @@ func NewTracerProvider(ctx context.Context, host string, port int) (trace.Tracer // `ants health`. func ListenAndServe(host string, port int) { addr := fmt.Sprintf("%s:%d", host, port) - log.WithField("addr", addr).Debugln("Starting telemetry endpoint") - - // profile 1% of contention events - runtime.SetMutexProfileFraction(1) + logger.Debugln("Starting telemetry endpoint", "addr", addr) http.Handle("/metrics", promhttp.Handler()) http.HandleFunc("/health", func(rw http.ResponseWriter, req *http.Request) { - log.Debugln("Responding to health check") + logger.Debugln("Responding to health check") if HealthStatus.Load() { rw.WriteHeader(http.StatusOK) } else { @@ -118,7 +137,8 @@ func ListenAndServe(host string, port int) { } }) + logger.Info("Starting prometheus server", "addr", addr) if err := http.ListenAndServe(addr, nil); err != nil { - log.WithError(err).Warnln("Error serving prometheus") + logger.Warnln("Error serving prometheus", "err", err) } } diff --git a/queen.go b/queen.go index d789b78..e8f5566 100644 --- a/queen.go +++ b/queen.go @@ -3,6 +3,7 @@ package ants import ( "context" "fmt" + "strconv" "time" "github.com/google/uuid" @@ -11,13 +12,16 @@ import ( leveldb "github.com/ipfs/go-ds-leveldb" "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-kad-dht/ants" - pb "github.com/libp2p/go-libp2p-kad-dht/pb" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "github.com/probe-lab/ants-watch/db" + "github.com/probe-lab/ants-watch/metrics" 
"github.com/probe-lab/go-libdht/kad" "github.com/probe-lab/go-libdht/kad/key" "github.com/probe-lab/go-libdht/kad/key/bit256" @@ -39,6 +43,7 @@ type QueenConfig struct { NebulaDBConnString string BucketSize int UserAgent string + Telemetry *metrics.Telemetry } type Queen struct { @@ -182,7 +187,15 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { // cache agent version if evt.AgentVersion == "" { - evt.AgentVersion, _ = q.agentsCache.Get(evt.Remote.String()) + var found bool + evt.AgentVersion, found = q.agentsCache.Get(evt.Remote.String()) + q.cfg.Telemetry.CacheHitCounter.Add(ctx, 1, metric.WithAttributes( + attribute.String("hit", strconv.FormatBool(found)), + attribute.String("cache", "agent_version"), + )) + if found { + continue + } } else { q.agentsCache.Add(evt.Remote.String(), evt.AgentVersion) } @@ -190,7 +203,12 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { // cache protocols var protocols []protocol.ID if len(evt.Protocols) == 0 { - protocols, _ = q.protocolsCache.Get(evt.Remote.String()) + var found bool + protocols, found = q.protocolsCache.Get(evt.Remote.String()) + q.cfg.Telemetry.CacheHitCounter.Add(ctx, 1, metric.WithAttributes( + attribute.String("hit", strconv.FormatBool(found)), + attribute.String("cache", "protocols"), + )) } else { protocols = evt.Protocols q.protocolsCache.Add(evt.Remote.String(), evt.Protocols) @@ -214,12 +232,12 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { StartedAt: evt.Timestamp, KeyID: evt.Target.B58String(), MultiAddresses: maddrStrs, - IsSelfLookup: peer.ID(evt.Target) == evt.Remote && evt.Type == pb.Message_FIND_NODE, } requests = append(requests, request) if len(requests) >= q.cfg.BatchSize { + if err = q.clickhouseClient.BulkInsertRequests(ctx, requests); err != nil { logger.Errorf("Error inserting requests: %v", err) } @@ -335,6 +353,8 @@ func (q *Queen) routine(ctx context.Context) { q.ants = append(q.ants, ant) } + q.cfg.Telemetry.AntsCountGauge.Record(ctx, 
int64(len(q.ants))) + logger.Debugf("ants count: %d", len(q.ants)) logger.Debug("queen routine over") } From 598870868f72d18d996ab6b7e57b8830037b5cec Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 15:46:55 +0100 Subject: [PATCH 18/23] add: GitHub actions workflows --- .github/workflows/build_push.yml | 65 +++++++++++++++++++++ .github/workflows/ci.yml | 51 ---------------- .github/workflows/deploy.yml | 93 ++++++++++++++++++++++++++++++ .github/workflows/pull_request.yml | 12 ++++ .github/workflows/push_dev.yml | 27 +++++++++ .github/workflows/push_main.yml | 31 ++++++++++ .github/workflows/test.yml | 29 ++++++++++ cmd/ants/main.go | 16 ++--- 8 files changed, 265 insertions(+), 59 deletions(-) create mode 100644 .github/workflows/build_push.yml delete mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/deploy.yml create mode 100644 .github/workflows/pull_request.yml create mode 100644 .github/workflows/push_dev.yml create mode 100644 .github/workflows/push_main.yml create mode 100644 .github/workflows/test.yml diff --git a/.github/workflows/build_push.yml b/.github/workflows/build_push.yml new file mode 100644 index 0000000..982477a --- /dev/null +++ b/.github/workflows/build_push.yml @@ -0,0 +1,65 @@ +name: Test, Build & Push (dev) + +on: + workflow_call: + outputs: + image_tag: + description: "The image tag that was pushed to ECR" + value: ${{ jobs.push_to_ecr.outputs.image_tag }} + +# TODO: move to workflow inputs: +env: + AWS_REGION: us-east-1 + AWS_ROLE: arn:aws:iam::019120760881:role/prod-use1-github-oidc-role + AWS_ECR_REPOSITORY: probelab + +jobs: + push_to_ecr: + name: Build & Push + runs-on: ubuntu-latest + outputs: + image_tag: ${{ steps.meta.outputs.image }} + steps: + - name: Checking out the Repository + uses: actions/checkout@v4 + + - name: Configuring AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ env.AWS_REGION }} + role-to-assume: ${{ env.AWS_ROLE }} + 
role-session-name: PushToECR + + - name: Logging in to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Building ants Image Metadata + id: meta + run: | + SHA_SHORT=${{ github.sha }} + SHA_SHORT=${SHA_SHORT::7} + + NAMESPACE=${{ steps.login-ecr.outputs.registry }} + TAG="ants-sha${SHA_SHORT}" + + IMAGE="$NAMESPACE/$AWS_ECR_REPOSITORY:$TAG" + + echo "tag=$TAG" >> $GITHUB_OUTPUT + echo "image=$IMAGE" >> $GITHUB_OUTPUT + + - name: Checking if Image exists in ECR + id: check-ecr + run: | + aws ecr describe-images --repository-name $AWS_ECR_REPOSITORY --image-ids imageTag=${{ steps.meta.outputs.tag }} || exit_code=$? + echo "exit_code=$exit_code" >> $GITHUB_OUTPUT + + - name: Building Docker Image ${{ steps.meta.outputs.tag }} + id: build + if: steps.check-ecr.outputs.exit_code != 0 + run: docker build -t ${{ steps.meta.outputs.image }} . + + - name: Pushing Docker Image ${{ steps.meta.outputs.tag }} to Amazon ECR + id: push + if: steps.check-ecr.outputs.exit_code != 0 + run: docker push ${{ steps.meta.outputs.image }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index adc3e5e..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: continuous integration - -on: - push: - branches: [main] - pull_request: - -jobs: - build: - runs-on: ubuntu-latest - - strategy: - matrix: - go-version: [1.21, 1.22] - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - submodules: true - - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: ${{ matrix.go-version }} - - - name: Cache Go modules - uses: actions/cache@v3 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - key: ${{ runner.os }}-go-${{ matrix.go-version }}-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go-${{ matrix.go-version }}- - - - name: Install dependencies - run: go mod download - - - name: Check Go fmt - run: | - if ! 
go fmt ./...; then - echo "Go fmt check failed" - exit 1 - fi - - - name: Run Go vet - run: go vet ./... - - - name: Run tests - run: go test ./... -v -cover diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..02d5ad1 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,93 @@ +name: Deployment (prod) + +on: + workflow_call: + secrets: + slack_webhook_url: + required: true + +# TODO: move to workflow inputs: +env: + AWS_REGION: us-east-1 + AWS_ROLE: arn:aws:iam::019120760881:role/prod-use1-github-oidc-role + AWS_ECS_TASK_DEFINITION: prod-use1-cmi-ants-celestia-watch-task + AWS_ECS_SERVICE: prod-use1-cmi-ants-celestia-watch + AWS_ECS_CLUSTER_NAME: default + AWS_ECR_REPOSITORY: probelab + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + needs: build_push + steps: + - name: Configuring AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-region: ${{ env.AWS_REGION }} + role-to-assume: ${{ env.AWS_ROLE }} + role-session-name: deploy-ants + + - name: Downloading latest Amazon ECS task definition + id: download + run: | + aws ecs describe-task-definition \ + --task-definition ${{ env.AWS_ECS_TASK_DEFINITION }} \ + --query taskDefinition > task-definition.json + + # Extract downloaded task definition revision + REVISION=$(cat task-definition.json | jq -r '.revision') + + # Store task definition revision + echo "task_definition=${{ env.AWS_ECS_TASK_DEFINITION }}:$REVISION" >> $GITHUB_OUTPUT + + # https://github.com/aws-actions/amazon-ecs-deploy-task-definition/issues/176 + # This isn't critical but just avoids some warning messages in the next step + - name: Removing invalid task definition fields + run: | + cat task-definition.json | jq -r 'del( + .taskDefinitionArn, + .requiresAttributes, + .compatibilities, + .revision, + .status, + .registeredAt, + .registeredBy + )' > task-definition-cleaned.json + + - name: Updating image tag of task definition ${{ 
steps.download.outputs.task_definition }} + id: task-def + uses: aws-actions/amazon-ecs-render-task-definition@v1 + with: + task-definition: task-definition-cleaned.json + container-name: prod-use1-cmi-ants-celestia-watch + image: ${{ needs.build_push.outputs.image_tag }} + + - name: Deploying to Amazon ECS + uses: aws-actions/amazon-ecs-deploy-task-definition@v2 + with: + task-definition: ${{ steps.task-def.outputs.task-definition }} + service: ${{ env.AWS_ECS_SERVICE }} + cluster: ${{ env.AWS_ECS_CLUSTER_NAME }} + wait-for-service-stability: true + wait-for-minutes: 15 # default is 30 + propagate-tags: SERVICE + enable-ecs-managed-tags: true + + - name: Publishing Success Notification to Slack + if: success() + uses: slackapi/slack-github-action@v2.0.0 + with: + webhook: ${{ secrets.slack_webhook_url }} + webhook-type: incoming-webhook + payload: | + text: "✅ Successfully deployed task definition ${{ steps.download.outputs.task_definition }}. " + + - name: Publishing Error Notification to Slack + if: failure() + uses: slackapi/slack-github-action@v2.0.0 + with: + webhook: ${{ secrets.slack_webhook_url }} + webhook-type: incoming-webhook + payload: | + text: "🚨 Deployment of task definition ${{ steps.download.outputs.task_definition }} failed. 
<${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|View Run>" diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 0000000..ca061c7 --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,12 @@ +name: Test, Build & Push (dev) + +on: + pull_request: + +jobs: + test: + name: Ants + uses: ./.github/workflows/test.yml + permissions: + id-token: write + contents: read \ No newline at end of file diff --git a/.github/workflows/push_dev.yml b/.github/workflows/push_dev.yml new file mode 100644 index 0000000..a99bc88 --- /dev/null +++ b/.github/workflows/push_dev.yml @@ -0,0 +1,27 @@ +name: Test, Build & Push (dev) + +on: + workflow_dispatch: + push: + branches: + - dev + +env: + AWS_REGION: us-east-1 + AWS_ROLE: arn:aws:iam::019120760881:role/prod-use1-github-oidc-role + AWS_ECR_REPOSITORY: probelab + +jobs: + test: + name: Ants + uses: ./.github/workflows/test.yml + permissions: + id-token: write + contents: read + build_push: + name: Ants + uses: ./.github/workflows/build_push.yml + needs: test + permissions: + id-token: write + contents: read \ No newline at end of file diff --git a/.github/workflows/push_main.yml b/.github/workflows/push_main.yml new file mode 100644 index 0000000..0bbd5cd --- /dev/null +++ b/.github/workflows/push_main.yml @@ -0,0 +1,31 @@ +name: Deployment (prod) + +on: + workflow_dispatch: + +jobs: + test: + name: Ants + uses: ./.github/workflows/test.yml + permissions: + id-token: write + contents: read + + build_push: + name: Ants + uses: ./.github/workflows/build_push.yml + needs: test + permissions: + id-token: write + contents: read + + + deploy: + name: Ants + uses: ./.github/workflows/deploy.yml + needs: build_push + permissions: + id-token: write + contents: read + secrets: + slack_webhook_url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..1b11c63 
--- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,29 @@ +name: Test + +on: + workflow_call: +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - name: Checking out repository code + uses: actions/checkout@v4 + + - name: Setting up Golang + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + - name: Checking Go fmt + run: | + if ! go fmt ./...; then + echo "Go fmt check failed" + exit 1 + fi + + - name: Running vet + run: go vet ./... + + - name: Running Tests + run: go test ./... diff --git a/cmd/ants/main.go b/cmd/ants/main.go index 665824c..779ae61 100644 --- a/cmd/ants/main.go +++ b/cmd/ants/main.go @@ -54,7 +54,7 @@ var rootConfig = struct { FirstPort: 6000, UPnp: false, BatchSize: 1000, - BatchTime: time.Second, + BatchTime: 20 * time.Second, CrawlInterval: 120 * time.Minute, CacheSize: 10_000, BucketSize: 20, @@ -166,13 +166,6 @@ func main() { Destination: &rootConfig.KeyDBPath, Value: rootConfig.KeyDBPath, }, - &cli.IntFlag{ - Name: "num_ports", - Usage: "Number of ports ants can listen on", - EnvVars: []string{"ANTS_NUM_PORTS"}, - Destination: &rootConfig.NumPorts, - Value: rootConfig.NumPorts, - }, &cli.IntFlag{ Name: "first_port", Usage: "First port ants can listen on", @@ -180,6 +173,13 @@ func main() { Destination: &rootConfig.FirstPort, Value: rootConfig.FirstPort, }, + &cli.IntFlag{ + Name: "num_ports", + Usage: "Number of ports ants can listen on", + EnvVars: []string{"ANTS_NUM_PORTS"}, + Destination: &rootConfig.NumPorts, + Value: rootConfig.NumPorts, + }, &cli.BoolFlag{ Name: "upnp", Usage: "Enable UPnP", From 39a1a3822411af3650ff6f6c54245b7164975b7d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 15:55:39 +0100 Subject: [PATCH 19/23] fix: dockerfile for new entry point --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 67cc327..f63322c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ RUN go mod download COPY . 
./ RUN --mount=type=cache,target=/root/.cache/go-build \ - CGO_ENABLED=1 GOOS=linux go build -ldflags "-X main.RawVersion='${RAW_VERSION}'" -o ants github.com/probe-lab/ants-watch/cmd/honeypot + CGO_ENABLED=1 GOOS=linux go build -ldflags "-X main.RawVersion='${RAW_VERSION}'" -o ants github.com/probe-lab/ants-watch/cmd/ants FROM alpine:3.18 From 122272701e3750edcba6c161e8de7373902952c3 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 16:04:33 +0100 Subject: [PATCH 20/23] increase ttl and change merge tree engine to clustered --- db/migrations/000001_create_requests_table.up.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/db/migrations/000001_create_requests_table.up.sql b/db/migrations/000001_create_requests_table.up.sql index f12da9d..6f96178 100644 --- a/db/migrations/000001_create_requests_table.up.sql +++ b/db/migrations/000001_create_requests_table.up.sql @@ -10,6 +10,6 @@ CREATE TABLE requests request_type String, key_multihash String, multi_addresses Array(String) -) ENGINE = MergeTree +) ENGINE = ReplicatedMergeTree PRIMARY KEY (started_at) -TTL toDateTime(started_at) + INTERVAL 1 DAY; +TTL toDateTime(started_at) + INTERVAL 180 DAY; From 74babba3ca724fd2a5390eb8e04398b734628575 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 3 Dec 2024 16:16:54 +0100 Subject: [PATCH 21/23] fix: health check --- cmd/ants/health.go | 14 +++-- cmd/ants/main.go | 130 +++++++++++++++++++++++++-------------------- 2 files changed, 83 insertions(+), 61 deletions(-) diff --git a/cmd/ants/health.go b/cmd/ants/health.go index c475e7d..be3250f 100644 --- a/cmd/ants/health.go +++ b/cmd/ants/health.go @@ -3,16 +3,22 @@ package main import ( "fmt" "net/http" - "os" "github.com/urfave/cli/v2" ) +var healthConfig = struct { + MetricsHost string + MetricsPort int +}{ + MetricsHost: "127.0.0.1", + MetricsPort: 5999, // one below the FirstPort to not accidentally override it +} + func HealthCheck(c *cli.Context) error { endpoint := 
fmt.Sprintf( - "http://%s:%s/health", - os.Getenv("METRICS_HOST"), - os.Getenv("METRICS_PORT"), + "http://%s:%d/metrics", + healthConfig.MetricsHost, healthConfig.MetricsPort, ) req, err := http.NewRequestWithContext(c.Context, http.MethodGet, endpoint, nil) if err != nil { diff --git a/cmd/ants/main.go b/cmd/ants/main.go index 779ae61..e0d912d 100644 --- a/cmd/ants/main.go +++ b/cmd/ants/main.go @@ -19,7 +19,7 @@ import ( var logger = logging.Logger("ants-queen") -var rootConfig = struct { +var queenConfig = struct { MetricsHost string MetricsPort int ClickhouseAddress string @@ -79,134 +79,134 @@ func main() { Name: "metrics.host", Usage: "On which host to expose the metrics", EnvVars: []string{"ANTS_METRICS_HOST"}, - Destination: &rootConfig.MetricsHost, - Value: rootConfig.MetricsHost, + Destination: &queenConfig.MetricsHost, + Value: queenConfig.MetricsHost, }, &cli.IntFlag{ Name: "metrics.port", Usage: "On which port to expose the metrics", EnvVars: []string{"ANTS_METRICS_PORT"}, - Destination: &rootConfig.MetricsPort, - Value: rootConfig.MetricsPort, + Destination: &queenConfig.MetricsPort, + Value: queenConfig.MetricsPort, }, &cli.StringFlag{ Name: "clickhouse.address", Usage: "ClickHouse address containing the host and port, 127.0.0.1:9000", EnvVars: []string{"ANTS_CLICKHOUSE_ADDRESS"}, - Destination: &rootConfig.ClickhouseAddress, - Value: rootConfig.ClickhouseAddress, + Destination: &queenConfig.ClickhouseAddress, + Value: queenConfig.ClickhouseAddress, }, &cli.StringFlag{ Name: "clickhouse.database", Usage: "The ClickHouse database where ants requests will be recorded", EnvVars: []string{"ANTS_CLICKHOUSE_DATABASE"}, - Destination: &rootConfig.ClickhouseDatabase, - Value: rootConfig.ClickhouseDatabase, + Destination: &queenConfig.ClickhouseDatabase, + Value: queenConfig.ClickhouseDatabase, }, &cli.StringFlag{ Name: "clickhouse.username", Usage: "The ClickHouse user that has the prerequisite privileges to record the requests", EnvVars: 
[]string{"ANTS_CLICKHOUSE_USERNAME"}, - Destination: &rootConfig.ClickhouseUsername, - Value: rootConfig.ClickhouseUsername, + Destination: &queenConfig.ClickhouseUsername, + Value: queenConfig.ClickhouseUsername, }, &cli.StringFlag{ Name: "clickhouse.password", Usage: "The password for the ClickHouse user", EnvVars: []string{"ANTS_CLICKHOUSE_PASSWORD"}, - Destination: &rootConfig.ClickhousePassword, - Value: rootConfig.ClickhousePassword, + Destination: &queenConfig.ClickhousePassword, + Value: queenConfig.ClickhousePassword, }, &cli.BoolFlag{ Name: "clickhouse.ssl", Usage: "Whether to use SSL for the ClickHouse connection", EnvVars: []string{"ANTS_CLICKHOUSE_SSL"}, - Destination: &rootConfig.ClickhouseSSL, - Value: rootConfig.ClickhouseSSL, + Destination: &queenConfig.ClickhouseSSL, + Value: queenConfig.ClickhouseSSL, }, &cli.StringFlag{ Name: "nebula.connstring", Usage: "The connection string for the Postgres Nebula database", EnvVars: []string{"ANTS_NEBULA_CONNSTRING"}, - Destination: &rootConfig.NebulaDBConnString, - Value: rootConfig.NebulaDBConnString, + Destination: &queenConfig.NebulaDBConnString, + Value: queenConfig.NebulaDBConnString, }, &cli.IntFlag{ Name: "batch.size", Usage: "The number of ants to request to store at a time", EnvVars: []string{"ANTS_BATCH_SIZE"}, - Destination: &rootConfig.BatchSize, - Value: rootConfig.BatchSize, + Destination: &queenConfig.BatchSize, + Value: queenConfig.BatchSize, }, &cli.DurationFlag{ Name: "batch.time", Usage: "The time to wait between batches", EnvVars: []string{"ANTS_BATCH_TIME"}, - Destination: &rootConfig.BatchTime, - Value: rootConfig.BatchTime, + Destination: &queenConfig.BatchTime, + Value: queenConfig.BatchTime, }, &cli.DurationFlag{ Name: "crawl.interval", Usage: "The time between two crawls", EnvVars: []string{"ANTS_CRAWL_INTERVAL"}, - Destination: &rootConfig.CrawlInterval, - Value: rootConfig.CrawlInterval, + Destination: &queenConfig.CrawlInterval, + Value: queenConfig.CrawlInterval, }, 
&cli.IntFlag{ Name: "cache.size", Usage: "How many agent versions and protocols should be cached in memory", EnvVars: []string{"ANTS_CACHE_SIZE"}, - Destination: &rootConfig.CacheSize, - Value: rootConfig.CacheSize, + Destination: &queenConfig.CacheSize, + Value: queenConfig.CacheSize, }, &cli.PathFlag{ Name: "key.path", Usage: "The path to the data store containing the keys", EnvVars: []string{"ANTS_KEY_PATH"}, - Destination: &rootConfig.KeyDBPath, - Value: rootConfig.KeyDBPath, + Destination: &queenConfig.KeyDBPath, + Value: queenConfig.KeyDBPath, }, &cli.IntFlag{ Name: "first_port", Usage: "First port ants can listen on", EnvVars: []string{"ANTS_FIRST_PORT"}, - Destination: &rootConfig.FirstPort, - Value: rootConfig.FirstPort, + Destination: &queenConfig.FirstPort, + Value: queenConfig.FirstPort, }, &cli.IntFlag{ Name: "num_ports", Usage: "Number of ports ants can listen on", EnvVars: []string{"ANTS_NUM_PORTS"}, - Destination: &rootConfig.NumPorts, - Value: rootConfig.NumPorts, + Destination: &queenConfig.NumPorts, + Value: queenConfig.NumPorts, }, &cli.BoolFlag{ Name: "upnp", Usage: "Enable UPnP", EnvVars: []string{"ANTS_UPNP"}, - Destination: &rootConfig.UPnp, - Value: rootConfig.UPnp, + Destination: &queenConfig.UPnp, + Value: queenConfig.UPnp, }, &cli.IntFlag{ Name: "bucket.size", Usage: "The bucket size for the ants DHT", EnvVars: []string{"ANTS_BUCKET_SIZE"}, - Destination: &rootConfig.BucketSize, - Value: rootConfig.BucketSize, + Destination: &queenConfig.BucketSize, + Value: queenConfig.BucketSize, }, &cli.StringFlag{ Name: "user.agent", Usage: "The user agent to use for the ants hosts", EnvVars: []string{"ANTS_USER_AGENT"}, - Destination: &rootConfig.UserAgent, - Value: rootConfig.UserAgent, + Destination: &queenConfig.UserAgent, + Value: queenConfig.UserAgent, }, &cli.StringFlag{ Name: "queen.id", Usage: "The ID for the queen that's orchestrating the ants", EnvVars: []string{"ANTS_QUEEN_ID"}, - Destination: &rootConfig.QueenID, - Value: 
rootConfig.QueenID, + Destination: &queenConfig.QueenID, + Value: queenConfig.QueenID, DefaultText: "generated", }, }, @@ -216,6 +216,22 @@ func main() { Name: "health", Usage: "Checks the health of the service", Action: HealthCheck, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "metrics.host", + Usage: "On which host to expose the metrics", + EnvVars: []string{"ANTS_METRICS_HOST"}, + Destination: &healthConfig.MetricsHost, + Value: healthConfig.MetricsHost, + }, + &cli.IntFlag{ + Name: "metrics.port", + Usage: "On which port to expose the metrics", + EnvVars: []string{"ANTS_METRICS_PORT"}, + Destination: &healthConfig.MetricsPort, + Value: healthConfig.MetricsPort, + }, + }, }, }, } @@ -247,16 +263,16 @@ func runQueenCommand(c *cli.Context) error { return fmt.Errorf("init telemetry: %w", err) } - logger.Debugln("Starting metrics server", "host", rootConfig.MetricsHost, "port", rootConfig.MetricsPort) - go metrics.ListenAndServe(rootConfig.MetricsHost, rootConfig.MetricsPort) + logger.Debugln("Starting metrics server", "host", queenConfig.MetricsHost, "port", queenConfig.MetricsPort) + go metrics.ListenAndServe(queenConfig.MetricsHost, queenConfig.MetricsPort) // initializing a new clickhouse client client, err := db.NewClient( - rootConfig.ClickhouseAddress, - rootConfig.ClickhouseDatabase, - rootConfig.ClickhouseUsername, - rootConfig.ClickhousePassword, - rootConfig.ClickhouseSSL, + queenConfig.ClickhouseAddress, + queenConfig.ClickhouseDatabase, + queenConfig.ClickhouseUsername, + queenConfig.ClickhousePassword, + queenConfig.ClickhouseSSL, telemetry, ) if err != nil { @@ -271,17 +287,17 @@ func runQueenCommand(c *cli.Context) error { } queenCfg := &ants.QueenConfig{ - KeysDBPath: rootConfig.KeyDBPath, - NPorts: rootConfig.NumPorts, - FirstPort: rootConfig.FirstPort, - UPnP: rootConfig.UPnp, - BatchSize: rootConfig.BatchSize, - BatchTime: rootConfig.BatchTime, - CrawlInterval: rootConfig.CrawlInterval, - CacheSize: rootConfig.CacheSize, - NebulaDBConnString: 
rootConfig.NebulaDBConnString, - BucketSize: rootConfig.BucketSize, - UserAgent: rootConfig.UserAgent, + KeysDBPath: queenConfig.KeyDBPath, + NPorts: queenConfig.NumPorts, + FirstPort: queenConfig.FirstPort, + UPnP: queenConfig.UPnp, + BatchSize: queenConfig.BatchSize, + BatchTime: queenConfig.BatchTime, + CrawlInterval: queenConfig.CrawlInterval, + CacheSize: queenConfig.CacheSize, + NebulaDBConnString: queenConfig.NebulaDBConnString, + BucketSize: queenConfig.BucketSize, + UserAgent: queenConfig.UserAgent, Telemetry: telemetry, } From 54cf657e12701ab6c28ef4630fcf053aa67aa621 Mon Sep 17 00:00:00 2001 From: guillaumemichel Date: Wed, 4 Dec 2024 13:08:22 +0100 Subject: [PATCH 22/23] updated submodule --- go-libp2p-kad-dht | 2 +- queen.go | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/go-libp2p-kad-dht b/go-libp2p-kad-dht index 628767c..5c7829f 160000 --- a/go-libp2p-kad-dht +++ b/go-libp2p-kad-dht @@ -1 +1 @@ -Subproject commit 628767c5bb901a8f6669a5773e2c1bf6fc575aa8 +Subproject commit 5c7829ffdc99a2945d6fba19250e3b3f9ea7bb46 diff --git a/queen.go b/queen.go index e8f5566..78dd832 100644 --- a/queen.go +++ b/queen.go @@ -7,7 +7,7 @@ import ( "time" "github.com/google/uuid" - "github.com/hashicorp/golang-lru/v2" + lru "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" leveldb "github.com/ipfs/go-ds-leveldb" "github.com/ipfs/go-log/v2" @@ -193,9 +193,6 @@ func (q *Queen) consumeAntsEvents(ctx context.Context) { attribute.String("hit", strconv.FormatBool(found)), attribute.String("cache", "agent_version"), )) - if found { - continue - } } else { q.agentsCache.Add(evt.Remote.String(), evt.AgentVersion) } From 600d59e4983a1f73909e54425f794516f3a28a2f Mon Sep 17 00:00:00 2001 From: guillaumemichel Date: Wed, 4 Dec 2024 13:14:49 +0100 Subject: [PATCH 23/23] fixing ci --- .github/workflows/test.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 
1b11c63..26e2703 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,6 +9,8 @@ jobs: steps: - name: Checking out repository code uses: actions/checkout@v4 + with: + submodules: true - name: Setting up Golang uses: actions/setup-go@v5 @@ -26,4 +28,4 @@ jobs: run: go vet ./... - name: Running Tests - run: go test ./... + run: go test ./ # run tests only in root directory