Commit d4f17e06 authored by julian

reverted to using b-tree indices

parent 68f1d9ce
@@ -8,8 +8,8 @@ MOS_HOST=mosquitto
 MOS_TOPIC=ta1-cadets-e3-official
 # pub
-LINES_PER_SECOND=4500
-BATCH_SIZE=1500
+LINES_PER_SECOND=1500
+BATCH_SIZE=500
 # query
 QUERY_INTERVAL=1
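Note: assuming the publisher derives its pacing from these two values, the cadence is unchanged at 3 batches per second (old: 4500 / 1500, new: 1500 / 500); only the per-second line volume drops to a third.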
@@ -137,7 +137,7 @@ services:
    depends_on:
      - postgres
-volumes:
+volumes: #TODO remove unnecessary volumes (probably everything except postgres-data)
  postgres-data:
  postgres-logs:
  postgres-admin:
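If that TODO is acted on, the then-unreferenced named volumes would also need to be deleted from the host, e.g. with "docker volume prune" or "docker compose down -v"; which ones are actually safe to drop depends on what the other services persist.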
@@ -140,7 +140,7 @@ shared_buffers = 20GB # min 128kB
 # you actively intend to use prepared transactions.
 work_mem = 1GB # min 64kB
 #hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
-maintenance_work_mem = 1GB # min 1MB
+maintenance_work_mem = 2GB # min 1MB
 #autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
 #logical_decoding_work_mem = 64MB # min 64kB
 #max_stack_depth = 2MB # min 100kB
@@ -240,7 +240,7 @@ wal_buffers = 1GB # min 32kB, -1 sets based on shared_buffers
 # - Checkpoints -
-#checkpoint_timeout = 5min # range 30s-1d
+checkpoint_timeout = 15min # range 30s-1d
 #checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
 #checkpoint_flush_after = 256kB # measured in pages, 0 disables
 #checkpoint_warning = 30s # 0 disables
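The maintenance_work_mem bump feeds the CREATE INDEX statements further down, and the longer checkpoint_timeout trades recovery time for fewer checkpoints during the bulk load. A minimal sketch to confirm the new values are live in the running container (assuming psycopg, as in the query script, with the connection taken from the usual PG* environment variables):

from psycopg import connect

# Print the values PostgreSQL is actually running with after the config change.
with connect() as conn:
    for setting in ("maintenance_work_mem", "checkpoint_timeout", "work_mem"):
        # SHOW takes no bind parameters, so the name is interpolated directly;
        # it comes from a fixed list here, not from user input.
        (value,) = conn.execute(f"SHOW {setting}").fetchone()
        print(f"{setting} = {value}")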
@@ -36,7 +36,7 @@ def execute_queries(cur: Cursor[Any], queries: Sequence[query], log_path: str):
 def job(queries: Sequence[query], log_path: str):
     with open("/app/queries.sql") as prepared:
-        prepared_str = prepared.read()
+        prepared_str = prepared.read()  # TODO read file only once
     with connect(cursor_factory=ClientCursor) as conn:
         conn.set_read_only(True)
         conn.set_isolation_level(IsolationLevel.SERIALIZABLE)
@@ -68,7 +68,7 @@ if __name__ == "__main__":
     log_name = f"{log_path}.log"
     csv_name = f"{log_path}.csv"
     queries = (
-        # ("count_nodes", None),
+        ("count_nodes", None),
         # ("get_ancestors(%s, %s)", (1, 5)),
         # ("get_descendants(%s, %s)", (1, 5)),
         # (
@@ -79,8 +79,7 @@ if __name__ == "__main__":
         #         1,
         #     ),
         # ),
-        ("two_hop_new(%s)", ("9FF334BB-9072-D756-B290-556656D73728",)),
-        ("two_hop(%s)", ("9FF334BB-9072-D756-B290-556656D73728",)),
+        # ("two_hop(%s)", ("9FF334BB-9072-D756-B290-556656D73728",)),
     )
     csv_header = (
         "Query Key",
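Net effect on the benchmark: only count_nodes is exercised now; two_hop_new is dropped outright and two_hop is left commented out, apparently for later comparison runs against the rebuilt indexes. Each tuple evidently pairs a prepared-statement invocation with its parameters, per the execute_queries signature above.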
@@ -84,7 +84,7 @@ PREPARE two_hop (UUID) AS
     SELECT source AS node FROM hop2
     UNION ALL
     SELECT destination FROM hop2
-) AS all_nodes
+)
 WHERE node <> $1;
 PREPARE two_hop_new (UUID) AS
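Worth noting: omitting the alias on a FROM subquery only parses on PostgreSQL 16 or later, where subquery aliases became optional; on older releases the removed ") AS all_nodes" form is mandatory.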
@@ -8,13 +8,11 @@ CREATE TABLE node(
 CREATE TYPE EDGE_TYPE AS ENUM ('is_generated_by', 'affects', 'affects_2', 'has_parent', 'has_local_principal', 'runs_on', 'resides_on', 'has_owning_principal', 'has_account_on');
 CREATE TABLE edge(
     --id INTEGER GENERATED ALWAYS AS IDENTITY,
-    source UUID NOT NULL REFERENCES node (uuid),
-    destination UUID NOT NULL REFERENCES node (uuid),
+    source UUID NOT NULL,
+    destination UUID NOT NULL,
     type EDGE_TYPE NOT NULL
     --,PRIMARY KEY (source, destination, type)
 );
---CREATE INDEX idx_node ON node using HASH (uuid);
-CREATE INDEX idx_edge_source ON edge USING HASH (source);
-CREATE INDEX idx_edge_dest ON edge USING HASH (destination);
+--CREATE INDEX idx_node ON node USING HASH (uuid);
+CREATE INDEX idx_edge_source ON edge (source);
+CREATE INDEX idx_edge_dest ON edge (destination);
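This is the revert the commit message names: the edge lookup indexes go back to the default B-tree access method (CREATE INDEX without a USING clause defaults to btree), and the foreign-key checks against node are dropped, presumably to speed up ingest. A sanity check that the planner actually picks the new indexes for the equality probes two_hop relies on; a sketch, again assuming psycopg and PG* environment variables for the connection, reusing the UUID from the query script:

from uuid import UUID

from psycopg import connect

with connect() as conn:
    # Ask the planner how it would resolve a one-hop expansion from a node;
    # with idx_edge_source in place this should report an index or bitmap
    # scan rather than a sequential scan over edge.
    rows = conn.execute(
        "EXPLAIN SELECT destination FROM edge WHERE source = %s",
        (UUID("9FF334BB-9072-D756-B290-556656D73728"),),
    ).fetchall()
    for (line,) in rows:
        print(line)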