Skip to content
Snippets Groups Projects
Commit 2a3b220e authored by julian's avatar julian
Browse files

removed unused services, added override compose files for db configuration

parent c3f708d6
Branches
No related tags found
No related merge requests found
......@@ -9,8 +9,8 @@ MOS_HOST=mosquitto
MOS_TOPIC=ta1-cadets-e3-official
# pub
LINES_PER_SECOND=5000
BATCH_SIZE=2500
LINES_PER_SECOND=1
BATCH_SIZE=1
# query
QUERY_INTERVAL=1
......
......@@ -3,3 +3,4 @@ __pycache__
.mypy_cache
/data/
/output
/result/
\ No newline at end of file
......@@ -81,7 +81,6 @@ services:
secrets:
- postgres_db_pass
pgadmin:
image: dpage/pgadmin4
environment:
......@@ -104,69 +103,11 @@ services:
condition: service_healthy
restart: true
grafana:
image: grafana/grafana-oss
environment:
- DS_PROMETHEUS
profiles:
- measure
ports:
- 3000:3000
configs:
- source: grafana_data
target: /etc/grafana/provisioning/datasources/default.yaml
- source: grafana_dash
target: /etc/grafana/provisioning/dashboards/default.yaml
- source: grafana_dash_src
target: /etc/grafana/dashboards/9628_rev7.json
prometheus:
image: prom/prometheus
command: --config.file=/prometheus_conf --web.enable-admin-api
profiles:
- measure
configs:
- prometheus_conf
cadvisor:
image: gcr.io/cadvisor/cadvisor
privileged: true
profiles:
- measure
ports:
- 8080:8080
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
devices:
- /dev/kmsg
postgres_exporter:
image: bitnami/postgres-exporter
environment:
- DATA_SOURCE_NAME=postgresql://$PGUSER:$PGPASSWORD@$PGHOST/$PGDATABASE?sslmode=disable
profiles:
- measure
depends_on:
- postgres
configs:
prometheus_conf:
file: ./prometheus.yml
postgres_conf:
file: ./postgresql.conf
mosquitto_conf:
file: ./mosquitto.conf
grafana_data:
file: ./grafana/datasource.yaml
grafana_dash:
file: ./grafana/dashboard.yaml
grafana_dash_src:
file: ./grafana/9628_rev7.json
pgadmin_server_conf:
file: ./servers.json
data.zip:
......
# Compose override: provision postgres with the edge-ID variants of the
# initdb scripts (applied with `docker compose -f compose.yml -f <this file>`).
services:
  postgres:
    volumes:
      - ./postgres/initdb/02-initdb_edge_id.sql:/docker-entrypoint-initdb.d/02-initdb.sql:ro
      - ./postgres/initdb/03-initdb_trigger_edge_id.sql:/docker-entrypoint-initdb.d/03-initdb.sql:ro
# Compose override: provision postgres with the edge-UUID variants of the
# initdb scripts (applied with `docker compose -f compose.yml -f <this file>`).
services:
  postgres:
    volumes:
      - ./postgres/initdb/02-initdb_edge_uuid.sql:/docker-entrypoint-initdb.d/02-initdb.sql:ro
      - ./postgres/initdb/03-initdb_trigger_edge_uuid.sql:/docker-entrypoint-initdb.d/03-initdb.sql:ro
This diff is collapsed.
# Grafana dashboard provisioning: load dashboard JSON files from the
# container path mounted via the compose `grafana_dash_src` config.
apiVersion: 1
providers:
  - name: PostgreSQL Database
    options:
      path: /etc/grafana/dashboards
# Grafana datasource provisioning: points at the `prometheus` compose service.
# ${DS_PROMETHEUS} is expanded by Grafana from the container environment
# (the compose file passes DS_PROMETHEUS through).
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    uid: ${DS_PROMETHEUS}
# Compose override: create the hash-based edge index at postgres init time.
services:
  postgres:
    volumes:
      - ./postgres/initdb/04-initdb_edge_index_hash.sql:/docker-entrypoint-initdb.d/04-initdb.sql:ro
# Compose override: create the multicolumn edge index at postgres init time.
services:
  postgres:
    volumes:
      - ./postgres/initdb/04-initdb_edge_index_multicolumn.sql:/docker-entrypoint-initdb.d/04-initdb.sql:ro
# Compose override: create the default edge index at postgres init time.
services:
  postgres:
    volumes:
      - ./postgres/initdb/04-initdb_edge_index.sql:/docker-entrypoint-initdb.d/04-initdb.sql:ro
global:
  scrape_interval: 15s  # By default, scrape targets every 15 seconds.
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  # external_labels:
  #   monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']

  # Postgres metrics exposed by the postgres_exporter compose service.
  - job_name: 'postgres'
    scrape_interval: 5s
    static_configs:
      - targets: ['postgres_exporter:9187']

  # Per-container resource metrics from the cadvisor compose service.
  - job_name: 'cadvisor'
    scrape_interval: 5s
    static_configs:
      - targets: ['cadvisor:8080']
"""Run a timed docker-compose experiment and collect query logs.

Assembles a compose stack from a sequence of compose/override files,
lets it run for a fixed duration, copies the query container's logs
into ./result/, and tears the stack down.
"""
from time import sleep
from os import environ, makedirs, path
from subprocess import Popen, run
from typing import Sequence


def run_experiment(compose_files: Sequence[str]) -> None:
    """Start the compose stack built from ``compose_files``, wait, harvest logs.

    :param compose_files: compose file names passed to ``docker compose`` as
        repeated ``-f <file>`` options, in order (later files override
        earlier ones).
    """
    print("Start compose environment...")
    environ["COMPOSE_PROFILES"] = "experiment,inspect"
    # Tear down any stack left over from a previous run before starting.
    # Fix: the command was `docker down`, which is not a valid docker CLI
    # command -- the `compose` subcommand was missing.
    Popen(("docker", "compose", "down")).wait(30)
    # Interleave "-f" with each file name: ("-f", f1, "-f", f2, ...).
    flags = ["-f"] * len(compose_files)
    interleaved = (arg for pair in zip(flags, compose_files) for arg in pair)
    run(("docker", "compose", *interleaved, "up", "-d"))
    sleep_time = 10  # seconds; shortened from multi-hour runs for iteration
    print(f"sleep for {sleep_time/60} minutes")
    sleep(sleep_time)
    # NOTE(review): `docker compose cp` does not expand shell globs in the
    # container-side path -- confirm "query_pg:log/*" actually copies the
    # log files (copying the whole "query_pg:log/" directory may be safer).
    run(("docker", "compose", "cp", "query_pg:log/*", "result/"))
    run(("docker", "compose", "down"))


if __name__ == "__main__":
    # Ensure the host-side destination for the copied logs exists.
    result_dir = "result"
    if not path.exists(result_dir):
        makedirs(result_dir)
    run_experiment(("compose.yml", "edge-id-empty.yml", "index.yml"))
FROM python:3
WORKDIR /app
# psycopg with binary wheels and connection pooling; paho-mqtt for the
# mosquitto subscription.
RUN pip install paho-mqtt "psycopg[binary,pool]"
# Fix: the diff left two consecutive COPY lines; the SQL files
# (drop_schemas.sql, schema.sql, procedures.sql, schema_json.sql) are no
# longer shipped in the image -- only the subscriber script is needed,
# and a stale COPY referencing absent files would fail the build.
COPY sub_pg_cdm.py ./
# Runtime configuration is supplied via the environment:
# ENV PGHOST= PGDATABASE= PGUSER= PGPASSWORD= MOS_HOST= MOS_TOPIC=
CMD ["python", "-u", "sub_pg_cdm.py"]
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment