diff --git a/.env.sample.holesky b/.env.sample.holesky index f6fdfcc..38f3b6e 100644 --- a/.env.sample.holesky +++ b/.env.sample.holesky @@ -161,14 +161,14 @@ MEV_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7 # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= -# Charon Cluster Name. Mandatory to send logs with Promtail. -#CLUSTER_NAME= +# Charon Cluster Name. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_NAME="" -# Charon Cluster Peer. Mandatory to send logs with Promtail. -#CLUSTER_PEER= +# Charon Cluster Peer. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_PEER="" # Nickname to identify this charon node on monitoring (max 32 characters). -#CHARON_NICKNAME= +#CHARON_NICKNAME="" # Docker network of running charon node. See `docker network ls`. #CHARON_DOCKER_NETWORK= @@ -196,23 +196,23 @@ MEV_RELAYS=https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7 # Prometheus service owner used to uniquely identify user from which metrics are pushed. #SERVICE_OWNER=charon_user -# Uncomment these if you have log exporting with Promtail +# Uncomment these if you have log exporting with Alloy # and want to disable log export on a particular container. 
-#EL_NETHERMIND_PROMTAIL_MONITORED=false -#EL_RETH_PROMTAIL_MONITORED=false -#CL_LIGHTHOUSE_PROMTAIL_MONITORED=false -#CL_GRANDINE_PROMTAIL_MONITORED=false -#CL_TEKU_PROMTAIL_MONITORED=false -#CL_LODESTAR_PROMTAIL_MONITORED=false -#CHARON_PROMTAIL_MONITORED=false -#VC_LODESTAR_PROMTAIL_MONITORED=false -#VC_NIMBUS_PROMTAIL_MONITORED=false -#VC_PRYSM_PROMTAIL_MONITORED=false -#VC_TEKU_PROMTAIL_MONITORED=false -#MEV_MEV_BOOST_PROMTAIL_MONITORED=false -#MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false -#EJECTOR_PROMTAIL_MONITORED=false -#DV_EXIT_PROMTAIL_MONITORED=false +#EL_NETHERMIND_ALLOY_MONITORED=false +#EL_RETH_ALLOY_MONITORED=false +#CL_LIGHTHOUSE_ALLOY_MONITORED=false +#CL_GRANDINE_ALLOY_MONITORED=false +#CL_TEKU_ALLOY_MONITORED=false +#CL_LODESTAR_ALLOY_MONITORED=false +#CHARON_ALLOY_MONITORED=false +#VC_LODESTAR_ALLOY_MONITORED=false +#VC_NIMBUS_ALLOY_MONITORED=false +#VC_PRYSM_ALLOY_MONITORED=false +#VC_TEKU_ALLOY_MONITORED=false +#MEV_MEV_BOOST_ALLOY_MONITORED=false +#MEV_COMMIT_BOOST_ALLOY_MONITORED=false +#EJECTOR_ALLOY_MONITORED=false +#DV_EXIT_ALLOY_MONITORED=false ######### Debug Config ######### diff --git a/.env.sample.hoodi b/.env.sample.hoodi index 7538ac6..3725b37 100644 --- a/.env.sample.hoodi +++ b/.env.sample.hoodi @@ -161,14 +161,14 @@ MEV_RELAYS=https://0x98f0ef62f00780cf8eb06701a7d22725b9437d4768bb19b363e882ae871 # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= -# Charon Cluster Name. Mandatory to send logs with Promtail. -#CLUSTER_NAME= +# Charon Cluster Name. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_NAME="" -# Charon Cluster Peer. Mandatory to send logs with Promtail. -#CLUSTER_PEER= +# Charon Cluster Peer. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_PEER="" # Nickname to identify this charon node on monitoring (max 32 characters). -#CHARON_NICKNAME= +#CHARON_NICKNAME="" # Docker network of running charon node. 
See `docker network ls`. #CHARON_DOCKER_NETWORK= @@ -196,23 +196,23 @@ MEV_RELAYS=https://0x98f0ef62f00780cf8eb06701a7d22725b9437d4768bb19b363e882ae871 # Prometheus service owner used to uniquely identify user from which metrics are pushed. #SERVICE_OWNER=charon_user -# Uncomment these if you have log exporting with Promtail +# Uncomment these if you have log exporting with Alloy # and want to disable log export on a particular container. -#EL_NETHERMIND_PROMTAIL_MONITORED=false -#EL_RETH_PROMTAIL_MONITORED=false -#CL_LIGHTHOUSE_PROMTAIL_MONITORED=false -#CL_GRANDINE_PROMTAIL_MONITORED=false -#CL_TEKU_PROMTAIL_MONITORED=false -#CL_LODESTAR_PROMTAIL_MONITORED=false -#CHARON_PROMTAIL_MONITORED=false -#VC_LODESTAR_PROMTAIL_MONITORED=false -#VC_NIMBUS_PROMTAIL_MONITORED=false -#VC_PRYSM_PROMTAIL_MONITORED=false -#VC_TEKU_PROMTAIL_MONITORED=false -#MEV_MEV_BOOST_PROMTAIL_MONITORED=false -#MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false -#EJECTOR_PROMTAIL_MONITORED=false -#DV_EXIT_PROMTAIL_MONITORED=false +#EL_NETHERMIND_ALLOY_MONITORED=false +#EL_RETH_ALLOY_MONITORED=false +#CL_LIGHTHOUSE_ALLOY_MONITORED=false +#CL_GRANDINE_ALLOY_MONITORED=false +#CL_TEKU_ALLOY_MONITORED=false +#CL_LODESTAR_ALLOY_MONITORED=false +#CHARON_ALLOY_MONITORED=false +#VC_LODESTAR_ALLOY_MONITORED=false +#VC_NIMBUS_ALLOY_MONITORED=false +#VC_PRYSM_ALLOY_MONITORED=false +#VC_TEKU_ALLOY_MONITORED=false +#MEV_MEV_BOOST_ALLOY_MONITORED=false +#MEV_COMMIT_BOOST_ALLOY_MONITORED=false +#EJECTOR_ALLOY_MONITORED=false +#DV_EXIT_ALLOY_MONITORED=false ######### Debug Config ######### diff --git a/.env.sample.mainnet b/.env.sample.mainnet index 6b84c9f..0dcffdc 100644 --- a/.env.sample.mainnet +++ b/.env.sample.mainnet @@ -161,14 +161,14 @@ MEV_RELAYS=https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b # Loki log aggregation server addresses. Disable loki log aggregation by setting an empty address. #CHARON_LOKI_ADDRESSES= -# Charon Cluster Name. Mandatory to send logs with Promtail. 
-#CLUSTER_NAME= +# Charon Cluster Name. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_NAME="" -# Charon Cluster Peer. Mandatory to send logs with Promtail. -#CLUSTER_PEER= +# Charon Cluster Peer. Mandatory to send logs with Alloy and metrics with Prometheus. +#CLUSTER_PEER="" # Nickname to identify this charon node on monitoring (max 32 characters). -#CHARON_NICKNAME= +#CHARON_NICKNAME="" # Docker network of running charon node. See `docker network ls`. #CHARON_DOCKER_NETWORK= @@ -196,23 +196,23 @@ MEV_RELAYS=https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b # Prometheus service owner used to uniquely identify user from which metrics are pushed. #SERVICE_OWNER=charon_user -# Uncomment these if you have log exporting with Promtail +# Uncomment these if you have log exporting with Alloy # and want to disable log export on a particular container. -#EL_NETHERMIND_PROMTAIL_MONITORED=false -#EL_RETH_PROMTAIL_MONITORED=false -#CL_LIGHTHOUSE_PROMTAIL_MONITORED=false -#CL_GRANDINE_PROMTAIL_MONITORED=false -#CL_TEKU_PROMTAIL_MONITORED=false -#CL_LODESTAR_PROMTAIL_MONITORED=false -#CHARON_PROMTAIL_MONITORED=false -#VC_LODESTAR_PROMTAIL_MONITORED=false -#VC_NIMBUS_PROMTAIL_MONITORED=false -#VC_PRYSM_PROMTAIL_MONITORED=false -#VC_TEKU_PROMTAIL_MONITORED=false -#MEV_MEV_BOOST_PROMTAIL_MONITORED=false -#MEV_COMMIT_BOOST_PROMTAIL_MONITORED=false -#EJECTOR_PROMTAIL_MONITORED=false -#DV_EXIT_PROMTAIL_MONITORED=false +#EL_NETHERMIND_ALLOY_MONITORED=false +#EL_RETH_ALLOY_MONITORED=false +#CL_LIGHTHOUSE_ALLOY_MONITORED=false +#CL_GRANDINE_ALLOY_MONITORED=false +#CL_TEKU_ALLOY_MONITORED=false +#CL_LODESTAR_ALLOY_MONITORED=false +#CHARON_ALLOY_MONITORED=false +#VC_LODESTAR_ALLOY_MONITORED=false +#VC_NIMBUS_ALLOY_MONITORED=false +#VC_PRYSM_ALLOY_MONITORED=false +#VC_TEKU_ALLOY_MONITORED=false +#MEV_MEV_BOOST_ALLOY_MONITORED=false +#MEV_COMMIT_BOOST_ALLOY_MONITORED=false +#EJECTOR_ALLOY_MONITORED=false +#DV_EXIT_ALLOY_MONITORED=false ######### 
Debug Config ######### diff --git a/alloy/config.alloy.example b/alloy/config.alloy.example new file mode 100644 index 0000000..91577b4 --- /dev/null +++ b/alloy/config.alloy.example @@ -0,0 +1,87 @@ +discovery.docker "docker" { + host = "unix:///var/run/docker.sock" +} + +loki.process "docker" { + forward_to = [loki.write.default.receiver] + + stage.docker { } +} + +discovery.relabel "docker" { + targets = discovery.docker.docker.targets + + rule { + source_labels = ["__meta_docker_container_label_alloy_monitored"] + regex = "true" + action = "keep" + } + + rule { + source_labels = ["__meta_docker_container_name"] + regex = "/(.*)" + target_label = "container" + } + + rule { + source_labels = ["container"] + regex = ".*charon.*" + target_label = "job" + replacement = "charon" + } + + rule { + source_labels = ["container"] + regex = ".*nethermind.*" + target_label = "job" + replacement = "nethermind" + } + + rule { + source_labels = ["container"] + regex = ".*lodestar.*" + target_label = "job" + replacement = "lodestar" + } + + rule { + source_labels = ["container"] + regex = ".*lighthouse.*" + target_label = "job" + replacement = "lighthouse" + } + + rule { + source_labels = ["container"] + regex = ".*mev-boost.*" + target_label = "job" + replacement = "mev-boost" + } + + rule { + target_label = "cluster_name" + replacement = "$CLUSTER_NAME" + } + + rule { + target_label = "cluster_peer" + replacement = "$CLUSTER_PEER" + } +} + +loki.source.docker "docker" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.docker.targets + forward_to = [loki.process.docker.receiver] + relabel_rules = discovery.relabel.docker.rules +} + +loki.write "default" { + endpoint { + url = "$CHARON_LOKI_ADDRESSES" + } + external_labels = { + cluster_name = "$CLUSTER_NAME", + cluster_peer = "$CLUSTER_PEER", + } +} diff --git a/alloy/run.sh b/alloy/run.sh new file mode 100755 index 0000000..a129011 --- /dev/null +++ b/alloy/run.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +if [ -z 
"${CHARON_LOKI_ADDRESSES:-}" ]; then + echo "Error: \$CHARON_LOKI_ADDRESSES variable is empty" >&2 + exit 1 +fi + +if [ -z "${CLUSTER_NAME:-}" ]; then + echo "Error: \$CLUSTER_NAME variable is empty" >&2 + exit 1 +fi + +if [ -z "${CLUSTER_PEER:-}" ]; then + echo "Error: \$CLUSTER_PEER variable is empty" >&2 + exit 1 +fi + +SRC="/etc/alloy/config.alloy.example" +DST="/etc/alloy/config.alloy" + +echo "Rendering template: $SRC -> $DST" + +sed -e "s|\$CHARON_LOKI_ADDRESSES|${CHARON_LOKI_ADDRESSES}|g" \ + -e "s|\$CLUSTER_NAME|${CLUSTER_NAME}|g" \ + -e "s|\$CLUSTER_PEER|${CLUSTER_PEER}|g" \ + "$SRC" > "$DST" + +echo "Config successfully rendered to $DST" + +# Execute the command passed as arguments, if any +if [ $# -gt 0 ]; then + echo "Executing: $*" + exec "$@" +fi diff --git a/compose-cl.yml b/compose-cl.yml index 809222e..2c13b54 100644 --- a/compose-cl.yml +++ b/compose-cl.yml @@ -15,7 +15,7 @@ services: image: sifrai/grandine:${GRANDINE_VERSION:-v2.0.0} restart: unless-stopped labels: - - "promtail-monitored=${CL_GRANDINE_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${CL_GRANDINE_ALLOY_MONITORED:-true}" command: - --data-dir=/root/.grandine - --eth1-rpc-urls=http://${EL}:8551 @@ -48,7 +48,7 @@ services: image: sigp/lighthouse:${LIGHTHOUSE_VERSION:-v8.0.0} restart: unless-stopped labels: - - "promtail-monitored=${CL_LIGHTHOUSE_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${CL_LIGHTHOUSE_ALLOY_MONITORED:-true}" command: | lighthouse bn --network=${NETWORK} @@ -83,7 +83,7 @@ services: image: consensys/teku:${VC_VERSION:-25.11.0} restart: unless-stopped labels: - - "promtail-monitored=${CL_TEKU_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${CL_TEKU_ALLOY_MONITORED:-true}" command: | --network=${NETWORK} --checkpoint-sync-url=${LIGHTHOUSE_CHECKPOINT_SYNC_URL} @@ -115,7 +115,7 @@ services: image: chainsafe/lodestar:${VC_VERSION:-v1.36.0} restart: unless-stopped labels: - - "promtail-monitored=${CL_LODESTAR_PROMTAIL_MONITORED:-true}" + - 
"alloy-monitored=${CL_LODESTAR_ALLOY_MONITORED:-true}" command: | beacon --network=${NETWORK} diff --git a/compose-el.yml b/compose-el.yml index 8032010..53a1a6c 100644 --- a/compose-el.yml +++ b/compose-el.yml @@ -19,7 +19,7 @@ services: - ${EL_IP_HTTP:-127.0.0.1}:${EL_PORT_HTTP:-8545}:8545 # JSON-RPC - ${EL_IP_ENGINE:-127.0.0.1}:${EL_PORT_ENGINE:-8551}:8551 # ENGINE-API labels: - - "promtail-monitored=${EL_NETHERMIND_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${EL_NETHERMIND_ALLOY_MONITORED:-true}" command: | --config=${NETWORK} --data-dir=/nethermind/data @@ -56,7 +56,7 @@ services: - ${EL_IP_HTTP:-127.0.0.1}:${EL_PORT_HTTP:-8545}:8545 # JSON-RPC - ${EL_IP_ENGINE:-127.0.0.1}:${EL_PORT_ENGINE:-8551}:8551 # ENGINE-API labels: - - "promtail-monitored=${EL_RETH_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${EL_RETH_ALLOY_MONITORED:-true}" command: | node --chain=${NETWORK} diff --git a/compose-mev.yml b/compose-mev.yml index bac5bf2..e684850 100644 --- a/compose-mev.yml +++ b/compose-mev.yml @@ -17,7 +17,7 @@ services: volumes: - ./commit-boost/config.toml:/etc/commit-boost/config.toml:ro labels: - - "promtail-monitored=${MEV_COMMIT_BOOST_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${MEV_COMMIT_BOOST_ALLOY_MONITORED:-true}" networks: [dvnode] restart: unless-stopped @@ -40,6 +40,6 @@ services: -request-timeout-getpayload=${MEV_TIMEOUT_GETPAYLOAD:-4000} -request-timeout-regval=${MEV_TIMEOUT_REGVAL:-3000} labels: - - "promtail-monitored=${MEV_MEV_BOOST_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${MEV_MEV_BOOST_ALLOY_MONITORED:-true}" networks: [dvnode] restart: unless-stopped diff --git a/compose-vc.yml b/compose-vc.yml index 48c932e..d69f316 100644 --- a/compose-vc.yml +++ b/compose-vc.yml @@ -21,7 +21,7 @@ services: BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true} BUILDER_SELECTION: ${VC_LODESTAR_BUILDER_SELECTION:-builderalways} labels: - - "promtail-monitored=${VC_LODESTAR_PROMTAIL_MONITORED:-true}" + - 
"alloy-monitored=${VC_LODESTAR_ALLOY_MONITORED:-true}" volumes: - ./lodestar/run.sh:/opt/lodestar/run.sh - .charon/validator_keys:/home/charon/validator_keys @@ -46,7 +46,7 @@ services: BEACON_NODE_ADDRESS: http://charon:3600 BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true} labels: - - "promtail-monitored=${VC_NIMBUS_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${VC_NIMBUS_ALLOY_MONITORED:-true}" volumes: - ./nimbus/run.sh:/home/user/data/run.sh - .charon/validator_keys:/home/validator_keys @@ -70,7 +70,7 @@ services: BEACON_NODE_ADDRESS: http://charon:3600 NETWORK: ${NETWORK} labels: - - "promtail-monitored=${VC_PRYSM_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${VC_PRYSM_ALLOY_MONITORED:-true}" volumes: - ./prysm/run.sh:/home/prysm/run.sh - ./data/vc-prysm:/data/vc @@ -100,7 +100,7 @@ services: depends_on: [charon] networks: [dvnode] labels: - - "promtail-monitored=${VC_TEKU_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${VC_TEKU_ALLOY_MONITORED:-true}" volumes: - .charon/validator_keys:/opt/charon/validator_keys - ./data/vc-teku:/home/data diff --git a/docker-compose.yml b/docker-compose.yml index fe7740a..e0eec28 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,7 +19,7 @@ services: - ${NETHERMIND_IP_HTTP:-127.0.0.1}:${NETHERMIND_PORT_HTTP:-8545}:8545 # JSON-RPC - ${NETHERMIND_IP_ENGINE:-127.0.0.1}:${NETHERMIND_PORT_ENGINE:-8551}:8551 # ENGINE-API labels: - - "promtail-monitored=${NETHERMIND_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${NETHERMIND_ALLOY_MONITORED:-true}" command: | --config=${NETWORK} --datadir=data @@ -55,7 +55,7 @@ services: - 5054:5054/tcp # P2P TCP - ${LIGHTHOUSE_PORT_P2P:-9000}:9000/udp # P2P UDP labels: - - "promtail-monitored=${LIGHTHOUSE_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${LIGHTHOUSE_ALLOY_MONITORED:-true}" command: | lighthouse bn --network=${NETWORK} @@ -107,7 +107,7 @@ services: ports: - ${CHARON_PORT_P2P_TCP:-3610}:${CHARON_PORT_P2P_TCP:-3610}/tcp # P2P TCP libp2p labels: - - 
"promtail-monitored=${CHARON_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${CHARON_ALLOY_MONITORED:-true}" networks: [dvnode] volumes: - .charon:/opt/charon/.charon @@ -133,7 +133,7 @@ services: BUILDER_API_ENABLED: ${BUILDER_API_ENABLED:-true} BUILDER_SELECTION: ${BUILDER_SELECTION:-builderalways} labels: - - "promtail-monitored=${LODESTAR_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${LODESTAR_ALLOY_MONITORED:-true}" volumes: - ./lodestar/run.sh:/opt/lodestar/run.sh - .charon/validator_keys:/home/charon/validator_keys @@ -156,7 +156,7 @@ services: -relay-check -relays=${MEVBOOST_RELAYS} labels: - - "promtail-monitored=${MEV_BOOST_PROMTAIL_MONITORED:-true}" + - "alloy-monitored=${MEV_BOOST_ALLOY_MONITORED:-true}" networks: [dvnode] restart: unless-stopped diff --git a/logging.yml b/logging.yml index b57c2bc..121c176 100644 --- a/logging.yml +++ b/logging.yml @@ -1,17 +1,16 @@ services: - - promtail: - image: grafana/promtail:${PROMTAIL_VERSION:-2.8.2} + alloy: + image: grafana/alloy:${ALLOY_VERSION:-v1.11.3} environment: CHARON_LOKI_ADDRESSES: ${CHARON_LOKI_ADDRESSES} CLUSTER_NAME: ${CLUSTER_NAME} CLUSTER_PEER: ${CLUSTER_PEER} - command: -config.file=/etc/promtail/config.yml volumes: - - ./promtail:/etc/promtail + - ./alloy:/etc/alloy - /var/run/docker.sock:/var/run/docker.sock networks: [dvnode] - entrypoint: /etc/promtail/run.sh + entrypoint: /etc/alloy/run.sh + command: ["/bin/alloy", "run", "/etc/alloy/config.alloy", "--storage.path=/var/lib/alloy/data"] restart: unless-stopped networks: diff --git a/promtail/config.yml.example b/promtail/config.yml.example deleted file mode 100644 index 87e6144..0000000 --- a/promtail/config.yml.example +++ /dev/null @@ -1,49 +0,0 @@ -server: - http_listen_port: 9080 - grpc_listen_port: 0 - -positions: - filename: /tmp/positions.yaml - -clients: - - url: $CHARON_LOKI_ADDRESSES - -scrape_configs: - - job_name: docker - docker_sd_configs: - - host: unix:///var/run/docker.sock - relabel_configs: - - source_labels: - 
[__meta_docker_container_label_promtail_monitored] - regex: "true" - action: keep - - source_labels: ['__meta_docker_container_name'] - regex: '/(.*)' - replacement: '$1' - target_label: 'container' - - source_labels: ['container'] - regex: '.*charon.*' - replacement: 'charon' - target_label: 'job' - - source_labels: ['container'] - regex: '.*nethermind.*' - replacement: 'nethermind' - target_label: 'job' - - source_labels: ['container'] - regex: '.*lodestar.*' - replacement: 'lodestar' - target_label: 'job' - - source_labels: ['container'] - regex: '.*lighthouse.*' - replacement: 'lighthouse' - target_label: 'job' - - source_labels: ['container'] - regex: '.*mev-boost.*' - replacement: 'mev-boost' - target_label: 'job' - - target_label: 'cluster_name' - replacement: $CLUSTER_NAME - - target_label: 'cluster_peer' - replacement: $CLUSTER_PEER - pipeline_stages: - - docker: {} diff --git a/promtail/run.sh b/promtail/run.sh deleted file mode 100755 index f8d3336..0000000 --- a/promtail/run.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh - -if [ -z "$CHARON_LOKI_ADDRESSES" ]; then - echo "Error: \$CHARON_LOKI_ADDRESSES variable is empty" >&2 - exit 1 -fi - -if [ -z "$CLUSTER_NAME" ]; then - echo "Error: \$CLUSTER_NAME variable is empty" >&2 - exit 1 -fi - -if [ -z "$CLUSTER_PEER" ]; then - echo "Error: \$CLUSTER_PEER variable is empty" >&2 - exit 1 -fi - -# Process the template file once -sed -e "s|\$CHARON_LOKI_ADDRESSES|${CHARON_LOKI_ADDRESSES}|g" \ - -e "s|\$CLUSTER_NAME|${CLUSTER_NAME}|g" \ - -e "s|\$CLUSTER_PEER|${CLUSTER_PEER}|g" \ - /etc/promtail/config.yml.example > /etc/promtail/config.yml - -# Start Promtail with the generated config -/usr/bin/promtail \ - -config.file=/etc/promtail/config.yml