# support
h
I got this error. Does anyone know how to fix it? Thanks.
i
I think I've run into this error a couple of times. I just took the containers down and brought them back up with:
sudo docker-compose -f ../deploy/docker/clickhouse-setup/docker-compose.yaml down --remove-orphan
Then:
sudo docker-compose -f ../deploy/docker/clickhouse-setup/docker-compose.yaml up -d
h
I use Windows, so there is no "sudo" command.
i
Try:
docker compose -f docker/clickhouse-setup/docker-compose.yaml down --remove-orphan
Then:
docker compose -f docker/clickhouse-setup/docker-compose.yaml up -d
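If docker compose (with a space) isn't recognized, you can check whether the Compose v2 plugin is available with:
docker compose version
Docker Desktop on Windows normally ships it; if you only have the older docker-compose (with a hyphen), the same subcommands and flags should still work with that binary.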
h
I don't have that command either. Here is the log of this container:
i
Which command don't you have? This one:
docker compose
h
I don't have the "down --remove-orphan" command.
h
I made a mistake because of the missing "s" (it should be --remove-orphans). But it still doesn't work, and I get the same error.
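For the record, the corrected pair of commands, run from the deploy/ directory, should be:
docker compose -f docker/clickhouse-setup/docker-compose.yaml down --remove-orphans
docker compose -f docker/clickhouse-setup/docker-compose.yaml up -d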
i
Open the docker-compose.yaml file and tell me which version you are using. Line 1 of my docker-compose.yaml:
version: "2.4"
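If you want to check them all at once (assuming you have Git Bash available, since you cloned the repo with git), something like this from the deploy/ directory should print the version line of every compose file:
grep -rn --include='docker-compose*.yaml' '^version:' .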
h
All of them use 2.4, except the docker-compose file in the docker-swarm folder, which uses version 3.9.
i
I'm not sure I understood you correctly. If you're using the Docker Compose file located in the docker-swarm folder, that's the one whose "version" attribute you need to check. My Docker Compose file in the docker-swarm folder is set to version 3.9.
h
The docker-compose file in the docker folder uses version 2.4; the docker-compose file in the docker-swarm folder uses version 3.9.
i
Okay, then I'm out of ideas.
h
I ran them according to the documentation.
Step 1: git clone -b main https://github.com/SigNoz/signoz.git && cd signoz/deploy/
Step 2: docker compose -f docker/clickhouse-setup/docker-compose.yaml up -d
So I don't know which directory docker-compose will run in.
Thanks
i
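About the directory question: Compose resolves relative host paths against the directory of the compose file itself, not against your shell's working directory. So with the documented steps, a bind mount like ./clickhouse-config.xml resolves under signoz/deploy/docker/clickhouse-setup/:
cd signoz/deploy/
docker compose -f docker/clickhouse-setup/docker-compose.yaml up -d
# ./clickhouse-config.xml in the file -> signoz/deploy/docker/clickhouse-setup/clickhouse-config.xml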
Maybe try mine: I removed the frontend container and the service_completed_successfully condition. That's the last thing I can think of.
version: "2.4"

x-clickhouse-defaults: &clickhouse-defaults
  restart: on-failure
  # adding non-LTS version due to this fix: https://github.com/ClickHouse/ClickHouse/commit/32caf8716352f45c1b617274c7508c86b7d1afab
  image: clickhouse/clickhouse-server:24.1.2-alpine
  tty: true
  depends_on:
    - zookeeper-1
    # - zookeeper-2
    # - zookeeper-3
  logging:
    options:
      max-size: 50m
      max-file: "3"
  healthcheck:
    # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
    test:
      [
        "CMD",
        "wget",
        "--spider",
        "-q",
        "0.0.0.0:8123/ping"
      ]
    interval: 30s
    timeout: 5s
    retries: 3
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144

x-db-depend: &db-depend
  depends_on:
    clickhouse:
      condition: service_healthy
    #otel-collector-migrator:
    #  condition: service_completed_successfully
    # clickhouse-2:
    #   condition: service_healthy
    # clickhouse-3:
    #   condition: service_healthy

services:

  zookeeper-1:
    image: bitnami/zookeeper:3.7.1
    container_name: signoz-zookeeper-1
    hostname: zookeeper-1
    user: root
    ports:
      - "2181:2181"
      - "2888:2888"
      - "3888:3888"
    volumes:
      - ./data/zookeeper-1:/bitnami/zookeeper
    environment:
      - ZOO_SERVER_ID=1
      # - ZOO_SERVERS=0.0.0.0:2888:3888,zookeeper-2:2888:3888,zookeeper-3:2888:3888
      - ALLOW_ANONYMOUS_LOGIN=yes
      - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-2:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-2
  #   hostname: zookeeper-2
  #   user: root
  #   ports:
  #     - "2182:2181"
  #     - "2889:2888"
  #     - "3889:3888"
  #   volumes:
  #     - ./data/zookeeper-2:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=2
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,0.0.0.0:2888:3888,zookeeper-3:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  # zookeeper-3:
  #   image: bitnami/zookeeper:3.7.0
  #   container_name: signoz-zookeeper-3
  #   hostname: zookeeper-3
  #   user: root
  #   ports:
  #     - "2183:2181"
  #     - "2890:2888"
  #     - "3890:3888"
  #   volumes:
  #     - ./data/zookeeper-3:/bitnami/zookeeper
  #   environment:
  #     - ZOO_SERVER_ID=3
  #     - ZOO_SERVERS=zookeeper-1:2888:3888,zookeeper-2:2888:3888,0.0.0.0:2888:3888
  #     - ALLOW_ANONYMOUS_LOGIN=yes
  #     - ZOO_AUTOPURGE_INTERVAL=1

  clickhouse:
    <<: *clickhouse-defaults
    container_name: signoz-clickhouse
    hostname: clickhouse
    ports:
      - "9000:9000"
      - "8123:8123"
      - "9181:9181"
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
      - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
      - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
      # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
      - ./data/clickhouse/:/var/lib/clickhouse/
      - ./user_scripts:/var/lib/clickhouse/user_scripts/

  # clickhouse-2:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-2
  #   hostname: clickhouse-2
  #   ports:
  #     - "9001:9000"
  #     - "8124:8123"
  #     - "9182:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-2/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/


  # clickhouse-3:
  #   <<: *clickhouse-defaults
  #   container_name: signoz-clickhouse-3
  #   hostname: clickhouse-3
  #   ports:
  #     - "9002:9000"
  #     - "8125:8123"
  #     - "9183:9181"
  #   volumes:
  #     - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
  #     - ./clickhouse-users.xml:/etc/clickhouse-server/users.xml
  #     - ./custom-function.xml:/etc/clickhouse-server/custom-function.xml
  #     - ./clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
  #     # - ./clickhouse-storage.xml:/etc/clickhouse-server/config.d/storage.xml
  #     - ./data/clickhouse-3/:/var/lib/clickhouse/
  #     - ./user_scripts:/var/lib/clickhouse/user_scripts/

  alertmanager:
    image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.5}
    container_name: signoz-alertmanager
    volumes:
      - ./data/alertmanager:/data
    depends_on:
      query-service:
        condition: service_healthy
    restart: on-failure
    command:
      - --queryService.url=http://query-service:8085
      - --storage.path=/data

  # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`

  query-service:
    image: signoz/query-service:${DOCKER_TAG:-0.49.1}
    container_name: signoz-query-service
    command:
      [
        "-config=/root/config/prometheus.yml"
        # "--prefer-delta=true"
      ]
    ports:
    #   - "6060:6060"     # pprof port
      - "8080:8080"     # query-service port
    volumes:
      - ./prometheus.yml:/root/config/prometheus.yml
      - ../dashboards:/root/config/dashboards
      - ./data/signoz/:/var/lib/signoz/
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
      - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
      - DASHBOARDS_PATH=/root/config/dashboards
      - STORAGE=clickhouse
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-standalone-amd
    restart: on-failure
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "-q",
          "localhost:8080/api/v1/health"
        ]
      interval: 30s
      timeout: 5s
      retries: 3
    <<: *db-depend

  #frontend:
  #  image: signoz/frontend:${DOCKER_TAG:-0.49.1}
  #  container_name: signoz-frontend
  #  restart: on-failure
  #  depends_on:
  #    - alertmanager
  #    - query-service
  #  ports:
  #    - "3301:3301"
  #  volumes:
  #    - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector-migrator:
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.102.2}
    container_name: otel-migrator
    command:
      - "--dsn=<tcp://clickhouse:9000>"
    depends_on:
      clickhouse:
        condition: service_healthy
      # clickhouse-2:
      #   condition: service_healthy
      # clickhouse-3:
      #   condition: service_healthy


  otel-collector:
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.102.2}
    container_name: signoz-otel-collector
    command:
      [
        "--config=/etc/otel-collector-config.yaml",
        "--manager-config=/etc/manager-config.yaml",
        "--copy-path=/var/tmp/collector-config.yaml",
        "--feature-gates=-pkg.translator.prometheus.NormalizeName"
      ]
    user: root # required for reading docker container logs
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
      - ./otel-collector-opamp-config.yaml:/etc/manager-config.yaml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    environment:
      - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
      - DOCKER_MULTI_NODE_CLUSTER=false
      - LOW_CARDINAL_EXCEPTION_GROUPING=false
    ports:
      # - "1777:1777"     # pprof extension
      - "4317:4317" # OTLP gRPC receiver
      - "4318:4318" # OTLP HTTP receiver
      # - "8888:8888"     # OtelCollector internal metrics
      # - "8889:8889"     # signoz spanmetrics exposed by the agent
      # - "9411:9411"     # Zipkin port
      # - "13133:13133"   # health check extension
      # - "14250:14250"   # Jaeger gRPC
      # - "14268:14268"   # Jaeger thrift HTTP
      # - "55678:55678"   # OpenCensus receiver
      # - "55679:55679"   # zPages extension
    restart: on-failure
    depends_on:
      clickhouse:
        condition: service_healthy
      #otel-collector-migrator:
      #  condition: service_completed_successfully
      query-service:
        condition: service_healthy

  logspout:
    image: "gliderlabs/logspout:v3.2.14"
    container_name: signoz-logspout
    volumes:
      - /etc/hostname:/etc/host_hostname:ro
      - /var/run/docker.sock:/var/run/docker.sock
    command: syslog+tcp://otel-collector:2255
    depends_on:
      - otel-collector
    restart: on-failure

  hotrod:
    image: jaegertracing/example-hotrod:1.30
    container_name: hotrod
    logging:
      options:
        max-size: 50m
        max-file: "3"
    command: [ "all" ]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "signoz/locust:1.2.3"
    container_name: load-hotrod
    hostname: load-hotrod
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
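To try it, back up the original deploy/docker/clickhouse-setup/docker-compose.yaml, save this file in its place, and recreate the stack:
cd signoz/deploy/
docker compose -f docker/clickhouse-setup/docker-compose.yaml down --remove-orphans
docker compose -f docker/clickhouse-setup/docker-compose.yaml up -d --force-recreate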