Hi Team, I am deploying two SigNoz instances using different ports on one instance. All containers are running...
Hi Team, I am deploying two SigNoz instances using different ports on one instance. All containers are running properly. The first SigNoz's otel-collector uses ports 4317 and 4318; the second SigNoz's otel-collector uses ports 4319 and 4320. The issue I am facing is that when I send logs to either port 4317 or 4319, they go to the first SigNoz. Kindly suggest where I have to make changes in the configuration file; I already made the changes in the signoz-otel-collector-config.yaml file. Below is the second SigNoz "otel-collector-config.yaml" file: receivers: tcplog/docker: listen_address: "0.0.0.0:2255" operators: - type: regex_parser regex: '^<([0-9]+)>[0-9]+ (?P<timestamp>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?) (?P<container_id>\S+) (?P<container_name>\S+) [0-9]+ - -( (?P<body>.*))?' timestamp: parse_from: attributes.timestamp layout: '%Y-%m-%dT%H:%M:%S.%LZ' - type: move from: attributes["body"] to: body - type: remove field: attributes.timestamp # please remove names from below if you want to collect logs from them - type: filter id: signoz_logs_filter expr: 'attributes.container_name matches "^signoz-(logspout*|frontend|alertmanager|query-service*|otel-collector|clickhouse|zookeeper)"' opencensus: endpoint: 0.0.0.0:55678 otlp: protocols: grpc: endpoint: 0.0.0.0:4319 http: endpoint: 0.0.0.0:4320 jaeger: protocols: grpc: endpoint: 0.0.0.0:14251 thrift_http: endpoint: 0.0.0.0:14269 # thrift_compact: # endpoint: 0.0.0.0:6831 # thrift_binary: # endpoint: 0.0.0.0:6832 hostmetrics: collection_interval: 30s root_path: /hostfs scrapers: cpu: {} load: {} memory: {} disk: {} filesystem: {} network: {} prometheus: config: global: scrape_interval: 60s scrape_configs: # otel-collector internal metrics - job_name: otel-collector static_configs: - targets: - localhost:8888 labels: job_name: otel-collector processors: batch: send_batch_size: 10000 send_batch_max_size: 11000 timeout: 10s signozspanmetrics/cumulative: 
metrics_exporter: clickhousemetricswrite metrics_flush_interval: 60s latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] dimensions_cache_size: 100000 dimensions: - name: service.namespace default: default - name: deployment.environment default: default # This is added to ensure the uniqueness of the timeseries # Otherwise, identical timeseries produced by multiple replicas of # collectors result in incorrect APM metrics - name: signoz.collector.id - name: service.version - name: browser.platform - name: browser.mobile - name: k8s.cluster.name - name: k8s.node.name - name: k8s.namespace.name - name: host.name - name: host.type - name: container.name # memory_limiter: # # 80% of maximum memory up to 2G # limit_mib: 1500 # # 25% of limit up to 2G # spike_limit_mib: 512 # check_interval: 5s # # # 50% of the maximum memory # limit_percentage: 50 # # 20% of max memory usage spike expected # spike_limit_percentage: 20 # queued_retry: # num_workers: 4 # queue_size: 100 # retry_on_failure: true resourcedetection: # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels. detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure. 
timeout: 2s signozspanmetrics/delta: metrics_exporter: clickhousemetricswrite metrics_flush_interval: 60s latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ] dimensions_cache_size: 100000 aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA enable_exp_histogram: true dimensions: - name: service.namespace default: default - name: deployment.environment default: default # This is added to ensure the uniqueness of the timeseries # Otherwise, identical timeseries produced by multiple replicas of # collectors result in incorrect APM metrics - name: signoz.collector.id - name: service.version - name: browser.platform - name: browser.mobile - name: k8s.cluster.name - name: k8s.node.name - name: k8s.namespace.name - name: host.name - name: host.type - name: container.name extensions: health_check: endpoint: 0.0.0.0:13133 zpages: endpoint: 0.0.0.0:55679 pprof: endpoint: 0.0.0.0:1777 exporters: clickhousetraces: datasource: tcp://clickhouse:9000/signoz_traces docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING} clickhousemetricswrite: endpoint: tcp://clickhouse:9000/signoz_metrics resource_to_telemetry_conversion: enabled: true clickhousemetricswrite/prometheus: endpoint: tcp://clickhouse:9000/signoz_metrics clickhouselogsexporter: dsn: tcp://clickhouse:9000/signoz_logs docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER} timeout: 10s use_new_schema: true # logging: {} service: telemetry: logs: encoding: json metrics: address: 0.0.0.0:8888 extensions: - health_check - zpages - pprof pipelines: traces: receivers: [jaeger, otlp] processors: [signozspanmetrics/cumulative, signozspanmetrics/delta, batch] exporters: [clickhousetraces] metrics: receivers: [otlp] processors: [batch] exporters: [clickhousemetricswrite] metrics/generic: receivers: [hostmetrics] processors: [resourcedetection, batch] exporters: 
[clickhousemetricswrite] metrics/prometheus: receivers: [prometheus] processors: [batch] exporters: [clickhousemetricswrite/prometheus] logs: receivers: [otlp, tcplog/docker] processors: [batch] exporters: [clickhouselogsexporter]