Slackbot
06/15/2023, 10:53 AM — Srikanth Chekuri
06/15/2023, 10:55 AMk8sattributes
processor to the pipeline. See https://github.com/SigNoz/charts/blob/1e4bd24b3d6e913d6d9463250b5a273ae576b151/charts/signoz/values.yaml#L1590 for example.Vishnu Teja Vallala
06/15/2023, 11:28 AMVishnu Teja Vallala
07/28/2023, 10:28 AMSrikanth Chekuri
07/28/2023, 10:56 AM — `k8s.pod.name` in the association rules. You would put it in `extract.metadata`. Example:
# k8sattributes processor fragment (goes under `processors: k8sattributes:`).
# pod_association decides HOW telemetry is matched to a pod (tried in order);
# extract.metadata lists WHICH pod attributes get attached once matched.
# NOTE(review): original paste had its indentation flattened — structure
# reconstructed to match the k8sattributes schema shown in the rendered
# config later in this thread.
pod_association:
  # 1st choice: match by the k8s.pod.ip resource attribute
  - sources:
      - from: resource_attribute
        name: k8s.pod.ip
  # 2nd choice: match by the k8s.pod.uid resource attribute
  - sources:
      - from: resource_attribute
        name: k8s.pod.uid
  # fallback: match by the connection's source address
  - sources:
      - from: connection
extract:
  metadata:
    - k8s.namespace.name
    - k8s.pod.name
    - k8s.pod.uid
    - k8s.pod.start_time
    - k8s.deployment.name
Vishnu Teja Vallala
07/28/2023, 11:22 AMSrikanth Chekuri
07/28/2023, 4:35 PMVishnu Teja Vallala
07/30/2023, 12:53 PMotelCollector:
name: "otel-collector"
replicaCount: 2
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: "1"
memory: 2Gi
config:
service:
pipelines:
traces:
processors:
- signozspanmetrics/prometheus
- k8sattributes
- batch
nodeSelector:
project: signoz
tolerations:
- key: "app"
value: "dev-signoz"
operator: "Equal"
effect: "NoSchedule"
Do you see any issue with this config for otelCollector? — Srikanth Chekuri
07/30/2023, 1:39 PMVishnu Teja Vallala
07/31/2023, 5:49 AM# Source: signoz/templates/otel-collector/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: release-name-signoz-otel-collector
labels:
<http://helm.sh/chart|helm.sh/chart>: signoz-0.20.0
<http://app.kubernetes.io/name|app.kubernetes.io/name>: signoz
<http://app.kubernetes.io/instance|app.kubernetes.io/instance>: release-name
<http://app.kubernetes.io/component|app.kubernetes.io/component>: otel-collector
<http://app.kubernetes.io/version|app.kubernetes.io/version>: "0.24.0"
<http://app.kubernetes.io/managed-by|app.kubernetes.io/managed-by>: Helm
data:
otel-collector-config.yaml: |-
exporters:
clickhouselogsexporter:
dsn: tcp://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}/?username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}
retry_on_failure:
enabled: true
initial_interval: 5s
max_elapsed_time: 300s
max_interval: 30s
sending_queue:
queue_size: 100
timeout: 10s
clickhousemetricswrite:
endpoint: tcp://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}/?database=${CLICKHOUSE_DATABASE}&username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}
resource_to_telemetry_conversion:
enabled: true
clickhousetraces:
datasource: tcp://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT}/?database=${CLICKHOUSE_TRACE_DATABASE}&username=${CLICKHOUSE_USER}&password=${CLICKHOUSE_PASSWORD}
low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
prometheus:
endpoint: 0.0.0.0:8889
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: localhost:1777
zpages:
endpoint: localhost:55679
processors:
batch:
send_batch_size: 50000
timeout: 1s
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
- k8s.deployment.name
- k8s.node.name
filter:
node_from_env_var: K8S_NODE_NAME
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
logstransform/internal:
operators:
- if: '"trace_id" in attributes or "span_id" in attributes'
output: remove_trace_id
span_id:
parse_from: attributes.span_id
trace_id:
parse_from: attributes.trace_id
type: trace_parser
- if: '"traceId" in attributes or "spanId" in attributes'
output: remove_traceId
span_id:
parse_from: attributes.spanId
trace_id:
parse_from: attributes.traceId
type: trace_parser
- field: attributes.traceId
id: remove_traceId
if: '"traceId" in attributes'
output: remove_spanId
type: remove
- field: attributes.spanId
id: remove_spanId
if: '"spanId" in attributes'
type: remove
- field: attributes.trace_id
id: remove_trace_id
if: '"trace_id" in attributes'
output: remove_span_id
type: remove
- field: attributes.span_id
id: remove_span_id
if: '"span_id" in attributes'
type: remove
memory_limiter: null
resourcedetection:
detectors:
- env
- system
system:
hostname_sources:
- dns
- os
timeout: 2s
signozspanmetrics/prometheus:
dimensions:
- default: default
name: service.namespace
- default: default
name: deployment.environment
- name: signoz.collector.id
dimensions_cache_size: 100000
latency_histogram_buckets:
- 100us
- 1ms
- 2ms
- 6ms
- 10ms
- 50ms
- 100ms
- 250ms
- 500ms
- 1000ms
- 1400ms
- 2000ms
- 5s
- 10s
- 20s
- 40s
- 60s
metrics_exporter: prometheus
receivers:
hostmetrics:
collection_interval: 30s
scrapers:
cpu: {}
disk: {}
filesystem: {}
load: {}
memory: {}
network: {}
jaeger:
protocols:
grpc:
endpoint: 0.0.0.0:14250
thrift_http:
endpoint: 0.0.0.0:14268
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
max_recv_msg_size_mib: 16
http:
endpoint: 0.0.0.0:4318
otlp/spanmetrics:
protocols:
grpc:
endpoint: localhost:12345
service:
extensions:
- health_check
- zpages
- pprof
pipelines:
logs:
exporters:
- clickhouselogsexporter
processors:
- logstransform/internal
- batch
receivers:
- otlp
metrics:
exporters:
- clickhousemetricswrite
processors:
- batch
receivers:
- otlp
metrics/internal:
exporters:
- clickhousemetricswrite
processors:
- resourcedetection
- k8sattributes
- batch
receivers:
- hostmetrics
metrics/spanmetrics:
exporters:
- prometheus
receivers:
- otlp/spanmetrics
traces:
exporters:
- clickhousetraces
processors:
- signozspanmetrics/prometheus
- batch
receivers:
- otlp
- jaeger
telemetry:
metrics:
address: 0.0.0.0:8888
---
Srikanth Chekuri
07/31/2023, 6:10 AMVishnu Teja Vallala
07/31/2023, 6:24 AMSrikanth Chekuri
07/31/2023, 6:25 AMor when i apply helm upgrade --debug, the result config?Yes, what is the result when do dry-run with override-values.yaml instead of the actual upgrade
Vishnu Teja Vallala
07/31/2023, 6:46 AM