Angeles MP
03/15/2023, 5:08 PM
$ node -r ./tracing.js app.js
/Users/amora/Workspace/gta-api/tracing.js:33
.then(() => console.log('Tracing initialized'))
^
TypeError: Cannot read properties of undefined (reading 'then')
    at Object.<anonymous> (/Users/amora/Workspace/gta-api/tracing.js:33:5)
    at Module._compile (node:internal/modules/cjs/loader:1101:14)
    at Object.Module._extensions..js (node:internal/modules/cjs/loader:1153:10)
    at Module.load (node:internal/modules/cjs/loader:981:32)
    at Function.Module._load (node:internal/modules/cjs/loader:822:12)
    at Module.require (node:internal/modules/cjs/loader:1005:19)
    at Module._preloadModules (node:internal/modules/cjs/loader:1276:12)
    at loadPreloadModules (node:internal/bootstrap/pre_execution:484:5)
    at prepareMainThreadExecution (node:internal/bootstrap/pre_execution:78:3)
    at node:internal/main/run_main_module:7:1
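This TypeError typically appears with @opentelemetry/sdk-node 0.34 and later, where NodeSDK.start() became synchronous and returns undefined, so the .then() chain used in older setup examples has nothing to attach to. Below is a minimal sketch of a corrected tracing.js, assuming the common OpenTelemetry Node bootstrap pattern; the collector endpoint URL is an assumption for illustration, not taken from the thread.

// tracing.js — a minimal sketch assuming the common OpenTelemetry Node
// bootstrap; the exporter URL below is illustrative.
'use strict';
const process = require('process');
const opentelemetry = require('@opentelemetry/sdk-node');
const { getNodeAutoInstrumentations } = require('@opentelemetry/auto-instrumentations-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http');

const sdk = new opentelemetry.NodeSDK({
  traceExporter: new OTLPTraceExporter({
    url: 'http://localhost:4318/v1/traces', // assumed collector endpoint
  }),
  instrumentations: [getNodeAutoInstrumentations()],
});

// Since @opentelemetry/sdk-node 0.34, start() is synchronous and returns
// undefined, so chaining .then() on it throws exactly the TypeError above.
sdk.start();
console.log('Tracing initialized');

// shutdown() still returns a Promise, so .then() is fine here.
process.on('SIGTERM', () => {
  sdk.shutdown()
    .then(() => console.log('Tracing terminated'))
    .finally(() => process.exit(0));
});

Dropping the .then() on start() (or pinning @opentelemetry/sdk-node below 0.34) resolves the crash.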
Prashant Shahi
03/15/2023, 5:12 PM
Angeles MP
03/16/2023, 7:53 AM
Prashant Shahi
03/16/2023, 11:57 AM
Vishal Sharma
03/16/2023, 12:10 PM
Angeles MP
03/16/2023, 1:19 PM
$ cat package.json
{
  "name": "gta-api",
  "version": "1.0.0",
  "description": "",
  "main": "app.ts",
  "scripts": {
    "start": "node app.js"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@opentelemetry/api": "^1.4.1",
    "@opentelemetry/auto-instrumentations-node": "^0.36.4",
    "@opentelemetry/exporter-trace-otlp-http": "^0.36.0",
    "@opentelemetry/sdk-node": "^0.36.0",
    "axios": "^0.21.1",
    "express": "^4.17.1",
    "helmet": "^4.6.0",
    "http-proxy-middleware": "^2.0.1",
    "https": "^1.0.0",
    "qs": "^6.7.0"
  },
  "devDependencies": {
    "axios-debug-log": "^0.8.4"
  }
}
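The dependency block above confirms the version at play: "@opentelemetry/sdk-node": "^0.36.0" resolves to a release with the synchronous start() described earlier. One way to check which version actually got installed, assuming npm is the package manager:

$ npm ls @opentelemetry/sdk-node

Two incidental details in this manifest are worth flagging: "main": "app.ts" points at a TypeScript entry while the start script runs node app.js, and the "https" entry appears to install a third-party placeholder package from npm; Node's built-in https module needs no install.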
$ cat otel-collector-config.yaml
receivers:
  filelog/dockercontainers:
    include: [ "/var/lib/docker/containers/*/*.log" ]
    start_at: end
    include_file_path: true
    include_file_name: false
    operators:
      - type: json_parser
        id: parser-docker
        output: extract_metadata_from_filepath
        timestamp:
          parse_from: attributes.time
          layout: '%Y-%m-%dT%H:%M:%S.%LZ'
      - type: regex_parser
        id: extract_metadata_from_filepath
        regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
        parse_from: attributes["log.file.path"]
        output: parse_body
      - type: move
        id: parse_body
        from: attributes.log
        to: body
        output: time
      - type: remove
        id: time
        field: attributes.time
  opencensus:
    endpoint: 0.0.0.0:55678
  otlp/spanmetrics:
    protocols:
      grpc:
        endpoint: localhost:12345
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        cors:
          allowed_origins:
            - https://127.0.0.1:4200
            - http://127.0.0.1:4200
            - https://localhost:3333
            - http://localhost:3333
        endpoint: 0.0.0.0:4318
  jaeger:
    protocols:
      grpc:
        endpoint: 0.0.0.0:14250
      thrift_http:
        endpoint: 0.0.0.0:14268
      # thrift_compact:
      #   endpoint: 0.0.0.0:6831
      # thrift_binary:
      #   endpoint: 0.0.0.0:6832
  hostmetrics:
    collection_interval: 30s
    scrapers:
      cpu: {}
      load: {}
      memory: {}
      disk: {}
      filesystem: {}
      network: {}
  prometheus:
    config:
      global:
        scrape_interval: 60s
      scrape_configs:
        # otel-collector internal metrics
        - job_name: otel-collector
          static_configs:
            - targets:
                - localhost:8888
              labels:
                job_name: otel-collector
processors:
  batch:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  signozspanmetrics/prometheus:
    metrics_exporter: prometheus
    latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
    dimensions_cache_size: 100000
    dimensions:
      - name: service.namespace
        default: default
      - name: deployment.environment
        default: default
      # This is added to ensure the uniqueness of the timeseries
      # Otherwise, identical timeseries produced by multiple replicas of
      # collectors result in incorrect APM metrics
      - name: 'signoz.collector.id'
  # memory_limiter:
  #   # 80% of maximum memory up to 2G
  #   limit_mib: 1500
  #   # 25% of limit up to 2G
  #   spike_limit_mib: 512
  #   check_interval: 5s
  #
  #   # 50% of the maximum memory
  #   limit_percentage: 50
  #   # 20% of max memory usage spike expected
  #   spike_limit_percentage: 20
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system] # include ec2 for AWS, gce for GCP and azure for Azure.
    timeout: 2s
extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679
  pprof:
    endpoint: 0.0.0.0:1777
exporters:
  clickhousetraces:
    datasource: tcp://clickhouse:9000/?database=signoz_traces
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
  clickhousemetricswrite:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
    resource_to_telemetry_conversion:
      enabled: true
  clickhousemetricswrite/prometheus:
    endpoint: tcp://clickhouse:9000/?database=signoz_metrics
  prometheus:
    endpoint: 0.0.0.0:8889
  # logging: {}
  clickhouselogsexporter:
    dsn: tcp://clickhouse:9000/
    docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
    timeout: 5s
    sending_queue:
      queue_size: 100
    retry_on_failure:
      enabled: true
      initial_interval: 5s
      max_interval: 30s
      max_elapsed_time: 300s
service:
  telemetry:
    metrics:
      address: 0.0.0.0:8888
  extensions:
    - health_check
    - zpages
    - pprof
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [signozspanmetrics/prometheus, batch]
      exporters: [clickhousetraces]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhousemetricswrite]
    metrics/generic:
      receivers: [hostmetrics]
      processors: [resourcedetection, batch]
      exporters: [clickhousemetricswrite]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [clickhousemetricswrite/prometheus]
    metrics/spanmetrics:
      receivers: [otlp/spanmetrics]
      exporters: [prometheus]
    logs:
      receivers: [otlp, filelog/dockercontainers]
      processors: [batch]
      exporters: [clickhouselogsexporter]
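For the trace path itself: the app's OTLP exporter should target this collector's otlp receiver (gRPC on 4317 or HTTP on 4318), and the traces pipeline then routes spans through signozspanmetrics/prometheus and batch into clickhousetraces. A quick way to confirm the collector is reachable before pointing the app at it, assuming it runs on the same host, is the health_check extension configured above:

$ curl http://localhost:13133/

A running collector should answer with a small JSON status payload.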
Vishal Sharma
03/16/2023, 1:44 PM
Angeles MP
03/16/2023, 6:10 PM