<@U02NBMHMJ9L> <@U02SS3ZAMKQ> Any idea about the below error that we are getting in clickhouse pods and...
s

sudhanshu dev

8 months ago
@Vishal Sharma @Prashant Shahi Any idea about below error. that we are getting in clickhouse pods and pods keep restarting. I understood it is related to the insufficient mem. {"date_time":"1736745070.040729","thread_name":"","thread_id":"580","level":"Error","query_id":"","logger_name":"MergeTreeBackgroundExecutor","message":"Exception while executing background task {7165cb86-e3b3-4832-9778-cf6a91b2273c:20250111 18913 18971 1} Code: 241. DB:Exception Memory limit (total) exceeded: would use 6.83 GiB (attempt to allocate chunk of 4224032 bytes), maximum: 6.79 GiB. OvercommitTracker decision: Memory overcommit isn't used. Waiting time or overcommit denominator are set to zero.: (while reading column scope_string): (while reading from part \/var\/lib\/clickhouse\/store\/716\/7165cb86-e3b3-4832-9778-cf6a91b2273c\/20250111_18931_18931_0\/ in table signoz_logs.logs_v2 (7165cb86-e3b3-4832-9778-cf6a91b2273c) located on disk default of type local, from mark 0 with max_rows_to_read = 5888): While executing MergeTreeSequentialSource. (MEMORY_LIMIT_EXCEEDED), Stack trace (when copying this message, always include the lines below):\n\n0. DB:Exception:Exception(DB:Exception:MessageMasked&&, int, bool) @ 0x000000000c800f1b in \/usr\/bin\/clickhouse\n1. DB:Exception:Exception<char const*, char const*, String, long&, String, char const*, std::basic_string_view<char, std::char_traits<char>>>(int, FormatStringHelperImpl<std::type_identity<char const*>::type, std::type_identity<char const*>::type, std:type identity&lt;String&gt;:type, std:type identity&lt;long&amp;&gt;:type, std:type identity&lt;String&gt;:type, std::type_identity<char const*>::type, std:type identity&lt;std:basic_string_view<char, std:char traits&lt;char&gt;&gt;&gt;:type>, char const*&&, char const*&&, String&&, long&, String&&, char const*&&, std::basic_string_view<char, std::char_traits<char>>&&) @ 0x000000000c816d0a in \/usr\/bin\/clickhouse\n2. 
MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816948 in \/usr\/bin\/clickhouse\n3. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n4. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n5. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n6. Allocator<false, false>::alloc(unsigned long, unsigned long) @ 0x000000000c7d560d in \/usr\/bin\/clickhouse\n7. void DB::PODArrayBase<8ul, 4096ul, Allocator<false, false>, 63ul, 64ul>::resize<>(unsigned long) @ 0x0000000007220183 in \/usr\/bin\/clickhouse\n8. DB:SerializationArray:deserializeBinaryBulkWithMultipleStreams(COW<DB:IColumn&gt;immutable ptr&lt;DB:IColumn>&, unsigned long, DB:ISerialization:DeserializeBinaryBulkSettings&, std:shared ptr&lt;DBISerialization:DeserializeBinaryBulkState>&, std::unordered_map<String, COW<DB:IColumn&gt;immutable ptr&lt;DB:IColumn>, std::hash<String>, std::equal_to<String>, std:allocator&lt;std:pair<String const, COW<DB:IColumn&gt;immutable ptr&lt;DB:IColumn>>>>*) const @ 0x00000000108b9585 in \/usr\/bin\/clickhouse\n9. DB:MergeTreeReaderWide:readRows(unsigned long, unsigned long, bool, unsigned long, std:vector&lt;COW&lt;DBIColumn&gt;immutable ptr&lt;DB:IColumn>, std:allocator&lt;COW&lt;DBIColumn&gt;immutable ptr&lt;DB:IColumn>>>&) @ 0x000000001251ab34 in \/usr\/bin\/clickhouse\n10. DB:MergeTreeSequentialSource:generate() @ 0x000000001251ca4d in \/usr\/bin\/clickhouse\n11. DB:ISource:tryGenerate() @ 0x000000001297acf5 in \/usr\/bin\/clickhouse\n12. DB:ISource:work() @ 0x000000001297a743 in \/usr\/bin\/clickhouse\n13. DB:ExecutionThreadContext:executeTask() @ 0x000000001299371a in \/usr\/bin\/clickhouse\n14. DB:PipelineExecutor:executeStepImpl(unsigned long, std::atomic<bool>*) @ 0x000000001298a170 in \/usr\/bin\/clickhouse\n15. 
DB:PipelineExecutor:executeStep(std::atomic<bool>*) @ 0x0000000012989928 in \/usr\/bin\/clickhouse\n16. DB:PullingPipelineExecutor:pull(DB::Chunk&) @ 0x0000000012998017 in \/usr\/bin\/clickhouse\n17. DB:PullingPipelineExecutor:pull(DB::Block&) @ 0x00000000129981d3 in \/usr\/bin\/clickhouse\n18. DB:MergeTaskExecuteAndFinalizeHorizontalPart:executeImpl() @ 0x000000001233b6f2 in \/usr\/bin\/clickhouse\n19. DB:MergeTaskExecuteAndFinalizeHorizontalPart:execute() @ 0x000000001233b64b in \/usr\/bin\/clickhouse\n20. DB:MergeTask:execute() @ 0x0000000012340d99 in \/usr\/bin\/clickhouse\n21. DB:MergePlainMergeTreeTask:executeStep() @ 0x0000000012723517 in \/usr\/bin\/clickhouse\n22. DB:MergeTreeBackgroundExecutor&lt;DBDynamicRuntimeQueue&gt;:threadFunction() @ 0x00000000123532c4 in \/usr\/bin\/clickhouse\n23. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::worker(std::__list_iterator<ThreadFromGlobalPoolImpl<false>, void*>) @ 0x000000000c8eb0c1 in \/usr\/bin\/clickhouse\n24. void std: function:__policy_invoker<void ()>: call impl&lt;std function default alloc func&lt;ThreadFromGlobalPoolImpl&lt;false&gt;:ThreadFromGlobalPoolImpl<void ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>::scheduleImpl<void>(std::function<void ()>, Priority, std::optional<unsigned long>, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std: function:__policy_storage const*) @ 0x000000000c8ee8fa in \/usr\/bin\/clickhouse\n25. void* std: thread proxy[abiv15000]<std:tuple&lt;stdunique ptr&lt;std:__thread_struct, std:default delete&lt;std:__thread_struct>>, void ThreadPoolImpl<std:thread&gt;:scheduleImpl<void>(std::function<void ()>, Priority, std::optional<unsigned long>, bool)::'lambda0'()>>(void*) @ 0x000000000c8ed6fe in \/usr\/bin\/clickhouse\n26. ? @ 0x00007f2bbbadb609\n27. ? @ 0x00007f2bbba00353\n (version 24.1.2.5 (official build))","source_file":"src\/Common\/Exception.cpp; void DB::tryLogCurrentExceptionImpl(Poco::Logger *, const std::string &)","source_line":"222"}
{"date_time":"1736745070.040745","thread_name":"","thread_id":"592","level":"Error","query_id":"","logger_name":"MergeTreeBackgroundExecutor","message":"Exception while executing background task {7165cb86-e3b3-4832-9778-cf6a91b2273c::20250111_18664_18732_1}: Code: 241. DB::Exception: Memory limit (total) exceeded: would use 6.83 GiB (attempt to allocate chunk of 4299599 bytes), maximum: 6.79 GiB. OvercommitTracker decision: Memory overcommit isn't used. Waiting time or overcommit denominator are set to zero.: (while reading column attributes_string): (while reading from part \/var\/lib\/clickhouse\/store\/716\/7165cb86-e3b3-4832-9778-cf6a91b2273c\/20250111_18669_18669_0\/ in table signoz_logs.logs_v2 (7165cb86-e3b3-4832-9778-cf6a91b2273c) located on disk default of type local, from mark 0 with max_rows_to_read = 6526): While executing MergeTreeSequentialSource. (MEMORY_LIMIT_EXCEEDED), Stack trace (when copying this message, always include the lines below):\n\n0. DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000000c800f1b in \/usr\/bin\/clickhouse\n1. DB::Exception::Exception<char const*, char const*, String, long&, String, char const*, std::basic_string_view<char, std::char_traits<char>>>(int, FormatStringHelperImpl<std::type_identity<char const*>::type, std::type_identity<char const*>::type, std::type_identity<String>::type, std::type_identity<long&>::type, std::type_identity<String>::type, std::type_identity<char const*>::type, std::type_identity<std::basic_string_view<char, std::char_traits<char>>>::type>, char const*&&, char const*&&, String&&, long&, String&&, char const*&&, std::basic_string_view<char, std::char_traits<char>>&&) @ 0x000000000c816d0a in \/usr\/bin\/clickhouse\n2. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816948 in \/usr\/bin\/clickhouse\n3. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n4. 
MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n5. MemoryTracker::allocImpl(long, bool, MemoryTracker*, double) @ 0x000000000c816389 in \/usr\/bin\/clickhouse\n6. Allocator<false, false>::realloc(void*, unsigned long, unsigned long, unsigned long) @ 0x000000000c7d5d87 in \/usr\/bin\/clickhouse\n7. void DB::PODArrayBase<1ul, 4096ul, Allocator<false, false>, 63ul, 64ul>::resize_exact<>(unsigned long) @ 0x0000000007226ba6 in \/usr\/bin\/clickhouse\n8. void DB::deserializeBinarySSE2<1>(DB::PODArray<char8_t, 4096ul, Allocator<false, false>, 63ul, 64ul>&, DB::PODArray<unsigned long, 4096ul, Allocator<false, false>, 63ul, 64ul>&, DB::ReadBuffer&, unsigned long) @ 0x00000000108f8836 in \/usr\/bin\/clickhouse\n9. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW<DB::IColumn>::immutable_ptr<DB::IColumn>&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr<DB::ISerialization::DeserializeBinaryBulkState>&, std::unordered_map<String, COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::hash<String>, std::equal_to<String>, std::allocator<std::pair<String const, COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>>*) const @ 0x00000000108b01d9 in \/usr\/bin\/clickhouse\n10. DB::SerializationTuple::deserializeBinaryBulkWithMultipleStreams(COW<DB::IColumn>::immutable_ptr<DB::IColumn>&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr<DB::ISerialization::DeserializeBinaryBulkState>&, std::unordered_map<String, COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::hash<String>, std::equal_to<String>, std::allocator<std::pair<String const, COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>>*) const @ 0x0000000010902f5a in \/usr\/bin\/clickhouse\n11. 
DB::SerializationArray::deserializeBinaryBulkWithMultipleStreams(COW<DB::IColumn>::immutable_ptr<DB::IColumn>&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr<DB::ISerialization::DeserializeBinaryBulkState>&, std::unordered_map<String, COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::hash<String>, std::equal_to<String>, std::allocator<std::pair<String const, COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>>*) const @ 0x00000000108b99c8 in \/usr\/bin\/clickhouse\n12. DB::MergeTreeReaderWide::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>&) @ 0x000000001251ab34 in \/usr\/bin\/clickhouse\n13. DB::MergeTreeSequentialSource::generate() @ 0x000000001251ca4d in \/usr\/bin\/clickhouse\n14. DB::ISource::tryGenerate() @ 0x000000001297acf5 in \/usr\/bin\/clickhouse\n15. DB::ISource::work() @ 0x000000001297a743 in \/usr\/bin\/clickhouse\n16. DB::ExecutionThreadContext::executeTask() @ 0x000000001299371a in \/usr\/bin\/clickhouse\n17. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic<bool>*) @ 0x000000001298a170 in \/usr\/bin\/clickhouse\n18. DB::PipelineExecutor::executeStep(std::atomic<bool>*) @ 0x0000000012989928 in \/usr\/bin\/clickhouse\n19. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x0000000012998017 in \/usr\/bin\/clickhouse\n20. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x00000000129981d3 in \/usr\/bin\/clickhouse\n21. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x000000001233b6f2 in \/usr\/bin\/clickhouse\n22. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001233b64b in \/usr\/bin\/clickhouse\n23. DB::MergeTask::execute() @ 0x0000000012340d99 in \/usr\/bin\/clickhouse\n24. DB::MergePlainMergeTreeTask::executeStep() @ 0x0000000012723517 in \/usr\/bin\/clickhouse\n25. 
DB::MergeTreeBackgroundExecutor<DB::DynamicRuntimeQueue>::threadFunction() @ 0x00000000123532c4 in \/usr\/bin\/clickhouse\n26.
Hi, my current setup looks like this: 1. A self-hosted SigNoz cluster running on K8s with the helm chart (...
s

Samuel Olowoyeye

about 1 year ago
Hi, my current setup looks like this: 1. A self-hosted SigNoz cluster running on K8s with the helm chart (signoz/signoz) 2. A k8s cluster where all my workloads are running; it's pushing data to the self-hosted instance and it's working perfectly fine 3. I tried to onboard another cluster today, but I keep getting this error in the agent logs
{"level":"error","ts":1723615085.4947934,"caller":"scraperhelper/scrapercontroller.go:200","msg":"Error scraping metrics","kind":"receiver","name":"hostmetrics","data_type":"metrics","error":"failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/etc/hosts: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/etc/hostname: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/usr/lib/os-release: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/var/log/nsx-ujo: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/etc/nsx-ujo/ncp.ini: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-kube-proxy/rootfs/etc/nsx-ujo/certs: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/etc/hosts: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/etc/hostname: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/usr/lib/os-release: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/var/log/nsx-ujo: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/etc/nsx-ujo/ncp.ini: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/etc/nsx-ujo/certs: permission denied; failed to read usage at /hostfs/var/vcap/data/nsx-node-agent/rootfs/var/vcap/data/garden-cni/container-netns: permission 
denied","scraper":"filesystem","stacktrace":"<http://go.opentelemetry.io/collector/receiver/scraperhelper.(*controller).scrapeMetricsAndReport|go.opentelemetry.io/collector/receiver/scraperhelper.(*controller).scrapeMetricsAndReport>\n\<http://tgo.opentelemetry.io/collector/receiver@v0.88.0/scraperhelper/scrapercontroller.go:200|tgo.opentelemetry.io/collector/receiver@v0.88.0/scraperhelper/scrapercontroller.go:200>\<http://ngo.opentelemetry.io/collector/receiver/scraperhelper.(*controller).startScraping.func1|ngo.opentelemetry.io/collector/receiver/scraperhelper.(*controller).startScraping.func1>\n\<http://tgo.opentelemetry.io/collector/receiver@v0.88.0/scraperhelper/scrapercontroller.go:176|tgo.opentelemetry.io/collector/receiver@v0.88.0/scraperhelper/scrapercontroller.go:176>"}
4. Then I noticed that deployment.environment stopped working too; while checking through, I decided to downgrade the helm chart to version 0.11.7
helm upgrade --namespace=platform my-release signoz/k8s-infra -f vgfacematch2.yaml --version 0.11.7
everything now seems to work. What could be the issue here? How do I get to use the latest version 0.11.9? I could not find any documentation; maybe I'm missing a step.
Hi, I tried SigNoz self-hosted (Docker). I'm using NestJS, but I can't see anything in my SigNoz. //...
a

Adel

over 1 year ago
Hi , i tried signoz self-hosted (docker), im using nestjs, but i cant see any thing in my signoz. // tracer.ts 'use strict'; import { getNodeAutoInstrumentations } from '@opentelemetry/auto-instrumentations-node'; import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http'; import { Resource } from '@opentelemetry/resources'; import { formattedLogger } from './utils/formatedLogger'; import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions'; import { ConfigService } from '@nestjs/config'; import { NodeSDK } from '@opentelemetry/sdk-node'; const logger = formattedLogger('Signoz'); /* const configService = new ConfigService(); const TRACE_ENABLED = configService.getOrThrow<boolean>('TRACE_ENABLED'); logger.debug('TRACE_ENABLED: ' + TRACE_ENABLED); if (!TRACE_ENABLED) { return; } */ // Configure the SDK to export telemetry data to SigNoz const exporterOptions = { url: 'http://adelpro.duckdns.org:4318/v1/traces', }; const traceExporter = new OTLPTraceExporter(exporterOptions); const sdk = new NodeSDK({ traceExporter, instrumentations: [ getNodeAutoInstrumentations({ '@opentelemetry/instrumentation-nestjs-core': { enabled: true }, }), ], resource: new Resource({ [SemanticResourceAttributes.SERVICE_NAME]: 'imcoder-backend', }), }); sdk.start(); // Gracefully shut down the SDK on process exit process.on('SIGTERM', () => { sdk .shutdown() .then(() => logger.log('Tracing terminated')) .catch((error) => logger.error('Error terminating tracing: ' + error)) .finally(() => process.exit(0)); }); process.on('SIGINT', () => { sdk .shutdown() .then(() => logger.log('Tracing terminated')) .catch((error) => logger.error('Error terminating tracing:' + error)) .finally(() => process.exit(0)); }); export default sdk; // main.ts ... tracer.start() ...