@juspay/neurolink 9.40.0 → 9.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/README.md +7 -1
- package/dist/auth/anthropicOAuth.d.ts +18 -3
- package/dist/auth/anthropicOAuth.js +137 -4
- package/dist/auth/providers/firebase.js +5 -1
- package/dist/auth/providers/jwt.js +5 -1
- package/dist/auth/providers/workos.js +5 -1
- package/dist/auth/sessionManager.d.ts +1 -1
- package/dist/auth/sessionManager.js +58 -27
- package/dist/browser/neurolink.min.js +471 -445
- package/dist/cli/commands/mcp.js +3 -0
- package/dist/cli/commands/proxy.d.ts +2 -1
- package/dist/cli/commands/proxy.js +279 -16
- package/dist/cli/commands/task.d.ts +56 -0
- package/dist/cli/commands/task.js +838 -0
- package/dist/cli/factories/commandFactory.d.ts +2 -0
- package/dist/cli/factories/commandFactory.js +38 -0
- package/dist/cli/parser.js +8 -4
- package/dist/client/aiSdkAdapter.js +3 -0
- package/dist/client/streamingClient.js +30 -10
- package/dist/core/modules/GenerationHandler.js +3 -2
- package/dist/core/redisConversationMemoryManager.js +7 -3
- package/dist/evaluation/BatchEvaluator.js +4 -1
- package/dist/evaluation/hooks/observabilityHooks.js +5 -3
- package/dist/evaluation/pipeline/evaluationPipeline.d.ts +3 -2
- package/dist/evaluation/pipeline/evaluationPipeline.js +20 -8
- package/dist/evaluation/pipeline/strategies/batchStrategy.js +6 -3
- package/dist/evaluation/pipeline/strategies/samplingStrategy.js +18 -10
- package/dist/lib/auth/anthropicOAuth.d.ts +18 -3
- package/dist/lib/auth/anthropicOAuth.js +137 -4
- package/dist/lib/auth/providers/firebase.js +5 -1
- package/dist/lib/auth/providers/jwt.js +5 -1
- package/dist/lib/auth/providers/workos.js +5 -1
- package/dist/lib/auth/sessionManager.d.ts +1 -1
- package/dist/lib/auth/sessionManager.js +58 -27
- package/dist/lib/client/aiSdkAdapter.js +3 -0
- package/dist/lib/client/streamingClient.js +30 -10
- package/dist/lib/core/modules/GenerationHandler.js +3 -2
- package/dist/lib/core/redisConversationMemoryManager.js +7 -3
- package/dist/lib/evaluation/BatchEvaluator.js +4 -1
- package/dist/lib/evaluation/hooks/observabilityHooks.js +5 -3
- package/dist/lib/evaluation/pipeline/evaluationPipeline.d.ts +3 -2
- package/dist/lib/evaluation/pipeline/evaluationPipeline.js +20 -8
- package/dist/lib/evaluation/pipeline/strategies/batchStrategy.js +6 -3
- package/dist/lib/evaluation/pipeline/strategies/samplingStrategy.js +18 -10
- package/dist/lib/neurolink.d.ts +18 -1
- package/dist/lib/neurolink.js +367 -484
- package/dist/lib/observability/otelBridge.d.ts +2 -2
- package/dist/lib/observability/otelBridge.js +12 -3
- package/dist/lib/providers/amazonBedrock.js +2 -4
- package/dist/lib/providers/anthropic.d.ts +9 -5
- package/dist/lib/providers/anthropic.js +19 -14
- package/dist/lib/providers/anthropicBaseProvider.d.ts +3 -3
- package/dist/lib/providers/anthropicBaseProvider.js +5 -4
- package/dist/lib/providers/azureOpenai.d.ts +1 -1
- package/dist/lib/providers/azureOpenai.js +5 -4
- package/dist/lib/providers/googleAiStudio.js +30 -1
- package/dist/lib/providers/googleVertex.js +28 -6
- package/dist/lib/providers/huggingFace.d.ts +3 -3
- package/dist/lib/providers/huggingFace.js +6 -8
- package/dist/lib/providers/litellm.js +41 -29
- package/dist/lib/providers/mistral.js +2 -1
- package/dist/lib/providers/ollama.js +80 -23
- package/dist/lib/providers/openAI.js +3 -2
- package/dist/lib/providers/openRouter.js +2 -1
- package/dist/lib/providers/openaiCompatible.d.ts +4 -4
- package/dist/lib/providers/openaiCompatible.js +4 -4
- package/dist/lib/proxy/claudeFormat.d.ts +3 -2
- package/dist/lib/proxy/claudeFormat.js +25 -20
- package/dist/lib/proxy/cloaking/plugins/sessionIdentity.d.ts +2 -6
- package/dist/lib/proxy/cloaking/plugins/sessionIdentity.js +9 -33
- package/dist/lib/proxy/modelRouter.js +3 -0
- package/dist/lib/proxy/oauthFetch.d.ts +1 -1
- package/dist/lib/proxy/oauthFetch.js +65 -72
- package/dist/lib/proxy/proxyConfig.js +44 -24
- package/dist/lib/proxy/proxyEnv.d.ts +19 -0
- package/dist/lib/proxy/proxyEnv.js +73 -0
- package/dist/lib/proxy/proxyFetch.js +50 -4
- package/dist/lib/proxy/proxyTracer.d.ts +133 -0
- package/dist/lib/proxy/proxyTracer.js +645 -0
- package/dist/lib/proxy/rawStreamCapture.d.ts +10 -0
- package/dist/lib/proxy/rawStreamCapture.js +83 -0
- package/dist/lib/proxy/requestLogger.d.ts +32 -5
- package/dist/lib/proxy/requestLogger.js +406 -37
- package/dist/lib/proxy/sseInterceptor.d.ts +97 -0
- package/dist/lib/proxy/sseInterceptor.js +402 -0
- package/dist/lib/proxy/usageStats.d.ts +4 -3
- package/dist/lib/proxy/usageStats.js +25 -12
- package/dist/lib/rag/chunkers/MarkdownChunker.js +13 -5
- package/dist/lib/rag/chunking/markdownChunker.js +15 -6
- package/dist/lib/server/routes/claudeProxyRoutes.d.ts +7 -2
- package/dist/lib/server/routes/claudeProxyRoutes.js +1737 -508
- package/dist/lib/services/server/ai/observability/instrumentation.d.ts +7 -1
- package/dist/lib/services/server/ai/observability/instrumentation.js +240 -40
- package/dist/lib/tasks/backends/bullmqBackend.d.ts +33 -0
- package/dist/lib/tasks/backends/bullmqBackend.js +196 -0
- package/dist/lib/tasks/backends/nodeTimeoutBackend.d.ts +27 -0
- package/dist/lib/tasks/backends/nodeTimeoutBackend.js +141 -0
- package/dist/lib/tasks/backends/taskBackendRegistry.d.ts +31 -0
- package/dist/lib/tasks/backends/taskBackendRegistry.js +66 -0
- package/dist/lib/tasks/errors.d.ts +31 -0
- package/dist/lib/tasks/errors.js +18 -0
- package/dist/lib/tasks/store/fileTaskStore.d.ts +43 -0
- package/dist/lib/tasks/store/fileTaskStore.js +179 -0
- package/dist/lib/tasks/store/redisTaskStore.d.ts +43 -0
- package/dist/lib/tasks/store/redisTaskStore.js +197 -0
- package/dist/lib/tasks/taskExecutor.d.ts +21 -0
- package/dist/lib/tasks/taskExecutor.js +166 -0
- package/dist/lib/tasks/taskManager.d.ts +63 -0
- package/dist/lib/tasks/taskManager.js +426 -0
- package/dist/lib/tasks/tools/taskTools.d.ts +135 -0
- package/dist/lib/tasks/tools/taskTools.js +274 -0
- package/dist/lib/telemetry/index.d.ts +2 -1
- package/dist/lib/telemetry/index.js +2 -1
- package/dist/lib/telemetry/telemetryService.d.ts +3 -0
- package/dist/lib/telemetry/telemetryService.js +65 -5
- package/dist/lib/types/cli.d.ts +10 -0
- package/dist/lib/types/configTypes.d.ts +3 -0
- package/dist/lib/types/generateTypes.d.ts +13 -0
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/types/proxyTypes.d.ts +37 -5
- package/dist/lib/types/streamTypes.d.ts +25 -3
- package/dist/lib/types/taskTypes.d.ts +275 -0
- package/dist/lib/types/taskTypes.js +37 -0
- package/dist/lib/utils/messageBuilder.js +3 -2
- package/dist/lib/utils/providerHealth.d.ts +18 -0
- package/dist/lib/utils/providerHealth.js +240 -9
- package/dist/lib/utils/providerUtils.js +14 -8
- package/dist/lib/utils/toolChoice.d.ts +4 -0
- package/dist/lib/utils/toolChoice.js +7 -0
- package/dist/neurolink.d.ts +18 -1
- package/dist/neurolink.js +367 -484
- package/dist/observability/otelBridge.d.ts +2 -2
- package/dist/observability/otelBridge.js +12 -3
- package/dist/providers/amazonBedrock.js +2 -4
- package/dist/providers/anthropic.d.ts +9 -5
- package/dist/providers/anthropic.js +19 -14
- package/dist/providers/anthropicBaseProvider.d.ts +3 -3
- package/dist/providers/anthropicBaseProvider.js +5 -4
- package/dist/providers/azureOpenai.d.ts +1 -1
- package/dist/providers/azureOpenai.js +5 -4
- package/dist/providers/googleAiStudio.js +30 -1
- package/dist/providers/googleVertex.js +28 -6
- package/dist/providers/huggingFace.d.ts +3 -3
- package/dist/providers/huggingFace.js +6 -7
- package/dist/providers/litellm.js +41 -29
- package/dist/providers/mistral.js +2 -1
- package/dist/providers/ollama.js +80 -23
- package/dist/providers/openAI.js +3 -2
- package/dist/providers/openRouter.js +2 -1
- package/dist/providers/openaiCompatible.d.ts +4 -4
- package/dist/providers/openaiCompatible.js +4 -3
- package/dist/proxy/claudeFormat.d.ts +3 -2
- package/dist/proxy/claudeFormat.js +25 -20
- package/dist/proxy/cloaking/plugins/sessionIdentity.d.ts +2 -6
- package/dist/proxy/cloaking/plugins/sessionIdentity.js +9 -33
- package/dist/proxy/modelRouter.js +3 -0
- package/dist/proxy/oauthFetch.d.ts +1 -1
- package/dist/proxy/oauthFetch.js +65 -72
- package/dist/proxy/proxyConfig.js +44 -24
- package/dist/proxy/proxyEnv.d.ts +19 -0
- package/dist/proxy/proxyEnv.js +72 -0
- package/dist/proxy/proxyFetch.js +50 -4
- package/dist/proxy/proxyTracer.d.ts +133 -0
- package/dist/proxy/proxyTracer.js +644 -0
- package/dist/proxy/rawStreamCapture.d.ts +10 -0
- package/dist/proxy/rawStreamCapture.js +82 -0
- package/dist/proxy/requestLogger.d.ts +32 -5
- package/dist/proxy/requestLogger.js +406 -37
- package/dist/proxy/sseInterceptor.d.ts +97 -0
- package/dist/proxy/sseInterceptor.js +401 -0
- package/dist/proxy/usageStats.d.ts +4 -3
- package/dist/proxy/usageStats.js +25 -12
- package/dist/rag/chunkers/MarkdownChunker.js +13 -5
- package/dist/rag/chunking/markdownChunker.js +15 -6
- package/dist/server/routes/claudeProxyRoutes.d.ts +7 -2
- package/dist/server/routes/claudeProxyRoutes.js +1737 -508
- package/dist/services/server/ai/observability/instrumentation.d.ts +7 -1
- package/dist/services/server/ai/observability/instrumentation.js +240 -40
- package/dist/tasks/backends/bullmqBackend.d.ts +33 -0
- package/dist/tasks/backends/bullmqBackend.js +195 -0
- package/dist/tasks/backends/nodeTimeoutBackend.d.ts +27 -0
- package/dist/tasks/backends/nodeTimeoutBackend.js +140 -0
- package/dist/tasks/backends/taskBackendRegistry.d.ts +31 -0
- package/dist/tasks/backends/taskBackendRegistry.js +65 -0
- package/dist/tasks/errors.d.ts +31 -0
- package/dist/tasks/errors.js +17 -0
- package/dist/tasks/store/fileTaskStore.d.ts +43 -0
- package/dist/tasks/store/fileTaskStore.js +178 -0
- package/dist/tasks/store/redisTaskStore.d.ts +43 -0
- package/dist/tasks/store/redisTaskStore.js +196 -0
- package/dist/tasks/taskExecutor.d.ts +21 -0
- package/dist/tasks/taskExecutor.js +165 -0
- package/dist/tasks/taskManager.d.ts +63 -0
- package/dist/tasks/taskManager.js +425 -0
- package/dist/tasks/tools/taskTools.d.ts +135 -0
- package/dist/tasks/tools/taskTools.js +273 -0
- package/dist/telemetry/index.d.ts +2 -1
- package/dist/telemetry/index.js +2 -1
- package/dist/telemetry/telemetryService.d.ts +3 -0
- package/dist/telemetry/telemetryService.js +65 -5
- package/dist/types/cli.d.ts +10 -0
- package/dist/types/configTypes.d.ts +3 -0
- package/dist/types/generateTypes.d.ts +13 -0
- package/dist/types/index.d.ts +1 -0
- package/dist/types/proxyTypes.d.ts +37 -5
- package/dist/types/streamTypes.d.ts +25 -3
- package/dist/types/taskTypes.d.ts +275 -0
- package/dist/types/taskTypes.js +36 -0
- package/dist/utils/messageBuilder.js +3 -2
- package/dist/utils/providerHealth.d.ts +18 -0
- package/dist/utils/providerHealth.js +240 -9
- package/dist/utils/providerUtils.js +14 -8
- package/dist/utils/toolChoice.d.ts +4 -0
- package/dist/utils/toolChoice.js +6 -0
- package/docs/assets/dashboards/neurolink-proxy-observability-dashboard.json +6609 -0
- package/docs/changelog.md +252 -0
- package/package.json +19 -1
- package/scripts/observability/check-proxy-telemetry.mjs +235 -0
- package/scripts/observability/docker-compose.proxy-observability.yaml +55 -0
- package/scripts/observability/import-openobserve-dashboard.mjs +240 -0
- package/scripts/observability/manage-local-openobserve.sh +184 -0
- package/scripts/observability/otel-collector.proxy-observability.yaml +78 -0
- package/scripts/observability/proxy-observability.env.example +23 -0
|
@@ -8,7 +8,8 @@
|
|
|
8
8
|
*/
|
|
9
9
|
import { LangfuseSpanProcessor } from "@langfuse/otel";
|
|
10
10
|
import { trace } from "@opentelemetry/api";
|
|
11
|
-
import
|
|
11
|
+
import { LoggerProvider } from "@opentelemetry/sdk-logs";
|
|
12
|
+
import { type SpanProcessor } from "@opentelemetry/sdk-trace-base";
|
|
12
13
|
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
|
|
13
14
|
import type { LangfuseConfig } from "../../../../types/observability.js";
|
|
14
15
|
/**
|
|
@@ -104,6 +105,11 @@ export declare function getLangfuseSpanProcessor(): LangfuseSpanProcessor | null
|
|
|
104
105
|
* Get the tracer provider
|
|
105
106
|
*/
|
|
106
107
|
export declare function getTracerProvider(): NodeTracerProvider | null;
|
|
108
|
+
/**
|
|
109
|
+
* Get the logger provider for emitting OTLP log records.
|
|
110
|
+
* Returns null if OTLP is not configured or LoggerProvider was not created.
|
|
111
|
+
*/
|
|
112
|
+
export declare function getLoggerProvider(): LoggerProvider | null;
|
|
107
113
|
/**
|
|
108
114
|
* Check if OpenTelemetry is initialized
|
|
109
115
|
*/
|
|
@@ -7,8 +7,15 @@
|
|
|
7
7
|
* Flow: Vercel AI SDK → OpenTelemetry Spans → LangfuseSpanProcessor → Langfuse Platform
|
|
8
8
|
*/
|
|
9
9
|
import { LangfuseSpanProcessor } from "@langfuse/otel";
|
|
10
|
-
import { trace } from "@opentelemetry/api";
|
|
10
|
+
import { metrics, trace } from "@opentelemetry/api";
|
|
11
|
+
import { W3CTraceContextPropagator } from "@opentelemetry/core";
|
|
12
|
+
import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
|
|
13
|
+
import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
|
|
14
|
+
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
|
|
11
15
|
import { resourceFromAttributes } from "@opentelemetry/resources";
|
|
16
|
+
import { MeterProvider, PeriodicExportingMetricReader, } from "@opentelemetry/sdk-metrics";
|
|
17
|
+
import { BatchLogRecordProcessor, LoggerProvider, } from "@opentelemetry/sdk-logs";
|
|
18
|
+
import { BatchSpanProcessor, } from "@opentelemetry/sdk-trace-base";
|
|
12
19
|
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
|
|
13
20
|
import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, } from "@opentelemetry/semantic-conventions";
|
|
14
21
|
import { AsyncLocalStorage } from "async_hooks";
|
|
@@ -16,6 +23,8 @@ import { logger } from "../../../../utils/logger.js";
|
|
|
16
23
|
const LOG_PREFIX = "[OpenTelemetry]";
|
|
17
24
|
const contextStorage = new AsyncLocalStorage();
|
|
18
25
|
let tracerProvider = null;
|
|
26
|
+
let meterProvider = null;
|
|
27
|
+
let loggerProvider = null;
|
|
19
28
|
let langfuseProcessor = null;
|
|
20
29
|
let isInitialized = false;
|
|
21
30
|
let isCredentialsValid = false;
|
|
@@ -453,58 +462,179 @@ export function initializeOpenTelemetry(config) {
|
|
|
453
462
|
return;
|
|
454
463
|
}
|
|
455
464
|
}
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
465
|
+
const otlpEndpoint = process.env.OTEL_EXPORTER_OTLP_ENDPOINT;
|
|
466
|
+
const langfuseRequested = config?.enabled === true;
|
|
467
|
+
const hasLangfuseCreds = !!config.publicKey && !!config.secretKey;
|
|
468
|
+
// THEN: Check whether we have any standalone observability backend at all.
|
|
469
|
+
if ((!langfuseRequested || !hasLangfuseCreds) && !otlpEndpoint) {
|
|
470
|
+
if (langfuseRequested && !hasLangfuseCreds) {
|
|
471
|
+
logger.warn(`${LOG_PREFIX} Langfuse requested but credentials are missing, and no OTLP endpoint is configured; skipping initialization`, {
|
|
472
|
+
hasPublicKey: !!config.publicKey,
|
|
473
|
+
hasSecretKey: !!config.secretKey,
|
|
474
|
+
});
|
|
475
|
+
}
|
|
476
|
+
else {
|
|
477
|
+
logger.debug(`${LOG_PREFIX} Langfuse disabled and OTLP endpoint missing, skipping initialization`);
|
|
478
|
+
}
|
|
459
479
|
isInitialized = true;
|
|
460
480
|
return;
|
|
461
481
|
}
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
logger.warn(`${LOG_PREFIX} Langfuse enabled but missing credentials, skipping initialization`, {
|
|
482
|
+
if (langfuseRequested && !hasLangfuseCreds) {
|
|
483
|
+
logger.warn(`${LOG_PREFIX} Langfuse requested but credentials are missing; continuing with OTLP-only telemetry`, {
|
|
465
484
|
hasPublicKey: !!config.publicKey,
|
|
466
485
|
hasSecretKey: !!config.secretKey,
|
|
486
|
+
otlpEnabled: !!otlpEndpoint,
|
|
467
487
|
});
|
|
468
|
-
isInitialized = true;
|
|
469
|
-
isCredentialsValid = false;
|
|
470
|
-
return;
|
|
471
488
|
}
|
|
472
489
|
try {
|
|
473
490
|
currentConfig = config;
|
|
474
|
-
isCredentialsValid =
|
|
475
|
-
// Step 1: Create LangfuseSpanProcessor
|
|
476
|
-
//
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
491
|
+
isCredentialsValid = hasLangfuseCreds;
|
|
492
|
+
// Step 1: Create LangfuseSpanProcessor only when Langfuse is explicitly enabled
|
|
493
|
+
// with real credentials. OTLP-only mode is valid and should not construct one.
|
|
494
|
+
if (langfuseRequested && hasLangfuseCreds) {
|
|
495
|
+
// shouldExportSpan: export all spans (v5 default filters to gen_ai spans only)
|
|
496
|
+
langfuseProcessor = new LangfuseSpanProcessor({
|
|
497
|
+
publicKey: config.publicKey,
|
|
498
|
+
secretKey: config.secretKey,
|
|
499
|
+
baseUrl: config.baseUrl || "https://cloud.langfuse.com",
|
|
500
|
+
environment: config.environment || "dev",
|
|
501
|
+
release: config.release || "v1.0.0",
|
|
502
|
+
shouldExportSpan: () => true,
|
|
503
|
+
});
|
|
504
|
+
}
|
|
505
|
+
else {
|
|
506
|
+
langfuseProcessor = null;
|
|
507
|
+
}
|
|
508
|
+
logger.debug(`${LOG_PREFIX} Standalone observability mode`, {
|
|
509
|
+
langfuseEnabled: !!langfuseProcessor,
|
|
510
|
+
otlpEnabled: !!otlpEndpoint,
|
|
486
511
|
baseUrl: config.baseUrl || "https://cloud.langfuse.com",
|
|
487
512
|
environment: config.environment || "dev",
|
|
488
513
|
});
|
|
489
514
|
// Step 2: Create our own TracerProvider (standalone behavior)
|
|
515
|
+
// Use OTEL_SERVICE_NAME env var if available, otherwise "neurolink"
|
|
516
|
+
const serviceName = process.env.OTEL_SERVICE_NAME || "neurolink";
|
|
490
517
|
const resource = resourceFromAttributes({
|
|
491
|
-
[ATTR_SERVICE_NAME]:
|
|
518
|
+
[ATTR_SERVICE_NAME]: serviceName,
|
|
492
519
|
[ATTR_SERVICE_VERSION]: config.release || "v1.0.0",
|
|
493
520
|
"deployment.environment": config.environment || "dev",
|
|
494
521
|
});
|
|
522
|
+
// Build span processor list
|
|
523
|
+
const spanProcessors = [new ContextEnricher()];
|
|
524
|
+
if (langfuseProcessor) {
|
|
525
|
+
spanProcessors.push(langfuseProcessor);
|
|
526
|
+
}
|
|
527
|
+
// Step 2b: If OTEL_EXPORTER_OTLP_ENDPOINT is set, also export via OTLP HTTP
|
|
528
|
+
// This allows sending traces to an OpenTelemetry Collector (e.g. for OpenObserve)
|
|
529
|
+
if (otlpEndpoint) {
|
|
530
|
+
try {
|
|
531
|
+
const otlpExporter = new OTLPTraceExporter({
|
|
532
|
+
url: `${otlpEndpoint}/v1/traces`,
|
|
533
|
+
});
|
|
534
|
+
const otlpBatchProcessor = new BatchSpanProcessor(otlpExporter, {
|
|
535
|
+
maxQueueSize: 2048,
|
|
536
|
+
maxExportBatchSize: 512,
|
|
537
|
+
scheduledDelayMillis: 1000,
|
|
538
|
+
exportTimeoutMillis: 30000,
|
|
539
|
+
});
|
|
540
|
+
spanProcessors.push(otlpBatchProcessor);
|
|
541
|
+
logger.info(`${LOG_PREFIX} OTLP trace exporter added`, {
|
|
542
|
+
endpoint: `${otlpEndpoint}/v1/traces`,
|
|
543
|
+
serviceName,
|
|
544
|
+
});
|
|
545
|
+
}
|
|
546
|
+
catch (otlpError) {
|
|
547
|
+
logger.warn(`${LOG_PREFIX} Failed to create OTLP exporter (non-fatal)`, {
|
|
548
|
+
error: otlpError instanceof Error
|
|
549
|
+
? otlpError.message
|
|
550
|
+
: String(otlpError),
|
|
551
|
+
endpoint: otlpEndpoint,
|
|
552
|
+
});
|
|
553
|
+
}
|
|
554
|
+
}
|
|
495
555
|
tracerProvider = new NodeTracerProvider({
|
|
496
556
|
resource,
|
|
497
|
-
spanProcessors
|
|
557
|
+
spanProcessors,
|
|
558
|
+
});
|
|
559
|
+
// Step 4: Register globally with explicit W3C propagator
|
|
560
|
+
// This ensures traceparent headers from calling SDKs are extracted correctly,
|
|
561
|
+
// even if another library registers a no-op propagator before us.
|
|
562
|
+
tracerProvider.register({
|
|
563
|
+
propagator: new W3CTraceContextPropagator(),
|
|
498
564
|
});
|
|
499
|
-
// Step 4: Register globally
|
|
500
|
-
tracerProvider.register();
|
|
501
565
|
usingExternalProvider = false;
|
|
502
566
|
isInitialized = true;
|
|
503
|
-
|
|
567
|
+
// Step 5: If OTLP endpoint is set, also set up MeterProvider for metrics export
|
|
568
|
+
// This enables TelemetryService's metrics.getMeter() instruments to export via OTLP
|
|
569
|
+
if (otlpEndpoint) {
|
|
570
|
+
try {
|
|
571
|
+
const metricExporter = new OTLPMetricExporter({
|
|
572
|
+
url: `${otlpEndpoint}/v1/metrics`,
|
|
573
|
+
});
|
|
574
|
+
const metricReader = new PeriodicExportingMetricReader({
|
|
575
|
+
exporter: metricExporter,
|
|
576
|
+
exportIntervalMillis: 15000, // Export every 15 seconds
|
|
577
|
+
exportTimeoutMillis: 10000,
|
|
578
|
+
});
|
|
579
|
+
meterProvider = new MeterProvider({
|
|
580
|
+
resource,
|
|
581
|
+
readers: [metricReader],
|
|
582
|
+
});
|
|
583
|
+
// Register globally so TelemetryService's metrics.getMeter() picks it up
|
|
584
|
+
metrics.setGlobalMeterProvider(meterProvider);
|
|
585
|
+
logger.info(`${LOG_PREFIX} OTLP metric exporter added — MeterProvider registered globally`, {
|
|
586
|
+
endpoint: `${otlpEndpoint}/v1/metrics`,
|
|
587
|
+
exportIntervalMs: 15000,
|
|
588
|
+
serviceName,
|
|
589
|
+
meterProviderType: meterProvider.constructor.name,
|
|
590
|
+
});
|
|
591
|
+
}
|
|
592
|
+
catch (metricsError) {
|
|
593
|
+
logger.warn(`${LOG_PREFIX} Failed to create OTLP metric exporter (non-fatal)`, {
|
|
594
|
+
error: metricsError instanceof Error
|
|
595
|
+
? metricsError.message
|
|
596
|
+
: String(metricsError),
|
|
597
|
+
endpoint: otlpEndpoint,
|
|
598
|
+
});
|
|
599
|
+
}
|
|
600
|
+
// Step 6: Set up LoggerProvider for OTLP log export
|
|
601
|
+
// This enables logRequest() to emit structured log records via OTLP
|
|
602
|
+
try {
|
|
603
|
+
const logExporter = new OTLPLogExporter({
|
|
604
|
+
url: `${otlpEndpoint}/v1/logs`,
|
|
605
|
+
});
|
|
606
|
+
const logProcessor = new BatchLogRecordProcessor(logExporter, {
|
|
607
|
+
maxQueueSize: 2048,
|
|
608
|
+
maxExportBatchSize: 512,
|
|
609
|
+
scheduledDelayMillis: 2000,
|
|
610
|
+
exportTimeoutMillis: 30000,
|
|
611
|
+
});
|
|
612
|
+
loggerProvider = new LoggerProvider({
|
|
613
|
+
resource,
|
|
614
|
+
processors: [logProcessor],
|
|
615
|
+
});
|
|
616
|
+
logger.info(`${LOG_PREFIX} OTLP log exporter added — LoggerProvider created`, {
|
|
617
|
+
endpoint: `${otlpEndpoint}/v1/logs`,
|
|
618
|
+
serviceName,
|
|
619
|
+
});
|
|
620
|
+
}
|
|
621
|
+
catch (logsError) {
|
|
622
|
+
logger.warn(`${LOG_PREFIX} Failed to create OTLP log exporter (non-fatal)`, {
|
|
623
|
+
error: logsError instanceof Error
|
|
624
|
+
? logsError.message
|
|
625
|
+
: String(logsError),
|
|
626
|
+
endpoint: otlpEndpoint,
|
|
627
|
+
});
|
|
628
|
+
}
|
|
629
|
+
}
|
|
630
|
+
logger.info(`${LOG_PREFIX} Observability initialized`, {
|
|
504
631
|
baseUrl: config.baseUrl || "https://cloud.langfuse.com",
|
|
505
632
|
environment: config.environment || "dev",
|
|
506
633
|
release: config.release || "v1.0.0",
|
|
507
634
|
mode: "standalone",
|
|
635
|
+
langfuseEnabled: !!langfuseProcessor,
|
|
636
|
+
otlpEnabled: !!otlpEndpoint,
|
|
637
|
+
serviceName,
|
|
508
638
|
});
|
|
509
639
|
}
|
|
510
640
|
catch (error) {
|
|
@@ -540,22 +670,75 @@ export async function flushOpenTelemetry() {
|
|
|
540
670
|
logger.debug(`${LOG_PREFIX} Not initialized, skipping flush`);
|
|
541
671
|
return;
|
|
542
672
|
}
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
673
|
+
const failures = [];
|
|
674
|
+
if (langfuseProcessor) {
|
|
675
|
+
try {
|
|
676
|
+
logger.info(`${LOG_PREFIX} Flushing Langfuse spans...`);
|
|
677
|
+
await langfuseProcessor.forceFlush();
|
|
678
|
+
}
|
|
679
|
+
catch (error) {
|
|
680
|
+
failures.push({ signal: "langfuse", error });
|
|
681
|
+
logger.error(`${LOG_PREFIX} Langfuse flush failed`, {
|
|
682
|
+
error: error instanceof Error ? error.message : String(error),
|
|
683
|
+
stack: error instanceof Error ? error.stack : undefined,
|
|
684
|
+
});
|
|
685
|
+
}
|
|
546
686
|
}
|
|
547
|
-
|
|
548
|
-
logger.
|
|
549
|
-
await langfuseProcessor.forceFlush();
|
|
550
|
-
logger.info(`${LOG_PREFIX} Successfully flushed spans to Langfuse`);
|
|
687
|
+
else {
|
|
688
|
+
logger.debug(`${LOG_PREFIX} Langfuse disabled, skipping Langfuse flush`);
|
|
551
689
|
}
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
}
|
|
557
|
-
|
|
690
|
+
if (tracerProvider && !usingExternalProvider) {
|
|
691
|
+
try {
|
|
692
|
+
logger.info(`${LOG_PREFIX} Flushing OTLP traces...`);
|
|
693
|
+
await tracerProvider.forceFlush();
|
|
694
|
+
}
|
|
695
|
+
catch (error) {
|
|
696
|
+
failures.push({ signal: "traces", error });
|
|
697
|
+
logger.error(`${LOG_PREFIX} Trace flush failed`, {
|
|
698
|
+
error: error instanceof Error ? error.message : String(error),
|
|
699
|
+
stack: error instanceof Error ? error.stack : undefined,
|
|
700
|
+
});
|
|
701
|
+
}
|
|
702
|
+
}
|
|
703
|
+
else {
|
|
704
|
+
logger.debug(`${LOG_PREFIX} No TracerProvider to flush`);
|
|
558
705
|
}
|
|
706
|
+
if (meterProvider) {
|
|
707
|
+
try {
|
|
708
|
+
logger.info(`${LOG_PREFIX} Flushing OTLP metrics...`);
|
|
709
|
+
await meterProvider.forceFlush();
|
|
710
|
+
}
|
|
711
|
+
catch (error) {
|
|
712
|
+
failures.push({ signal: "metrics", error });
|
|
713
|
+
logger.error(`${LOG_PREFIX} Metric flush failed`, {
|
|
714
|
+
error: error instanceof Error ? error.message : String(error),
|
|
715
|
+
stack: error instanceof Error ? error.stack : undefined,
|
|
716
|
+
});
|
|
717
|
+
}
|
|
718
|
+
}
|
|
719
|
+
else {
|
|
720
|
+
logger.debug(`${LOG_PREFIX} No MeterProvider to flush`);
|
|
721
|
+
}
|
|
722
|
+
if (loggerProvider) {
|
|
723
|
+
try {
|
|
724
|
+
logger.info(`${LOG_PREFIX} Flushing OTLP logs...`);
|
|
725
|
+
await loggerProvider.forceFlush();
|
|
726
|
+
}
|
|
727
|
+
catch (error) {
|
|
728
|
+
failures.push({ signal: "logs", error });
|
|
729
|
+
logger.error(`${LOG_PREFIX} Log flush failed`, {
|
|
730
|
+
error: error instanceof Error ? error.message : String(error),
|
|
731
|
+
stack: error instanceof Error ? error.stack : undefined,
|
|
732
|
+
});
|
|
733
|
+
}
|
|
734
|
+
}
|
|
735
|
+
else {
|
|
736
|
+
logger.debug(`${LOG_PREFIX} No LoggerProvider to flush`);
|
|
737
|
+
}
|
|
738
|
+
if (failures.length > 0) {
|
|
739
|
+
throw new Error(`${LOG_PREFIX} Flush failed for: ${failures.map((f) => f.signal).join(", ")}`);
|
|
740
|
+
}
|
|
741
|
+
logger.info(`${LOG_PREFIX} Flush complete`);
|
|
559
742
|
}
|
|
560
743
|
/**
|
|
561
744
|
* Shutdown OpenTelemetry and Langfuse span processor
|
|
@@ -577,7 +760,17 @@ export async function shutdownOpenTelemetry() {
|
|
|
577
760
|
if (cachedContextEnricher) {
|
|
578
761
|
await cachedContextEnricher.shutdown();
|
|
579
762
|
}
|
|
763
|
+
// Shutdown MeterProvider if we created it
|
|
764
|
+
if (meterProvider) {
|
|
765
|
+
await meterProvider.shutdown();
|
|
766
|
+
}
|
|
767
|
+
// Shutdown LoggerProvider if we created it
|
|
768
|
+
if (loggerProvider) {
|
|
769
|
+
await loggerProvider.shutdown();
|
|
770
|
+
}
|
|
580
771
|
tracerProvider = null;
|
|
772
|
+
meterProvider = null;
|
|
773
|
+
loggerProvider = null;
|
|
581
774
|
langfuseProcessor = null;
|
|
582
775
|
cachedContextEnricher = null;
|
|
583
776
|
isInitialized = false;
|
|
@@ -603,6 +796,13 @@ export function getLangfuseSpanProcessor() {
|
|
|
603
796
|
export function getTracerProvider() {
|
|
604
797
|
return tracerProvider;
|
|
605
798
|
}
|
|
799
|
+
/**
|
|
800
|
+
* Get the logger provider for emitting OTLP log records.
|
|
801
|
+
* Returns null if OTLP is not configured or LoggerProvider was not created.
|
|
802
|
+
*/
|
|
803
|
+
export function getLoggerProvider() {
|
|
804
|
+
return loggerProvider;
|
|
805
|
+
}
|
|
606
806
|
/**
|
|
607
807
|
* Check if OpenTelemetry is initialized
|
|
608
808
|
*/
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* BullMQ Backend — Production-grade task scheduling via Redis.
|
|
3
|
+
*
|
|
4
|
+
* - Cron tasks → BullMQ repeatable jobs with cron pattern
|
|
5
|
+
* - Interval tasks → BullMQ repeatable jobs with `every` option
|
|
6
|
+
* - One-shot tasks → BullMQ delayed jobs
|
|
7
|
+
* - Survives process restarts (Redis-persisted)
|
|
8
|
+
*/
|
|
9
|
+
import { type Task, type TaskBackend, type TaskExecutorFn, type TaskManagerConfig } from "../../types/taskTypes.js";
|
|
10
|
+
export declare class BullMQBackend implements TaskBackend {
|
|
11
|
+
readonly name = "bullmq";
|
|
12
|
+
private queue;
|
|
13
|
+
private worker;
|
|
14
|
+
private executors;
|
|
15
|
+
private config;
|
|
16
|
+
constructor(config: TaskManagerConfig);
|
|
17
|
+
initialize(): Promise<void>;
|
|
18
|
+
shutdown(): Promise<void>;
|
|
19
|
+
schedule(task: Task, executor: TaskExecutorFn): Promise<void>;
|
|
20
|
+
cancel(taskId: string): Promise<void>;
|
|
21
|
+
pause(taskId: string): Promise<void>;
|
|
22
|
+
resume(taskId: string): Promise<void>;
|
|
23
|
+
isHealthy(): Promise<boolean>;
|
|
24
|
+
/**
|
|
25
|
+
* Returns a connection options object for BullMQ / ioredis.
|
|
26
|
+
* When a URL is provided we parse it fully, preserving TLS (`rediss://`),
|
|
27
|
+
* ACL username, password, db index, and any query-string parameters so
|
|
28
|
+
* nothing is silently dropped.
|
|
29
|
+
*/
|
|
30
|
+
private getConnectionConfig;
|
|
31
|
+
private ensureInitialized;
|
|
32
|
+
private getQueue;
|
|
33
|
+
}
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* BullMQ Backend — Production-grade task scheduling via Redis.
|
|
3
|
+
*
|
|
4
|
+
* - Cron tasks → BullMQ repeatable jobs with cron pattern
|
|
5
|
+
* - Interval tasks → BullMQ repeatable jobs with `every` option
|
|
6
|
+
* - One-shot tasks → BullMQ delayed jobs
|
|
7
|
+
* - Survives process restarts (Redis-persisted)
|
|
8
|
+
*/
|
|
9
|
+
import { Queue, Worker } from "bullmq";
|
|
10
|
+
import { logger } from "../../utils/logger.js";
|
|
11
|
+
import { TaskError } from "../errors.js";
|
|
12
|
+
import { TASK_DEFAULTS, } from "../../types/taskTypes.js";
|
|
13
|
+
const QUEUE_NAME = "neurolink-tasks";
|
|
14
|
+
export class BullMQBackend {
    name = "bullmq";
    // Lazily created in initialize(); null until then and after shutdown().
    queue = null;
    worker = null;
    // In-process map of taskId → executor function. Executors are NOT
    // persisted to Redis: a job picked up by a different process (or after
    // a restart) will find no executor and is skipped with a warning.
    executors = new Map();
    config;
    /**
     * @param config TaskManager configuration; `config.redis` supplies the
     *   connection details and `config.maxConcurrentRuns` the worker
     *   concurrency (falls back to TASK_DEFAULTS).
     */
    constructor(config) {
        this.config = config;
    }
    /**
     * Creates the BullMQ queue and worker and wires up failure/error
     * logging. Must complete before schedule()/cancel() are used.
     */
    async initialize() {
        const connection = this.getConnectionConfig();
        this.queue = new Queue(QUEUE_NAME, { connection });
        this.worker = new Worker(QUEUE_NAME, async (job) => {
            const taskId = job.data.taskId;
            const task = job.data.task;
            const executor = this.executors.get(taskId);
            if (!executor) {
                // Executor registry is in-process only — see note on `executors`.
                logger.warn("[BullMQ] No executor found for task", { taskId });
                return;
            }
            logger.info("[BullMQ] Executing task", { taskId, name: task.name });
            const result = await executor(task);
            return result;
        }, {
            connection,
            concurrency: this.config.maxConcurrentRuns ?? TASK_DEFAULTS.maxConcurrentRuns,
        });
        this.worker.on("failed", (job, err) => {
            logger.error("[BullMQ] Job failed", {
                taskId: job?.data?.taskId,
                error: String(err),
            });
        });
        this.worker.on("error", (err) => {
            logger.error("[BullMQ] Worker error", { error: String(err) });
        });
        logger.info("[BullMQ] Backend initialized");
    }
    /**
     * Closes the worker and queue (in that order, so no new jobs are picked
     * up while the queue is still open) and clears the executor registry.
     * Safe to call repeatedly.
     */
    async shutdown() {
        if (this.worker) {
            await this.worker.close();
            this.worker = null;
        }
        if (this.queue) {
            await this.queue.close();
            this.queue = null;
        }
        this.executors.clear();
        logger.info("[BullMQ] Backend shut down");
    }
    /**
     * Registers `executor` for the task and creates the matching BullMQ
     * job: a repeating job scheduler for "cron"/"interval" schedules, or a
     * single delayed job for "once" schedules.
     *
     * @throws TaskError if a "once" schedule carries an unparseable date.
     */
    async schedule(task, executor) {
        const queue = this.getQueue();
        this.executors.set(task.id, executor);
        const jobData = { taskId: task.id, task };
        const schedule = task.schedule;
        if (schedule.type === "cron") {
            await queue.upsertJobScheduler(task.id, {
                pattern: schedule.expression,
                ...(schedule.timezone ? { tz: schedule.timezone } : {}),
            }, { name: task.name, data: jobData });
        }
        else if (schedule.type === "interval") {
            await queue.upsertJobScheduler(task.id, { every: schedule.every }, { name: task.name, data: jobData });
        }
        else if (schedule.type === "once") {
            const at = typeof schedule.at === "string" ? new Date(schedule.at) : schedule.at;
            const atMs = at.getTime();
            if (Number.isNaN(atMs)) {
                // An invalid date would otherwise flow through as delay: NaN
                // and silently create a broken job — fail loudly instead,
                // and undo the executor registration done above.
                this.executors.delete(task.id);
                throw TaskError.create("INVALID_SCHEDULE", `[BullMQ] Invalid "once" schedule date for task ${task.id}`);
            }
            // Past timestamps collapse to delay 0 → run as soon as possible.
            const delay = Math.max(0, atMs - Date.now());
            await queue.add(task.name, jobData, {
                jobId: task.id,
                delay,
            });
        }
        logger.info("[BullMQ] Task scheduled", {
            taskId: task.id,
            type: schedule.type,
        });
    }
    /**
     * Removes the task's executor plus any repeatable scheduler and/or
     * pending job in Redis. Best-effort: each removal path tolerates the
     * job already being gone.
     */
    async cancel(taskId) {
        const queue = this.getQueue();
        this.executors.delete(taskId);
        // Remove repeatable job scheduler
        try {
            await queue.removeJobScheduler(taskId);
        }
        catch {
            // May not be a repeatable job — try removing by job ID
        }
        // Remove delayed/waiting job
        try {
            const job = await queue.getJob(taskId);
            if (job) {
                await job.remove();
            }
        }
        catch {
            // Job may already be processed/removed
        }
        logger.info("[BullMQ] Task cancelled", { taskId });
    }
    async pause(taskId) {
        // BullMQ doesn't have per-job pause, so we fully cancel the job scheduler
        // and executor. This is intentionally destructive — cancel() removes both
        // the executor from the map and the job/scheduler from Redis.
        //
        // Resume flow (orchestrated by TaskManager):
        // 1. TaskManager.resume() updates task status to "active" in the store
        // 2. TaskManager.resume() calls backend.schedule(task, newExecutor)
        // 3. schedule() re-registers the executor and creates a new job/scheduler
        //
        // Because TaskManager always supplies a fresh executor on schedule(),
        // there is no need to preserve the old executor here.
        await this.cancel(taskId);
        logger.info("[BullMQ] Task paused (cancelled pending jobs; TaskManager will re-schedule on resume)", { taskId });
    }
    async resume(taskId) {
        // No-op: BullMQ resume is handled by TaskManager calling schedule() after
        // this method returns. See TaskManager.resume() which calls:
        //   backend.schedule(updatedTask, executor)
        // That call re-registers the executor and creates the job/scheduler in Redis.
        logger.info("[BullMQ] Task resume requested (awaiting re-schedule from TaskManager)", { taskId });
    }
    /**
     * @returns true when the backend is initialized and Redis is reachable
     *   (probed via a cheap getJobCounts() round-trip), false otherwise.
     */
    async isHealthy() {
        if (!this.queue) {
            return false;
        }
        try {
            // Check if the queue can reach Redis
            await this.queue.getJobCounts();
            return true;
        }
        catch {
            return false;
        }
    }
    // ── Internal ──────────────────────────────────────────
    /**
     * Returns a connection options object for BullMQ / ioredis.
     * When a URL is provided we parse host, port, db index, ACL username,
     * password, and TLS (the `rediss://` scheme). NOTE: query-string
     * parameters on the URL are NOT forwarded — pass such options through
     * the discrete `redis` config fields instead.
     */
    getConnectionConfig() {
        const redis = this.config.redis ?? {};
        if (redis.url) {
            const parsed = new URL(redis.url);
            const opts = {
                host: parsed.hostname || "localhost",
                port: Number(parsed.port) || 6379,
                // URL pathname is "/<db>"; empty or non-numeric → db 0.
                db: parsed.pathname ? Number(parsed.pathname.slice(1)) || 0 : 0,
            };
            if (parsed.password) {
                opts.password = decodeURIComponent(parsed.password);
            }
            if (parsed.username) {
                opts.username = decodeURIComponent(parsed.username);
            }
            // rediss:// scheme → enable TLS
            if (parsed.protocol === "rediss:") {
                opts.tls = {};
            }
            return opts;
        }
        return {
            host: redis.host ?? TASK_DEFAULTS.redis.host,
            port: redis.port ?? TASK_DEFAULTS.redis.port,
            ...(redis.password ? { password: redis.password } : {}),
            db: redis.db ?? 0,
        };
    }
    // Throws when initialize() has not been called (or after shutdown()).
    ensureInitialized() {
        if (!this.queue) {
            throw TaskError.create("BACKEND_NOT_INITIALIZED", "[BullMQ] Backend not initialized. Call initialize() first.");
        }
    }
    // Narrowing helper: returns the non-null queue or throws.
    getQueue() {
        this.ensureInitialized();
        if (!this.queue) {
            throw TaskError.create("BACKEND_NOT_INITIALIZED", "[BullMQ] Queue is unavailable after initialization.");
        }
        return this.queue;
    }
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/**
 * NodeTimeout Backend — Development/zero-dependency task scheduling.
 *
 * - Cron tasks → parsed with `croner`, scheduled via setTimeout chains
 * - Interval tasks → setInterval
 * - One-shot tasks → setTimeout
 * - All timers are in-process — lost on restart
 */
import { type Task, type TaskBackend, type TaskExecutorFn, type TaskManagerConfig } from "../../types/taskTypes.js";
export declare class NodeTimeoutBackend implements TaskBackend {
    readonly name = "node-timeout";
    /** Bookkeeping for scheduled tasks; timers are in-process only (presumably keyed by task id — see implementation). */
    private scheduled;
    /** Tracks paused tasks so their timers are not fired (see implementation for the exact representation). */
    private paused;
    /** Set once the backend has been shut down, guarding against late timer fires. */
    private disposed;
    /** Count of task executions currently in flight. */
    private activeRuns;
    /** Upper bound on concurrent executions, taken from config. */
    private maxConcurrentRuns;
    constructor(config: TaskManagerConfig);
    /** Prepares the backend for scheduling; no external resources are required. */
    initialize(): Promise<void>;
    /** Cancels all in-process timers; scheduled work is lost (not persisted). */
    shutdown(): Promise<void>;
    /** Registers `executor` and arms the timer(s) matching `task.schedule`. */
    schedule(task: Task, executor: TaskExecutorFn): Promise<void>;
    /** Clears any timer for the task and forgets its executor. */
    cancel(taskId: string): Promise<void>;
    /** Marks the task paused so pending timers do not execute it. */
    pause(taskId: string): Promise<void>;
    /** Clears the paused mark so the task may run again. */
    resume(taskId: string): Promise<void>;
    /** Reports backend liveness; no external dependency to probe. */
    isHealthy(): Promise<boolean>;
    /** Runs a task's executor, honoring the concurrency limit. */
    private executeTask;
    /** Tears down a single task's timer/bookkeeping entry. */
    private clearEntry;
}
|