@zintrust/core 0.1.42 → 0.1.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/app/Controllers/AuthController.d.ts +10 -0
- package/app/Controllers/AuthController.d.ts.map +1 -0
- package/app/Controllers/AuthController.js +201 -0
- package/app/Controllers/UserController.d.ts +9 -0
- package/app/Controllers/UserController.d.ts.map +1 -0
- package/app/Controllers/UserController.js +8 -0
- package/app/Controllers/UserQueryBuilderController.d.ts +16 -0
- package/app/Controllers/UserQueryBuilderController.d.ts.map +1 -0
- package/app/Controllers/UserQueryBuilderController.js +404 -0
- package/app/Middleware/ProfilerMiddleware.d.ts +12 -0
- package/app/Middleware/ProfilerMiddleware.d.ts.map +1 -0
- package/app/Middleware/ProfilerMiddleware.js +47 -0
- package/app/Middleware/index.d.ts +59 -0
- package/app/Middleware/index.d.ts.map +1 -0
- package/app/Middleware/index.js +215 -0
- package/app/Models/Post.d.ts +14 -0
- package/app/Models/Post.d.ts.map +1 -0
- package/app/Models/Post.js +27 -0
- package/app/Models/User.d.ts +14 -0
- package/app/Models/User.d.ts.map +1 -0
- package/app/Models/User.js +44 -0
- package/app/Schedules/JobTracking.d.ts +3 -0
- package/app/Schedules/JobTracking.d.ts.map +1 -0
- package/app/Schedules/JobTracking.js +13 -0
- package/app/Schedules/index.d.ts +2 -0
- package/app/Schedules/index.d.ts.map +1 -0
- package/app/Schedules/index.js +1 -0
- package/app/Toolkit/Broadcast/sendBroadcast.d.ts +6 -0
- package/app/Toolkit/Broadcast/sendBroadcast.d.ts.map +1 -0
- package/app/Toolkit/Broadcast/sendBroadcast.js +5 -0
- package/app/Toolkit/Mail/sendWelcomeEmail.d.ts +6 -0
- package/app/Toolkit/Mail/sendWelcomeEmail.d.ts.map +1 -0
- package/app/Toolkit/Mail/sendWelcomeEmail.js +20 -0
- package/app/Toolkit/Notification/sendSlackNotification.d.ts +8 -0
- package/app/Toolkit/Notification/sendSlackNotification.d.ts.map +1 -0
- package/app/Toolkit/Notification/sendSlackNotification.js +5 -0
- package/app/Toolkit/Notification/sendSms.d.ts +6 -0
- package/app/Toolkit/Notification/sendSms.d.ts.map +1 -0
- package/app/Toolkit/Notification/sendSms.js +5 -0
- package/app/Types/controller.d.ts +42 -0
- package/app/Types/controller.d.ts.map +1 -0
- package/app/Types/controller.js +1 -0
- package/config/broadcast.d.ts +38 -0
- package/config/broadcast.d.ts.map +1 -0
- package/config/broadcast.js +37 -0
- package/config/cache.d.ts +40 -0
- package/config/cache.d.ts.map +1 -0
- package/config/cache.js +39 -0
- package/config/database.d.ts +58 -0
- package/config/database.d.ts.map +1 -0
- package/config/database.js +65 -0
- package/config/mail.d.ts +51 -0
- package/config/mail.d.ts.map +1 -0
- package/config/mail.js +69 -0
- package/config/middleware.d.ts +11 -0
- package/config/middleware.d.ts.map +1 -0
- package/config/middleware.js +30 -0
- package/config/notification.d.ts +33 -0
- package/config/notification.d.ts.map +1 -0
- package/config/notification.js +33 -0
- package/config/queue.d.ts +55 -0
- package/config/queue.d.ts.map +1 -0
- package/config/queue.js +87 -0
- package/config/storage.d.ts +59 -0
- package/config/storage.d.ts.map +1 -0
- package/config/storage.js +59 -0
- package/config/workers.d.ts +54 -0
- package/config/workers.d.ts.map +1 -0
- package/config/workers.js +83 -0
- package/package.json +12 -5
- package/packages/cloudflare-d1-proxy/src/index.d.ts +48 -0
- package/packages/cloudflare-d1-proxy/src/index.d.ts.map +1 -0
- package/packages/cloudflare-d1-proxy/src/index.js +387 -0
- package/packages/cloudflare-kv-proxy/src/index.d.ts +44 -0
- package/packages/cloudflare-kv-proxy/src/index.d.ts.map +1 -0
- package/packages/cloudflare-kv-proxy/src/index.js +325 -0
- package/packages/queue-monitor/src/QueueMonitoringService.d.ts +35 -0
- package/packages/queue-monitor/src/QueueMonitoringService.d.ts.map +1 -0
- package/packages/queue-monitor/src/QueueMonitoringService.js +194 -0
- package/packages/queue-monitor/src/connection.d.ts +3 -0
- package/packages/queue-monitor/src/connection.d.ts.map +1 -0
- package/packages/queue-monitor/src/connection.js +1 -0
- package/packages/queue-monitor/src/dashboard-ui.d.ts +7 -0
- package/packages/queue-monitor/src/dashboard-ui.d.ts.map +1 -0
- package/packages/queue-monitor/src/dashboard-ui.js +997 -0
- package/packages/queue-monitor/src/driver.d.ts +15 -0
- package/packages/queue-monitor/src/driver.d.ts.map +1 -0
- package/packages/queue-monitor/src/driver.js +115 -0
- package/packages/queue-monitor/src/index.d.ts +71 -0
- package/packages/queue-monitor/src/index.d.ts.map +1 -0
- package/packages/queue-monitor/src/index.js +296 -0
- package/packages/queue-monitor/src/metrics.d.ts +27 -0
- package/packages/queue-monitor/src/metrics.d.ts.map +1 -0
- package/packages/queue-monitor/src/metrics.js +92 -0
- package/packages/queue-monitor/src/worker.d.ts +8 -0
- package/packages/queue-monitor/src/worker.d.ts.map +1 -0
- package/packages/queue-monitor/src/worker.js +35 -0
- package/packages/queue-redis/src/BullMQRedisQueue.d.ts +26 -0
- package/packages/queue-redis/src/BullMQRedisQueue.d.ts.map +1 -0
- package/packages/queue-redis/src/BullMQRedisQueue.js +463 -0
- package/packages/queue-redis/src/HttpQueueDriver.d.ts +18 -0
- package/packages/queue-redis/src/HttpQueueDriver.d.ts.map +1 -0
- package/packages/queue-redis/src/HttpQueueDriver.js +249 -0
- package/packages/queue-redis/src/QueueHttpGateway.d.ts +16 -0
- package/packages/queue-redis/src/QueueHttpGateway.d.ts.map +1 -0
- package/packages/queue-redis/src/QueueHttpGateway.js +217 -0
- package/packages/queue-redis/src/RedisPublishClient.d.ts +14 -0
- package/packages/queue-redis/src/RedisPublishClient.d.ts.map +1 -0
- package/packages/queue-redis/src/RedisPublishClient.js +251 -0
- package/packages/queue-redis/src/index.d.ts +12 -0
- package/packages/queue-redis/src/index.d.ts.map +1 -0
- package/packages/queue-redis/src/index.js +10 -0
- package/packages/queue-redis/src/register.d.ts +6 -0
- package/packages/queue-redis/src/register.d.ts.map +1 -0
- package/packages/queue-redis/src/register.js +21 -0
- package/packages/workers/migrations/20260119100000_create_zintrust_workers_table.d.ts +11 -0
- package/packages/workers/migrations/20260119100000_create_zintrust_workers_table.d.ts.map +1 -0
- package/packages/workers/migrations/20260119100000_create_zintrust_workers_table.js +32 -0
- package/packages/workers/migrations/20260123180000_create_queue_jobs_table.d.ts +11 -0
- package/packages/workers/migrations/20260123180000_create_queue_jobs_table.d.ts.map +1 -0
- package/packages/workers/migrations/20260123180000_create_queue_jobs_table.js +46 -0
- package/packages/workers/migrations/20260213142000_create_zintrust_job_tracking_tables.d.ts +7 -0
- package/packages/workers/migrations/20260213142000_create_zintrust_job_tracking_tables.d.ts.map +1 -0
- package/packages/workers/migrations/20260213142000_create_zintrust_job_tracking_tables.js +44 -0
- package/packages/workers/migrations/20260213183000_expand_zintrust_job_tracking_reliability_tables.d.ts +7 -0
- package/packages/workers/migrations/20260213183000_expand_zintrust_job_tracking_reliability_tables.d.ts.map +1 -0
- package/packages/workers/migrations/20260213183000_expand_zintrust_job_tracking_reliability_tables.js +104 -0
- package/packages/workers/src/AnomalyDetection.d.ts +107 -0
- package/packages/workers/src/AnomalyDetection.d.ts.map +1 -0
- package/packages/workers/src/AnomalyDetection.js +329 -0
- package/packages/workers/src/AutoScaler.d.ts +128 -0
- package/packages/workers/src/AutoScaler.d.ts.map +1 -0
- package/packages/workers/src/AutoScaler.js +425 -0
- package/packages/workers/src/BroadcastWorker.d.ts +24 -0
- package/packages/workers/src/BroadcastWorker.d.ts.map +1 -0
- package/packages/workers/src/BroadcastWorker.js +24 -0
- package/packages/workers/src/CanaryController.d.ts +104 -0
- package/packages/workers/src/CanaryController.d.ts.map +1 -0
- package/packages/workers/src/CanaryController.js +424 -0
- package/packages/workers/src/ChaosEngineering.d.ts +80 -0
- package/packages/workers/src/ChaosEngineering.d.ts.map +1 -0
- package/packages/workers/src/ChaosEngineering.js +229 -0
- package/packages/workers/src/CircuitBreaker.d.ts +107 -0
- package/packages/workers/src/CircuitBreaker.d.ts.map +1 -0
- package/packages/workers/src/CircuitBreaker.js +374 -0
- package/packages/workers/src/ClusterLock.d.ts +91 -0
- package/packages/workers/src/ClusterLock.d.ts.map +1 -0
- package/packages/workers/src/ClusterLock.js +397 -0
- package/packages/workers/src/ComplianceManager.d.ts +178 -0
- package/packages/workers/src/ComplianceManager.d.ts.map +1 -0
- package/packages/workers/src/ComplianceManager.js +556 -0
- package/packages/workers/src/DatacenterOrchestrator.d.ts +134 -0
- package/packages/workers/src/DatacenterOrchestrator.d.ts.map +1 -0
- package/packages/workers/src/DatacenterOrchestrator.js +404 -0
- package/packages/workers/src/DeadLetterQueue.d.ts +123 -0
- package/packages/workers/src/DeadLetterQueue.d.ts.map +1 -0
- package/packages/workers/src/DeadLetterQueue.js +544 -0
- package/packages/workers/src/HealthMonitor.d.ts +43 -0
- package/packages/workers/src/HealthMonitor.d.ts.map +1 -0
- package/packages/workers/src/HealthMonitor.js +312 -0
- package/packages/workers/src/MultiQueueWorker.d.ts +90 -0
- package/packages/workers/src/MultiQueueWorker.d.ts.map +1 -0
- package/packages/workers/src/MultiQueueWorker.js +282 -0
- package/packages/workers/src/NotificationWorker.d.ts +24 -0
- package/packages/workers/src/NotificationWorker.d.ts.map +1 -0
- package/packages/workers/src/NotificationWorker.js +23 -0
- package/packages/workers/src/Observability.d.ts +154 -0
- package/packages/workers/src/Observability.d.ts.map +1 -0
- package/packages/workers/src/Observability.js +538 -0
- package/packages/workers/src/PluginManager.d.ts +124 -0
- package/packages/workers/src/PluginManager.d.ts.map +1 -0
- package/packages/workers/src/PluginManager.js +392 -0
- package/packages/workers/src/PriorityQueue.d.ts +118 -0
- package/packages/workers/src/PriorityQueue.d.ts.map +1 -0
- package/packages/workers/src/PriorityQueue.js +276 -0
- package/packages/workers/src/ResourceMonitor.d.ts +165 -0
- package/packages/workers/src/ResourceMonitor.d.ts.map +1 -0
- package/packages/workers/src/ResourceMonitor.js +632 -0
- package/packages/workers/src/SLAMonitor.d.ts +111 -0
- package/packages/workers/src/SLAMonitor.d.ts.map +1 -0
- package/packages/workers/src/SLAMonitor.js +274 -0
- package/packages/workers/src/WorkerFactory.d.ts +218 -0
- package/packages/workers/src/WorkerFactory.d.ts.map +1 -0
- package/packages/workers/src/WorkerFactory.js +2253 -0
- package/packages/workers/src/WorkerInit.d.ts +86 -0
- package/packages/workers/src/WorkerInit.d.ts.map +1 -0
- package/packages/workers/src/WorkerInit.js +307 -0
- package/packages/workers/src/WorkerMetrics.d.ts +116 -0
- package/packages/workers/src/WorkerMetrics.d.ts.map +1 -0
- package/packages/workers/src/WorkerMetrics.js +570 -0
- package/packages/workers/src/WorkerRegistry.d.ts +152 -0
- package/packages/workers/src/WorkerRegistry.d.ts.map +1 -0
- package/packages/workers/src/WorkerRegistry.js +396 -0
- package/packages/workers/src/WorkerShutdown.d.ts +70 -0
- package/packages/workers/src/WorkerShutdown.d.ts.map +1 -0
- package/packages/workers/src/WorkerShutdown.js +185 -0
- package/packages/workers/src/WorkerVersioning.d.ts +108 -0
- package/packages/workers/src/WorkerVersioning.d.ts.map +1 -0
- package/packages/workers/src/WorkerVersioning.js +300 -0
- package/packages/workers/src/config/workerConfig.d.ts +5 -0
- package/packages/workers/src/config/workerConfig.d.ts.map +1 -0
- package/packages/workers/src/config/workerConfig.js +25 -0
- package/packages/workers/src/createQueueWorker.d.ts +26 -0
- package/packages/workers/src/createQueueWorker.d.ts.map +1 -0
- package/packages/workers/src/createQueueWorker.js +367 -0
- package/packages/workers/src/dashboard/index.d.ts +2 -0
- package/packages/workers/src/dashboard/index.d.ts.map +1 -0
- package/packages/workers/src/dashboard/index.js +1 -0
- package/packages/workers/src/dashboard/types.d.ts +123 -0
- package/packages/workers/src/dashboard/types.d.ts.map +1 -0
- package/packages/workers/src/dashboard/types.js +1 -0
- package/packages/workers/src/dashboard/workers-api.d.ts +5 -0
- package/packages/workers/src/dashboard/workers-api.d.ts.map +1 -0
- package/packages/workers/src/dashboard/workers-api.js +738 -0
- package/packages/workers/src/helper/index.d.ts +6 -0
- package/packages/workers/src/helper/index.d.ts.map +1 -0
- package/packages/workers/src/helper/index.js +10 -0
- package/packages/workers/src/http/WorkerApiController.d.ts +39 -0
- package/packages/workers/src/http/WorkerApiController.d.ts.map +1 -0
- package/packages/workers/src/http/WorkerApiController.js +313 -0
- package/packages/workers/src/http/WorkerController.d.ts +375 -0
- package/packages/workers/src/http/WorkerController.d.ts.map +1 -0
- package/packages/workers/src/http/WorkerController.js +1454 -0
- package/packages/workers/src/http/WorkerMonitoringService.d.ts +12 -0
- package/packages/workers/src/http/WorkerMonitoringService.d.ts.map +1 -0
- package/packages/workers/src/http/WorkerMonitoringService.js +89 -0
- package/packages/workers/src/http/middleware/CustomValidation.d.ts +93 -0
- package/packages/workers/src/http/middleware/CustomValidation.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/CustomValidation.js +270 -0
- package/packages/workers/src/http/middleware/DatacenterValidator.d.ts +4 -0
- package/packages/workers/src/http/middleware/DatacenterValidator.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/DatacenterValidator.js +94 -0
- package/packages/workers/src/http/middleware/EditWorkerValidation.d.ts +8 -0
- package/packages/workers/src/http/middleware/EditWorkerValidation.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/EditWorkerValidation.js +56 -0
- package/packages/workers/src/http/middleware/FeaturesValidator.d.ts +4 -0
- package/packages/workers/src/http/middleware/FeaturesValidator.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/FeaturesValidator.js +60 -0
- package/packages/workers/src/http/middleware/InfrastructureValidator.d.ts +32 -0
- package/packages/workers/src/http/middleware/InfrastructureValidator.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/InfrastructureValidator.js +226 -0
- package/packages/workers/src/http/middleware/OptionsValidator.d.ts +4 -0
- package/packages/workers/src/http/middleware/OptionsValidator.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/OptionsValidator.js +112 -0
- package/packages/workers/src/http/middleware/PayloadSanitizer.d.ts +8 -0
- package/packages/workers/src/http/middleware/PayloadSanitizer.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/PayloadSanitizer.js +42 -0
- package/packages/workers/src/http/middleware/ProcessorPathSanitizer.d.ts +4 -0
- package/packages/workers/src/http/middleware/ProcessorPathSanitizer.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/ProcessorPathSanitizer.js +140 -0
- package/packages/workers/src/http/middleware/QueueNameSanitizer.d.ts +4 -0
- package/packages/workers/src/http/middleware/QueueNameSanitizer.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/QueueNameSanitizer.js +45 -0
- package/packages/workers/src/http/middleware/ValidateDriver.d.ts +8 -0
- package/packages/workers/src/http/middleware/ValidateDriver.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/ValidateDriver.js +20 -0
- package/packages/workers/src/http/middleware/VersionSanitizer.d.ts +4 -0
- package/packages/workers/src/http/middleware/VersionSanitizer.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/VersionSanitizer.js +25 -0
- package/packages/workers/src/http/middleware/WorkerNameSanitizer.d.ts +4 -0
- package/packages/workers/src/http/middleware/WorkerNameSanitizer.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/WorkerNameSanitizer.js +46 -0
- package/packages/workers/src/http/middleware/WorkerValidationChain.d.ts +28 -0
- package/packages/workers/src/http/middleware/WorkerValidationChain.d.ts.map +1 -0
- package/packages/workers/src/http/middleware/WorkerValidationChain.js +186 -0
- package/packages/workers/src/index.d.ts +47 -0
- package/packages/workers/src/index.d.ts.map +1 -0
- package/packages/workers/src/index.js +48 -0
- package/packages/workers/src/routes/workers.d.ts +13 -0
- package/packages/workers/src/routes/workers.d.ts.map +1 -0
- package/packages/workers/src/routes/workers.js +126 -0
- package/packages/workers/src/storage/WorkerStore.d.ts +52 -0
- package/packages/workers/src/storage/WorkerStore.d.ts.map +1 -0
- package/packages/workers/src/storage/WorkerStore.js +259 -0
- package/packages/workers/src/telemetry/api/TelemetryAPI.d.ts +47 -0
- package/packages/workers/src/telemetry/api/TelemetryAPI.d.ts.map +1 -0
- package/packages/workers/src/telemetry/api/TelemetryAPI.js +219 -0
- package/packages/workers/src/telemetry/api/TelemetryMonitoringService.d.ts +18 -0
- package/packages/workers/src/telemetry/api/TelemetryMonitoringService.d.ts.map +1 -0
- package/packages/workers/src/telemetry/api/TelemetryMonitoringService.js +140 -0
- package/packages/workers/src/telemetry/components/AlertPanel.d.ts +2 -0
- package/packages/workers/src/telemetry/components/AlertPanel.d.ts.map +1 -0
- package/packages/workers/src/telemetry/components/AlertPanel.js +13 -0
- package/packages/workers/src/telemetry/components/CostTracking.d.ts +2 -0
- package/packages/workers/src/telemetry/components/CostTracking.d.ts.map +1 -0
- package/packages/workers/src/telemetry/components/CostTracking.js +14 -0
- package/packages/workers/src/telemetry/components/ResourceUsageChart.d.ts +2 -0
- package/packages/workers/src/telemetry/components/ResourceUsageChart.d.ts.map +1 -0
- package/packages/workers/src/telemetry/components/ResourceUsageChart.js +11 -0
- package/packages/workers/src/telemetry/components/WorkerHealthChart.d.ts +2 -0
- package/packages/workers/src/telemetry/components/WorkerHealthChart.d.ts.map +1 -0
- package/packages/workers/src/telemetry/components/WorkerHealthChart.js +11 -0
- package/packages/workers/src/telemetry/index.d.ts +16 -0
- package/packages/workers/src/telemetry/index.d.ts.map +1 -0
- package/packages/workers/src/telemetry/index.js +60 -0
- package/packages/workers/src/telemetry/routes/dashboard.d.ts +7 -0
- package/packages/workers/src/telemetry/routes/dashboard.d.ts.map +1 -0
- package/packages/workers/src/telemetry/routes/dashboard.js +608 -0
- package/packages/workers/src/type.d.ts +77 -0
- package/packages/workers/src/type.d.ts.map +1 -0
- package/packages/workers/src/type.js +1 -0
- package/packages/workers/src/ui/router/EmbeddedAssets.d.ts +5 -0
- package/packages/workers/src/ui/router/EmbeddedAssets.d.ts.map +1 -0
- package/packages/workers/src/ui/router/EmbeddedAssets.js +13 -0
- package/packages/workers/src/ui/router/ui.d.ts +4 -0
- package/packages/workers/src/ui/router/ui.d.ts.map +1 -0
- package/packages/workers/src/ui/router/ui.js +208 -0
- package/packages/workers/src/ui/types/worker-ui.d.ts +230 -0
- package/packages/workers/src/ui/types/worker-ui.d.ts.map +1 -0
- package/packages/workers/src/ui/types/worker-ui.js +5 -0
- package/routes/api.d.ts +7 -0
- package/routes/api.d.ts.map +1 -0
- package/routes/api.js +129 -0
- package/routes/broadcast.d.ts +9 -0
- package/routes/broadcast.d.ts.map +1 -0
- package/routes/broadcast.js +27 -0
- package/routes/storage.d.ts +4 -0
- package/routes/storage.d.ts.map +1 -0
- package/routes/storage.js +35 -0
- package/src/cache/Cache.d.ts.map +1 -1
- package/src/cache/Cache.js +40 -8
- package/src/cache/drivers/KVRemoteDriver.d.ts +1 -1
- package/src/cache/drivers/KVRemoteDriver.d.ts.map +1 -1
- package/src/cache/drivers/KVRemoteDriver.js +259 -44
- package/src/cache/drivers/MemoryDriver.d.ts.map +1 -1
- package/src/cache/drivers/MemoryDriver.js +10 -2
- package/src/cache/drivers/RedisDriver.d.ts.map +1 -1
- package/src/cache/drivers/RedisDriver.js +256 -33
- package/src/cli/CLI.d.ts.map +1 -1
- package/src/cli/CLI.js +6 -0
- package/src/cli/commands/DeployCommand.d.ts.map +1 -1
- package/src/cli/commands/DeployCommand.js +12 -1
- package/src/cli/commands/DeployContainersProxyCommand.d.ts +5 -0
- package/src/cli/commands/DeployContainersProxyCommand.d.ts.map +1 -0
- package/src/cli/commands/DeployContainersProxyCommand.js +45 -0
- package/src/cli/commands/DockerCommand.d.ts +5 -0
- package/src/cli/commands/DockerCommand.d.ts.map +1 -0
- package/src/cli/commands/DockerCommand.js +74 -0
- package/src/cli/commands/InitContainerCommand.js +10 -10
- package/src/cli/commands/InitContainersProxyCommand.d.ts +5 -0
- package/src/cli/commands/InitContainersProxyCommand.d.ts.map +1 -0
- package/src/cli/commands/InitContainersProxyCommand.js +159 -0
- package/src/cli/commands/NewCommand.d.ts.map +1 -1
- package/src/cli/commands/NewCommand.js +33 -0
- package/src/cli/commands/PutCommand.d.ts.map +1 -1
- package/src/cli/commands/PutCommand.js +15 -5
- package/src/cli/commands/StartCommand.d.ts.map +1 -1
- package/src/cli/commands/StartCommand.js +43 -4
- package/src/cli/scaffolding/ProjectScaffolder.d.ts.map +1 -1
- package/src/cli/scaffolding/ProjectScaffolder.js +65 -73
- package/src/cli/utils/DistPackager.d.ts.map +1 -1
- package/src/cli/utils/DistPackager.js +25 -0
- package/src/cli/utils/EnvFileLoader.d.ts +1 -0
- package/src/cli/utils/EnvFileLoader.d.ts.map +1 -1
- package/src/cli/utils/EnvFileLoader.js +14 -0
- package/src/common/RemoteSignedJson.d.ts.map +1 -1
- package/src/common/RemoteSignedJson.js +9 -2
- package/src/config/cache.js +1 -1
- package/src/config/env.d.ts +7 -0
- package/src/config/env.d.ts.map +1 -1
- package/src/config/env.js +8 -0
- package/src/config/queue.js +1 -1
- package/src/functions/cloudflare.d.ts.map +1 -1
- package/src/functions/cloudflare.js +4 -2
- package/src/index.js +3 -3
- package/src/middleware/JwtAuthMiddleware.d.ts.map +1 -1
- package/src/middleware/JwtAuthMiddleware.js +11 -5
- package/src/runtime/RuntimeAdapter.d.ts.map +1 -1
- package/src/runtime/RuntimeAdapter.js +30 -12
- package/src/runtime/adapters/CloudflareAdapter.d.ts.map +1 -1
- package/src/runtime/adapters/CloudflareAdapter.js +15 -4
- package/src/scheduler/Schedule.js +1 -1
- package/src/scheduler/leader/SchedulerLeader.js +1 -1
- package/src/schedules/job-tracking-cleanup.js +1 -1
- package/src/security/TokenRevocation.d.ts +19 -2
- package/src/security/TokenRevocation.d.ts.map +1 -1
- package/src/security/TokenRevocation.js +558 -30
- package/src/templates/project/basic/app/Controllers/AuthController.ts.tpl +11 -3
- package/src/templates/project/basic/config/middleware.ts.tpl +23 -22
- package/src/templates/project/basic/wrangler.jsonc.tpl +28 -0
|
@@ -0,0 +1,2253 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Worker Factory
|
|
3
|
+
* Central factory for creating workers with all advanced features
|
|
4
|
+
* Sealed namespace for immutability
|
|
5
|
+
*/
|
|
6
|
+
import { Cloudflare, createRedisConnection, databaseConfig, Env, ErrorFactory, generateUuid, getBullMQSafeQueueName, JobStateTracker, Logger, NodeSingletons, queueConfig, registerDatabasesFromRuntimeConfig, useEnsureDbConnected, workersConfig, ZintrustLang, } from '../../../src/index.js';
|
|
7
|
+
import { Worker } from 'bullmq';
|
|
8
|
+
import { AutoScaler } from './AutoScaler.js';
|
|
9
|
+
import { CanaryController } from './CanaryController.js';
|
|
10
|
+
import { CircuitBreaker } from './CircuitBreaker.js';
|
|
11
|
+
import { ClusterLock } from './ClusterLock.js';
|
|
12
|
+
import { ComplianceManager } from './ComplianceManager.js';
|
|
13
|
+
import { DatacenterOrchestrator } from './DatacenterOrchestrator.js';
|
|
14
|
+
import { DeadLetterQueue } from './DeadLetterQueue.js';
|
|
15
|
+
import { HealthMonitor } from './HealthMonitor.js';
|
|
16
|
+
import { MultiQueueWorker } from './MultiQueueWorker.js';
|
|
17
|
+
import { Observability } from './Observability.js';
|
|
18
|
+
import { PluginManager } from './PluginManager.js';
|
|
19
|
+
import { PriorityQueue } from './PriorityQueue.js';
|
|
20
|
+
import { ResourceMonitor } from './ResourceMonitor.js';
|
|
21
|
+
import { WorkerMetrics } from './WorkerMetrics.js';
|
|
22
|
+
import { WorkerRegistry } from './WorkerRegistry.js';
|
|
23
|
+
import { WorkerVersioning } from './WorkerVersioning.js';
|
|
24
|
+
import { keyPrefix } from './config/workerConfig.js';
|
|
25
|
+
import { DbWorkerStore, InMemoryWorkerStore, RedisWorkerStore, } from './storage/WorkerStore.js';
|
|
26
|
+
// Node `path` helper obtained via the runtime singleton bridge.
const path = NodeSingletons.path;
/**
 * Detect whether the current runtime is Node.js.
 * @returns {boolean} true when `process.versions.node` is present.
 */
const isNodeRuntime = () => {
    if (typeof process === 'undefined')
        return false;
    return Boolean(process.versions?.node);
};
|
|
28
|
+
/**
 * Root directory for project-relative lookups.
 * Honors ZINTRUST_PROJECT_ROOT when set to a non-empty (trimmed) value,
 * otherwise falls back to the current working directory.
 * @returns {string} Absolute project root path.
 */
const resolveProjectRoot = () => {
    const configuredRoot = Env.get('ZINTRUST_PROJECT_ROOT', '').trim();
    if (configuredRoot.length > 0) {
        return configuredRoot;
    }
    return process.cwd();
};
|
|
32
|
+
/**
 * Whether every Node primitive needed for file-based module imports
 * (fs write/mkdir/exists, url.pathToFileURL, path.join) is available on
 * the NodeSingletons bridge.
 * @returns {boolean}
 */
const canUseProjectFileImports = () => {
    const requiredFns = [
        NodeSingletons?.fs?.writeFileSync,
        NodeSingletons?.fs?.mkdirSync,
        NodeSingletons?.fs?.existsSync,
        NodeSingletons?.url?.pathToFileURL,
        NodeSingletons?.path?.join,
    ];
    return requiredFns.every((fn) => typeof fn === 'function');
};
|
|
37
|
+
/**
 * Candidate on-disk entry points for a known package specifier, listed
 * in preference order (compiled dist output first, then TS source).
 * @param {string} specifier - Bare package name being resolved.
 * @param {string} root - Project root directory.
 * @returns {string[]} Absolute candidate paths; empty for unknown specifiers.
 */
const buildCandidatesForSpecifier = (specifier, root) => {
    switch (specifier) {
        case '@zintrust/core':
            return [
                path.join(root, 'dist', 'src', 'index.js'),
                path.join(root, 'dist', 'index.js'),
                path.join(root, 'src', 'index.ts'),
            ];
        case '@zintrust/workers':
            return [
                path.join(root, 'dist', 'packages', 'workers', 'src', 'index.js'),
                path.join(root, 'packages', 'workers', 'src', 'index.ts'),
            ];
        default:
            return [];
    }
};
|
|
53
|
+
/**
 * First candidate path that exists on disk.
 * @param {string[]} paths - Candidate absolute paths, in preference order.
 * @returns {string|null} The first existing path, or null when file
 *   imports are unavailable or nothing matches.
 */
const getProjectFileCandidates = (paths) => {
    if (!canUseProjectFileImports()) {
        return null;
    }
    const found = paths.find((candidate) => NodeSingletons.fs.existsSync(candidate));
    return found ?? null;
};
|
|
62
|
+
/**
 * Resolve a package specifier to a file:// URL by probing well-known
 * project-local build outputs.
 * @param {string} specifier - Bare package name.
 * @returns {string|null} file:// URL of the first existing candidate, or
 *   null when file imports are unavailable or no candidate exists.
 */
const resolveLocalPackageFallback = (specifier) => {
    if (!canUseProjectFileImports()) {
        return null;
    }
    const projectRoot = resolveProjectRoot();
    const candidatePaths = buildCandidatesForSpecifier(specifier, projectRoot);
    const match = getProjectFileCandidates(candidatePaths);
    return match ? NodeSingletons.url.pathToFileURL(match).href : null;
};
|
|
72
|
+
/**
 * Resolve a package specifier (e.g. '@zintrust/core') to an importable
 * file:// URL.
 *
 * Strategy:
 * 1. Return null when not on Node or when file-system primitives are
 *    unavailable.
 * 2. Without module.createRequire, fall back to probing project-local
 *    build outputs.
 * 3. Otherwise resolve via require.resolve; for '@zintrust/workers' a
 *    node_modules hit is overridden by a project-local copy when one
 *    exists (so local source wins over the installed package).
 * 4. Any resolution error falls back to the project-local probe.
 * @param {string} specifier - Bare package specifier to resolve.
 * @returns {string|null} file:// URL, or null when unresolvable.
 */
const resolvePackageSpecifierUrl = (specifier) => {
    if (!isNodeRuntime() || !canUseProjectFileImports())
        return null;
    if (typeof NodeSingletons?.module?.createRequire !== 'function') {
        return resolveLocalPackageFallback(specifier);
    }
    try {
        const require = NodeSingletons.module.createRequire(import.meta.url);
        const resolved = require.resolve(specifier);
        // Prefer the project-local workers build over the copy installed
        // under node_modules, when both are present.
        if (specifier === '@zintrust/workers' &&
            resolved.includes(`${path.sep}node_modules${path.sep}@zintrust${path.sep}workers${path.sep}`)) {
            const local = resolveLocalPackageFallback(specifier);
            if (local)
                return local;
        }
        return NodeSingletons.url.pathToFileURL(resolved).href;
    }
    catch {
        // require.resolve throws when the specifier is not installed;
        // fall back to the project-local probe.
        return resolveLocalPackageFallback(specifier);
    }
};
|
|
93
|
+
/**
 * Escape regex metacharacters so `value` matches literally inside a RegExp.
 * @param {string} value - Raw string to escape.
 * @returns {string} Escaped string safe for RegExp construction.
 */
const escapeRegExp = (value) => value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
|
94
|
+
/**
 * Rewrite bare '@zintrust/core' / '@zintrust/workers' import specifiers in
 * processor source code to absolute file:// URLs so the code can be
 * imported outside the package's own module-resolution context.
 *
 * Fix: the original passed the resolved URL interpolated into a replacement
 * STRING, so `$` sequences inside the URL (possible when the project path
 * contains `$`) were interpreted as replacement patterns like `$&`/`$1`.
 * A replacer function inserts the URL verbatim.
 * @param {string} code - Processor module source.
 * @returns {string} Source with resolvable specifiers rewritten; the input
 *   unchanged when neither specifier can be resolved.
 */
const rewriteProcessorImports = (code) => {
    const replacements = [];
    const coreUrl = resolvePackageSpecifierUrl('@zintrust/core');
    if (coreUrl)
        replacements.push({ from: '@zintrust/core', to: coreUrl });
    const workersUrl = resolvePackageSpecifierUrl('@zintrust/workers');
    if (workersUrl)
        replacements.push({ from: '@zintrust/workers', to: workersUrl });
    if (replacements.length === 0)
        return code;
    let updated = code;
    for (const { from, to } of replacements) {
        // Match the specifier only when quoted, preserving the quote style.
        const pattern = new RegExp(String.raw `(['"])${escapeRegExp(from)}\1`, 'g');
        // Replacer function: `to` is inserted literally, never expanded as
        // a $-replacement pattern.
        updated = updated.replace(pattern, (_match, quote) => `${quote}${to}${quote}`);
    }
    return updated;
};
|
|
111
|
+
/**
 * Ensure the on-disk cache directory for processor spec modules exists,
 * located at <projectRoot>/.zintrust/processor-specs.
 * @returns {string|null} Absolute directory path, or null when not on
 *   Node, when file-system primitives are missing, or when directory
 *   creation fails (failure is logged at debug level, never thrown).
 */
const ensureProcessorSpecDir = () => {
    if (!isNodeRuntime() || !canUseProjectFileImports())
        return null;
    const dir = path.join(resolveProjectRoot(), '.zintrust', 'processor-specs');
    try {
        if (!NodeSingletons.fs.existsSync(dir)) {
            NodeSingletons.fs.mkdirSync(dir, { recursive: true });
        }
        return dir;
    }
    catch (error) {
        // Best-effort: callers treat null as "no cache dir available".
        Logger.debug('Failed to prepare processor spec cache directory', error);
        return null;
    }
};
|
|
126
|
+
/**
 * Decide whether a failed data:-URL import should be retried via a temp
 * file import (Node only). Matches known URL/ESM resolution failures by
 * error code or message substring.
 * @param {unknown} error - The error thrown by the dynamic import.
 * @returns {boolean} true when a file-based retry is worth attempting.
 */
const shouldFallbackToFileImport = (error) => {
    if (!isNodeRuntime()) {
        return false;
    }
    const code = error?.code ?? '';
    if (code === 'ERR_INVALID_URL' || code === 'ERR_UNSUPPORTED_ESM_URL_SCHEME') {
        return true;
    }
    const message = error instanceof Error ? error.message : String(error);
    const knownMarkers = [
        'Invalid relative URL',
        'base scheme is not hierarchical',
        'Failed to resolve module specifier',
    ];
    return knownMarkers.some((marker) => message.includes(marker));
};
|
|
137
|
+
/**
 * Import an ES module from an in-memory source string.
 *
 * Primary path: encode the code as a base64 data: URL and dynamic-import
 * it. Fallback (Node only, for loaders that reject data: URLs): write the
 * code to a cached .mjs file under the processor-spec directory and import
 * that file. The original import error is rethrown when the fallback is
 * unavailable or also fails.
 *
 * NOTE(review): toBase64 and computeSha256 are defined elsewhere in this
 * module; presumed to be a base64 encoder and an async SHA-256 digest —
 * confirm against their definitions.
 * @param {{code: string, normalized: string, cacheKey: string}} params
 *   code - module source; normalized - identifier used in log messages;
 *   cacheKey - file-name fallback when the hash is empty/falsy.
 * @returns {Promise<object>} The imported module namespace.
 */
const importModuleFromCode = async (params) => {
    const { code, normalized, cacheKey } = params;
    const dataUrl = `data:text/javascript;base64,${toBase64(code)}`;
    try {
        return (await import(dataUrl));
    }
    catch (error) {
        // Only retry via a temp file for known URL/ESM-scheme failures.
        if (!shouldFallbackToFileImport(error))
            throw error;
        const dir = ensureProcessorSpecDir();
        if (!dir)
            throw error;
        try {
            const codeHash = await computeSha256(code);
            const filePath = path.join(dir, `${codeHash || cacheKey}.mjs`);
            NodeSingletons.fs.writeFileSync(filePath, code, 'utf8');
            const fileUrl = NodeSingletons.url.pathToFileURL(filePath).href;
            return (await import(fileUrl));
        }
        catch (fileError) {
            Logger.debug(`Processor URL file fallback failed for ${normalized}`, fileError);
            // Surface the original data:-URL import error, not the fallback's.
            throw error;
        }
    }
};
|
|
162
|
+
/**
 * True when Cloudflare Workers bindings are present in the runtime.
 * @returns {boolean}
 */
const isCloudflareRuntime = () => {
    const workersEnv = Cloudflare.getWorkersEnv();
    return workersEnv !== null;
};
|
|
163
|
+
/**
 * Default worker store for the current runtime.
 * - Node (non-Cloudflare): ensures the module-level store is configured
 *   and returns it.
 * - Cloudflare Workers: derives persistence settings from bootstrap
 *   config; falls back to a fresh in-memory store when no persistence is
 *   configured.
 * NOTE(review): buildPersistenceBootstrapConfig, resolvePersistenceConfig
 * and resolveWorkerStoreForPersistence are defined elsewhere in this
 * module.
 * @returns {Promise<object>} A worker store instance.
 */
const getDefaultStoreForRuntime = async () => {
    if (!isCloudflareRuntime()) {
        await ensureWorkerStoreConfigured();
        return workerStore;
    }
    const bootstrapConfig = buildPersistenceBootstrapConfig();
    const persistence = resolvePersistenceConfig(bootstrapConfig);
    if (!persistence) {
        return InMemoryWorkerStore.create();
    }
    return resolveWorkerStoreForPersistence(persistence);
};
|
|
175
|
+
/**
 * Pick the worker store for a given worker.
 * Priority: explicit persistence override → worker-level configuration →
 * runtime default store.
 * @param {object|undefined} config - Worker-specific configuration, if any.
 * @param {object|undefined} persistenceOverride - Explicit persistence settings.
 * @returns {Promise<object>} The resolved worker store.
 */
const getStoreForWorker = async (config, persistenceOverride) => {
    if (persistenceOverride) {
        return resolveWorkerStoreForPersistence(persistenceOverride);
    }
    // Worker-level configuration wins over the global default.
    const persistence = config ? resolvePersistenceConfig(config) : null;
    if (persistence) {
        return resolveWorkerStoreForPersistence(persistence);
    }
    // Fallback to the default/global store for this runtime.
    return getDefaultStoreForRuntime();
};
|
|
189
|
+
/**
 * Resolves the appropriate store and asserts that worker `name` exists in
 * it; throws a not-found error otherwise.
 */
const validateAndGetStore = async (name, config, persistenceOverride) => {
    const store = await getStoreForWorker(config, persistenceOverride);
    const record = await store.get(name);
    if (record) {
        return store;
    }
    throw ErrorFactory.createNotFoundError(`Worker "${name}" not found in the specified driver. Ensure you are addressing the correct storage backend.`);
};
|
|
197
|
+
// Worker creation status enum for proper lifecycle management.
// Frozen so consumers cannot accidentally mutate the shared enum object.
export const WorkerCreationStatus = Object.freeze({
    CREATING: 'creating', // Initial state - worker is being created
    CONNECTING: 'connecting', // Connecting to Redis/Queue
    STARTING: 'starting', // Starting BullMQ worker
    RUNNING: 'running', // Actually processing jobs
    FAILED: 'failed', // Connection/startup failed
    STOPPED: 'stopped', // Intentionally stopped
});
|
|
206
|
+
// Internal initialization state to prevent memory leaks and redundant calls
// (each subsystem below is wired up at most once per process).
let clusteringInitialized = false;
let metricsInitialized = false;
let autoScalingInitialized = false;
let deadLetterQueueInitialized = false;
let resourceMonitoringInitialized = false;
let complianceInitialized = false;
let observabilityInitialized = false;
// Internal state
const workers = new Map(); // worker name -> live worker handle
let workerStore = InMemoryWorkerStore.create(); // default store; replaced once configured
let workerStoreConfigured = false;
let workerStoreConfig = null;
const processorRegistry = new Map(); // worker name -> processor function
const processorPathRegistry = new Map(); // worker name -> module path hint
const processorResolvers = []; // user-registered async fallback resolvers
const processorSpecRegistry = new Map(); // normalized spec -> prebuilt processor
const processorCache = new Map(); // cache key -> remote processor cache entry
let processorCacheSize = 0; // running total of cached code size in bytes
|
|
225
|
+
/**
 * Builds a synthetic worker config used only to bootstrap persistence
 * resolution from environment settings (never runs jobs).
 */
const buildPersistenceBootstrapConfig = () => {
    const driver = Env.get('WORKER_PERSISTENCE_DRIVER', 'memory');
    const infrastructure = { persistence: { driver } };
    const config = {
        name: '__zintrust_persistence_bootstrap__',
        queueName: '__zintrust_bootstrap__',
        processor: async () => undefined,
        infrastructure,
    };
    // Add Redis config if using Redis persistence
    if (driver === 'redis') {
        config.infrastructure = { ...infrastructure, redis: queueConfig.drivers.redis };
    }
    return config;
};
|
|
246
|
+
/** Registers an in-memory processor function under a worker name. */
const registerProcessor = (name, processor) => {
    processorRegistry.set(name, processor);
};
|
|
249
|
+
/** Bulk-registers processors; non-function values are silently skipped. */
const registerProcessors = (processors) => {
    for (const [name, processor] of Object.entries(processors)) {
        if (typeof processor === 'function') {
            processorRegistry.set(name, processor);
        }
    }
};
|
|
256
|
+
/** Bulk-registers module-path hints; blank or non-string paths are skipped. */
const registerProcessorPaths = (paths) => {
    for (const [name, modulePath] of Object.entries(paths)) {
        if (typeof modulePath === 'string' && modulePath.trim().length > 0) {
            processorPathRegistry.set(name, modulePath);
        }
    }
};
|
|
263
|
+
/** Adds an async fallback resolver consulted when no registered processor matches. */
const registerProcessorResolver = (resolver) => {
    processorResolvers.push(resolver);
};
|
|
266
|
+
/** Registers a prebuilt processor under its normalized spec; invalid input is ignored. */
const registerProcessorSpec = (spec, processor) => {
    if (!spec)
        return;
    if (typeof processor !== 'function')
        return;
    processorSpecRegistry.set(normalizeProcessorSpec(spec), processor);
};
|
|
271
|
+
/**
 * Decodes HTML-entity-encoded forward slashes (`&#x2f;`, `&#47;`, `&sol;`)
 * in a processor path spec back into literal `/` characters.
 * NOTE(review): the original entity patterns were destroyed by entity
 * decoding in the published listing (`.replaceAll(///gi, '/')` is a syntax
 * error); reconstructed from the function's name and the three-replacement
 * shape — confirm against upstream source.
 */
const decodeProcessorPathEntities = (value) => value
    .replaceAll(/&#x2f;/gi, '/')
    .replaceAll('&#47;', '/')
    .replaceAll(/&sol;/gi, '/');
|
|
275
|
+
/** True when the spec addresses a URL: explicit `url:` prefix or an embedded scheme. */
const isUrlSpec = (spec) => spec.startsWith('url:') || spec.includes('://');
|
|
280
|
+
/** Strips the optional `url:` prefix from a processor spec. */
const normalizeProcessorSpec = (spec) => {
    return spec.startsWith('url:') ? spec.slice(4) : spec;
};
|
|
281
|
+
/**
 * Extracts `max-age` (in seconds) from a Cache-Control header value.
 * Returns {} when the header is absent or has no parsable max-age.
 */
const parseCacheControl = (value) => {
    if (!value)
        return {};
    const directives = value.split(',').map((part) => part.trim().toLowerCase());
    const directive = directives.find((part) => part.startsWith('max-age='));
    const seconds = Number.parseInt(directive?.split('=')[1] ?? '', 10);
    return Number.isFinite(seconds) ? { maxAge: seconds } : {};
};
|
|
292
|
+
/** Processor-spec settings (allowlist, fetch limits, cache TTLs) from the workers config. */
const getProcessorSpecConfig = () => workersConfig.processorSpec;
|
|
293
|
+
const computeSha256 = async (value) => {
|
|
294
|
+
if (typeof globalThis !== 'undefined' && globalThis.crypto?.subtle) {
|
|
295
|
+
const data = new TextEncoder().encode(value);
|
|
296
|
+
const digest = await globalThis.crypto.subtle.digest('SHA-256', data);
|
|
297
|
+
return Array.from(new Uint8Array(digest))
|
|
298
|
+
.map((b) => b.toString(16).padStart(2, '0'))
|
|
299
|
+
.join('');
|
|
300
|
+
}
|
|
301
|
+
if (typeof NodeSingletons.createHash === 'function') {
|
|
302
|
+
return NodeSingletons.createHash('sha256').update(value).digest('hex');
|
|
303
|
+
}
|
|
304
|
+
return String(generateUuid()).slice(2);
|
|
305
|
+
};
|
|
306
|
+
/**
 * UTF-8 -> base64. Uses Buffer when available, then btoa; returns the
 * input unchanged when neither encoder exists.
 */
const toBase64 = (value) => {
    if (typeof Buffer !== 'undefined') {
        return Buffer.from(value, 'utf-8').toString('base64');
    }
    if (typeof globalThis !== 'undefined' && typeof globalThis.btoa === 'function') {
        let binary = '';
        for (const byte of new TextEncoder().encode(value)) {
            binary += String.fromCodePoint(byte);
        }
        return globalThis.btoa(binary);
    }
    return value;
};
|
|
320
|
+
/**
 * Returns the live cache entry for `key`, or null when absent/expired.
 * Expired entries are evicted and their byte size reclaimed; a hit
 * refreshes the entry's LRU timestamp.
 */
const getCachedProcessor = (key) => {
    const entry = processorCache.get(key);
    if (!entry)
        return null;
    const now = Date.now();
    if (now < entry.expiresAt) {
        entry.lastAccess = now;
        return entry;
    }
    processorCache.delete(key);
    processorCacheSize -= entry.size;
    return null;
};
|
|
333
|
+
/** Evicts least-recently-used cache entries until the byte total fits maxSize. */
const evictCacheIfNeeded = (maxSize) => {
    if (processorCacheSize <= maxSize)
        return;
    const byLru = [...processorCache.entries()].sort((a, b) => a[1].lastAccess - b[1].lastAccess);
    for (const [key, entry] of byLru) {
        if (processorCacheSize <= maxSize)
            break;
        processorCache.delete(key);
        processorCacheSize -= entry.size;
    }
};
|
|
345
|
+
/** Inserts or replaces a cache entry, keeping the running byte total accurate, then evicts if over budget. */
const setCachedProcessor = (key, entry, maxSize) => {
    const previous = processorCache.get(key);
    if (previous) {
        processorCacheSize -= previous.size;
    }
    processorCache.set(key, entry);
    processorCacheSize += entry.size;
    evictCacheIfNeeded(maxSize);
};
|
|
354
|
+
/** Case-insensitive membership test against the configured remote-host allowlist. */
const isAllowedRemoteHost = (host) => {
    const needle = host.toLowerCase();
    return getProcessorSpecConfig().remoteAllowlist.some((value) => value.toLowerCase() === needle);
};
|
|
358
|
+
/**
 * Polls a BullMQ worker until it reports running AND its Redis client
 * answers PING, resolving on success or rejecting once `timeoutMs` is
 * exceeded. Polling is self-rescheduling via setTimeout (100ms interval).
 */
const waitForWorkerConnection = async (worker, name, _queueName, timeoutMs) => {
    const startTime = Date.now();
    const checkInterval = 100; // 100ms between checks
    let timeoutId = null;
    return new Promise((resolve, reject) => {
        const checkConnection = async () => {
            try {
                // Check if worker is actually running
                const isRunning = await worker.isRunning();
                if (!isRunning) {
                    throw ErrorFactory.createWorkerError('Worker not running');
                }
                // Check Redis connection
                const client = await worker.client;
                const pingResult = await client.ping();
                if (pingResult !== 'PONG') {
                    throw ErrorFactory.createWorkerError('Redis ping failed');
                }
                // Removed heavy Queue instantiation loop - relying on Redis ping for connectivity check
                // The queue instance creation was causing memory pressure and potential connection leaks in this retry loop
                Logger.debug(`Worker health verification passed for ${name}`, {
                    isRunning,
                    pingResult,
                });
                if (timeoutId)
                    clearTimeout(timeoutId);
                resolve();
                return;
            }
            catch (error) {
                Logger.debug(`Worker health verification failed for ${name}, retrying...`, error);
                // Check timeout
                if (Date.now() - startTime >= timeoutMs) {
                    if (timeoutId)
                        clearTimeout(timeoutId);
                    reject(ErrorFactory.createWorkerError('Worker failed health verification within timeout period'));
                    return;
                }
                // Schedule next check
                timeoutId = globalThis.setTimeout(checkConnection, checkInterval);
            }
        };
        // Start checking (fire-and-forget: the async fn settles the promise itself)
        checkConnection();
    });
};
|
|
404
|
+
/** Registers the worker with the health monitor under its queue name. */
const startHealthMonitoring = (name, worker, queueName) => {
    HealthMonitor.register(name, worker, queueName);
};
|
|
407
|
+
/**
 * Normalizes a raw processor path: decodes entity-escaped slashes, strips
 * query/fragment/ampersand suffixes, and resolves relative paths against
 * the current working directory. Returns '' for blank input.
 */
const sanitizeProcessorPath = (value) => {
    const decoded = decodeProcessorPathEntities(value);
    const base = decoded.split(/[?#&]/)[0]?.trim() ?? '';
    if (!base)
        return '';
    // POSIX-absolute or Windows drive-letter (C:\ or C:/) paths pass through.
    const isAbsolutePath = base.startsWith('/') || /^[A-Za-z]:[\\/]/.test(base);
    const relativePath = base.startsWith('.') ? base : `./${base}`;
    return isAbsolutePath ? base : path.resolve(process.cwd(), relativePath);
};
|
|
416
|
+
/** Drops a trailing .ts/.js extension (case-insensitive). */
const stripProcessorExtension = (value) => {
    return value.replace(/\.(ts|js)$/i, '');
};
|
|
417
|
+
/** Converts Windows path separators to forward slashes. */
const normalizeModulePath = (value) => {
    return value.replaceAll('\\', '/');
};
|
|
418
|
+
/** Keeps only candidates that exist on disk; passes all through when fs probing is unavailable. */
const filterExistingFileCandidates = (candidates) => {
    if (!NodeSingletons?.fs?.existsSync)
        return candidates;
    const existsOnDisk = (candidate) => {
        try {
            return NodeSingletons.fs.existsSync(candidate);
        }
        catch {
            return false;
        }
    };
    return candidates.filter(existsOnDisk);
};
|
|
430
|
+
/**
 * Derives `@app/...` module-alias candidates for a processor path, from
 * both the raw spec ('app/' or '/app/' prefix) and from the resolved
 * absolute path's last '/app/' segment. De-duplicated, order preserved.
 */
const buildProcessorModuleCandidates = (modulePath, resolvedPath) => {
    const candidates = [];
    const normalized = normalizeModulePath(modulePath.trim());
    const normalizedResolved = normalizeModulePath(resolvedPath);
    if (normalized.startsWith('/app/')) {
        candidates.push(`@app/${stripProcessorExtension(normalized.slice(5))}`);
    }
    else if (normalized.startsWith('app/')) {
        candidates.push(`@app/${stripProcessorExtension(normalized.slice(4))}`);
    }
    const appIndex = normalizedResolved.lastIndexOf('/app/');
    if (appIndex !== -1) {
        const relative = normalizedResolved.slice(appIndex + 5);
        if (relative) {
            candidates.push(`@app/${stripProcessorExtension(relative)}`);
        }
    }
    return Array.from(new Set(candidates));
};
|
|
449
|
+
/**
 * Builds on-disk candidates for a processor path: the resolved path with
 * .js/.mjs extensions, plus dist/app and app variants relative to the
 * project root (and the container-style /app/dist layout). Only candidates
 * that exist on disk are returned.
 */
const buildProcessorFilePathCandidates = (_modulePath, resolvedPath) => {
    const candidates = [];
    const normalizedResolved = normalizeModulePath(resolvedPath);
    const projectRoot = normalizeModulePath(resolveProjectRoot());
    const strippedResolved = stripProcessorExtension(resolvedPath);
    candidates.push(`${strippedResolved}.js`, `${strippedResolved}.mjs`);
    const appIndex = normalizedResolved.lastIndexOf('/app/');
    if (appIndex !== -1) {
        const relative = normalizedResolved.slice(appIndex + 5);
        if (relative) {
            const strippedRelative = stripProcessorExtension(relative);
            candidates.push(path.join(projectRoot, 'dist', 'app', `${strippedRelative}.js`), path.join(projectRoot, 'app', relative), path.join(projectRoot, 'app', `${strippedRelative}.js`), path.join('/app', 'dist', 'app', `${strippedRelative}.js`));
        }
    }
    return filterExistingFileCandidates(Array.from(new Set(candidates)));
};
|
|
465
|
+
/**
 * Picks the processor function from a module's conventional exports
 * (default, processor, handler, handle); warns and returns undefined
 * when none is a function.
 */
const pickProcessorFromModule = (mod, source) => {
    const candidate = mod?.['default'] ?? mod?.['processor'] ?? mod?.['handler'] ?? mod?.['handle'];
    if (typeof candidate === 'function') {
        return candidate;
    }
    const keys = mod ? Object.keys(mod) : [];
    Logger.warn(`Module imported from ${source} but no valid processor function found (exported: ${keys.join(', ')})`);
    return undefined;
};
|
|
474
|
+
/** Returns the module's `ZinTrustProcessor` export, or undefined (with a warning) when missing. */
const extractZinTrustProcessor = (mod, source) => {
    const candidate = mod?.['ZinTrustProcessor'];
    if (typeof candidate === 'function') {
        return candidate;
    }
    const keys = mod ? Object.keys(mod) : [];
    Logger.warn(`Module imported from ${source} but missing ZinTrustProcessor export (exported: ${keys.join(', ')})`);
    return undefined;
};
|
|
483
|
+
/**
 * Reads a fetch response body as text, throwing when either the declared
 * Content-Length or the actual byte length exceeds maxSize.
 */
const readResponseBody = async (response, maxSize) => {
    const declared = Number.parseInt(response.headers.get('content-length') ?? '', 10);
    if (Number.isFinite(declared) && declared > maxSize) {
        throw ErrorFactory.createConfigError('PROCESSOR_FETCH_SIZE_EXCEEDED');
    }
    const buffer = await response.arrayBuffer();
    if (buffer.byteLength > maxSize) {
        throw ErrorFactory.createConfigError('PROCESSOR_FETCH_SIZE_EXCEEDED');
    }
    return new TextDecoder().decode(buffer);
};
|
|
497
|
+
/** Cache TTL in seconds: requested max-age (or the configured default) capped at the configured maximum. */
const computeCacheTtlSeconds = (config, cacheControl) => {
    const requested = cacheControl.maxAge ?? config.cacheDefaultTtlSeconds;
    return Math.min(config.cacheMaxTtlSeconds, requested);
};
|
|
498
|
+
/** Extends a cache entry's lifetime after a 304 revalidation and returns its processor. */
const refreshCachedProcessor = (existing, config, cacheControl) => {
    const now = Date.now();
    existing.expiresAt = now + computeCacheTtlSeconds(config, cacheControl) * 1000;
    existing.lastAccess = now;
    return existing.processor;
};
|
|
505
|
+
/**
 * Turns a fetched processor response into a cached processor: rewrites its
 * imports, imports the code as a module, validates the ZinTrustProcessor
 * export, and caches it with a TTL derived from Cache-Control.
 */
const cacheProcessorFromResponse = async (params) => {
    const { response, normalized, config, cacheKey } = params;
    const rawCode = await readResponseBody(response, config.fetchMaxSizeBytes);
    const code = rewriteProcessorImports(rawCode);
    const mod = await importModuleFromCode({ code, normalized, cacheKey });
    const processor = extractZinTrustProcessor(mod, normalized);
    if (!processor) {
        throw ErrorFactory.createConfigError('INVALID_PROCESSOR_URL_EXPORT');
    }
    const cacheControl = parseCacheControl(response.headers.get('cache-control'));
    const ttl = computeCacheTtlSeconds(config, cacheControl);
    // Cache size accounting uses the rewritten code's UTF-8 byte length.
    const size = new TextEncoder().encode(code).byteLength;
    const now = Date.now();
    setCachedProcessor(cacheKey, {
        code,
        processor,
        etag: response.headers.get('etag') ?? undefined,
        cachedAt: now,
        expiresAt: now + ttl * 1000,
        size,
        lastAccess: now,
    }, config.cacheMaxSizeBytes);
    return processor;
};
|
|
529
|
+
/** Promise-based sleep for `ms` milliseconds. */
const delay = async (ms) => {
    await new Promise((resolve) => {
        globalThis.setTimeout(resolve, ms);
    });
};
|
|
532
|
+
/**
 * One attempt (with recursive retries) to fetch a remote processor.
 * Sends If-None-Match when a cached entry exists; 304 refreshes the cache,
 * 2xx compiles and caches the body. Fetches are aborted after
 * config.fetchTimeoutMs; failures back off linearly (retryBackoffMs *
 * attempt) and return undefined once maxAttempts is exhausted.
 */
const fetchProcessorAttempt = async (params) => {
    const { normalized, config, cacheKey, existing, attempt, maxAttempts } = params;
    const controller = new AbortController();
    const timeoutId = globalThis.setTimeout(() => controller.abort(), config.fetchTimeoutMs);
    try {
        const headers = {};
        if (existing?.etag)
            headers['If-None-Match'] = existing.etag;
        const response = await fetch(normalized, {
            method: 'GET',
            headers,
            signal: controller.signal,
        });
        if (response.status === 304 && existing) {
            // Not modified: extend the existing entry's TTL instead of re-compiling.
            const cacheControl = parseCacheControl(response.headers.get('cache-control'));
            return refreshCachedProcessor(existing, config, cacheControl);
        }
        if (!response.ok) {
            throw ErrorFactory.createConfigError(`PROCESSOR_FETCH_FAILED:${response.status}`);
        }
        return await cacheProcessorFromResponse({ response, normalized, config, cacheKey });
    }
    catch (error) {
        // An aborted signal means our own timeout fired.
        if (controller.signal.aborted) {
            Logger.error('Processor URL fetch timeout', error);
        }
        else {
            Logger.error('Processor URL fetch failed', error);
        }
        if (attempt >= maxAttempts) {
            return undefined;
        }
        await delay(config.retryBackoffMs * attempt);
        return fetchProcessorAttempt({
            normalized,
            config,
            cacheKey,
            existing,
            attempt: attempt + 1,
            maxAttempts,
        });
    }
    finally {
        // Always cancel the abort timer, on success and failure alike.
        clearTimeout(timeoutId);
    }
};
|
|
578
|
+
/**
 * Resolves a processor from a URL spec. file:// URLs are delegated to
 * path-based resolution; https:// URLs must hit an allowlisted host and
 * are fetched/cached via fetchProcessorAttempt. Any other protocol, a
 * malformed URL, or a non-allowlisted host yields undefined.
 *
 * Fix: the original guarded the protocol and allowlist checks with
 * `parsed.protocol !== 'file:'`, but the file: branch has already returned
 * by that point, so those conditions were always-true dead code.
 */
const resolveProcessorFromUrl = async (spec) => {
    const normalized = normalizeProcessorSpec(spec);
    let parsed;
    try {
        parsed = new URL(normalized);
    }
    catch (error) {
        Logger.error('Invalid processor URL spec', error);
        return undefined;
    }
    if (parsed.protocol === 'file:') {
        const filePath = decodeURIComponent(parsed.pathname);
        return resolveProcessorFromPath(filePath);
    }
    // file: has returned above; only https: remains acceptable here.
    if (parsed.protocol !== 'https:') {
        Logger.warn(`Invalid processor URL protocol: ${parsed.protocol}. Only https:// and file:// are supported.`);
        return undefined;
    }
    if (!isAllowedRemoteHost(parsed.host)) {
        Logger.warn(`Invalid processor URL host: ${parsed.host}. Host is not in the allowlist.`);
        return undefined;
    }
    const config = getProcessorSpecConfig();
    // Cache key is content-addressed on the normalized URL string.
    const cacheKey = await computeSha256(normalized);
    const cached = getCachedProcessor(cacheKey);
    if (cached)
        return cached.processor;
    return fetchProcessorAttempt({
        normalized,
        config,
        cacheKey,
        // Pass a possibly-expired entry so its ETag can drive a 304 revalidation.
        existing: processorCache.get(cacheKey),
        attempt: 1,
        maxAttempts: Math.max(1, config.retryAttempts),
    });
};
|
|
614
|
+
/**
 * Resolves a processor spec: prebuilt registry first (normalized then raw
 * key), then URL specs, then filesystem/module paths. Empty specs yield
 * undefined.
 */
const resolveProcessorSpec = async (spec) => {
    if (!spec)
        return undefined;
    const normalized = normalizeProcessorSpec(spec);
    const prebuilt = processorSpecRegistry.get(normalized) ?? processorSpecRegistry.get(spec);
    if (prebuilt)
        return prebuilt;
    return isUrlSpec(spec) ? resolveProcessorFromUrl(spec) : resolveProcessorFromPath(spec);
};
|
|
625
|
+
/**
 * Resolves a processor from a filesystem/module path. Tries the sanitized
 * path directly; on failure falls back to on-disk candidates (dist/app
 * layouts) and then `@app/...` module-alias candidates, trying each in
 * order. Returns undefined when nothing imports a usable processor.
 */
const resolveProcessorFromPath = async (modulePath) => {
    const trimmed = modulePath.trim();
    if (!trimmed)
        return undefined;
    const resolved = sanitizeProcessorPath(trimmed);
    if (!resolved)
        return undefined;
    // Recursively tries candidates in order; absolute POSIX paths are
    // converted to file:// URLs for ESM import.
    const importProcessorFromCandidates = async (candidates) => {
        if (candidates.length === 0)
            return undefined;
        const [candidatePath, ...rest] = candidates;
        try {
            const importPath = candidatePath.startsWith('/') && !candidatePath.startsWith('//')
                ? NodeSingletons.url.pathToFileURL(candidatePath).href
                : candidatePath;
            const mod = await import(importPath);
            const candidate = pickProcessorFromModule(mod, importPath);
            if (candidate)
                return candidate;
        }
        catch (candidateError) {
            Logger.debug(`Processor module candidate import failed: ${candidatePath}`, candidateError);
        }
        return importProcessorFromCandidates(rest);
    };
    try {
        const mod = await import(resolved);
        const candidate = pickProcessorFromModule(mod, resolved);
        if (candidate)
            return candidate;
    }
    catch (err) {
        const fileCandidates = buildProcessorFilePathCandidates(trimmed, resolved);
        const resolvedFileCandidate = await importProcessorFromCandidates(fileCandidates);
        if (resolvedFileCandidate)
            return resolvedFileCandidate;
        const moduleCandidates = buildProcessorModuleCandidates(trimmed, resolved);
        const resolvedModuleCandidate = await importProcessorFromCandidates(moduleCandidates);
        if (resolvedModuleCandidate)
            return resolvedModuleCandidate;
        Logger.error(`Failed to import processor from path: ${resolved}`, err);
    }
    return undefined;
};
|
|
669
|
+
/**
 * Resolves the processor for a worker name: direct registry first, then a
 * registered path hint, then all user resolvers (run in parallel, first
 * defined result wins). Resolver errors are logged, never thrown.
 */
const resolveProcessor = async (name) => {
    const direct = processorRegistry.get(name);
    if (direct)
        return direct;
    const pathHint = processorPathRegistry.get(name);
    if (pathHint) {
        try {
            const resolved = await resolveProcessorSpec(pathHint);
            if (resolved)
                return resolved;
        }
        catch (error) {
            Logger.error(`Failed to resolve processor module for "${name}"`, error);
        }
    }
    // All resolvers run concurrently; failures collapse to undefined.
    const resolverResults = await Promise.all(processorResolvers.map(async (resolver) => {
        try {
            return await resolver(name);
        }
        catch (error) {
            Logger.error(`Processor resolver failed for "${name}"`, error);
            return undefined;
        }
    }));
    const resolvedFromResolvers = resolverResults.find((result) => result !== undefined);
    if (resolvedFromResolvers)
        return resolvedFromResolvers;
    return undefined;
};
|
|
698
|
+
/** Public alias: resolves a processor from a filesystem/module path. */
const resolveProcessorPath = async (modulePath) => resolveProcessorFromPath(modulePath);
|
|
699
|
+
/** Fire-and-forget metric write; failures are logged, never thrown. */
const recordMetricSafely = (workerName, metricType, value, metadata) => {
    const onError = (error) => {
        Logger.error(`Failed to record worker metric: ${workerName}/${metricType}`, error);
    };
    WorkerMetrics.record(workerName, metricType, value, metadata).catch(onError);
};
|
|
704
|
+
/**
 * Throws when the circuit breaker (if enabled) refuses execution for this
 * worker version; records the rejection first. No-op when the feature is
 * disabled or the breaker allows the job.
 */
const ensureCircuitAllowsExecution = (workerName, version, jobId, features) => {
    const breakerEnabled = features?.circuitBreaker ?? false;
    if (!breakerEnabled)
        return;
    if (CircuitBreaker.canExecute(workerName, version))
        return;
    const state = CircuitBreaker.getState(workerName, version);
    Logger.warn('Circuit breaker is open, rejecting job', {
        workerName,
        version,
        jobId,
        circuitState: state?.state,
    });
    CircuitBreaker.recordRejection(workerName, version);
    throw ErrorFactory.createGeneralError(`Circuit breaker is open for ${workerName}@${version}`);
};
|
|
720
|
+
/**
 * Runs the beforeProcess plugin hook. Returns { skip: true, reason } when
 * a plugin stops the job, the (possibly plugin-modified) job data
 * otherwise, and passes the job through untouched when plugins are off.
 */
const runBeforeProcessHooks = async (workerName, job, features) => {
    if (!(features?.plugins ?? false)) {
        return { skip: false, jobData: job.data };
    }
    const hookResult = await PluginManager.executeHook('beforeProcess', {
        workerName,
        jobId: job.id ?? '',
        jobData: job.data,
        timestamp: new Date(),
    });
    if (hookResult.stopped) {
        // First reported plugin error (if any) becomes the skip reason.
        const errorMessage = hookResult.errors[0]?.error?.message ?? 'Stopped by plugin';
        Logger.info('Job processing stopped by plugin', {
            workerName,
            jobId: job.id,
            reason: errorMessage,
        });
        return { skip: true, reason: errorMessage };
    }
    if (hookResult.modified) {
        return { skip: false, jobData: hookResult.context.jobData };
    }
    return { skip: false, jobData: job.data };
};
|
|
744
|
+
/** Starts an observability span for job processing, or returns null when observability is off. */
const startProcessingSpan = (workerName, version, job, queueName, features) => {
    const enabled = features?.observability ?? false;
    if (!enabled) {
        return null;
    }
    const attributes = {
        worker_name: workerName,
        worker_version: version,
        job_id: job.id ?? '',
        queue_name: queueName,
    };
    return Observability.startSpan(`worker.${workerName}.process`, { attributes });
};
|
|
756
|
+
/**
 * Runs the post-success plugin hooks in order: afterProcess, then
 * onComplete, both carrying the job result as metadata.
 */
const usePluginManager = async (workerName, job, result) => {
    await PluginManager.executeHook('afterProcess', {
        workerName,
        jobId: job.id ?? '',
        jobData: job.data,
        metadata: { result },
        timestamp: new Date(),
    });
    await PluginManager.executeHook('onComplete', {
        workerName,
        jobId: job.id ?? '',
        jobData: job.data,
        metadata: { result },
        timestamp: new Date(),
    });
};
|
|
772
|
+
/**
 * Post-success bookkeeping for a processed job, gated per feature flag:
 * metrics, circuit-breaker success, observability span/metrics, plugins.
 */
const handleSuccess = async (params) => {
    const { workerName, jobVersion, job, result, duration, spanId, features } = params;
    if (features?.metrics ?? false) {
        recordMetricSafely(workerName, 'processed', 1);
        recordMetricSafely(workerName, 'duration', duration);
    }
    if (features?.circuitBreaker ?? false) {
        CircuitBreaker.recordSuccess(workerName, jobVersion);
    }
    if (features?.observability ?? false) {
        Observability.recordJobMetrics(workerName, job.name, {
            processed: 1,
            failed: 0,
            durationMs: duration,
        });
        if (spanId !== null) {
            Observability.endSpan(spanId, { success: true });
        }
    }
    if (features?.plugins ?? false) {
        await usePluginManager(workerName, { id: job.id ?? '', data: job.data }, result);
    }
};
|
|
795
|
+
/** Records error and duration metrics for a failed job when metrics are enabled. */
const recordFailureMetrics = (workerName, _jobVersion, duration, features) => {
    if (features?.metrics !== true)
        return;
    recordMetricSafely(workerName, 'errors', 1);
    recordMetricSafely(workerName, 'duration', duration);
};
|
|
801
|
+
/**
 * Records failure observability: job metrics plus span error/end when a
 * span was opened. No-op when observability is disabled.
 */
const recordFailureObservability = (workerName, jobName, duration, spanId, features) => {
    if (features?.observability === true) {
        Observability.recordJobMetrics(workerName, jobName, {
            processed: 0,
            failed: 1,
            durationMs: duration,
        });
        if (spanId !== null) {
            Observability.recordSpanError(spanId, ErrorFactory.createGeneralError('Job processing failed'));
            Observability.endSpan(spanId, { success: false });
        }
    }
};
|
|
814
|
+
/**
 * Forwards a terminally failed job to the dead-letter queue, capturing the
 * error, attempt counts, timing, and default (public, non-PII) compliance
 * flags. No-op when the DLQ feature is disabled.
 */
const addFailedJobToDeadLetterQueue = async (workerName, job, error, duration, jobVersion, queueName, features) => {
    if (features?.deadLetterQueue === true) {
        await DeadLetterQueue.addFailedJob({
            id: job.id ?? '',
            queueName,
            workerName,
            jobName: job.name,
            data: job.data,
            error: {
                name: error.name,
                message: error.message,
                stack: error.stack,
            },
            attemptsMade: job.attemptsMade ?? 0,
            maxAttempts: job.opts.attempts ?? 0,
            failedAt: new Date(),
            // Job creation timestamp approximates the first attempt time.
            firstAttemptAt: new Date(job.timestamp ?? Date.now()),
            lastAttemptAt: new Date(),
            processingTime: duration,
            metadata: {
                version: jobVersion,
            },
            complianceFlags: {
                containsPII: false,
                containsPHI: false,
                dataClassification: 'public',
            },
        });
    }
};
|
|
844
|
+
/** Runs the onError plugin hook for a failed job when plugins are enabled. */
const executeFailurePlugins = async (workerName, job, error, features) => {
    if (features?.plugins !== true)
        return;
    await PluginManager.executeHook('onError', {
        workerName,
        jobId: job.id ?? '',
        jobData: job.data,
        error,
        timestamp: new Date(),
    });
};
|
|
855
|
+
/** Feeds a failure into the circuit breaker when that feature is enabled. */
const recordCircuitBreakerFailure = (workerName, jobVersion, error, features) => {
    if (features?.circuitBreaker !== true)
        return;
    CircuitBreaker.recordFailure(workerName, jobVersion, error);
};
|
|
860
|
+
/** Logs a job failure and forwards it to the circuit breaker. */
const logAndRecordFailure = (workerName, jobVersion, job, error, features) => {
    const details = { error, jobId: job.id, version: jobVersion };
    Logger.error(`Worker job failed: ${workerName}`, details, 'workers');
    recordCircuitBreakerFailure(workerName, jobVersion, error, features);
};
|
|
864
|
+
/** Convenience wrapper: records failure metrics and observability in one call. */
const recordFailureObservabilityAndMetrics = (params) => {
    const { workerName, jobVersion, jobName, duration, spanId, features } = params;
    recordFailureMetrics(workerName, jobVersion, duration, features);
    recordFailureObservability(workerName, jobName, duration, spanId, features);
};
|
|
869
|
+
/**
 * Runs failure metrics/observability and (when enabled) the dead-letter
 * queue hand-off for a failed job.
 */
const executeAllFailureHandlers = async (params) => {
    const { workerName, jobVersion, job, error, duration, spanId, features, queueName } = params;
    recordFailureObservabilityAndMetrics({
        workerName,
        jobVersion,
        jobName: job.name,
        duration,
        spanId,
        features,
    });
    // NOTE: the DLQ flag is checked again inside addFailedJobToDeadLetterQueue;
    // this outer guard just avoids the call entirely when disabled.
    if (features?.deadLetterQueue === true) {
        await addFailedJobToDeadLetterQueue(workerName, job, error, duration, jobVersion, queueName, features);
    }
};
|
|
883
|
+
/** Orchestrates all failure handling: logging + breaker, metrics/DLQ, then plugins. */
const handleFailure = async (params) => {
    const { workerName, jobVersion, job, error, features } = params;
    logAndRecordFailure(workerName, jobVersion, job, error, features);
    await executeAllFailureHandlers(params);
    await executeFailurePlugins(workerName, job, error, features);
};
|
|
889
|
+
/**
 * Normalizes a backoff option (a number, or an object with a numeric
 * `delay`) to a non-negative integer millisecond delay; anything else
 * yields 0.
 */
const toBackoffDelayMs = (backoff) => {
    const candidate = typeof backoff === 'number'
        ? backoff
        : (backoff !== null && typeof backoff === 'object' ? backoff.delay : undefined);
    if (typeof candidate === 'number' && Number.isFinite(candidate)) {
        return Math.max(0, Math.floor(candidate));
    }
    return 0;
};
|
|
901
|
+
// Record a job start in the state tracker; jobs without an id are skipped.
const trackJobStarted = async ({ queueName, job, attempts, workerName, workerVersion }) => {
    if (!job.id) {
        return;
    }
    // QUEUE_JOB_TIMEOUT is seconds; convert to ms and clamp to at least 1s.
    const timeoutMs = Math.max(1000, Env.getInt('QUEUE_JOB_TIMEOUT', 60) * 1000);
    await JobStateTracker.started({
        queueName,
        jobId: job.id,
        attempts,
        timeoutMs,
        workerName,
        workerVersion,
    });
};
|
|
913
|
+
// Record a successful completion in the state tracker; skips id-less jobs.
const trackJobCompleted = async ({ queueName, job, duration, result }) => {
    if (!job.id) {
        return;
    }
    await JobStateTracker.completed({
        queueName,
        jobId: job.id,
        processingTimeMs: duration,
        result,
    });
};
|
|
923
|
+
// Record a failure in the state tracker. A failure is final when no attempt
// limit is known or the limit is reached; otherwise a retryAt timestamp is
// derived from the job's backoff option when it yields a positive delay.
const trackJobFailed = async ({ queueName, job, attempts, maxAttempts, error }) => {
    if (!job.id) {
        return;
    }
    const isFinal = maxAttempts === undefined || attempts >= maxAttempts;
    const backoffDelayMs = toBackoffDelayMs(job.opts?.backoff);
    const retryAt = !isFinal && backoffDelayMs > 0
        ? new Date(Date.now() + backoffDelayMs).toISOString()
        : undefined;
    await JobStateTracker.failed({
        queueName,
        jobId: job.id,
        attempts,
        isFinal,
        retryAt,
        error,
    });
};
|
|
939
|
+
/**
 * Helper: Create enhanced processor with all features
 *
 * Wraps the user-supplied `processor` so every execution passes through the
 * configured cross-cutting features: circuit-breaker gating, before-process
 * hooks (which may skip or rewrite the job), tracing spans, job-state
 * tracking, and the success/failure pipelines. Returns the async function
 * handed to the queue worker.
 */
const createEnhancedProcessor = (config) => {
    return async (job) => {
        const { name, version, processor, features } = config;
        // Default version when the worker config omits one.
        const jobVersion = version ?? '1.0.0';
        // May throw when the circuit breaker does not allow execution.
        ensureCircuitAllowsExecution(name, jobVersion, job.id, features);
        // Hooks can request a skip or substitute the job payload.
        const beforeOutcome = await runBeforeProcessHooks(name, job, features);
        if (beforeOutcome.skip) {
            return { skipped: true, reason: beforeOutcome.reason };
        }
        if (beforeOutcome.jobData !== undefined) {
            job.data = beforeOutcome.jobData;
        }
        const startTime = Date.now();
        let result;
        let spanId = null;
        // Normalize BullMQ opts.attempts; undefined means "no known limit".
        const maxAttempts = typeof job.opts?.attempts === 'number' && Number.isFinite(job.opts.attempts)
            ? Math.max(1, Math.floor(job.opts.attempts))
            : undefined;
        // attemptsMade counts prior tries; this run is attempt N+1 (min 1).
        const attempts = Math.max(1, Math.floor((job.attemptsMade ?? 0) + 1));
        try {
            spanId = startProcessingSpan(name, jobVersion, job, config.queueName, features);
            await trackJobStarted({
                queueName: config.queueName,
                job,
                attempts,
                workerName: name,
                workerVersion: jobVersion,
            });
            // Process the job
            result = await processor(job);
            const duration = Date.now() - startTime;
            await handleSuccess({
                workerName: name,
                jobVersion,
                job,
                result,
                duration,
                spanId,
                features,
            });
            await trackJobCompleted({ queueName: config.queueName, job, duration, result });
            return result;
        }
        catch (err) {
            const error = err;
            const duration = Date.now() - startTime;
            // Persist the failure state first, then run failure handlers/plugins.
            await trackJobFailed({
                queueName: config.queueName,
                job,
                attempts,
                maxAttempts,
                error,
            });
            await handleFailure({
                workerName: name,
                jobVersion,
                job,
                error,
                duration,
                spanId,
                features,
                queueName: config.queueName,
            });
            // Re-throw so the queue library marks the job failed and can retry.
            throw error;
        }
    };
};
|
|
1009
|
+
// Assert that an infrastructure value is present (not null/undefined);
// returns the value unchanged, otherwise throws a config error with `message`.
const requireInfrastructure = (value, message) => {
    if (value == null) {
        throw ErrorFactory.createConfigError(message);
    }
    return value;
};
|
|
1015
|
+
// Read a string from the environment when an env key is given;
// a falsy key short-circuits to the fallback.
const resolveEnvString = (envKey, fallback) => (envKey ? Env.get(envKey, fallback) : fallback);
|
|
1020
|
+
// Read an integer from the environment when an env key is given;
// a falsy key short-circuits to the fallback.
const resolveEnvInt = (envKey, fallback) => (envKey ? Env.getInt(envKey, fallback) : fallback);
|
|
1025
|
+
// A redis config is "env-driven" only when its `env` flag is literally true.
const isRedisEnvConfig = ({ env }) => env === true;
|
|
1026
|
+
// Ensure a redis host value is present; throws a config error naming the
// offending config path (`context`) when it is missing/empty.
const requireRedisHost = (host, context) => {
    if (host) {
        return host;
    }
    throw ErrorFactory.createConfigError(`${context}.host is required`);
};
|
|
1032
|
+
// Fallback redis settings: prefer the queue config's redis driver settings,
// otherwise fall back to the REDIS_* environment values.
const resolveRedisFallbacks = () => {
    const queueRedis = queueConfig.drivers.redis;
    if (queueRedis?.driver === 'redis') {
        return {
            host: queueRedis.host,
            port: queueRedis.port,
            db: queueRedis.database,
            password: queueRedis.password ?? '',
        };
    }
    return {
        host: Env.get('REDIS_HOST', '127.0.0.1'),
        port: Env.getInt('REDIS_PORT', ZintrustLang.REDIS_DEFAULT_PORT),
        db: Env.getInt('REDIS_QUEUE_DB', ZintrustLang.REDIS_DEFAULT_DB),
        password: Env.get('REDIS_PASSWORD', ''),
    };
};
|
|
1045
|
+
// Resolve a redis connection from env-key-style config fields; each field
// names an env variable (defaulting to the standard REDIS_* keys) and falls
// back to the queue-config-derived defaults.
const resolveRedisConfigFromEnv = (config, context) => {
    const fallback = resolveRedisFallbacks();
    return {
        host: requireRedisHost(resolveEnvString(config.host ?? 'REDIS_HOST', fallback.host), context),
        port: resolveEnvInt(String(config.port ?? 'REDIS_PORT'), fallback.port),
        db: resolveEnvInt(config.db ?? 'REDIS_QUEUE_DB', fallback.db),
        password: resolveEnvString(config.password ?? 'REDIS_PASSWORD', fallback.password) || undefined,
    };
};
|
|
1058
|
+
// Resolve a redis connection from literal config values. Accepts either
// `db` or the legacy `database` field; otherwise defaults to REDIS_QUEUE_DB.
const resolveRedisConfigFromDirect = (config, context) => {
    const envDb = Env.getInt('REDIS_QUEUE_DB', ZintrustLang.REDIS_DEFAULT_DB);
    let db = envDb;
    if (typeof config.db === 'number') {
        db = config.db;
    }
    else if (typeof config.database === 'number') {
        db = config.database;
    }
    return {
        host: requireRedisHost(config.host, context),
        port: config.port,
        db,
        password: config.password ?? Env.get('REDIS_PASSWORD', undefined),
    };
};
|
|
1074
|
+
// Dispatch on the config shape: env-key-style vs. literal values.
const resolveRedisConfig = (config, context) => {
    if (isRedisEnvConfig(config)) {
        return resolveRedisConfigFromEnv(config, context);
    }
    return resolveRedisConfigFromDirect(config, context);
};
|
|
1077
|
+
// Resolve redis config from `primary`, falling back to `fallback`;
// throws a config error with `errorMessage` when neither is provided.
const resolveRedisConfigWithFallback = (primary, fallback, errorMessage, context) => {
    const candidate = primary ?? fallback;
    if (!candidate) {
        throw ErrorFactory.createConfigError(errorMessage);
    }
    return resolveRedisConfig(candidate, context);
};
|
|
1084
|
+
// Debug-log the resolved redis persistence settings, tagged with the
// call-site name (`source`). Password is intentionally not logged.
const logRedisPersistenceConfig = ({ host, port, db }, key_prefix, source) => {
    Logger.debug('Worker persistence redis config', { source, host, port, db, key_prefix });
};
|
|
1093
|
+
// Trim an env value; empty/whitespace-only/absent values become undefined
// so they compose cleanly with `??` fallback chains.
const normalizeEnvValue = (value) => {
    if (!value) {
        return undefined;
    }
    const trimmed = value.trim();
    return trimmed.length === 0 ? undefined : trimmed;
};
|
|
1099
|
+
// Table name for database-backed worker persistence; env override with a
// hard default of 'zintrust_workers'.
const resolveDefaultPersistenceTable = () => {
    const configured = normalizeEnvValue(Env.get('WORKER_PERSISTENCE_TABLE', 'zintrust_workers'));
    return configured ?? 'zintrust_workers';
};
|
|
1100
|
+
// Connection name for database-backed worker persistence; env override with
// a hard default of 'default'.
const resolveDefaultPersistenceConnection = () => {
    const configured = normalizeEnvValue(Env.get('WORKER_PERSISTENCE_DB_CONNECTION', 'default'));
    return configured ?? 'default';
};
|
|
1101
|
+
// Decide whether a worker should auto-start: an explicit config value
// (including false) wins; only null/undefined defer to WORKER_AUTO_START.
const resolveAutoStart = (config) => config.autoStart ?? Env.getBool('WORKER_AUTO_START', false);
|
|
1109
|
+
/**
 * Normalize an explicitly configured persistence block into its canonical
 * shape for one of the three drivers (memory, redis, database).
 *
 * For the database driver, a string `client` is treated as a connection
 * name rather than a client object. Connection/table fall back to the
 * env-driven defaults via the resolveDefaultPersistence* helpers (the
 * previous inline `normalizeEnvValue(Env.get(...))` operands duplicated
 * those helpers' bodies exactly and have been removed).
 */
const normalizeExplicitPersistence = (persistence) => {
    if (persistence.driver === 'memory') {
        return { driver: 'memory' };
    }
    if (persistence.driver === 'redis') {
        return {
            driver: 'redis',
            redis: persistence.redis,
            keyPrefix: keyPrefix(),
        };
    }
    // Database driver: disambiguate the overloaded `client` field.
    const clientIsConnection = typeof persistence.client === 'string';
    const clientConnection = clientIsConnection ? persistence.client : undefined;
    return {
        driver: 'database',
        client: clientIsConnection ? undefined : persistence.client,
        connection: persistence.connection ?? clientConnection ?? resolveDefaultPersistenceConnection(),
        table: persistence.table ?? resolveDefaultPersistenceTable(),
    };
};
|
|
1134
|
+
/**
 * Resolve the worker persistence configuration.
 *
 * Precedence: an explicit `config.infrastructure.persistence` block wins;
 * otherwise WORKER_PERSISTENCE_DRIVER selects a driver from the environment;
 * otherwise returns `undefined` (persistence disabled).
 * Throws a config error when the env driver value is unrecognized.
 */
const resolvePersistenceConfig = (config) => {
    const explicit = config.infrastructure?.persistence;
    if (explicit)
        return normalizeExplicitPersistence(explicit);
    const driver = normalizeEnvValue(Env.get('WORKER_PERSISTENCE_DRIVER', ''))?.toLowerCase();
    if (!driver)
        return undefined;
    if (driver === 'memory')
        return { driver: 'memory' };
    if (driver === 'redis') {
        const persistenceDbOverride = normalizeEnvValue(Env.get('WORKER_PERSISTENCE_REDIS_DB', ''));
        return {
            driver: 'redis',
            // Optional override; otherwise defaults to REDIS_QUEUE_DB.
            redis: {
                env: true,
                db: persistenceDbOverride ? 'WORKER_PERSISTENCE_REDIS_DB' : 'REDIS_QUEUE_DB',
            },
            keyPrefix: keyPrefix(),
        };
    }
    // Accept both 'db' and 'database' spellings from the environment.
    if (driver === 'db' || driver === 'database') {
        return {
            driver: 'database',
            connection: resolveDefaultPersistenceConnection(),
            table: resolveDefaultPersistenceTable(),
        };
    }
    throw ErrorFactory.createConfigError('WORKER_PERSISTENCE_DRIVER must be one of memory, redis, or database');
};
|
|
1164
|
+
/**
 * Resolve a database client for worker persistence.
 *
 * First tries the already-registered connection; on failure it logs,
 * registers the runtime database config, and retries once. The first error
 * is deliberately swallowed (logged only) so the retry can run.
 * Throws a config error when no client can be resolved after the retry.
 */
const resolveDbClientFromEnv = async (connectionName = 'default') => {
    const connect = async () => await useEnsureDbConnected(undefined, connectionName);
    try {
        return await connect();
    }
    catch (error) {
        // Best-effort first attempt; fall through to register + retry below.
        Logger.error('Worker persistence failed to resolve database connection', error);
    }
    try {
        registerDatabasesFromRuntimeConfig(databaseConfig);
        return await connect();
    }
    catch (error) {
        Logger.error('Worker persistence failed after registering runtime databases', error);
        throw ErrorFactory.createConfigError(`Worker persistence requires a database client. Register connection '${connectionName}' or pass infrastructure.persistence.client.`);
    }
};
|
|
1181
|
+
/**
 * Build (and init) the worker store for `config`.
 *
 * Returns the current module-level `workerStore` unchanged when no
 * persistence is configured; otherwise creates a fresh store for the
 * resolved driver (memory, redis, or database) and awaits its `init()`.
 */
const resolveWorkerStore = async (config) => {
    const persistence = resolvePersistenceConfig(config);
    if (!persistence)
        return workerStore;
    let next;
    if (persistence.driver === 'memory') {
        next = InMemoryWorkerStore.create();
    }
    else if (persistence.driver === 'redis') {
        // persistence.redis wins; infrastructure.redis is the fallback.
        const redisConfig = resolveRedisConfigWithFallback(persistence.redis, config.infrastructure?.redis, 'Worker persistence requires redis config (persistence.redis or infrastructure.redis)', 'infrastructure.persistence.redis');
        const key_prefix = persistence.keyPrefix ?? keyPrefix();
        logRedisPersistenceConfig(redisConfig, key_prefix, 'resolveWorkerStore');
        const client = createRedisConnection(redisConfig);
        next = RedisWorkerStore.create(client, key_prefix);
    }
    else if (persistence.driver === 'database') {
        // A string `client` names a connection; otherwise use the client
        // object directly, resolving one from the environment when absent.
        const explicitConnection = typeof persistence.client === 'string' ? persistence.client : persistence.connection;
        const client = typeof persistence.client === 'string'
            ? await resolveDbClientFromEnv(explicitConnection)
            : (persistence.client ?? (await resolveDbClientFromEnv(explicitConnection)));
        next = DbWorkerStore.create(client, persistence.table);
    }
    else {
        // Unknown driver: degrade to an in-memory store.
        next = InMemoryWorkerStore.create();
    }
    await next.init();
    return next;
};
|
|
1209
|
+
// Store instance cache to reuse connections
// Keyed by the JSON string from generateCacheKey(persistence); values are
// live, init()-ed store instances. Not consulted on Cloudflare runtimes
// (see resolveWorkerStoreForPersistence).
const storeInstanceCache = new Map();
|
|
1211
|
+
/**
 * Generate cache key for persistence configuration
 *
 * Serializes only the fields that distinguish store instances; absent
 * fields serialize as undefined and are dropped by JSON.stringify, so
 * configs that differ only in missing-vs-absent keys collapse together.
 */
const generateCacheKey = (persistence) => {
    const field = (key) => (key in persistence ? persistence[key] : undefined);
    return JSON.stringify({
        driver: persistence.driver,
        redis: field('redis'),
        keyPrefix: field('keyPrefix'),
        connection: field('connection'),
        table: field('table'),
    });
};
|
|
1223
|
+
/**
 * Create new store instance based on persistence configuration
 *
 * Unlike resolveWorkerStore, the memory driver reuses the already-configured
 * module-level store when it is also memory-backed. Callers are expected to
 * `init()` the returned store themselves.
 */
const createWorkerStore = async (persistence) => {
    if (persistence.driver === 'memory') {
        if (workerStoreConfigured && workerStoreConfig?.driver === 'memory') {
            return workerStore;
        }
        return InMemoryWorkerStore.create();
    }
    if (persistence.driver === 'redis') {
        // Default to env-driven redis settings when none are supplied.
        const redisConfig = resolveRedisConfigWithFallback(persistence.redis ?? { env: true }, undefined, 'Worker persistence requires redis config (persistence.redis or REDIS_* env values)', 'persistence.redis');
        const key_prefix = persistence.keyPrefix ?? keyPrefix();
        logRedisPersistenceConfig(redisConfig, key_prefix, 'createWorkerStore');
        const client = createRedisConnection(redisConfig);
        return RedisWorkerStore.create(client, key_prefix);
    }
    // Database driver
    // A string `client` names a connection; otherwise use the client object,
    // resolving one from the environment when absent.
    const explicitConnection = typeof persistence.client === 'string' ? persistence.client : persistence.connection;
    const client = typeof persistence.client === 'string'
        ? await resolveDbClientFromEnv(explicitConnection)
        : (persistence.client ?? (await resolveDbClientFromEnv(explicitConnection)));
    return DbWorkerStore.create(client, persistence.table);
};
|
|
1247
|
+
// Resolve (and cache) a store for a given persistence config. On Cloudflare
// the cache is bypassed entirely so each request gets its own connection;
// everywhere else instances are reused by config cache key.
const resolveWorkerStoreForPersistence = async (persistence) => {
    const cacheKey = generateCacheKey(persistence);
    const isCloudflare = Cloudflare.getWorkersEnv() !== null;
    if (!isCloudflare) {
        const cached = storeInstanceCache.get(cacheKey);
        if (cached) {
            return cached;
        }
    }
    const store = await createWorkerStore(persistence);
    await store.init();
    if (!isCloudflare) {
        storeInstanceCache.set(cacheKey, store);
    }
    return store;
};
|
|
1265
|
+
/**
 * Fetch the persisted worker record for `name`.
 *
 * Without an override, reads through the configured module-level store —
 * except on Cloudflare, where a per-request store is created and always
 * closed after the read. With an override, reads through the (cached)
 * store for that persistence configuration.
 */
const getPersistedRecord = async (name, persistenceOverride) => {
    if (!persistenceOverride) {
        if (!isCloudflareRuntime()) {
            await ensureWorkerStoreConfigured();
            return workerStore.get(name);
        }
        const store = await getDefaultStoreForRuntime();
        try {
            return await store.get(name);
        }
        finally {
            // Cloudflare: the connection must not outlive the request.
            if (store.close) {
                await store.close();
            }
        }
    }
    const store = await resolveWorkerStoreForPersistence(persistenceOverride);
    return store.get(name);
};
|
|
1284
|
+
/**
 * Lazily configure the module-level worker store (idempotent).
 *
 * No-op when already configured, or when the environment configures no
 * persistence; otherwise swaps `workerStore` for the resolved store and
 * records the persistence config for later driver checks.
 */
const ensureWorkerStoreConfigured = async () => {
    if (workerStoreConfigured)
        return;
    const bootstrapConfig = buildPersistenceBootstrapConfig();
    const persistence = resolvePersistenceConfig(bootstrapConfig);
    if (!persistence)
        return;
    workerStore = await resolveWorkerStore(bootstrapConfig);
    workerStoreConfigured = true;
    workerStoreConfig = persistence;
};
|
|
1295
|
+
// Build the persisted record for a worker from its config and an initial
// status. Nested config objects are shallow-copied (or null when absent)
// so the record does not alias the caller's config.
const buildWorkerRecord = (config, status) => {
    const now = new Date();
    const cloneOrNull = (value) => (value ? { ...value } : null);
    const processorSpec = config.processorSpec
        ? normalizeProcessorSpec(config.processorSpec)
        : null;
    return {
        name: config.name,
        queueName: config.queueName,
        version: config.version ?? '1.0.0',
        status,
        autoStart: resolveAutoStart(config),
        concurrency: config.options?.concurrency ?? 1,
        region: config.datacenter?.primaryRegion ?? null,
        processorSpec: processorSpec ?? null,
        activeStatus: config.activeStatus ?? true,
        features: cloneOrNull(config.features),
        infrastructure: cloneOrNull(config.infrastructure),
        datacenter: cloneOrNull(config.datacenter),
        createdAt: now,
        updatedAt: now,
        lastHealthCheck: undefined,
        lastError: undefined,
        connectionState: undefined,
    };
};
|
|
1320
|
+
// Default auto-scaler settings derived from workersConfig. The off-peak
// window is configured as a single 'HH:MM-HH:MM' string and split here.
const buildDefaultAutoScalerConfig = () => {
    const { autoScaling, costOptimization } = workersConfig;
    const [offPeakStart = '22:00', offPeakEnd = '06:00'] = autoScaling.offPeakSchedule.split('-');
    return {
        enabled: autoScaling.enabled,
        checkInterval: autoScaling.interval,
        scalingPolicies: new Map(),
        costOptimization: {
            enabled: costOptimization.enabled,
            maxCostPerHour: 0,
            preferSpotInstances: costOptimization.spotInstances,
            offPeakSchedule: {
                start: offPeakStart,
                end: offPeakEnd,
                timezone: 'UTC',
                reductionPercentage: Math.round(autoScaling.offPeakReduction * 100),
            },
            budgetAlerts: {
                dailyLimit: 0,
                weeklyLimit: 0,
                monthlyLimit: 0,
            },
        },
    };
};
|
|
1341
|
+
// Merge the off-peak schedule: defaults (or a hard-coded baseline when the
// defaults omit one) overlaid with any caller-supplied override fields.
const resolveOffPeakSchedule = (input, defaults) => {
    const base = defaults.costOptimization.offPeakSchedule ?? {
        start: '22:00',
        end: '06:00',
        timezone: 'UTC',
        reductionPercentage: 0,
    };
    return { ...base, ...input?.costOptimization?.offPeakSchedule };
};
|
|
1355
|
+
// Merge cost-optimization settings: flat fields are overlaid, while the
// nested offPeakSchedule and budgetAlerts objects are merged field-by-field.
const resolveCostOptimization = (input, defaults) => {
    const merged = {
        ...defaults.costOptimization,
        ...input?.costOptimization,
    };
    merged.offPeakSchedule = resolveOffPeakSchedule(input, defaults);
    merged.budgetAlerts = {
        ...defaults.costOptimization.budgetAlerts,
        ...input?.costOptimization?.budgetAlerts,
    };
    return merged;
};
|
|
1364
|
+
// Resolve the final auto-scaler config: defaults when no input is given,
// otherwise defaults overlaid with the input and a deep-merged
// costOptimization section.
const resolveAutoScalerConfig = (input) => {
    const defaults = buildDefaultAutoScalerConfig();
    return input
        ? { ...defaults, ...input, costOptimization: resolveCostOptimization(input, defaults) }
        : defaults;
};
|
|
1374
|
+
// Finalize BullMQ worker options: default the queue prefix and autorun flag
// (only when literally undefined — an explicit null is respected), and
// synthesize a redis connection from infrastructure config when none is set.
const resolveWorkerOptions = (config, autoStart) => {
    const options = { ...config.options };
    if (options.prefix === undefined) {
        options.prefix = getBullMQSafeQueueName();
    }
    if (options.autorun === undefined) {
        options.autorun = autoStart;
    }
    if (options.connection) {
        return options;
    }
    const redis = resolveRedisConfigWithFallback(config.infrastructure?.redis, undefined, 'Worker requires a connection. Provide options.connection or infrastructure.redis config', 'infrastructure.redis');
    return {
        ...options,
        connection: {
            host: redis.host,
            port: redis.port,
            db: redis.db,
            password: redis.password,
        },
    };
};
|
|
1395
|
+
// Default observability settings pulled from workersConfig for each backend
// (prometheus, OpenTelemetry, Datadog).
const buildDefaultObservabilityConfig = () => {
    const obs = workersConfig.observability;
    return {
        prometheus: {
            enabled: obs.prometheus.enabled,
            port: obs.prometheus.port,
        },
        openTelemetry: {
            enabled: obs.opentelemetry.enabled,
            serviceName: 'zintrust-workers',
            exporterUrl: obs.opentelemetry.endpoint,
        },
        datadog: {
            enabled: obs.datadog.enabled,
            tags: obs.datadog.apiKey ? [`apiKey:${obs.datadog.apiKey}`] : undefined,
        },
    };
};
|
|
1412
|
+
// Merge observability config: per-backend overrides are overlaid on the
// defaults; a top-level boolean `enabled` then force-toggles all backends,
// and the OpenTelemetry service name always falls back to the default.
const resolveObservabilityConfig = (input) => {
    const defaults = buildDefaultObservabilityConfig();
    if (!input) {
        return defaults;
    }
    const enabledOverride = 'enabled' in input ? input.enabled : undefined;
    const prometheus = { ...defaults.prometheus, ...input.prometheus };
    const openTelemetry = { ...defaults.openTelemetry, ...input.openTelemetry };
    const datadog = { ...defaults.datadog, ...input.datadog };
    if (typeof enabledOverride === 'boolean') {
        prometheus.enabled = enabledOverride;
        openTelemetry.enabled = enabledOverride;
        datadog.enabled = enabledOverride;
    }
    if (!openTelemetry.serviceName) {
        openTelemetry.serviceName = defaults.openTelemetry.serviceName;
    }
    return { prometheus, openTelemetry, datadog };
};
|
|
1444
|
+
// One-shot ClusterLock initialization, gated on the clustering feature flag.
const initializeClustering = (config) => {
    const enabled = config.features?.clustering ?? false;
    if (clusteringInitialized || !enabled) {
        return;
    }
    const redisConfig = resolveRedisConfigWithFallback(config.infrastructure?.redis, undefined, 'ClusterLock requires infrastructure.redis config', 'infrastructure.redis');
    ClusterLock.initialize(redisConfig);
    clusteringInitialized = true;
};
|
|
1451
|
+
// One-shot WorkerMetrics initialization, gated on the metrics feature flag.
const initializeMetrics = (config) => {
    const enabled = config.features?.metrics ?? false;
    if (metricsInitialized || !enabled) {
        return;
    }
    const redisConfig = resolveRedisConfigWithFallback(config.infrastructure?.redis, undefined, 'WorkerMetrics requires infrastructure.redis config', 'infrastructure.redis');
    WorkerMetrics.initialize(redisConfig);
    metricsInitialized = true;
};
|
|
1458
|
+
// One-shot AutoScaler initialization, gated on the autoScaling feature flag.
const initializeAutoScaling = (config) => {
    const enabled = config.features?.autoScaling ?? false;
    if (autoScalingInitialized || !enabled) {
        return;
    }
    AutoScaler.initialize(resolveAutoScalerConfig(config.infrastructure?.autoScaler));
    autoScalingInitialized = true;
};
|
|
1465
|
+
// Register a circuit breaker for this worker/version when the feature is on.
// Not guarded by a module flag: breakers are per worker name.
const initializeCircuitBreaker = (config, version) => {
    const enabled = config.features?.circuitBreaker ?? false;
    if (enabled) {
        CircuitBreaker.initialize(config.name, version);
    }
};
|
|
1470
|
+
// One-shot DeadLetterQueue initialization, gated on the feature flag.
// Requires a dedicated DLQ config; its redis settings fall back to the
// shared infrastructure.redis config.
const initializeDeadLetterQueue = (config) => {
    const enabled = config.features?.deadLetterQueue ?? false;
    if (deadLetterQueueInitialized || !enabled) {
        return;
    }
    const dlqConfig = requireInfrastructure(config.infrastructure?.deadLetterQueue, 'DeadLetterQueue requires infrastructure.deadLetterQueue config');
    const dlqRedisConfig = resolveRedisConfigWithFallback(dlqConfig.redis, config.infrastructure?.redis, 'DeadLetterQueue requires infrastructure.deadLetterQueue.redis or infrastructure.redis config', 'infrastructure.deadLetterQueue.redis');
    DeadLetterQueue.initialize(dlqRedisConfig, dlqConfig.policy);
    deadLetterQueueInitialized = true;
};
|
|
1478
|
+
// One-shot ResourceMonitor initialize+start, gated on the feature flag.
const initializeResourceMonitoring = (config) => {
    const enabled = config.features?.resourceMonitoring ?? false;
    if (resourceMonitoringInitialized || !enabled) {
        return;
    }
    ResourceMonitor.initialize();
    ResourceMonitor.start();
    resourceMonitoringInitialized = true;
};
|
|
1485
|
+
// One-shot ComplianceManager initialization, gated on the feature flag.
// Compliance redis settings fall back to the shared infrastructure.redis.
const initializeCompliance = (config) => {
    const enabled = config.features?.compliance ?? false;
    if (complianceInitialized || !enabled) {
        return;
    }
    const complianceConfig = requireInfrastructure(config.infrastructure?.compliance, 'ComplianceManager requires infrastructure.compliance config');
    const complianceRedisConfig = resolveRedisConfigWithFallback(complianceConfig.redis, config.infrastructure?.redis, 'ComplianceManager requires infrastructure.compliance.redis or infrastructure.redis config', 'infrastructure.compliance.redis');
    ComplianceManager.initialize(complianceRedisConfig, complianceConfig.config);
    complianceInitialized = true;
};
|
|
1493
|
+
// One-shot Observability initialization, gated on the feature flag.
const initializeObservability = async (config) => {
    const enabled = config.features?.observability ?? false;
    if (observabilityInitialized || !enabled) {
        return;
    }
    await Observability.initialize(resolveObservabilityConfig(config.infrastructure?.observability));
    observabilityInitialized = true;
};
|
|
1500
|
+
// Register this worker's version with WorkerVersioning when the feature is
// on. Not guarded by a module flag: registrations are per worker.
const initializeVersioning = (config, version) => {
    const enabled = config.features?.versioning ?? false;
    if (!enabled) {
        return;
    }
    WorkerVersioning.register({
        workerName: config.name,
        version: WorkerVersioning.parse(version),
        changelog: 'Initial version',
    });
};
|
|
1509
|
+
// Place the worker with the datacenter orchestrator when the feature is on
// and a datacenter config is present.
const initializeDatacenter = (config) => {
    const enabled = config.features?.datacenterOrchestration ?? false;
    if (!enabled || !config.datacenter) {
        return;
    }
    const { datacenter } = config;
    DatacenterOrchestrator.placeWorker({
        workerName: config.name,
        primaryRegion: datacenter.primaryRegion,
        secondaryRegions: datacenter.secondaryRegions ?? [],
        replicationStrategy: 'active-passive',
        affinityRules: {
            preferLocal: datacenter.affinityRules?.preferLocal ?? true,
            maxLatency: datacenter.affinityRules?.maxLatency,
            avoidRegions: datacenter.affinityRules?.avoidRegions,
        },
    });
};
|
|
1524
|
+
/**
 * Attach logging/metrics listeners to a BullMQ worker.
 *
 * Every handler wraps its body in try/catch so a bug inside a listener can
 * never crash the worker process.
 */
const setupWorkerEventListeners = (worker, workerName, workerVersion, features) => {
    worker.on('completed', (job) => {
        try {
            Logger.debug(`Job completed: ${workerName}`, { jobId: job.id });
            if (features?.observability === true) {
                Observability.incrementCounter('worker.jobs.completed', 1, {
                    worker: workerName,
                    version: workerVersion,
                });
            }
        }
        catch (error) {
            // Isolate error - don't let it bubble up
            Logger.error(`Error in worker completed event handler: ${workerName}`, error, 'workers');
        }
    });
    worker.on('failed', (job, error) => {
        try {
            // `job` may be undefined when the failure is not tied to a job.
            Logger.error(`Job failed: ${workerName}`, { error, jobId: job?.id }, 'workers');
            if (features?.observability === true) {
                Observability.incrementCounter('worker.jobs.failed', 1, {
                    worker: workerName,
                    version: workerVersion,
                });
            }
        }
        catch (handlerError) {
            // Isolate error - don't let it bubble up
            Logger.error(`Error in worker failed event handler: ${workerName}`, handlerError, 'workers');
        }
    });
    worker.on('error', (error) => {
        try {
            Logger.error(`Worker error: ${workerName}`, error);
            // Check if this is a Redis connection error that should be handled gracefully
            if (error.message.includes('ERR value is not an integer') ||
                error.message.includes('NOAUTH') ||
                error.message.includes('ECONNREFUSED')) {
                Logger.warn(`Worker ${workerName} encountered Redis configuration error - worker will remain failed but server will continue running`);
            }
        }
        catch (handlerError) {
            // Isolate error - don't let it bubble up
            Logger.error(`Error in worker error event handler: ${workerName}`, handlerError, 'workers');
        }
    });
};
|
|
1571
|
+
/**
 * Register a created BullMQ worker with the WorkerRegistry.
 *
 * The registry stores a factory that yields a handle exposing lifecycle
 * operations (start/stop/drain/sleep/wakeup) plus a metadata snapshot taken
 * at registration time.
 */
const registerWorkerInstance = (params) => {
    const { worker, config, workerVersion, queueName, options, autoStart } = params;
    WorkerRegistry.register({
        name: config.name,
        config: {},
        activeStatus: config.activeStatus ?? true,
        version: workerVersion,
        region: config.datacenter?.primaryRegion,
        queues: [queueName],
        factory: async () => {
            await Promise.resolve();
            return {
                // Snapshot of the worker's registration-time state; counters
                // start at zero and are not updated by this handle itself.
                metadata: {
                    name: config.name,
                    status: autoStart ? 'running' : 'stopped',
                    version: workerVersion,
                    region: config.datacenter?.primaryRegion ?? 'unknown',
                    queueName,
                    concurrency: options?.concurrency ?? 1,
                    activeStatus: config.activeStatus ?? true,
                    startedAt: new Date(),
                    stoppedAt: null,
                    lastProcessedAt: null,
                    restartCount: 0,
                    processedCount: 0,
                    errorCount: 0,
                    lockKey: null,
                    priority: 0,
                    memoryUsage: 0,
                    cpuUsage: 0,
                    circuitState: 'closed',
                    queues: [queueName],
                    plugins: [],
                    datacenter: config.datacenter?.primaryRegion ?? 'unknown',
                    canaryPercentage: 0,
                    config: {},
                },
                instance: worker,
                start: () => {
                    // Workers created with autorun enabled are already
                    // running; only manually-started workers need run().
                    if (!autoStart) {
                        worker.run().catch((error) => {
                            Logger.error(`Failed to start worker "${config.name}"`, error);
                        });
                    }
                },
                stop: async () => worker.close(),
                drain: async () => worker.close(),
                sleep: async () => worker.pause(),
                wakeup: () => {
                    worker.resume();
                },
                // NOTE(review): status/health are static placeholders here,
                // not live probes of the worker.
                getStatus: () => 'running',
                getHealth: () => 'green',
            };
        },
    });
};
|
|
1628
|
+
/**
 * Initialize every optional worker subsystem for a new worker.
 *
 * Runs the per-feature initializers in a fixed order; only observability is
 * asynchronous and is awaited. The order is preserved deliberately —
 * individual initializers may depend on earlier ones having run.
 *
 * @param {object} config - Worker configuration (feature flags, datacenter, …)
 * @param {string} workerVersion - Resolved version string for this worker
 */
const initializeWorkerFeatures = async (config, workerVersion) => {
    initializeClustering(config);
    initializeMetrics(config);
    initializeAutoScaling(config);
    initializeCircuitBreaker(config, workerVersion);
    initializeDeadLetterQueue(config);
    initializeResourceMonitoring(config);
    initializeCompliance(config);
    // Observability setup is async (presumably opens exporters/connections)
    await initializeObservability(config);
    initializeVersioning(config, workerVersion);
    initializeDatacenter(config);
};
|
|
1640
|
+
/**
|
|
1641
|
+
* Worker Factory - Sealed namespace
|
|
1642
|
+
*/
|
|
1643
|
+
export const WorkerFactory = Object.freeze({
|
|
1644
|
+
registerProcessor,
|
|
1645
|
+
registerProcessors,
|
|
1646
|
+
registerProcessorPaths,
|
|
1647
|
+
registerProcessorResolver,
|
|
1648
|
+
registerProcessorSpec,
|
|
1649
|
+
resolveProcessorPath,
|
|
1650
|
+
resolveProcessorSpec,
|
|
1651
|
+
    /**
     * Register a new worker configuration without starting it.
     *
     * Persists the worker record with status STOPPED. Throws if a worker with
     * the same name is already running in-memory or already exists in the
     * persistence store.
     *
     * @param {object} config - Worker configuration; `config.name` is required.
     * @throws Worker error when the name is already taken (locally or persisted).
     */
    async register(config) {
        const { name } = config;
        // Check in-memory first (though unlikely if we are just registering)
        if (workers.has(name)) {
            throw ErrorFactory.createWorkerError(`Worker "${name}" is already running locally`);
        }
        const store = await getStoreForWorker(config);
        try {
            const existing = await store.get(name);
            if (existing) {
                throw ErrorFactory.createWorkerError(`Worker "${name}" already exists in persistence`);
            }
            // Init features to validate config, but mainly we just want to save it.
            // initializeWorkerFeatures might rely on being active or having resources, so we might skip it or do partial.
            // For now, just save definition.
            // Status should be STOPPED or CREATED.
            await store.save(buildWorkerRecord(config, WorkerCreationStatus.STOPPED));
            Logger.info(`Worker registered (persistence only): ${name}`);
        }
        finally {
            // If Cloudflare environment, try to close store connection to avoid zombie connections
            if (Cloudflare.getWorkersEnv() !== null && store.close) {
                await store.close();
            }
        }
    },
|
|
1680
|
+
    /**
     * Create new worker with full setup.
     *
     * Walks the persisted status through the lifecycle
     * CREATING -> CONNECTING -> STARTING -> RUNNING, constructing the BullMQ
     * worker in between, and marks the record FAILED on any error. The created
     * BullMQ `Worker` instance is returned on success.
     *
     * @param {object} config - { name, version?, queueName, features?, ... }
     * @returns {Worker} the underlying BullMQ worker
     * @throws Worker error if the name is already registered in-memory;
     *         rethrows any failure from the setup steps after persisting FAILED.
     */
    async create(config) {
        const { name, version, queueName, features } = config;
        const workerVersion = version ?? '1.0.0';
        const autoStart = resolveAutoStart(config);
        if (workers.has(name)) {
            throw ErrorFactory.createWorkerError(`Worker "${name}" already exists`);
        }
        // Resolve the correct store for this worker configuration
        const store = await getStoreForWorker(config);
        // Save initial status as "creating"
        await store.save(buildWorkerRecord(config, WorkerCreationStatus.CREATING));
        try {
            await initializeWorkerFeatures(config, workerVersion);
            // Update status to "connecting"
            await store.update(name, {
                status: WorkerCreationStatus.CONNECTING,
                updatedAt: new Date(),
            });
            // Create enhanced processor
            const enhancedProcessor = createEnhancedProcessor(config);
            // Create BullMQ worker
            const resolvedOptions = resolveWorkerOptions(config, autoStart);
            const worker = new Worker(queueName, enhancedProcessor, resolvedOptions);
            setupWorkerEventListeners(worker, name, workerVersion, features);
            // Update status to "starting"
            await store.update(name, {
                status: WorkerCreationStatus.STARTING,
                updatedAt: new Date(),
            });
            const timeoutMs = Env.getInt('WORKER_CONNECTION_TIMEOUT', 5000);
            // Wait for actual connection and health verification
            await waitForWorkerConnection(worker, name, queueName, timeoutMs);
            // Update status to "running" only after successful connection
            await store.update(name, {
                status: WorkerCreationStatus.RUNNING,
                updatedAt: new Date(),
            });
            // Store worker instance
            const instance = {
                worker,
                config,
                startedAt: new Date(),
                status: WorkerCreationStatus.RUNNING,
                connectionState: 'connected',
            };
            workers.set(name, instance);
            registerWorkerInstance({
                worker,
                config,
                workerVersion,
                queueName,
                options: resolvedOptions,
                autoStart,
            });
            if (autoStart) {
                await WorkerRegistry.start(name, workerVersion);
            }
            // Execute afterStart hooks
            if (features?.plugins === true) {
                await PluginManager.executeHook('afterStart', {
                    workerName: name,
                    timestamp: new Date(),
                });
            }
            // Start health monitoring for the worker
            startHealthMonitoring(name, worker, queueName);
            return worker;
        }
        catch (error) {
            // Handle failure - update status to "failed"
            // Re-resolve store in case of error to be safe
            const failStore = await getStoreForWorker(config);
            // NOTE(review): assumes `error` is an Error; a thrown non-Error
            // would make `error.message` undefined — confirm acceptable.
            await failStore.update(name, {
                status: WorkerCreationStatus.FAILED,
                updatedAt: new Date(),
                lastError: error.message,
            });
            Logger.error(`Worker creation failed: ${name}`, error);
            throw error;
        }
    },
|
|
1764
|
+
/**
|
|
1765
|
+
* Get worker instance
|
|
1766
|
+
*/
|
|
1767
|
+
get(name) {
|
|
1768
|
+
const instance = workers.get(name);
|
|
1769
|
+
return instance ? { ...instance } : null;
|
|
1770
|
+
},
|
|
1771
|
+
/**
|
|
1772
|
+
* Update worker status directly (used by HealthMonitor)
|
|
1773
|
+
*/
|
|
1774
|
+
async updateStatus(name, status, error) {
|
|
1775
|
+
const instance = workers.get(name);
|
|
1776
|
+
if (instance) {
|
|
1777
|
+
instance.status = status;
|
|
1778
|
+
}
|
|
1779
|
+
try {
|
|
1780
|
+
const store = await getStoreForWorker(instance?.config ?? {
|
|
1781
|
+
name,
|
|
1782
|
+
queueName: 'unknown',
|
|
1783
|
+
processor: async () => {
|
|
1784
|
+
return Promise.resolve(); //NOSONAR
|
|
1785
|
+
},
|
|
1786
|
+
});
|
|
1787
|
+
const errorMessage = typeof error === 'string' ? error : error?.message;
|
|
1788
|
+
await store.update(name, {
|
|
1789
|
+
status: status,
|
|
1790
|
+
updatedAt: new Date(),
|
|
1791
|
+
lastError: errorMessage,
|
|
1792
|
+
});
|
|
1793
|
+
}
|
|
1794
|
+
catch (err) {
|
|
1795
|
+
Logger.warn(`Failed to update status for ${name} to ${status}`, err);
|
|
1796
|
+
}
|
|
1797
|
+
},
|
|
1798
|
+
    /**
     * Stop worker.
     *
     * Closes the BullMQ worker with a 5s timeout guard, unregisters health
     * monitoring, persists the STOPPED status (unless suppressed), and fires
     * beforeStop/afterStop plugin hooks when the plugins feature is enabled.
     *
     * @param {string} name - Worker name.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     * @param {object} [options] - { skipPersistedUpdate?: boolean } — when true,
     *        the persisted record is not touched (used during bulk shutdown).
     */
    async stop(name, persistenceOverride, options) {
        const skipPersistedUpdate = options?.skipPersistedUpdate === true;
        const instance = workers.get(name);
        const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
        if (!instance) {
            // Not running in this process: only reconcile the persisted record
            if (!skipPersistedUpdate) {
                await store.update(name, { status: 'stopped', updatedAt: new Date() });
                Logger.info(`Worker marked stopped (not running): ${name}`);
            }
            return;
        }
        // Execute beforeStop hooks
        if (instance.config.features?.plugins === true) {
            await PluginManager.executeHook('beforeStop', {
                workerName: name,
                timestamp: new Date(),
            });
        }
        // Close worker with timeout to prevent hanging
        const workerClosePromise = instance.worker.close();
        let timeoutId;
        const timeoutPromise = new Promise((_, reject) => {
            // eslint-disable-next-line no-restricted-syntax
            timeoutId = setTimeout(() => {
                reject(new Error('Worker close timeout'));
            }, 5000);
        });
        try {
            // Whichever settles first wins; a timeout rejection is tolerated below
            await Promise.race([workerClosePromise, timeoutPromise]);
        }
        catch (error) {
            // Deliberate best-effort: proceed with teardown even if close hung
            Logger.warn(`Worker "${name}" close failed or timed out, continuing...`, error);
        }
        finally {
            // Always clean up timeout to prevent memory leak
            if (timeoutId) {
                clearTimeout(timeoutId);
                timeoutId = undefined;
            }
        }
        instance.status = WorkerCreationStatus.STOPPED;
        // Stop health monitoring for this worker
        HealthMonitor.unregister(name);
        if (!skipPersistedUpdate) {
            try {
                await store.update(name, {
                    status: WorkerCreationStatus.STOPPED,
                    updatedAt: new Date(),
                });
                Logger.info(`Worker "${name}" status updated to stopped`);
            }
            catch (error) {
                // Persistence failure must not block the rest of the teardown
                Logger.error(`Failed to update worker "${name}" status`, error);
            }
        }
        await WorkerRegistry.stop(name);
        // Execute afterStop hooks
        if (instance.config.features?.plugins === true) {
            await PluginManager.executeHook('afterStop', {
                workerName: name,
                timestamp: new Date(),
            });
        }
        Logger.info(`Worker stopped: ${name}`);
    },
|
|
1866
|
+
    /**
     * Restart worker.
     *
     * If the worker is not in memory, falls back to starting it from its
     * persisted record. Otherwise stops it, removes the old in-memory entry,
     * and re-creates it from the retained config.
     *
     * @param {string} name - Worker name.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     * @throws NotFound error if the entry vanishes between stop and re-create.
     */
    async restart(name, persistenceOverride) {
        const instance = workers.get(name);
        if (!instance) {
            await WorkerFactory.startFromPersisted(name, persistenceOverride);
            Logger.info(`Worker started from persistence: ${name}`);
            return;
        }
        await WorkerFactory.stop(name, persistenceOverride);
        const refreshed = workers.get(name);
        if (!refreshed) {
            throw ErrorFactory.createNotFoundError(`Worker "${name}" not found`);
        }
        workers.delete(name);
        const newWorker = await WorkerFactory.create(refreshed.config);
        // NOTE(review): `refreshed` was just deleted from the map and create()
        // has installed a fresh entry, so these mutations touch a detached
        // object — confirm whether they are still needed.
        refreshed.worker = newWorker;
        refreshed.status = WorkerCreationStatus.RUNNING;
        refreshed.startedAt = new Date();
        Logger.info(`Worker restarted: ${name}`);
    },
|
|
1888
|
+
/**
|
|
1889
|
+
* Pause worker
|
|
1890
|
+
*/
|
|
1891
|
+
async pause(name, persistenceOverride) {
|
|
1892
|
+
const instance = workers.get(name);
|
|
1893
|
+
const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
|
|
1894
|
+
if (instance) {
|
|
1895
|
+
await instance.worker.pause();
|
|
1896
|
+
instance.status = WorkerCreationStatus.STARTING; // Using STARTING as equivalent to sleeping/paused
|
|
1897
|
+
}
|
|
1898
|
+
await store.update(name, {
|
|
1899
|
+
status: WorkerCreationStatus.STARTING,
|
|
1900
|
+
updatedAt: new Date(),
|
|
1901
|
+
});
|
|
1902
|
+
Logger.info(`Worker paused: ${name}`);
|
|
1903
|
+
},
|
|
1904
|
+
/**
|
|
1905
|
+
* Resume worker
|
|
1906
|
+
*/
|
|
1907
|
+
async resume(name, persistenceOverride) {
|
|
1908
|
+
const instance = workers.get(name);
|
|
1909
|
+
const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
|
|
1910
|
+
if (instance) {
|
|
1911
|
+
instance.worker.resume();
|
|
1912
|
+
instance.status = WorkerCreationStatus.RUNNING;
|
|
1913
|
+
}
|
|
1914
|
+
try {
|
|
1915
|
+
await store.update(name, { status: WorkerCreationStatus.RUNNING, updatedAt: new Date() });
|
|
1916
|
+
}
|
|
1917
|
+
catch (error) {
|
|
1918
|
+
Logger.error('Failed to persist worker resume', error);
|
|
1919
|
+
}
|
|
1920
|
+
Logger.info(`Worker resumed: ${name}`);
|
|
1921
|
+
},
|
|
1922
|
+
    /**
     * Update auto-start for persisted worker.
     *
     * Persists the new autoStart flag (and mirrors it into the in-memory
     * config if running). When autoStart is turned on, additionally ensures
     * the worker is actually running: starts the in-memory worker if it is
     * not running, or boots it from persistence when not loaded at all.
     *
     * @param {string} name - Worker name.
     * @param {boolean} autoStart - New auto-start flag.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     */
    async setAutoStart(name, autoStart, persistenceOverride) {
        const instance = workers.get(name);
        const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
        if (instance) {
            instance.config.autoStart = autoStart;
        }
        await store.update(name, { autoStart, updatedAt: new Date() });
        // Disabling auto-start never stops a running worker — nothing else to do
        if (!autoStart)
            return;
        const refreshed = workers.get(name);
        if (refreshed) {
            if (refreshed.status !== 'running') {
                await WorkerFactory.start(name, persistenceOverride);
            }
            return;
        }
        // Not loaded in this process: boot it from the persisted record
        await WorkerFactory.startFromPersisted(name, persistenceOverride);
    },
|
|
1943
|
+
/**
|
|
1944
|
+
* Update active status for a worker
|
|
1945
|
+
*/
|
|
1946
|
+
async setWorkerActiveStatus(name, activeStatus, persistenceOverride) {
|
|
1947
|
+
const instance = workers.get(name);
|
|
1948
|
+
const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
|
|
1949
|
+
if (instance) {
|
|
1950
|
+
instance.config.activeStatus = activeStatus;
|
|
1951
|
+
}
|
|
1952
|
+
await store.update(name, { activeStatus, updatedAt: new Date() });
|
|
1953
|
+
WorkerRegistry.setActiveStatus(name, activeStatus);
|
|
1954
|
+
if (activeStatus === false && instance) {
|
|
1955
|
+
await WorkerFactory.stop(name, persistenceOverride);
|
|
1956
|
+
}
|
|
1957
|
+
},
|
|
1958
|
+
/**
|
|
1959
|
+
* Get active status for a worker
|
|
1960
|
+
*/
|
|
1961
|
+
async getWorkerActiveStatus(name, persistenceOverride) {
|
|
1962
|
+
const instance = workers.get(name);
|
|
1963
|
+
if (instance?.config.activeStatus !== undefined) {
|
|
1964
|
+
return instance.config.activeStatus;
|
|
1965
|
+
}
|
|
1966
|
+
const store = await getStoreForWorker(instance?.config, persistenceOverride);
|
|
1967
|
+
const record = await store.get(name);
|
|
1968
|
+
if (!record)
|
|
1969
|
+
return null;
|
|
1970
|
+
return record.activeStatus ?? true;
|
|
1971
|
+
},
|
|
1972
|
+
    /**
     * Update persisted worker record and in-memory config if running.
     *
     * Merges `patch` over the current persisted record (patch keys win;
     * `updatedAt` defaults to now) and saves it. When the worker is loaded
     * in this process, its runtime config is updated field-by-field so a
     * subsequent restart picks up the new values.
     *
     * @param {string} name - Worker name.
     * @param {object} patch - Partial record fields to merge.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     * @throws NotFound error when no persisted record exists for `name`.
     */
    async update(name, patch, persistenceOverride) {
        const instance = workers.get(name);
        const store = await getStoreForWorker(instance?.config, persistenceOverride);
        const current = await store.get(name);
        if (!current) {
            throw ErrorFactory.createNotFoundError(`Worker "${name}" not found in persistence store`);
        }
        const merged = {
            ...current,
            ...patch,
            updatedAt: patch.updatedAt ?? new Date(),
        };
        // Use save() which will insert or update appropriately for each store
        await store.save(merged);
        // If the worker is running in memory, update its runtime config so restarts use the new config
        if (instance) {
            const cfg = instance.config;
            // Each field falls back to the existing config value when the
            // merged record does not carry it (?? keeps false/0 from merged)
            instance.config = {
                ...cfg,
                version: merged.version ?? cfg.version,
                queueName: merged.queueName ?? cfg.queueName,
                options: {
                    ...cfg.options,
                    concurrency: merged.concurrency ?? cfg.options?.concurrency,
                },
                processorSpec: merged.processorSpec ?? cfg.processorSpec,
                activeStatus: merged.activeStatus ?? cfg.activeStatus,
                infrastructure: merged.infrastructure ?? cfg.infrastructure,
                features: merged.features ?? cfg.features,
                datacenter: merged.datacenter ?? cfg.datacenter,
            };
        }
    },
|
|
2008
|
+
    /**
     * Start worker.
     *
     * Requires the worker to already be loaded in-memory; refuses to start a
     * worker that is inactive either in-memory or in the persisted record.
     * Delegates the actual start to WorkerRegistry and persists RUNNING.
     *
     * @param {string} name - Worker name.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     * @throws NotFound error when the worker is not loaded in this process.
     * @throws Config error when the worker is marked inactive.
     */
    async start(name, persistenceOverride) {
        const instance = workers.get(name);
        // Even if instance exists, we must validate against the requested driver
        const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
        if (!instance) {
            throw ErrorFactory.createNotFoundError(`Worker "${name}" not found`);
        }
        if (instance.config.activeStatus === false) {
            throw ErrorFactory.createConfigError(`Worker "${name}" is inactive`);
        }
        // The persisted record may have been deactivated by another process
        const persisted = await store.get(name);
        if (persisted?.activeStatus === false) {
            throw ErrorFactory.createConfigError(`Worker "${name}" is inactive`);
        }
        const version = instance.config.version ?? '1.0.0';
        await WorkerRegistry.start(name, version);
        instance.status = WorkerCreationStatus.RUNNING;
        instance.startedAt = new Date();
        await store.update(name, { status: WorkerCreationStatus.RUNNING, updatedAt: new Date() });
        Logger.info(`Worker started: ${name}`);
    },
|
|
2032
|
+
/**
|
|
2033
|
+
* List all workers
|
|
2034
|
+
*/
|
|
2035
|
+
list() {
|
|
2036
|
+
return Array.from(workers.keys());
|
|
2037
|
+
},
|
|
2038
|
+
/**
|
|
2039
|
+
* List all persisted workers
|
|
2040
|
+
*/
|
|
2041
|
+
async listPersisted(persistenceOverride, options) {
|
|
2042
|
+
const records = await WorkerFactory.listPersistedRecords(persistenceOverride, options);
|
|
2043
|
+
return records.map((record) => record.name);
|
|
2044
|
+
},
|
|
2045
|
+
async listPersistedRecords(persistenceOverride, options) {
|
|
2046
|
+
const includeInactive = options?.includeInactive === true;
|
|
2047
|
+
if (!persistenceOverride) {
|
|
2048
|
+
if (!isCloudflareRuntime()) {
|
|
2049
|
+
await ensureWorkerStoreConfigured();
|
|
2050
|
+
const records = await workerStore.list(options);
|
|
2051
|
+
return includeInactive
|
|
2052
|
+
? records
|
|
2053
|
+
: records.filter((record) => record.activeStatus !== false);
|
|
2054
|
+
}
|
|
2055
|
+
const store = await getDefaultStoreForRuntime();
|
|
2056
|
+
try {
|
|
2057
|
+
const records = await store.list(options);
|
|
2058
|
+
return includeInactive
|
|
2059
|
+
? records
|
|
2060
|
+
: records.filter((record) => record.activeStatus !== false);
|
|
2061
|
+
}
|
|
2062
|
+
finally {
|
|
2063
|
+
if (store.close) {
|
|
2064
|
+
await store.close();
|
|
2065
|
+
}
|
|
2066
|
+
}
|
|
2067
|
+
}
|
|
2068
|
+
const store = await resolveWorkerStoreForPersistence(persistenceOverride);
|
|
2069
|
+
const records = await store.list(options);
|
|
2070
|
+
return includeInactive ? records : records.filter((record) => record.activeStatus !== false);
|
|
2071
|
+
},
|
|
2072
|
+
    /**
     * Start a worker from persisted storage when it is not registered.
     *
     * Loads the persisted record, resolves a processor (first from the
     * in-process registry, then from the persisted processorSpec), and hands
     * the rebuilt configuration to create() with autoStart forced on.
     *
     * @param {string} name - Worker name.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     * @throws NotFound error when no persisted record exists.
     * @throws Config error when the record is inactive or no processor can be resolved.
     */
    async startFromPersisted(name, persistenceOverride) {
        const record = await getPersistedRecord(name, persistenceOverride);
        if (!record) {
            throw ErrorFactory.createNotFoundError(`Worker "${name}" not found in persistence store`);
        }
        if (record.activeStatus === false) {
            throw ErrorFactory.createConfigError(`Worker "${name}" is inactive`);
        }
        // Prefer a processor registered in this process
        let processor = await resolveProcessor(name);
        const spec = record.processorSpec ?? undefined;
        if (!processor && spec) {
            try {
                // Fall back to dynamically resolving the persisted spec
                processor = await resolveProcessorSpec(spec);
            }
            catch (error) {
                // Non-fatal here; the !processor check below produces the real failure
                Logger.error(`Failed to resolve processor module for "${name}"`, error);
            }
        }
        if (!processor) {
            throw ErrorFactory.createConfigError(`Worker "${name}" processor is not registered or resolvable. Register the processor at startup or persist a processorSpec.`);
        }
        await WorkerFactory.create({
            name: record.name,
            queueName: record.queueName,
            version: record.version ?? undefined,
            processor,
            processorSpec: record.processorSpec ?? undefined,
            activeStatus: record.activeStatus ?? true,
            autoStart: true, // Override to true when manually starting
            options: { concurrency: record.concurrency },
            infrastructure: record.infrastructure,
            features: record.features,
            datacenter: record.datacenter,
        });
    },
|
|
2110
|
+
/**
|
|
2111
|
+
* Get persisted worker record
|
|
2112
|
+
*/
|
|
2113
|
+
async getPersisted(name, persistenceOverride) {
|
|
2114
|
+
const instance = workers.get(name);
|
|
2115
|
+
const store = await getStoreForWorker(instance?.config, persistenceOverride);
|
|
2116
|
+
try {
|
|
2117
|
+
const result = await store.get(name);
|
|
2118
|
+
return result;
|
|
2119
|
+
}
|
|
2120
|
+
finally {
|
|
2121
|
+
if (Cloudflare.getWorkersEnv() !== null && store.close) {
|
|
2122
|
+
await store.close();
|
|
2123
|
+
}
|
|
2124
|
+
}
|
|
2125
|
+
},
|
|
2126
|
+
    /**
     * Remove worker.
     *
     * When the worker is loaded in-memory it is stopped first, then purged
     * from every feature module (registry, auto-scaler, resource monitor,
     * circuit breaker, canary, versioning, datacenter, observability, health
     * monitor) before the in-memory entry and the persisted record are deleted.
     *
     * @param {string} name - Worker name.
     * @param {*} [persistenceOverride] - Optional persistence driver override.
     */
    async remove(name, persistenceOverride) {
        const instance = workers.get(name);
        // Validate that worker exists in the store we are trying to remove from
        const store = await validateAndGetStore(name, instance?.config, persistenceOverride);
        if (instance) {
            await WorkerFactory.stop(name, persistenceOverride);
            const registry = WorkerRegistry;
            // Optional call: not every registry implementation exposes unregister
            registry.unregister?.(name);
            AutoScaler.clearHistory(name);
            ResourceMonitor.clearHistory(name);
            CircuitBreaker.deleteWorker(name);
            CanaryController.purge(name);
            WorkerVersioning.clear(name);
            DatacenterOrchestrator.removeWorker(name);
            await Observability.clearWorkerMetrics(name);
            // Stop health monitoring for this worker
            HealthMonitor.unregister(name);
            workers.delete(name);
        }
        // Remove the persisted record regardless of in-memory presence
        await store.remove(name);
        Logger.info(`Worker removed: ${name}`);
    },
|
|
2151
|
+
/**
|
|
2152
|
+
* Get worker metrics
|
|
2153
|
+
*/
|
|
2154
|
+
async getMetrics(name) {
|
|
2155
|
+
const instance = workers.get(name);
|
|
2156
|
+
if (!instance) {
|
|
2157
|
+
throw ErrorFactory.createNotFoundError(`Worker "${name}" not found`);
|
|
2158
|
+
}
|
|
2159
|
+
if (instance.config.features?.metrics === undefined || !instance.config.features?.metrics) {
|
|
2160
|
+
return null;
|
|
2161
|
+
}
|
|
2162
|
+
const now = Date.now();
|
|
2163
|
+
const oneHourAgo = now - 3600 * 1000;
|
|
2164
|
+
const metrics = await WorkerMetrics.aggregate({
|
|
2165
|
+
workerName: name,
|
|
2166
|
+
metricType: 'processed',
|
|
2167
|
+
granularity: 'hourly',
|
|
2168
|
+
startDate: new Date(oneHourAgo),
|
|
2169
|
+
endDate: new Date(now),
|
|
2170
|
+
});
|
|
2171
|
+
return metrics;
|
|
2172
|
+
},
|
|
2173
|
+
/**
|
|
2174
|
+
* Get worker health
|
|
2175
|
+
*/
|
|
2176
|
+
async getHealth(name) {
|
|
2177
|
+
const instance = workers.get(name);
|
|
2178
|
+
if (!instance) {
|
|
2179
|
+
throw ErrorFactory.createNotFoundError(`Worker "${name}" not found`);
|
|
2180
|
+
}
|
|
2181
|
+
if (!(instance.config.features?.metrics ?? false)) {
|
|
2182
|
+
return { status: 'unknown' };
|
|
2183
|
+
}
|
|
2184
|
+
const health = await WorkerMetrics.getLatestHealth(name);
|
|
2185
|
+
return health;
|
|
2186
|
+
},
|
|
2187
|
+
    /**
     * Shutdown all workers.
     *
     * Order matters: persisted statuses are bulk-updated first (grouped per
     * store, via updateMany when available), then every worker is stopped
     * with persisted updates suppressed, then all feature modules are torn
     * down and the in-memory map is cleared.
     */
    async shutdown() {
        Logger.info('WorkerFactory shutting down...');
        const workerEntries = Array.from(workers.entries());
        const workerNames = workerEntries.map(([name]) => name);
        // Bulk-update persisted statuses before stopping workers to avoid per-worker DB updates
        // during shutdown (which can fail if DB connections are closing).
        const storeGroups = new Map();
        // Parallel get stores for all workers
        const storePromises = workerEntries.map(async ([name, instance]) => {
            const store = await getStoreForWorker(instance.config);
            return { name, store };
        });
        const storeMappings = await Promise.all(storePromises);
        // Group worker names by store instance (keyed by object identity)
        for (const { name, store } of storeMappings) {
            const existing = storeGroups.get(store);
            if (existing) {
                existing.push(name);
            }
            else {
                storeGroups.set(store, [name]);
            }
        }
        // Parallel bulk updates for all store groups
        const updatePromises = Array.from(storeGroups.entries()).map(async ([store, names]) => {
            // Stores without updateMany are skipped here; their records were
            // NOT individually updated (per-worker updates are suppressed below)
            if (typeof store.updateMany === 'function') {
                await store.updateMany(names, {
                    status: WorkerCreationStatus.STOPPED,
                    updatedAt: new Date(),
                });
            }
        });
        await Promise.all(updatePromises);
        // Stop all workers in parallel; persistence already handled above
        await Promise.all(workerNames.map(async (name) => WorkerFactory.stop(name, undefined, { skipPersistedUpdate: true })));
        // Shutdown all modules
        ResourceMonitor.stop();
        await WorkerMetrics.shutdown();
        await MultiQueueWorker.shutdown();
        await ComplianceManager.shutdown();
        await PriorityQueue.shutdown();
        HealthMonitor.shutdown();
        AutoScaler.stop();
        ClusterLock.shutdown();
        WorkerVersioning.shutdown();
        CanaryController.shutdown();
        DatacenterOrchestrator.shutdown();
        PluginManager.shutdown();
        Observability.shutdown();
        await DeadLetterQueue.shutdown();
        CircuitBreaker.shutdown();
        workers.clear();
        Logger.info('WorkerFactory shutdown complete');
    },
|
|
2242
|
+
    /**
     * Reset persistence connection state.
     * Useful when connections become stale in long-running processes or serverless environments.
     *
     * Drops the configured flag, replaces the module-level store with a fresh
     * in-memory one, and clears the cached store instances so the next access
     * re-resolves persistence from scratch.
     */
    async resetPersistence() {
        workerStoreConfigured = false;
        workerStore = InMemoryWorkerStore.create();
        storeInstanceCache.clear();
        Logger.info('Worker persistence configuration reset');
    },
|
|
2252
|
+
});
|
|
2253
|
+
// Graceful shutdown handled by WorkerShutdown
|