@corvohq/worker 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/gen/corvo/v1/worker_pb.d.ts +689 -0
- package/dist/gen/corvo/v1/worker_pb.js +133 -0
- package/dist/index.d.ts +55 -0
- package/dist/index.js +405 -0
- package/dist/rpc.d.ts +78 -0
- package/dist/rpc.js +193 -0
- package/package.json +24 -0
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
// @generated by protoc-gen-es v2.11.0 with parameter "target=ts"
|
|
2
|
+
// @generated from file corvo/v1/worker.proto (package corvo.v1, syntax proto3)
|
|
3
|
+
/* eslint-disable */
|
|
4
|
+
import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2";
|
|
5
|
+
import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt";
|
|
6
|
+
/**
|
|
7
|
+
* Describes the file corvo/v1/worker.proto.
|
|
8
|
+
*/
|
|
9
|
+
export const file_corvo_v1_worker = /*@__PURE__*/ fileDesc("ChVjb3J2by92MS93b3JrZXIucHJvdG8SCGNvcnZvLnYxIlsKDkVucXVldWVSZXF1ZXN0Eg0KBXF1ZXVlGAEgASgJEhQKDHBheWxvYWRfanNvbhgCIAEoCRIkCgVhZ2VudBgDIAEoCzIVLmNvcnZvLnYxLkFnZW50Q29uZmlnIkoKD0VucXVldWVSZXNwb25zZRIOCgZqb2JfaWQYASABKAkSDgoGc3RhdHVzGAIgASgJEhcKD3VuaXF1ZV9leGlzdGluZxgDIAEoCCJbCgxGZXRjaFJlcXVlc3QSDgoGcXVldWVzGAEgAygJEhEKCXdvcmtlcl9pZBgCIAEoCRIQCghob3N0bmFtZRgDIAEoCRIWCg5sZWFzZV9kdXJhdGlvbhgEIAEoBSLiAQoNRmV0Y2hSZXNwb25zZRINCgVmb3VuZBgBIAEoCBIOCgZqb2JfaWQYAiABKAkSDQoFcXVldWUYAyABKAkSFAoMcGF5bG9hZF9qc29uGAQgASgJEg8KB2F0dGVtcHQYBSABKAUSEwoLbWF4X3JldHJpZXMYBiABKAUSFgoObGVhc2VfZHVyYXRpb24YByABKAUSFwoPY2hlY2twb2ludF9qc29uGAggASgJEhEKCXRhZ3NfanNvbhgJIAEoCRIjCgVhZ2VudBgKIAEoCzIULmNvcnZvLnYxLkFnZW50U3RhdGUibwoRRmV0Y2hCYXRjaFJlcXVlc3QSDgoGcXVldWVzGAEgAygJEhEKCXdvcmtlcl9pZBgCIAEoCRIQCghob3N0bmFtZRgDIAEoCRIWCg5sZWFzZV9kdXJhdGlvbhgEIAEoBRINCgVjb3VudBgFIAEoBSLTAQoNRmV0Y2hCYXRjaEpvYhIOCgZqb2JfaWQYASABKAkSDQoFcXVldWUYAiABKAkSFAoMcGF5bG9hZF9qc29uGAMgASgJEg8KB2F0dGVtcHQYBCABKAUSEwoLbWF4X3JldHJpZXMYBSABKAUSFgoObGVhc2VfZHVyYXRpb24YBiABKAUSFwoPY2hlY2twb2ludF9qc29uGAcgASgJEhEKCXRhZ3NfanNvbhgIIAEoCRIjCgVhZ2VudBgJIAEoCzIULmNvcnZvLnYxLkFnZW50U3RhdGUiOwoSRmV0Y2hCYXRjaFJlc3BvbnNlEiUKBGpvYnMYASADKAsyFy5jb3J2by52MS5GZXRjaEJhdGNoSm9iIqcBCgtVc2FnZVJlcG9ydBIUCgxpbnB1dF90b2tlbnMYASABKAMSFQoNb3V0cHV0X3Rva2VucxgCIAEoAxIdChVjYWNoZV9jcmVhdGlvbl90b2tlbnMYAyABKAMSGQoRY2FjaGVfcmVhZF90b2tlbnMYBCABKAMSDQoFbW9kZWwYBSABKAkSEAoIcHJvdmlkZXIYBiABKAkSEAoIY29zdF91c2QYByABKAEirwEKCkFja1JlcXVlc3QSDgoGam9iX2lkGAEgASgJEhMKC3Jlc3VsdF9qc29uGAIgASgJEiQKBXVzYWdlGAMgASgLMhUuY29ydm8udjEuVXNhZ2VSZXBvcnQSFwoPY2hlY2twb2ludF9qc29uGAQgASgJEhQKDGFnZW50X3N0YXR1cxgFIAEoCRITCgtob2xkX3JlYXNvbhgGIAEoCRISCgp0cmFjZV9qc29uGAcgASgJIg0KC0Fja1Jlc3BvbnNlIlkKDEFja0JhdGNoSXRlbRIOCgZqb2JfaWQYASABKAkSEwoLcmVzdWx0X2pzb24YAiABKAkSJAoFdXNhZ2UYAyABKAsyFS5jb3J2by52MS5Vc2FnZVJlcG9ydCI4Cg9BY2tCYXRjaFJlcXVlc3QSJQoFaXRlbXMYASADKAsyFi5jb3J2by52MS5BY2tCYXRjaEl0ZW0iIQoQQWNrQmF0Y2hSZXNwb25zZRINCgVhY2tlZBgB
IAEoBSLmAQoWTGlmZWN5Y2xlU3RyZWFtUmVxdWVzdBISCgpyZXF1ZXN0X2lkGAEgASgEEg4KBnF1ZXVlcxgCIAMoCRIRCgl3b3JrZXJfaWQYAyABKAkSEAoIaG9zdG5hbWUYBCABKAkSFgoObGVhc2VfZHVyYXRpb24YBSABKAUSEwoLZmV0Y2hfY291bnQYBiABKAUSJAoEYWNrcxgHIAMoCzIWLmNvcnZvLnYxLkFja0JhdGNoSXRlbRIwCghlbnF1ZXVlcxgIIAMoCzIeLmNvcnZvLnYxLkxpZmVjeWNsZUVucXVldWVJdGVtImEKFExpZmVjeWNsZUVucXVldWVJdGVtEg0KBXF1ZXVlGAEgASgJEhQKDHBheWxvYWRfanNvbhgCIAEoCRIkCgVhZ2VudBgDIAEoCzIVLmNvcnZvLnYxLkFnZW50Q29uZmlnIlYKC0FnZW50Q29uZmlnEhYKDm1heF9pdGVyYXRpb25zGAEgASgFEhQKDG1heF9jb3N0X3VzZBgCIAEoARIZChFpdGVyYXRpb25fdGltZW91dBgDIAEoCSKAAQoKQWdlbnRTdGF0ZRIWCg5tYXhfaXRlcmF0aW9ucxgBIAEoBRIUCgxtYXhfY29zdF91c2QYAiABKAESGQoRaXRlcmF0aW9uX3RpbWVvdXQYAyABKAkSEQoJaXRlcmF0aW9uGAQgASgFEhYKDnRvdGFsX2Nvc3RfdXNkGAUgASgBIqEBChdMaWZlY3ljbGVTdHJlYW1SZXNwb25zZRISCgpyZXF1ZXN0X2lkGAEgASgEEiUKBGpvYnMYAiADKAsyFy5jb3J2by52MS5GZXRjaEJhdGNoSm9iEg0KBWFja2VkGAMgASgFEg0KBWVycm9yGAQgASgJEhgKEGVucXVldWVkX2pvYl9pZHMYBSADKAkSEwoLbGVhZGVyX2FkZHIYBiABKAkiPwoLRmFpbFJlcXVlc3QSDgoGam9iX2lkGAEgASgJEg0KBWVycm9yGAIgASgJEhEKCWJhY2t0cmFjZRgDIAEoCSJvCgxGYWlsUmVzcG9uc2USDgoGc3RhdHVzGAEgASgJEjMKD25leHRfYXR0ZW1wdF9hdBgCIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASGgoSYXR0ZW1wdHNfcmVtYWluaW5nGAMgASgFIoABChJIZWFydGJlYXRKb2JVcGRhdGUSFQoNcHJvZ3Jlc3NfanNvbhgBIAEoCRIXCg9jaGVja3BvaW50X2pzb24YAiABKAkSJAoFdXNhZ2UYAyABKAsyFS5jb3J2by52MS5Vc2FnZVJlcG9ydBIUCgxzdHJlYW1fZGVsdGEYBCABKAkikQEKEEhlYXJ0YmVhdFJlcXVlc3QSMgoEam9icxgBIAMoCzIkLmNvcnZvLnYxLkhlYXJ0YmVhdFJlcXVlc3QuSm9ic0VudHJ5GkkKCUpvYnNFbnRyeRILCgNrZXkYASABKAkSKwoFdmFsdWUYAiABKAsyHC5jb3J2by52MS5IZWFydGJlYXRKb2JVcGRhdGU6AjgBIiYKFEhlYXJ0YmVhdEpvYlJlc3BvbnNlEg4KBnN0YXR1cxgBIAEoCSKVAQoRSGVhcnRiZWF0UmVzcG9uc2USMwoEam9icxgBIAMoCzIlLmNvcnZvLnYxLkhlYXJ0YmVhdFJlc3BvbnNlLkpvYnNFbnRyeRpLCglKb2JzRW50cnkSCwoDa2V5GAEgASgJEi0KBXZhbHVlGAIgASgLMh4uY29ydm8udjEuSGVhcnRiZWF0Sm9iUmVzcG9uc2U6AjgBMrIECg1Xb3JrZXJTZXJ2aWNlEkAKB0VucXVldWUSGC5jb3J2by52MS5FbnF1ZXVlUmVxdWVzdBoZLmNvcnZvLnYxLkVucXVldWVSZXNwb25zZSIAEjoKBUZldGNoEhYuY29ydm8udjEuRmV0Y2hSZXF1ZXN0GhcuY29ydm8udjEuRmV0
Y2hSZXNwb25zZSIAEkkKCkZldGNoQmF0Y2gSGy5jb3J2by52MS5GZXRjaEJhdGNoUmVxdWVzdBocLmNvcnZvLnYxLkZldGNoQmF0Y2hSZXNwb25zZSIAEjQKA0FjaxIULmNvcnZvLnYxLkFja1JlcXVlc3QaFS5jb3J2by52MS5BY2tSZXNwb25zZSIAEkMKCEFja0JhdGNoEhkuY29ydm8udjEuQWNrQmF0Y2hSZXF1ZXN0GhouY29ydm8udjEuQWNrQmF0Y2hSZXNwb25zZSIAElwKD1N0cmVhbUxpZmVjeWNsZRIgLmNvcnZvLnYxLkxpZmVjeWNsZVN0cmVhbVJlcXVlc3QaIS5jb3J2by52MS5MaWZlY3ljbGVTdHJlYW1SZXNwb25zZSIAKAEwARI3CgRGYWlsEhUuY29ydm8udjEuRmFpbFJlcXVlc3QaFi5jb3J2by52MS5GYWlsUmVzcG9uc2UiABJGCglIZWFydGJlYXQSGi5jb3J2by52MS5IZWFydGJlYXRSZXF1ZXN0GhsuY29ydm8udjEuSGVhcnRiZWF0UmVzcG9uc2UiAGIGcHJvdG8z", [file_google_protobuf_timestamp]);
|
|
10
|
+
/**
|
|
11
|
+
* Describes the message corvo.v1.EnqueueRequest.
|
|
12
|
+
* Use `create(EnqueueRequestSchema)` to create a new message.
|
|
13
|
+
*/
|
|
14
|
+
export const EnqueueRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 0);
|
|
15
|
+
/**
|
|
16
|
+
* Describes the message corvo.v1.EnqueueResponse.
|
|
17
|
+
* Use `create(EnqueueResponseSchema)` to create a new message.
|
|
18
|
+
*/
|
|
19
|
+
export const EnqueueResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 1);
|
|
20
|
+
/**
|
|
21
|
+
* Describes the message corvo.v1.FetchRequest.
|
|
22
|
+
* Use `create(FetchRequestSchema)` to create a new message.
|
|
23
|
+
*/
|
|
24
|
+
export const FetchRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 2);
|
|
25
|
+
/**
|
|
26
|
+
* Describes the message corvo.v1.FetchResponse.
|
|
27
|
+
* Use `create(FetchResponseSchema)` to create a new message.
|
|
28
|
+
*/
|
|
29
|
+
export const FetchResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 3);
|
|
30
|
+
/**
|
|
31
|
+
* Describes the message corvo.v1.FetchBatchRequest.
|
|
32
|
+
* Use `create(FetchBatchRequestSchema)` to create a new message.
|
|
33
|
+
*/
|
|
34
|
+
export const FetchBatchRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 4);
|
|
35
|
+
/**
|
|
36
|
+
* Describes the message corvo.v1.FetchBatchJob.
|
|
37
|
+
* Use `create(FetchBatchJobSchema)` to create a new message.
|
|
38
|
+
*/
|
|
39
|
+
export const FetchBatchJobSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 5);
|
|
40
|
+
/**
|
|
41
|
+
* Describes the message corvo.v1.FetchBatchResponse.
|
|
42
|
+
* Use `create(FetchBatchResponseSchema)` to create a new message.
|
|
43
|
+
*/
|
|
44
|
+
export const FetchBatchResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 6);
|
|
45
|
+
/**
|
|
46
|
+
* Describes the message corvo.v1.UsageReport.
|
|
47
|
+
* Use `create(UsageReportSchema)` to create a new message.
|
|
48
|
+
*/
|
|
49
|
+
export const UsageReportSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 7);
|
|
50
|
+
/**
|
|
51
|
+
* Describes the message corvo.v1.AckRequest.
|
|
52
|
+
* Use `create(AckRequestSchema)` to create a new message.
|
|
53
|
+
*/
|
|
54
|
+
export const AckRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 8);
|
|
55
|
+
/**
|
|
56
|
+
* Describes the message corvo.v1.AckResponse.
|
|
57
|
+
* Use `create(AckResponseSchema)` to create a new message.
|
|
58
|
+
*/
|
|
59
|
+
export const AckResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 9);
|
|
60
|
+
/**
|
|
61
|
+
* Describes the message corvo.v1.AckBatchItem.
|
|
62
|
+
* Use `create(AckBatchItemSchema)` to create a new message.
|
|
63
|
+
*/
|
|
64
|
+
export const AckBatchItemSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 10);
|
|
65
|
+
/**
|
|
66
|
+
* Describes the message corvo.v1.AckBatchRequest.
|
|
67
|
+
* Use `create(AckBatchRequestSchema)` to create a new message.
|
|
68
|
+
*/
|
|
69
|
+
export const AckBatchRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 11);
|
|
70
|
+
/**
|
|
71
|
+
* Describes the message corvo.v1.AckBatchResponse.
|
|
72
|
+
* Use `create(AckBatchResponseSchema)` to create a new message.
|
|
73
|
+
*/
|
|
74
|
+
export const AckBatchResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 12);
|
|
75
|
+
/**
|
|
76
|
+
* Describes the message corvo.v1.LifecycleStreamRequest.
|
|
77
|
+
* Use `create(LifecycleStreamRequestSchema)` to create a new message.
|
|
78
|
+
*/
|
|
79
|
+
export const LifecycleStreamRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 13);
|
|
80
|
+
/**
|
|
81
|
+
* Describes the message corvo.v1.LifecycleEnqueueItem.
|
|
82
|
+
* Use `create(LifecycleEnqueueItemSchema)` to create a new message.
|
|
83
|
+
*/
|
|
84
|
+
export const LifecycleEnqueueItemSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 14);
|
|
85
|
+
/**
|
|
86
|
+
* Describes the message corvo.v1.AgentConfig.
|
|
87
|
+
* Use `create(AgentConfigSchema)` to create a new message.
|
|
88
|
+
*/
|
|
89
|
+
export const AgentConfigSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 15);
|
|
90
|
+
/**
|
|
91
|
+
* Describes the message corvo.v1.AgentState.
|
|
92
|
+
* Use `create(AgentStateSchema)` to create a new message.
|
|
93
|
+
*/
|
|
94
|
+
export const AgentStateSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 16);
|
|
95
|
+
/**
|
|
96
|
+
* Describes the message corvo.v1.LifecycleStreamResponse.
|
|
97
|
+
* Use `create(LifecycleStreamResponseSchema)` to create a new message.
|
|
98
|
+
*/
|
|
99
|
+
export const LifecycleStreamResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 17);
|
|
100
|
+
/**
|
|
101
|
+
* Describes the message corvo.v1.FailRequest.
|
|
102
|
+
* Use `create(FailRequestSchema)` to create a new message.
|
|
103
|
+
*/
|
|
104
|
+
export const FailRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 18);
|
|
105
|
+
/**
|
|
106
|
+
* Describes the message corvo.v1.FailResponse.
|
|
107
|
+
* Use `create(FailResponseSchema)` to create a new message.
|
|
108
|
+
*/
|
|
109
|
+
export const FailResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 19);
|
|
110
|
+
/**
|
|
111
|
+
* Describes the message corvo.v1.HeartbeatJobUpdate.
|
|
112
|
+
* Use `create(HeartbeatJobUpdateSchema)` to create a new message.
|
|
113
|
+
*/
|
|
114
|
+
export const HeartbeatJobUpdateSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 20);
|
|
115
|
+
/**
|
|
116
|
+
* Describes the message corvo.v1.HeartbeatRequest.
|
|
117
|
+
* Use `create(HeartbeatRequestSchema)` to create a new message.
|
|
118
|
+
*/
|
|
119
|
+
export const HeartbeatRequestSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 21);
|
|
120
|
+
/**
|
|
121
|
+
* Describes the message corvo.v1.HeartbeatJobResponse.
|
|
122
|
+
* Use `create(HeartbeatJobResponseSchema)` to create a new message.
|
|
123
|
+
*/
|
|
124
|
+
export const HeartbeatJobResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 22);
|
|
125
|
+
/**
|
|
126
|
+
* Describes the message corvo.v1.HeartbeatResponse.
|
|
127
|
+
* Use `create(HeartbeatResponseSchema)` to create a new message.
|
|
128
|
+
*/
|
|
129
|
+
export const HeartbeatResponseSchema = /*@__PURE__*/ messageDesc(file_corvo_v1_worker, 23);
|
|
130
|
+
/**
|
|
131
|
+
* @generated from service corvo.v1.WorkerService
|
|
132
|
+
*/
|
|
133
|
+
export const WorkerService = /*@__PURE__*/ serviceDesc(file_corvo_v1_worker, 0);
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { CorvoClient } from "@corvohq/client";
|
|
2
|
+
export { PayloadTooLargeError } from "@corvohq/client";
|
|
3
|
+
export { RpcClient, ResilientLifecycleStream, LifecycleStream, ErrNotLeader } from "./rpc.js";
|
|
4
|
+
export type FetchedJob = {
|
|
5
|
+
job_id: string;
|
|
6
|
+
queue: string;
|
|
7
|
+
payload: Record<string, unknown>;
|
|
8
|
+
attempt: number;
|
|
9
|
+
max_retries: number;
|
|
10
|
+
lease_duration: number;
|
|
11
|
+
checkpoint?: Record<string, unknown>;
|
|
12
|
+
tags?: Record<string, string>;
|
|
13
|
+
agent?: Record<string, unknown>;
|
|
14
|
+
};
|
|
15
|
+
export type WorkerHandler = (job: FetchedJob, ctx: WorkerJobContext) => Promise<void> | void;
|
|
16
|
+
export type WorkerConfig = {
|
|
17
|
+
queues: string[];
|
|
18
|
+
workerID: string;
|
|
19
|
+
hostname?: string;
|
|
20
|
+
concurrency?: number;
|
|
21
|
+
shutdownTimeoutMs?: number;
|
|
22
|
+
fetchBatchSize?: number;
|
|
23
|
+
ackBatchSize?: number;
|
|
24
|
+
useRpc?: boolean;
|
|
25
|
+
};
|
|
26
|
+
export type WorkerJobContext = {
|
|
27
|
+
isCancelled: () => boolean;
|
|
28
|
+
checkpoint: (checkpoint: Record<string, unknown>) => Promise<void>;
|
|
29
|
+
progress: (current: number, total: number, message: string) => Promise<void>;
|
|
30
|
+
};
|
|
31
|
+
export declare class CorvoWorker {
|
|
32
|
+
private readonly client;
|
|
33
|
+
private readonly cfg;
|
|
34
|
+
private readonly handlers;
|
|
35
|
+
private readonly active;
|
|
36
|
+
private stopping;
|
|
37
|
+
private readonly fetchBatchSize;
|
|
38
|
+
private readonly ackBatchSize;
|
|
39
|
+
constructor(client: CorvoClient, cfg: WorkerConfig);
|
|
40
|
+
register(queue: string, handler: WorkerHandler): void;
|
|
41
|
+
start(): Promise<void>;
|
|
42
|
+
stop(): Promise<void>;
|
|
43
|
+
private rpcFetchLoop;
|
|
44
|
+
private rpcHeartbeatLoop;
|
|
45
|
+
private makeJobContext;
|
|
46
|
+
private fetchLoop;
|
|
47
|
+
private flushAcks;
|
|
48
|
+
private heartbeatLoop;
|
|
49
|
+
private fetchBatch;
|
|
50
|
+
private ackBatch;
|
|
51
|
+
private fetch;
|
|
52
|
+
private fail;
|
|
53
|
+
private heartbeat;
|
|
54
|
+
private request;
|
|
55
|
+
}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
1
|
+
import { PayloadTooLargeError } from "@corvohq/client";
|
|
2
|
+
export { PayloadTooLargeError } from "@corvohq/client";
|
|
3
|
+
export { RpcClient, ResilientLifecycleStream, LifecycleStream, ErrNotLeader } from "./rpc.js";
|
|
4
|
+
export class CorvoWorker {
    /**
     * Job-queue worker that fetches jobs over HTTP (default) or a Connect RPC
     * lifecycle stream (`cfg.useRpc`), dispatches them to registered per-queue
     * handlers, and acknowledges/fails them against the Corvo server.
     *
     * @param {CorvoClient} client - configured Corvo client (supplies baseURL,
     *   auth headers, fetch implementation, and the `ack` call).
     * @param {WorkerConfig} cfg - worker configuration; missing optional
     *   fields are filled with defaults below.
     */
    constructor(client, cfg) {
        // queue name -> handler function
        this.handlers = new Map();
        // job_id -> { cancelled } for every in-flight job
        this.active = new Map();
        this.stopping = false;
        this.client = client;
        this.cfg = {
            ...cfg,
            hostname: cfg.hostname ?? "corvo-worker",
            concurrency: cfg.concurrency ?? 10,
            shutdownTimeoutMs: cfg.shutdownTimeoutMs ?? 30000,
            fetchBatchSize: cfg.fetchBatchSize ?? 1,
            ackBatchSize: cfg.ackBatchSize ?? 1,
            useRpc: cfg.useRpc ?? false,
        };
        this.fetchBatchSize = cfg.fetchBatchSize ?? 1;
        this.ackBatchSize = cfg.ackBatchSize ?? 1;
    }
    /** Register the handler invoked for jobs fetched from `queue`. */
    register(queue, handler) {
        this.handlers.set(queue, handler);
    }
    /**
     * Run the worker until `stop()` is called (or a fatal loop error).
     * Installs SIGINT/SIGTERM handlers that trigger graceful shutdown; the
     * handlers are removed even if a loop rejects (try/finally below fixes a
     * listener leak in the original).
     */
    async start() {
        const onSignal = () => {
            this.stop().catch(() => { });
        };
        if (typeof process !== "undefined") {
            process.on("SIGINT", onSignal);
            process.on("SIGTERM", onSignal);
        }
        try {
            if (this.cfg.useRpc) {
                const { ResilientLifecycleStream, RpcClient } = await import("./rpc.js");
                const rpcLoop = this.rpcFetchLoop(ResilientLifecycleStream, RpcClient);
                const heartbeat = this.rpcHeartbeatLoop(RpcClient);
                await Promise.race([rpcLoop, heartbeat]);
            }
            else {
                const loops = Array.from({ length: this.cfg.concurrency }, () => this.fetchLoop());
                const heartbeat = this.heartbeatLoop();
                await Promise.race([Promise.all(loops), heartbeat]);
            }
        }
        finally {
            if (typeof process !== "undefined") {
                process.off("SIGINT", onSignal);
                process.off("SIGTERM", onSignal);
            }
        }
    }
    /**
     * Graceful shutdown: wait up to `shutdownTimeoutMs` for in-flight jobs,
     * then fail whatever remains with "worker_shutdown" so the server can
     * requeue them. Each fail is best-effort — a network error for one job
     * must not abort shutdown of the rest.
     */
    async stop() {
        if (this.stopping)
            return;
        this.stopping = true;
        const deadline = Date.now() + this.cfg.shutdownTimeoutMs;
        while (this.active.size > 0 && Date.now() < deadline) {
            await sleep(100);
        }
        for (const [jobID] of this.active) {
            try {
                await this.fail(jobID, "worker_shutdown");
            }
            catch {
                // Best-effort: lease expiry will recover the job server-side.
            }
        }
    }
    // -------------------------------------------------------------------------
    // RPC fetch loop — single bidi stream replaces N HTTP fetch loops
    // -------------------------------------------------------------------------
    async rpcFetchLoop(ResilientLifecycleStreamCtor, _RpcClientCtor) {
        const stream = new ResilientLifecycleStreamCtor(this.client.baseURL, this.client.auth);
        const pendingAcks = [];
        const pendingEnqueues = [];
        let requestId = 0;
        const jobPromises = new Set();
        try {
            while (!this.stopping) {
                // Drain pending acks/enqueues OUTSIDE the try so a failed
                // exchange can restore them. The original spliced them inside
                // the try and dropped them on error, losing acknowledgements
                // for completed jobs (which the server would then redeliver).
                const acks = pendingAcks.splice(0);
                const enqueues = pendingEnqueues.splice(0);
                try {
                    const fetchCount = this.cfg.concurrency - this.active.size;
                    requestId++;
                    const resp = await stream.exchange({
                        requestId,
                        queues: this.cfg.queues,
                        workerId: this.cfg.workerID,
                        hostname: this.cfg.hostname,
                        leaseDuration: 30,
                        fetchCount: fetchCount > 0 ? fetchCount : 0,
                        acks: acks.map((a) => ({
                            jobId: a.jobId,
                            resultJson: a.resultJson,
                        })),
                        enqueues: enqueues.map((e) => ({
                            queue: e.queue,
                            payloadJson: e.payloadJson,
                        })),
                    });
                    // Dispatch fetched jobs
                    for (const protoJob of resp.jobs) {
                        const job = protoJobToFetched(protoJob);
                        const handler = this.handlers.get(job.queue);
                        if (!handler) {
                            // No handler registered: ack immediately so the job
                            // does not sit on lease until expiry.
                            pendingAcks.push({ jobId: job.job_id, resultJson: "{}" });
                            continue;
                        }
                        this.active.set(job.job_id, { cancelled: false });
                        const ctx = this.makeJobContext(job.job_id);
                        const p = (async () => {
                            try {
                                await handler(job, ctx);
                                pendingAcks.push({ jobId: job.job_id, resultJson: "{}" });
                            }
                            catch (err) {
                                const message = err instanceof Error ? err.message : String(err);
                                await this.fail(job.job_id, message);
                            }
                            finally {
                                this.active.delete(job.job_id);
                            }
                        })();
                        jobPromises.add(p);
                        p.finally(() => jobPromises.delete(p));
                    }
                    // If at capacity, wait for at least one job to finish
                    if (this.active.size >= this.cfg.concurrency && jobPromises.size > 0) {
                        await Promise.race(jobPromises);
                    }
                }
                catch {
                    // Re-queue the drained acks/enqueues for the next exchange.
                    // NOTE(review): assumes the server tolerates a duplicate ack
                    // when the failed exchange was partially processed — confirm.
                    pendingAcks.unshift(...acks);
                    pendingEnqueues.unshift(...enqueues);
                    if (!this.stopping)
                        await sleep(1000);
                }
            }
        }
        finally {
            // Wait for in-flight jobs
            if (jobPromises.size > 0) {
                await Promise.allSettled(jobPromises);
            }
            stream.close();
        }
    }
    /**
     * Periodic (15s) RPC heartbeat for active jobs; marks jobs cancelled when
     * the server replies with status "cancel". Best-effort: errors are ignored.
     */
    async rpcHeartbeatLoop(RpcClientCtor) {
        const rpcClient = new RpcClientCtor(this.client.baseURL, this.client.auth);
        while (!this.stopping) {
            await sleep(15000);
            if (this.stopping || this.active.size === 0)
                continue;
            const jobs = {};
            for (const [jobID] of this.active) {
                jobs[jobID] = { progressJson: "", checkpointJson: "", streamDelta: "" };
            }
            try {
                const result = await rpcClient.heartbeat(jobs);
                for (const [jobID, info] of Object.entries(result)) {
                    if (info.status === "cancel") {
                        const state = this.active.get(jobID);
                        if (state)
                            state.cancelled = true;
                    }
                }
            }
            catch {
                // Best-effort heartbeat.
            }
        }
    }
    /**
     * Build the per-job context handed to handlers: cancellation probe plus
     * checkpoint/progress reporting via the HTTP heartbeat endpoint.
     */
    makeJobContext(jobId) {
        return {
            isCancelled: () => this.active.get(jobId)?.cancelled === true,
            checkpoint: async (checkpoint) => {
                await this.heartbeat({ [jobId]: { checkpoint } });
            },
            progress: async (current, total, message) => {
                await this.heartbeat({
                    [jobId]: { progress: { current, total, message } },
                });
            },
        };
    }
    // -------------------------------------------------------------------------
    // HTTP fetch loop (original)
    // -------------------------------------------------------------------------
    async fetchLoop() {
        const ackBuffer = [];
        while (!this.stopping) {
            try {
                await this.flushAcks(ackBuffer);
                let jobs;
                if (this.fetchBatchSize > 1) {
                    jobs = (await this.fetchBatch()) ?? [];
                }
                else {
                    const job = await this.fetch();
                    jobs = job ? [job] : [];
                }
                for (const job of jobs) {
                    const handler = this.handlers.get(job.queue);
                    if (!handler) {
                        ackBuffer.push({ job_id: job.job_id });
                        continue;
                    }
                    this.active.set(job.job_id, { cancelled: false });
                    // Reuse makeJobContext instead of duplicating it inline
                    // (the original repeated the same three closures here).
                    const ctx = this.makeJobContext(job.job_id);
                    try {
                        await handler(job, ctx);
                        ackBuffer.push({ job_id: job.job_id });
                    }
                    catch (err) {
                        const message = err instanceof Error ? err.message : String(err);
                        await this.fail(job.job_id, message);
                    }
                    finally {
                        this.active.delete(job.job_id);
                    }
                }
                await this.flushAcks(ackBuffer);
            }
            catch {
                await sleep(1000);
            }
        }
        // Flush remaining acks on shutdown.
        await this.flushAcks(ackBuffer, true);
    }
    /**
     * Flush buffered acks. Batch mode sends full batches (plus a final partial
     * batch when `force`). Single-ack mode removes each item only after its
     * ack succeeds, so a failure partway through no longer re-acks jobs that
     * were already acknowledged (the original cleared the buffer only after
     * the whole loop completed).
     */
    async flushAcks(buffer, force = false) {
        if (this.ackBatchSize > 1) {
            while (buffer.length >= this.ackBatchSize) {
                const batch = buffer.splice(0, this.ackBatchSize);
                await this.ackBatch(batch);
            }
            if (force && buffer.length > 0) {
                await this.ackBatch(buffer.splice(0));
            }
            return;
        }
        while (buffer.length > 0) {
            const item = buffer[0];
            await this.client.ack(item.job_id, item.result ?? {});
            buffer.shift();
        }
    }
    /** Periodic (15s) HTTP heartbeat; mirrors rpcHeartbeatLoop. Best-effort. */
    async heartbeatLoop() {
        while (!this.stopping) {
            await sleep(15000);
            if (this.stopping || this.active.size === 0)
                continue;
            const jobs = {};
            for (const [jobID] of this.active)
                jobs[jobID] = {};
            try {
                const result = await this.heartbeat(jobs);
                for (const [jobID, info] of Object.entries(result.jobs || {})) {
                    if (info.status === "cancel") {
                        const state = this.active.get(jobID);
                        if (state)
                            state.cancelled = true;
                    }
                }
            }
            catch {
                // Best-effort heartbeat.
            }
        }
    }
    /** Fetch up to `fetchBatchSize` jobs in one HTTP call. */
    async fetchBatch() {
        const result = await this.request("/api/v1/fetch/batch", {
            method: "POST",
            body: JSON.stringify({
                queues: this.cfg.queues,
                worker_id: this.cfg.workerID,
                hostname: this.cfg.hostname,
                timeout: 30,
                count: this.fetchBatchSize,
            }),
        });
        return result.jobs ?? [];
    }
    /** Acknowledge a batch of completed jobs in one HTTP call. */
    async ackBatch(acks) {
        await this.request("/api/v1/ack/batch", {
            method: "POST",
            body: JSON.stringify({ acks }),
        });
    }
    /** Long-poll fetch of a single job; returns null when none was available. */
    async fetch() {
        const job = await this.request("/api/v1/fetch", {
            method: "POST",
            body: JSON.stringify({
                queues: this.cfg.queues,
                worker_id: this.cfg.workerID,
                hostname: this.cfg.hostname,
                timeout: 30,
            }),
        });
        const j = job;
        if (!j.job_id)
            return null;
        return j;
    }
    /**
     * Report a job failure. In RPC mode tries the RPC endpoint first and falls
     * back to HTTP on any error.
     */
    async fail(jobID, error, backtrace = "") {
        if (this.cfg.useRpc) {
            try {
                const { RpcClient } = await import("./rpc.js");
                const rpc = new RpcClient(this.client.baseURL, this.client.auth);
                await rpc.fail(jobID, error, backtrace);
                return;
            }
            catch {
                // Fall through to HTTP
            }
        }
        await this.request(`/api/v1/fail/${encodeURIComponent(jobID)}`, {
            method: "POST",
            body: JSON.stringify({ error, backtrace }),
        });
    }
    /** Send a heartbeat for the given jobs map over HTTP. */
    async heartbeat(jobs) {
        return this.request("/api/v1/heartbeat", {
            method: "POST",
            body: JSON.stringify({ jobs }),
        });
    }
    /**
     * Authenticated JSON request helper. Throws PayloadTooLargeError when the
     * server reports code PAYLOAD_TOO_LARGE, otherwise a generic Error with
     * the server-provided message (or the HTTP status).
     */
    async request(path, init) {
        const authHeaders = await this.client.authHeaders();
        const res = await this.client.fetchImpl(this.client.baseURL + path, {
            ...init,
            headers: {
                "content-type": "application/json",
                ...authHeaders,
                ...(init.headers || {}),
            },
        });
        if (!res.ok) {
            let details = `HTTP ${res.status}`;
            let code = "";
            try {
                const body = (await res.json());
                if (body.error)
                    details = body.error;
                if (body.code)
                    code = body.code;
            }
            catch {
                // ignore
            }
            if (code === "PAYLOAD_TOO_LARGE")
                throw new PayloadTooLargeError(details);
            throw new Error(details);
        }
        if (res.status === 204)
            return {};
        return (await res.json());
    }
}
|
|
360
|
+
// ---------------------------------------------------------------------------
|
|
361
|
+
// Helpers
|
|
362
|
+
// ---------------------------------------------------------------------------
|
|
363
|
+
/**
 * Convert a proto-shaped FetchBatchJob (camelCase fields, JSON-encoded
 * payload/checkpoint/tags strings) into the worker-facing FetchedJob shape
 * (snake_case fields, parsed objects). Malformed JSON never throws: the
 * payload falls back to {} and checkpoint/tags stay undefined.
 */
function protoJobToFetched(j) {
    // Shared lenient parser: returns `fallback` for empty or invalid JSON.
    const parseOr = (text, fallback) => {
        if (!text)
            return fallback;
        try {
            return JSON.parse(text);
        }
        catch {
            return fallback;
        }
    };
    return {
        job_id: j.jobId,
        queue: j.queue,
        payload: parseOr(j.payloadJson, {}),
        attempt: j.attempt,
        max_retries: j.maxRetries,
        lease_duration: j.leaseDuration,
        checkpoint: parseOr(j.checkpointJson, undefined),
        tags: parseOr(j.tagsJson, undefined),
        agent: j.agent,
    };
}
|
|
403
|
+
/** Resolve after roughly `ms` milliseconds (setTimeout-based delay). */
function sleep(ms) {
    return new Promise((done) => {
        setTimeout(done, ms);
    });
}
|
package/dist/rpc.d.ts
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import type { AuthOptions } from "@corvohq/client";
|
|
2
|
+
import { type FetchBatchJob } from "./gen/corvo/v1/worker_pb.js";
|
|
3
|
+
export type { FetchBatchJob, HeartbeatJobUpdate } from "./gen/corvo/v1/worker_pb.js";
|
|
4
|
+
export declare class RpcClient {
|
|
5
|
+
private readonly client;
|
|
6
|
+
constructor(baseUrl: string, auth?: AuthOptions);
|
|
7
|
+
enqueue(queue: string, payloadJson: string): Promise<{
|
|
8
|
+
jobId: string;
|
|
9
|
+
status: string;
|
|
10
|
+
uniqueExisting: boolean;
|
|
11
|
+
}>;
|
|
12
|
+
fail(jobId: string, error: string, backtrace?: string): Promise<{
|
|
13
|
+
status: string;
|
|
14
|
+
}>;
|
|
15
|
+
heartbeat(jobs: Record<string, {
|
|
16
|
+
progressJson?: string;
|
|
17
|
+
checkpointJson?: string;
|
|
18
|
+
streamDelta?: string;
|
|
19
|
+
}>): Promise<Record<string, {
|
|
20
|
+
status: string;
|
|
21
|
+
}>>;
|
|
22
|
+
/** Unary call to StreamLifecycle endpoint (Connect JSON protocol). */
|
|
23
|
+
lifecycleExchange(body: Record<string, unknown>): Promise<LifecycleRawResponse>;
|
|
24
|
+
}
|
|
25
|
+
export interface AckBatchItem {
|
|
26
|
+
jobId: string;
|
|
27
|
+
resultJson: string;
|
|
28
|
+
}
|
|
29
|
+
export interface LifecycleEnqueueItem {
|
|
30
|
+
queue: string;
|
|
31
|
+
payloadJson: string;
|
|
32
|
+
}
|
|
33
|
+
export declare class ErrNotLeader extends Error {
|
|
34
|
+
readonly leaderAddr: string;
|
|
35
|
+
constructor(leaderAddr: string);
|
|
36
|
+
}
|
|
37
|
+
export type LifecycleRequest = {
|
|
38
|
+
requestId: number;
|
|
39
|
+
queues: string[];
|
|
40
|
+
workerId: string;
|
|
41
|
+
hostname: string;
|
|
42
|
+
leaseDuration: number;
|
|
43
|
+
fetchCount: number;
|
|
44
|
+
acks: AckBatchItem[];
|
|
45
|
+
enqueues: LifecycleEnqueueItem[];
|
|
46
|
+
};
|
|
47
|
+
export type LifecycleResponse = {
|
|
48
|
+
requestId: number;
|
|
49
|
+
jobs: FetchBatchJob[];
|
|
50
|
+
acked: number;
|
|
51
|
+
enqueuedJobIds: string[];
|
|
52
|
+
error: string;
|
|
53
|
+
leaderAddr: string;
|
|
54
|
+
};
|
|
55
|
+
interface LifecycleRawResponse {
|
|
56
|
+
requestId?: number;
|
|
57
|
+
jobs?: FetchBatchJob[];
|
|
58
|
+
acked?: number;
|
|
59
|
+
enqueuedJobIds?: string[];
|
|
60
|
+
error?: string;
|
|
61
|
+
leaderAddr?: string;
|
|
62
|
+
}
|
|
63
|
+
export declare class LifecycleStream {
|
|
64
|
+
private readonly baseUrl;
|
|
65
|
+
private readonly auth;
|
|
66
|
+
private closed;
|
|
67
|
+
constructor(baseUrl: string, auth?: AuthOptions);
|
|
68
|
+
exchange(req: LifecycleRequest): Promise<LifecycleResponse>;
|
|
69
|
+
close(): void;
|
|
70
|
+
}
|
|
71
|
+
export declare class ResilientLifecycleStream {
|
|
72
|
+
private baseUrl;
|
|
73
|
+
private readonly auth;
|
|
74
|
+
private stream;
|
|
75
|
+
constructor(baseUrl: string, auth?: AuthOptions);
|
|
76
|
+
exchange(req: LifecycleRequest): Promise<LifecycleResponse>;
|
|
77
|
+
close(): void;
|
|
78
|
+
}
|