@goatlab/tasks-adapter-hatchet 0.3.4 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/HatchetConnector.d.ts +140 -18
- package/dist/HatchetConnector.js +127 -48
- package/dist/HatchetConnector.js.map +1 -1
- package/dist/benchmark.d.ts +9 -0
- package/dist/benchmark.js +227 -0
- package/dist/benchmark.js.map +1 -0
- package/dist/hatchet.spec.js +81 -45
- package/dist/hatchet.spec.js.map +1 -1
- package/dist/index.d.ts +1 -0
- package/dist/index.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +3 -3
package/dist/HatchetConnector.d.ts
CHANGED
@@ -1,18 +1,128 @@
-import type { ShouldQueue, TaskConnector, TaskStatus } from '@goatlab/tasks-core';
+import type { ShouldQueue, TaskConnector, TaskStatus, TenantCredentials } from '@goatlab/tasks-core';
 import { Hatchet } from '@hatchet-dev/typescript-sdk';
+/**
+ * HatchetConnector - TaskConnector implementation for Hatchet.
+ *
+ * ## Implementation Notes
+ *
+ * ### Why we use `admin.runWorkflow()` instead of `task.runNoWait()`
+ *
+ * Both methods are fire-and-forget (they only wait for the run ID, not task completion).
+ * However, `task.runNoWait()` internally uses a global `parentRunContextManager` to track
+ * parent/child workflow relationships:
+ *
+ * ```js
+ * // Inside task.runNoWait() - declaration.js:46-47
+ * const parentRunContext = parentRunContextManager.getContext();
+ * parentRunContextManager.incrementChildIndex(...);
+ * ```
+ *
+ * This global state causes issues when queuing many tasks rapidly in parallel - concurrent
+ * calls interfere with each other through this shared state, potentially causing duplicate
+ * run IDs or other race conditions.
+ *
+ * By calling `admin.runWorkflow()` directly, we bypass:
+ * - The `parentRunContextManager` global state
+ * - The `childIndex` tracking
+ * - The `childKey` / `sticky` handling
+ *
+ * These features are designed for spawning child workflows from within a parent task,
+ * not for high-volume top-level task queuing.
+ *
+ * ### Eventual Consistency between gRPC and REST API
+ *
+ * Hatchet uses gRPC for `queue()` (triggerWorkflow) and REST API for `getStatus()` (runs.get).
+ * There can be a brief delay before a newly created run is visible via REST API.
+ * We handle this with retry logic in `getStatus()`.
+ *
+ * @see https://docs.hatchet.run/home/run-no-wait
+ * @see https://docs.hatchet.run/home/v1-sdk-improvements
+ */
+/**
+ * Configuration for HatchetConnector
+ */
+export interface HatchetConnectorConfig {
+    /**
+     * Hatchet API token for authentication.
+     */
+    token: string;
+    /**
+     * Hatchet gRPC host and port.
+     * Default: 'localhost:7077'
+     */
+    hostAndPort?: string;
+    /**
+     * Hatchet REST API URL.
+     * Default: 'http://localhost:8888'
+     */
+    apiUrl?: string;
+    /**
+     * Log level for Hatchet client.
+     * Default: 'INFO'
+     */
+    logLevel?: 'INFO' | 'OFF' | 'DEBUG' | 'WARN' | 'ERROR';
+    /**
+     * Hatchet's internal tenant ID (requires separate token per tenant).
+     * This is different from our multi-tenant isolation - use `tenantId` instead.
+     */
+    hatchetTenantId?: string;
+    /**
+     * Tenant ID for multi-tenant isolation using Hatchet namespaces.
+     * When set, this is used as the Hatchet namespace to isolate:
+     * - Workflows only trigger for this namespace
+     * - Events remain isolated to this namespace
+     * - Workers only process jobs from this namespace
+     *
+     * This allows multiple tenants to share the same Hatchet instance
+     * and token while maintaining isolation.
+     */
+    tenantId?: string;
+}
 export declare class HatchetConnector implements TaskConnector<object> {
     private readonly token;
     private readonly hostAndPort;
     private readonly apiUrl;
     private readonly logLevel;
-    private readonly
-
-
-
-
-
-
-
+    private readonly hatchetTenantId;
+    private readonly _tenantId?;
+    private readonly config;
+    private registeredWorkflows;
+    /**
+     * The tenant ID this connector is scoped to.
+     * Uses Hatchet namespaces for isolation within the same Hatchet instance.
+     */
+    get tenantId(): string | undefined;
+    /**
+     * The Hatchet namespace used for isolation.
+     * This is the same as tenantId when set.
+     */
+    get namespace(): string;
+    constructor(config: HatchetConnectorConfig);
+    /**
+     * Creates a new HatchetConnector instance scoped to a specific tenant.
+     * Uses Hatchet namespaces for isolation within the same Hatchet instance.
+     *
+     * @param tenantId - The tenant identifier for isolation (used as Hatchet namespace)
+     * @param _credentials - Not used for Hatchet (namespaces share the same token)
+     * @returns A new HatchetConnector instance scoped to the tenant
+     *
+     * @example
+     * ```typescript
+     * const baseConnector = new HatchetConnector({ token: 'my-token' })
+     *
+     * // Create tenant-scoped connector
+     * const tenantConnector = baseConnector.forTenant('acme-corp')
+     * // Namespace: acme-corp (workflows isolated to this namespace)
+     * ```
+     */
+    forTenant(tenantId: string, _credentials?: TenantCredentials): HatchetConnector;
+    /**
+     * Gets or creates a memoized Hatchet client.
+     *
+     * IMPORTANT: Due to memoization, each unique combination of config creates
+     * a separate client. This means tenant-scoped connectors get their own client
+     * with the correct namespace.
+     */
     getHatchetClient(): Hatchet;
     getHatchetTask(task: ShouldQueue): import("@hatchet-dev/typescript-sdk").TaskWorkflowDeclaration<import("@hatchet-dev/typescript-sdk").JsonObject, any>;
     startWorker({ workerName, tasks, slots }: {
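Reviewer note: the race described in the new class-level comment (shared `parentRunContextManager` state inside `task.runNoWait()`) is easiest to see in isolation. The sketch below is illustrative only and does not use the Hatchet SDK; it just shows how a module-level index read and written across an `await` hands out duplicate values once calls overlap, which is the failure mode the comment attributes to rapid parallel queuing.

```typescript
// Illustrative sketch, not Hatchet code: shared mutable module state plus an
// await between the read and the write is enough to produce duplicate indices.
let childIndex = 0;

async function queueWithSharedState(taskName: string): Promise<string> {
  const myIndex = childIndex;                 // read shared state
  await new Promise(r => setTimeout(r, 1));   // stand-in for a network round-trip
  childIndex = myIndex + 1;                   // write back after the await
  return `${taskName}-${myIndex}`;
}

async function demo() {
  const ids = await Promise.all(
    Array.from({ length: 5 }, () => queueWithSharedState('benchmark_task'))
  );
  // All five calls observed childIndex === 0, so every "run ID" collides.
  console.log(ids);
}

demo().catch(console.error);
```

Passing the run parameters straight to `admin.runWorkflow()` keeps each call self-contained, which is the route the connector now takes.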
@@ -21,18 +131,30 @@ export declare class HatchetConnector implements TaskConnector<object> {
         slots?: number;
     }): Promise<import("@hatchet-dev/typescript-sdk").Worker>;
     /**
-     * Gets the status of a task by its
-     *
-     *
+     * Gets the status of a task by its Hatchet run ID.
+     *
+     * Uses `runs.get()` which calls the REST API endpoint `/api/v1/stable/workflow-runs/{id}`.
+     * Includes retry logic (3 attempts with exponential backoff) to handle eventual
+     * consistency between gRPC (used by queue) and REST API (used here).
+     *
+     * The payload is double-nested in Hatchet's response: `run.input.input` contains
+     * the actual payload we passed to `queue()`.
+     *
+     * @param id - The Hatchet workflow run ID returned by `queue()`.
+     * @returns Full task status including payload, status, and metadata.
+     * @throws Error if the run cannot be found after all retry attempts.
     */
     getStatus(id: string): Promise<TaskStatus>;
     /**
-     * Queues a task to be run in the background.
-     *
-     *
-     *
-     *
-     *
+     * Queues a task to be run in the background (fire-and-forget).
+     *
+     * Uses `admin.runWorkflow()` directly instead of `task.runNoWait()` to avoid
+     * the global `parentRunContextManager` state that causes race conditions
+     * when queuing tasks rapidly. See class documentation for details.
+     *
+     * @param params.taskName - Name of the task (must match a registered workflow).
+     * @param params.taskBody - The payload to pass to the task handler.
+     * @returns Task status with unique run ID. Does NOT wait for task completion.
     */
     queue(params: any): Promise<Omit<TaskStatus, 'payload'>>;
 }
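Taken together, the new declarations describe a fire-and-forget flow: `queue()` resolves once a run ID exists, and `getStatus()` is the read side that may need a moment to catch up. A minimal usage sketch, assuming the connector is re-exported from the package root (the `index.d.ts` change suggests it) and that a `benchmark_task` workflow is registered on a running worker; token and hosts are placeholders:

```typescript
import { HatchetConnector } from '@goatlab/tasks-adapter-hatchet';

// Placeholder configuration; supply your own token/hosts.
const connector = new HatchetConnector({
  token: process.env.HATCHET_TOKEN ?? '',
  hostAndPort: 'localhost:7077',
  apiUrl: 'http://localhost:8888'
});

async function queueAndInspect() {
  // Fire-and-forget: resolves as soon as Hatchet hands back a run ID.
  const queued = await connector.queue({
    taskName: 'benchmark_task',
    taskBody: { index: 1 }
  });
  console.log(queued.id, queued.status); // => '<run id>' 'QUEUED'

  // The REST read side may briefly lag the gRPC write side; getStatus()
  // retries up to 3 times internally before rejecting.
  const status = await connector.getStatus(queued.id);
  console.log(status.status, status.payload); // payload comes from run.input.input
}

queueAndInspect().catch(console.error);
```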
package/dist/HatchetConnector.js
CHANGED
@@ -8,29 +8,81 @@ const typescript_sdk_1 = require("@hatchet-dev/typescript-sdk");
 const DEFAULT_HOST_PORT = 'localhost:7077';
 const DEFAULT_API_URL = 'http://localhost:8888';
 const DEFAULT_LOG_LEVEL = 'INFO';
-const
+const DEFAULT_HATCHET_TENANT_ID = '707d0855-80ab-4e1f-a156-f1c4546cbf52';
 class HatchetConnector {
     token;
     hostAndPort;
     apiUrl;
     logLevel;
-
-
-
-
-
-
-
+    hatchetTenantId;
+    _tenantId;
+    config;
+    // Store registered workflows by taskName for reuse in queue()
+    registeredWorkflows = new Map();
+    /**
+     * The tenant ID this connector is scoped to.
+     * Uses Hatchet namespaces for isolation within the same Hatchet instance.
+     */
+    get tenantId() {
+        return this._tenantId;
+    }
+    /**
+     * The Hatchet namespace used for isolation.
+     * This is the same as tenantId when set.
+     */
+    get namespace() {
+        return this._tenantId || '';
+    }
+    constructor(config) {
+        this.config = config;
+        this.token = config.token || '';
+        this.hostAndPort = config.hostAndPort || DEFAULT_HOST_PORT;
+        this.apiUrl = config.apiUrl || DEFAULT_API_URL;
+        this.logLevel = config.logLevel || DEFAULT_LOG_LEVEL;
+        this.hatchetTenantId = config.hatchetTenantId || '';
+        this._tenantId = config.tenantId;
+    }
+    /**
+     * Creates a new HatchetConnector instance scoped to a specific tenant.
+     * Uses Hatchet namespaces for isolation within the same Hatchet instance.
+     *
+     * @param tenantId - The tenant identifier for isolation (used as Hatchet namespace)
+     * @param _credentials - Not used for Hatchet (namespaces share the same token)
+     * @returns A new HatchetConnector instance scoped to the tenant
+     *
+     * @example
+     * ```typescript
+     * const baseConnector = new HatchetConnector({ token: 'my-token' })
+     *
+     * // Create tenant-scoped connector
+     * const tenantConnector = baseConnector.forTenant('acme-corp')
+     * // Namespace: acme-corp (workflows isolated to this namespace)
+     * ```
+     */
+    forTenant(tenantId, _credentials) {
+        return new HatchetConnector({
+            ...this.config,
+            tenantId
+        });
     }
+    /**
+     * Gets or creates a memoized Hatchet client.
+     *
+     * IMPORTANT: Due to memoization, each unique combination of config creates
+     * a separate client. This means tenant-scoped connectors get their own client
+     * with the correct namespace.
+     */
     getHatchetClient() {
         const hatchet = typescript_sdk_1.Hatchet.init({
             token: this.token,
             host_port: this.hostAndPort,
             api_url: this.apiUrl,
             log_level: this.logLevel,
-            //
-            tenant_id: this.
-            namespace
+            // Hatchet's internal tenant ID (requires separate token)
+            tenant_id: this.hatchetTenantId || DEFAULT_HATCHET_TENANT_ID,
+            // Use tenantId as namespace for multi-tenant isolation
+            // This prefixes workflows and isolates events/workers
+            namespace: this._tenantId || '',
             tls_config: {
                 tls_strategy: 'none'
             }
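A small sketch of what the namespace wiring above means in practice for tenant-scoped connectors; the token and tenant names are placeholders. Each `forTenant()` copy differs only in `tenantId`, so the memoized `getHatchetClient()` yields a separate client per namespace:

```typescript
import { HatchetConnector } from '@goatlab/tasks-adapter-hatchet';

// One base connector, one shared token (placeholder value).
const base = new HatchetConnector({ token: process.env.HATCHET_TOKEN ?? '' });

// Two tenants sharing the same Hatchet instance and token.
const acme = base.forTenant('acme-corp');
const globex = base.forTenant('globex');

console.log(acme.namespace);   // 'acme-corp'
console.log(globex.namespace); // 'globex'
console.log(base.namespace);   // '' (no namespace, no isolation)

// acme.getHatchetClient() and globex.getHatchetClient() are initialised with
// different `namespace` values, so their workflows, events and workers stay
// prefixed and isolated per tenant, while tenant_id stays the same.
```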
@@ -38,11 +90,13 @@ class HatchetConnector {
         return hatchet;
     }
     getHatchetTask(task) {
-
+        const hatchetTask = this.getHatchetClient().task({
             name: task.taskName,
             retries: task.retries || 3,
-            fn: task.handle.bind(
+            fn: task.handle.bind(task)
         });
+        this.registeredWorkflows.set(task.taskName, hatchetTask);
+        return hatchetTask;
     }
     async startWorker({ workerName, tasks, slots = 100 }) {
         // Pre-map workflows to avoid repeated processing
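For context, the registration path above is normally driven by a `ShouldQueue` task from `@goatlab/tasks-core`. The sketch below mirrors the `BenchmarkTask` defined in `benchmark.js` further down; the task name, payload shape and handler body are hypothetical, and the exact `handle()` signature expected by `tasks-core` is not shown in this diff:

```typescript
import { ShouldQueue } from '@goatlab/tasks-core';
import { HatchetConnector } from '@goatlab/tasks-adapter-hatchet';

// Hypothetical task, modelled on BenchmarkTask in benchmark.js.
class SendEmailTask extends ShouldQueue {
  postUrl = 'http://localhost/send-email';
  taskName = 'send_email';

  constructor(connector: HatchetConnector) {
    super({ connector });
  }

  async handle() {
    // real work would go here
    return undefined;
  }
}

async function bootWorker() {
  const connector = new HatchetConnector({ token: process.env.HATCHET_TOKEN ?? '' });
  const task = new SendEmailTask(connector);

  // startWorker() runs each task through getHatchetTask(), which now also
  // records it in registeredWorkflows, then starts a Hatchet worker.
  await connector.startWorker({
    workerName: 'email-worker',
    tasks: [task],
    slots: 100
  });

  await task.queue({ to: 'user@example.com' });
}

bootWorker().catch(console.error);
```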
@@ -59,51 +113,76 @@ class HatchetConnector {
         return worker;
     }
     /**
-     * Gets the status of a task by its
-     *
-     *
+     * Gets the status of a task by its Hatchet run ID.
+     *
+     * Uses `runs.get()` which calls the REST API endpoint `/api/v1/stable/workflow-runs/{id}`.
+     * Includes retry logic (3 attempts with exponential backoff) to handle eventual
+     * consistency between gRPC (used by queue) and REST API (used here).
+     *
+     * The payload is double-nested in Hatchet's response: `run.input.input` contains
+     * the actual payload we passed to `queue()`.
+     *
+     * @param id - The Hatchet workflow run ID returned by `queue()`.
+     * @returns Full task status including payload, status, and metadata.
+     * @throws Error if the run cannot be found after all retry attempts.
     */
     async getStatus(id) {
-        const
-
-        const
-
-
-
-
-
-
-
-
-
-
-
-
+        const hatchet = this.getHatchetClient();
+        const maxRetries = 3;
+        const retryDelay = 200; // ms, with exponential backoff
+        for (let attempt = 0; attempt < maxRetries; attempt++) {
+            try {
+                const details = await hatchet.runs.get(id);
+                const run = details.run;
+                const taskName = run?.displayName?.split('-')[0] || '';
+                // Payload is double-nested: run.input.input contains actual payload
+                const inputWrapper = run?.input;
+                const payload = inputWrapper?.input || {};
+                return {
+                    id,
+                    attempts: 0,
+                    payload,
+                    status: run?.status,
+                    created: run?.metadata?.createdAt || new Date().toISOString(),
+                    name: taskName,
+                    nextRun: null,
+                    nextRunMinutes: null,
+                    output: run?.output
+                };
+            }
+            catch (error) {
+                // Retry on 404 - eventual consistency between gRPC and REST API
+                if (error?.response?.status === 404 && attempt < maxRetries - 1) {
+                    await new Promise(r => setTimeout(r, retryDelay * (attempt + 1)));
+                    continue;
+                }
+                throw error;
+            }
+        }
+        throw new Error(`Failed to get status for ${id} after ${maxRetries} attempts`);
     }
     /**
-     * Queues a task to be run in the background.
-     *
-     *
-     *
-     *
-     *
+     * Queues a task to be run in the background (fire-and-forget).
+     *
+     * Uses `admin.runWorkflow()` directly instead of `task.runNoWait()` to avoid
+     * the global `parentRunContextManager` state that causes race conditions
+     * when queuing tasks rapidly. See class documentation for details.
+     *
+     * @param params.taskName - Name of the task (must match a registered workflow).
+     * @param params.taskBody - The payload to pass to the task handler.
+     * @returns Task status with unique run ID. Does NOT wait for task completion.
     */
     async queue(params) {
-        const hatchet = this.getHatchetClient()
-
-
-            fn: params.handle.bind(this)
-        });
-        const result = await hatchet.runNoWait(params.taskBody);
-        const taskId = await result.runId;
-        const now = new Date().toISOString();
+        const hatchet = this.getHatchetClient();
+        const ref = await hatchet.admin.runWorkflow(params.taskName, params.taskBody, {});
+        const runId = await ref.getWorkflowRunId();
         return {
-            id:
-            name:
+            id: runId,
+            name: ref._standaloneTaskName || params.taskName,
             output: '',
             attempts: 0,
             status: 'QUEUED',
-            created:
+            created: new Date().toISOString(),
             nextRun: null,
             nextRunMinutes: null
         };
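One detail worth flagging: the doc comment calls the `getStatus()` retry "exponential backoff", but `retryDelay * (attempt + 1)` grows linearly (200ms, 400ms, 600ms). Either way the retry shape is generic; here is a standalone sketch of it, with an illustrative helper name that is not part of the package API:

```typescript
// Sketch of the retry shape used by getStatus(): retry only while the REST API
// has not caught up (HTTP 404), with a delay that grows with the attempt number.
async function withRunVisibilityRetry<T>(
  fn: () => Promise<T>,
  maxRetries = 3,
  retryDelay = 200
): Promise<T> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      const notVisibleYet = error?.response?.status === 404;
      if (notVisibleYet && attempt < maxRetries - 1) {
        await new Promise(r => setTimeout(r, retryDelay * (attempt + 1)));
        continue;
      }
      throw error;
    }
  }
  throw new Error(`Gave up after ${maxRetries} attempts`);
}

// e.g. const details = await withRunVisibilityRetry(() => hatchet.runs.get(runId));
```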
package/dist/HatchetConnector.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"HatchetConnector.js","sourceRoot":"","sources":["../src/HatchetConnector.ts"],"names":[],"mappings":";;;;AAAA,gDAA6C;
+
{"version":3,"file":"HatchetConnector.js","sourceRoot":"","sources":["../src/HatchetConnector.ts"],"names":[],"mappings":";;;;AAAA,gDAA6C;AAO7C,gEAAqD;AAErD,kCAAkC;AAClC,MAAM,iBAAiB,GAAG,gBAAgB,CAAA;AAC1C,MAAM,eAAe,GAAG,uBAAuB,CAAA;AAC/C,MAAM,iBAAiB,GAAG,MAAM,CAAA;AAChC,MAAM,yBAAyB,GAAG,sCAAsC,CAAA;AAsFxE,MAAa,gBAAgB;IACV,KAAK,CAAQ;IACb,WAAW,CAAQ;IACnB,MAAM,CAAQ;IACd,QAAQ,CAA6C;IACrD,eAAe,CAAQ;IACvB,SAAS,CAAS;IAClB,MAAM,CAAwB;IAE/C,8DAA8D;IACtD,mBAAmB,GAAqB,IAAI,GAAG,EAAE,CAAA;IAEzD;;;OAGG;IACH,IAAW,QAAQ;QACjB,OAAO,IAAI,CAAC,SAAS,CAAA;IACvB,CAAC;IAED;;;OAGG;IACH,IAAW,SAAS;QAClB,OAAO,IAAI,CAAC,SAAS,IAAI,EAAE,CAAA;IAC7B,CAAC;IAED,YAAY,MAA8B;QACxC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAA;QACpB,IAAI,CAAC,KAAK,GAAG,MAAM,CAAC,KAAK,IAAI,EAAE,CAAA;QAC/B,IAAI,CAAC,WAAW,GAAG,MAAM,CAAC,WAAW,IAAI,iBAAiB,CAAA;QAC1D,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,eAAe,CAAA;QAC9C,IAAI,CAAC,QAAQ,GAAG,MAAM,CAAC,QAAQ,IAAI,iBAAiB,CAAA;QACpD,IAAI,CAAC,eAAe,GAAG,MAAM,CAAC,eAAe,IAAI,EAAE,CAAA;QACnD,IAAI,CAAC,SAAS,GAAG,MAAM,CAAC,QAAQ,CAAA;IAClC,CAAC;IAED;;;;;;;;;;;;;;;;OAgBG;IACH,SAAS,CACP,QAAgB,EAChB,YAAgC;QAEhC,OAAO,IAAI,gBAAgB,CAAC;YAC1B,GAAG,IAAI,CAAC,MAAM;YACd,QAAQ;SACT,CAAC,CAAA;IACJ,CAAC;IAED;;;;;;OAMG;IAEI,gBAAgB;QACrB,MAAM,OAAO,GAAG,wBAAO,CAAC,IAAI,CAAC;YAC3B,KAAK,EAAE,IAAI,CAAC,KAAK;YACjB,SAAS,EAAE,IAAI,CAAC,WAAW;YAC3B,OAAO,EAAE,IAAI,CAAC,MAAM;YACpB,SAAS,EAAE,IAAI,CAAC,QAAQ;YACxB,yDAAyD;YACzD,SAAS,EAAE,IAAI,CAAC,eAAe,IAAI,yBAAyB;YAC5D,uDAAuD;YACvD,sDAAsD;YACtD,SAAS,EAAE,IAAI,CAAC,SAAS,IAAI,EAAE;YAC/B,UAAU,EAAE;gBACV,YAAY,EAAE,MAAM;aACrB;SACF,CAAC,CAAA;QAEF,OAAO,OAAO,CAAA;IAChB,CAAC;IAED,cAAc,CAAC,IAAiB;QAC9B,MAAM,WAAW,GAAG,IAAI,CAAC,gBAAgB,EAAE,CAAC,IAAI,CAAC;YAC/C,IAAI,EAAE,IAAI,CAAC,QAAQ;YACnB,OAAO,EAAE,IAAI,CAAC,OAAO,IAAI,CAAC;YAC1B,EAAE,EAAE,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC;SAC3B,CAAC,CAAA;QAEF,IAAI,CAAC,mBAAmB,CAAC,GAAG,CAAC,IAAI,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAA;QACxD,OAAO,WAAW,CAAA;IACpB,CAAC;IAED,KAAK,CAAC,WAAW,CAAC,EAChB,UAAU,EACV,KAAK,EACL,KAAK,GAAG,GAAG,EAKZ;QACC,iDAAiD;QACjD,MAAM,SAAS,GAAG,KAAK,CAAC,GAAG,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,cAAc,CAAC,IAAI,CAAC,CAAC,CAAA;QAE9D,MAAM,MAAM,GAAG,MAAM,IAAI,CAAC,gBAAgB,EAAE,CAAC,MAAM,CACjD,GAAG,UAAU,IAAI,cAAG,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAChC;YACE,uDAAuD;YACvD,SAAS;YACT,sEAAsE;YACtE,KAAK;SACN,CACF,CAAA;QAED,KAAK,MAAM,CAAC,KAAK,EAAE,CAAA;QACnB,qCAAqC;QACrC,MAAM,IAAI,OAAO,CAAC,OAAO,CAAC,EAAE,CAAC,UAAU,CAAC,OAAO,EAAE,IAAI,CAAC,CAAC,CAAA;QACvD,OAAO,MAAM,CAAA;IACf,CAAC;IAED;;;;;;;;;;;;;OAaG;IACH,KAAK,CAAC,SAAS,CAAC,EAAU;QACxB,MAAM,OAAO,GAAG,IAAI,CAAC,gBAAgB,EAAE,CAAA;QACvC,MAAM,UAAU,GAAG,CAAC,CAAA;QACpB,MAAM,UAAU,GAAG,GAAG,CAAA,CAAC,+BAA+B;QAEtD,KAAK,IAAI,OAAO,GAAG,CAAC,EAAE,OAAO,GAAG,UAAU,EAAE,OAAO,EAAE,EAAE;YACrD,IAAI;gBACF,MAAM,OAAO,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,CAAA;gBAC1C,MAAM,GAAG,GAAG,OAAO,CAAC,GAAG,CAAA;gBAEvB,MAAM,QAAQ,GAAG,GAAG,EAAE,WAAW,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,EAAE,CAAA;gBACtD,oEAAoE;gBACpE,MAAM,YAAY,GAAG,GAAG,EAAE,KAAY,CAAA;gBACtC,MAAM,OAAO,GAAG,YAAY,EAAE,KAAK,IAAI,EAAE,CAAA;gBAEzC,OAAO;oBACL,EAAE;oBACF,QAAQ,EAAE,CAAC;oBACX,OAAO;oBACP,MAAM,EAAE,GAAG,EAAE,MAAa;oBAC1B,OAAO,EAAE,GAAG,EAAE,QAAQ,EAAE,SAAS,IAAI,IAAI,IAAI,EAAE,CAAC,WAAW,EAAE;oBAC7D,IAAI,EAAE,QAAQ;oBACd,OAAO,EAAE,IAAI;oBACb,cAAc,EAAE,IAAI;oBACpB,MAAM,EAAE,GAAG,EAAE,MAAa;iBAC3B,CAAA;aACF;YAAC,OAAO,KAAU,EAAE;gBACnB,gEAAgE;gBAChE,IAAI,KAAK,EAAE,QAAQ,EAAE,MAAM,KAAK,GAAG,IAAI,OAAO,GAAG,UAAU,GAAG,CAAC,EAAE;oBAC/D,MAAM,IAAI,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,UAAU,CAAC,CAAC,EAAE,UAAU,GAAG,CAAC,OAAO,GAAG,CAAC,CAAC,CAAC,CAAC,CAAA;oBACjE,SAAQ;iBACT;gBACD,MAAM,KAAK,CAAA;aACZ
;SACF;QAED,MAAM,IAAI,KAAK,CACb,4BAA4B,EAAE,UAAU,UAAU,WAAW,CAC9D,CAAA;IACH,CAAC;IAED;;;;;;;;;;OAUG;IACH,KAAK,CAAC,KAAK,CAAC,MAAW;QACrB,MAAM,OAAO,GAAG,IAAI,CAAC,gBAAgB,EAAE,CAAA;QACvC,MAAM,GAAG,GAAG,MAAM,OAAO,CAAC,KAAK,CAAC,WAAW,CACzC,MAAM,CAAC,QAAQ,EACf,MAAM,CAAC,QAAQ,EACf,EAAE,CACH,CAAA;QACD,MAAM,KAAK,GAAG,MAAM,GAAG,CAAC,gBAAgB,EAAE,CAAA;QAE1C,OAAO;YACL,EAAE,EAAE,KAAK;YACT,IAAI,EAAE,GAAG,CAAC,mBAAmB,IAAI,MAAM,CAAC,QAAQ;YAChD,MAAM,EAAE,EAAE;YACV,QAAQ,EAAE,CAAC;YACX,MAAM,EAAE,QAAQ;YAChB,OAAO,EAAE,IAAI,IAAI,EAAE,CAAC,WAAW,EAAE;YACjC,OAAO,EAAE,IAAI;YACb,cAAc,EAAE,IAAI;SACrB,CAAA;IACH,CAAC;CACF;AAjJC;IAAC,eAAI,CAAC,UAAU,EAAE;;;;wDAkBjB;AA1FH,4CAyNC"}
package/dist/benchmark.js
ADDED
@@ -0,0 +1,227 @@
+"use strict";
+/**
+ * Hatchet Benchmark Script
+ *
+ * Run with: npx tsx src/benchmark.ts
+ *
+ * Uses testcontainers to spin up Hatchet + Postgres automatically.
+ * Total runtime: ~45-60 seconds
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+const tasks_core_1 = require("@goatlab/tasks-core");
+const testcontainers_1 = require("testcontainers");
+const HatchetConnector_js_1 = require("./HatchetConnector.js");
+const hatchet_js_1 = require("./test/hatchet.js");
+const postgres_js_1 = require("./test/postgres.js");
+// Configuration - keep benchmarks short
+const QUEUE_BATCH_SIZE = 50; // Smaller for Hatchet gRPC
+const WARMUP_COUNT = 20;
+const BENCHMARK_DURATION_MS = 5000; // 5 seconds
+const E2E_TASK_COUNT = 200;
+const LATENCY_SAMPLES = 20;
+class BenchmarkTask extends tasks_core_1.ShouldQueue {
+    postUrl = 'http://localhost/benchmark';
+    taskName = 'benchmark_task';
+    constructor(connector) {
+        super({ connector });
+    }
+    async handle() {
+        return undefined;
+    }
+}
+async function runQueueBenchmark(connector, task) {
+    console.log('\n📊 Queue Throughput (queue-only)');
+    console.log('='.repeat(50));
+    // Start worker (required for Hatchet)
+    await connector.startWorker({
+        tasks: [task],
+        workerName: 'benchmark-worker',
+        slots: 100
+    });
+    await new Promise(r => setTimeout(r, 2000));
+    // Warmup
+    for (let i = 0; i < WARMUP_COUNT; i++) {
+        await task.queue({ index: i });
+    }
+    console.log(`Running for ${BENCHMARK_DURATION_MS / 1000}s...`);
+    let count = 0;
+    const startTime = Date.now();
+    const endTime = startTime + BENCHMARK_DURATION_MS;
+    while (Date.now() < endTime) {
+        const promises = [];
+        for (let i = 0; i < QUEUE_BATCH_SIZE; i++) {
+            promises.push(task.queue({ index: count++ }));
+        }
+        await Promise.all(promises);
+    }
+    const elapsed = Date.now() - startTime;
+    const throughput = (count / elapsed) * 1000;
+    console.log(`✅ Queued: ${count.toLocaleString()} tasks`);
+    console.log(`✅ Throughput: ${throughput.toFixed(0)} tasks/sec`);
+    return throughput;
+}
+async function runE2EBenchmark(connector, task) {
+    console.log('\n📊 End-to-End (queue + worker)');
+    console.log('='.repeat(50));
+    await connector.startWorker({
+        tasks: [task],
+        workerName: 'e2e-worker',
+        slots: 100
+    });
+    await new Promise(r => setTimeout(r, 2000));
+    console.log(`Queuing ${E2E_TASK_COUNT} tasks...`);
+    const start = Date.now();
+    const taskIds = [];
+    // Queue tasks
+    for (let i = 0; i < E2E_TASK_COUNT; i += QUEUE_BATCH_SIZE) {
+        const batch = [];
+        for (let j = 0; j < QUEUE_BATCH_SIZE && i + j < E2E_TASK_COUNT; j++) {
+            batch.push(task.queue({ index: i + j }));
+        }
+        const results = await Promise.all(batch);
+        taskIds.push(...results.map(r => r.id));
+    }
+    const queueTime = Date.now() - start;
+    // Wait for completion by sampling (max 20s)
+    const timeout = 20000;
+    let completed = 0;
+    const waitStart = Date.now();
+    while (completed < E2E_TASK_COUNT && Date.now() - waitStart < timeout) {
+        const sampleSize = Math.min(10, taskIds.length);
+        let sampleCompleted = 0;
+        for (let i = 0; i < sampleSize; i++) {
+            try {
+                const status = await task.getStatus(taskIds[i]);
+                if (status.status === 'COMPLETED' || status.status === 'FAILED') {
+                    sampleCompleted++;
+                }
+            }
+            catch {
+                // ignore
+            }
+        }
+        completed = Math.floor((sampleCompleted / sampleSize) * E2E_TASK_COUNT);
+        if (completed < E2E_TASK_COUNT) {
+            await new Promise(r => setTimeout(r, 500));
+        }
+    }
+    const totalTime = Date.now() - start;
+    const throughput = (completed / totalTime) * 1000;
+    console.log(`✅ Processed: ~${completed.toLocaleString()} tasks`);
+    console.log(`✅ Queue time: ${queueTime}ms`);
+    console.log(`✅ Total time: ${totalTime}ms`);
+    console.log(`✅ Throughput: ~${throughput.toFixed(0)} tasks/sec`);
+    return throughput;
+}
+async function runLatencyBenchmark(connector, task) {
+    console.log('\n📊 Latency (round-trip)');
+    console.log('='.repeat(50));
+    await connector.startWorker({
+        tasks: [task],
+        workerName: 'latency-worker',
+        slots: 10
+    });
+    await new Promise(r => setTimeout(r, 2000));
+    const latencies = [];
+    for (let i = 0; i < LATENCY_SAMPLES; i++) {
+        const start = Date.now();
+        const status = await task.queue({ index: i });
+        // Poll for completion (max 5s per task)
+        const timeout = Date.now() + 5000;
+        while (Date.now() < timeout) {
+            try {
+                const currentStatus = await task.getStatus(status.id);
+                if (currentStatus.status === 'COMPLETED' ||
+                    currentStatus.status === 'FAILED') {
+                    latencies.push(Date.now() - start);
+                    break;
+                }
+            }
+            catch {
+                // ignore
+            }
+            await new Promise(r => setTimeout(r, 50));
+        }
+    }
+    if (latencies.length === 0) {
+        console.log('❌ No tasks completed');
+        return { avg: 0, p50: 0, p95: 0, p99: 0 };
+    }
+    latencies.sort((a, b) => a - b);
+    const avg = latencies.reduce((a, b) => a + b, 0) / latencies.length;
+    const p50 = latencies[Math.floor(latencies.length * 0.5)];
+    const p95 = latencies[Math.floor(latencies.length * 0.95)];
+    const p99 = latencies[Math.floor(latencies.length * 0.99)];
+    console.log(`✅ Samples: ${latencies.length}`);
+    console.log(`✅ Avg: ${avg.toFixed(1)}ms | P50: ${p50}ms | P95: ${p95}ms | P99: ${p99}ms`);
+    return { avg, p50, p95, p99 };
+}
+async function main() {
+    console.log('🚀 Hatchet Benchmark');
+    console.log('Starting containers (Postgres + Hatchet)...');
+    const network = await new testcontainers_1.Network().start();
+    const postgresContainer = await (0, postgres_js_1.getPostgres)({ network }).start();
+    const connection = {
+        host: postgresContainer.getName().replace('/', ''),
+        port: postgresContainer.getMappedPort(5432),
+        database: postgresContainer.getDatabase(),
+        user: postgresContainer.getUsername(),
+        password: postgresContainer.getPassword()
+    };
+    const postgresUri = `postgresql://${connection.user}:${connection.password}@db:5432/${connection.database}`;
+    const hatchetContainer = await (0, hatchet_js_1.getHatchetContainer)({
+        postgresConnectionString: postgresUri,
+        network: network
+    }).start();
+    const cmd = await hatchetContainer.exec('/hatchet-admin token create --config /config --tenant-id 707d0855-80ab-4e1f-a156-f1c4546cbf52 | xargs');
+    const token = cmd.stdout.trim();
+    const hostAndPort = `localhost:${hatchetContainer.getMappedPort(7077)}`;
+    const apiUrl = `http://localhost:${hatchetContainer.getMappedPort(8888)}`;
+    console.log(`Hatchet gRPC: ${hostAndPort}`);
+    console.log(`Hatchet API: ${apiUrl}`);
+    try {
+        // Queue throughput test
+        const connector1 = new HatchetConnector_js_1.HatchetConnector({
+            token,
+            hostAndPort,
+            apiUrl,
+            logLevel: 'WARN'
+        });
+        const task1 = new BenchmarkTask(connector1);
+        const queueThroughput = await runQueueBenchmark(connector1, task1);
+        // E2E test
+        const connector2 = new HatchetConnector_js_1.HatchetConnector({
+            token,
+            hostAndPort,
+            apiUrl,
+            logLevel: 'WARN'
+        });
+        const task2 = new BenchmarkTask(connector2);
+        const e2eThroughput = await runE2EBenchmark(connector2, task2);
+        // Latency test
+        const connector3 = new HatchetConnector_js_1.HatchetConnector({
+            token,
+            hostAndPort,
+            apiUrl,
+            logLevel: 'WARN'
+        });
+        const task3 = new BenchmarkTask(connector3);
+        const latency = await runLatencyBenchmark(connector3, task3);
+        // Summary
+        console.log('\n' + '='.repeat(50));
+        console.log('📈 HATCHET SUMMARY');
+        console.log('='.repeat(50));
+        console.log(`Queue throughput: ${queueThroughput.toFixed(0)} tasks/sec`);
+        console.log(`E2E throughput: ~${e2eThroughput.toFixed(0)} tasks/sec`);
+        console.log(`Latency (avg): ${latency.avg.toFixed(1)}ms`);
+        console.log(`Latency (p95): ${latency.p95}ms`);
+    }
+    finally {
+        console.log('\nStopping containers...');
+        await hatchetContainer.stop();
+        await postgresContainer.stop();
+        await network.stop();
+    }
+}
+main().catch(console.error);
+//# sourceMappingURL=benchmark.js.map
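The latency summary above floor-indexes a sorted sample array for its percentiles. Restated as a small helper (the helper name is ours, not part of the package); note that with `LATENCY_SAMPLES = 20`, both p95 and p99 land on index 19, i.e. the slowest sample:

```typescript
// Same percentile math as runLatencyBenchmark(), factored into a helper.
function summarizeLatencies(samples: number[]) {
  if (samples.length === 0) {
    return { avg: 0, p50: 0, p95: 0, p99: 0 };
  }
  const sorted = [...samples].sort((a, b) => a - b);
  const avg = sorted.reduce((a, b) => a + b, 0) / sorted.length;
  const at = (q: number) =>
    sorted[Math.min(sorted.length - 1, Math.floor(sorted.length * q))];
  return { avg, p50: at(0.5), p95: at(0.95), p99: at(0.99) };
}

console.log(summarizeLatencies([12, 15, 18, 20, 250]));
// => { avg: 63, p50: 18, p95: 250, p99: 250 }
```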