@lobu/worker 3.0.9 → 3.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/openclaw/session-context.d.ts.map +1 -1
- package/dist/openclaw/session-context.js +1 -1
- package/dist/openclaw/session-context.js.map +1 -1
- package/package.json +10 -9
- package/USAGE.md +0 -120
- package/docs/custom-base-image.md +0 -88
- package/scripts/worker-entrypoint.sh +0 -184
- package/src/__tests__/audio-provider-suggestions.test.ts +0 -198
- package/src/__tests__/embedded-just-bash-bootstrap.test.ts +0 -39
- package/src/__tests__/embedded-tools.test.ts +0 -558
- package/src/__tests__/instructions.test.ts +0 -59
- package/src/__tests__/memory-flush-runtime.test.ts +0 -138
- package/src/__tests__/memory-flush.test.ts +0 -64
- package/src/__tests__/model-resolver.test.ts +0 -156
- package/src/__tests__/processor.test.ts +0 -225
- package/src/__tests__/setup.ts +0 -109
- package/src/__tests__/sse-client.test.ts +0 -48
- package/src/__tests__/tool-policy.test.ts +0 -269
- package/src/__tests__/worker.test.ts +0 -89
- package/src/core/error-handler.ts +0 -70
- package/src/core/project-scanner.ts +0 -65
- package/src/core/types.ts +0 -125
- package/src/core/url-utils.ts +0 -9
- package/src/core/workspace.ts +0 -138
- package/src/embedded/just-bash-bootstrap.ts +0 -228
- package/src/gateway/gateway-integration.ts +0 -287
- package/src/gateway/message-batcher.ts +0 -128
- package/src/gateway/sse-client.ts +0 -955
- package/src/gateway/types.ts +0 -68
- package/src/index.ts +0 -144
- package/src/instructions/builder.ts +0 -80
- package/src/instructions/providers.ts +0 -27
- package/src/modules/lifecycle.ts +0 -92
- package/src/openclaw/custom-tools.ts +0 -290
- package/src/openclaw/instructions.ts +0 -38
- package/src/openclaw/model-resolver.ts +0 -150
- package/src/openclaw/plugin-loader.ts +0 -427
- package/src/openclaw/processor.ts +0 -216
- package/src/openclaw/session-context.ts +0 -277
- package/src/openclaw/tool-policy.ts +0 -212
- package/src/openclaw/tools.ts +0 -208
- package/src/openclaw/worker.ts +0 -1792
- package/src/server.ts +0 -329
- package/src/shared/audio-provider-suggestions.ts +0 -132
- package/src/shared/processor-utils.ts +0 -33
- package/src/shared/provider-auth-hints.ts +0 -64
- package/src/shared/tool-display-config.ts +0 -75
- package/src/shared/tool-implementations.ts +0 -768
- package/tsconfig.json +0 -21
|
@@ -1,955 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* SSE client for receiving jobs from dispatcher
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
import { spawn } from "node:child_process";
|
|
6
|
-
import {
|
|
7
|
-
createChildSpan,
|
|
8
|
-
createLogger,
|
|
9
|
-
extractTraceId,
|
|
10
|
-
flushTracing,
|
|
11
|
-
SpanStatusCode,
|
|
12
|
-
} from "@lobu/core";
|
|
13
|
-
import { z } from "zod";
|
|
14
|
-
import type { WorkerConfig, WorkerExecutor } from "../core/types";
|
|
15
|
-
import { HttpWorkerTransport } from "./gateway-integration";
|
|
16
|
-
import { MessageBatcher } from "./message-batcher";
|
|
17
|
-
import type { MessagePayload, QueuedMessage } from "./types";
|
|
18
|
-
|
|
19
|
-
const logger = createLogger("sse-client");
|
|
20
|
-
|
|
21
|
-
type AbortControllerLike = {
|
|
22
|
-
abort(): void;
|
|
23
|
-
readonly signal: AbortSignal;
|
|
24
|
-
};
|
|
25
|
-
|
|
26
|
-
// --- Pending config change notifications ---
|
|
27
|
-
|
|
28
|
-
interface ConfigChangeEntry {
|
|
29
|
-
category: string;
|
|
30
|
-
action: string;
|
|
31
|
-
summary: string;
|
|
32
|
-
details?: string[];
|
|
33
|
-
}
|
|
34
|
-
|
|
35
|
-
const pendingConfigNotifications: ConfigChangeEntry[] = [];
|
|
36
|
-
|
|
37
|
-
/**
|
|
38
|
-
* Returns and clears all pending config change notifications.
|
|
39
|
-
* Called by the worker before building the next prompt.
|
|
40
|
-
*/
|
|
41
|
-
export function consumePendingConfigNotifications(): ConfigChangeEntry[] {
|
|
42
|
-
if (pendingConfigNotifications.length === 0) return [];
|
|
43
|
-
return pendingConfigNotifications.splice(0);
|
|
44
|
-
}
|
|
45
|
-
|
|
46
|
-
// Zod schemas for runtime validation of SSE event data

// "connected" handshake event: identifies which deployment this stream serves.
const ConnectedEventSchema = z.object({
  deploymentName: z.string(),
});

// PlatformMetadata has known fields plus string index signature
const PlatformMetadataSchema = z
  .object({
    team_id: z.string().optional(),
    channel: z.string().optional(),
    ts: z.string().optional(),
    thread_ts: z.string().optional(),
    files: z.array(z.any()).optional(),
  })
  .and(
    // The intersection with a record models the "extra platform-specific
    // keys allowed" index signature alongside the known fields above.
    z.record(
      z.string(),
      z.union([
        z.string(),
        z.number(),
        z.boolean(),
        z.array(z.any()),
        z.undefined(),
      ])
    )
  );

// AgentOptions has known fields plus arbitrary extra fields (including nested objects)
const AgentOptionsSchema = z
  .object({
    runtime: z.string().optional(),
    model: z.string().optional(),
    maxTokens: z.number().optional(),
    temperature: z.number().optional(),
    allowedTools: z.union([z.string(), z.array(z.string())]).optional(),
    disallowedTools: z.union([z.string(), z.array(z.string())]).optional(),
    timeoutMinutes: z.union([z.number(), z.string()]).optional(),
    // Additional settings passed through from gateway
    networkConfig: z.any().optional(),
    envVars: z.any().optional(),
  })
  // .passthrough() keeps unrecognized keys instead of stripping them.
  .passthrough();

// "job" event envelope: the payload the dispatcher routes to this worker.
const JobEventSchema = z.object({
  payload: z.object({
    botId: z.string(),
    userId: z.string(),
    agentId: z.string(),
    conversationId: z.string(),
    platform: z.string(),
    channelId: z.string(),
    messageId: z.string(),
    messageText: z.string(),
    platformMetadata: PlatformMetadataSchema,
    agentOptions: AgentOptionsSchema,
    jobId: z.string().optional(),
    teamId: z.string().optional(), // Optional for WhatsApp (top-level) and Slack (in platformMetadata)
  }),
  // NOTE(review): not read by the visible handlers — presumably message IDs
  // already handled upstream; verify against the dispatcher before relying on it.
  processedIds: z.array(z.string()).optional(),
});
|
|
106
|
-
|
|
107
|
-
/**
|
|
108
|
-
* Gateway client for workers - connects to dispatcher via SSE
|
|
109
|
-
* Receives jobs via SSE stream, sends responses via HTTP POST
|
|
110
|
-
*/
|
|
111
|
-
export class GatewayClient {
|
|
112
|
-
private dispatcherUrl: string; // Base URL of the dispatcher/gateway
private workerToken: string; // Bearer token sent on every request to the dispatcher
private userId: string; // User this worker serves; jobs for other users are dropped
private deploymentName: string; // Used for logging the connection identity
private isRunning = false; // Main-loop flag; cleared by stop() or reconnect exhaustion
private currentWorker: WorkerExecutor | null = null; // Worker executing the current job, if any
private abortController?: AbortControllerLike; // Aborts the active SSE fetch
private currentJobId?: string; // Job ID of the in-flight job (cleared on completion)
private currentTraceId?: string; // Trace ID for end-to-end observability
private currentTraceparent?: string; // W3C traceparent for distributed tracing
private reconnectAttempts = 0; // Reset to 0 on each successful connect
private maxReconnectAttempts = 10; // Give up (isRunning = false) once exceeded
private messageBatcher: MessageBatcher; // Coalesces queued messages into one combined prompt
private eventErrorCount = 0; // Failed event-handler invocations since last success
private eventErrorThreshold = 10; // Failures before cleanupOnEventError is triggered
private httpPort?: number; // Advertised to the dispatcher via ?httpPort= on the stream URL
|
|
128
|
-
|
|
129
|
-
/**
 * Builds a gateway client bound to one dispatcher and one user.
 * Seeds the trace ID from the environment and wires the message batcher
 * to the combined-message processor.
 */
constructor(
  dispatcherUrl: string,
  workerToken: string,
  userId: string,
  deploymentName: string,
  httpPort?: number
) {
  this.dispatcherUrl = dispatcherUrl;
  this.workerToken = workerToken;
  this.userId = userId;
  this.deploymentName = deploymentName;
  this.httpPort = httpPort;

  // Initial trace ID comes from the environment (set by deployment).
  this.currentTraceId = process.env.TRACE_ID;

  // Each ready batch is handed to processBatchedMessages.
  this.messageBatcher = new MessageBatcher({
    onBatchReady: (messages) => this.processBatchedMessages(messages),
  });

  logger.info(
    { traceId: this.currentTraceId, deploymentName },
    "Worker connected"
  );
}
|
|
155
|
-
|
|
156
|
-
async start(): Promise<void> {
|
|
157
|
-
this.isRunning = true;
|
|
158
|
-
|
|
159
|
-
while (this.isRunning) {
|
|
160
|
-
try {
|
|
161
|
-
await this.connectAndListen();
|
|
162
|
-
if (!this.isRunning) break;
|
|
163
|
-
await this.handleReconnect();
|
|
164
|
-
} catch (error) {
|
|
165
|
-
if (error instanceof Error && error.name === "AbortError") {
|
|
166
|
-
logger.info("SSE connection aborted");
|
|
167
|
-
break;
|
|
168
|
-
}
|
|
169
|
-
logger.error("SSE connection error:", error);
|
|
170
|
-
if (!this.isRunning) break;
|
|
171
|
-
await this.handleReconnect();
|
|
172
|
-
}
|
|
173
|
-
}
|
|
174
|
-
}
|
|
175
|
-
|
|
176
|
-
/**
 * Opens the SSE stream to the dispatcher and pumps events until the stream
 * ends, the client stops, or the connection is aborted.
 *
 * Events are dispatched fire-and-forget so a slow handler cannot stall the
 * read loop; repeated handler failures trigger cleanupOnEventError.
 *
 * Fixes vs. previous version:
 * - Multiple "data:" lines within one SSE event are now concatenated with
 *   "\n" per the SSE spec (previously only the last line survived).
 * - The stream reader's lock is released when the read loop exits.
 */
private async connectAndListen(): Promise<void> {
  // Abort previous controller before creating a new one
  if (this.abortController) {
    this.abortController.abort();
  }
  const abortController =
    new globalThis.AbortController() as AbortControllerLike;
  this.abortController = abortController;
  const streamUrl = this.httpPort
    ? `${this.dispatcherUrl}/worker/stream?httpPort=${this.httpPort}`
    : `${this.dispatcherUrl}/worker/stream`;

  logger.info(
    `Connecting to dispatcher at ${streamUrl} (attempt ${this.reconnectAttempts + 1})`
  );

  const response = await fetch(streamUrl, {
    method: "GET",
    headers: {
      Authorization: `Bearer ${this.workerToken}`,
      Accept: "text/event-stream",
    },
    signal: abortController.signal,
  });

  if (!response.ok) {
    throw new Error(
      `Failed to connect to dispatcher: ${response.status} ${response.statusText}`
    );
  }

  logger.info("✅ Connected to dispatcher via SSE");
  this.reconnectAttempts = 0;

  const reader = response.body?.getReader();
  const decoder = new TextDecoder();

  if (!reader) {
    throw new Error("No response body");
  }

  let buffer = "";

  logger.info("[SSE-CLIENT] 🔄 Starting SSE stream reading loop");

  try {
    while (this.isRunning) {
      const { done, value } = await reader.read();

      if (done) {
        logger.info("[SSE-CLIENT] SSE stream ended");
        break;
      }

      const chunk = decoder.decode(value, { stream: true });
      logger.debug(
        `[SSE-CLIENT] 📨 Received chunk: ${chunk.substring(0, 200)}`
      );
      buffer += chunk;

      // NOTE(review): events are assumed to be "\n\n"-delimited; a gateway
      // emitting "\r\n\r\n" would not be split here — confirm framing.
      const events = buffer.split("\n\n");
      buffer = events.pop() || "";

      logger.debug(
        `[SSE-CLIENT] 📊 Parsed ${events.length} events from buffer`
      );

      for (const event of events) {
        if (!event.trim()) continue;

        const lines = event.split("\n");
        let eventType = "message";
        // Per the SSE spec, every "data:" line in an event contributes;
        // they are joined with "\n" rather than overwriting each other.
        const dataLines: string[] = [];

        for (const line of lines) {
          if (line.startsWith("event:")) {
            eventType = line.substring(6).trim();
          } else if (line.startsWith("data:")) {
            dataLines.push(line.substring(5).trim());
          }
        }

        const eventData = dataLines.join("\n");

        if (eventData) {
          logger.info(`[SSE-CLIENT] 🎯 Processing event type: ${eventType}`);
          // Don't await - fire async to avoid blocking SSE reading loop
          this.handleEvent(eventType, eventData).catch((error) => {
            this.eventErrorCount++;
            logger.error(
              `[SSE-CLIENT] Error handling ${eventType} event (error ${this.eventErrorCount}/${this.eventErrorThreshold}):`,
              error
            );

            // Trigger cleanup if too many errors
            if (this.eventErrorCount >= this.eventErrorThreshold) {
              logger.error(
                `❌ Event error threshold reached (${this.eventErrorCount} errors). Triggering cleanup...`
              );
              this.cleanupOnEventError(eventType, error).catch((cleanupErr) => {
                logger.error(
                  "Failed to cleanup after event errors:",
                  cleanupErr
                );
              });
            }
          });
        }
      }
    }
  } finally {
    // Release the reader's lock on the stream however the loop exits
    // (stream end, stop(), or an exception) so the body is not pinned.
    reader.releaseLock();
  }
}
|
|
284
|
-
|
|
285
|
-
/**
|
|
286
|
-
* Send a quick delivery receipt to the gateway confirming job was received.
|
|
287
|
-
* Fire-and-forget — don't block job processing on the receipt send.
|
|
288
|
-
*/
|
|
289
|
-
private sendDeliveryReceipt(jobId: string): void {
|
|
290
|
-
const url = `${this.dispatcherUrl}/worker/response`;
|
|
291
|
-
fetch(url, {
|
|
292
|
-
method: "POST",
|
|
293
|
-
headers: {
|
|
294
|
-
"Content-Type": "application/json",
|
|
295
|
-
Authorization: `Bearer ${this.workerToken}`,
|
|
296
|
-
},
|
|
297
|
-
body: JSON.stringify({ jobId, received: true }),
|
|
298
|
-
signal: AbortSignal.timeout(10_000),
|
|
299
|
-
}).catch((err) => {
|
|
300
|
-
logger.warn(`Failed to send delivery receipt for job ${jobId}:`, err);
|
|
301
|
-
});
|
|
302
|
-
}
|
|
303
|
-
|
|
304
|
-
/**
|
|
305
|
-
* Send a heartbeat ACK back to the gateway so stale cleanup is based on
|
|
306
|
-
* verified inbound worker activity rather than outbound SSE writes.
|
|
307
|
-
*/
|
|
308
|
-
private sendHeartbeatAck(): void {
|
|
309
|
-
const url = `${this.dispatcherUrl}/worker/response`;
|
|
310
|
-
fetch(url, {
|
|
311
|
-
method: "POST",
|
|
312
|
-
headers: {
|
|
313
|
-
"Content-Type": "application/json",
|
|
314
|
-
Authorization: `Bearer ${this.workerToken}`,
|
|
315
|
-
},
|
|
316
|
-
body: JSON.stringify({ received: true, heartbeat: true }),
|
|
317
|
-
signal: AbortSignal.timeout(10_000),
|
|
318
|
-
}).catch((err) => {
|
|
319
|
-
logger.warn("Failed to send heartbeat ACK:", err);
|
|
320
|
-
});
|
|
321
|
-
}
|
|
322
|
-
|
|
323
|
-
private async handleReconnect(): Promise<void> {
|
|
324
|
-
if (this.reconnectAttempts >= this.maxReconnectAttempts) {
|
|
325
|
-
logger.error("Max reconnection attempts reached, giving up");
|
|
326
|
-
this.isRunning = false;
|
|
327
|
-
return;
|
|
328
|
-
}
|
|
329
|
-
|
|
330
|
-
this.reconnectAttempts++;
|
|
331
|
-
const delay = Math.min(1000 * 2 ** (this.reconnectAttempts - 1), 60000);
|
|
332
|
-
|
|
333
|
-
logger.info(
|
|
334
|
-
`Reconnecting in ${delay}ms (attempt ${this.reconnectAttempts}/${this.maxReconnectAttempts})...`
|
|
335
|
-
);
|
|
336
|
-
|
|
337
|
-
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
338
|
-
}
|
|
339
|
-
|
|
340
|
-
async stop(): Promise<void> {
|
|
341
|
-
try {
|
|
342
|
-
this.isRunning = false;
|
|
343
|
-
|
|
344
|
-
if (this.abortController) {
|
|
345
|
-
this.abortController.abort();
|
|
346
|
-
}
|
|
347
|
-
|
|
348
|
-
this.messageBatcher.stop();
|
|
349
|
-
|
|
350
|
-
if (this.currentWorker) {
|
|
351
|
-
await this.currentWorker.cleanup();
|
|
352
|
-
this.currentWorker = null;
|
|
353
|
-
}
|
|
354
|
-
|
|
355
|
-
logger.info("✅ Gateway client stopped");
|
|
356
|
-
} catch (error) {
|
|
357
|
-
logger.error("Error stopping gateway client:", error);
|
|
358
|
-
throw error;
|
|
359
|
-
}
|
|
360
|
-
}
|
|
361
|
-
|
|
362
|
-
/**
 * Dispatches one parsed SSE event by type.
 *
 * - "connected": validates the handshake payload and logs the deployment.
 * - "ping": heartbeat from the dispatcher; answered with an HTTP ACK.
 * - "config_changed": invalidates the session-context cache and queues any
 *   included change entries for the next prompt.
 * - "job": validates the payload, sends a delivery receipt, and routes the
 *   message to handleThreadMessage. Unknown types are logged and ignored.
 *
 * NOTE(review): the outer catch logs every error, so this promise
 * effectively never rejects — the read loop's eventErrorCount/.catch path
 * may therefore never advance; confirm that is intended.
 */
private async handleEvent(eventType: string, data: string): Promise<void> {
  try {
    if (eventType === "connected") {
      const parsedData = JSON.parse(data);
      const validationResult = ConnectedEventSchema.safeParse(parsedData);

      if (!validationResult.success) {
        logger.error(
          "Invalid connected event data:",
          validationResult.error.format()
        );
        throw new Error(
          `Connected event validation failed: ${validationResult.error.message}`
        );
      }

      const connData = validationResult.data;
      logger.info(
        `Connected to dispatcher for deployment ${connData.deploymentName}`
      );
      return;
    }

    if (eventType === "ping") {
      logger.debug("Received heartbeat ping from dispatcher");
      this.sendHeartbeatAck();
      return;
    }

    if (eventType === "config_changed") {
      logger.info(
        "Received config_changed event from gateway, invalidating session context cache"
      );
      // Lazy import avoids a static dependency cycle with session-context.
      const { invalidateSessionContextCache } = await import(
        "../openclaw/session-context"
      );
      invalidateSessionContextCache();

      // Parse and queue config change notifications for the next prompt
      try {
        const parsed = JSON.parse(data);
        const changes = Array.isArray(parsed?.changes)
          ? (parsed.changes as ConfigChangeEntry[])
          : [];
        if (changes.length > 0) {
          pendingConfigNotifications.push(...changes);
          logger.info(
            `Queued ${changes.length} config change notification(s)`
          );
        }
      } catch {
        // Backward compat: old gateway may send empty or invalid payload
      }
      return;
    }

    if (eventType === "job") {
      try {
        const parsedData = JSON.parse(data);
        const validationResult = JobEventSchema.safeParse(parsedData);

        if (!validationResult.success) {
          logger.error(
            "Invalid job event data:",
            validationResult.error.format()
          );
          logger.debug(`Raw job data: ${data}`);
          throw new Error(
            `Job event validation failed: ${validationResult.error.message}`
          );
        }

        // Send delivery receipt immediately so the gateway knows
        // the job was actually received (not lost to a stale SSE connection).
        // jobId is at the top level of the SSE event (set by job-router),
        // not inside the validated payload.
        const jobId = parsedData.jobId as string | undefined;
        if (jobId) {
          this.sendDeliveryReceipt(jobId);
        }

        // Zod validates structure but passthrough allows extra fields
        // The validated payload matches MessagePayload interface
        await this.handleThreadMessage(validationResult.data.payload);
      } catch (parseError) {
        logger.error(
          `Failed to parse or validate job event data:`,
          parseError
        );
        logger.debug(`Raw job data: ${data}`);
      }
      return;
    }

    logger.warn(
      `[DEBUG] Unknown SSE event type: ${eventType}, data: ${data}`
    );
  } catch (error) {
    logger.error(`Error handling event ${eventType}:`, error);
  }
}
|
|
463
|
-
|
|
464
|
-
/**
 * Processes a validated job payload: records tracing identifiers, emits a
 * "job_received" span, drops jobs addressed to a different user, and then
 * routes by jobType — "exec" jobs run immediately via handleExecJob, all
 * other jobs are queued into the message batcher.
 */
private async handleThreadMessage(data: MessagePayload): Promise<void> {
  // Extract traceparent for distributed tracing
  // Prefer platformMetadata.traceparent, fall back to TRACEPARENT env var
  const traceparent =
    (data.platformMetadata?.traceparent as string) || process.env.TRACEPARENT;
  this.currentTraceparent = traceparent;

  // Extract traceId for logging (backwards compatible)
  const traceId =
    extractTraceId(data) || this.currentTraceId || process.env.TRACE_ID;
  this.currentTraceId = traceId;

  const conversationId = data.conversationId;

  if (data.jobId) {
    this.currentJobId = data.jobId;
    // Create child span for job received (linked to parent via traceparent)
    const span = createChildSpan("job_received", traceparent, {
      "lobu.job_id": data.jobId,
      "lobu.message_id": data.messageId,
      "lobu.conversation_id": conversationId,
      "lobu.job_type": data.jobType || "message",
    });
    span?.setStatus({ code: SpanStatusCode.OK });
    span?.end();
    // Flush job_received span immediately
    void flushTracing();
    logger.info(
      {
        traceparent,
        traceId,
        jobId: data.jobId,
        messageId: data.messageId,
        jobType: data.jobType,
      },
      "Job received"
    );
  }

  // Defensive check: this stream should only carry jobs for our user.
  if (data.userId.toLowerCase() !== this.userId.toLowerCase()) {
    logger.warn(
      { traceId, receivedUserId: data.userId, expectedUserId: this.userId },
      "Received message for wrong user"
    );
    return;
  }

  // Check job type and dispatch accordingly
  if (data.jobType === "exec") {
    await this.handleExecJob(data);
    return;
  }

  // Default: message job
  const queuedMessage: QueuedMessage = {
    payload: data,
    timestamp: Date.now(),
  };

  await this.messageBatcher.addMessage(queuedMessage);
  logger.info(
    { traceId, messageId: data.messageId, conversationId },
    "Message queued for processing"
  );
}
|
|
529
|
-
|
|
530
|
-
/**
 * Handle exec job - spawn command in sandbox and stream output back
 *
 * Runs `sh -c <execCommand>` in the workspace directory, streams stdout and
 * stderr chunks to the gateway as they arrive, enforces a timeout with a
 * SIGTERM→SIGKILL escalation, and reports the final exit code (or error)
 * plus an "exec_execution" trace span.
 */
private async handleExecJob(data: MessagePayload): Promise<void> {
  const { execId, execCommand, execCwd, execEnv, execTimeout } = data;
  const conversationId = data.conversationId;
  const traceId = this.currentTraceId;
  const traceparent = this.currentTraceparent;

  if (!execId || !execCommand) {
    logger.error(
      { traceId, execId },
      "Invalid exec job: missing execId or execCommand"
    );
    return;
  }

  logger.info(
    { traceId, execId, command: execCommand.substring(0, 100) },
    "Executing command in sandbox"
  );

  // Create span for exec execution
  const span = createChildSpan("exec_execution", traceparent, {
    "lobu.exec_id": execId,
    "lobu.command": execCommand.substring(0, 100),
  });

  // Determine working directory
  const workingDir = execCwd || process.env.WORKSPACE_DIR || "/workspace";
  const timeout = execTimeout || 300000; // 5 minutes default

  // Create transport for sending responses back to gateway
  const transport = new HttpWorkerTransport({
    gatewayUrl: this.dispatcherUrl,
    workerToken: this.workerToken,
    userId: data.userId,
    channelId: data.channelId,
    conversationId,
    originalMessageTs: execId,
    teamId: data.teamId || "api",
    platform: data.platform,
    platformMetadata: data.platformMetadata,
  });

  // Guards both kill timers: once the process closes/errors, neither
  // SIGTERM nor SIGKILL will be delivered.
  let completed = false;

  try {
    // Spawn the command
    const proc = spawn("sh", ["-c", execCommand], {
      cwd: workingDir,
      env: { ...process.env, ...execEnv },
      stdio: ["ignore", "pipe", "pipe"],
    });

    // Setup timeout
    const timeoutId = setTimeout(() => {
      if (!completed) {
        logger.warn(
          { traceId, execId },
          "Exec timeout reached, killing process"
        );
        proc.kill("SIGTERM");
        // NOTE(review): this 5s SIGKILL escalation timer is never cleared;
        // `completed` prevents the kill, but the timer briefly keeps the
        // event loop alive — confirm that is acceptable.
        setTimeout(() => {
          if (!completed) {
            proc.kill("SIGKILL");
          }
        }, 5000);
      }
    }, timeout);

    // Stream stdout
    proc.stdout?.on("data", (chunk: Buffer) => {
      const content = chunk.toString();
      transport.sendExecOutput(execId, "stdout", content).catch((err) => {
        logger.error(
          { traceId, execId, error: err },
          "Failed to send stdout"
        );
      });
    });

    // Stream stderr
    proc.stderr?.on("data", (chunk: Buffer) => {
      const content = chunk.toString();
      transport.sendExecOutput(execId, "stderr", content).catch((err) => {
        logger.error(
          { traceId, execId, error: err },
          "Failed to send stderr"
        );
      });
    });

    // Wait for process to complete
    const exitCode = await new Promise<number>((resolve, reject) => {
      proc.on("close", (code) => {
        completed = true;
        clearTimeout(timeoutId);
        resolve(code ?? 0);
      });

      proc.on("error", (error) => {
        completed = true;
        clearTimeout(timeoutId);
        reject(error);
      });
    });

    // Send completion
    await transport.sendExecComplete(execId, exitCode);

    span?.setAttribute("lobu.exit_code", exitCode);
    span?.setStatus({ code: SpanStatusCode.OK });
    span?.end();
    await flushTracing();

    logger.info({ traceId, execId, exitCode }, "Exec completed");
  } catch (error) {
    const errorMessage =
      error instanceof Error ? error.message : String(error);

    // Send error
    await transport.sendExecError(execId, errorMessage).catch((err) => {
      logger.error(
        { traceId, execId, error: err },
        "Failed to send exec error"
      );
    });

    span?.setStatus({ code: SpanStatusCode.ERROR, message: errorMessage });
    span?.end();
    await flushTracing();

    logger.error({ traceId, execId, error: errorMessage }, "Exec failed");
  } finally {
    // The exec job is over either way; clear the in-flight job marker.
    this.currentJobId = undefined;
  }
}
|
|
668
|
-
|
|
669
|
-
private async processBatchedMessages(
|
|
670
|
-
messages: QueuedMessage[]
|
|
671
|
-
): Promise<void> {
|
|
672
|
-
if (messages.length === 0) return;
|
|
673
|
-
|
|
674
|
-
if (messages.length === 1) {
|
|
675
|
-
const singleMessage = messages[0];
|
|
676
|
-
if (singleMessage) {
|
|
677
|
-
await this.processSingleMessage(singleMessage, [
|
|
678
|
-
singleMessage.payload.messageId,
|
|
679
|
-
]);
|
|
680
|
-
}
|
|
681
|
-
return;
|
|
682
|
-
}
|
|
683
|
-
|
|
684
|
-
logger.info(`Batching ${messages.length} messages for combined processing`);
|
|
685
|
-
|
|
686
|
-
const firstMessage = messages[0];
|
|
687
|
-
if (!firstMessage) return;
|
|
688
|
-
|
|
689
|
-
const combinedPrompt = messages
|
|
690
|
-
.map((msg, index) => `Message ${index + 1}: ${msg.payload.messageText}`)
|
|
691
|
-
.join("\n\n");
|
|
692
|
-
|
|
693
|
-
const batchedMessage: QueuedMessage = {
|
|
694
|
-
timestamp: firstMessage.timestamp,
|
|
695
|
-
payload: {
|
|
696
|
-
...firstMessage.payload,
|
|
697
|
-
messageText: combinedPrompt,
|
|
698
|
-
agentOptions: firstMessage.payload.agentOptions,
|
|
699
|
-
},
|
|
700
|
-
};
|
|
701
|
-
|
|
702
|
-
const processedIds = messages
|
|
703
|
-
.map((m) => m.payload.messageId)
|
|
704
|
-
.filter(Boolean);
|
|
705
|
-
await this.processSingleMessage(batchedMessage, processedIds);
|
|
706
|
-
}
|
|
707
|
-
|
|
708
|
-
/**
 * Runs one (possibly merged) message through an OpenClawWorker.
 *
 * Resolves tracing identifiers, wraps execution in an "agent_execution"
 * span, wires the job ID and processed message IDs onto the worker's HTTP
 * transport, and on failure reports the error to the dispatcher before
 * rethrowing. The worker is always cleaned up in the finally block.
 *
 * @param message      The queued (or batch-merged) message to process.
 * @param processedIds Message IDs covered by this run; defaults to the
 *                     single message's own ID when omitted.
 */
private async processSingleMessage(
  message: QueuedMessage,
  processedIds?: string[]
): Promise<void> {
  // Get traceparent for distributed tracing
  const traceparent =
    (message.payload.platformMetadata?.traceparent as string) ||
    this.currentTraceparent ||
    process.env.TRACEPARENT;

  const traceId =
    extractTraceId(message.payload) ||
    this.currentTraceId ||
    process.env.TRACE_ID;

  const conversationId = message.payload.conversationId;

  // Create child span for agent execution (linked to parent via traceparent)
  const span = createChildSpan("agent_execution", traceparent, {
    "lobu.message_id": message.payload.messageId,
    "lobu.conversation_id": conversationId,
    "lobu.user_id": message.payload.userId,
    "lobu.model": message.payload.agentOptions?.model || "default",
  });

  try {
    if (!process.env.USER_ID) {
      logger.warn(
        `USER_ID not set in environment, using userId from payload: ${message.payload.userId}`
      );
      process.env.USER_ID = message.payload.userId;
    }

    const workerConfig = this.payloadToWorkerConfig(message.payload);

    logger.info(
      {
        traceparent,
        traceId,
        messageId: message.payload.messageId,
        model: message.payload.agentOptions?.model,
      },
      "Agent starting"
    );

    // Worker will decide whether to continue session based on workspace state
    const { OpenClawWorker } = await import("../openclaw/worker");
    this.currentWorker = new OpenClawWorker(workerConfig);

    const workerTransport = this.currentWorker.getWorkerTransport();

    if (workerTransport && workerTransport instanceof HttpWorkerTransport) {
      if (this.currentJobId) {
        workerTransport.setJobId(this.currentJobId);
      }

      // Set processedMessageIds directly on the integration instance
      const messageIds =
        processedIds && processedIds.length > 0
          ? processedIds
          : message?.payload?.messageId
            ? [message.payload.messageId]
            : [];

      workerTransport.processedMessageIds = messageIds;
    }

    await this.currentWorker.execute();

    this.currentJobId = undefined;

    // Reset error count on successful message processing
    this.eventErrorCount = 0;

    // End span with success
    span?.setStatus({ code: SpanStatusCode.OK });
    span?.end();
    // Flush traces immediately to ensure spans are exported before worker scales down
    await flushTracing();
    logger.info(
      {
        traceparent,
        messageId: message.payload.messageId,
        conversationId,
      },
      "Agent completed"
    );
  } catch (error) {
    // End span with error
    span?.setStatus({
      code: SpanStatusCode.ERROR,
      message: error instanceof Error ? error.message : String(error),
    });
    span?.end();
    // Flush traces on error too
    await flushTracing();
    logger.error(
      {
        traceparent,
        messageId: message.payload.messageId,
        conversationId,
        error: error instanceof Error ? error.message : String(error),
      },
      "Agent failed"
    );

    // Best-effort: surface the failure to the dispatcher via the transport.
    const workerTransport = this.currentWorker?.getWorkerTransport();
    if (workerTransport) {
      try {
        const enhancedError =
          error instanceof Error ? error : new Error(String(error));
        await workerTransport.signalError(enhancedError);
      } catch (errorSendError) {
        logger.error(
          { traceId, error: errorSendError },
          "Failed to send error to dispatcher"
        );
      }
    }

    throw error;
  } finally {
    // Always release the worker, whether execution succeeded or failed.
    if (this.currentWorker) {
      try {
        await this.currentWorker.cleanup();
      } catch (cleanupError) {
        logger.error(
          { traceId, error: cleanupError },
          "Error during worker cleanup"
        );
      }
      this.currentWorker = null;
    }
  }
}
|
|
843
|
-
|
|
844
|
-
private payloadToWorkerConfig(payload: MessagePayload): WorkerConfig {
|
|
845
|
-
const conversationId = payload.conversationId || "default";
|
|
846
|
-
const platformMetadata = payload.platformMetadata;
|
|
847
|
-
|
|
848
|
-
const agentOptions = {
|
|
849
|
-
...(payload.agentOptions || {}),
|
|
850
|
-
...(payload.agentOptions?.allowedTools
|
|
851
|
-
? { allowedTools: payload.agentOptions.allowedTools }
|
|
852
|
-
: {}),
|
|
853
|
-
...(payload.agentOptions?.disallowedTools
|
|
854
|
-
? { disallowedTools: payload.agentOptions.disallowedTools }
|
|
855
|
-
: {}),
|
|
856
|
-
...(payload.agentOptions?.timeoutMinutes
|
|
857
|
-
? { timeoutMinutes: payload.agentOptions.timeoutMinutes }
|
|
858
|
-
: {}),
|
|
859
|
-
};
|
|
860
|
-
|
|
861
|
-
return {
|
|
862
|
-
sessionKey: `session-${conversationId}`,
|
|
863
|
-
userId: payload.userId,
|
|
864
|
-
agentId: payload.agentId,
|
|
865
|
-
channelId: payload.channelId,
|
|
866
|
-
conversationId,
|
|
867
|
-
userPrompt: Buffer.from(payload.messageText).toString("base64"),
|
|
868
|
-
responseChannel: String(
|
|
869
|
-
platformMetadata.responseChannel || payload.channelId
|
|
870
|
-
),
|
|
871
|
-
responseId: String(platformMetadata.responseId || payload.messageId),
|
|
872
|
-
botResponseId: platformMetadata.botResponseId
|
|
873
|
-
? String(platformMetadata.botResponseId)
|
|
874
|
-
: undefined,
|
|
875
|
-
// Check both payload.teamId (WhatsApp) and platformMetadata.teamId (Slack)
|
|
876
|
-
teamId:
|
|
877
|
-
(payload.teamId ?? platformMetadata.teamId)
|
|
878
|
-
? String(payload.teamId ?? platformMetadata.teamId)
|
|
879
|
-
: undefined,
|
|
880
|
-
platform: payload.platform,
|
|
881
|
-
platformMetadata: platformMetadata, // Include full platformMetadata for files and other metadata
|
|
882
|
-
agentOptions: JSON.stringify(agentOptions),
|
|
883
|
-
workspace: {
|
|
884
|
-
baseDirectory: process.env.WORKSPACE_DIR || "/workspace",
|
|
885
|
-
},
|
|
886
|
-
};
|
|
887
|
-
}
|
|
888
|
-
|
|
889
|
-
/**
|
|
890
|
-
* Cleanup resources after event handling errors exceed threshold
|
|
891
|
-
*/
|
|
892
|
-
private async cleanupOnEventError(
|
|
893
|
-
eventType: string,
|
|
894
|
-
_error: unknown
|
|
895
|
-
): Promise<void> {
|
|
896
|
-
logger.warn(
|
|
897
|
-
`Cleaning up after ${this.eventErrorCount} event handling errors (last: ${eventType})`
|
|
898
|
-
);
|
|
899
|
-
|
|
900
|
-
try {
|
|
901
|
-
// Clean up current worker if it exists
|
|
902
|
-
if (this.currentWorker) {
|
|
903
|
-
logger.info("Cleaning up current worker due to event errors");
|
|
904
|
-
try {
|
|
905
|
-
await this.currentWorker.cleanup?.();
|
|
906
|
-
} catch (cleanupError) {
|
|
907
|
-
logger.error("Worker cleanup failed:", cleanupError);
|
|
908
|
-
}
|
|
909
|
-
this.currentWorker = null;
|
|
910
|
-
}
|
|
911
|
-
|
|
912
|
-
// Reset current job
|
|
913
|
-
if (this.currentJobId) {
|
|
914
|
-
logger.info(`Clearing stuck job: ${this.currentJobId}`);
|
|
915
|
-
this.currentJobId = undefined;
|
|
916
|
-
}
|
|
917
|
-
|
|
918
|
-
// Abort SSE connection to trigger reconnect
|
|
919
|
-
if (this.abortController) {
|
|
920
|
-
logger.info("Aborting SSE connection to trigger reconnect");
|
|
921
|
-
this.abortController.abort();
|
|
922
|
-
this.abortController = undefined;
|
|
923
|
-
}
|
|
924
|
-
|
|
925
|
-
// Reset error count after cleanup
|
|
926
|
-
this.eventErrorCount = 0;
|
|
927
|
-
|
|
928
|
-
logger.info("Event error cleanup completed, will reconnect");
|
|
929
|
-
} catch (cleanupError) {
|
|
930
|
-
logger.error("Fatal error during event error cleanup:", cleanupError);
|
|
931
|
-
// Last resort: stop the client entirely
|
|
932
|
-
this.isRunning = false;
|
|
933
|
-
}
|
|
934
|
-
}
|
|
935
|
-
|
|
936
|
-
isHealthy(): boolean {
|
|
937
|
-
return this.isRunning && !this.messageBatcher.isCurrentlyProcessing();
|
|
938
|
-
}
|
|
939
|
-
|
|
940
|
-
getStatus(): {
|
|
941
|
-
isRunning: boolean;
|
|
942
|
-
isProcessing: boolean;
|
|
943
|
-
userId: string;
|
|
944
|
-
deploymentName: string;
|
|
945
|
-
pendingMessages: number;
|
|
946
|
-
} {
|
|
947
|
-
return {
|
|
948
|
-
isRunning: this.isRunning,
|
|
949
|
-
isProcessing: this.messageBatcher.isCurrentlyProcessing(),
|
|
950
|
-
userId: this.userId,
|
|
951
|
-
deploymentName: this.deploymentName,
|
|
952
|
-
pendingMessages: this.messageBatcher.getPendingCount(),
|
|
953
|
-
};
|
|
954
|
-
}
|
|
955
|
-
}
|