@aikirun/client 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,120 @@
1
+ # @aikirun/client
2
+
3
+ Client SDK for Aiki durable execution platform - connect to the Aiki server, start workflows, and manage execution.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @aikirun/client
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ### Initialize the Client
14
+
15
+ ```typescript
16
+ import { client } from "@aikirun/client";
17
+
18
+ const aikiClient = await client({
19
+ url: "http://localhost:9090",
20
+ redis: {
21
+ host: "localhost",
22
+ port: 6379,
23
+ },
24
+ });
25
+ ```
26
+
27
+ ### Start a Workflow
28
+
29
+ ```typescript
30
+ import { onboardingWorkflowV1 } from "./workflows.ts";
31
+
32
+ const stateHandle = await onboardingWorkflowV1.start(aikiClient, {
33
+ email: "user@example.com",
34
+ });
35
+ ```
36
+
37
+ ### Wait for Workflow Completion
38
+
39
+ ```typescript
40
+ const result = await stateHandle.wait(
41
+ { type: "status", status: "completed" },
42
+ { maxDurationMs: 60_000, pollIntervalMs: 5_000 },
43
+ );
44
+
45
+ if (result.success) {
46
+ console.log("Workflow completed!");
47
+ } else {
48
+ console.log("Workflow did not complete:", result.cause);
49
+ }
50
+ ```
51
+
52
+ ### Get Workflow State
53
+
54
+ ```typescript
55
+ const state = await stateHandle.getState();
56
+ console.log("Current status:", state.status);
57
+ ```
58
+
59
+ ## Features
60
+
61
+ - **Reliable Connection** - HTTP client with automatic retry and connection pooling
62
+ - **Workflow Management** - Start workflows with type-safe inputs
63
+ - **State Polling** - Wait for workflow completion with configurable polling
64
+ - **Logger** - Built-in logging for debugging
65
+ - **Context Factory** - Pass application context through workflow execution
66
+ - **Redis Integration** - Connect to Redis for distributed state management
67
+
68
+ ## Configuration
69
+
70
+ ### Client Parameters
71
+
72
+ ```typescript
73
+ interface ClientParams<AppContext> {
74
+ url: string; // Server URL
75
+ redis: {
76
+ host: string;
77
+ port: number;
78
+ password?: string;
79
+ db?: number;
80
+ maxRetriesPerRequest?: number;
81
+ retryDelayOnFailoverMs?: number;
82
+ connectTimeoutMs?: number;
83
+ };
84
+ contextFactory?: (run: WorkflowRun) => AppContext | Promise<AppContext>;
85
+ logger?: Logger;
86
+ }
87
+ ```
88
+
89
+ ### Context Factory Example
90
+
91
+ ```typescript
92
+ const aikiClient = await client({
93
+ url: "http://localhost:9090",
94
+ redis: { host: "localhost", port: 6379 },
95
+ contextFactory: (run) => ({
96
+ traceId: generateTraceId(),
97
+ workflowRunId: run.id,
98
+ userId: extractUserIdFromRun(run),
99
+ }),
100
+ });
101
+ ```
102
+
103
+ ## API Reference
104
+
105
+ See the [Aiki documentation](https://github.com/aikirun/aiki) for comprehensive API reference.
106
+
107
+ ## Related Packages
108
+
109
+ - [@aikirun/workflow](https://www.npmjs.com/package/@aikirun/workflow) - Define workflows
110
+ - [@aikirun/task](https://www.npmjs.com/package/@aikirun/task) - Define tasks
111
+ - [@aikirun/worker](https://www.npmjs.com/package/@aikirun/worker) - Execute workflows
112
+ - [@aikirun/types](https://www.npmjs.com/package/@aikirun/types) - Type definitions
113
+
114
+ ## Changelog
115
+
116
+ See the [CHANGELOG](https://github.com/aikirun/aiki/blob/main/CHANGELOG.md) for version history.
117
+
118
+ ## License
119
+
120
+ Apache-2.0
@@ -0,0 +1,59 @@
1
+ import { ClientParams, Client, Logger } from '@aikirun/types/client';
2
+ export { AdaptivePollingSubscriberStrategy, ApiClient, Client, ClientParams, Logger, PollingSubscriberStrategy, RedisConfig, RedisStreamsSubscriberStrategy, ResolvedSubscriberStrategy, SubscriberMessageMeta, SubscriberStrategy, WorkflowRunBatch } from '@aikirun/types/client';
3
+
4
/**
 * Creates an Aiki client for starting and managing workflows.
 *
 * The client connects to the Aiki server via HTTP and Redis for state management.
 * It provides methods to start workflows and monitor their execution.
 *
 * @template AppContext - Type of application context passed to workflows (default: null)
 * @param params - Client configuration parameters
 * @param params.url - HTTP URL of the Aiki server (e.g., "http://localhost:9090")
 * @param params.redis - Redis connection configuration
 * @param params.redis.host - Redis server hostname
 * @param params.redis.port - Redis server port
 * @param params.redis.password - Optional Redis password
 * @param params.contextFactory - Optional function to create context for each workflow run
 * @param params.logger - Optional custom logger (defaults to ConsoleLogger)
 * @returns Promise resolving to a configured Client instance
 *
 * @remarks
 * The Redis connection is opened lazily on first use; call `close()` on the
 * returned client to release it when you are done.
 *
 * @example
 * ```typescript
 * const aikiClient = await client({
 *   url: "http://localhost:9090",
 *   redis: { host: "localhost", port: 6379 },
 *   contextFactory: (run) => ({
 *     traceId: generateTraceId(),
 *     userId: extractUserId(run),
 *   }),
 * });
 *
 * // Start a workflow
 * const handle = await myWorkflow.start(aikiClient, { email: "user@example.com" });
 *
 * // Wait for completion
 * const result = await handle.wait(
 *   { type: "status", status: "completed" },
 *   { maxDurationMs: 60_000 }
 * );
 *
 * // Cleanup
 * await aikiClient.close();
 * ```
 */
declare function client<AppContext = null>(params: ClientParams<AppContext>): Promise<Client<AppContext>>;
46
+
47
/**
 * Default {@link Logger} implementation that writes formatted lines to the
 * global console. Loggers are hierarchical: {@link ConsoleLogger.child}
 * returns a new logger whose bound context is merged into every message.
 */
declare class ConsoleLogger implements Logger {
    /** Context key/value pairs serialized into every formatted line. */
    private readonly context;
    constructor(context?: Record<string, unknown>);
    trace(message: string, metadata?: Record<string, unknown>): void;
    debug(message: string, metadata?: Record<string, unknown>): void;
    info(message: string, metadata?: Record<string, unknown>): void;
    warn(message: string, metadata?: Record<string, unknown>): void;
    error(message: string, metadata?: Record<string, unknown>): void;
    /** Returns a new logger whose context additionally includes `bindings`. */
    child(bindings: Record<string, unknown>): Logger;
    /** Builds the "[timestamp] LEVEL: message {context}" line. */
    private format;
}
58
+
59
+ export { ConsoleLogger, client };
package/dist/index.js ADDED
@@ -0,0 +1,559 @@
1
+ // client.ts
2
+ import { INTERNAL as INTERNAL2 } from "@aikirun/types/symbols";
3
+ import { createORPCClient } from "@orpc/client";
4
+ import { RPCLink } from "@orpc/client/fetch";
5
+ import { Redis } from "ioredis";
6
+
7
+ // logger/console-logger.ts
8
// Console-backed Logger. Each instance carries a context object that is
// merged into every emitted line; child() derives a logger with extra
// bound context.
var ConsoleLogger = class _ConsoleLogger {
  context;
  constructor(context = {}) {
    this.context = context;
  }
  trace(message, metadata) {
    console.debug(this.format("TRACE", message, metadata));
  }
  debug(message, metadata) {
    console.debug(this.format("DEBUG", message, metadata));
  }
  info(message, metadata) {
    console.info(this.format("INFO", message, metadata));
  }
  warn(message, metadata) {
    console.warn(this.format("WARN", message, metadata));
  }
  error(message, metadata) {
    console.error(this.format("ERROR", message, metadata));
  }
  // New logger sharing this one's context plus the given bindings.
  child(bindings) {
    return new _ConsoleLogger({ ...this.context, ...bindings });
  }
  // "[ISO timestamp] LEVEL: message {json}" — the JSON suffix is omitted
  // when the merged context/metadata is empty.
  format(level, message, metadata) {
    const merged = { ...this.context, ...metadata };
    const suffix = Object.keys(merged).length ? ` ${JSON.stringify(merged)}` : "";
    return `[${new Date().toISOString()}] ${level}: ${message}${suffix}`;
  }
};
37
+
38
+ // ../../lib/array/utils.ts
39
// Group items into a Map, using `unwrap` to derive a [key, value] pair from
// each item. Values sharing a key accumulate in encounter order.
function groupBy(items, unwrap) {
  const groups = new Map();
  for (const item of items) {
    const [key, value] = unwrap(item);
    const bucket = groups.get(key);
    if (bucket) {
      bucket.push(value);
    } else {
      groups.set(key, [value]);
    }
  }
  return groups;
}
52
// True when `value` holds at least one element.
function isNonEmptyArray(value) {
  return value.length !== 0;
}
55
// Fisher–Yates shuffle over a copy; the input array is left untouched.
function shuffleArray(array) {
  const copy = Array.from(array);
  for (let i = copy.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    const tmp = copy[i];
    copy[i] = copy[j];
    copy[j] = tmp;
  }
  return copy;
}
63
/**
 * Split `totalSize` units as evenly as possible over `itemCount` slots
 * (round-robin): the first `totalSize % itemCount` slots receive one extra.
 *
 * @param totalSize - total units to distribute; <= 0 yields all-zero slots
 * @param itemCount - number of slots; <= 0 yields an empty array
 * @returns array of length `itemCount` whose entries sum to max(totalSize, 0)
 */
function distributeRoundRobin(totalSize, itemCount) {
  if (itemCount <= 0) return [];
  if (totalSize <= 0) return Array(itemCount).fill(0);
  // Closed form instead of the O(totalSize) counting loop: identical result.
  const base = Math.floor(totalSize / itemCount);
  const extra = totalSize % itemCount;
  return Array.from({ length: itemCount }, (_, i) => base + (i < extra ? 1 : 0));
}
71
+
72
+ // ../../lib/retry/strategy.ts
73
// Decide whether another retry is allowed under `strategy` and, if so, how
// long to wait before it. `attempts` is the number of attempts made so far.
function getRetryParams(attempts, strategy) {
  const strategyType = strategy.type;
  switch (strategyType) {
    case "never":
      return { retriesLeft: false };
    case "fixed": {
      if (attempts >= strategy.maxAttempts) return { retriesLeft: false };
      return { retriesLeft: true, delayMs: strategy.delayMs };
    }
    case "exponential": {
      if (attempts >= strategy.maxAttempts) return { retriesLeft: false };
      // baseDelayMs * factor^(attempts-1), capped by maxDelayMs when given.
      const raw = strategy.baseDelayMs * (strategy.factor ?? 2) ** (attempts - 1);
      const cap = strategy.maxDelayMs ?? Number.POSITIVE_INFINITY;
      return { retriesLeft: true, delayMs: Math.min(raw, cap) };
    }
    case "jittered": {
      if (attempts >= strategy.maxAttempts) return { retriesLeft: false };
      // Full jitter: uniform in [0, baseDelayMs * jitterFactor^(attempts-1)).
      const ceiling = strategy.baseDelayMs * (strategy.jitterFactor ?? 2) ** (attempts - 1);
      const cap = strategy.maxDelayMs ?? Number.POSITIVE_INFINITY;
      return { retriesLeft: true, delayMs: Math.min(Math.random() * ceiling, cap) };
    }
    default:
      // Exhaustiveness guard: surfaces an unexpected strategy type unchanged.
      return strategyType;
  }
}
119
+
120
+ // subscribers/redis-streams.ts
121
+ import { INTERNAL } from "@aikirun/types/symbols";
122
+ import { z } from "zod";
123
// Payload for a "workflow run is ready to execute" notification.
var WorkflowRunReadyMessageDataSchema = z.object({
  type: z.literal("workflow_run_ready"),
  // transform is a runtime no-op; presumably narrows the string to a branded
  // id type at the TS level — TODO confirm against @aikirun/types.
  workflowRunId: z.string().transform((id) => id)
});
// All message payloads understood by the Redis-streams subscriber,
// discriminated on the "type" field.
var RedisMessageDataSchema = z.discriminatedUnion("type", [WorkflowRunReadyMessageDataSchema]);
// Stream message fields arrive as a flat [key, value, key, value, ...]
// array; fold consecutive pairs into an object, keeping string keys only
// and ignoring a trailing key with no value.
var RedisMessageRawDataSchema = z.array(z.unknown()).transform((rawFields) => {
  const data = {};
  for (let i = 0; i < rawFields.length; i += 2) {
    if (i + 1 < rawFields.length) {
      const key = rawFields[i];
      if (typeof key === "string") {
        data[key] = rawFields[i + 1];
      }
    }
  }
  return data;
});
// A single stream message: [message-id, raw field list].
var RedisStreamMessageSchema = z.tuple([
  z.string(),
  // message-id
  RedisMessageRawDataSchema
]);
// One stream's read result: [stream name, messages].
var RedisStreamEntrySchema = z.tuple([
  z.string(),
  // stream
  z.array(RedisStreamMessageSchema)
]);
var RedisStreamEntriesSchema = z.array(RedisStreamEntrySchema);
// XPENDING detail row: [message-id, consumer name, idle-time-ms, delivery-count].
var RedisStreamPendingMessageSchema = z.tuple([z.string(), z.string(), z.number(), z.number()]);
var RedisStreamPendingMessagesSchema = z.array(RedisStreamPendingMessageSchema);
153
/**
 * Builds the Redis-Streams subscriber strategy: the worker reads workflow-run
 * notifications from per-workflow (and optionally per-shard) streams through
 * consumer groups.
 *
 * Optional tunables on `strategy` (defaults in parentheses):
 * - intervalMs (50): delay after a normal poll/heartbeat tick
 * - maxRetryIntervalMs (30000): cap for jittered retry backoff
 * - atCapacityIntervalMs (50): delay while the worker is at capacity
 * - blockTimeMs (1000): BLOCK duration passed to XREADGROUP
 * - claimMinIdleTimeMs (180000): min idle time before stealing stuck messages
 */
function createRedisStreamsStrategy(client2, strategy, workflows, workerShards) {
  const redis = client2[INTERNAL].redis.getConnection();
  const logger = client2.logger.child({
    "aiki.component": "redis-subscriber"
  });
  const streamConsumerGroupMap = getRedisStreamConsumerGroupMap(workflows, workerShards);
  const streams = Array.from(streamConsumerGroupMap.keys());
  const intervalMs = strategy.intervalMs ?? 50;
  const maxRetryIntervalMs = strategy.maxRetryIntervalMs ?? 3e4;
  const atCapacityIntervalMs = strategy.atCapacityIntervalMs ?? 50;
  const blockTimeMs = strategy.blockTimeMs ?? 1e3;
  const claimMinIdleTimeMs = strategy.claimMinIdleTimeMs ?? 18e4;
  // How long the subscriber loop should sleep before its next iteration,
  // depending on what just happened.
  const getNextDelay = (params) => {
    switch (params.type) {
      case "polled":
      case "heartbeat":
        return intervalMs;
      case "retry": {
        // Jittered backoff grown from the base interval, capped at
        // maxRetryIntervalMs; maxAttempts is infinite so retries never stop.
        const retryParams = getRetryParams(params.attemptNumber, {
          type: "jittered",
          maxAttempts: Number.POSITIVE_INFINITY,
          baseDelayMs: intervalMs,
          maxDelayMs: maxRetryIntervalMs
        });
        if (!retryParams.retriesLeft) {
          return maxRetryIntervalMs;
        }
        return retryParams.delayMs;
      }
      case "at_capacity":
        return atCapacityIntervalMs;
      default:
        // Exhaustiveness fallthrough: surfaces an unexpected params value.
        return params;
    }
  };
  // Heartbeat = XCLAIM with min-idle 0 and JUSTID, re-claiming the in-flight
  // message to this worker so its idle time resets and it is not treated as
  // stuck (see claimMinIdleTimeMs). Failures are logged, never thrown.
  const getHeartbeat = (workerId) => async (workflowRunId, meta) => {
    try {
      await redis.xclaim(meta.stream, meta.consumerGroup, workerId, 0, meta.messageId, "JUSTID");
      logger.debug("Heartbeat sent", {
        "aiki.workflowRunId": workflowRunId,
        "aiki.messageId": meta.messageId
      });
    } catch (error) {
      logger.warn("Heartbeat failed", {
        "aiki.workflowRunId": workflowRunId,
        "aiki.error": error instanceof Error ? error.message : String(error)
      });
    }
  };
  // XACK the message. A 0 result means it was already acknowledged (warn
  // only); a thrown error is logged with context and re-thrown to the caller.
  const acknowledge = async (workflowRunId, meta) => {
    try {
      const result = await redis.xack(meta.stream, meta.consumerGroup, meta.messageId);
      if (result === 0) {
        logger.warn("Message already acknowledged", {
          "aiki.workflowRunId": workflowRunId,
          "aiki.messageId": meta.messageId
        });
      } else {
        logger.debug("Message acknowledged", {
          "aiki.workflowRunId": workflowRunId,
          "aiki.messageId": meta.messageId
        });
      }
    } catch (error) {
      logger.error("Failed to acknowledge message", {
        "aiki.error": error instanceof Error ? error.message : String(error),
        "aiki.workflowRunId": workflowRunId,
        "aiki.messageId": meta.messageId
      });
      throw error;
    }
  };
  return {
    // Ensure every consumer group exists (MKSTREAM creates missing streams);
    // BUSYGROUP means the group already exists and is benign.
    async init(workerId, _callbacks) {
      for (const [stream, consumerGroup] of streamConsumerGroupMap) {
        try {
          await redis.xgroup("CREATE", stream, consumerGroup, "0", "MKSTREAM");
        } catch (error) {
          if (!error.message?.includes("BUSYGROUP")) {
            throw error;
          }
        }
      }
      return {
        type: strategy.type,
        getNextDelay,
        getNextBatch: (size) => fetchRedisStreamMessages(
          redis,
          logger,
          streams,
          streamConsumerGroupMap,
          workerId,
          size,
          blockTimeMs,
          claimMinIdleTimeMs
        ),
        heartbeat: getHeartbeat(workerId),
        acknowledge
      };
    }
  };
}
255
// Build the stream-name -> consumer-group-name map for the given workflows.
// When shard keys are provided, each workflow/version fans out to one
// stream+group per shard; otherwise there is one pair per workflow/version.
function getRedisStreamConsumerGroupMap(workflows, shardKeys) {
  const entries = [];
  if (shardKeys && isNonEmptyArray(shardKeys)) {
    for (const workflow of workflows) {
      for (const shardKey of shardKeys) {
        entries.push([
          `workflow/${workflow.id}/${workflow.versionId}/${shardKey}`,
          `worker/${workflow.id}/${workflow.versionId}/${shardKey}`
        ]);
      }
    }
  } else {
    for (const workflow of workflows) {
      entries.push([
        `workflow/${workflow.id}/${workflow.versionId}`,
        `worker/${workflow.id}/${workflow.versionId}`
      ]);
    }
  }
  return new Map(entries);
}
273
/**
 * Reads up to `size` workflow-run messages across `streams`, splitting the
 * budget round-robin over the streams. The stream order is shuffled so the
 * round-robin remainder does not always favor the same streams. Any capacity
 * left over after fresh reads is used to claim messages stuck pending on
 * other consumers (when claiming is enabled via claimMinIdleTimeMs > 0).
 */
async function fetchRedisStreamMessages(redis, logger, streams, streamConsumerGroupMap, workerId, size, blockTimeMs, claimMinIdleTimeMs) {
  if (!isNonEmptyArray(streams)) {
    return [];
  }
  const batchSizePerStream = distributeRoundRobin(size, streams.length);
  const shuffledStreams = shuffleArray(streams);
  const readPromises = [];
  for (let i = 0; i < shuffledStreams.length; i++) {
    const stream = shuffledStreams[i];
    if (!stream) {
      continue;
    }
    // Skip streams that received no budget this round.
    const streamBatchSize = batchSizePerStream[i];
    if (!streamBatchSize || streamBatchSize === 0) {
      continue;
    }
    const consumerGroup = streamConsumerGroupMap.get(stream);
    if (!consumerGroup) {
      continue;
    }
    // ">" asks XREADGROUP for entries never delivered to this group.
    // All streams are read concurrently; each blocks up to blockTimeMs.
    const readPromise = redis.xreadgroup(
      "GROUP",
      consumerGroup,
      workerId,
      "COUNT",
      streamBatchSize,
      "BLOCK",
      blockTimeMs,
      "STREAMS",
      stream,
      ">"
    );
    readPromises.push(readPromise);
  }
  // allSettled: a failed read on one stream must not lose the others.
  const readResults = await Promise.allSettled(readPromises);
  const streamEntries = [];
  for (const result of readResults) {
    if (result.status === "fulfilled" && result.value) {
      streamEntries.push(result.value);
    }
  }
  const workflowRuns = isNonEmptyArray(streamEntries) ? await processRedisStreamMessages(redis, logger, streamConsumerGroupMap, streamEntries) : [];
  // Spend leftover capacity stealing messages idle on other consumers.
  const remainingCapacity = size - workflowRuns.length;
  if (remainingCapacity > 0 && claimMinIdleTimeMs > 0) {
    const claimedWorkflowRuns = await claimStuckRedisStreamMessages(
      redis,
      logger,
      shuffledStreams,
      streamConsumerGroupMap,
      workerId,
      remainingCapacity,
      claimMinIdleTimeMs
    );
    for (const workflowRun of claimedWorkflowRuns) {
      workflowRuns.push(workflowRun);
    }
  }
  return workflowRuns;
}
332
/**
 * Validates raw stream read/claim results and converts each recognized
 * message into a { data, meta } batch entry. Malformed envelopes are logged
 * and skipped; messages with an invalid payload are acknowledged so they are
 * not redelivered indefinitely.
 */
async function processRedisStreamMessages(redis, logger, streamConsumerGroupMap, streamsEntries) {
  const workflowRuns = [];
  for (const streamEntriesRaw of streamsEntries) {
    logger.debug("Raw stream entries", { entries: streamEntriesRaw });
    // Envelope validation: [[stream, [[messageId, fields], ...]], ...]
    const streamEntriesResult = RedisStreamEntriesSchema.safeParse(streamEntriesRaw);
    if (!streamEntriesResult.success) {
      logger.error("Invalid stream entries format", {
        "aiki.error": z.treeifyError(streamEntriesResult.error)
      });
      continue;
    }
    for (const streamEntry of streamEntriesResult.data) {
      const [stream, messages] = streamEntry;
      const consumerGroup = streamConsumerGroupMap.get(stream);
      if (!consumerGroup) {
        logger.error("No consumer group found for stream", {
          "aiki.stream": stream
        });
        continue;
      }
      for (const [messageId, rawMessageData] of messages) {
        const messageData = RedisMessageDataSchema.safeParse(rawMessageData);
        if (!messageData.success) {
          logger.warn("Invalid message structure", {
            "aiki.stream": stream,
            "aiki.messageId": messageId,
            "aiki.error": z.treeifyError(messageData.error)
          });
          // Ack the poison message so it doesn't stay pending forever.
          await redis.xack(stream, consumerGroup, messageId);
          continue;
        }
        switch (messageData.data.type) {
          case "workflow_run_ready": {
            const { workflowRunId } = messageData.data;
            // meta carries what acknowledge()/heartbeat need later.
            workflowRuns.push({
              data: { workflowRunId },
              meta: {
                stream,
                messageId,
                consumerGroup
              }
            });
            break;
          }
          default:
            // Exhaustiveness touch of the type field; unknown types are
            // silently skipped (not acked) here.
            messageData.data.type;
            continue;
        }
      }
    }
  }
  return workflowRuns;
}
385
// Claim up to `maxClaim` messages that have been pending on OTHER consumers
// for at least `minIdleMs`, transferring ownership to this worker, then
// convert the claimed messages into workflow-run batch entries.
// Returns [] when claiming is disabled or nothing qualifies.
async function claimStuckRedisStreamMessages(redis, logger, shuffledStreams, streamConsumerGroupMap, workerId, maxClaim, minIdleMs) {
  if (maxClaim <= 0 || minIdleMs <= 0) {
    return [];
  }
  const claimable = await findClaimableRedisStreamMessages(
    redis,
    logger,
    shuffledStreams,
    streamConsumerGroupMap,
    workerId,
    maxClaim,
    minIdleMs
  );
  if (!isNonEmptyArray(claimable)) {
    return [];
  }
  // XCLAIM once per stream, with all of that stream's message ids.
  const messagesByStream = groupBy(claimable, (message) => [message.stream, message]);
  const claimPromises = Array.from(messagesByStream.entries()).map(async ([stream, messages]) => {
    const consumerGroup = streamConsumerGroupMap.get(stream);
    if (!consumerGroup) {
      return null;
    }
    const messageIds = messages.map((message) => message.messageId);
    try {
      const claimedMessages = await redis.xclaim(stream, consumerGroup, workerId, minIdleMs, ...messageIds);
      return { stream, claimedMessages };
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      // Known benign Redis conditions get a warn; anything else is a real
      // failure logged with full context. Either way the stream is skipped.
      if (errorMessage.includes("NOGROUP")) {
        logger.warn("Consumer group does not exist for stream, skipping claim operation", {
          "aiki.stream": stream
        });
      } else if (errorMessage.includes("BUSYGROUP")) {
        logger.warn("Consumer group busy for stream, skipping claim operation", {
          "aiki.stream": stream
        });
      } else if (errorMessage.includes("NOSCRIPT")) {
        logger.warn("Redis script not loaded for stream, skipping claim operation", {
          "aiki.stream": stream
        });
      } else {
        logger.error("Failed to claim messages from stream", {
          "aiki.error": errorMessage,
          "aiki.messageIds": messageIds.length,
          "aiki.workerId": workerId,
          "aiki.consumerGroup": consumerGroup,
          "aiki.stream": stream
        });
      }
      return null;
    }
  });
  const settled = await Promise.allSettled(claimPromises);
  const claimedStreamEntries = [];
  for (const result of settled) {
    if (result.status === "fulfilled" && result.value !== null) {
      claimedStreamEntries.push([result.value.stream, result.value.claimedMessages]);
    }
  }
  if (!isNonEmptyArray(claimedStreamEntries)) {
    return [];
  }
  return processRedisStreamMessages(redis, logger, streamConsumerGroupMap, claimedStreamEntries);
}
450
/**
 * Lists messages pending longer than `minIdleMs` that belong to OTHER
 * consumers, up to `maxClaim` total, with the budget distributed round-robin
 * across the (pre-shuffled) streams. This worker's own pending messages are
 * excluded — it already holds them.
 */
async function findClaimableRedisStreamMessages(redis, logger, shuffledStreams, streamConsumerGroupMap, workerId, maxClaim, minIdleMs) {
  const claimableMessages = [];
  const claimSizePerStream = distributeRoundRobin(maxClaim, shuffledStreams.length);
  const pendingPromises = [];
  for (let i = 0; i < shuffledStreams.length; i++) {
    const stream = shuffledStreams[i];
    if (!stream) {
      continue;
    }
    // Skip streams with no claim budget this round.
    const claimSize = claimSizePerStream[i];
    if (!claimSize || claimSize === 0) {
      continue;
    }
    const consumerGroup = streamConsumerGroupMap.get(stream);
    if (!consumerGroup) {
      continue;
    }
    // XPENDING with IDLE filter over the full id range ("-".."+"), capped at
    // claimSize rows; tag each result with its stream for the merge below.
    const pendingPromise = redis.xpending(stream, consumerGroup, "IDLE", minIdleMs, "-", "+", claimSize).then((result) => ({ stream, result }));
    pendingPromises.push(pendingPromise);
  }
  // allSettled: one stream's failure must not discard the others' results.
  const pendingResults = await Promise.allSettled(pendingPromises);
  for (const pendingResult of pendingResults) {
    if (pendingResult.status !== "fulfilled") {
      continue;
    }
    const { stream, result } = pendingResult.value;
    const parsedResult = RedisStreamPendingMessagesSchema.safeParse(result);
    if (!parsedResult.success) {
      logger.error("Invalid XPENDING response", {
        "aiki.stream": stream,
        "aiki.error": z.treeifyError(parsedResult.error)
      });
      continue;
    }
    // Rows are [messageId, consumerName, idleTimeMs, deliveryCount].
    for (const [messageId, consumerName, _idleTimeMs, _deliveryCount] of parsedResult.data) {
      if (consumerName === workerId) {
        continue;
      }
      claimableMessages.push({ stream, messageId });
    }
  }
  return claimableMessages;
}
493
+
494
+ // subscribers/strategy-resolver.ts
495
// Instantiate the subscriber strategy implementation for the requested type.
// (Polling strategies are not wired up in this build.)
function resolveSubscriberStrategy(client2, strategy, workflows, workerShards) {
  if (strategy.type === "redis_streams") {
    return createRedisStreamsStrategy(client2, strategy, workflows, workerShards);
  }
  // Exhaustiveness fallthrough: surfaces the unhandled strategy type.
  return strategy.type;
}
507
+
508
+ // client.ts
509
// Create an Aiki client. Async to match the public Promise-returning API;
// construction itself is synchronous.
async function client(params) {
  return new ClientImpl(params);
}
512
// Concrete Client implementation: owns the oRPC HTTP api handle, a lazily
// created Redis connection, and the symbol-keyed INTERNAL surface consumed
// by the worker/subscriber packages.
var ClientImpl = class {
  constructor(params) {
    this.params = params;
    this.logger = params.logger ?? new ConsoleLogger();
    const rpcLink = new RPCLink({ url: `${params.url}` });
    this.api = createORPCClient(rpcLink);
    this.logger.info("Aiki client initialized", {
      "aiki.url": params.url
    });
    // Internal surface, hidden behind the INTERNAL symbol so it is not part
    // of the public Client API.
    this[INTERNAL2] = {
      subscriber: {
        create: (strategy, workflows, workerShards) => resolveSubscriberStrategy(this, strategy, workflows, workerShards)
      },
      redis: {
        getConnection: () => this.getRedisConnection(),
        closeConnection: () => this.closeRedisConnection()
      },
      contextFactory: this.params.contextFactory
    };
  }
  api;
  [INTERNAL2];
  logger;
  // Lazily initialized in getRedisConnection(); undefined until first use.
  redisConnection;
  // Shut the client down; currently this only releases the Redis connection.
  async close() {
    this.logger.info("Closing Aiki client");
    await this.closeRedisConnection();
  }
  // Create (once) and return the ioredis connection. Throws when the client
  // was constructed without a redis config.
  getRedisConnection() {
    if (!this.redisConnection) {
      if (!this.params.redis) {
        throw new Error("Redis configuration not provided to client. Add 'redis' to ClientParams.");
      }
      this.redisConnection = new Redis(this.params.redis);
    }
    return this.redisConnection;
  }
  // Gracefully quit and drop the cached Redis connection, if any.
  async closeRedisConnection() {
    if (this.redisConnection) {
      await this.redisConnection.quit();
      this.redisConnection = void 0;
    }
  }
};
556
+ export {
557
+ ConsoleLogger,
558
+ client
559
+ };
package/package.json ADDED
@@ -0,0 +1,30 @@
1
+ {
2
+ "name": "@aikirun/client",
3
+ "version": "0.5.3",
4
+ "description": "Client SDK for Aiki - connect to the server, start workflows, and manage execution",
5
+ "type": "module",
6
+ "main": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.js"
12
+ }
13
+ },
14
+ "files": [
15
+ "dist"
16
+ ],
17
+ "scripts": {
18
+ "build": "tsup"
19
+ },
20
+ "dependencies": {
21
+ "@aikirun/types": "0.5.3",
22
+ "@orpc/client": "^1.9.3",
23
+ "ioredis": "^5.4.1",
24
+ "zod": "^4.1.12"
25
+ },
26
+ "publishConfig": {
27
+ "access": "public"
28
+ },
29
+ "license": "Apache-2.0"
30
+ }