flowfn 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/index.d.mts +1305 -0
  2. package/dist/index.d.ts +1305 -0
  3. package/dist/index.js +3180 -0
  4. package/dist/index.js.map +1 -0
  5. package/dist/index.mjs +3088 -0
  6. package/dist/index.mjs.map +1 -0
  7. package/docs/API.md +801 -0
  8. package/docs/USAGE.md +619 -0
  9. package/package.json +75 -0
  10. package/src/adapters/base.ts +46 -0
  11. package/src/adapters/memory.ts +183 -0
  12. package/src/adapters/postgres/index.ts +383 -0
  13. package/src/adapters/postgres/postgres.test.ts +100 -0
  14. package/src/adapters/postgres/schema.ts +110 -0
  15. package/src/adapters/redis.test.ts +124 -0
  16. package/src/adapters/redis.ts +331 -0
  17. package/src/core/flow-fn.test.ts +70 -0
  18. package/src/core/flow-fn.ts +198 -0
  19. package/src/core/metrics.ts +198 -0
  20. package/src/core/scheduler.test.ts +80 -0
  21. package/src/core/scheduler.ts +154 -0
  22. package/src/index.ts +57 -0
  23. package/src/monitoring/health.ts +261 -0
  24. package/src/patterns/backoff.ts +30 -0
  25. package/src/patterns/batching.ts +248 -0
  26. package/src/patterns/circuit-breaker.test.ts +52 -0
  27. package/src/patterns/circuit-breaker.ts +52 -0
  28. package/src/patterns/priority.ts +146 -0
  29. package/src/patterns/rate-limit.ts +290 -0
  30. package/src/patterns/retry.test.ts +62 -0
  31. package/src/queue/batch.test.ts +35 -0
  32. package/src/queue/dependencies.test.ts +33 -0
  33. package/src/queue/dlq.ts +222 -0
  34. package/src/queue/job.ts +67 -0
  35. package/src/queue/queue.ts +243 -0
  36. package/src/queue/types.ts +153 -0
  37. package/src/queue/worker.ts +66 -0
  38. package/src/storage/event-log.ts +205 -0
  39. package/src/storage/job-storage.ts +206 -0
  40. package/src/storage/workflow-storage.ts +182 -0
  41. package/src/stream/stream.ts +194 -0
  42. package/src/stream/types.ts +81 -0
  43. package/src/utils/hashing.ts +29 -0
  44. package/src/utils/id-generator.ts +109 -0
  45. package/src/utils/serialization.ts +142 -0
  46. package/src/utils/time.ts +167 -0
  47. package/src/workflow/advanced.test.ts +43 -0
  48. package/src/workflow/events.test.ts +39 -0
  49. package/src/workflow/types.ts +132 -0
  50. package/src/workflow/workflow.test.ts +55 -0
  51. package/src/workflow/workflow.ts +422 -0
  52. package/tests/dlq.test.ts +205 -0
  53. package/tests/health.test.ts +228 -0
  54. package/tests/integration.test.ts +253 -0
  55. package/tests/stream.test.ts +233 -0
  56. package/tests/workflow.test.ts +286 -0
  57. package/tsconfig.json +17 -0
  58. package/tsup.config.ts +10 -0
  59. package/vitest.config.ts +15 -0
@@ -0,0 +1,110 @@
1
+ import { pgTable, varchar, text, jsonb, integer, bigint, timestamp, index, uniqueIndex } from 'drizzle-orm/pg-core';
2
+
3
// Queue jobs table: one row per job, keyed by job id.
export const jobs = pgTable('flowfn_jobs', {
  id: varchar('id', { length: 255 }).primaryKey(),
  queue: varchar('queue', { length: 255 }).notNull(),
  name: varchar('name', { length: 255 }).notNull(),
  data: jsonb('data').notNull(),   // caller-supplied job payload
  opts: jsonb('opts'),             // enqueue-time options (e.g. delay)

  state: varchar('state', { length: 50 }).notNull(), // waiting, active, completed, failed, delayed, paused
  priority: integer('priority').default(0),
  progress: integer('progress').default(0),
  returnValue: jsonb('return_value'), // handler result, once completed

  // Epoch-millisecond instants stored as bigint, read back as JS numbers.
  timestamp: bigint('timestamp', { mode: 'number' }).notNull(),
  processedOn: bigint('processed_on', { mode: 'number' }),
  finishedOn: bigint('finished_on', { mode: 'number' }),
  delay: bigint('delay', { mode: 'number' }).default(0),

  attemptsMade: integer('attempts_made').default(0),
  failedReason: text('failed_reason'),
  stacktrace: jsonb('stacktrace'),

  createdAt: timestamp('created_at').defaultNow(),
  updatedAt: timestamp('updated_at').defaultNow(),
}, (table) => ({
  // Composite indexes on (queue, state) and (queue, priority), plus a plain
  // index on timestamp for time-range scans.
  queueStateIdx: index('idx_queue_state').on(table.queue, table.state),
  queuePriorityIdx: index('idx_queue_priority').on(table.queue, table.priority),
  timestampIdx: index('idx_timestamp').on(table.timestamp),
}));
31
+
32
// Stream messages table: one row per published message.
export const messages = pgTable('flowfn_messages', {
  id: varchar('id', { length: 255 }).primaryKey(),
  stream: varchar('stream', { length: 255 }).notNull(),
  data: jsonb('data').notNull(),   // message payload
  headers: jsonb('headers'),       // optional metadata headers

  // Optional partitioning/ordering fields; nullable, so presumably only set
  // when the producing adapter uses them — TODO confirm against producers.
  partition: integer('partition'),
  offset: bigint('offset', { mode: 'number' }),
  key: varchar('key', { length: 255 }),

  timestamp: bigint('timestamp', { mode: 'number' }).notNull(), // epoch ms
  createdAt: timestamp('created_at').defaultNow(),
}, (table) => ({
  // Composite index for per-stream time-ordered reads.
  streamTimestampIdx: index('idx_stream_timestamp').on(table.stream, table.timestamp),
}));
47
+
48
// Consumer-group progress table: tracks, per (stream, group, consumer),
// the last message/offset acknowledged.
export const consumerGroups = pgTable('flowfn_consumer_groups', {
  id: varchar('id', { length: 255 }).primaryKey(),
  stream: varchar('stream', { length: 255 }).notNull(),
  groupId: varchar('group_id', { length: 255 }).notNull(),
  consumerId: varchar('consumer_id', { length: 255 }).notNull(),

  lastMessageId: varchar('last_message_id', { length: 255 }),
  lastOffset: bigint('last_offset', { mode: 'number' }),
  lag: integer('lag'), // messages behind the stream head

  createdAt: timestamp('created_at').defaultNow(),
  updatedAt: timestamp('updated_at').defaultNow(),
}, (table) => ({
  // Uniqueness guarantees a single progress row per consumer in a group.
  streamGroupConsumerIdx: uniqueIndex('idx_stream_group_consumer').on(table.stream, table.groupId, table.consumerId),
}));
63
+
64
// Workflow definitions table: the declarative definition plus a version
// counter (defaults to 1).
export const workflows = pgTable('flowfn_workflows', {
  id: varchar('id', { length: 255 }).primaryKey(),
  name: varchar('name', { length: 255 }).notNull(),
  definition: jsonb('definition').notNull(), // serialized workflow definition
  version: integer('version').default(1),

  status: varchar('status', { length: 50 }).notNull(),
  metadata: jsonb('metadata'),

  createdAt: timestamp('created_at').defaultNow(),
  updatedAt: timestamp('updated_at').defaultNow(),
});
76
+
77
// Workflow executions table: one row per run of a workflow, holding its
// input/state/output snapshots and step progress.
export const workflowExecutions = pgTable('flowfn_workflow_executions', {
  id: varchar('id', { length: 255 }).primaryKey(),
  workflowId: varchar('workflow_id', { length: 255 }).notNull(),
  workflowName: varchar('workflow_name', { length: 255 }),

  status: varchar('status', { length: 50 }).notNull(),
  input: jsonb('input'),
  state: jsonb('state'),   // intermediate execution state
  output: jsonb('output'), // final result, if any
  error: text('error'),    // failure message, if any

  currentStep: varchar('current_step', { length: 255 }),
  completedSteps: jsonb('completed_steps'),

  // Note: unlike other tables, timing here is epoch-ms bigints rather than
  // `timestamp` columns with defaultNow().
  startedAt: bigint('started_at', { mode: 'number' }).notNull(),
  updatedAt: bigint('updated_at', { mode: 'number' }),
  completedAt: bigint('completed_at', { mode: 'number' }),
  durationMs: integer('duration_ms'),
}, (table) => ({
  workflowIdx: index('idx_workflow').on(table.workflowId),
  statusIdx: index('idx_status').on(table.status),
}));
99
+
100
// Workflow event log table: append-only events emitted during an execution.
export const workflowEvents = pgTable('flowfn_workflow_events', {
  id: varchar('id', { length: 255 }).primaryKey(),
  executionId: varchar('execution_id', { length: 255 }).notNull(),

  type: varchar('type', { length: 50 }).notNull(),
  step: varchar('step', { length: 255 }), // step name, when event is step-scoped
  timestamp: bigint('timestamp', { mode: 'number' }).notNull(), // epoch ms
  data: jsonb('data'),
}, (table) => ({
  // Index for fetching an execution's full event history.
  executionIdx: index('idx_execution').on(table.executionId),
}));
@@ -0,0 +1,124 @@
1
+ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
2
+ import { RedisAdapter } from './redis.js';
3
+ import { JobImpl } from '../queue/job.js';
4
+
5
+ // Mock ioredis to return RedisMock
6
// Unit tests for RedisAdapter against ioredis-mock. The mock lacks several
// stream commands (XGROUP, XACK, XREADGROUP), which are polyfilled below;
// these tests verify data flow, not real consumer-group semantics.

// Mock ioredis to return RedisMock
vi.mock('ioredis', async (importOriginal) => {
  const RedisMock = (await import('ioredis-mock')).default;
  return {
    default: RedisMock,
  };
});

describe('RedisAdapter', () => {
  let adapter: RedisAdapter;
  let redis: any;

  beforeEach(async () => {
    const RedisMock = (await import('ioredis-mock')).default;
    redis = new RedisMock();

    // Polyfill missing stream commands for ioredis-mock
    redis.xgroup = vi.fn().mockResolvedValue('OK');
    redis.xack = vi.fn().mockResolvedValue(1);

    // Mock xreadgroup to behave like xread for testing purposes
    // In reality, it should filter by group/consumer, but for this unit test
    // we just want to verify data flow.
    redis.xreadgroup = vi.fn().mockImplementation(async (cmd, group, consumer, block, ms, count, n, streams, key, id) => {
      // Mapping arguments is tricky because ioredis passes them as varargs
      // The call in adapter is: xreadgroup('GROUP', group, consumer, 'BLOCK', 1000, 'COUNT', 10, 'STREAMS', streamKey, '>')
      // We'll just call xread internally to get data
      // xread('BLOCK', 1000, 'STREAMS', streamKey, '0-0') to get all

      // Extract stream key - it's usually near the end
      // The adapter calls it as: .xreadgroup('GROUP', group, consumer, 'BLOCK', 1000, 'COUNT', 10, 'STREAMS', streamKey, '>')

      // Simulating a response
      // NOTE(review): the stream key is hard-coded here, so this polyfill only
      // works for the 'group-stream' test below — confirm if more consumer
      // group tests are added.
      const data = await redis.xread('STREAMS', 'flowfn:stream:group-stream', '0-0');
      return data;
    });

    adapter = new RedisAdapter({ connection: redis });
  });

  afterEach(async () => {
    // Closes the adapter's (mock) connection between tests.
    await adapter.cleanup();
  });

  it('should enqueue and dequeue a job', async () => {
    const job = new JobImpl('test-job', { data: 123 });
    await adapter.enqueue('test-queue', job);

    const dequeued = await adapter.dequeue('test-queue');
    expect(dequeued).toBeDefined();
    expect(dequeued!.id).toBe(job.id);
    expect(dequeued!.data).toEqual({ data: 123 });
  });

  it('should handle delayed jobs', async () => {
    const job = new JobImpl('delayed-job', { data: 'late' }, { delay: 1000 });
    await adapter.enqueue('test-queue', job);

    // Should be empty initially
    const immediate = await adapter.dequeue('test-queue');
    expect(immediate).toBeNull();

    // The delayed job should be parked in the per-queue sorted set instead.
    const delayedKey = 'flowfn:delayed:test-queue';
    const delayedJobs = await redis.zcard(delayedKey);
    expect(delayedJobs).toBe(1);
  });

  it('should publish and subscribe to a stream (XADD/XREAD)', async () => {
    const received: any[] = [];
    await adapter.subscribe('test-stream', async (msg) => {
      received.push(msg);
    });

    const message = {
      id: '1',
      stream: 'test-stream',
      data: { hello: 'world' },
      headers: { foo: 'bar' },
      timestamp: Date.now(),
      ack: async () => {},
      nack: async () => {}
    };
    await adapter.publish('test-stream', message);

    // Wait for polling loop
    await new Promise(r => setTimeout(r, 200));
    expect(received.length).toBeGreaterThan(0);
    expect(received[0].data).toEqual({ hello: 'world' });
    expect(received[0].headers).toEqual({ foo: 'bar' });
  });

  it('should consume via consumer group', async () => {
    const received: any[] = [];

    await adapter.consume('group-stream', 'mygroup', 'consumer1', async (msg) => {
      received.push(msg);
      await msg.ack();
    });

    const message = {
      id: '1',
      stream: 'group-stream',
      data: { group: 'test' },
      timestamp: Date.now(),
      ack: async () => {},
      nack: async () => {}
    };
    await adapter.publish('group-stream', message);

    // Wait for polling loop
    await new Promise(r => setTimeout(r, 200));

    expect(received.length).toBeGreaterThan(0);
    expect(received[0].data).toEqual({ group: 'test' });

    // Verify xgroup/xack called
    expect(redis.xgroup).toHaveBeenCalled();
    expect(redis.xack).toHaveBeenCalled();
  });
});
@@ -0,0 +1,331 @@
1
+ import { FlowAdapter } from "./base.js";
2
+ import { Job, QueueStats } from "../queue/types.js";
3
+ import {
4
+ Message,
5
+ MessageHandler,
6
+ Subscription,
7
+ StreamInfo,
8
+ } from "../stream/types.js";
9
+ import { WorkflowExecution } from "../workflow/types.js";
10
+ import Redis, { RedisOptions } from "ioredis";
11
+
12
/** Construction options for the Redis adapter. */
export interface RedisAdapterOptions {
  /** An existing ioredis client to reuse, or connection options to create one. */
  connection?: Redis | RedisOptions;
  /** Key namespace prefix; defaults to "flowfn:". */
  prefix?: string;
}
16
+
17
+ export class RedisAdapter implements FlowAdapter {
18
+ private redis: Redis;
19
+ private prefix: string;
20
+ private subscriptions: Map<string, Redis[]> = new Map();
21
+
22
+ constructor(options: RedisAdapterOptions = {}) {
23
+ if (options.connection instanceof Redis) {
24
+ this.redis = options.connection;
25
+ } else {
26
+ this.redis = new Redis((options.connection as RedisOptions) || {});
27
+ }
28
+ this.prefix = options.prefix || "flowfn:";
29
+ }
30
+
31
+ private key(type: string, name: string): string {
32
+ return `${this.prefix}${type}:${name}`;
33
+ }
34
+
35
+ async enqueue(queueName: string, job: Job): Promise<string> {
36
+ const queueKey = this.key("queue", queueName);
37
+ const jobKey = this.key("job", job.id);
38
+
39
+ // Store job data
40
+ await this.redis.set(jobKey, JSON.stringify(job));
41
+
42
+ if (job.opts.delay && job.opts.delay > 0) {
43
+ const delayedKey = this.key("delayed", queueName);
44
+ await this.redis.zadd(delayedKey, Date.now() + job.opts.delay, job.id);
45
+ } else {
46
+ await this.redis.lpush(queueKey, job.id);
47
+ }
48
+
49
+ return job.id;
50
+ }
51
+
52
+ async dequeue(queueName: string): Promise<Job | null> {
53
+ const queueKey = this.key("queue", queueName);
54
+ const delayedKey = this.key("delayed", queueName);
55
+
56
+ // Check delayed jobs first
57
+ const now = Date.now();
58
+ const delayed = await this.redis.zrangebyscore(
59
+ delayedKey,
60
+ 0,
61
+ now,
62
+ "LIMIT",
63
+ 0,
64
+ 1
65
+ );
66
+ if (delayed.length > 0) {
67
+ const jobId = delayed[0];
68
+ await this.redis.zrem(delayedKey, jobId);
69
+ await this.redis.lpush(queueKey, jobId);
70
+ }
71
+
72
+ const jobId = await this.redis.rpop(queueKey);
73
+ if (!jobId) return null;
74
+
75
+ const jobData = await this.redis.get(this.key("job", jobId));
76
+ if (!jobData) return null;
77
+
78
+ return JSON.parse(jobData);
79
+ }
80
+
81
+ async ack(queue: string, jobId: string): Promise<void> {
82
+ await this.redis.del(this.key("job", jobId));
83
+ }
84
+
85
+ async nack(
86
+ queueName: string,
87
+ jobId: string,
88
+ requeue: boolean = true
89
+ ): Promise<void> {
90
+ if (requeue) {
91
+ await this.redis.lpush(this.key("queue", queueName), jobId);
92
+ }
93
+ }
94
+
95
+ async publish(streamName: string, message: Message): Promise<string> {
96
+ const streamKey = this.key("stream", streamName);
97
+ const id = await this.redis.xadd(
98
+ streamKey,
99
+ "*",
100
+ "data",
101
+ JSON.stringify(message.data),
102
+ "headers",
103
+ JSON.stringify(message.headers || {})
104
+ );
105
+ return id || "";
106
+ }
107
+
108
+ async subscribe(
109
+ streamName: string,
110
+ handler: MessageHandler<any>
111
+ ): Promise<Subscription> {
112
+ const streamKey = this.key("stream", streamName);
113
+ let active = true;
114
+
115
+ // Simple polling for new messages from '$' (end)
116
+ // Real impl might use blocking XREAD but that requires a dedicated connection or careful management
117
+ // We'll use a polling loop with XREAD for simplicity on shared connection
118
+ let lastId = "$";
119
+
120
+ const poll = async () => {
121
+ if (!active) return;
122
+ try {
123
+ const results = (await this.redis.xread(
124
+ "STREAMS",
125
+ streamKey,
126
+ lastId,
127
+ "BLOCK",
128
+ 1000
129
+ )) as any;
130
+ if (results) {
131
+ for (const [stream, messages] of results) {
132
+ for (const [id, fields] of messages) {
133
+ lastId = id;
134
+ // fields is array [key1, val1, key2, val2...]
135
+ const dataIdx = fields.indexOf("data");
136
+ const headersIdx = fields.indexOf("headers");
137
+
138
+ const data = dataIdx > -1 ? JSON.parse(fields[dataIdx + 1]) : {};
139
+ const headers =
140
+ headersIdx > -1 ? JSON.parse(fields[headersIdx + 1]) : {};
141
+
142
+ const msg: Message = {
143
+ id,
144
+ stream: streamName,
145
+ data,
146
+ headers,
147
+ timestamp: parseInt(id.split("-")[0]),
148
+ ack: async () => {},
149
+ nack: async () => {},
150
+ };
151
+ handler(msg).catch(console.error);
152
+ }
153
+ }
154
+ }
155
+ } catch (err) {
156
+ console.error("Redis stream poll error", err);
157
+ await new Promise((r) => setTimeout(r, 1000));
158
+ }
159
+ if (active) setTimeout(poll, 0);
160
+ };
161
+
162
+ // Start polling in next tick
163
+ setTimeout(poll, 0);
164
+
165
+ return {
166
+ unsubscribe: async () => {
167
+ active = false;
168
+ },
169
+ };
170
+ }
171
+
172
+ async consume(
173
+ streamName: string,
174
+ group: string,
175
+ consumer: string,
176
+ handler: MessageHandler<any>
177
+ ): Promise<Subscription> {
178
+ const streamKey = this.key("stream", streamName);
179
+
180
+ // Ensure group exists
181
+ try {
182
+ await this.redis.xgroup("CREATE", streamKey, group, "$", "MKSTREAM");
183
+ } catch (e: any) {
184
+ if (!e.message.includes("BUSYGROUP")) throw e;
185
+ }
186
+
187
+ let active = true;
188
+ const poll = async () => {
189
+ if (!active) return;
190
+ try {
191
+ // Read as consumer group
192
+ const results = (await this.redis.xreadgroup(
193
+ "GROUP",
194
+ group,
195
+ consumer,
196
+ "COUNT",
197
+ 10,
198
+ "BLOCK",
199
+ 1000,
200
+ "STREAMS",
201
+ streamKey,
202
+ ">"
203
+ )) as any;
204
+
205
+ if (results) {
206
+ for (const [stream, messages] of results) {
207
+ for (const [id, fields] of messages) {
208
+ const dataIdx = fields.indexOf("data");
209
+ const headersIdx = fields.indexOf("headers");
210
+ const data = dataIdx > -1 ? JSON.parse(fields[dataIdx + 1]) : {};
211
+ const headers =
212
+ headersIdx > -1 ? JSON.parse(fields[headersIdx + 1]) : {};
213
+
214
+ const msg: Message = {
215
+ id,
216
+ stream: streamName,
217
+ data,
218
+ headers,
219
+ timestamp: parseInt(id.split("-")[0]),
220
+ ack: async () => {
221
+ await this.redis.xack(streamKey, group, id);
222
+ },
223
+ nack: async (requeue = true) => {
224
+ // Redis streams don't support NACK/requeue natively like AMQP.
225
+ // The message stays in PEL (Pending Entries List).
226
+ // We could claim it or just leave it for another consumer to claim.
227
+ },
228
+ };
229
+ handler(msg).catch(console.error);
230
+ }
231
+ }
232
+ }
233
+ } catch (err) {
234
+ console.error("Redis consumer poll error", err);
235
+ await new Promise((r) => setTimeout(r, 1000));
236
+ }
237
+ if (active) setTimeout(poll, 0);
238
+ };
239
+
240
+ setTimeout(poll, 0);
241
+
242
+ return {
243
+ unsubscribe: async () => {
244
+ active = false;
245
+ },
246
+ };
247
+ }
248
+
249
+ async createConsumerGroup(stream: string, group: string): Promise<void> {
250
+ const streamKey = this.key("stream", stream);
251
+ try {
252
+ await this.redis.xgroup("CREATE", streamKey, group, "$", "MKSTREAM");
253
+ } catch (e: any) {
254
+ if (!e.message.includes("BUSYGROUP")) throw e;
255
+ }
256
+ }
257
+
258
+ async saveWorkflowState(
259
+ workflowId: string,
260
+ state: WorkflowExecution
261
+ ): Promise<void> {
262
+ await this.redis.set(
263
+ this.key("workflow", workflowId),
264
+ JSON.stringify(state)
265
+ );
266
+ }
267
+
268
+ async loadWorkflowState(
269
+ workflowId: string
270
+ ): Promise<WorkflowExecution | null> {
271
+ const data = await this.redis.get(this.key("workflow", workflowId));
272
+ return data ? JSON.parse(data) : null;
273
+ }
274
+
275
+ async getJob(queue: string, jobId: string): Promise<Job | null> {
276
+ const jobData = await this.redis.get(this.key("job", jobId));
277
+ return jobData ? JSON.parse(jobData) : null;
278
+ }
279
+
280
+ async getJobs(queue: string, status: string): Promise<Job[]> {
281
+ // Simplified implementation - would need custom tracking for status in production
282
+ return [];
283
+ }
284
+
285
+ async getAllJobs(queue: string): Promise<Job[]> {
286
+ // Simplified implementation - would need custom tracking in production
287
+ return [];
288
+ }
289
+
290
+ async cleanJobs(
291
+ queue: string,
292
+ grace: number,
293
+ status: string
294
+ ): Promise<number> {
295
+ // Simplified implementation - would need custom tracking in production
296
+ return 0;
297
+ }
298
+
299
+ async getQueueStats(queueName: string): Promise<QueueStats> {
300
+ const length = await this.redis.llen(this.key("queue", queueName));
301
+ const delayedLength = await this.redis.zcard(
302
+ this.key("delayed", queueName)
303
+ );
304
+ return {
305
+ waiting: length,
306
+ active: 0,
307
+ completed: 0,
308
+ failed: 0,
309
+ delayed: delayedLength,
310
+ paused: 0,
311
+ };
312
+ }
313
+
314
+ async getStreamInfo(streamName: string): Promise<StreamInfo> {
315
+ return {
316
+ name: streamName,
317
+ length: 0,
318
+ groups: 0,
319
+ };
320
+ }
321
+
322
+ async cleanup(): Promise<void> {
323
+ for (const subs of this.subscriptions.values()) {
324
+ for (const sub of subs) {
325
+ await sub.quit();
326
+ }
327
+ }
328
+ this.subscriptions.clear();
329
+ await this.redis.quit();
330
+ }
331
+ }
@@ -0,0 +1,70 @@
1
+ import { describe, it, expect, beforeEach, afterEach } from 'vitest';
2
+ import { createFlow } from './flow-fn.js';
3
+
4
// Smoke tests for the public createFlow API using the in-memory adapter,
// covering the three primitives: queue, stream, and workflow.
// NOTE(review): these tests rely on short real-time sleeps (50–150 ms) and
// could be flaky on a heavily loaded machine.
describe('flowfn', () => {
  let flow;

  beforeEach(() => {
    flow = createFlow({ adapter: 'memory' });
  });

  afterEach(async () => {
    await flow.close();
  });

  it('should process jobs in a queue', async () => {
    const queue = flow.queue('test-queue');

    // Add job
    await queue.add('job1', { foo: 'bar' });

    const stats = await queue.getJobCounts();
    expect(stats.waiting).toBe(1);

    // Process: resolving inside the handler proves the job was delivered
    // with its original payload.
    await new Promise<void>(resolve => {
      queue.process(async (job) => {
        expect(job.data.foo).toBe('bar');
        resolve();
        return { processed: true };
      });
    });
  });

  it('should stream messages', async () => {
    const stream = flow.stream('test-stream');
    const received: any[] = [];

    await stream.subscribe(async (msg) => {
      received.push(msg.data);
    });

    await stream.publish({ event: 'test' });

    // Wait for event loop
    await new Promise(r => setTimeout(r, 50));

    expect(received).toHaveLength(1);
    expect(received[0]).toEqual({ event: 'test' });
  });

  it('should execute a simple workflow', async () => {
    const workflow = flow.workflow('test-workflow')
      .step('step1', async (ctx) => {
        await ctx.sleep(100); // Wait so we can catch it running
        ctx.set('step1', 'done');
        return 'result1';
      })
      .build();

    const execution = await workflow.execute({ input: 'test' });

    // execute() returns before the steps finish, so the run is still active.
    expect(execution.status).toBe('running');

    // Wait for execution
    await new Promise(r => setTimeout(r, 150));

    const status = await workflow.getExecution(execution.id);
    expect(status.status).toBe('completed');
  });
});
+ });