@dojocho/effect-ts 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/DOJO.md +22 -0
  2. package/dojo.json +50 -0
  3. package/katas/001-hello-effect/SENSEI.md +72 -0
  4. package/katas/001-hello-effect/solution.test.ts +35 -0
  5. package/katas/001-hello-effect/solution.ts +16 -0
  6. package/katas/002-transform-with-map/SENSEI.md +72 -0
  7. package/katas/002-transform-with-map/solution.test.ts +33 -0
  8. package/katas/002-transform-with-map/solution.ts +16 -0
  9. package/katas/003-generator-pipelines/SENSEI.md +72 -0
  10. package/katas/003-generator-pipelines/solution.test.ts +40 -0
  11. package/katas/003-generator-pipelines/solution.ts +29 -0
  12. package/katas/004-flatmap-and-chaining/SENSEI.md +80 -0
  13. package/katas/004-flatmap-and-chaining/solution.test.ts +34 -0
  14. package/katas/004-flatmap-and-chaining/solution.ts +18 -0
  15. package/katas/005-pipe-composition/SENSEI.md +81 -0
  16. package/katas/005-pipe-composition/solution.test.ts +41 -0
  17. package/katas/005-pipe-composition/solution.ts +19 -0
  18. package/katas/006-handle-errors/SENSEI.md +86 -0
  19. package/katas/006-handle-errors/solution.test.ts +53 -0
  20. package/katas/006-handle-errors/solution.ts +30 -0
  21. package/katas/007-tagged-errors/SENSEI.md +79 -0
  22. package/katas/007-tagged-errors/solution.test.ts +82 -0
  23. package/katas/007-tagged-errors/solution.ts +37 -0
  24. package/katas/008-error-patterns/SENSEI.md +89 -0
  25. package/katas/008-error-patterns/solution.test.ts +41 -0
  26. package/katas/008-error-patterns/solution.ts +38 -0
  27. package/katas/009-option-type/SENSEI.md +96 -0
  28. package/katas/009-option-type/solution.test.ts +49 -0
  29. package/katas/009-option-type/solution.ts +26 -0
  30. package/katas/010-either-and-exit/SENSEI.md +86 -0
  31. package/katas/010-either-and-exit/solution.test.ts +33 -0
  32. package/katas/010-either-and-exit/solution.ts +17 -0
  33. package/katas/011-services-and-context/SENSEI.md +82 -0
  34. package/katas/011-services-and-context/solution.test.ts +23 -0
  35. package/katas/011-services-and-context/solution.ts +17 -0
  36. package/katas/012-layers/SENSEI.md +73 -0
  37. package/katas/012-layers/solution.test.ts +23 -0
  38. package/katas/012-layers/solution.ts +26 -0
  39. package/katas/013-testing-effects/SENSEI.md +88 -0
  40. package/katas/013-testing-effects/solution.test.ts +41 -0
  41. package/katas/013-testing-effects/solution.ts +20 -0
  42. package/katas/014-schema-basics/SENSEI.md +81 -0
  43. package/katas/014-schema-basics/solution.test.ts +35 -0
  44. package/katas/014-schema-basics/solution.ts +25 -0
  45. package/katas/015-domain-modeling/SENSEI.md +85 -0
  46. package/katas/015-domain-modeling/solution.test.ts +46 -0
  47. package/katas/015-domain-modeling/solution.ts +42 -0
  48. package/katas/016-retry-and-schedule/SENSEI.md +72 -0
  49. package/katas/016-retry-and-schedule/solution.test.ts +26 -0
  50. package/katas/016-retry-and-schedule/solution.ts +23 -0
  51. package/katas/017-parallel-effects/SENSEI.md +70 -0
  52. package/katas/017-parallel-effects/solution.test.ts +33 -0
  53. package/katas/017-parallel-effects/solution.ts +17 -0
  54. package/katas/018-race-and-timeout/SENSEI.md +75 -0
  55. package/katas/018-race-and-timeout/solution.test.ts +30 -0
  56. package/katas/018-race-and-timeout/solution.ts +27 -0
  57. package/katas/019-ref-and-state/SENSEI.md +72 -0
  58. package/katas/019-ref-and-state/solution.test.ts +29 -0
  59. package/katas/019-ref-and-state/solution.ts +16 -0
  60. package/katas/020-fibers/SENSEI.md +80 -0
  61. package/katas/020-fibers/solution.test.ts +23 -0
  62. package/katas/020-fibers/solution.ts +23 -0
  63. package/katas/021-acquire-release/SENSEI.md +57 -0
  64. package/katas/021-acquire-release/solution.test.ts +23 -0
  65. package/katas/021-acquire-release/solution.ts +22 -0
  66. package/katas/022-scoped-layers/SENSEI.md +52 -0
  67. package/katas/022-scoped-layers/solution.test.ts +35 -0
  68. package/katas/022-scoped-layers/solution.ts +19 -0
  69. package/katas/023-resource-patterns/SENSEI.md +52 -0
  70. package/katas/023-resource-patterns/solution.test.ts +20 -0
  71. package/katas/023-resource-patterns/solution.ts +13 -0
  72. package/katas/024-streams-basics/SENSEI.md +61 -0
  73. package/katas/024-streams-basics/solution.test.ts +30 -0
  74. package/katas/024-streams-basics/solution.ts +16 -0
  75. package/katas/025-stream-operations/SENSEI.md +59 -0
  76. package/katas/025-stream-operations/solution.test.ts +26 -0
  77. package/katas/025-stream-operations/solution.ts +17 -0
  78. package/katas/026-combining-streams/SENSEI.md +54 -0
  79. package/katas/026-combining-streams/solution.test.ts +20 -0
  80. package/katas/026-combining-streams/solution.ts +16 -0
  81. package/katas/027-data-pipelines/SENSEI.md +58 -0
  82. package/katas/027-data-pipelines/solution.test.ts +22 -0
  83. package/katas/027-data-pipelines/solution.ts +16 -0
  84. package/katas/028-logging-and-spans/SENSEI.md +58 -0
  85. package/katas/028-logging-and-spans/solution.test.ts +50 -0
  86. package/katas/028-logging-and-spans/solution.ts +20 -0
  87. package/katas/029-http-client/SENSEI.md +59 -0
  88. package/katas/029-http-client/solution.test.ts +49 -0
  89. package/katas/029-http-client/solution.ts +24 -0
  90. package/katas/030-capstone/SENSEI.md +63 -0
  91. package/katas/030-capstone/solution.test.ts +67 -0
  92. package/katas/030-capstone/solution.ts +55 -0
  93. package/katas/031-config-and-environment/SENSEI.md +77 -0
  94. package/katas/031-config-and-environment/solution.test.ts +38 -0
  95. package/katas/031-config-and-environment/solution.ts +11 -0
  96. package/katas/032-cause-and-defects/SENSEI.md +90 -0
  97. package/katas/032-cause-and-defects/solution.test.ts +50 -0
  98. package/katas/032-cause-and-defects/solution.ts +23 -0
  99. package/katas/033-pattern-matching/SENSEI.md +86 -0
  100. package/katas/033-pattern-matching/solution.test.ts +36 -0
  101. package/katas/033-pattern-matching/solution.ts +28 -0
  102. package/katas/034-deferred-and-coordination/SENSEI.md +85 -0
  103. package/katas/034-deferred-and-coordination/solution.test.ts +25 -0
  104. package/katas/034-deferred-and-coordination/solution.ts +24 -0
  105. package/katas/035-queue-and-backpressure/SENSEI.md +100 -0
  106. package/katas/035-queue-and-backpressure/solution.test.ts +25 -0
  107. package/katas/035-queue-and-backpressure/solution.ts +21 -0
  108. package/katas/036-schema-advanced/SENSEI.md +81 -0
  109. package/katas/036-schema-advanced/solution.test.ts +55 -0
  110. package/katas/036-schema-advanced/solution.ts +19 -0
  111. package/katas/037-cache-and-memoization/SENSEI.md +73 -0
  112. package/katas/037-cache-and-memoization/solution.test.ts +47 -0
  113. package/katas/037-cache-and-memoization/solution.ts +24 -0
  114. package/katas/038-metrics/SENSEI.md +91 -0
  115. package/katas/038-metrics/solution.test.ts +39 -0
  116. package/katas/038-metrics/solution.ts +23 -0
  117. package/katas/039-managed-runtime/SENSEI.md +75 -0
  118. package/katas/039-managed-runtime/solution.test.ts +29 -0
  119. package/katas/039-managed-runtime/solution.ts +19 -0
  120. package/katas/040-request-batching/SENSEI.md +87 -0
  121. package/katas/040-request-batching/solution.test.ts +56 -0
  122. package/katas/040-request-batching/solution.ts +32 -0
  123. package/package.json +22 -0
  124. package/skills/effect-patterns-building-apis/SKILL.md +2393 -0
  125. package/skills/effect-patterns-building-data-pipelines/SKILL.md +1876 -0
  126. package/skills/effect-patterns-concurrency/SKILL.md +2999 -0
  127. package/skills/effect-patterns-concurrency-getting-started/SKILL.md +351 -0
  128. package/skills/effect-patterns-core-concepts/SKILL.md +3199 -0
  129. package/skills/effect-patterns-domain-modeling/SKILL.md +1385 -0
  130. package/skills/effect-patterns-error-handling/SKILL.md +1212 -0
  131. package/skills/effect-patterns-error-handling-resilience/SKILL.md +179 -0
  132. package/skills/effect-patterns-error-management/SKILL.md +1668 -0
  133. package/skills/effect-patterns-getting-started/SKILL.md +237 -0
  134. package/skills/effect-patterns-making-http-requests/SKILL.md +1756 -0
  135. package/skills/effect-patterns-observability/SKILL.md +1586 -0
  136. package/skills/effect-patterns-platform/SKILL.md +1195 -0
  137. package/skills/effect-patterns-platform-getting-started/SKILL.md +179 -0
  138. package/skills/effect-patterns-project-setup--execution/SKILL.md +233 -0
  139. package/skills/effect-patterns-resource-management/SKILL.md +827 -0
  140. package/skills/effect-patterns-scheduling/SKILL.md +451 -0
  141. package/skills/effect-patterns-scheduling-periodic-tasks/SKILL.md +763 -0
  142. package/skills/effect-patterns-streams/SKILL.md +2052 -0
  143. package/skills/effect-patterns-streams-getting-started/SKILL.md +421 -0
  144. package/skills/effect-patterns-streams-sinks/SKILL.md +1181 -0
  145. package/skills/effect-patterns-testing/SKILL.md +1632 -0
  146. package/skills/effect-patterns-tooling-and-debugging/SKILL.md +1125 -0
  147. package/skills/effect-patterns-value-handling/SKILL.md +676 -0
  148. package/tsconfig.json +20 -0
  149. package/vitest.config.ts +3 -0
@@ -0,0 +1,1181 @@
1
+ ---
2
+ name: effect-patterns-streams-sinks
3
+ description: Effect-TS patterns for Streams Sinks. Use when working with streams sinks in Effect-TS applications.
4
+ ---
5
+ # Effect-TS Patterns: Streams Sinks
6
+ This skill provides 6 curated Effect-TS patterns for streams sinks.
7
+ Use this skill when working on tasks related to:
8
+ - streams sinks
9
+ - Best practices in Effect-TS applications
10
+ - Real-world patterns and solutions
11
+
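+ If you have not used sinks before, here is a minimal sketch of the core idea the patterns below build on: a stream is run into a sink, and the sink folds the whole stream down to a single result.
+
+ ```typescript
+ import { Effect, Sink, Stream } from "effect";
+
+ // Running a stream into a sink produces one value; Sink.sum totals the elements
+ const total: Effect.Effect<number> = Stream.make(1, 2, 3, 4).pipe(
+   Stream.run(Sink.sum)
+ );
+
+ // Effect.runPromise(total).then(console.log); // 10
+ ```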
12
+ ---
13
+
14
+ ## 🟡 Intermediate Patterns
15
+
16
+ ### Sink Pattern 1: Batch Insert Stream Records into Database
17
+
18
+ **Rule:** Batch stream records before database operations to improve throughput and reduce transaction overhead.
19
+
20
+ **Good Example:**
21
+
22
+ This example demonstrates streaming user records from a paginated API and batching them for efficient database insertion.
23
+
24
+ ```typescript
25
+ import { Effect, Stream, Sink, Chunk, Option } from "effect";
26
+
27
+ interface User {
28
+ readonly id: number;
29
+ readonly name: string;
30
+ readonly email: string;
31
+ }
32
+
33
+ interface PaginatedResponse {
34
+ readonly users: User[];
35
+ readonly nextPage: number | null;
36
+ }
37
+
38
+ // Mock API that returns paginated users
39
+ const fetchUserPage = (
40
+ page: number
41
+ ): Effect.Effect<PaginatedResponse> =>
42
+ Effect.succeed(
43
+ page < 10
44
+ ? {
45
+ users: Array.from({ length: 50 }, (_, i) => ({
46
+ id: page * 50 + i,
47
+ name: `User ${page * 50 + i}`,
48
+ email: `user${page * 50 + i}@example.com`,
49
+ })),
50
+ nextPage: page + 1,
51
+ }
52
+ : { users: [], nextPage: null }
53
+ ).pipe(Effect.delay("10 millis"));
54
+
55
+ // Mock database insert that takes a batch of users
56
+ const insertUserBatch = (
57
+ users: readonly User[]
58
+ ): Effect.Effect<number> =>
59
+ Effect.sync(() => {
60
+ console.log(`Inserting batch of ${users.length} users`);
61
+ return users.length;
62
+ }).pipe(Effect.delay("50 millis"));
63
+
64
+ // Create a stream of users from paginated API
65
+ const userStream: Stream.Stream<User> = Stream.paginateChunkEffect(
66
+ 0,
67
+ (page) =>
68
+ fetchUserPage(page).pipe(
69
+ Effect.map((response) => [
70
+ Chunk.fromIterable(response.users),
71
+ response.nextPage !== null ? Option.some(response.nextPage) : Option.none(),
72
+ ] as const)
73
+ )
74
+ );
75
+
76
+ // Sink that inserts each incoming batch of users and counts the rows written
+ const batchInsertSink: Sink.Sink<number, Chunk.Chunk<User>> =
+ Sink.foldLeftEffect(0, (count, batch: Chunk.Chunk<User>) =>
+ insertUserBatch(Chunk.toArray(batch)).pipe(
+ Effect.map((inserted) => count + inserted)
+ )
+ );
96
+
97
+ // Run the stream with batching sink
98
+ const program = Effect.gen(function* () {
99
+ const totalInserted = yield* userStream.pipe(
100
+ Stream.grouped(100), // collect users into batches of up to 100
+ Stream.run(batchInsertSink)
101
+ );
102
+ console.log(`Total users inserted: ${totalInserted}`);
103
+ });
104
+
105
+ Effect.runPromise(program);
106
+ ```
107
+
108
+ This pattern:
109
+
110
+ 1. **Creates a stream** of users from a paginated API
111
+ 2. **Groups users into batches** of up to 100 with `Stream.grouped`
112
+ 3. **Inserts each batch** into the database in a single operation
113
+ 4. **Tracks total count** of inserted records
114
+
115
+ The batching happens in the stream: `Stream.grouped` collects elements until the batch size is reached, and the sink then inserts each complete batch.
116
+
117
+ ---
118
+
119
+ **Rationale:**
120
+
121
+ When consuming a stream of records to persist in a database, collect them into batches before the inserting `Sink` runs. This reduces the number of database round-trips and transaction overhead, improving overall throughput significantly.
122
+
123
+ ---
124
+
125
+
126
+ Inserting records one-by-one is inefficient:
127
+
128
+ - Each insert is a separate database call (network latency, connection overhead)
129
+ - Each insert may be a separate transaction (ACID overhead)
130
+ - Resource contention and connection pool exhaustion at scale
131
+
132
+ Batching solves this by:
133
+
134
+ - Grouping N records into a single bulk insert operation
135
+ - Amortizing database overhead across multiple records
136
+ - Maintaining throughput even under backpressure
137
+ - Enabling efficient transaction semantics for the entire batch
138
+
139
+ For example, inserting 10,000 records one-by-one might take 100 seconds. Batching in groups of 100 might take just 2-3 seconds.
140
+
141
+ ---
142
+
143
+ ---
144
+
145
+ ### Sink Pattern 2: Write Stream Events to Event Log
146
+
147
+ **Rule:** Append stream events to an event log with metadata to maintain a complete, ordered record of what happened.
148
+
149
+ **Good Example:**
150
+
151
+ This example demonstrates an event sourcing pattern where a stream of account events is appended to an event log with metadata.
152
+
153
+ ```typescript
154
+ import { Effect, Stream, Sink, DateTime, Data } from "effect";
155
+
156
+ // Event types
157
+ type AccountEvent =
158
+ | AccountCreated
159
+ | MoneyDeposited
160
+ | MoneyWithdrawn
161
+ | AccountClosed;
162
+
163
+ class AccountCreated extends Data.TaggedClass("AccountCreated")<{
164
+ readonly accountId: string;
165
+ readonly owner: string;
166
+ readonly initialBalance: number;
167
+ }> {}
168
+
169
+ class MoneyDeposited extends Data.TaggedClass("MoneyDeposited")<{
170
+ readonly accountId: string;
171
+ readonly amount: number;
172
+ }> {}
173
+
174
+ class MoneyWithdrawn extends Data.TaggedClass("MoneyWithdrawn")<{
175
+ readonly accountId: string;
176
+ readonly amount: number;
177
+ }> {}
178
+
179
+ class AccountClosed extends Data.TaggedClass("AccountClosed")<{
180
+ readonly accountId: string;
181
+ }> {}
182
+
183
+ // Event envelope with metadata
184
+ interface StoredEvent {
185
+ readonly eventId: string; // Unique identifier per event
186
+ readonly eventType: string; // Type of event
187
+ readonly aggregateId: string; // What this event is about
188
+ readonly aggregateType: string; // What kind of thing (Account)
189
+ readonly data: any; // Event payload
190
+ readonly metadata: {
191
+ readonly timestamp: number;
192
+ readonly version: number; // Position in log
193
+ readonly causationId?: string; // What caused this
194
+ };
195
+ }
196
+
197
+ // Mock event log that appends events
198
+ const eventLog: StoredEvent[] = [];
199
+ let eventVersion = 0;
200
+
201
+ const appendToEventLog = (
202
+ event: AccountEvent,
203
+ aggregateId: string
204
+ ): Effect.Effect<StoredEvent> =>
205
+ Effect.gen(function* () {
206
+ const now = yield* DateTime.now;
207
+ const storedEvent: StoredEvent = {
208
+ eventId: `evt-${eventVersion}-${Date.now()}`,
209
+ eventType: event._tag,
210
+ aggregateId,
211
+ aggregateType: "Account",
212
+ data: event,
213
+ metadata: {
214
+ timestamp: DateTime.toEpochMillis(now),
215
+ version: ++eventVersion,
216
+ },
217
+ };
218
+
219
+ // Append to log (simulated)
220
+ eventLog.push(storedEvent);
221
+ console.log(
222
+ `[v${storedEvent.metadata.version}] ${storedEvent.eventType}: ${aggregateId}`
223
+ );
224
+
225
+ return storedEvent;
226
+ });
227
+
228
+ // Simulate a stream of events from various account operations
229
+ const accountEvents: Stream.Stream<[string, AccountEvent]> = Stream.fromIterable([
230
+ [
231
+ "acc-1",
232
+ new AccountCreated({
233
+ accountId: "acc-1",
234
+ owner: "Alice",
235
+ initialBalance: 1000,
236
+ }),
237
+ ],
238
+ ["acc-1", new MoneyDeposited({ accountId: "acc-1", amount: 500 })],
239
+ ["acc-1", new MoneyWithdrawn({ accountId: "acc-1", amount: 200 })],
240
+ [
241
+ "acc-2",
242
+ new AccountCreated({
243
+ accountId: "acc-2",
244
+ owner: "Bob",
245
+ initialBalance: 2000,
246
+ }),
247
+ ],
248
+ ["acc-2", new MoneyDeposited({ accountId: "acc-2", amount: 1000 })],
249
+ ["acc-1", new AccountClosed({ accountId: "acc-1" })],
250
+ ]);
251
+
252
+ // Sink that appends each event to the log
253
+ const eventLogSink: Sink.Sink<number, [string, AccountEvent]> =
+ Sink.foldLeftEffect(0, (count, [aggregateId, event]: [string, AccountEvent]) =>
+ appendToEventLog(event, aggregateId).pipe(Effect.map(() => count + 1))
+ );
261
+
262
+ // Run the stream and append all events
263
+ const program = Effect.gen(function* () {
264
+ const totalEvents = yield* accountEvents.pipe(Stream.run(eventLogSink));
265
+
266
+ console.log(`\nTotal events appended: ${totalEvents}`);
267
+ console.log(`\nEvent log contents:`);
268
+ eventLog.forEach((event) => {
269
+ console.log(` [v${event.metadata.version}] ${event.eventType}`);
270
+ });
271
+ });
272
+
273
+ Effect.runPromise(program);
274
+ ```
275
+
276
+ This pattern:
277
+
278
+ 1. **Defines event types** as tagged classes (AccountCreated, MoneyDeposited, etc.)
279
+ 2. **Creates event envelopes** with metadata (timestamp, version, causation)
280
+ 3. **Streams events** from various sources
281
+ 4. **Appends to log** with proper versioning and ordering
282
+ 5. **Maintains history** for reconstruction and audit
283
+
284
+ ---
285
+
286
+ **Rationale:**
287
+
288
+ When consuming a stream of events that represent changes in your system, append each event to an event log using `Sink`. Event logs provide immutable, ordered records that enable event sourcing, audit trails, and temporal queries.
289
+
290
+ ---
291
+
292
+
293
+ Event logs are foundational to many patterns:
294
+
295
+ - **Event Sourcing**: Instead of storing current state, store the sequence of events that led to it
296
+ - **Audit Trails**: Complete, tamper-proof record of who did what and when
297
+ - **Temporal Queries**: Reconstruct state at any point in time
298
+ - **Consistency**: Single source of truth for what happened
299
+ - **Replay**: Rebuild state or test changes by replaying events
300
+
301
+ Unlike batch inserts, which are transactional, event logs are append-only. Each event is immutable once written. This simplicity enables:
302
+
303
+ - Fast appends (no updates, just sequential writes)
304
+ - Natural ordering (events in write order)
305
+ - Easy distribution (replicate the log)
306
+ - Strong consistency (events are facts that don't change)
307
+
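+ As a small illustration of the replay point above, the sketch below folds a list of simplified ledger events back into a balance. `LedgerEvent` and `rebuildBalance` are illustrative names, not part of the pattern's code:
+
+ ```typescript
+ import { Effect, Stream } from "effect";
+
+ // Minimal event shape for illustration; real events would carry more metadata
+ type LedgerEvent =
+   | { readonly _tag: "Deposited"; readonly amount: number }
+   | { readonly _tag: "Withdrawn"; readonly amount: number };
+
+ // Replaying the log rebuilds state: fold every event into a balance
+ const rebuildBalance = (events: Iterable<LedgerEvent>): Effect.Effect<number> =>
+   Stream.fromIterable(events).pipe(
+     Stream.runFold(0, (balance, event) =>
+       event._tag === "Deposited" ? balance + event.amount : balance - event.amount
+     )
+   );
+
+ // Effect.runPromise(
+ //   rebuildBalance([
+ //     { _tag: "Deposited", amount: 100 },
+ //     { _tag: "Withdrawn", amount: 30 },
+ //   ])
+ // ).then(console.log); // 70
+ ```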
308
+ ---
309
+
310
+ ---
311
+
312
+ ### Sink Pattern 4: Send Stream Records to Message Queue
313
+
314
+ **Rule:** Stream records to message queues with proper batching and acknowledgment for reliable distributed data flow.
315
+
316
+ **Good Example:**
317
+
318
+ This example demonstrates streaming sensor readings and publishing them to a message queue with topic-based partitioning.
319
+
320
+ ```typescript
321
+ import { Effect, Stream, Sink, Chunk } from "effect";
322
+
323
+ interface SensorReading {
324
+ readonly sensorId: string;
325
+ readonly location: string;
326
+ readonly temperature: number;
327
+ readonly humidity: number;
328
+ readonly timestamp: number;
329
+ }
330
+
331
+ // Mock message queue publisher
332
+ interface QueuePublisher {
333
+ readonly publish: (
334
+ topic: string,
335
+ partition: string,
336
+ messages: readonly SensorReading[]
337
+ ) => Effect.Effect<{ acknowledged: number; messageIds: string[] }>;
338
+ }
339
+
340
+ // Create a mock queue publisher
341
+ const createMockPublisher = (): QueuePublisher => {
342
+ const publishedMessages: Record<string, SensorReading[]> = {};
343
+
344
+ return {
345
+ publish: (topic, partition, messages) =>
346
+ Effect.gen(function* () {
347
+ const key = `${topic}/${partition}`;
348
+ publishedMessages[key] = [
349
+ ...(publishedMessages[key] ?? []),
350
+ ...messages,
351
+ ];
352
+
353
+ const messageIds = Array.from({ length: messages.length }, (_, i) =>
354
+ `msg-${Date.now()}-${i}`
355
+ );
356
+
357
+ console.log(
358
+ `Published ${messages.length} messages to ${key} (batch)`
359
+ );
360
+
361
+ return { acknowledged: messages.length, messageIds };
362
+ }),
363
+ };
364
+ };
365
+
366
+ // Determine the partition key based on sensor location
367
+ const getPartitionKey = (reading: SensorReading): string =>
368
+ reading.location; // Route by location for data locality
369
+
370
+ // Simulate a stream of sensor readings
371
+ const sensorStream: Stream.Stream<SensorReading> = Stream.fromIterable([
372
+ {
373
+ sensorId: "temp-1",
374
+ location: "warehouse-a",
375
+ temperature: 22.5,
376
+ humidity: 45,
377
+ timestamp: Date.now(),
378
+ },
379
+ {
380
+ sensorId: "temp-2",
381
+ location: "warehouse-b",
382
+ temperature: 21.0,
383
+ humidity: 50,
384
+ timestamp: Date.now() + 100,
385
+ },
386
+ {
387
+ sensorId: "temp-3",
388
+ location: "warehouse-a",
389
+ temperature: 22.8,
390
+ humidity: 46,
391
+ timestamp: Date.now() + 200,
392
+ },
393
+ {
394
+ sensorId: "temp-4",
395
+ location: "warehouse-c",
396
+ temperature: 20.5,
397
+ humidity: 55,
398
+ timestamp: Date.now() + 300,
399
+ },
400
+ {
401
+ sensorId: "temp-5",
402
+ location: "warehouse-b",
403
+ temperature: 21.2,
404
+ humidity: 51,
405
+ timestamp: Date.now() + 400,
406
+ },
407
+ {
408
+ sensorId: "temp-6",
409
+ location: "warehouse-a",
410
+ temperature: 23.0,
411
+ humidity: 47,
412
+ timestamp: Date.now() + 500,
413
+ },
414
+ ]);
415
+
416
+ // Create a sink that batches and publishes to message queue
417
+ const createQueuePublishSink = (
418
+ publisher: QueuePublisher,
419
+ topic: string,
420
+ batchSize: number = 100
421
+ ): Sink.Sink<number, Error, SensorReading> =>
422
+ Sink.fold(
423
+ { batches: new Map<string, SensorReading[]>(), totalPublished: 0 },
424
+ (state, reading) =>
425
+ Effect.gen(function* () {
426
+ const partition = getPartitionKey(reading);
427
+ const batch = state.batches.get(partition) ?? [];
428
+ const newBatch = [...batch, reading];
429
+
430
+ if (newBatch.length >= batchSize) {
431
+ // Batch is full, publish it
432
+ const result = yield* publisher.publish(topic, partition, newBatch);
433
+ const newState = new Map(state.batches);
434
+ newState.delete(partition);
435
+
436
+ return {
437
+ ...state,
438
+ batches: newState,
439
+ totalPublished: state.totalPublished + result.acknowledged,
440
+ };
441
+ } else {
442
+ // Add to batch and continue
443
+ const newState = new Map(state.batches);
444
+ newState.set(partition, newBatch);
445
+
446
+ return { ...state, batches: newState };
447
+ }
448
+ }),
449
+ (state) =>
450
+ Effect.gen(function* () {
451
+ let finalCount = state.totalPublished;
452
+
453
+ // Publish any remaining partial batches
454
+ for (const [partition, batch] of state.batches) {
455
+ if (batch.length > 0) {
456
+ const result = yield* publisher.publish(topic, partition, batch);
457
+ finalCount += result.acknowledged;
458
+ }
459
+ }
460
+
461
+ return finalCount;
462
+ })
463
+ );
464
+
465
+ // Run the stream and publish to queue
466
+ const program = Effect.gen(function* () {
467
+ const publisher = createMockPublisher();
468
+ const topic = "sensor-readings";
469
+
470
+ const published = yield* sensorStream.pipe(
471
+ Stream.run(createQueuePublishSink(publisher, topic, 50)) // Batch size of 50
472
+ );
473
+
474
+ console.log(
475
+ `\nTotal messages published to queue: ${published}`
476
+ );
477
+ });
478
+
479
+ Effect.runPromise(program);
480
+ ```
481
+
482
+ This pattern:
483
+
484
+ 1. **Groups readings by partition** (location) for data locality
485
+ 2. **Batches records** before publishing (50 at a time)
486
+ 3. **Publishes batches** to the queue with partition key
487
+ 4. **Flushes partial batches** when stream ends
488
+ 5. **Tracks acknowledgments** from the queue
489
+
490
+ ---
491
+
492
+ **Rationale:**
493
+
494
+ When consuming a stream of events that need to be distributed to other systems, use `Sink` to publish them to a message queue. Message queues provide reliable, scalable distribution with guarantees such as per-partition ordering and durable, at-least-once delivery.
495
+
496
+ ---
497
+
498
+
499
+ Message queues are the backbone of event-driven architectures:
500
+
501
+ - **Decoupling**: Producers don't wait for consumers
502
+ - **Scalability**: Multiple subscribers can consume independently
503
+ - **Durability**: Messages persist even if subscribers are down
504
+ - **Ordering**: Maintain event sequence (per partition/topic)
505
+ - **Reliability**: Acknowledgments and retries ensure no message loss
506
+
507
+ Unlike direct writes which block, queue publishing is asynchronous and enables:
508
+
509
+ - High-throughput publishing (batch multiple records per operation)
510
+ - Backpressure handling (queue manages flow)
511
+ - Multi-subscriber patterns (fan-out)
512
+ - Dead letter queues for error handling
513
+
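+ Where the full partition-aware sink above is more than you need, batching publishes is often enough on its own. A minimal sketch, with an illustrative `Message` type and a `publishBatch` stand-in for your real client:
+
+ ```typescript
+ import { Chunk, Effect, Stream } from "effect";
+
+ interface Message {
+   readonly key: string;
+   readonly body: string;
+ }
+
+ // Stand-in publisher; returns the number of acknowledged messages
+ const publishBatch = (batch: ReadonlyArray<Message>): Effect.Effect<number> =>
+   Effect.sync(() => {
+     console.log(`published ${batch.length} messages`);
+     return batch.length;
+   });
+
+ // Publish in batches of up to 25 and count acknowledgments
+ const publishAll = (messages: Stream.Stream<Message>) =>
+   messages.pipe(
+     Stream.grouped(25),
+     Stream.mapEffect((batch) => publishBatch(Chunk.toArray(batch))),
+     Stream.runFold(0, (total, acked) => total + acked)
+   );
+ ```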
514
+ ---
515
+
516
+ ---
517
+
518
+ ### Sink Pattern 5: Fall Back to Alternative Sink on Failure
519
+
520
+ **Rule:** Implement fallback sinks to handle failures gracefully and ensure data is persisted even when the primary destination is unavailable.
521
+
522
+ **Good Example:**
523
+
524
+ This example demonstrates a system that tries to write order records to a fast in-memory cache first, falls back to the database if the cache fails, and falls back to a dead-letter file if the database fails.
525
+
526
+ ```typescript
527
+ import { Effect, Stream, Sink, Chunk, Either, Data } from "effect";
528
+
529
+ interface Order {
530
+ readonly orderId: string;
531
+ readonly customerId: string;
532
+ readonly total: number;
533
+ readonly timestamp: number;
534
+ }
535
+
536
+ class CacheSinkError extends Data.TaggedError("CacheSinkError")<{
537
+ readonly reason: string;
538
+ }> {}
539
+
540
+ class DatabaseSinkError extends Data.TaggedError("DatabaseSinkError")<{
541
+ readonly reason: string;
542
+ }> {}
543
+
544
+ // Mock in-memory cache sink (fast but limited)
545
+ const createCacheSink = (): Sink.Sink<number, Order, never, CacheSinkError> => {
546
+ const cache: Order[] = [];
547
+ const MAX_CACHE_SIZE = 1000;
548
+
549
+ return Sink.foldLeftEffect(
550
+ 0,
551
+ (count, order: Order) =>
552
+ Effect.gen(function* () {
553
+ if (cache.length >= MAX_CACHE_SIZE) {
554
+ yield* Effect.fail(
555
+ new CacheSinkError({
556
+ reason: `Cache full (${cache.length}/${MAX_CACHE_SIZE})`,
557
+ })
558
+ );
559
+ }
560
+
561
+ cache.push(order);
562
+ console.log(`[CACHE] Cached order ${order.orderId}`);
563
+ return count + 1;
564
+ })
+ ).pipe(
+ Sink.mapEffect((count) =>
+ Effect.gen(function* () {
+ console.log(`[CACHE] Final: ${count} orders in cache`);
+ return count;
+ })
+ )
+ );
571
+ };
572
+
573
+ // Mock database sink (slower but reliable)
574
+ const createDatabaseSink = (): Sink.Sink<number, Order, never, DatabaseSinkError> => {
575
+ const orders: Order[] = [];
576
+
577
+ return Sink.foldLeftEffect(
578
+ 0,
579
+ (count, order: Order) =>
580
+ Effect.gen(function* () {
581
+ // Simulate occasional database failures
582
+ if (Math.random() < 0.1) {
583
+ yield* Effect.fail(
584
+ new DatabaseSinkError({
585
+ reason: "Connection timeout",
586
+ })
587
+ );
588
+ }
589
+
590
+ orders.push(order);
591
+ console.log(`[DATABASE] Persisted order ${order.orderId}`);
592
+ return count + 1;
593
+ })
+ ).pipe(
+ Sink.mapEffect((count) =>
+ Effect.gen(function* () {
+ console.log(`[DATABASE] Final: ${count} orders in database`);
+ return count;
+ })
+ )
+ );
600
+ };
601
+
602
+ // Mock file sink (always works but slow)
603
+ const createDeadLetterSink = (): Sink.Sink<number, Order> => {
604
+ const deadLetters: Order[] = [];
605
+
606
+ return Sink.foldLeftEffect(
607
+ 0,
608
+ (count, order: Order) =>
609
+ Effect.gen(function* () {
610
+ deadLetters.push(order);
611
+ console.log(
612
+ `[DEAD-LETTER] Wrote order ${order.orderId} to dead letter file`
613
+ );
614
+ return count + 1;
615
+ })
+ ).pipe(
+ Sink.mapEffect((count) =>
+ Effect.gen(function* () {
+ console.log(
+ `[DEAD-LETTER] Final: ${count} orders in dead letter file`
+ );
+ return count;
+ })
+ )
+ );
624
+ };
625
+
626
+ // Create a fallback sink that tries cache -> database -> file
627
+ const createFallbackSink = (): Sink.Sink<
+ { readonly cached: number; readonly persisted: number; readonly deadLetters: number },
+ Order
+ > =>
632
+ Sink.foldLeftEffect(
633
+ { cached: 0, persisted: 0, deadLetters: 0 },
634
+ (state, order: Order) =>
635
+ Effect.gen(function* () {
636
+ // Try cache first
637
+ // Run the cache sink on just this order, capturing failure as an Either
+ const cacheResult = yield* Stream.make(order).pipe(
+ Stream.run(createCacheSink()),
+ Effect.either
+ );
640
+
641
+ if (Either.isRight(cacheResult)) {
642
+ return {
643
+ ...state,
644
+ cached: state.cached + cacheResult.right,
645
+ };
646
+ }
647
+
648
+ console.log(
649
+ `[FALLBACK] Cache failed (${cacheResult.left.reason}), trying database`
650
+ );
651
+
652
+ // Cache failed, try database
653
+ const dbResult = yield* Stream.make(order).pipe(
+ Stream.run(createDatabaseSink()),
+ Effect.either
+ );
656
+
657
+ if (Either.isRight(dbResult)) {
658
+ return {
659
+ ...state,
660
+ persisted: state.persisted + dbResult.right,
661
+ };
662
+ }
663
+
664
+ console.log(
665
+ `[FALLBACK] Database failed (${dbResult.left.reason}), falling back to dead letter`
666
+ );
667
+
668
+ // Database failed, use dead letter
669
+ const dlResult = yield* Stream.make(order).pipe(
+ Stream.run(createDeadLetterSink())
+ );
671
+
672
+ return {
673
+ ...state,
674
+ deadLetters: state.deadLetters + dlResult,
675
+ };
676
+ })
+ ).pipe(
+ // Log a summary once the whole stream has been consumed
+ Sink.mapEffect((state) =>
+ Effect.gen(function* () {
+ console.log(`\n[SUMMARY]`);
+ console.log(` Cached: ${state.cached}`);
+ console.log(` Persisted: ${state.persisted}`);
+ console.log(` Dead Letter: ${state.deadLetters}`);
+ return state;
+ })
+ )
+ );
686
+
687
+ // Simulate a stream of orders
688
+ const orderStream: Stream.Stream<Order> = Stream.fromIterable([
689
+ {
690
+ orderId: "order-1",
691
+ customerId: "cust-1",
692
+ total: 99.99,
693
+ timestamp: Date.now(),
694
+ },
695
+ {
696
+ orderId: "order-2",
697
+ customerId: "cust-2",
698
+ total: 149.99,
699
+ timestamp: Date.now() + 100,
700
+ },
701
+ {
702
+ orderId: "order-3",
703
+ customerId: "cust-1",
704
+ total: 49.99,
705
+ timestamp: Date.now() + 200,
706
+ },
707
+ {
708
+ orderId: "order-4",
709
+ customerId: "cust-3",
710
+ total: 199.99,
711
+ timestamp: Date.now() + 300,
712
+ },
713
+ {
714
+ orderId: "order-5",
715
+ customerId: "cust-2",
716
+ total: 89.99,
717
+ timestamp: Date.now() + 400,
718
+ },
719
+ ]);
720
+
721
+ // Run the stream with fallback sink
722
+ const program = Effect.gen(function* () {
723
+ const result = yield* orderStream.pipe(Stream.run(createFallbackSink()));
724
+ console.log(`\nTotal orders processed: ${result.cached + result.persisted + result.deadLetters}`);
725
+ });
726
+
727
+ Effect.runPromise(program);
728
+ ```
729
+
730
+ This pattern:
731
+
732
+ 1. **Tries cache first** (fast, limited capacity)
733
+ 2. **Falls back to database** if cache is full
734
+ 3. **Falls back to dead letter** if database fails
735
+ 4. **Tracks which sink** was used for each record
736
+ 5. **Reports summary** of where data went
737
+
738
+ ---
739
+
740
+ **Rationale:**
741
+
742
+ When consuming a stream to a primary destination that might fail, wrap it in a fallback pattern. If the primary sink fails, automatically redirect the stream to an alternative sink. This lets the system degrade gracefully rather than fail completely.
743
+
744
+ ---
745
+
746
+
747
+ Production systems need resilience:
748
+
749
+ - **Primary failures**: Database down, network timeout, quota exceeded
750
+ - **Progressive degradation**: Keep the system running, even at reduced capacity
751
+ - **No data loss**: Fallback ensures data is persisted somewhere
752
+ - **Operational flexibility**: Choose fallback based on failure type
753
+ - **Monitoring**: Track when fallbacks are used to alert operators
754
+
755
+ Without fallback patterns:
756
+
757
+ - System fails when primary destination fails
758
+ - Data is lost if primary is unavailable
759
+ - No clear signal that degradation occurred
760
+
761
+ With fallback sinks:
762
+
763
+ - Stream continues even when primary fails
764
+ - Data is safely persisted to alternative
765
+ - Clear audit trail of which sink was used
766
+
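+ For many pipelines a per-record fallback is all that is required, and it can be expressed directly with `Effect.catchAll` instead of nesting sinks. A minimal sketch with hypothetical `writePrimary` and `writeFallback` targets:
+
+ ```typescript
+ import { Effect, Stream } from "effect";
+
+ // Illustrative write targets; names and failure logic are stand-ins
+ const writePrimary = (n: number): Effect.Effect<void, Error> =>
+   n % 2 === 0
+     ? Effect.sync(() => console.log(`primary <- ${n}`))
+     : Effect.fail(new Error("primary unavailable"));
+
+ const writeFallback = (n: number): Effect.Effect<void> =>
+   Effect.sync(() => console.log(`fallback <- ${n}`));
+
+ // Try the primary destination per record; on failure, divert to the fallback
+ const drainWithFallback = (numbers: Stream.Stream<number>) =>
+   numbers.pipe(
+     Stream.runForEach((n) =>
+       writePrimary(n).pipe(Effect.catchAll(() => writeFallback(n)))
+     )
+   );
+
+ // Effect.runPromise(drainWithFallback(Stream.make(1, 2, 3, 4)));
+ ```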
767
+ ---
768
+
769
+ ---
770
+
771
+ ### Sink Pattern 6: Retry Failed Stream Operations
772
+
773
+ **Rule:** Implement retry strategies in sinks to handle transient failures and improve resilience without manual intervention.
774
+
775
+ **Good Example:**
776
+
777
+ This example demonstrates retrying database writes with exponential backoff, tracking attempts, and falling back on permanent failures.
778
+
779
+ ```typescript
780
+ import { Duration, Effect, Either, Sink, Stream } from "effect";
781
+
782
+ interface UserRecord {
783
+ readonly userId: string;
784
+ readonly name: string;
785
+ readonly email: string;
786
+ }
787
+
788
+ class WriteError extends Error {
789
+ readonly isTransient: boolean;
790
+
791
+ constructor(message: string, isTransient: boolean = true) {
792
+ super(message);
793
+ this.name = "WriteError";
794
+ this.isTransient = isTransient;
795
+ }
796
+ }
797
+
798
+ // Mock database that occasionally fails
799
+ const database = {
800
+ failureRate: 0.3, // 30% transient failure rate
801
+ permanentFailureRate: 0.05, // 5% permanent failure rate
802
+
803
+ insertUser: (user: UserRecord): Effect.Effect<void, WriteError> =>
804
+ Effect.gen(function* () {
805
+ const rand = Math.random();
806
+
807
+ // Permanent failure (e.g., constraint violation)
808
+ if (rand < database.permanentFailureRate) {
809
+ return yield* Effect.fail(
+ new WriteError(`Permanent: User ${user.userId} already exists`, false)
+ );
813
+ }
814
+
815
+ // Transient failure (e.g., connection timeout)
816
+ if (rand < database.permanentFailureRate + database.failureRate) {
817
+ return yield* Effect.fail(
+ new WriteError(`Transient: Connection timeout writing ${user.userId}`, true)
+ );
821
+ }
822
+
823
+ // Success
824
+ console.log(`✓ Wrote user ${user.userId}`);
825
+ }),
826
+ };
827
+
828
+ // Retry configuration
829
+ interface RetryConfig {
830
+ readonly maxAttempts: number;
831
+ readonly initialDelayMs: number;
832
+ readonly maxDelayMs: number;
833
+ readonly backoffFactor: number;
834
+ }
835
+
836
+ const defaultRetryConfig: RetryConfig = {
837
+ maxAttempts: 5,
838
+ initialDelayMs: 100, // Start with 100ms
839
+ maxDelayMs: 5000, // Cap at 5 seconds
840
+ backoffFactor: 2, // Double each time
841
+ };
842
+
843
+ // Result tracking
844
+ interface OperationResult {
845
+ readonly succeeded: number;
846
+ readonly transientFailures: number;
847
+ readonly permanentFailures: number;
848
+ readonly detailedStats: Array<{
849
+ readonly userId: string;
850
+ readonly attempts: number;
851
+ readonly status: "success" | "transient-failed" | "permanent-failed";
852
+ }>;
853
+ }
854
+
855
+ // Create a sink with retry logic
856
+ const createRetrySink = (config: RetryConfig): Sink.Sink<OperationResult, never, UserRecord> =>
857
+ Sink.fold(
858
+ {
859
+ succeeded: 0,
860
+ transientFailures: 0,
861
+ permanentFailures: 0,
862
+ detailedStats: [] as OperationResult["detailedStats"],
863
+ },
864
+ (state, user: UserRecord) =>
865
+ Effect.gen(function* () {
866
+ let lastError: WriteError | null = null;
867
+ let attempts = 0;
868
+
869
+ // Retry loop
870
+ for (attempts = 1; attempts <= config.maxAttempts; attempts++) {
871
+ // Attempt the write, capturing a typed failure as an Either
+ const attempt = yield* Effect.either(database.insertUser(user));
+
+ if (Either.isRight(attempt)) {
873
+
874
+ // Success!
875
+ console.log(
876
+ `[${user.userId}] Success on attempt ${attempts}/${config.maxAttempts}`
877
+ );
878
+
879
+ return {
880
+ ...state,
881
+ succeeded: state.succeeded + 1,
882
+ detailedStats: [
883
+ ...state.detailedStats,
884
+ {
885
+ userId: user.userId,
886
+ attempts,
887
+ status: "success",
888
+ },
889
+ ],
890
+ };
891
+ } else {
+ lastError = attempt.left;
893
+
894
+ if (!lastError.isTransient) {
895
+ // Permanent failure, don't retry
896
+ console.log(
897
+ `[${user.userId}] Permanent failure: ${lastError.message}`
898
+ );
899
+
900
+ return {
901
+ ...state,
902
+ permanentFailures: state.permanentFailures + 1,
903
+ detailedStats: [
904
+ ...state.detailedStats,
905
+ {
906
+ userId: user.userId,
907
+ attempts,
908
+ status: "permanent-failed",
909
+ },
910
+ ],
911
+ };
912
+ }
913
+
914
+ // Transient failure, retry if attempts remain
915
+ if (attempts < config.maxAttempts) {
916
+ // Calculate delay with exponential backoff
917
+ let delayMs = config.initialDelayMs * Math.pow(config.backoffFactor, attempts - 1);
918
+ delayMs = Math.min(delayMs, config.maxDelayMs);
919
+
920
+ // Add jitter (±10%)
921
+ const jitter = delayMs * 0.1;
922
+ delayMs = delayMs + (Math.random() - 0.5) * 2 * jitter;
923
+
924
+ console.log(
925
+ `[${user.userId}] Transient failure (attempt ${attempts}/${config.maxAttempts}): ${lastError.message}`
926
+ );
927
+ console.log(` Retrying in ${Math.round(delayMs)}ms...`);
928
+
929
+ yield* Effect.sleep(Duration.millis(Math.round(delayMs)));
930
+ }
931
+ }
932
+ }
933
+
934
+ // All retries exhausted
935
+ console.log(
936
+ `[${user.userId}] Failed after ${config.maxAttempts} attempts`
937
+ );
938
+
939
+ return {
940
+ ...state,
941
+ transientFailures: state.transientFailures + 1,
942
+ detailedStats: [
943
+ ...state.detailedStats,
944
+ {
945
+ userId: user.userId,
946
+ attempts: config.maxAttempts,
947
+ status: "transient-failed",
948
+ },
949
+ ],
950
+ };
951
+ })
+ ).pipe(
+ // Log a summary once the whole stream has been processed
+ Sink.mapEffect((state) =>
+ Effect.gen(function* () {
954
+ console.log(`\n[SUMMARY]`);
955
+ console.log(` Succeeded: ${state.succeeded}`);
956
+ console.log(` Transient Failures: ${state.transientFailures}`);
957
+ console.log(` Permanent Failures: ${state.permanentFailures}`);
958
+ console.log(` Total: ${state.detailedStats.length}`);
959
+
960
+ // Show detailed stats
961
+ const failed = state.detailedStats.filter((s) => s.status !== "success");
962
+ if (failed.length > 0) {
963
+ console.log(`\n[FAILURES]`);
964
+ failed.forEach((stat) => {
965
+ console.log(
966
+ ` ${stat.userId}: ${stat.attempts} attempts (${stat.status})`
967
+ );
968
+ });
969
+ }
970
+
971
+ return state;
972
+ })
+ )
+ );
974
+
975
+ // Simulate a stream of users to insert
976
+ const userStream: Stream.Stream<UserRecord> = Stream.fromIterable([
977
+ { userId: "user-1", name: "Alice", email: "alice@example.com" },
978
+ { userId: "user-2", name: "Bob", email: "bob@example.com" },
979
+ { userId: "user-3", name: "Charlie", email: "charlie@example.com" },
980
+ { userId: "user-4", name: "Diana", email: "diana@example.com" },
981
+ { userId: "user-5", name: "Eve", email: "eve@example.com" },
982
+ ]);
983
+
984
+ // Run the stream with retry sink
985
+ const program = Effect.gen(function* () {
986
+ const result = yield* userStream.pipe(Stream.run(createRetrySink(defaultRetryConfig)));
987
+ console.log(`\nProcessing complete.`);
988
+ });
989
+
990
+ Effect.runPromise(program);
991
+ ```
992
+
993
+ This pattern:
994
+
995
+ 1. **Attempts operation** up to max retries
996
+ 2. **Distinguishes transient vs. permanent failures**
997
+ 3. **Uses exponential backoff** to space retries
998
+ 4. **Adds jitter** to prevent thundering herd
999
+ 5. **Tracks detailed stats** for monitoring
1000
+ 6. **Reports summary** of outcomes
1001
+
1002
+ ---
1003
+
1004
+ **Rationale:**
1005
+
1006
+ When consuming a stream to a destination that may experience transient failures (network timeouts, rate limiting, temporary unavailability), wrap the sink operation with a retry policy. Use exponential backoff to avoid overwhelming a recovering system while still recovering quickly.
1007
+
1008
+ ---
1009
+
1010
+
1011
+ Transient failures are common in distributed systems:
1012
+
1013
+ - **Network timeouts**: Temporary connectivity issues resolve themselves
1014
+ - **Rate limiting**: Service recovers once rate limit window resets
1015
+ - **Temporary unavailability**: Services restart or scale up
1016
+ - **Circuit breaker trips**: Service recovers after backoff period
1017
+
1018
+ Without retry logic:
1019
+
1020
+ - Every transient failure causes data loss or stream interruption
1021
+ - Manual intervention required to restart
1022
+ - System appears less reliable than it actually is
1023
+
1024
+ With intelligent retry logic:
1025
+
1026
+ - Automatic recovery from transient failures
1027
+ - Exponential backoff prevents thundering herd
1028
+ - Clear visibility into which operations failed permanently
1029
+ - Data flows continuously despite temporary issues
1030
+
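+ The manual loop above makes each step visible, but the same policy can be expressed declaratively with `Effect.retry` and a `Schedule`. A minimal sketch; the flaky `flakyWrite` effect is purely illustrative:
+
+ ```typescript
+ import { Effect, Schedule } from "effect";
+
+ // Illustrative flaky write: fails twice, then succeeds
+ let calls = 0;
+ const flakyWrite = Effect.suspend(() => {
+   calls += 1;
+   return calls < 3
+     ? Effect.fail(new Error("transient: connection timeout"))
+     : Effect.succeed("written");
+ });
+
+ // Exponential backoff from 100 ms, with jitter, capped at 4 retries
+ const policy = Schedule.exponential("100 millis").pipe(
+   Schedule.jittered,
+   Schedule.intersect(Schedule.recurs(4))
+ );
+
+ const resilientWrite = flakyWrite.pipe(Effect.retry(policy));
+
+ // Effect.runPromise(resilientWrite).then(console.log); // "written"
+ ```
+
+ Distinguishing transient from permanent failures still applies here: `Effect.retry` also accepts an options object with a `while` predicate, so retries can be limited to errors you know are transient.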
1031
+ ---
1032
+
1033
+ ---
1034
+
1035
+ ### Sink Pattern 3: Write Stream Lines to File
1036
+
1037
+ **Rule:** Write streaming lines to a file efficiently using buffered output and proper resource management.
1038
+
1039
+ **Good Example:**
1040
+
1041
+ This example demonstrates streaming log entries and writing them to a file with buffering.
1042
+
1043
+ ```typescript
1044
+ import { FileSystem } from "@effect/platform";
+ import { NodeContext } from "@effect/platform-node";
+ import { Effect, Sink, Stream } from "effect";
1045
+
1046
+ interface LogEntry {
1047
+ readonly level: "debug" | "info" | "warn" | "error";
1048
+ readonly message: string;
1049
+ readonly timestamp: number;
1050
+ }
1051
+
1052
+ // Format a log entry as a line
1053
+ const formatLogLine = (entry: LogEntry): string => {
1054
+ const iso = new Date(entry.timestamp).toISOString();
1055
+ return `[${iso}] ${entry.level.toUpperCase()}: ${entry.message}`;
1056
+ };
1057
+
1058
+ // Simulate a stream of log entries
1059
+ const logStream: Stream.Stream<LogEntry> = Stream.fromIterable([
1060
+ { level: "info", message: "Server starting", timestamp: Date.now() },
1061
+ { level: "debug", message: "Loading config", timestamp: Date.now() + 100 },
1062
+ { level: "info", message: "Connected to database", timestamp: Date.now() + 200 },
1063
+ { level: "warn", message: "High memory usage detected", timestamp: Date.now() + 300 },
1064
+ { level: "info", message: "Processing request", timestamp: Date.now() + 400 },
1065
+ { level: "error", message: "Connection timeout", timestamp: Date.now() + 500 },
1066
+ { level: "info", message: "Retrying connection", timestamp: Date.now() + 600 },
1067
+ { level: "info", message: "Connection restored", timestamp: Date.now() + 700 },
1068
+ ]);
1069
+
1070
+ // Create a file writer sink with buffering
1071
+ const createFileWriteSink = (
1072
+ filePath: string,
1073
+ bufferSize: number = 100
1074
+ ) =>
+ // Inferred: Sink<number, string, never, PlatformError, FileSystem.FileSystem>
+ Sink.unwrapScoped(
+ Effect.gen(function* () {
+ // Open the file in append mode; the surrounding scope closes the handle
+ // when the sink finishes, even if the stream fails part-way
+ const fs = yield* FileSystem.FileSystem;
+ const file = yield* fs.open(filePath, { flag: "a" });
+ const encoder = new TextEncoder();
+
+ let buffer: string[] = [];
+
+ // Flush buffered lines to disk
+ const flush = Effect.gen(function* () {
+ if (buffer.length === 0) return;
+ yield* file.writeAll(encoder.encode(buffer.join("\n") + "\n"));
+ buffer = [];
+ });
+
+ // Return the sink
+ return Sink.foldLeftEffect(0, (count, line: string) =>
+ Effect.gen(function* () {
+ buffer.push(line);
+
+ // Flush when the buffer reaches the size limit
+ if (buffer.length >= bufferSize) {
+ yield* flush;
+ }
+
+ return count + 1;
+ })
+ ).pipe(
+ // Flush any remaining lines once the stream ends
+ Sink.mapEffect((count) => flush.pipe(Effect.as(count)))
+ );
+ })
+ );
1120
+
1121
+ // Process the log stream
1122
+ const program = Effect.gen(function* () {
1123
+ const fs = yield* FileSystem.FileSystem;
1124
+ const filePath = "/tmp/app.log";
1125
+
1126
+ // Clear the file first
1127
+ yield* fs.writeFileString(filePath, "");
1128
+
1129
+ // Stream logs, format them, and write to file
1130
+ const written = yield* logStream.pipe(
1131
+ Stream.map(formatLogLine),
1132
+ Stream.run(createFileWriteSink(filePath, 50)) // Buffer 50 lines before flush
1133
+ );
1134
+
1135
+ console.log(`Wrote ${written} log lines to ${filePath}`);
1136
+
1137
+ // Read back the file to verify
1138
+ const content = yield* fs.readFileString(filePath);
1139
+ console.log("\nFile contents:");
1140
+ console.log(content);
1141
+ });
1142
+
1143
+ Effect.runPromise(program.pipe(Effect.provide(NodeContext.layer)));
1144
+ ```
1145
+
1146
+ This pattern:
1147
+
1148
+ 1. **Opens a file** for appending
1149
+ 2. **Buffers log lines** in memory (50 lines before flush)
1150
+ 3. **Flushes periodically** when buffer fills or stream ends
1151
+ 4. **Closes the file** safely using scopes
1152
+ 5. **Tracks line count** for confirmation
1153
+
1154
+ ---
1155
+
1156
+ **Rationale:**
1157
+
1158
+ When consuming a stream of data to persist as lines in a file, use `Sink` with a file writer. Buffer the output for efficiency and ensure proper resource cleanup using Effect's scope management.
1159
+
1160
+ ---
1161
+
1162
+
1163
+ Writing stream data to files requires:
1164
+
1165
+ - **Buffering**: Writing one line at a time is slow. Buffer multiple lines before flushing to disk
1166
+ - **Efficiency**: Reduce system calls and I/O overhead by batching writes
1167
+ - **Resource Management**: Ensure file handles are properly closed even on errors
1168
+ - **Ordering**: Maintain the order of lines as they appear in the stream
1169
+
1170
+ This pattern is essential for:
1171
+
1172
+ - Log files and audit trails
1173
+ - CSV/JSON Line export
1174
+ - Streaming data archival
1175
+ - Data pipelines with file intermediates
1176
+
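+ When the stream is small and bounded, the buffering machinery above may be unnecessary; collecting the lines and writing the file once is the simplest correct option. A minimal sketch (the path is only an example location):
+
+ ```typescript
+ import { FileSystem } from "@effect/platform";
+ import { NodeContext } from "@effect/platform-node";
+ import { Chunk, Effect, Stream } from "effect";
+
+ // Collect every line, then write the whole file in one call
+ const writeAllLines = (lines: Stream.Stream<string>, path: string) =>
+   Effect.gen(function* () {
+     const fs = yield* FileSystem.FileSystem;
+     const collected = yield* Stream.runCollect(lines);
+     yield* fs.writeFileString(path, Chunk.toArray(collected).join("\n") + "\n");
+   });
+
+ // Effect.runPromise(
+ //   writeAllLines(Stream.make("first line", "second line"), "/tmp/example.log").pipe(
+ //     Effect.provide(NodeContext.layer)
+ //   )
+ // );
+ ```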
1177
+ ---
1178
+
1179
+ ---
1180
+
1181
+