@dojocho/effect-ts 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/DOJO.md +22 -0
  2. package/dojo.json +50 -0
  3. package/katas/001-hello-effect/SENSEI.md +72 -0
  4. package/katas/001-hello-effect/solution.test.ts +35 -0
  5. package/katas/001-hello-effect/solution.ts +16 -0
  6. package/katas/002-transform-with-map/SENSEI.md +72 -0
  7. package/katas/002-transform-with-map/solution.test.ts +33 -0
  8. package/katas/002-transform-with-map/solution.ts +16 -0
  9. package/katas/003-generator-pipelines/SENSEI.md +72 -0
  10. package/katas/003-generator-pipelines/solution.test.ts +40 -0
  11. package/katas/003-generator-pipelines/solution.ts +29 -0
  12. package/katas/004-flatmap-and-chaining/SENSEI.md +80 -0
  13. package/katas/004-flatmap-and-chaining/solution.test.ts +34 -0
  14. package/katas/004-flatmap-and-chaining/solution.ts +18 -0
  15. package/katas/005-pipe-composition/SENSEI.md +81 -0
  16. package/katas/005-pipe-composition/solution.test.ts +41 -0
  17. package/katas/005-pipe-composition/solution.ts +19 -0
  18. package/katas/006-handle-errors/SENSEI.md +86 -0
  19. package/katas/006-handle-errors/solution.test.ts +53 -0
  20. package/katas/006-handle-errors/solution.ts +30 -0
  21. package/katas/007-tagged-errors/SENSEI.md +79 -0
  22. package/katas/007-tagged-errors/solution.test.ts +82 -0
  23. package/katas/007-tagged-errors/solution.ts +37 -0
  24. package/katas/008-error-patterns/SENSEI.md +89 -0
  25. package/katas/008-error-patterns/solution.test.ts +41 -0
  26. package/katas/008-error-patterns/solution.ts +38 -0
  27. package/katas/009-option-type/SENSEI.md +96 -0
  28. package/katas/009-option-type/solution.test.ts +49 -0
  29. package/katas/009-option-type/solution.ts +26 -0
  30. package/katas/010-either-and-exit/SENSEI.md +86 -0
  31. package/katas/010-either-and-exit/solution.test.ts +33 -0
  32. package/katas/010-either-and-exit/solution.ts +17 -0
  33. package/katas/011-services-and-context/SENSEI.md +82 -0
  34. package/katas/011-services-and-context/solution.test.ts +23 -0
  35. package/katas/011-services-and-context/solution.ts +17 -0
  36. package/katas/012-layers/SENSEI.md +73 -0
  37. package/katas/012-layers/solution.test.ts +23 -0
  38. package/katas/012-layers/solution.ts +26 -0
  39. package/katas/013-testing-effects/SENSEI.md +88 -0
  40. package/katas/013-testing-effects/solution.test.ts +41 -0
  41. package/katas/013-testing-effects/solution.ts +20 -0
  42. package/katas/014-schema-basics/SENSEI.md +81 -0
  43. package/katas/014-schema-basics/solution.test.ts +35 -0
  44. package/katas/014-schema-basics/solution.ts +25 -0
  45. package/katas/015-domain-modeling/SENSEI.md +85 -0
  46. package/katas/015-domain-modeling/solution.test.ts +46 -0
  47. package/katas/015-domain-modeling/solution.ts +42 -0
  48. package/katas/016-retry-and-schedule/SENSEI.md +72 -0
  49. package/katas/016-retry-and-schedule/solution.test.ts +26 -0
  50. package/katas/016-retry-and-schedule/solution.ts +23 -0
  51. package/katas/017-parallel-effects/SENSEI.md +70 -0
  52. package/katas/017-parallel-effects/solution.test.ts +33 -0
  53. package/katas/017-parallel-effects/solution.ts +17 -0
  54. package/katas/018-race-and-timeout/SENSEI.md +75 -0
  55. package/katas/018-race-and-timeout/solution.test.ts +30 -0
  56. package/katas/018-race-and-timeout/solution.ts +27 -0
  57. package/katas/019-ref-and-state/SENSEI.md +72 -0
  58. package/katas/019-ref-and-state/solution.test.ts +29 -0
  59. package/katas/019-ref-and-state/solution.ts +16 -0
  60. package/katas/020-fibers/SENSEI.md +80 -0
  61. package/katas/020-fibers/solution.test.ts +23 -0
  62. package/katas/020-fibers/solution.ts +23 -0
  63. package/katas/021-acquire-release/SENSEI.md +57 -0
  64. package/katas/021-acquire-release/solution.test.ts +23 -0
  65. package/katas/021-acquire-release/solution.ts +22 -0
  66. package/katas/022-scoped-layers/SENSEI.md +52 -0
  67. package/katas/022-scoped-layers/solution.test.ts +35 -0
  68. package/katas/022-scoped-layers/solution.ts +19 -0
  69. package/katas/023-resource-patterns/SENSEI.md +52 -0
  70. package/katas/023-resource-patterns/solution.test.ts +20 -0
  71. package/katas/023-resource-patterns/solution.ts +13 -0
  72. package/katas/024-streams-basics/SENSEI.md +61 -0
  73. package/katas/024-streams-basics/solution.test.ts +30 -0
  74. package/katas/024-streams-basics/solution.ts +16 -0
  75. package/katas/025-stream-operations/SENSEI.md +59 -0
  76. package/katas/025-stream-operations/solution.test.ts +26 -0
  77. package/katas/025-stream-operations/solution.ts +17 -0
  78. package/katas/026-combining-streams/SENSEI.md +54 -0
  79. package/katas/026-combining-streams/solution.test.ts +20 -0
  80. package/katas/026-combining-streams/solution.ts +16 -0
  81. package/katas/027-data-pipelines/SENSEI.md +58 -0
  82. package/katas/027-data-pipelines/solution.test.ts +22 -0
  83. package/katas/027-data-pipelines/solution.ts +16 -0
  84. package/katas/028-logging-and-spans/SENSEI.md +58 -0
  85. package/katas/028-logging-and-spans/solution.test.ts +50 -0
  86. package/katas/028-logging-and-spans/solution.ts +20 -0
  87. package/katas/029-http-client/SENSEI.md +59 -0
  88. package/katas/029-http-client/solution.test.ts +49 -0
  89. package/katas/029-http-client/solution.ts +24 -0
  90. package/katas/030-capstone/SENSEI.md +63 -0
  91. package/katas/030-capstone/solution.test.ts +67 -0
  92. package/katas/030-capstone/solution.ts +55 -0
  93. package/katas/031-config-and-environment/SENSEI.md +77 -0
  94. package/katas/031-config-and-environment/solution.test.ts +38 -0
  95. package/katas/031-config-and-environment/solution.ts +11 -0
  96. package/katas/032-cause-and-defects/SENSEI.md +90 -0
  97. package/katas/032-cause-and-defects/solution.test.ts +50 -0
  98. package/katas/032-cause-and-defects/solution.ts +23 -0
  99. package/katas/033-pattern-matching/SENSEI.md +86 -0
  100. package/katas/033-pattern-matching/solution.test.ts +36 -0
  101. package/katas/033-pattern-matching/solution.ts +28 -0
  102. package/katas/034-deferred-and-coordination/SENSEI.md +85 -0
  103. package/katas/034-deferred-and-coordination/solution.test.ts +25 -0
  104. package/katas/034-deferred-and-coordination/solution.ts +24 -0
  105. package/katas/035-queue-and-backpressure/SENSEI.md +100 -0
  106. package/katas/035-queue-and-backpressure/solution.test.ts +25 -0
  107. package/katas/035-queue-and-backpressure/solution.ts +21 -0
  108. package/katas/036-schema-advanced/SENSEI.md +81 -0
  109. package/katas/036-schema-advanced/solution.test.ts +55 -0
  110. package/katas/036-schema-advanced/solution.ts +19 -0
  111. package/katas/037-cache-and-memoization/SENSEI.md +73 -0
  112. package/katas/037-cache-and-memoization/solution.test.ts +47 -0
  113. package/katas/037-cache-and-memoization/solution.ts +24 -0
  114. package/katas/038-metrics/SENSEI.md +91 -0
  115. package/katas/038-metrics/solution.test.ts +39 -0
  116. package/katas/038-metrics/solution.ts +23 -0
  117. package/katas/039-managed-runtime/SENSEI.md +75 -0
  118. package/katas/039-managed-runtime/solution.test.ts +29 -0
  119. package/katas/039-managed-runtime/solution.ts +19 -0
  120. package/katas/040-request-batching/SENSEI.md +87 -0
  121. package/katas/040-request-batching/solution.test.ts +56 -0
  122. package/katas/040-request-batching/solution.ts +32 -0
  123. package/package.json +22 -0
  124. package/skills/effect-patterns-building-apis/SKILL.md +2393 -0
  125. package/skills/effect-patterns-building-data-pipelines/SKILL.md +1876 -0
  126. package/skills/effect-patterns-concurrency/SKILL.md +2999 -0
  127. package/skills/effect-patterns-concurrency-getting-started/SKILL.md +351 -0
  128. package/skills/effect-patterns-core-concepts/SKILL.md +3199 -0
  129. package/skills/effect-patterns-domain-modeling/SKILL.md +1385 -0
  130. package/skills/effect-patterns-error-handling/SKILL.md +1212 -0
  131. package/skills/effect-patterns-error-handling-resilience/SKILL.md +179 -0
  132. package/skills/effect-patterns-error-management/SKILL.md +1668 -0
  133. package/skills/effect-patterns-getting-started/SKILL.md +237 -0
  134. package/skills/effect-patterns-making-http-requests/SKILL.md +1756 -0
  135. package/skills/effect-patterns-observability/SKILL.md +1586 -0
  136. package/skills/effect-patterns-platform/SKILL.md +1195 -0
  137. package/skills/effect-patterns-platform-getting-started/SKILL.md +179 -0
  138. package/skills/effect-patterns-project-setup--execution/SKILL.md +233 -0
  139. package/skills/effect-patterns-resource-management/SKILL.md +827 -0
  140. package/skills/effect-patterns-scheduling/SKILL.md +451 -0
  141. package/skills/effect-patterns-scheduling-periodic-tasks/SKILL.md +763 -0
  142. package/skills/effect-patterns-streams/SKILL.md +2052 -0
  143. package/skills/effect-patterns-streams-getting-started/SKILL.md +421 -0
  144. package/skills/effect-patterns-streams-sinks/SKILL.md +1181 -0
  145. package/skills/effect-patterns-testing/SKILL.md +1632 -0
  146. package/skills/effect-patterns-tooling-and-debugging/SKILL.md +1125 -0
  147. package/skills/effect-patterns-value-handling/SKILL.md +676 -0
  148. package/tsconfig.json +20 -0
  149. package/vitest.config.ts +3 -0
package/katas/023-resource-patterns/SENSEI.md
@@ -0,0 +1,52 @@
+ # SENSEI — 023 Resource Patterns
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Effect.ensuring` to add additional cleanup that always runs, verify that resources are released even when the use phase fails, and guarantee cleanup regardless of success or failure.
+
+ ### Tasks
+
+ 1. Implement `withEnsuring` -- use `Effect.acquireRelease` to create a resource and `Effect.ensuring` to add additional cleanup. Log `"acquire"`, `"ensure-cleanup"`, and `"release"` to the log array.
+ 2. Implement `releaseOnFailure` -- use `Effect.acquireRelease` where the "use" phase fails. Verify the resource is still released. Return `"released"` if cleanup happened.
+
+ ## Prerequisites
+
+ - **021 Acquire Release** — `Effect.acquireRelease`, `Effect.scoped`
+ - **022 Scoped Layers** — `Layer.scoped`, service lifetime
+
+ > **Note**: `Effect.runSync` appears only in tests. The student does NOT write it. Never attribute it to their learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `withEnsuring runs all cleanup` | `Effect.ensuring` + `Effect.acquireRelease` | Log contains `"acquire"`, `"ensure-cleanup"`, and `"release"` |
+ | `releaseOnFailure still releases on error` | `Effect.acquireRelease` + error recovery | Returns `"released"`; log contains `"release"` despite use-phase failure |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "You know `acquireRelease` guarantees cleanup. What if you need *additional* cleanup that isn't tied to a specific resource — like flushing a metrics buffer?"
+ - "If the use phase fails, does the release still run? How would you verify that?"
+ - "For `releaseOnFailure`, the test expects the return value `'released'`. But if the use phase fails, how do you get a success value out?"
+
+ ### Common pitfalls
+
+ 1. **`ensuring` execution order** — `ensuring` runs after the whole effect completes, including after `acquireRelease`'s release. Students may expect it to run before release. Ask: "If you wrap an `acquireRelease` with `ensuring`, which cleanup runs first — the release or the ensuring finalizer?"
+ 2. **`releaseOnFailure` — catching after scoped** — the use phase fails inside the scope, but `acquireRelease` still releases. To return `"released"`, you need to catch the error *after* `Effect.scoped`. Students may try to catch inside the scope. Ask: "Where should the `catchAll` go — inside or outside the `Effect.scoped` call?"
+ 3. **Forgetting `Effect.scoped`** — same pitfall as kata 021. The scope boundary triggers the release. Without it, nothing gets cleaned up.
+ 4. **`withEnsuring` structure** — students need to combine `acquireRelease` (with its own release) *and* `ensuring` on the outer effect. The ensuring callback is separate from the acquireRelease release function.
+ 5. **`releaseOnFailure` catch placement** — create an `acquireRelease` where the use phase calls `Effect.fail`, wrap with `Effect.scoped`, then catch the error *outside* the scope and return `'released'`.
+
+ ## On Completion
+
+ ### Insight
+
+ `Effect.ensuring` adds a finalizer that runs regardless of outcome — it stacks with `acquireRelease`. For error recovery with resources, the key insight is that `acquireRelease` ALWAYS releases, even on failure. You can catch the use-phase failure after the scope closes and still verify cleanup happened. Together, these patterns give you complete control over resource lifecycles.
+
+ ### Bridge
+
+ Resource Management is now complete. You can acquire and release resources, tie them to service layers, add extra finalizers, and recover from failures — all with guaranteed cleanup. Next up: **Streams**. Kata 024 introduces `Stream`, `runCollect`, and `runFold` for processing lazy sequences of data.
package/katas/023-resource-patterns/solution.test.ts
@@ -0,0 +1,20 @@
+ import { Effect } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { withEnsuring, releaseOnFailure } from "@/katas/023-resource-patterns/solution.js";
+
+ describe("023 — Resource Patterns", () => {
+   it("withEnsuring runs all cleanup", () => {
+     const log: string[] = [];
+     Effect.runSync(withEnsuring(log));
+     expect(log).toContain("acquire");
+     expect(log).toContain("ensure-cleanup");
+     expect(log).toContain("release");
+   });
+
+   it("releaseOnFailure still releases on error", () => {
+     const log: string[] = [];
+     const result = Effect.runSync(releaseOnFailure(log));
+     expect(result).toBe("released");
+     expect(log).toContain("release");
+   });
+ });
package/katas/023-resource-patterns/solution.ts
@@ -0,0 +1,13 @@
+ import { Effect } from "effect";
+
+ /** Use Effect.acquireRelease to create a resource, and Effect.ensuring
+  * to add additional cleanup. Log "acquire", "ensure-cleanup", "release" to the log array */
+ export const withEnsuring = (log: string[]): Effect.Effect<string> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Effect.acquireRelease where the "use" phase fails.
+  * Verify the resource is still released. Return "released" if cleanup happened. */
+ export const releaseOnFailure = (log: string[]): Effect.Effect<string> => {
+   throw new Error("Not implemented");
+ };
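The stubs above throw until the student implements them. As a point of reference while reading this diff, here is a minimal sketch of one way the two exercises could be written, assuming only the combinators the SENSEI file names (`Effect.acquireRelease`, `Effect.scoped`, `Effect.ensuring`, `Effect.catchAll`); it is an illustration, not the kata's shipped answer.

```ts
import { Effect } from "effect";

// Sketch only: one possible shape for the 023 exercises, not the reference solution.
export const withEnsuring = (log: string[]): Effect.Effect<string> =>
  Effect.acquireRelease(
    Effect.sync(() => {
      log.push("acquire");
      return "resource";
    }),
    () => Effect.sync(() => void log.push("release")),
  ).pipe(
    Effect.scoped, // closing the scope triggers the release
    Effect.ensuring(Effect.sync(() => void log.push("ensure-cleanup"))), // extra finalizer, separate from the release
  );

export const releaseOnFailure = (log: string[]): Effect.Effect<string> =>
  Effect.acquireRelease(
    Effect.sync(() => void log.push("acquire")),
    () => Effect.sync(() => void log.push("release")),
  ).pipe(
    Effect.andThen(Effect.fail(new Error("use phase failed"))), // the use phase fails
    Effect.scoped,                                              // the release still runs when the scope closes
    Effect.catchAll(() => Effect.succeed("released")),          // recover outside the scope
  );
```

Note how the recovery sits outside `Effect.scoped`, matching pitfall 2 in the SENSEI notes.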
package/katas/024-streams-basics/SENSEI.md
@@ -0,0 +1,61 @@
+ # SENSEI — 024 Streams Basics
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Stream.fromIterable` to create streams, `Stream.map` and `Stream.filter` to transform them, `Stream.runCollect` to collect elements into a Chunk, and `Stream.runFold` to reduce a stream into a single value.
+
+ ### Tasks
+
+ 1. Implement `streamFromArray` -- create a Stream from an array and collect all elements back to an array.
+ 2. Implement `filterAndDouble` -- create a stream from `[1,2,3,4,5]`, filter even numbers, double them, and collect the results.
+ 3. Implement `sumStream` -- use `Stream.runFold` to sum all elements in a stream.
+
+ ## Prerequisites
+
+ - **003 Generator Pipelines** — `Effect.gen`, `yield*`
+ - **005 Pipe Composition** — `pipe`, function composition
+
+ ## Skills
+
+ Invoke `effect-patterns-streams-getting-started` before teaching this kata.
+
+ > **Note**: `Effect.runSync` appears only in tests. The student does NOT write it. Never attribute it to their learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `streamFromArray collects all elements` | `Stream.fromIterable` + `Stream.runCollect` | `[1, 2, 3]` round-trips through stream and back |
+ | `streamFromArray handles empty array` | `Stream.fromIterable` + `Stream.runCollect` | Edge case — empty input produces empty output |
+ | `filterAndDouble returns [4, 8]` | `Stream.filter` + `Stream.map` | Filters evens from `[1,2,3,4,5]`, doubles them |
+ | `sumStream sums all elements` | `Stream.runFold` | Folds `[1,2,3,4,5]` with addition to get `15` |
+ | `sumStream of empty is 0` | `Stream.runFold` | Edge case — empty stream with initial value `0` |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "What happens to elements in a Stream that you never consume? How does laziness help when the data source is huge or infinite?"
+ - "If `runCollect` forces the entire stream into memory, when is that acceptable and when would you prefer `runFold`?"
+ - "After `runCollect`, you get a `Chunk`, not an array. Why do you think Effect uses its own collection type instead of plain arrays?"
+ - "What's the relationship between `Stream.runFold` and `Array.reduce`? Could you express `runCollect` in terms of `runFold`?"
+
+ ### Common pitfalls
+
+ 1. **`runCollect` returns a Chunk, not an array** — students must call `Chunk.toArray` to convert. Without it, the test comparison with `toEqual([...])` will fail. Ask: "What type does `runCollect` give you? How do you get a plain array from it?"
+ 2. **`filterAndDouble` source data** — the stream is created from `[1, 2, 3, 4, 5]`. Evens are 2 and 4. Doubled: `[4, 8]`. Students may filter odds instead. Ask: "Which numbers from 1 to 5 are even?"
+ 3. **`runFold` signature** — it takes an initial value, a combining function, and returns an Effect (not a Stream). Students may try to chain more stream operations after it. Ask: "Is `runFold` a transformation or a terminal operation?"
+ 4. **Forgetting that stream operations return new streams** — `Stream.filter` and `Stream.map` don't consume the stream. You need a terminal operation like `runCollect` or `runFold` to get a result. Ask: "What turns a stream description into an actual value?"
+ 5. **`runFold` is like `Array.reduce`** — for `sumStream`, use `runFold` with initial value `0` and a function `(acc, n) => acc + n`. Ask: "What should the sum be if the stream is empty?"
+
+ ## On Completion
+
+ ### Insight
+
+ Streams are lazy, pull-based sequences of values. Unlike arrays, they can represent infinite sequences and process elements one at a time without loading everything into memory. `runCollect` and `runFold` are terminal operations that "consume" the stream — until you call one, no elements are processed. This lazy evaluation model is what makes streams efficient for large or unbounded data.
+
+ ### Bridge
+
+ Now that you can create, transform, and consume streams, the next step is more powerful **stream operations**. Kata 025 introduces `Stream.take` (limiting elements), `Stream.scan` (running accumulations), and `Stream.grouped` (batching) — tools for building real data processing pipelines.
package/katas/024-streams-basics/solution.test.ts
@@ -0,0 +1,30 @@
+ import { Effect } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { streamFromArray, filterAndDouble, sumStream } from "@/katas/024-streams-basics/solution.js";
+
+ describe("024 — Streams Basics", () => {
+   it("streamFromArray collects all elements", () => {
+     const result = Effect.runSync(streamFromArray([1, 2, 3]));
+     expect(result).toEqual([1, 2, 3]);
+   });
+
+   it("streamFromArray handles empty array", () => {
+     const result = Effect.runSync(streamFromArray([]));
+     expect(result).toEqual([]);
+   });
+
+   it("filterAndDouble returns [4, 8]", () => {
+     const result = Effect.runSync(filterAndDouble());
+     expect(result).toEqual([4, 8]);
+   });
+
+   it("sumStream sums all elements", () => {
+     const result = Effect.runSync(sumStream([1, 2, 3, 4, 5]));
+     expect(result).toBe(15);
+   });
+
+   it("sumStream of empty is 0", () => {
+     const result = Effect.runSync(sumStream([]));
+     expect(result).toBe(0);
+   });
+ });
package/katas/024-streams-basics/solution.ts
@@ -0,0 +1,16 @@
+ import { Effect, Stream, Chunk } from "effect";
+
+ /** Create a Stream from an array and collect all elements back to an array */
+ export const streamFromArray = (items: number[]): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Create a stream from [1,2,3,4,5], filter even numbers, double them, collect */
+ export const filterAndDouble = (): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Stream.runFold to sum all elements */
+ export const sumStream = (items: number[]): Effect.Effect<number> => {
+   throw new Error("Not implemented");
+ };
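A minimal sketch of how these three stubs could be completed, assuming the `Stream.fromIterable`, `Stream.runCollect`, `Chunk.toArray`, and `Stream.runFold` APIs the SENSEI notes call out; treat it as one possible shape rather than the dojo's official solution.

```ts
import { Chunk, Effect, Stream, pipe } from "effect";

// Sketch only: one way the 024 stubs could be filled in, not the reference solution.
export const streamFromArray = (items: number[]): Effect.Effect<number[]> =>
  pipe(
    Stream.fromIterable(items), // lift the array into a Stream
    Stream.runCollect,          // terminal operation: Effect<Chunk<number>>
    Effect.map(Chunk.toArray),  // convert the Chunk back to a plain array
  );

export const filterAndDouble = (): Effect.Effect<number[]> =>
  pipe(
    Stream.fromIterable([1, 2, 3, 4, 5]),
    Stream.filter((n) => n % 2 === 0), // keep the evens: 2, 4
    Stream.map((n) => n * 2),          // double them: 4, 8
    Stream.runCollect,
    Effect.map(Chunk.toArray),
  );

export const sumStream = (items: number[]): Effect.Effect<number> =>
  Stream.fromIterable(items).pipe(
    Stream.runFold(0, (acc, n) => acc + n), // like Array.reduce with initial value 0
  );
```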
package/katas/025-stream-operations/SENSEI.md
@@ -0,0 +1,59 @@
+ # SENSEI — 025 Stream Operations
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Stream.take` to take the first n elements from a stream, `Stream.scan` to compute running accumulations, and `Stream.grouped` to batch stream elements into fixed-size chunks.
+
+ ### Tasks
+
+ 1. Implement `takeFirst` -- take the first n elements from a stream of numbers 1, 2, 3, ...
+ 2. Implement `runningTotal` -- use `Stream.scan` to compute running totals of the input array (e.g., `[1,2,3]` becomes `[1,3,6]`).
+ 3. Implement `batchItems` -- use `Stream.grouped` to batch items into chunks of size n, returning an array of arrays.
+
+ ## Prerequisites
+
+ - **024 Streams Basics** — `Stream.fromIterable`, `Stream.filter`, `Stream.map`, `Stream.runCollect`, `Stream.runFold`
+
+ ## Skills
+
+ Invoke `effect-patterns-streams` before teaching this kata.
+
+ > **Note**: `Effect.runSync` appears only in tests. The student does NOT write it. Never attribute it to their learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `takeFirst(3) returns [1, 2, 3]` | `Stream.iterate` + `Stream.take` | Infinite stream limited to 3 elements |
+ | `takeFirst(0) returns []` | `Stream.take` | Edge case — taking zero elements |
+ | `runningTotal of [1,2,3] is [1,3,6]` | `Stream.scan` | Accumulating sums: 1, 1+2=3, 3+3=6 |
+ | `runningTotal of [] is []` | `Stream.scan` | Edge case — empty input |
+ | `batchItems groups into chunks` | `Stream.grouped` | `[1,2,3,4,5]` with size 2 produces `[[1,2],[3,4],[5]]` |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "How does `scan` differ from `reduce` / `runFold`? When would you want *all* the intermediate results instead of just the final one?"
+ - "If you `take(3)` from an infinite stream, how does laziness prevent the stream from running forever?"
+ - "For batching, the last group might be smaller than the batch size. How does `grouped` handle that — does it drop it, pad it, or emit it as-is?"
+
+ ### Common pitfalls
+
+ 1. **`takeFirst` needs an infinite stream** — students must create a stream starting at 1 that counts upward. Use `Stream.iterate(1, n => n + 1)` or similar. Students may try to create a finite array first, which defeats the purpose. Ask: "Can you take 3 elements from an infinite stream without running out of memory? How?"
+ 2. **`scan` emits every intermediate value** — unlike `runFold` which produces one final result, `scan` emits each accumulation step. For `[1, 2, 3]` with addition, scan emits `[1, 3, 6]`, not just `6`. Students may confuse scan with fold. Ask: "How many elements does the output stream have compared to the input?"
+ 3. **`grouped` returns Chunk<Chunk<A>>** — after `runCollect`, you get a `Chunk` of `Chunk`s. You need to convert both the outer and inner chunks to arrays. Students may forget the inner conversion. Ask: "After `runCollect`, what's the type? How do you get `number[][]` from it?"
+ 4. **`scan` initial value and empty streams** — `scan` with an initial value of 0 would emit `[0, 1, 3, 6]` for input `[1, 2, 3]`. The test expects `[1, 3, 6]` — no leading zero. Students need to think about whether to use an initial value or start from the first element. Ask: "The test expects 3 elements out for 3 elements in. Does your scan add an extra element?"
+ 5. **`takeFirst` uses `Stream.iterate`** — create an infinite stream with `Stream.iterate(1, n => n + 1)`, then pipe through `Stream.take(n)` and collect. Don't pre-allocate a finite array.
+
+ ## On Completion
+
+ ### Insight
+
+ `Stream.take` on an infinite stream is safe — it only pulls n elements, then stops. `Stream.scan` is a stateful transformation that remembers the accumulator and emits each intermediate result. `Stream.grouped` batches elements without buffering the entire stream. These operations compose naturally — you build complex pipelines from simple parts, and laziness ensures only the necessary work is done.
+
+ ### Bridge
+
+ You can now create, transform, limit, accumulate, and batch streams. Kata 026 introduces **combining streams** — `merge`, `zip`, and `concat` — for building pipelines that draw from multiple data sources simultaneously.
package/katas/025-stream-operations/solution.test.ts
@@ -0,0 +1,26 @@
+ import { Effect } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { takeFirst, runningTotal, batchItems } from "@/katas/025-stream-operations/solution.js";
+
+ describe("025 — Stream Operations", () => {
+   it("takeFirst(3) returns [1, 2, 3]", () => {
+     expect(Effect.runSync(takeFirst(3))).toEqual([1, 2, 3]);
+   });
+
+   it("takeFirst(0) returns []", () => {
+     expect(Effect.runSync(takeFirst(0))).toEqual([]);
+   });
+
+   it("runningTotal of [1,2,3] is [1,3,6]", () => {
+     expect(Effect.runSync(runningTotal([1, 2, 3]))).toEqual([1, 3, 6]);
+   });
+
+   it("runningTotal of [] is []", () => {
+     expect(Effect.runSync(runningTotal([]))).toEqual([]);
+   });
+
+   it("batchItems groups into chunks", () => {
+     const result = Effect.runSync(batchItems([1, 2, 3, 4, 5], 2));
+     expect(result).toEqual([[1, 2], [3, 4], [5]]);
+   });
+ });
package/katas/025-stream-operations/solution.ts
@@ -0,0 +1,17 @@
+ import { Effect, Stream } from "effect";
+
+ /** Take the first n elements from a stream of numbers 1,2,3,... */
+ export const takeFirst = (n: number): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Stream.scan to compute running totals of the input array
+  * e.g., [1,2,3] -> [1,3,6] */
+ export const runningTotal = (items: number[]): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Stream.grouped to batch items into chunks of size n, return as array of arrays */
+ export const batchItems = (items: number[], size: number): Effect.Effect<number[][]> => {
+   throw new Error("Not implemented");
+ };
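One possible sketch of these three functions, assuming `Stream.iterate`, `Stream.take`, `Stream.scan`, `Stream.drop`, and `Stream.grouped` as described in the SENSEI notes. Dropping the leading accumulator that `Stream.scan` emits is a judgment call; scanning from the first element would be equally valid, and this is not the kata's shipped answer.

```ts
import { Chunk, Effect, Stream } from "effect";

// Sketch only, not the reference solution.
export const takeFirst = (n: number): Effect.Effect<number[]> =>
  Stream.iterate(1, (x) => x + 1).pipe( // infinite stream 1, 2, 3, ...
    Stream.take(n),                     // laziness: only n elements are ever pulled
    Stream.runCollect,
    Effect.map(Chunk.toArray),
  );

export const runningTotal = (items: number[]): Effect.Effect<number[]> =>
  Stream.fromIterable(items).pipe(
    Stream.scan(0, (acc, n) => acc + n), // emits 0, 1, 3, 6 for [1, 2, 3]
    Stream.drop(1),                      // drop the leading 0 so output length matches input length
    Stream.runCollect,
    Effect.map(Chunk.toArray),
  );

export const batchItems = (items: number[], size: number): Effect.Effect<number[][]> =>
  Stream.fromIterable(items).pipe(
    Stream.grouped(size), // emits Chunk<number> batches; the last batch may be short
    Stream.runCollect,
    Effect.map((chunks) => Chunk.toArray(chunks).map((chunk) => Chunk.toArray(chunk))),
  );
```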
package/katas/026-combining-streams/SENSEI.md
@@ -0,0 +1,54 @@
+ # SENSEI — 026 Combining Streams
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Stream.concat` to concatenate two streams sequentially, `Stream.zip` to pair elements from two streams into tuples, and `Stream.merge` to interleave elements from two streams.
+
+ ### Tasks
+
+ 1. Implement `concatStreams` -- concatenate two streams sequentially.
+ 2. Implement `zipStreams` -- zip two streams into tuples.
+ 3. Implement `mergeStreams` -- merge two streams (interleaved, order may vary).
+
+ ## Prerequisites
+
+ - **024 Streams Basics** — `Stream`, `Stream.fromIterable`, `Stream.runCollect`
+ - **025 Stream Operations** — `Stream.map`, `Stream.filter`, `Stream.take`
+
+ > **Note**: `Effect.runSync` appears only in tests. Never attribute it to the user's learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `concatStreams appends second after first` | `Stream.concat` | `[1,2] ++ [3,4] = [1,2,3,4]` — sequential ordering |
+ | `zipStreams pairs elements` | `Stream.zip` | `[1,2] zip ["a","b"] = [[1,"a"],[2,"b"]]` — lock-step pairing |
+ | `mergeStreams contains all elements` | `Stream.merge` | `merge [1,2] [3,4]` sorted = `[1,2,3,4]` — all elements present regardless of order |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "What ordering guarantees does `merge` give you vs `concat`? If you need results in a specific order, which do you pick?"
+ - "If one stream has 3 elements and the other has 5, what should `zip` produce? What about `concat`? What about `merge`? Think about the semantics of each combinator."
+ - "When would you choose `merge` (non-deterministic interleaving) over `concat` (sequential) in a real application?"
+
+ ### Common pitfalls
+
+ 1. **zip stops at the shorter stream** — `Stream.zip` produces pairs until one stream runs out. If the streams have different lengths, the extra elements from the longer one are dropped. Ask: "How many pairs do you get from zipping [1,2,3] with ['a','b']?"
+ 2. **merge order is non-deterministic** — `Stream.merge` interleaves elements as they become available. The test uses `.sort()` to verify contents regardless of order. Ask: "Can you guarantee which element comes first from a merge? Why does the test sort the result?"
+ 3. **Forgetting Chunk.toArray** — see kata 024 for `Chunk.toArray`; the same pattern applies here.
+ 4. **Confusing concat and merge** — `concat` is sequential (all of A, then all of B), while `merge` is concurrent (interleaved). Ask: "If stream A takes a long time, does `concat` start B before A finishes?"
+ 5. **All three end the same way** — every combinator function ends with `runCollect` then `Chunk.toArray`. Start with `concatStreams` (most intuitive), then apply the same pattern to `zip` and `merge`.
+
+ ## On Completion
+
+ ### Insight
+
+ `concat` is sequential (all of A then all of B), `zip` is lock-step (pairs up elements, stops at the shorter), `merge` is concurrent (interleaves as available). Each combinator models a different relationship between two data sources. Choosing the right one depends on whether your sources should be consumed one after the other (concat), are correlated by position (zip), or are independent and consumed concurrently (merge).
+
+ ### Bridge
+
+ Now that you can create, transform, and combine streams, kata 027 introduces **data pipelines** — using `Stream.mapEffect` to process each element through an effectful function, and `Stream.runFold` to reduce streams to a single value. This is the ETL (Extract, Transform, Load) pattern expressed as a stream pipeline.
package/katas/026-combining-streams/solution.test.ts
@@ -0,0 +1,20 @@
+ import { Effect } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { concatStreams, zipStreams, mergeStreams } from "@/katas/026-combining-streams/solution.js";
+
+ describe("026 — Combining Streams", () => {
+   it("concatStreams appends second after first", () => {
+     const result = Effect.runSync(concatStreams([1, 2], [3, 4]));
+     expect(result).toEqual([1, 2, 3, 4]);
+   });
+
+   it("zipStreams pairs elements", () => {
+     const result = Effect.runSync(zipStreams([1, 2], ["a", "b"]));
+     expect(result).toEqual([[1, "a"], [2, "b"]]);
+   });
+
+   it("mergeStreams contains all elements", () => {
+     const result = Effect.runSync(mergeStreams([1, 2], [3, 4]));
+     expect(result.sort()).toEqual([1, 2, 3, 4]);
+   });
+ });
package/katas/026-combining-streams/solution.ts
@@ -0,0 +1,16 @@
+ import { Effect, Stream } from "effect";
+
+ /** Concatenate two streams sequentially */
+ export const concatStreams = (a: number[], b: number[]): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Zip two streams into tuples */
+ export const zipStreams = (a: number[], b: string[]): Effect.Effect<[number, string][]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Merge two streams (interleaved, order may vary) */
+ export const mergeStreams = (a: number[], b: number[]): Effect.Effect<number[]> => {
+   throw new Error("Not implemented");
+ };
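A possible shape for these three wrappers, assuming `Stream.concat`, `Stream.zip`, and `Stream.merge` behave as the SENSEI notes describe. The shared `collect` helper is a convenience introduced here for illustration, not part of the kata, and this is not the shipped answer.

```ts
import { Chunk, Effect, Stream } from "effect";

// Sketch only, not the reference solution. All three helpers end the same way:
// runCollect, then Chunk.toArray, as pitfall 5 above points out.
const collect = <A>(stream: Stream.Stream<A>): Effect.Effect<A[]> =>
  Stream.runCollect(stream).pipe(Effect.map(Chunk.toArray));

export const concatStreams = (a: number[], b: number[]): Effect.Effect<number[]> =>
  collect(Stream.concat(Stream.fromIterable(a), Stream.fromIterable(b))); // all of a, then all of b

export const zipStreams = (a: number[], b: string[]): Effect.Effect<[number, string][]> =>
  collect(Stream.zip(Stream.fromIterable(a), Stream.fromIterable(b))); // pairs stop at the shorter stream

export const mergeStreams = (a: number[], b: number[]): Effect.Effect<number[]> =>
  collect(Stream.merge(Stream.fromIterable(a), Stream.fromIterable(b))); // interleaving order may vary
```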
package/katas/027-data-pipelines/SENSEI.md
@@ -0,0 +1,58 @@
+ # SENSEI — 027 Data Pipelines
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Stream.mapEffect` to apply an effectful function to each stream element, and to build multi-step pipelines that compose stream transformations into data processing workflows.
+
+ ### Tasks
+
+ 1. Implement `processItems` -- use `Stream.mapEffect` to asynchronously process each item by applying `fn` to each element in the stream, then collect results.
+ 2. Implement `sumPositiveNumbers` -- build a pipeline that takes an array of strings, parses them to numbers (skipping non-numeric values), filters positives, and sums them.
+
+ ## Prerequisites
+
+ - **024 Streams Basics** — `Stream`, `Stream.fromIterable`, `Stream.runCollect`
+ - **025 Stream Operations** — `Stream.map`, `Stream.filter`, `Stream.take`
+ - **026 Combining Streams** — `Stream.concat`, `Stream.zip`, `Stream.merge`
+
+ ## Skills
+
+ Invoke `effect-patterns-building-data-pipelines` before teaching this kata.
+
+ > **Note**: `Effect.runSync` appears only in tests. Never attribute it to the user's learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `processItems applies function to each` | `Stream.mapEffect` + `Stream.runCollect` | `[1,2,3]` with `n => succeed(n*10)` = `[10,20,30]` |
+ | `sumPositiveNumbers parses, filters, and sums` | `Stream.map` + `Stream.filter` + `Stream.runFold` | `["1","abc","-2","3","4"]` -> skip non-numeric, filter positive, sum = 8 |
+ | `sumPositiveNumbers of empty is 0` | `Stream.runFold` | Empty stream fold returns initial value 0 |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "If one step in your pipeline fails (e.g., a bad parse or a failed API call), what happens to the partially processed data? Does the entire pipeline abort?"
+ - "For `sumPositiveNumbers`, there are three stages: parse, filter, sum. What happens if you reorder them -- say, filter before parse? Does the pipeline still work?"
+ - "What makes `mapEffect` different from `map`? When would each step in a pipeline *need* to be effectful vs pure?"
+
+ ### Common pitfalls
+
+ 1. **Non-numeric strings should be skipped, not fail the pipeline** — `parseInt("abc")` returns `NaN`. The pipeline should filter these out rather than failing the entire stream. Ask: "What happens when you parse 'abc'? Should that crash the whole pipeline or just skip that element?"
+ 2. **processItems uses mapEffect, not map** — since `fn` returns an `Effect`, you need `Stream.mapEffect` to unwrap each result into the stream. Using `Stream.map` would give you a stream of Effects. Ask: "What type do you get if you `map` with a function that returns an Effect?"
+ 3. **runFold needs an initial value and a combine function** — `Stream.runFold(0, (acc, n) => acc + n)` starts at 0 and adds each element. Ask: "What should the sum be if the stream is empty?"
+ 4. **Filter placement matters** — filter out `NaN` values BEFORE filtering for positive numbers. Both checks can be combined, but the parse step must come first.
+ 5. **Pipeline stage order** — break `sumPositiveNumbers` into stages: `fromIterable` -> `map(parseInt)` -> `filter(not NaN and positive)` -> `runFold` to sum.
+
+ ## On Completion
+
+ ### Insight
+
+ `Stream.mapEffect` is the bridge between streams and effects — each element is processed through an effectful function, making streams capable of I/O, validation, and error handling at every step. The `sumPositiveNumbers` pipeline demonstrates the ETL pattern: **Extract** (parse strings), **Transform** (filter positives), **Load** (fold into sum). This same pattern scales to real-world data processing — reading from files, transforming records, writing to databases — all expressed as a composable stream pipeline.
+
+ ### Bridge
+
+ With streams and data pipelines covered, kata 028 shifts to **observability** — using `Effect.log`, `Effect.annotateLogs`, and `Effect.withSpan` to add structured logging and tracing to your effects. These tools make production applications debuggable without changing their logic.
package/katas/027-data-pipelines/solution.test.ts
@@ -0,0 +1,22 @@
+ import { Effect } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { processItems, sumPositiveNumbers } from "@/katas/027-data-pipelines/solution.js";
+
+ describe("027 — Data Pipelines", () => {
+   it("processItems applies function to each", () => {
+     const result = Effect.runSync(
+       processItems([1, 2, 3], (n) => Effect.succeed(n * 10)),
+     );
+     expect(result).toEqual([10, 20, 30]);
+   });
+
+   it("sumPositiveNumbers parses, filters, and sums", () => {
+     const result = Effect.runSync(sumPositiveNumbers(["1", "abc", "-2", "3", "4"]));
+     expect(result).toBe(8);
+   });
+
+   it("sumPositiveNumbers of empty is 0", () => {
+     const result = Effect.runSync(sumPositiveNumbers([]));
+     expect(result).toBe(0);
+   });
+ });
package/katas/027-data-pipelines/solution.ts
@@ -0,0 +1,16 @@
+ import { Effect, Stream } from "effect";
+
+ /** Use Stream.mapEffect to asynchronously process each item
+  * Apply fn to each element in the stream, collect results */
+ export const processItems = <A, B>(
+   items: A[],
+   fn: (a: A) => Effect.Effect<B>,
+ ): Effect.Effect<B[]> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Build a pipeline: take array of strings, parse to numbers (skip non-numeric),
+  * filter positives, sum them */
+ export const sumPositiveNumbers = (items: string[]): Effect.Effect<number> => {
+   throw new Error("Not implemented");
+ };
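One way the ETL pipeline described above might be sketched, assuming `Stream.mapEffect` and `Stream.runFold` as named in the SENSEI notes; the NaN handling mirrors pitfall 1 and is illustrative rather than the kata's shipped answer.

```ts
import { Chunk, Effect, Stream } from "effect";

// Sketch only, not the reference solution.
export const processItems = <A, B>(
  items: A[],
  fn: (a: A) => Effect.Effect<B>,
): Effect.Effect<B[]> =>
  Stream.fromIterable(items).pipe(
    Stream.mapEffect(fn), // run the effectful step per element; plain map would yield a stream of Effects
    Stream.runCollect,
    Effect.map(Chunk.toArray),
  );

export const sumPositiveNumbers = (items: string[]): Effect.Effect<number> =>
  Stream.fromIterable(items).pipe(
    Stream.map((s) => Number.parseInt(s, 10)),       // Extract: parse; "abc" becomes NaN
    Stream.filter((n) => !Number.isNaN(n) && n > 0), // Transform: skip NaN, keep positives
    Stream.runFold(0, (acc, n) => acc + n),          // Load: fold into a single sum
  );
```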
package/katas/028-logging-and-spans/SENSEI.md
@@ -0,0 +1,58 @@
+ # SENSEI — 028 Logging and Spans
+
+ ## Briefing
+
+ ### Goal
+
+ Learn to use `Effect.log` for structured logging within effects, `Effect.annotateLogs` to add contextual metadata to log entries, and `Effect.withSpan` to wrap computations in named spans for tracing.
+
+ ### Tasks
+
+ 1. Implement `logAndReturn` -- use `Effect.log` to log a message, then return `"done"`.
+ 2. Implement `logWithContext` -- use `Effect.annotateLogs` to add a `{ requestId }` annotation, then log the message, then return `"done"`.
+ 3. Implement `withTracking` -- use `Effect.withSpan` to wrap a computation in a named span.
+
+ ## Prerequisites
+
+ - **003 Generator Pipelines** — `Effect.gen`, `yield*`
+ - **011 Services and Context** — `Context.Tag`, service access
+
+ ## Skills
+
+ Invoke `effect-patterns-observability` before teaching this kata.
+
+ > **Note**: `Effect.runSync` appears only in tests. Never attribute it to the user's learning.
+
+ ## Test Map
+
+ | Test | Concept | Verifies |
+ |------|---------|----------|
+ | `logAndReturn logs the message and returns 'done'` | `Effect.log` + sequencing | Custom logger captures "hello"; returns `"done"` |
+ | `logWithContext annotates logs with requestId` | `Effect.annotateLogs` + `Effect.log` | Custom logger captures `requestId: "req-1"` annotation |
+ | `withTracking preserves the effect result` | `Effect.withSpan` | Wrapping `Effect.succeed(42)` in a span still yields 42 |
+
+ ## Teaching Approach
+
+ ### Socratic prompts
+
+ - "If you removed all logging calls, would the tests still pass? What does that tell you about how the current tests verify observability?"
+ - "If you have a request ID and want every log in that request's scope to include it, how would you avoid passing it to every function manually? How does `annotateLogs` solve this?"
+ - "`withSpan` doesn't change the result of the effect. So what *does* it do, and why would you want it in production?"
+
+ ### Common pitfalls
+
+ 1. **Effect.log returns void** — `Effect.log("hello")` produces `Effect<void>`. You need to sequence it with the return value using `gen`, `flatMap`, or `andThen`. Writing `return Effect.log(message)` would return `Effect<void>`, not `Effect<string>`. Ask: "What type does `Effect.log` produce? How do you sequence it with returning 'done'?"
+ 2. **annotateLogs wraps an effect** — `Effect.annotateLogs(effect, { requestId })` adds annotations to ALL logs within that effect's scope. The annotation is the outer wrapper; the logging happens inside. Ask: "Does `annotateLogs` emit a log itself, or does it modify logs emitted by the effect you give it?"
+ 3. **withSpan is a simple wrapper** — `Effect.withSpan(effect, "name")` or `effect.pipe(Effect.withSpan("name"))` does not change the effect's result. Students may overthink it. Ask: "If you wrap `Effect.succeed(42)` in a span, what value comes out?"
+ 4. **Argument order** — `Effect.annotateLogs` and `Effect.withSpan` have specific argument orders. Check the types if unsure.
+ 5. **Sequencing with `Effect.gen`** — for `logAndReturn`, use `Effect.gen`: yield `Effect.log(message)`, then return `"done"`. The same pattern applies to `logWithContext` with `annotateLogs` wrapping the generator.
+
+ ## On Completion
+
+ ### Insight
+
+ `Effect.log` is structured — it integrates with the runtime's logging system, not just `console.log`. `annotateLogs` adds contextual metadata (like request IDs) that propagates through the entire effect tree. `withSpan` creates tracing spans for performance monitoring. All three are composable and production-ready. The key insight is that observability is a cross-cutting concern that Effect handles declaratively — you add it to your effects without changing their logic or return values.
+
+ ### Bridge
+
+ With observability in place, kata 029 introduces the **HTTP client** pattern — combining services (from kata 011), schema validation (from kata 014), and retry (from kata 016) into a realistic HTTP request pipeline. This is where multiple Effect patterns come together for real-world use.
package/katas/028-logging-and-spans/solution.test.ts
@@ -0,0 +1,50 @@
+ import { Effect, Logger, Ref, LogLevel, HashMap } from "effect";
+ import { describe, expect, it } from "vitest";
+ import { logAndReturn, logWithContext, withTracking } from "@/katas/028-logging-and-spans/solution.js";
+
+ // Helper: create a test logger that captures messages to a Ref
+ const makeTestLogger = () =>
+   Effect.gen(function* () {
+     const logs = yield* Ref.make<string[]>([]);
+     const logger = Logger.make(({ message }) => {
+       Ref.update(logs, (arr) => [...arr, String(message)]).pipe(Effect.runSync);
+     });
+     return { logs, logger };
+   });
+
+ describe("028 — Logging and Spans", () => {
+   it("logAndReturn logs the message and returns 'done'", () =>
+     Effect.gen(function* () {
+       const { logs, logger } = yield* makeTestLogger();
+       const result = yield* logAndReturn("hello").pipe(
+         Logger.withMinimumLogLevel(LogLevel.All),
+         Effect.provide(Logger.replace(Logger.defaultLogger, logger)),
+       );
+       expect(result).toBe("done");
+       const captured = yield* Ref.get(logs);
+       expect(captured).toContain("hello");
+     }).pipe(Effect.runSync));
+
+   it("logWithContext annotates logs with requestId", () =>
+     Effect.gen(function* () {
+       const annotations = yield* Ref.make<Record<string, unknown>>({});
+       const logger = Logger.make(({ annotations: ann }) => {
+         const obj: Record<string, unknown> = {};
+         HashMap.forEach(ann, (value, key) => {
+           obj[key] = value;
+         });
+         Ref.update(annotations, () => obj).pipe(Effect.runSync);
+       });
+       yield* logWithContext("req-1", "processing").pipe(
+         Logger.withMinimumLogLevel(LogLevel.All),
+         Effect.provide(Logger.replace(Logger.defaultLogger, logger)),
+       );
+       const captured = yield* Ref.get(annotations);
+       expect(captured).toHaveProperty("requestId", "req-1");
+     }).pipe(Effect.runSync));
+
+   it("withTracking preserves the effect result", () => {
+     const result = Effect.runSync(withTracking("my-span", Effect.succeed(42)));
+     expect(result).toBe(42);
+   });
+ });
package/katas/028-logging-and-spans/solution.ts
@@ -0,0 +1,20 @@
+ import { Effect } from "effect";
+
+ /** Use Effect.log to log a message, then return "done" */
+ export const logAndReturn = (message: string): Effect.Effect<string> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Effect.annotateLogs to add { requestId } annotation,
+  * then log the message, then return "done" */
+ export const logWithContext = (requestId: string, message: string): Effect.Effect<string> => {
+   throw new Error("Not implemented");
+ };
+
+ /** Use Effect.withSpan to wrap a computation in a named span */
+ export const withTracking = <A, E>(
+   name: string,
+   effect: Effect.Effect<A, E>,
+ ): Effect.Effect<A, E> => {
+   throw new Error("Not implemented");
+ };
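A minimal sketch consistent with the pitfalls listed above, assuming the data-last forms of `Effect.annotateLogs` and `Effect.withSpan`; it is one possible shape, not the kata's shipped answer.

```ts
import { Effect } from "effect";

// Sketch only, not the reference solution.
export const logAndReturn = (message: string): Effect.Effect<string> =>
  Effect.gen(function* () {
    yield* Effect.log(message); // Effect.log is Effect<void>, so sequence it before returning
    return "done";
  });

export const logWithContext = (requestId: string, message: string): Effect.Effect<string> =>
  Effect.gen(function* () {
    yield* Effect.log(message);
    return "done";
  }).pipe(Effect.annotateLogs({ requestId })); // annotates every log emitted inside the wrapped effect

export const withTracking = <A, E>(
  name: string,
  effect: Effect.Effect<A, E>,
): Effect.Effect<A, E> => effect.pipe(Effect.withSpan(name)); // the span wrapper does not change the result
```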