@effect-uai/core 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/{AiError-CqmYjXyx.d.mts → AiError-csR8Bhxx.d.mts} +26 -4
- package/dist/{AiError-CqmYjXyx.d.mts.map → AiError-csR8Bhxx.d.mts.map} +1 -1
- package/dist/Audio-BfCTGnH3.d.mts +61 -0
- package/dist/Audio-BfCTGnH3.d.mts.map +1 -0
- package/dist/Image-DxyXqzAM.d.mts +61 -0
- package/dist/Image-DxyXqzAM.d.mts.map +1 -0
- package/dist/{Items-D1C2686t.d.mts → Items-Hg5AsYxl.d.mts} +132 -80
- package/dist/Items-Hg5AsYxl.d.mts.map +1 -0
- package/dist/Media-D_CpcM1Z.d.mts +57 -0
- package/dist/Media-D_CpcM1Z.d.mts.map +1 -0
- package/dist/{StructuredFormat-B5ueioNr.d.mts → StructuredFormat-Cl41C56K.d.mts} +5 -5
- package/dist/StructuredFormat-Cl41C56K.d.mts.map +1 -0
- package/dist/{Tool-5wxOCuOh.d.mts → Tool-B8B5qVEy.d.mts} +13 -13
- package/dist/Tool-B8B5qVEy.d.mts.map +1 -0
- package/dist/{Turn-Bi83du4I.d.mts → Turn-7geUcKsf.d.mts} +5 -11
- package/dist/Turn-7geUcKsf.d.mts.map +1 -0
- package/dist/{chunk-CfYAbeIz.mjs → chunk-uyGKjUfl.mjs} +2 -1
- package/dist/dist-DV5ISja1.mjs +13782 -0
- package/dist/dist-DV5ISja1.mjs.map +1 -0
- package/dist/domain/AiError.d.mts +2 -2
- package/dist/domain/AiError.mjs +19 -3
- package/dist/domain/AiError.mjs.map +1 -1
- package/dist/domain/Audio.d.mts +2 -0
- package/dist/domain/Audio.mjs +14 -0
- package/dist/domain/Audio.mjs.map +1 -0
- package/dist/domain/Image.d.mts +2 -0
- package/dist/domain/Image.mjs +58 -0
- package/dist/domain/Image.mjs.map +1 -0
- package/dist/domain/Items.d.mts +2 -2
- package/dist/domain/Items.mjs +19 -42
- package/dist/domain/Items.mjs.map +1 -1
- package/dist/domain/Media.d.mts +2 -0
- package/dist/domain/Media.mjs +14 -0
- package/dist/domain/Media.mjs.map +1 -0
- package/dist/domain/Music.d.mts +116 -0
- package/dist/domain/Music.d.mts.map +1 -0
- package/dist/domain/Music.mjs +29 -0
- package/dist/domain/Music.mjs.map +1 -0
- package/dist/domain/Transcript.d.mts +95 -0
- package/dist/domain/Transcript.d.mts.map +1 -0
- package/dist/domain/Transcript.mjs +22 -0
- package/dist/domain/Transcript.mjs.map +1 -0
- package/dist/domain/Turn.d.mts +1 -1
- package/dist/domain/Turn.mjs +1 -1
- package/dist/embedding-model/Embedding.d.mts +107 -0
- package/dist/embedding-model/Embedding.d.mts.map +1 -0
- package/dist/embedding-model/Embedding.mjs +18 -0
- package/dist/embedding-model/Embedding.mjs.map +1 -0
- package/dist/embedding-model/EmbeddingModel.d.mts +97 -0
- package/dist/embedding-model/EmbeddingModel.d.mts.map +1 -0
- package/dist/embedding-model/EmbeddingModel.mjs +17 -0
- package/dist/embedding-model/EmbeddingModel.mjs.map +1 -0
- package/dist/index.d.mts +21 -7
- package/dist/index.mjs +16 -2
- package/dist/language-model/LanguageModel.d.mts +12 -20
- package/dist/language-model/LanguageModel.d.mts.map +1 -1
- package/dist/language-model/LanguageModel.mjs +3 -20
- package/dist/language-model/LanguageModel.mjs.map +1 -1
- package/dist/loop/Loop.d.mts +31 -7
- package/dist/loop/Loop.d.mts.map +1 -1
- package/dist/loop/Loop.mjs +39 -6
- package/dist/loop/Loop.mjs.map +1 -1
- package/dist/loop/Loop.test.d.mts +1 -0
- package/dist/loop/Loop.test.mjs +411 -0
- package/dist/loop/Loop.test.mjs.map +1 -0
- package/dist/magic-string.es-BgIV5Mu3.mjs +1013 -0
- package/dist/magic-string.es-BgIV5Mu3.mjs.map +1 -0
- package/dist/math/Vector.d.mts +47 -0
- package/dist/math/Vector.d.mts.map +1 -0
- package/dist/math/Vector.mjs +117 -0
- package/dist/math/Vector.mjs.map +1 -0
- package/dist/music-generator/MusicGenerator.d.mts +77 -0
- package/dist/music-generator/MusicGenerator.d.mts.map +1 -0
- package/dist/music-generator/MusicGenerator.mjs +51 -0
- package/dist/music-generator/MusicGenerator.mjs.map +1 -0
- package/dist/music-generator/MusicGenerator.test.d.mts +1 -0
- package/dist/music-generator/MusicGenerator.test.mjs +154 -0
- package/dist/music-generator/MusicGenerator.test.mjs.map +1 -0
- package/dist/observability/Metrics.d.mts +2 -2
- package/dist/observability/Metrics.d.mts.map +1 -1
- package/dist/observability/Metrics.mjs +1 -1
- package/dist/observability/Metrics.mjs.map +1 -1
- package/dist/speech-synthesizer/SpeechSynthesizer.d.mts +96 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.d.mts.map +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.mjs +48 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.mjs.map +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.d.mts +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.mjs +112 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.mjs.map +1 -0
- package/dist/streaming/JSONL.d.mts +10 -3
- package/dist/streaming/JSONL.d.mts.map +1 -1
- package/dist/streaming/JSONL.mjs +13 -2
- package/dist/streaming/JSONL.mjs.map +1 -1
- package/dist/streaming/JSONL.test.d.mts +1 -0
- package/dist/streaming/JSONL.test.mjs +70 -0
- package/dist/streaming/JSONL.test.mjs.map +1 -0
- package/dist/streaming/Lines.mjs +1 -1
- package/dist/streaming/SSE.d.mts +2 -2
- package/dist/streaming/SSE.d.mts.map +1 -1
- package/dist/streaming/SSE.mjs +1 -1
- package/dist/streaming/SSE.mjs.map +1 -1
- package/dist/streaming/SSE.test.d.mts +1 -0
- package/dist/streaming/SSE.test.mjs +72 -0
- package/dist/streaming/SSE.test.mjs.map +1 -0
- package/dist/structured-format/StructuredFormat.d.mts +1 -1
- package/dist/structured-format/StructuredFormat.mjs +1 -1
- package/dist/structured-format/StructuredFormat.mjs.map +1 -1
- package/dist/testing/MockMusicGenerator.d.mts +39 -0
- package/dist/testing/MockMusicGenerator.d.mts.map +1 -0
- package/dist/testing/MockMusicGenerator.mjs +96 -0
- package/dist/testing/MockMusicGenerator.mjs.map +1 -0
- package/dist/testing/MockProvider.d.mts +6 -6
- package/dist/testing/MockProvider.d.mts.map +1 -1
- package/dist/testing/MockProvider.mjs.map +1 -1
- package/dist/testing/MockSpeechSynthesizer.d.mts +37 -0
- package/dist/testing/MockSpeechSynthesizer.d.mts.map +1 -0
- package/dist/testing/MockSpeechSynthesizer.mjs +95 -0
- package/dist/testing/MockSpeechSynthesizer.mjs.map +1 -0
- package/dist/testing/MockTranscriber.d.mts +37 -0
- package/dist/testing/MockTranscriber.d.mts.map +1 -0
- package/dist/testing/MockTranscriber.mjs +77 -0
- package/dist/testing/MockTranscriber.mjs.map +1 -0
- package/dist/tool/HistoryCheck.d.mts +6 -3
- package/dist/tool/HistoryCheck.d.mts.map +1 -1
- package/dist/tool/HistoryCheck.mjs +7 -1
- package/dist/tool/HistoryCheck.mjs.map +1 -1
- package/dist/tool/Outcome.d.mts +138 -2
- package/dist/tool/Outcome.d.mts.map +1 -0
- package/dist/tool/Outcome.mjs +32 -10
- package/dist/tool/Outcome.mjs.map +1 -1
- package/dist/tool/Resolvers.d.mts +11 -8
- package/dist/tool/Resolvers.d.mts.map +1 -1
- package/dist/tool/Resolvers.mjs +10 -1
- package/dist/tool/Resolvers.mjs.map +1 -1
- package/dist/tool/Resolvers.test.d.mts +1 -0
- package/dist/tool/Resolvers.test.mjs +317 -0
- package/dist/tool/Resolvers.test.mjs.map +1 -0
- package/dist/tool/Tool.d.mts +1 -1
- package/dist/tool/Tool.mjs +1 -1
- package/dist/tool/Tool.mjs.map +1 -1
- package/dist/tool/ToolEvent.d.mts +151 -2
- package/dist/tool/ToolEvent.d.mts.map +1 -0
- package/dist/tool/ToolEvent.mjs +30 -4
- package/dist/tool/ToolEvent.mjs.map +1 -1
- package/dist/tool/Toolkit.d.mts +19 -10
- package/dist/tool/Toolkit.d.mts.map +1 -1
- package/dist/tool/Toolkit.mjs +5 -5
- package/dist/tool/Toolkit.mjs.map +1 -1
- package/dist/tool/Toolkit.test.d.mts +1 -0
- package/dist/tool/Toolkit.test.mjs +113 -0
- package/dist/tool/Toolkit.test.mjs.map +1 -0
- package/dist/transcriber/Transcriber.d.mts +101 -0
- package/dist/transcriber/Transcriber.d.mts.map +1 -0
- package/dist/transcriber/Transcriber.mjs +49 -0
- package/dist/transcriber/Transcriber.mjs.map +1 -0
- package/dist/transcriber/Transcriber.test.d.mts +1 -0
- package/dist/transcriber/Transcriber.test.mjs +130 -0
- package/dist/transcriber/Transcriber.test.mjs.map +1 -0
- package/package.json +65 -13
- package/src/domain/AiError.ts +21 -0
- package/src/domain/Audio.ts +88 -0
- package/src/domain/Image.ts +75 -0
- package/src/domain/Items.ts +18 -47
- package/src/domain/Media.ts +61 -0
- package/src/domain/Music.ts +121 -0
- package/src/domain/Transcript.ts +83 -0
- package/src/embedding-model/Embedding.ts +117 -0
- package/src/embedding-model/EmbeddingModel.ts +107 -0
- package/src/index.ts +15 -1
- package/src/language-model/LanguageModel.ts +2 -22
- package/src/loop/Loop.test.ts +114 -2
- package/src/loop/Loop.ts +69 -5
- package/src/math/Vector.ts +138 -0
- package/src/music-generator/MusicGenerator.test.ts +170 -0
- package/src/music-generator/MusicGenerator.ts +123 -0
- package/src/observability/Metrics.ts +1 -1
- package/src/speech-synthesizer/SpeechSynthesizer.test.ts +141 -0
- package/src/speech-synthesizer/SpeechSynthesizer.ts +131 -0
- package/src/streaming/JSONL.ts +12 -0
- package/src/streaming/SSE.ts +1 -1
- package/src/structured-format/StructuredFormat.ts +2 -2
- package/src/testing/MockMusicGenerator.ts +170 -0
- package/src/testing/MockProvider.ts +2 -2
- package/src/testing/MockSpeechSynthesizer.ts +165 -0
- package/src/testing/MockTranscriber.ts +139 -0
- package/src/tool/HistoryCheck.ts +2 -5
- package/src/tool/Outcome.ts +36 -36
- package/src/tool/Resolvers.test.ts +11 -35
- package/src/tool/Resolvers.ts +5 -14
- package/src/tool/Tool.ts +9 -9
- package/src/tool/ToolEvent.ts +28 -24
- package/src/tool/Toolkit.test.ts +97 -2
- package/src/tool/Toolkit.ts +57 -33
- package/src/transcriber/Transcriber.test.ts +125 -0
- package/src/transcriber/Transcriber.ts +127 -0
- package/dist/Items-D1C2686t.d.mts.map +0 -1
- package/dist/Outcome-GiaNvt7i.d.mts +0 -32
- package/dist/Outcome-GiaNvt7i.d.mts.map +0 -1
- package/dist/StructuredFormat-B5ueioNr.d.mts.map +0 -1
- package/dist/Tool-5wxOCuOh.d.mts.map +0 -1
- package/dist/ToolEvent-wTMgb2GO.d.mts +0 -29
- package/dist/ToolEvent-wTMgb2GO.d.mts.map +0 -1
- package/dist/Turn-Bi83du4I.d.mts.map +0 -1
- package/dist/match/Match.d.mts +0 -16
- package/dist/match/Match.d.mts.map +0 -1
- package/dist/match/Match.mjs +0 -15
- package/dist/match/Match.mjs.map +0 -1
- package/src/match/Match.ts +0 -9
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Linear-algebra primitives for embedding vectors:
|
|
3
|
+
*
|
|
4
|
+
* - **Dense float32**: `dot`, `l2Norm`, `normalize`, `cosine`,
|
|
5
|
+
* `euclidean`. Used for retrieval over single-vector embeddings.
|
|
6
|
+
* - **Sparse**: `sparseDot`, `sparseL2Norm`, `sparseCosine`. Used with
|
|
7
|
+
* `SparseEmbedding`, e.g. Jina ELSER outputs.
|
|
8
|
+
* - **Multivector** (late-interaction): `maxSim`. Used with
|
|
9
|
+
* `MultivectorEmbedding`, e.g. Jina v4 multivector / ColBERT.
|
|
10
|
+
*
|
|
11
|
+
* Hot loops are allocation-free; consumers can call these inside
|
|
12
|
+
* `.map()` over thousands of vectors without GC pressure. For
|
|
13
|
+
* GPU / SIMD / WASM-accelerated math at vector-DB scale, reach for a
|
|
14
|
+
* dedicated library - this module deliberately stays at the
|
|
15
|
+
* recipe-volume tier.
|
|
16
|
+
*/
|
|
17
|
+
import type { MultivectorEmbedding, SparseEmbedding } from "../embedding-model/Embedding.js"
|
|
18
|
+
|
|
19
|
+
/** Inner / dot product. */
|
|
20
|
+
export const dot = (a: Float32Array, b: Float32Array): number => {
|
|
21
|
+
let s = 0
|
|
22
|
+
const n = Math.min(a.length, b.length)
|
|
23
|
+
for (let i = 0; i < n; i++) s += a[i]! * b[i]!
|
|
24
|
+
return s
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
/** L2 norm (Euclidean magnitude). */
|
|
28
|
+
export const l2Norm = (v: Float32Array): number => {
|
|
29
|
+
let s = 0
|
|
30
|
+
for (let i = 0; i < v.length; i++) s += v[i]! * v[i]!
|
|
31
|
+
return Math.sqrt(s)
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* L2-normalize to a unit vector. Allocates a new `Float32Array`. A zero
|
|
36
|
+
* vector returns zeros (no division-by-zero).
|
|
37
|
+
*/
|
|
38
|
+
export const normalize = (v: Float32Array): Float32Array => {
|
|
39
|
+
const n = l2Norm(v)
|
|
40
|
+
if (n === 0) return new Float32Array(v.length)
|
|
41
|
+
const out = new Float32Array(v.length)
|
|
42
|
+
for (let i = 0; i < v.length; i++) out[i] = v[i]! / n
|
|
43
|
+
return out
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
/**
|
|
47
|
+
* Cosine similarity. Range `[-1, 1]`; higher = more similar. Returns
|
|
48
|
+
* `NaN` if either vector has zero magnitude.
|
|
49
|
+
*/
|
|
50
|
+
export const cosine = (a: Float32Array, b: Float32Array): number => {
|
|
51
|
+
let d = 0
|
|
52
|
+
let na = 0
|
|
53
|
+
let nb = 0
|
|
54
|
+
const n = Math.min(a.length, b.length)
|
|
55
|
+
for (let i = 0; i < n; i++) {
|
|
56
|
+
const ai = a[i]!
|
|
57
|
+
const bi = b[i]!
|
|
58
|
+
d += ai * bi
|
|
59
|
+
na += ai * ai
|
|
60
|
+
nb += bi * bi
|
|
61
|
+
}
|
|
62
|
+
return d / (Math.sqrt(na) * Math.sqrt(nb))
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
/** Euclidean (L2) distance. */
|
|
66
|
+
export const euclidean = (a: Float32Array, b: Float32Array): number => {
|
|
67
|
+
let s = 0
|
|
68
|
+
const n = Math.min(a.length, b.length)
|
|
69
|
+
for (let i = 0; i < n; i++) {
|
|
70
|
+
const d = a[i]! - b[i]!
|
|
71
|
+
s += d * d
|
|
72
|
+
}
|
|
73
|
+
return Math.sqrt(s)
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// ---------------------------------------------------------------------------
|
|
77
|
+
// Sparse vectors (Record<string, number>)
|
|
78
|
+
// ---------------------------------------------------------------------------
|
|
79
|
+
|
|
80
|
+
/** Inner product over the intersection of token keys. */
|
|
81
|
+
export const sparseDot = (a: SparseEmbedding, b: SparseEmbedding): number => {
|
|
82
|
+
// Iterate the smaller map; lookup against the larger one. O(min(|a|, |b|)).
|
|
83
|
+
const aSize = Object.keys(a.weights).length
|
|
84
|
+
const bSize = Object.keys(b.weights).length
|
|
85
|
+
const [smaller, larger] = aSize <= bSize ? [a.weights, b.weights] : [b.weights, a.weights]
|
|
86
|
+
let s = 0
|
|
87
|
+
for (const token in smaller) {
|
|
88
|
+
const other = larger[token]
|
|
89
|
+
if (other !== undefined) s += smaller[token]! * other
|
|
90
|
+
}
|
|
91
|
+
return s
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
/** L2 norm of a sparse vector. */
|
|
95
|
+
export const sparseL2Norm = (v: SparseEmbedding): number => {
|
|
96
|
+
let s = 0
|
|
97
|
+
for (const token in v.weights) {
|
|
98
|
+
const w = v.weights[token]!
|
|
99
|
+
s += w * w
|
|
100
|
+
}
|
|
101
|
+
return Math.sqrt(s)
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Sparse cosine similarity. Range `[-1, 1]` (typically `[0, 1]` for
|
|
106
|
+
* learned-sparse encoders since weights are non-negative). Returns
|
|
107
|
+
* `NaN` if either vector has zero magnitude.
|
|
108
|
+
*/
|
|
109
|
+
export const sparseCosine = (a: SparseEmbedding, b: SparseEmbedding): number =>
|
|
110
|
+
sparseDot(a, b) / (sparseL2Norm(a) * sparseL2Norm(b))
|
|
111
|
+
|
|
112
|
+
// ---------------------------------------------------------------------------
|
|
113
|
+
// Multivector / late-interaction (ColBERT-style)
|
|
114
|
+
// ---------------------------------------------------------------------------
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* MaxSim score for late-interaction retrieval. For each *query* vector,
|
|
118
|
+
* find the maximum dot product with any *document* vector, then sum.
|
|
119
|
+
*
|
|
120
|
+
* Captures fine-grained relevance that single-vector cosine smears out:
|
|
121
|
+
* each query token finds its own best-matching document token.
|
|
122
|
+
*
|
|
123
|
+
* Cost: O(|q| × |d| × dim). Fine at recipe volume; for production-scale
|
|
124
|
+
* retrieval use a vector store with native multivector indexing
|
|
125
|
+
* (Vespa, Qdrant, PLAID).
|
|
126
|
+
*/
|
|
127
|
+
export const maxSim = (q: MultivectorEmbedding, d: MultivectorEmbedding): number => {
|
|
128
|
+
let total = 0
|
|
129
|
+
for (const qv of q.vectors) {
|
|
130
|
+
let best = -Infinity
|
|
131
|
+
for (const dv of d.vectors) {
|
|
132
|
+
const s = dot(qv, dv)
|
|
133
|
+
if (s > best) best = s
|
|
134
|
+
}
|
|
135
|
+
total += best
|
|
136
|
+
}
|
|
137
|
+
return total
|
|
138
|
+
}
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
import { Effect, Stream } from "effect"
|
|
2
|
+
import { describe, expect, expectTypeOf, it } from "vitest"
|
|
3
|
+
import type * as AiError from "../domain/AiError.js"
|
|
4
|
+
import type { AudioChunk, AudioFormat } from "../domain/Audio.js"
|
|
5
|
+
import { configInput, promptsInput, type MusicResult } from "../domain/Music.js"
|
|
6
|
+
import * as MockMusicGenerator from "../testing/MockMusicGenerator.js"
|
|
7
|
+
import * as MusicGenerator from "./MusicGenerator.js"
|
|
8
|
+
|
|
9
|
+
// Shared fixture: a stereo 44.1 kHz MP3 format used by all scripted
// results in this file.
const mp3Format: AudioFormat = {
  container: "mp3",
  encoding: "mp3",
  sampleRate: 44100,
  channels: 2,
}

// Canonical scripted result the mock generator hands back. The first
// four bytes mimic an MP3 frame sync header.
const result: MusicResult = {
  format: mp3Format,
  bytes: new Uint8Array([0xff, 0xfb, 0x90, 0x00]),
  durationSeconds: 30,
  lyrics: "[Verse]\nhello\n",
  watermark: { kind: "synthid" },
}

// Builds a one-byte audio chunk; lets stream assertions identify
// chunks by their single payload byte.
const chunk = (n: number): AudioChunk => ({ bytes: new Uint8Array([n]) })
|
|
25
|
+
|
|
26
|
+
describe("MusicGenerator.generate", () => {
  it("returns the scripted MusicResult", async () => {
    // One scripted result; the mock dequeues it for the single call.
    const mock = MockMusicGenerator.layer({ results: [result] })
    const program = MusicGenerator.generate({
      model: "mock-music",
      prompts: "upbeat indie pop",
    })
    const out = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    expect(out.bytes).toEqual(result.bytes)
    expect(out.durationSeconds).toBe(30)
    expect(out.watermark?.kind).toBe("synthid")
    expect(out.lyrics).toContain("[Verse]")
  })

  it("records the request shape on the recorder", async () => {
    // Two scripted results for the two calls below.
    const mock = MockMusicGenerator.layer({ results: [result, result] })
    const program = Effect.gen(function* () {
      // First call: plain string prompt.
      yield* MusicGenerator.generate({ model: "m", prompts: "techno" })
      // Second call: weighted-prompt array plus optional knobs, to
      // verify the mock records the full request shape.
      yield* MusicGenerator.generate({
        model: "m",
        prompts: [
          { text: "synthwave", weight: 1.0 },
          { text: "80s movie OST", weight: 0.4 },
        ],
        bpm: 120,
        instrumental: true,
      })
      return yield* mock.recorder
    })
    const rec = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    expect(rec.generateCalls.length).toBe(2)
    expect(rec.generateCalls[1]!.bpm).toBe(120)
    expect(rec.generateCalls[1]!.instrumental).toBe(true)
    expect(Array.isArray(rec.generateCalls[1]!.prompts)).toBe(true)
  })
})
|
|
62
|
+
|
|
63
|
+
describe("MusicGenerator.streamGeneration", () => {
  it("emits scripted chunks", async () => {
    // One scripted stream of three single-byte chunks.
    const mock = MockMusicGenerator.layer({
      streamGenerationChunks: [[chunk(1), chunk(2), chunk(3)]],
    })
    const program = Stream.runCollect(
      MusicGenerator.streamGeneration({ model: "m", prompts: "ambient" }),
    )
    const out = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    // Compare by payload bytes so chunk identity doesn't matter.
    expect(out.map((c) => Array.from(c.bytes))).toEqual([[1], [2], [3]])
  })
})
|
|
75
|
+
|
|
76
|
+
// Compile-time suite: `expectTypeOf` assertions verify that the
// `MusicInteractiveSession` capability marker appears in (or is
// absent from) the R channel. Nothing here runs effects.
describe("MusicGenerator capability marker (compile-time)", () => {
  // Minimal valid request reused by the session-based cases.
  const sgfReq: MusicGenerator.CommonStreamGenerateMusicRequest = {
    model: "m",
    prompts: "",
  }

  it("requires `MusicInteractiveSession` on the R channel of streamGenerationFrom", () => {
    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
    expectTypeOf(audio).toEqualTypeOf<
      Stream.Stream<
        AudioChunk,
        AiError.AiError,
        MusicGenerator.MusicGenerator | MusicGenerator.MusicInteractiveSession
      >
    >()
  })

  it("does NOT require `MusicInteractiveSession` for sync `generate`", () => {
    const eff = MusicGenerator.generate({ model: "m", prompts: "ambient" })
    expectTypeOf(eff).toEqualTypeOf<
      Effect.Effect<MusicResult, AiError.AiError, MusicGenerator.MusicGenerator>
    >()
  })

  it("does NOT require `MusicInteractiveSession` for `streamGeneration`", () => {
    const audio = MusicGenerator.streamGeneration({ model: "m", prompts: "ambient" })
    expectTypeOf(audio).toEqualTypeOf<
      Stream.Stream<AudioChunk, AiError.AiError, MusicGenerator.MusicGenerator>
    >()
  })

  it("a layer without the marker leaves `MusicInteractiveSession` unsatisfied in R", () => {
    // Providing a marker-less layer satisfies only `MusicGenerator`;
    // the marker must survive in R.
    const noMarker = MockMusicGenerator.layerWithoutInteractive({})
    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
    const program = Stream.runDrain(audio).pipe(Effect.provide(noMarker.layer))
    expectTypeOf(program).toEqualTypeOf<
      Effect.Effect<void, AiError.AiError, MusicGenerator.MusicInteractiveSession>
    >()
  })

  it("a full layer (with marker) clears R to never", () => {
    const fullMock = MockMusicGenerator.layer({
      streamGenerationFromChunks: [[]],
    })
    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
    const program = Stream.runDrain(audio).pipe(Effect.provide(fullMock.layer))
    expectTypeOf(program).toEqualTypeOf<Effect.Effect<void, AiError.AiError, never>>()
  })
})
|
|
128
|
+
|
|
129
|
+
describe("MusicGenerator.streamGenerationFrom", () => {
  // Session request; prompts live in the input stream, so the
  // request-level prompts field stays empty.
  const sgfReq: MusicGenerator.CommonStreamGenerateMusicRequest = {
    model: "lyria-realtime-001",
    prompts: "",
  }

  it("drains a session-input stream and emits scripted audio", async () => {
    const mock = MockMusicGenerator.layer({
      streamGenerationFromChunks: [[chunk(10), chunk(20)]],
    })
    // Mixed session inputs: prompt updates interleaved with a config
    // update, mimicking a live steering session.
    const inputs = Stream.fromIterable([
      promptsInput([{ text: "minimal techno", weight: 1.0 }]),
      configInput({ bpm: 124 }),
      promptsInput([
        { text: "minimal techno", weight: 1.0 },
        { text: "1980s synthwave", weight: 0.3 },
      ]),
    ])
    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
    const out = await Effect.runPromise(Stream.runCollect(audio).pipe(Effect.provide(mock.layer)))
    expect(out.map((c) => Array.from(c.bytes))).toEqual([[10], [20]])
  })

  it("records the request on the streamGenerationFrom call channel", async () => {
    const mock = MockMusicGenerator.layer({
      streamGenerationFromChunks: [[chunk(42)]],
    })
    const program = Effect.gen(function* () {
      yield* Stream.runDrain(
        Stream.fromIterable([promptsInput([{ text: "x" }])]).pipe(
          MusicGenerator.streamGenerationFrom(sgfReq),
        ),
      )
      return yield* mock.recorder
    })
    const rec = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    // The call must land on the session channel only — not on the
    // one-shot or plain-streaming channels.
    expect(rec.streamGenerationFromCalls.length).toBe(1)
    expect(rec.streamGenerationFromCalls[0]!.model).toBe("lyria-realtime-001")
    expect(rec.generateCalls.length).toBe(0)
    expect(rec.streamGenerationCalls.length).toBe(0)
  })
})
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
import { Context, Effect, Function, Stream } from "effect"
|
|
2
|
+
import * as AiError from "../domain/AiError.js"
|
|
3
|
+
import type { AudioChunk } from "../domain/Audio.js"
|
|
4
|
+
import type {
|
|
5
|
+
CommonGenerateMusicRequest,
|
|
6
|
+
CommonStreamGenerateMusicRequest,
|
|
7
|
+
MusicResult,
|
|
8
|
+
MusicSessionInput,
|
|
9
|
+
} from "../domain/Music.js"
|
|
10
|
+
|
|
11
|
+
export type {
|
|
12
|
+
CommonGenerateMusicRequest,
|
|
13
|
+
CommonStreamGenerateMusicRequest,
|
|
14
|
+
MusicResult,
|
|
15
|
+
MusicSessionInput,
|
|
16
|
+
WeightedPrompt,
|
|
17
|
+
} from "../domain/Music.js"
|
|
18
|
+
|
|
19
|
+
/**
 * Structural contract a music-generation provider adapter must
 * implement. All three members are required; providers emulate what
 * their backend lacks (see member docs) rather than throwing
 * `Unsupported`.
 */
export type MusicGeneratorService = {
  /**
   * One-shot. Prompt in, full audio bytes out. Universally supported.
   * Async/poll-based providers (Suno, Mureka) hide their poll loop
   * inside the adapter — caller still sees a single `Effect`.
   */
  readonly generate: (
    request: CommonGenerateMusicRequest,
  ) => Effect.Effect<MusicResult, AiError.AiError>
  /**
   * Prompt in, audio chunks streamed out. Providers without a native
   * chunked-output endpoint (Lyria 3 sync, Mureka, MiniMax, Stable
   * Audio) emulate this by calling `generate` and emitting a single
   * `AudioChunk` — first-class, no `Unsupported`.
   */
  readonly streamGeneration: (
    request: CommonStreamGenerateMusicRequest,
  ) => Stream.Stream<AudioChunk, AiError.AiError>
  /**
   * Bidirectional session: a `Stream` of prompt-or-config updates flows
   * in, a `Stream` of audio chunks flows out. The session WS / RPC is
   * acquired on first pull and released when the output stream is
   * finalized via `Stream.scoped`.
   *
   * Gated by the `MusicInteractiveSession` capability marker on the
   * top-level helper — providers without bidirectional support don't
   * ship the marker, so calls fail at `Effect.provide` with a type
   * error.
   */
  readonly streamGenerationFrom: <E, R>(
    input: Stream.Stream<MusicSessionInput, E, R>,
    request: CommonStreamGenerateMusicRequest,
  ) => Stream.Stream<AudioChunk, AiError.AiError | E, R>
}
|
|
53
|
+
|
|
54
|
+
/**
 * Context tag under which provider layers register their
 * `MusicGeneratorService` implementation.
 *
 * NOTE(review): the tag id uses the `@betalyra/effect-uai` prefix while
 * the published package is `@effect-uai/core` — confirm the id is
 * intentional and consistent with the other tags in this package.
 */
export class MusicGenerator extends Context.Service<MusicGenerator, MusicGeneratorService>()(
  "@betalyra/effect-uai/MusicGenerator",
) {}
|
|
57
|
+
|
|
58
|
+
/**
 * Capability marker — provided by provider layers whose
 * `streamGenerationFrom` is wired up at the wire level. Currently only
 * Lyria RealTime (via the BidiGenerateMusic WebSocket) ships it.
 * Calling `streamGenerationFrom` while only a non-interactive Layer is
 * in scope fails at `Effect.provide` with a type error.
 *
 * Phantom — the value is `void`; providers register with
 * `Layer.succeed(MusicInteractiveSession, undefined)`.
 */
export class MusicInteractiveSession extends Context.Service<MusicInteractiveSession, void>()(
  "@betalyra/effect-uai/capability/MusicInteractiveSession",
) {}
|
|
71
|
+
|
|
72
|
+
/** One-shot generation. */
|
|
73
|
+
export const generate = (
|
|
74
|
+
request: CommonGenerateMusicRequest,
|
|
75
|
+
): Effect.Effect<MusicResult, AiError.AiError, MusicGenerator> =>
|
|
76
|
+
Effect.flatMap(MusicGenerator.asEffect(), (s) => s.generate(request))
|
|
77
|
+
|
|
78
|
+
/** Prompt in, audio chunks out. */
|
|
79
|
+
export const streamGeneration = (
|
|
80
|
+
request: CommonStreamGenerateMusicRequest,
|
|
81
|
+
): Stream.Stream<AudioChunk, AiError.AiError, MusicGenerator> =>
|
|
82
|
+
Stream.unwrap(Effect.map(MusicGenerator.asEffect(), (s) => s.streamGeneration(request)))
|
|
83
|
+
|
|
84
|
+
/**
|
|
85
|
+
* Bidirectional generation. Dual-arity: pipeable (data-last) and
|
|
86
|
+
* direct (data-first). Requires `MusicInteractiveSession` in R —
|
|
87
|
+
* providers without bidirectional support are a type error at provide
|
|
88
|
+
* time.
|
|
89
|
+
*
|
|
90
|
+
* @example
|
|
91
|
+
* ```ts
|
|
92
|
+
* const audio = Stream.fromIterable([
|
|
93
|
+
* Music.promptsInput([{ text: "minimal techno", weight: 1.0 }]),
|
|
94
|
+
* Music.configInput({ bpm: 124 }),
|
|
95
|
+
* ]).pipe(
|
|
96
|
+
* MusicGenerator.streamGenerationFrom({ model: "lyria-realtime-001", prompts: "" }),
|
|
97
|
+
* )
|
|
98
|
+
* ```
|
|
99
|
+
*/
|
|
100
|
+
export const streamGenerationFrom: {
|
|
101
|
+
(
|
|
102
|
+
request: CommonStreamGenerateMusicRequest,
|
|
103
|
+
): <E, R>(
|
|
104
|
+
input: Stream.Stream<MusicSessionInput, E, R>,
|
|
105
|
+
) => Stream.Stream<AudioChunk, AiError.AiError | E, R | MusicGenerator | MusicInteractiveSession>
|
|
106
|
+
<E, R>(
|
|
107
|
+
input: Stream.Stream<MusicSessionInput, E, R>,
|
|
108
|
+
request: CommonStreamGenerateMusicRequest,
|
|
109
|
+
): Stream.Stream<AudioChunk, AiError.AiError | E, R | MusicGenerator | MusicInteractiveSession>
|
|
110
|
+
} = Function.dual(
|
|
111
|
+
2,
|
|
112
|
+
<E, R>(
|
|
113
|
+
input: Stream.Stream<MusicSessionInput, E, R>,
|
|
114
|
+
request: CommonStreamGenerateMusicRequest,
|
|
115
|
+
) =>
|
|
116
|
+
Stream.unwrap(
|
|
117
|
+
Effect.gen(function* () {
|
|
118
|
+
const s = yield* MusicGenerator.asEffect()
|
|
119
|
+
yield* MusicInteractiveSession.asEffect()
|
|
120
|
+
return s.streamGenerationFrom(input, request)
|
|
121
|
+
}),
|
|
122
|
+
),
|
|
123
|
+
)
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
import { Effect, Stream } from "effect"
|
|
2
|
+
import { describe, expect, expectTypeOf, it } from "vitest"
|
|
3
|
+
import type * as AiError from "../domain/AiError.js"
|
|
4
|
+
import type { AudioBlob, AudioChunk, AudioFormat } from "../domain/Audio.js"
|
|
5
|
+
import * as MockSpeechSynthesizer from "../testing/MockSpeechSynthesizer.js"
|
|
6
|
+
import * as SpeechSynthesizer from "./SpeechSynthesizer.js"
|
|
7
|
+
|
|
8
|
+
// Shared fixture: raw 24 kHz signed 16-bit little-endian PCM, the
// typical wire format for streaming TTS providers.
const pcmFormat: AudioFormat = {
  container: "raw",
  encoding: "pcm_s16le",
  sampleRate: 24000,
}

// Canonical scripted blob the mock synthesizer hands back.
const blob: AudioBlob = {
  format: pcmFormat,
  bytes: new Uint8Array([0xde, 0xad, 0xbe, 0xef]),
  durationSeconds: 0.5,
}

// Builds a one-byte audio chunk; lets stream assertions identify
// chunks by their single payload byte.
const chunk = (n: number): AudioChunk => ({ bytes: new Uint8Array([n]) })
|
|
21
|
+
|
|
22
|
+
describe("SpeechSynthesizer.synthesize", () => {
  it("returns the scripted AudioBlob", async () => {
    // One scripted blob; the mock dequeues it for the single call.
    const mock = MockSpeechSynthesizer.layer({ blobs: [blob] })
    const program = SpeechSynthesizer.synthesize({
      text: "hi",
      model: "mock-tts",
      voiceId: "stock-voice",
    })
    const result = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    expect(result.bytes).toEqual(blob.bytes)
    expect(result.durationSeconds).toBe(0.5)
  })
})
|
|
35
|
+
|
|
36
|
+
describe("SpeechSynthesizer.streamSynthesis", () => {
  it("emits scripted chunks for full-text-in streaming", async () => {
    // One scripted stream of three single-byte chunks.
    const mock = MockSpeechSynthesizer.layer({
      streamSynthesisChunks: [[chunk(1), chunk(2), chunk(3)]],
    })
    const program = Stream.runCollect(
      SpeechSynthesizer.streamSynthesis({
        text: "hi",
        model: "mock-tts",
        voiceId: "stock-voice",
      }),
    )
    const out = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
    // Compare by payload bytes so chunk identity doesn't matter.
    expect(out.map((c) => Array.from(c.bytes))).toEqual([[1], [2], [3]])
  })
})
|
|
52
|
+
|
|
53
|
+
// Compile-time-only suite: these tests assert the *types* of the service's
// entry points via `expectTypeOf`, verifying that the `TtsIncrementalText`
// capability marker appears in the requirements (R) channel exactly when the
// incremental-text streaming path is used. No runtime behavior is exercised
// except layer construction.
describe("SpeechSynthesizer capability marker (compile-time)", () => {
  // Minimal request for the incremental path; the text itself arrives via the
  // piped token stream, so only model/voice are needed here.
  const ssfReq: SpeechSynthesizer.CommonStreamSynthesizeRequest = {
    model: "mock-tts",
    voiceId: "v",
  }

  it("requires `TtsIncrementalText` on the R channel of streamSynthesisFrom", () => {
    const tokens: Stream.Stream<string> = Stream.fromIterable(["a"])
    const audio = tokens.pipe(SpeechSynthesizer.streamSynthesisFrom(ssfReq))
    // R must include BOTH the base service and the incremental-text marker.
    expectTypeOf(audio).toEqualTypeOf<
      Stream.Stream<
        AudioChunk,
        AiError.AiError,
        SpeechSynthesizer.SpeechSynthesizer | SpeechSynthesizer.TtsIncrementalText
      >
    >()
  })

  it("does NOT require `TtsIncrementalText` for sync `synthesize`", () => {
    const eff = SpeechSynthesizer.synthesize({ text: "hi", model: "m", voiceId: "v" })
    // One-shot synthesis only needs the base service in R.
    expectTypeOf(eff).toEqualTypeOf<
      Effect.Effect<AudioBlob, AiError.AiError, SpeechSynthesizer.SpeechSynthesizer>
    >()
  })

  it("does NOT require `TtsIncrementalText` for full-text `streamSynthesis`", () => {
    const audio = SpeechSynthesizer.streamSynthesis({ text: "hi", model: "m", voiceId: "v" })
    // Full-text streaming likewise only needs the base service in R.
    expectTypeOf(audio).toEqualTypeOf<
      Stream.Stream<AudioChunk, AiError.AiError, SpeechSynthesizer.SpeechSynthesizer>
    >()
  })

  it("a layer without the marker leaves `TtsIncrementalText` unsatisfied in R", () => {
    const noMarker = MockSpeechSynthesizer.layerWithoutIncremental({})
    const tokens: Stream.Stream<string> = Stream.fromIterable(["a"])
    const audio = tokens.pipe(SpeechSynthesizer.streamSynthesisFrom(ssfReq))
    const program = Stream.runDrain(audio).pipe(Effect.provide(noMarker.layer))
    // `SpeechSynthesizer` is provided by the layer; `TtsIncrementalText` is not,
    // so it remains in R — calling `Effect.runPromise(program)` would be a type
    // error because runPromise requires `R = never`.
    expectTypeOf(program).toEqualTypeOf<
      Effect.Effect<void, AiError.AiError, SpeechSynthesizer.TtsIncrementalText>
    >()
  })

  it("a full layer (with marker) clears R to never", () => {
    // The standard mock layer presumably provides both the service and the
    // incremental-text marker — confirmed below by R collapsing to `never`.
    const fullMock = MockSpeechSynthesizer.layer({
      streamSynthesisFromChunks: [[]],
    })
    const tokens: Stream.Stream<string> = Stream.fromIterable(["a"])
    const audio = tokens.pipe(SpeechSynthesizer.streamSynthesisFrom(ssfReq))
    const program = Stream.runDrain(audio).pipe(Effect.provide(fullMock.layer))
    expectTypeOf(program).toEqualTypeOf<Effect.Effect<void, AiError.AiError, never>>()
  })
})
|
|
108
|
+
|
|
109
|
+
describe("SpeechSynthesizer.streamSynthesisFrom", () => {
|
|
110
|
+
const ssfReq: SpeechSynthesizer.CommonStreamSynthesizeRequest = {
|
|
111
|
+
model: "mock-tts",
|
|
112
|
+
voiceId: "stock-voice",
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
it("pipes an LLM-style text stream into audio chunks", async () => {
|
|
116
|
+
const mock = MockSpeechSynthesizer.layer({
|
|
117
|
+
streamSynthesisFromChunks: [[chunk(10), chunk(20)]],
|
|
118
|
+
})
|
|
119
|
+
const tokens = Stream.fromIterable(["Hello, ", "world."])
|
|
120
|
+
const audio = tokens.pipe(SpeechSynthesizer.streamSynthesisFrom(ssfReq))
|
|
121
|
+
const out = await Effect.runPromise(Stream.runCollect(audio).pipe(Effect.provide(mock.layer)))
|
|
122
|
+
expect(out.map((c) => Array.from(c.bytes))).toEqual([[10], [20]])
|
|
123
|
+
})
|
|
124
|
+
|
|
125
|
+
it("records the request on the streamSynthesisFrom call channel", async () => {
|
|
126
|
+
const mock = MockSpeechSynthesizer.layer({
|
|
127
|
+
streamSynthesisFromChunks: [[chunk(42)]],
|
|
128
|
+
})
|
|
129
|
+
const program = Effect.gen(function* () {
|
|
130
|
+
yield* Stream.runDrain(
|
|
131
|
+
Stream.fromIterable(["x"]).pipe(SpeechSynthesizer.streamSynthesisFrom(ssfReq)),
|
|
132
|
+
)
|
|
133
|
+
return yield* mock.recorder
|
|
134
|
+
})
|
|
135
|
+
const rec = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
|
|
136
|
+
expect(rec.streamSynthesisFromCalls.length).toBe(1)
|
|
137
|
+
expect(rec.streamSynthesisFromCalls[0]!.voiceId).toBe("stock-voice")
|
|
138
|
+
expect(rec.synthesizeCalls.length).toBe(0)
|
|
139
|
+
expect(rec.streamSynthesisCalls.length).toBe(0)
|
|
140
|
+
})
|
|
141
|
+
})
|