@effect-uai/core 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{AiError-CBuPHVKA.d.mts → AiError-csR8Bhxx.d.mts} +26 -4
- package/dist/{AiError-CBuPHVKA.d.mts.map → AiError-csR8Bhxx.d.mts.map} +1 -1
- package/dist/Audio-BfCTGnH3.d.mts +61 -0
- package/dist/Audio-BfCTGnH3.d.mts.map +1 -0
- package/dist/{Image-BZmKfIdq.d.mts → Image-DxyXqzAM.d.mts} +4 -4
- package/dist/{Image-BZmKfIdq.d.mts.map → Image-DxyXqzAM.d.mts.map} +1 -1
- package/dist/{Items-CB8Bo3FI.d.mts → Items-Hg5AsYxl.d.mts} +5 -5
- package/dist/{Items-CB8Bo3FI.d.mts.map → Items-Hg5AsYxl.d.mts.map} +1 -1
- package/dist/{StructuredFormat-BWq5Hd1O.d.mts → StructuredFormat-Cl41C56K.d.mts} +1 -1
- package/dist/{StructuredFormat-BWq5Hd1O.d.mts.map → StructuredFormat-Cl41C56K.d.mts.map} +1 -1
- package/dist/{Tool-DjVufH7i.d.mts → Tool-B8B5qVEy.d.mts} +2 -2
- package/dist/{Tool-DjVufH7i.d.mts.map → Tool-B8B5qVEy.d.mts.map} +1 -1
- package/dist/{Turn-OPaILVIB.d.mts → Turn-7geUcKsf.d.mts} +4 -4
- package/dist/{Turn-OPaILVIB.d.mts.map → Turn-7geUcKsf.d.mts.map} +1 -1
- package/dist/domain/AiError.d.mts +2 -2
- package/dist/domain/AiError.mjs +18 -2
- package/dist/domain/AiError.mjs.map +1 -1
- package/dist/domain/Audio.d.mts +2 -0
- package/dist/domain/Audio.mjs +14 -0
- package/dist/domain/Audio.mjs.map +1 -0
- package/dist/domain/Image.d.mts +1 -1
- package/dist/domain/Items.d.mts +1 -1
- package/dist/domain/Music.d.mts +116 -0
- package/dist/domain/Music.d.mts.map +1 -0
- package/dist/domain/Music.mjs +29 -0
- package/dist/domain/Music.mjs.map +1 -0
- package/dist/domain/Transcript.d.mts +95 -0
- package/dist/domain/Transcript.d.mts.map +1 -0
- package/dist/domain/Transcript.mjs +22 -0
- package/dist/domain/Transcript.mjs.map +1 -0
- package/dist/domain/Turn.d.mts +1 -1
- package/dist/embedding-model/Embedding.d.mts +1 -1
- package/dist/embedding-model/EmbeddingModel.d.mts +1 -1
- package/dist/index.d.mts +13 -7
- package/dist/index.mjs +7 -1
- package/dist/language-model/LanguageModel.d.mts +5 -5
- package/dist/loop/Loop.d.mts +2 -2
- package/dist/music-generator/MusicGenerator.d.mts +77 -0
- package/dist/music-generator/MusicGenerator.d.mts.map +1 -0
- package/dist/music-generator/MusicGenerator.mjs +51 -0
- package/dist/music-generator/MusicGenerator.mjs.map +1 -0
- package/dist/music-generator/MusicGenerator.test.d.mts +1 -0
- package/dist/music-generator/MusicGenerator.test.mjs +154 -0
- package/dist/music-generator/MusicGenerator.test.mjs.map +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.d.mts +96 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.d.mts.map +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.mjs +48 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.mjs.map +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.d.mts +1 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.mjs +112 -0
- package/dist/speech-synthesizer/SpeechSynthesizer.test.mjs.map +1 -0
- package/dist/streaming/JSONL.d.mts +10 -3
- package/dist/streaming/JSONL.d.mts.map +1 -1
- package/dist/streaming/JSONL.mjs +12 -1
- package/dist/streaming/JSONL.mjs.map +1 -1
- package/dist/structured-format/StructuredFormat.d.mts +1 -1
- package/dist/testing/MockMusicGenerator.d.mts +39 -0
- package/dist/testing/MockMusicGenerator.d.mts.map +1 -0
- package/dist/testing/MockMusicGenerator.mjs +96 -0
- package/dist/testing/MockMusicGenerator.mjs.map +1 -0
- package/dist/testing/MockProvider.d.mts +2 -2
- package/dist/testing/MockSpeechSynthesizer.d.mts +37 -0
- package/dist/testing/MockSpeechSynthesizer.d.mts.map +1 -0
- package/dist/testing/MockSpeechSynthesizer.mjs +95 -0
- package/dist/testing/MockSpeechSynthesizer.mjs.map +1 -0
- package/dist/testing/MockTranscriber.d.mts +37 -0
- package/dist/testing/MockTranscriber.d.mts.map +1 -0
- package/dist/testing/MockTranscriber.mjs +77 -0
- package/dist/testing/MockTranscriber.mjs.map +1 -0
- package/dist/tool/HistoryCheck.d.mts +1 -1
- package/dist/tool/Outcome.d.mts +1 -1
- package/dist/tool/Resolvers.d.mts +1 -1
- package/dist/tool/Tool.d.mts +1 -1
- package/dist/tool/Toolkit.d.mts +2 -2
- package/dist/transcriber/Transcriber.d.mts +101 -0
- package/dist/transcriber/Transcriber.d.mts.map +1 -0
- package/dist/transcriber/Transcriber.mjs +49 -0
- package/dist/transcriber/Transcriber.mjs.map +1 -0
- package/dist/transcriber/Transcriber.test.d.mts +1 -0
- package/dist/transcriber/Transcriber.test.mjs +130 -0
- package/dist/transcriber/Transcriber.test.mjs.map +1 -0
- package/package.json +37 -1
- package/src/domain/AiError.ts +21 -0
- package/src/domain/Audio.ts +88 -0
- package/src/domain/Music.ts +121 -0
- package/src/domain/Transcript.ts +83 -0
- package/src/index.ts +6 -0
- package/src/music-generator/MusicGenerator.test.ts +170 -0
- package/src/music-generator/MusicGenerator.ts +123 -0
- package/src/speech-synthesizer/SpeechSynthesizer.test.ts +141 -0
- package/src/speech-synthesizer/SpeechSynthesizer.ts +131 -0
- package/src/streaming/JSONL.ts +12 -0
- package/src/testing/MockMusicGenerator.ts +170 -0
- package/src/testing/MockSpeechSynthesizer.ts +165 -0
- package/src/testing/MockTranscriber.ts +139 -0
- package/src/transcriber/Transcriber.test.ts +125 -0
- package/src/transcriber/Transcriber.ts +127 -0
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@effect-uai/core",
-  "version": "0.3.0",
+  "version": "0.4.0",
   "description": "Low-level primitives (loop, conversation, items, tools, streaming codecs) for building AI agents with Effect.",
   "keywords": [
     "agents",
@@ -38,6 +38,10 @@
       "types": "./dist/domain/AiError.d.mts",
       "import": "./dist/domain/AiError.mjs"
     },
+    "./Audio": {
+      "types": "./dist/domain/Audio.d.mts",
+      "import": "./dist/domain/Audio.mjs"
+    },
     "./Image": {
       "types": "./dist/domain/Image.d.mts",
       "import": "./dist/domain/Image.mjs"
@@ -50,6 +54,14 @@
       "types": "./dist/domain/Media.d.mts",
       "import": "./dist/domain/Media.mjs"
     },
+    "./Music": {
+      "types": "./dist/domain/Music.d.mts",
+      "import": "./dist/domain/Music.mjs"
+    },
+    "./Transcript": {
+      "types": "./dist/domain/Transcript.d.mts",
+      "import": "./dist/domain/Transcript.mjs"
+    },
     "./Turn": {
       "types": "./dist/domain/Turn.d.mts",
       "import": "./dist/domain/Turn.mjs"
@@ -66,6 +78,18 @@
       "types": "./dist/language-model/LanguageModel.d.mts",
       "import": "./dist/language-model/LanguageModel.mjs"
     },
+    "./MusicGenerator": {
+      "types": "./dist/music-generator/MusicGenerator.d.mts",
+      "import": "./dist/music-generator/MusicGenerator.mjs"
+    },
+    "./SpeechSynthesizer": {
+      "types": "./dist/speech-synthesizer/SpeechSynthesizer.d.mts",
+      "import": "./dist/speech-synthesizer/SpeechSynthesizer.mjs"
+    },
+    "./Transcriber": {
+      "types": "./dist/transcriber/Transcriber.d.mts",
+      "import": "./dist/transcriber/Transcriber.mjs"
+    },
     "./Loop": {
       "types": "./dist/loop/Loop.d.mts",
       "import": "./dist/loop/Loop.mjs"
@@ -121,6 +145,18 @@
     "./testing/MockProvider": {
       "types": "./dist/testing/MockProvider.d.mts",
       "import": "./dist/testing/MockProvider.mjs"
+    },
+    "./testing/MockMusicGenerator": {
+      "types": "./dist/testing/MockMusicGenerator.d.mts",
+      "import": "./dist/testing/MockMusicGenerator.mjs"
+    },
+    "./testing/MockSpeechSynthesizer": {
+      "types": "./dist/testing/MockSpeechSynthesizer.d.mts",
+      "import": "./dist/testing/MockSpeechSynthesizer.mjs"
+    },
+    "./testing/MockTranscriber": {
+      "types": "./dist/testing/MockTranscriber.d.mts",
+      "import": "./dist/testing/MockTranscriber.mjs"
     }
   },
   "publishConfig": {
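The new `exports` entries above add subpath specifiers for the audio, music, and transcript modules plus their mocks. A minimal consumer-side sketch, assuming a resolver that honours the `exports` map (specifiers mirror the keys added above; not part of the package itself):

```ts
// Each specifier maps to an entry added in the exports map above.
import * as Audio from "@effect-uai/core/Audio"
import * as Music from "@effect-uai/core/Music"
import * as Transcriber from "@effect-uai/core/Transcriber"
import * as MockTranscriber from "@effect-uai/core/testing/MockTranscriber"
```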
package/src/domain/AiError.ts
CHANGED
@@ -80,6 +80,26 @@ export class IncompleteTurn extends Data.TaggedError("IncompleteTurn")<{
   raw?: unknown
 }> {}
 
+/**
+ * The provider does not implement the requested capability for this
+ * specific request. Distinct from `InvalidRequest` (the request shape is
+ * malformed) and `AuthFailed` (the request was rejected).
+ *
+ * Reserved for request-data-dependent gaps where the provider supports
+ * the method in general but not for these inputs — e.g. Google's
+ * `streamSynthesisFrom` works only for Chirp 3 HD voices; calling it
+ * with a Neural2 voice ID fails `Unsupported`.
+ *
+ * Blanket provider-level gaps (e.g. OpenAI has no incremental-text-in
+ * TTS at all) are gated at compile time via capability marker tags
+ * (`TtsIncrementalText`, `SttStreaming`) on the R channel instead.
+ */
+export class Unsupported extends Data.TaggedError("Unsupported")<{
+  provider: string
+  capability: string
+  reason?: string
+}> {}
+
 export type AiError =
   | RateLimited
   | Unavailable
@@ -91,3 +111,4 @@ export type AiError =
   | Cancelled
   | IncompleteTurn
   | GenerationFailed
+  | Unsupported
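A hedged sketch of raising and handling the new `Unsupported` error, using Effect's standard `Data.TaggedError` construction and `Effect.catchTag` (the provider/capability values are illustrative, not taken from the package):

```ts
import { Effect } from "effect"
import * as AiError from "@effect-uai/core/AiError"

// Raising the error for a request-data-dependent gap (illustrative values).
const rejectNeural2 = Effect.fail(
  new AiError.Unsupported({
    provider: "google",
    capability: "streamSynthesisFrom",
    reason: "only Chirp 3 HD voices support incremental synthesis",
  }),
)

// Handling it by tag, as with any Data.TaggedError.
const handled = rejectNeural2.pipe(
  Effect.catchTag("Unsupported", (e) =>
    Effect.logWarning(`${e.provider} cannot ${e.capability}: ${e.reason ?? "unsupported"}`),
  ),
)
```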
package/src/domain/Audio.ts
ADDED
@@ -0,0 +1,88 @@
+import type { MediaBase64, MediaBytes, MediaUrl } from "./Media.js"
+
+/**
+ * MIME types we care about across STT input and TTS output. Container-
+ * level only — sample rate / encoding flavours live on `AudioFormat`.
+ *
+ * Per-provider request types narrow this further. The `(string & {})`
+ * tail keeps autocomplete on the literals while still accepting any
+ * string, so unusual formats work without an SDK update.
+ */
+export type AudioMimeType =
+  | "audio/mpeg"
+  | "audio/wav"
+  | "audio/x-wav"
+  | "audio/ogg"
+  | "audio/opus"
+  | "audio/flac"
+  | "audio/aac"
+  | "audio/mp4"
+  | "audio/webm"
+  | "audio/L16"
+  | "audio/pcm"
+  | "audio/mulaw"
+  | "audio/alaw"
+  // eslint-disable-next-line @typescript-eslint/ban-types
+  | (string & {})
+
+/**
+ * Audio at rest — instantiates `MediaSource` with the audio MIME union.
+ * Used for sync STT input.
+ *
+ * URL variant is best-effort: some providers (OpenAI, Cartesia, Azure
+ * short-audio) reject URL ingestion and the adapter must upload via the
+ * `bytes` or `base64` variant instead. Adapter layers reject unsupported
+ * shapes up front with `AiError.InvalidRequest`.
+ */
+export type AudioSource =
+  | MediaUrl<AudioMimeType>
+  | MediaBase64<AudioMimeType>
+  | MediaBytes<AudioMimeType>
+
+export const isAudioUrl = (s: AudioSource): s is MediaUrl<AudioMimeType> => s._tag === "url"
+export const isAudioBase64 = (s: AudioSource): s is MediaBase64<AudioMimeType> =>
+  s._tag === "base64"
+export const isAudioBytes = (s: AudioSource): s is MediaBytes<AudioMimeType> => s._tag === "bytes"
+
+/**
+ * Structural audio format. Used both as TTS output spec and as STT
+ * streaming-input declaration. Providers that use compound slugs
+ * (`mp3_44100_128`, `audio-16khz-128kbitrate-mono-mp3`,
+ * `aura-2-thalia-en`) are encoded at the adapter layer.
+ */
+export type AudioFormat = {
+  readonly container: "mp3" | "wav" | "ogg" | "opus" | "flac" | "aac" | "webm" | "raw"
+  readonly encoding:
+    | "pcm_s16le"
+    | "pcm_f32le"
+    | "pcm_mulaw"
+    | "pcm_alaw"
+    | "mp3"
+    | "opus"
+    | "vorbis"
+    | "flac"
+    | "aac"
+  readonly sampleRate: 8000 | 16000 | 22050 | 24000 | 32000 | 44100 | 48000
+  /** mp3 / opus only. */
+  readonly bitRate?: number
+  readonly channels?: 1 | 2
+}
+
+/**
+ * Streamed audio chunk. `bytes` carries the codec-encoded payload as
+ * declared on the stream's `AudioFormat`. No per-chunk timestamp here —
+ * providers that emit timing do so via `TranscriptEvent.words[]`.
+ */
+export type AudioChunk = {
+  readonly bytes: Uint8Array
+}
+
+/**
+ * Full audio result for sync TTS. Format mirrors the request; provider
+ * layers normalize.
+ */
+export type AudioBlob = {
+  readonly format: AudioFormat
+  readonly bytes: Uint8Array
+  readonly durationSeconds?: number
+}
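A small sketch of constructing the structural audio values defined above; the concrete WAV/PCM values are illustrative, not provider defaults:

```ts
import type { AudioBlob, AudioFormat } from "@effect-uai/core/Audio"

// A 16 kHz mono PCM-in-WAV format, usable as a TTS output spec or an
// STT streaming-input declaration (illustrative values).
const wav16k: AudioFormat = {
  container: "wav",
  encoding: "pcm_s16le",
  sampleRate: 16000,
  channels: 1,
}

// A full sync-TTS result carrying that format.
const blob: AudioBlob = {
  format: wav16k,
  bytes: new Uint8Array([0x52, 0x49, 0x46, 0x46]), // "RIFF" header bytes
  durationSeconds: 1.2,
}
```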
package/src/domain/Music.ts
ADDED
@@ -0,0 +1,121 @@
+import type { AudioBlob, AudioFormat } from "./Audio.js"
+
+/**
+ * Prompt fragment with a relative weight. Native to Lyria RealTime
+ * (`{ text, weight }` pairs blended in the model). Single-prompt
+ * providers (Suno, Mureka, MiniMax) flatten to text at the adapter
+ * layer.
+ */
+export type WeightedPrompt = {
+  readonly text: string
+  /** Default `1.0`. Range typically `[0, 1]`; provider-dependent. */
+  readonly weight?: number
+}
+
+/**
+ * Cross-provider music-generation request. Provider-specific extras
+ * (Lyria `mode`, ElevenLabs `composition_plan`, Suno custom-mode `title`,
+ * MiniMax `lyrics_optimizer`) live on each provider's typed request
+ * which extends this and narrows `model`.
+ */
+export type CommonGenerateMusicRequest = {
+  /** Model identifier. Each provider narrows. */
+  readonly model: string
+  /** Single prompt string or weighted-prompt list (blended where supported). */
+  readonly prompts: string | ReadonlyArray<WeightedPrompt>
+  /**
+   * Lyrics text, optionally with section tags like `[Verse]` / `[Chorus]` /
+   * `[Bridge]` / `[Outro]`. Ignored for instrumental-only providers or
+   * when `instrumental: true`.
+   */
+  readonly lyrics?: string
+  /** Target duration in seconds. Provider may treat as a hint or hard limit. */
+  readonly durationSeconds?: number
+  /** Beats per minute (60–200 typical). */
+  readonly bpm?: number
+  /**
+   * Musical key/mode hint. Provider-specific vocabulary (e.g. Lyria
+   * RealTime uses enum values like `"C_MAJOR"`, `"A_MINOR"`).
+   */
+  readonly scale?: string
+  /** Skip vocals / lyrics. */
+  readonly instrumental?: boolean
+  /** Preferred output format. Provider may override. */
+  readonly outputFormat?: AudioFormat
+}
+
+/**
+ * Streamed-output request. Same shape as the sync request — the
+ * streaming variant only differs in how the response is delivered.
+ */
+export type CommonStreamGenerateMusicRequest = CommonGenerateMusicRequest
+
+/**
+ * Bidirectional-session input. The user pushes one of these per
+ * change: a new prompt blend, a config delta, or a playback control.
+ * Lyria RealTime is the only provider currently surfacing these.
+ */
+export type MusicSessionInput =
+  | { readonly _tag: "prompts"; readonly prompts: ReadonlyArray<WeightedPrompt> }
+  | {
+      readonly _tag: "config"
+      readonly config: {
+        readonly bpm?: number
+        readonly scale?: string
+        readonly density?: number
+        readonly brightness?: number
+        readonly guidance?: number
+        readonly temperature?: number
+        readonly topK?: number
+        readonly seed?: number
+        readonly muteBass?: boolean
+        readonly muteDrums?: boolean
+        readonly onlyBassAndDrums?: boolean
+      }
+    }
+  | { readonly _tag: "control"; readonly action: "play" | "pause" | "stop" | "reset_context" }
+
+export const promptsInput = (prompts: ReadonlyArray<WeightedPrompt>): MusicSessionInput => ({
+  _tag: "prompts",
+  prompts,
+})
+
+export const configInput = (
+  config: (MusicSessionInput & { _tag: "config" })["config"],
+): MusicSessionInput => ({ _tag: "config", config })
+
+export const controlInput = (
+  action: (MusicSessionInput & { _tag: "control" })["action"],
+): MusicSessionInput => ({ _tag: "control", action })
+
+/**
+ * Sync-generation result. Extends `AudioBlob` with provider-side
+ * metadata that's common across music providers:
+ *
+ * - `songId` — Suno task id, ElevenLabs `song_id`, etc. Used for
+ *   back-reference (re-download, stem export, follow-up edits).
+ * - `lyrics` — generated lyrics when the model returned them (Lyria
+ *   text part, Mureka, Suno).
+ * - `sections` — structured section markers (Lyria optional JSON
+ *   structure response).
+ * - `watermark` — presence marker (Lyria SynthID is always set).
+ */
+export type MusicResult = AudioBlob & {
+  readonly songId?: string
+  readonly lyrics?: string
+  readonly sections?: ReadonlyArray<{
+    readonly label: string
+    readonly startSeconds: number
+    readonly endSeconds: number
+  }>
+  readonly watermark?: { readonly kind: string }
+}
+
+export const isPromptsInput = (
+  i: MusicSessionInput,
+): i is MusicSessionInput & { _tag: "prompts" } => i._tag === "prompts"
+export const isConfigInput = (i: MusicSessionInput): i is MusicSessionInput & { _tag: "config" } =>
+  i._tag === "config"
+export const isControlInput = (
+  i: MusicSessionInput,
+): i is MusicSessionInput & { _tag: "control" } => i._tag === "control"
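A sketch of assembling session inputs with the constructors above, fed through an Effect `Stream` (the prompt and config values are illustrative; the downstream consumer is left out):

```ts
import { Stream } from "effect"
import { configInput, controlInput, promptsInput } from "@effect-uai/core/Music"

// One session-input stream: start with a prompt blend, nudge the BPM,
// then pause playback (illustrative values).
const inputs = Stream.make(
  promptsInput([
    { text: "minimal techno", weight: 1.0 },
    { text: "1980s synthwave", weight: 0.3 },
  ]),
  configInput({ bpm: 124, temperature: 1.1 }),
  controlInput("pause"),
)
```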
package/src/domain/Transcript.ts
ADDED
@@ -0,0 +1,83 @@
+/**
+ * Per-word timing + metadata. `confidence` and `speakerId` are optional
+ * because providers vary widely in what they emit and when (some only on
+ * final, some only with diarization enabled, some not at all).
+ */
+export type WordTimestamp = {
+  readonly text: string
+  readonly startSeconds: number
+  readonly endSeconds: number
+  readonly confidence?: number
+  readonly speakerId?: string
+  readonly languageCode?: string
+}
+
+/**
+ * Sync STT result. `raw` preserves the provider-specific response for
+ * consumers that need fields the common shape doesn't expose
+ * (alternatives, segments, NBest, audio events, etc.).
+ */
+export type TranscriptResult = {
+  readonly text: string
+  readonly languageCode?: string
+  readonly durationSeconds?: number
+  readonly words?: ReadonlyArray<WordTimestamp>
+  readonly raw?: unknown
+}
+
+/**
+ * Streaming STT event union. Collapses every provider's vocabulary into
+ * a small set; provider-specific shapes survive on `metadata.raw`.
+ *
+ * - `partial`: interim hypothesis. `stability` is Google-only.
+ * - `final`: committed transcript for the current utterance / segment.
+ * - `speech-started` / `utterance-ended`: VAD-derived boundaries. Not
+ *   all providers emit them (OpenAI Realtime, Google with
+ *   `voice_activity_events`, Deepgram with `vad_events`, AssemblyAI).
+ * - `audio-event`: non-speech label (`(laughter)`, `(music)`) — ElevenLabs only.
+ * - `metadata`: opaque server-side bookkeeping (request_id, model info).
+ * - `error`: non-fatal provider error mid-stream. Fatal errors surface
+ *   on the `Stream`'s error channel as `AiError.AiError`.
+ */
+export type TranscriptEvent =
+  | {
+      readonly _tag: "partial"
+      readonly text: string
+      readonly words?: ReadonlyArray<WordTimestamp>
+      readonly stability?: number
+    }
+  | {
+      readonly _tag: "final"
+      readonly text: string
+      readonly words?: ReadonlyArray<WordTimestamp>
+      readonly languageCode?: string
+    }
+  | { readonly _tag: "speech-started"; readonly atSeconds: number }
+  | { readonly _tag: "utterance-ended"; readonly atSeconds: number }
+  | {
+      readonly _tag: "audio-event"
+      readonly label: string
+      readonly startSeconds: number
+      readonly endSeconds: number
+    }
+  | { readonly _tag: "metadata"; readonly raw: unknown }
+  | { readonly _tag: "error"; readonly code?: string; readonly message: string }
+
+export const isPartial = (e: TranscriptEvent): e is Extract<TranscriptEvent, { _tag: "partial" }> =>
+  e._tag === "partial"
+export const isFinal = (e: TranscriptEvent): e is Extract<TranscriptEvent, { _tag: "final" }> =>
+  e._tag === "final"
+export const isSpeechStarted = (
+  e: TranscriptEvent,
+): e is Extract<TranscriptEvent, { _tag: "speech-started" }> => e._tag === "speech-started"
+export const isUtteranceEnded = (
+  e: TranscriptEvent,
+): e is Extract<TranscriptEvent, { _tag: "utterance-ended" }> => e._tag === "utterance-ended"
+export const isAudioEvent = (
+  e: TranscriptEvent,
+): e is Extract<TranscriptEvent, { _tag: "audio-event" }> => e._tag === "audio-event"
+export const isMetadata = (
+  e: TranscriptEvent,
+): e is Extract<TranscriptEvent, { _tag: "metadata" }> => e._tag === "metadata"
+export const isError = (e: TranscriptEvent): e is Extract<TranscriptEvent, { _tag: "error" }> =>
+  e._tag === "error"
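A sketch of folding the streaming event union into a running transcript with the guards above; `foldTranscript` is a hypothetical helper, not part of the package:

```ts
import { isFinal, isPartial, type TranscriptEvent } from "@effect-uai/core/Transcript"

// Hypothetical reducer: keep committed text plus the latest interim hypothesis.
const foldTranscript = (events: ReadonlyArray<TranscriptEvent>): string => {
  let committed = ""
  let interim = ""
  for (const e of events) {
    if (isFinal(e)) {
      committed += (committed ? " " : "") + e.text
      interim = ""
    } else if (isPartial(e)) {
      interim = e.text
    }
  }
  return interim ? `${committed} ${interim}`.trim() : committed
}
```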
package/src/index.ts
CHANGED
@@ -1,11 +1,17 @@
 export * as AiError from "./domain/AiError.js"
+export * as Audio from "./domain/Audio.js"
 export * as Image from "./domain/Image.js"
 export * as Items from "./domain/Items.js"
 export * as Media from "./domain/Media.js"
+export * as Music from "./domain/Music.js"
+export * as Transcript from "./domain/Transcript.js"
 export * as Turn from "./domain/Turn.js"
 export * as Embedding from "./embedding-model/Embedding.js"
 export * as EmbeddingModel from "./embedding-model/EmbeddingModel.js"
 export * as LanguageModel from "./language-model/LanguageModel.js"
+export * as MusicGenerator from "./music-generator/MusicGenerator.js"
+export * as SpeechSynthesizer from "./speech-synthesizer/SpeechSynthesizer.js"
+export * as Transcriber from "./transcriber/Transcriber.js"
 export * as Vector from "./math/Vector.js"
 export * as Loop from "./loop/Loop.js"
 export * as Tool from "./tool/Tool.js"
package/src/music-generator/MusicGenerator.test.ts
ADDED
@@ -0,0 +1,170 @@
+import { Effect, Stream } from "effect"
+import { describe, expect, expectTypeOf, it } from "vitest"
+import type * as AiError from "../domain/AiError.js"
+import type { AudioChunk, AudioFormat } from "../domain/Audio.js"
+import { configInput, promptsInput, type MusicResult } from "../domain/Music.js"
+import * as MockMusicGenerator from "../testing/MockMusicGenerator.js"
+import * as MusicGenerator from "./MusicGenerator.js"
+
+const mp3Format: AudioFormat = {
+  container: "mp3",
+  encoding: "mp3",
+  sampleRate: 44100,
+  channels: 2,
+}
+
+const result: MusicResult = {
+  format: mp3Format,
+  bytes: new Uint8Array([0xff, 0xfb, 0x90, 0x00]),
+  durationSeconds: 30,
+  lyrics: "[Verse]\nhello\n",
+  watermark: { kind: "synthid" },
+}
+
+const chunk = (n: number): AudioChunk => ({ bytes: new Uint8Array([n]) })
+
+describe("MusicGenerator.generate", () => {
+  it("returns the scripted MusicResult", async () => {
+    const mock = MockMusicGenerator.layer({ results: [result] })
+    const program = MusicGenerator.generate({
+      model: "mock-music",
+      prompts: "upbeat indie pop",
+    })
+    const out = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
+    expect(out.bytes).toEqual(result.bytes)
+    expect(out.durationSeconds).toBe(30)
+    expect(out.watermark?.kind).toBe("synthid")
+    expect(out.lyrics).toContain("[Verse]")
+  })
+
+  it("records the request shape on the recorder", async () => {
+    const mock = MockMusicGenerator.layer({ results: [result, result] })
+    const program = Effect.gen(function* () {
+      yield* MusicGenerator.generate({ model: "m", prompts: "techno" })
+      yield* MusicGenerator.generate({
+        model: "m",
+        prompts: [
+          { text: "synthwave", weight: 1.0 },
+          { text: "80s movie OST", weight: 0.4 },
+        ],
+        bpm: 120,
+        instrumental: true,
+      })
+      return yield* mock.recorder
+    })
+    const rec = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
+    expect(rec.generateCalls.length).toBe(2)
+    expect(rec.generateCalls[1]!.bpm).toBe(120)
+    expect(rec.generateCalls[1]!.instrumental).toBe(true)
+    expect(Array.isArray(rec.generateCalls[1]!.prompts)).toBe(true)
+  })
+})
+
+describe("MusicGenerator.streamGeneration", () => {
+  it("emits scripted chunks", async () => {
+    const mock = MockMusicGenerator.layer({
+      streamGenerationChunks: [[chunk(1), chunk(2), chunk(3)]],
+    })
+    const program = Stream.runCollect(
+      MusicGenerator.streamGeneration({ model: "m", prompts: "ambient" }),
+    )
+    const out = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
+    expect(out.map((c) => Array.from(c.bytes))).toEqual([[1], [2], [3]])
+  })
+})
+
+describe("MusicGenerator capability marker (compile-time)", () => {
+  const sgfReq: MusicGenerator.CommonStreamGenerateMusicRequest = {
+    model: "m",
+    prompts: "",
+  }
+
+  it("requires `MusicInteractiveSession` on the R channel of streamGenerationFrom", () => {
+    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
+    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
+    expectTypeOf(audio).toEqualTypeOf<
+      Stream.Stream<
+        AudioChunk,
+        AiError.AiError,
+        MusicGenerator.MusicGenerator | MusicGenerator.MusicInteractiveSession
+      >
+    >()
+  })
+
+  it("does NOT require `MusicInteractiveSession` for sync `generate`", () => {
+    const eff = MusicGenerator.generate({ model: "m", prompts: "ambient" })
+    expectTypeOf(eff).toEqualTypeOf<
+      Effect.Effect<MusicResult, AiError.AiError, MusicGenerator.MusicGenerator>
+    >()
+  })
+
+  it("does NOT require `MusicInteractiveSession` for `streamGeneration`", () => {
+    const audio = MusicGenerator.streamGeneration({ model: "m", prompts: "ambient" })
+    expectTypeOf(audio).toEqualTypeOf<
+      Stream.Stream<AudioChunk, AiError.AiError, MusicGenerator.MusicGenerator>
+    >()
+  })
+
+  it("a layer without the marker leaves `MusicInteractiveSession` unsatisfied in R", () => {
+    const noMarker = MockMusicGenerator.layerWithoutInteractive({})
+    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
+    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
+    const program = Stream.runDrain(audio).pipe(Effect.provide(noMarker.layer))
+    expectTypeOf(program).toEqualTypeOf<
+      Effect.Effect<void, AiError.AiError, MusicGenerator.MusicInteractiveSession>
+    >()
+  })
+
+  it("a full layer (with marker) clears R to never", () => {
+    const fullMock = MockMusicGenerator.layer({
+      streamGenerationFromChunks: [[]],
+    })
+    const inputs = Stream.fromIterable([promptsInput([{ text: "techno" }])])
+    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
+    const program = Stream.runDrain(audio).pipe(Effect.provide(fullMock.layer))
+    expectTypeOf(program).toEqualTypeOf<Effect.Effect<void, AiError.AiError, never>>()
+  })
+})
+
+describe("MusicGenerator.streamGenerationFrom", () => {
+  const sgfReq: MusicGenerator.CommonStreamGenerateMusicRequest = {
+    model: "lyria-realtime-001",
+    prompts: "",
+  }
+
+  it("drains a session-input stream and emits scripted audio", async () => {
+    const mock = MockMusicGenerator.layer({
+      streamGenerationFromChunks: [[chunk(10), chunk(20)]],
+    })
+    const inputs = Stream.fromIterable([
+      promptsInput([{ text: "minimal techno", weight: 1.0 }]),
+      configInput({ bpm: 124 }),
+      promptsInput([
+        { text: "minimal techno", weight: 1.0 },
+        { text: "1980s synthwave", weight: 0.3 },
+      ]),
+    ])
+    const audio = inputs.pipe(MusicGenerator.streamGenerationFrom(sgfReq))
+    const out = await Effect.runPromise(Stream.runCollect(audio).pipe(Effect.provide(mock.layer)))
+    expect(out.map((c) => Array.from(c.bytes))).toEqual([[10], [20]])
+  })
+
+  it("records the request on the streamGenerationFrom call channel", async () => {
+    const mock = MockMusicGenerator.layer({
+      streamGenerationFromChunks: [[chunk(42)]],
+    })
+    const program = Effect.gen(function* () {
+      yield* Stream.runDrain(
+        Stream.fromIterable([promptsInput([{ text: "x" }])]).pipe(
+          MusicGenerator.streamGenerationFrom(sgfReq),
+        ),
+      )
+      return yield* mock.recorder
+    })
+    const rec = await Effect.runPromise(program.pipe(Effect.provide(mock.layer)))
+    expect(rec.streamGenerationFromCalls.length).toBe(1)
+    expect(rec.streamGenerationFromCalls[0]!.model).toBe("lyria-realtime-001")
+    expect(rec.generateCalls.length).toBe(0)
+    expect(rec.streamGenerationCalls.length).toBe(0)
+  })
+})