modelfusion 0.47.3 → 0.49.0
This diff covers the publicly released content of the package versions above, as published to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in those registries.
- package/README.md +55 -33
- package/core/getRun.cjs +5 -3
- package/core/getRun.js +5 -3
- package/index.cjs +1 -0
- package/index.d.ts +1 -0
- package/index.js +1 -0
- package/model-function/AsyncIterableResultPromise.cjs +5 -5
- package/model-function/AsyncIterableResultPromise.d.ts +3 -3
- package/model-function/AsyncIterableResultPromise.js +5 -5
- package/model-function/Model.d.ts +1 -1
- package/model-function/ModelCallEvent.d.ts +5 -7
- package/model-function/embed/embed.cjs +3 -3
- package/model-function/embed/embed.js +3 -3
- package/model-function/{executeCall.cjs → executeStandardCall.cjs} +3 -3
- package/model-function/{executeCall.d.ts → executeStandardCall.d.ts} +1 -1
- package/model-function/{executeCall.js → executeStandardCall.js} +1 -1
- package/model-function/executeStreamCall.cjs +132 -0
- package/model-function/executeStreamCall.d.ts +20 -0
- package/model-function/executeStreamCall.js +128 -0
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +27 -0
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +15 -0
- package/model-function/{synthesize-speech/synthesizeSpeech.cjs → generate-speech/generateSpeech.cjs} +7 -7
- package/model-function/{synthesize-speech/synthesizeSpeech.d.ts → generate-speech/generateSpeech.d.ts} +2 -2
- package/model-function/{synthesize-speech/synthesizeSpeech.js → generate-speech/generateSpeech.js} +5 -5
- package/model-function/generate-speech/index.cjs +20 -0
- package/model-function/generate-speech/index.d.ts +4 -0
- package/model-function/generate-speech/index.js +4 -0
- package/model-function/generate-speech/streamSpeech.cjs +34 -0
- package/model-function/generate-speech/streamSpeech.d.ts +8 -0
- package/model-function/generate-speech/streamSpeech.js +30 -0
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-structure/generateStructureOrText.cjs +2 -2
- package/model-function/generate-structure/generateStructureOrText.js +2 -2
- package/model-function/generate-structure/index.cjs +27 -0
- package/model-function/generate-structure/index.d.ts +11 -0
- package/model-function/generate-structure/index.js +11 -0
- package/model-function/generate-structure/streamStructure.cjs +28 -136
- package/model-function/generate-structure/streamStructure.js +27 -135
- package/model-function/generate-text/TextGenerationEvent.d.ts +6 -0
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/index.cjs +0 -1
- package/model-function/generate-text/index.d.ts +0 -1
- package/model-function/generate-text/index.js +0 -1
- package/model-function/generate-text/streamText.cjs +21 -128
- package/model-function/generate-text/streamText.js +20 -127
- package/model-function/generate-text/trimChatPrompt.cjs +1 -1
- package/model-function/generate-text/trimChatPrompt.d.ts +1 -1
- package/model-function/generate-text/trimChatPrompt.js +1 -1
- package/model-function/{transcribe-speech/transcribe.cjs → generate-transcription/generateTranscription.cjs} +6 -6
- package/model-function/{transcribe-speech/transcribe.d.ts → generate-transcription/generateTranscription.d.ts} +2 -2
- package/model-function/{transcribe-speech/transcribe.js → generate-transcription/generateTranscription.js} +4 -4
- package/model-function/index.cjs +5 -20
- package/model-function/index.d.ts +5 -20
- package/model-function/index.js +5 -20
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +3 -0
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.d.ts +1 -0
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +3 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +191 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +39 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +187 -0
- package/model-provider/elevenlabs/index.cjs +1 -1
- package/model-provider/elevenlabs/index.d.ts +1 -1
- package/model-provider/elevenlabs/index.js +1 -1
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +21 -2
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +11 -6
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +21 -2
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.cjs → LmntSpeechModel.cjs} +5 -5
- package/model-provider/lmnt/LmntSpeechModel.d.ts +26 -0
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.js → LmntSpeechModel.js} +3 -3
- package/model-provider/lmnt/index.cjs +1 -1
- package/model-provider/lmnt/index.d.ts +1 -1
- package/model-provider/lmnt/index.js +1 -1
- package/model-provider/openai/{OpenAITextGenerationModel.cjs → OpenAICompletionModel.cjs} +17 -17
- package/model-provider/openai/{OpenAITextGenerationModel.d.ts → OpenAICompletionModel.d.ts} +25 -25
- package/model-provider/openai/{OpenAITextGenerationModel.js → OpenAICompletionModel.js} +12 -12
- package/model-provider/openai/OpenAICostCalculator.cjs +3 -3
- package/model-provider/openai/OpenAICostCalculator.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/TikTokenTokenizer.d.ts +2 -2
- package/model-provider/openai/index.cjs +1 -1
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +3 -1
- package/ui/MediaSourceAppender.cjs +54 -0
- package/ui/MediaSourceAppender.d.ts +11 -0
- package/ui/MediaSourceAppender.js +50 -0
- package/ui/index.cjs +17 -0
- package/ui/index.d.ts +1 -0
- package/ui/index.js +1 -0
- package/util/SimpleWebSocket.cjs +41 -0
- package/util/SimpleWebSocket.d.ts +12 -0
- package/util/SimpleWebSocket.js +14 -0
- package/model-function/describe-image/ImageDescriptionEvent.d.ts +0 -18
- package/model-function/describe-image/ImageDescriptionModel.d.ts +0 -10
- package/model-function/describe-image/describeImage.cjs +0 -26
- package/model-function/describe-image/describeImage.d.ts +0 -9
- package/model-function/describe-image/describeImage.js +0 -22
- package/model-function/generate-text/TextStreamingEvent.cjs +0 -2
- package/model-function/generate-text/TextStreamingEvent.d.ts +0 -7
- package/model-function/generate-text/TextStreamingEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisEvent.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +0 -21
- package/model-function/synthesize-speech/SpeechSynthesisEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisModel.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +0 -11
- package/model-function/synthesize-speech/SpeechSynthesisModel.js +0 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +0 -79
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +0 -30
- package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +0 -75
- package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +0 -26
- /package/model-function/{describe-image/ImageDescriptionEvent.cjs → generate-speech/SpeechGenerationEvent.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionEvent.js → generate-speech/SpeechGenerationEvent.js} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.cjs → generate-speech/SpeechGenerationModel.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.js → generate-speech/SpeechGenerationModel.js} +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.js +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.js +0 -0
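The file list above shows this release's main theme: the model functions were renamed to a consistent `generate*`/`stream*` scheme (`synthesizeSpeech` → `generateSpeech`, `transcribe` → `generateTranscription`, `OpenAITextGenerationModel` → `OpenAICompletionModel`, `LmntSpeechSynthesisModel` → `LmntSpeechModel`), speech streaming was added (`streamSpeech`, `ElevenLabsSpeechModel`), and the shared call plumbing was split into `executeStandardCall` and `executeStreamCall`. A rough migration sketch for user code — the new names are taken from the diff below, while the top-level `modelfusion` import path and the placeholder variables are assumptions:

```ts
import {
  generateSpeech,
  generateTranscription,
  LmntSpeechModel,
  OpenAITranscriptionModel,
} from "modelfusion";

declare const voice: string; // placeholder inputs for the sketch
declare const text: string;
declare const data: Buffer;

// before (0.47.x):
// const speech = await synthesizeSpeech(new LmntSpeechSynthesisModel({ voice }), text);
// const transcription = await transcribe(new OpenAITranscriptionModel({ model: "whisper-1" }), { type: "mp3", data });

// after (0.49.0): same call shapes, renamed functions and model classes
const speech = await generateSpeech(new LmntSpeechModel({ voice }), text);
const transcription = await generateTranscription(
  new OpenAITranscriptionModel({ model: "whisper-1" }),
  { type: "mp3", data }
);
```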
package/README.md
CHANGED
@@ -35,17 +35,17 @@ Or use a template: [ModelFusion terminal app starter](https://github.com/lgramme
 
 You can provide API keys for the different [integrations](https://modelfusion.dev/integration/model-provider/) using environment variables (e.g., `OPENAI_API_KEY`) or pass them into the model constructors as options.
 
-### [Generate
+### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
 
 Generate text using a language model and a prompt.
 You can stream the text if it is supported by the model.
-You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text
+You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text#prompt-format) to change the prompt format of a model.
 
 #### generateText
 
 ```ts
 const text = await generateText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
   }),
   "Write a short story about a robot learning to love:\n\n"
@@ -58,7 +58,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 
 ```ts
 const textStream = await streamText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
   }),
   "Write a short story about a robot learning to love:\n\n"
@@ -71,7 +71,7 @@ for await (const textFragment of textStream) {
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
 
-### [Generate
+### [Generate Structure](https://modelfusion.dev/guide/function/generate-structure#generatestructure)
 
 Generate typed objects using a language model and a schema.
 
@@ -256,12 +256,12 @@ const { tool, parameters, result, text } = await useToolOrGenerateText(
 );
 ```
 
-### [
+### [Generate Transcription](https://modelfusion.dev/guide/function/generate-transcription)
 
-
+Transcribe speech (audio) data into text. Also called speech-to-text (STT).
 
 ```ts
-const transcription = await transcribe(
+const transcription = await generateTranscription(
   new OpenAITranscriptionModel({ model: "whisper-1" }),
   {
     type: "mp3",
@@ -272,14 +272,20 @@ const transcription = await transcribe(
 
 Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai)
 
-### [
+### [Generate Speech](https://modelfusion.dev/guide/function/generate-speech)
 
-
+Synthesize speech (audio) from text. Also called TTS (text-to-speech).
+
+Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt)
+
+#### generateSpeech
+
+`generateSpeech` synthesizes speech from text.
 
 ```ts
 // `speech` is a Buffer with MP3 audio data
-const speech = await synthesizeSpeech(
-  new LmntSpeechSynthesisModel({
+const speech = await generateSpeech(
+  new LmntSpeechModel({
     voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
   }),
   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
@@ -289,22 +295,29 @@ const speech = await synthesizeSpeech(
 );
 ```
 
-
-
-### [Describe Image](https://modelfusion.dev/guide/function/describe-image)
+#### streamSpeech
 
-
+`streamSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.
 
 ```ts
-const
-
-
+const textStream = await streamText(/* ... */);
+
+const speechStream = await streamSpeech(
+  new ElevenLabsSpeechModel({
+    voice: "pNInz6obpgDQGcFmaJgB", // Adam
+    model: "eleven_monolingual_v1",
+    voiceSettings: { stability: 1, similarityBoost: 0.35 },
+    generationConfig: {
+      chunkLengthSchedule: [50, 90, 120, 150, 200],
+    },
   }),
-
+  textStream
 );
-```
 
-
+for await (const part of speechStream) {
+  // each part is a Buffer with MP3 audio data
+}
+```
 
 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
 
@@ -435,7 +448,7 @@ Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-ind
 
 Prompt formats let you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
-#### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text
+#### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text#prompt-format)
 
 ```ts
 const text = await generateText(
@@ -500,7 +513,7 @@ ModelFusion model functions return rich results that include the original respon
 ```ts
 // access the full response (needs to be typed) and the metadata:
 const { value, response, metadata } = await generateText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
     maxCompletionTokens: 1000,
     n: 2, // generate 2 completions
@@ -511,7 +524,7 @@ const { value, response, metadata } = await generateText(
 console.log(metadata);
 
 // cast to the response type:
-for (const choice of (response as OpenAITextGenerationResponse).choices) {
+for (const choice of (response as OpenAICompletionResponse).choices) {
   console.log(choice.text);
 }
 ```
@@ -525,17 +538,14 @@ Integrations: [Helicone](https://modelfusion.dev/integration/observability/helic
 ### [Guide](https://modelfusion.dev/guide)
 
 - [Model Functions](https://modelfusion.dev/guide/function/)
-  - [Generate
-
-  - [Generate
+  - [Generate text](https://modelfusion.dev/guide/function/generate-text)
+  - [Generate image](https://modelfusion.dev/guide/function/generate-image)
+  - [Generate speech](https://modelfusion.dev/guide/function/generate-speech)
+  - [Generate transcription](https://modelfusion.dev/guide/function/generation-transcription)
+  - [Generate structure](https://modelfusion.dev/guide/function/generate-structure)
   - [Generate structure or text](https://modelfusion.dev/guide/function/generate-structure-or-text)
   - [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
   - [Embed Value](https://modelfusion.dev/guide/function/embed)
-  - [Transcribe Speech](https://modelfusion.dev/guide/function/transcribe-speech)
-  - [Synthesize Speech](https://modelfusion.dev/guide/function/synthesize-speech)
-  - [Describe Image](https://modelfusion.dev/guide/function/describe-image)
-  - [Generate Image](https://modelfusion.dev/guide/function/generate-image)
-  - [Prompt Format](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 - [Guards](https://modelfusion.dev/guide/guard)
 - [Tools](https://modelfusion.dev/guide/tools)
 - [Vector Indices](https://modelfusion.dev/guide/vector-index)
@@ -603,6 +613,12 @@ Create an 19th century painting image for your input.
 
 Record audio with push-to-talk and transcribe it using Whisper, implemented as a Next.js app. The app shows a list of the transcriptions.
 
+### [Speech Streaming (Vite (React) + Fastify)](https://github.com/lgrammel/modelfusion/tree/main/examples/speech-streaming-vite-react-fastify)
+
+> _Speech Streaming_, _OpenAI_, _Elevenlabs_ _streaming_, _Vite_, _Fastify_
+
+Given a prompt, the server returns both a text and a speech stream response.
+
 ### [BabyAGI Agent](https://github.com/lgrammel/modelfusion/tree/main/examples/babyagi-agent)
 
 > _terminal app_, _agent_, _BabyAGI_
@@ -627,6 +643,12 @@ Small agent that solves middle school math problems. It uses a calculator tool t
 
 Extracts information about a topic from a PDF and writes a tweet in your own style about it.
 
+### [Cloudflare Workers](https://github.com/lgrammel/modelfusion/tree/main/examples/cloudflare-workers)
+
+> _Cloudflare_, _OpenAI_
+
+Generate text on a Cloudflare Worker using ModelFusion and OpenAI.
+
 ## Contributing
 
 ### [Contributing Guide](https://github.com/lgrammel/modelfusion/blob/main/CONTRIBUTING.md)
package/core/getRun.cjs
CHANGED
@@ -25,10 +25,12 @@ var __importStar = (this && this.__importStar) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.withRun = exports.getRun = void 0;
 let runStorage;
-const isNode = typeof process !== "undefined" &&
-    process.versions != null &&
-    process.versions.node != null;
 async function ensureLoaded() {
+    // Note: using process[versions] instead of process.versions to avoid Next.js edge runtime warnings.
+    const versions = "versions";
+    const isNode = typeof process !== "undefined" &&
+        process[versions] != null &&
+        process[versions].node != null;
     if (!isNode)
         return Promise.resolve();
     if (!runStorage) {
package/core/getRun.js
CHANGED
@@ -1,8 +1,10 @@
 let runStorage;
-const isNode = typeof process !== "undefined" &&
-    process.versions != null &&
-    process.versions.node != null;
 async function ensureLoaded() {
+    // Note: using process[versions] instead of process.versions to avoid Next.js edge runtime warnings.
+    const versions = "versions";
+    const isNode = typeof process !== "undefined" &&
+        process[versions] != null &&
+        process[versions].node != null;
     if (!isNode)
         return Promise.resolve();
     if (!runStorage) {
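The indirection in the change above works because bundlers and edge runtimes flag statically analyzable member accesses on `process`; a computed key is opaque to that analysis. A minimal sketch of the difference (illustrative only, not code from the package):

```ts
// Direct member access: a bundler can see `process.versions` at build time
// and may warn that this Node API is unavailable in an edge runtime.
const direct = typeof process !== "undefined" && process.versions != null;

// Computed access: the property name is a runtime value, so static
// analysis no longer matches the `process.versions` pattern.
const key = "versions";
const indirect =
  typeof process !== "undefined" &&
  (process as unknown as Record<string, unknown>)[key] != null;
```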
package/index.cjs
CHANGED
@@ -25,5 +25,6 @@ __exportStar(require("./observability/index.cjs"), exports);
 __exportStar(require("./retriever/index.cjs"), exports);
 __exportStar(require("./text-chunk/index.cjs"), exports);
 __exportStar(require("./tool/index.cjs"), exports);
+__exportStar(require("./ui/index.cjs"), exports);
 __exportStar(require("./util/index.cjs"), exports);
 __exportStar(require("./vector-index/index.cjs"), exports);
package/index.d.ts
CHANGED
@@ -9,5 +9,6 @@ export * from "./observability/index.js";
 export * from "./retriever/index.js";
 export * from "./text-chunk/index.js";
 export * from "./tool/index.js";
+export * from "./ui/index.js";
 export * from "./util/index.js";
 export * from "./vector-index/index.js";
package/index.js
CHANGED
@@ -9,5 +9,6 @@ export * from "./observability/index.js";
 export * from "./retriever/index.js";
 export * from "./text-chunk/index.js";
 export * from "./tool/index.js";
+export * from "./ui/index.js";
 export * from "./util/index.js";
 export * from "./vector-index/index.js";
package/model-function/AsyncIterableResultPromise.cjs
CHANGED
@@ -13,25 +13,25 @@ class AsyncIterableResultPromise extends Promise {
             writable: true,
             value: fullPromise
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "valuePromise", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.
+        this.valuePromise = fullPromise.then((result) => result.value);
     }
     asFullResponse() {
         return this.fullPromise;
     }
     then(onfulfilled, onrejected) {
-        return this.
+        return this.valuePromise.then(onfulfilled, onrejected);
     }
     catch(onrejected) {
-        return this.
+        return this.valuePromise.catch(onrejected);
    }
     finally(onfinally) {
-        return this.
+        return this.valuePromise.finally(onfinally);
     }
 }
 exports.AsyncIterableResultPromise = AsyncIterableResultPromise;
package/model-function/AsyncIterableResultPromise.d.ts
CHANGED
@@ -1,13 +1,13 @@
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
 export declare class AsyncIterableResultPromise<T> extends Promise<AsyncIterable<T>> {
     private fullPromise;
-    private
+    private valuePromise;
     constructor(fullPromise: Promise<{
-
+        value: AsyncIterable<T>;
         metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
     }>);
     asFullResponse(): Promise<{
-
+        value: AsyncIterable<T>;
         metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
     }>;
     then<TResult1 = AsyncIterable<T>, TResult2 = never>(onfulfilled?: ((value: AsyncIterable<T>) => TResult1 | PromiseLike<TResult1>) | undefined | null, onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | undefined | null): Promise<TResult1 | TResult2>;
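Per the declaration above, awaiting the promise yields the async iterable directly, while `asFullResponse()` also exposes the call metadata. A small usage sketch (the `streamText` arguments are abbreviated):

```ts
// awaiting the promise itself resolves to the AsyncIterable<string>:
const textStream = await streamText(/* model, prompt */);

// or keep the metadata alongside the value:
const { value: stream, metadata } = await streamText(/* model, prompt */).asFullResponse();
```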
package/model-function/AsyncIterableResultPromise.js
CHANGED
@@ -10,24 +10,24 @@ export class AsyncIterableResultPromise extends Promise {
             writable: true,
             value: fullPromise
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "valuePromise", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.
+        this.valuePromise = fullPromise.then((result) => result.value);
     }
     asFullResponse() {
         return this.fullPromise;
     }
     then(onfulfilled, onrejected) {
-        return this.
+        return this.valuePromise.then(onfulfilled, onrejected);
     }
     catch(onrejected) {
-        return this.
+        return this.valuePromise.catch(onrejected);
     }
     finally(onfinally) {
-        return this.
+        return this.valuePromise.finally(onfinally);
     }
 }
package/model-function/Model.d.ts
CHANGED
@@ -18,7 +18,7 @@ export interface Model<SETTINGS extends ModelSettings> {
      * The `withSettings` method creates a new model with the same configuration as the original model, but with the specified settings changed.
      *
      * @example
-     * const model = new OpenAITextGenerationModel({
+     * const model = new OpenAICompletionModel({
      *   model: "gpt-3.5-turbo-instruct",
      *   maxCompletionTokens: 500,
      * });
package/model-function/ModelCallEvent.d.ts
CHANGED
@@ -1,14 +1,12 @@
 import { BaseFunctionFinishedEvent, BaseFunctionStartedEvent } from "../core/FunctionEvent.js";
 import { ModelInformation } from "./ModelInformation.js";
-import { ImageDescriptionFinishedEvent, ImageDescriptionStartedEvent } from "./describe-image/ImageDescriptionEvent.js";
 import { EmbeddingFinishedEvent, EmbeddingStartedEvent } from "./embed/EmbeddingEvent.js";
 import { ImageGenerationFinishedEvent, ImageGenerationStartedEvent } from "./generate-image/ImageGenerationEvent.js";
+import { SpeechGenerationFinishedEvent, SpeechGenerationStartedEvent, SpeechStreamingFinishedEvent, SpeechStreamingStartedEvent } from "./generate-speech/SpeechGenerationEvent.js";
 import { StructureGenerationFinishedEvent, StructureGenerationStartedEvent } from "./generate-structure/StructureGenerationEvent.js";
 import { StructureStreamingFinishedEvent, StructureStreamingStartedEvent } from "./generate-structure/StructureStreamingEvent.js";
-import { TextGenerationFinishedEvent, TextGenerationStartedEvent } from "./generate-text/TextGenerationEvent.js";
-import {
-import { SpeechSynthesisFinishedEvent, SpeechSynthesisStartedEvent } from "./synthesize-speech/SpeechSynthesisEvent.js";
-import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./transcribe-speech/TranscriptionEvent.js";
+import { TextGenerationFinishedEvent, TextGenerationStartedEvent, TextStreamingFinishedEvent, TextStreamingStartedEvent } from "./generate-text/TextGenerationEvent.js";
+import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./generate-transcription/TranscriptionEvent.js";
 export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
     model: ModelInformation;
     /**
@@ -49,5 +47,5 @@ export interface BaseModelCallFinishedEvent extends BaseFunctionFinishedEvent {
      */
     result: BaseModelCallFinishedEventResult;
 }
-export type ModelCallStartedEvent = EmbeddingStartedEvent |
-export type ModelCallFinishedEvent = EmbeddingFinishedEvent |
+export type ModelCallStartedEvent = EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | TranscriptionStartedEvent;
+export type ModelCallFinishedEvent = EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | TranscriptionFinishedEvent;
package/model-function/embed/embed.cjs
CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.embed = exports.embedMany = void 0;
-const executeCall_js_1 = require("../executeCall.cjs");
+const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
 const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
 /**
  * Generate embeddings for multiple values.
@@ -16,7 +16,7 @@ const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
  * );
  */
 function embedMany(model, values, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeCall_js_1.executeCall)({
+    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
         functionType: "embedding",
         input: values,
         model,
@@ -57,7 +57,7 @@ exports.embedMany = embedMany;
  * );
  */
 function embed(model, value, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeCall_js_1.executeCall)({
+    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
        functionType: "embedding",
        input: value,
        model,
package/model-function/embed/embed.js
CHANGED
@@ -1,4 +1,4 @@
-import { executeCall } from "../executeCall.js";
+import { executeStandardCall } from "../executeStandardCall.js";
 import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
 /**
  * Generate embeddings for multiple values.
@@ -13,7 +13,7 @@ import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
  * );
  */
 export function embedMany(model, values, options) {
-    return new ModelFunctionPromise(executeCall({
+    return new ModelFunctionPromise(executeStandardCall({
         functionType: "embedding",
         input: values,
         model,
@@ -53,7 +53,7 @@ export function embedMany(model, values, options) {
  * );
  */
 export function embed(model, value, options) {
-    return new ModelFunctionPromise(executeCall({
+    return new ModelFunctionPromise(executeStandardCall({
         functionType: "embedding",
         input: value,
         model,
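The JSDoc fragments above elide the full usage examples; a short sketch of calling these functions (the embedding model class and model name are assumptions, not part of this diff):

```ts
import { embed, embedMany, OpenAITextEmbeddingModel } from "modelfusion";

const model = new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" });

// one vector per input value:
const embeddings = await embedMany(model, ["sunny beach", "rainy forest"]);

// a single vector:
const embedding = await embed(model, "sunny beach");
```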
package/model-function/{executeCall.cjs → executeStandardCall.cjs}
RENAMED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.executeCall = void 0;
+exports.executeStandardCall = void 0;
 const nanoid_1 = require("nanoid");
 const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
 const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
@@ -10,7 +10,7 @@ const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
 const getRun_js_1 = require("../core/getRun.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
-async function executeCall({ model, options, input, functionType, generateResponse, }) {
+async function executeStandardCall({ model, options, input, functionType, generateResponse, }) {
     const run = await (0, getRun_js_1.getRun)(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
@@ -104,4 +104,4 @@ async function executeCall({ model, options, input, functionType, generateRespon
         },
     };
 }
-exports.executeCall = executeCall;
+exports.executeStandardCall = executeStandardCall;
package/model-function/{executeCall.d.ts → executeStandardCall.d.ts}
RENAMED
@@ -2,7 +2,7 @@ import { FunctionOptions } from "../core/FunctionOptions.js";
 import { Model, ModelSettings } from "./Model.js";
 import { ModelCallStartedEvent } from "./ModelCallEvent.js";
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
-export declare function executeCall<VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, generateResponse, }: {
+export declare function executeStandardCall<VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, generateResponse, }: {
     model: MODEL;
     options?: FunctionOptions;
     input: unknown;
package/model-function/{executeCall.js → executeStandardCall.js}
RENAMED
@@ -7,7 +7,7 @@ import { getFunctionCallLogger } from "../core/getFunctionCallLogger.js";
 import { getRun } from "../core/getRun.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { runSafe } from "../util/runSafe.js";
-export async function executeCall({ model, options, input, functionType, generateResponse, }) {
+export async function executeStandardCall({ model, options, input, functionType, generateResponse, }) {
     const run = await getRun(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource({
package/model-function/executeStreamCall.cjs
ADDED
@@ -0,0 +1,132 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.executeStreamCall = void 0;
+const nanoid_1 = require("nanoid");
+const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
+const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
+const GlobalFunctionObservers_js_1 = require("../core/GlobalFunctionObservers.cjs");
+const AbortError_js_1 = require("../core/api/AbortError.cjs");
+const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
+const getRun_js_1 = require("../core/getRun.cjs");
+const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
+const runSafe_js_1 = require("../util/runSafe.cjs");
+async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }) {
+    const run = await (0, getRun_js_1.getRun)(options?.run);
+    const settings = model.settings;
+    const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
+        observers: [
+            ...(0, getFunctionCallLogger_js_1.getFunctionCallLogger)(options?.logging ?? (0, GlobalFunctionLogging_js_1.getGlobalFunctionLogging)()),
+            ...(0, GlobalFunctionObservers_js_1.getGlobalFunctionObservers)(),
+            ...(settings.observers ?? []),
+            ...(run?.functionObserver != null ? [run.functionObserver] : []),
+            ...(options?.observers ?? []),
+        ],
+        errorHandler: run?.errorHandler,
+    });
+    const durationMeasurement = (0, DurationMeasurement_js_1.startDurationMeasurement)();
+    const startMetadata = {
+        functionType,
+        callId: `call-${(0, nanoid_1.nanoid)()}`,
+        runId: run?.runId,
+        sessionId: run?.sessionId,
+        userId: run?.userId,
+        functionId: options?.functionId,
+        model: model.modelInformation,
+        settings: model.settingsForEvent,
+        input,
+        timestamp: durationMeasurement.startDate,
+        startTimestamp: durationMeasurement.startDate,
+    };
+    eventSource.notify({
+        eventType: "started",
+        ...startMetadata,
+    });
+    const result = await (0, runSafe_js_1.runSafe)(async () => {
+        const deltaIterable = await startStream({
+            functionId: options?.functionId,
+            logging: options?.logging,
+            observers: options?.observers,
+            run,
+        });
+        return (async function* () {
+            for await (const event of deltaIterable) {
+                if (event?.type === "error") {
+                    const error = event.error;
+                    const finishMetadata = {
+                        eventType: "finished",
+                        ...startMetadata,
+                        finishTimestamp: new Date(),
+                        durationInMs: durationMeasurement.durationInMs,
+                    };
+                    eventSource.notify(error instanceof AbortError_js_1.AbortError
+                        ? {
+                            ...finishMetadata,
+                            result: { status: "abort" },
+                        }
+                        : {
+                            ...finishMetadata,
+                            result: { status: "error", error },
+                        });
+                    throw error;
+                }
+                if (event?.type === "delta") {
+                    const value = processDelta(event);
+                    if (value !== undefined) {
+                        yield value;
+                    }
+                }
+            }
+            if (processFinished != null) {
+                const value = processFinished();
+                if (value !== undefined) {
+                    yield value;
+                }
+            }
+            const finishMetadata = {
+                eventType: "finished",
+                ...startMetadata,
+                finishTimestamp: new Date(),
+                durationInMs: durationMeasurement.durationInMs,
+            };
+            eventSource.notify({
+                ...finishMetadata,
+                result: {
+                    status: "success",
+                    ...getResult(),
+                },
+            });
+        })();
+    });
+    if (!result.ok) {
+        const finishMetadata = {
+            eventType: "finished",
+            ...startMetadata,
+            finishTimestamp: new Date(),
+            durationInMs: durationMeasurement.durationInMs,
+        };
+        if (result.isAborted) {
+            eventSource.notify({
+                ...finishMetadata,
+                eventType: "finished",
+                result: {
+                    status: "abort",
+                },
+            });
+            throw new AbortError_js_1.AbortError();
+        }
+        eventSource.notify({
+            ...finishMetadata,
+            eventType: "finished",
+            result: {
+                status: "error",
+                error: result.error,
+            },
+        });
+        throw result.error;
+    }
+    return {
+        value: result.value,
+        metadata: startMetadata,
+    };
+}
+exports.executeStreamCall = executeStreamCall;
package/model-function/executeStreamCall.d.ts
ADDED
@@ -0,0 +1,20 @@
+import { FunctionOptions } from "../core/FunctionOptions.js";
+import { Delta } from "./Delta.js";
+import { Model, ModelSettings } from "./Model.js";
+import { ModelCallStartedEvent } from "./ModelCallEvent.js";
+import { ModelCallMetadata } from "./ModelCallMetadata.js";
+export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }: {
+    model: MODEL;
+    options?: FunctionOptions;
+    input: unknown;
+    functionType: ModelCallStartedEvent["functionType"];
+    startStream: (options?: FunctionOptions) => PromiseLike<AsyncIterable<Delta<DELTA_VALUE>>>;
+    processDelta: (delta: Delta<DELTA_VALUE> & {
+        type: "delta";
+    }) => VALUE | undefined;
+    processFinished?: () => VALUE | undefined;
+    getResult: () => Record<string, unknown>;
+}): Promise<{
+    value: AsyncIterable<VALUE>;
+    metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
+}>;