modelfusion 0.48.0 → 0.49.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +30 -45
- package/model-function/AsyncIterableResultPromise.cjs +5 -5
- package/model-function/AsyncIterableResultPromise.d.ts +3 -3
- package/model-function/AsyncIterableResultPromise.js +5 -5
- package/model-function/Model.d.ts +1 -1
- package/model-function/ModelCallEvent.d.ts +5 -7
- package/model-function/embed/embed.cjs +3 -3
- package/model-function/embed/embed.js +3 -3
- package/model-function/{executeCall.cjs → executeStandardCall.cjs} +3 -3
- package/model-function/{executeCall.d.ts → executeStandardCall.d.ts} +1 -1
- package/model-function/{executeCall.js → executeStandardCall.js} +1 -1
- package/model-function/executeStreamCall.cjs +132 -0
- package/model-function/executeStreamCall.d.ts +20 -0
- package/model-function/executeStreamCall.js +128 -0
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +27 -0
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +15 -0
- package/model-function/generate-speech/generateSpeech.cjs +24 -0
- package/model-function/generate-speech/generateSpeech.d.ts +8 -0
- package/model-function/generate-speech/generateSpeech.js +20 -0
- package/model-function/generate-speech/index.cjs +20 -0
- package/model-function/generate-speech/index.d.ts +4 -0
- package/model-function/generate-speech/index.js +4 -0
- package/model-function/generate-speech/streamSpeech.cjs +34 -0
- package/model-function/generate-speech/streamSpeech.d.ts +8 -0
- package/model-function/generate-speech/streamSpeech.js +30 -0
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-structure/generateStructureOrText.cjs +2 -2
- package/model-function/generate-structure/generateStructureOrText.js +2 -2
- package/model-function/generate-structure/index.cjs +27 -0
- package/model-function/generate-structure/index.d.ts +11 -0
- package/model-function/generate-structure/index.js +11 -0
- package/model-function/generate-structure/streamStructure.cjs +28 -136
- package/model-function/generate-structure/streamStructure.js +27 -135
- package/model-function/generate-text/TextGenerationEvent.d.ts +6 -0
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/index.cjs +0 -1
- package/model-function/generate-text/index.d.ts +0 -1
- package/model-function/generate-text/index.js +0 -1
- package/model-function/generate-text/streamText.cjs +21 -128
- package/model-function/generate-text/streamText.js +20 -127
- package/model-function/generate-text/trimChatPrompt.cjs +1 -1
- package/model-function/generate-text/trimChatPrompt.d.ts +1 -1
- package/model-function/generate-text/trimChatPrompt.js +1 -1
- package/model-function/{transcribe-speech/transcribe.cjs → generate-transcription/generateTranscription.cjs} +6 -6
- package/model-function/{transcribe-speech/transcribe.d.ts → generate-transcription/generateTranscription.d.ts} +2 -2
- package/model-function/{transcribe-speech/transcribe.js → generate-transcription/generateTranscription.js} +4 -4
- package/model-function/index.cjs +5 -20
- package/model-function/index.d.ts +5 -20
- package/model-function/index.js +5 -20
- package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.cjs → ElevenLabsSpeechModel.cjs} +6 -6
- package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.d.ts → ElevenLabsSpeechModel.d.ts} +8 -8
- package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.js → ElevenLabsSpeechModel.js} +4 -4
- package/model-provider/elevenlabs/index.cjs +1 -1
- package/model-provider/elevenlabs/index.d.ts +1 -1
- package/model-provider/elevenlabs/index.js +1 -1
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +21 -2
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +11 -6
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +21 -2
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.cjs → LmntSpeechModel.cjs} +5 -5
- package/model-provider/lmnt/LmntSpeechModel.d.ts +26 -0
- package/model-provider/lmnt/{LmntSpeechSynthesisModel.js → LmntSpeechModel.js} +3 -3
- package/model-provider/lmnt/index.cjs +1 -1
- package/model-provider/lmnt/index.d.ts +1 -1
- package/model-provider/lmnt/index.js +1 -1
- package/model-provider/openai/{OpenAITextGenerationModel.cjs → OpenAICompletionModel.cjs} +17 -17
- package/model-provider/openai/{OpenAITextGenerationModel.d.ts → OpenAICompletionModel.d.ts} +25 -25
- package/model-provider/openai/{OpenAITextGenerationModel.js → OpenAICompletionModel.js} +12 -12
- package/model-provider/openai/OpenAICostCalculator.cjs +3 -3
- package/model-provider/openai/OpenAICostCalculator.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/TikTokenTokenizer.d.ts +2 -2
- package/model-provider/openai/index.cjs +1 -1
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +1 -1
- package/model-function/describe-image/ImageDescriptionEvent.d.ts +0 -18
- package/model-function/describe-image/ImageDescriptionModel.d.ts +0 -10
- package/model-function/describe-image/describeImage.cjs +0 -26
- package/model-function/describe-image/describeImage.d.ts +0 -9
- package/model-function/describe-image/describeImage.js +0 -22
- package/model-function/generate-text/TextStreamingEvent.cjs +0 -2
- package/model-function/generate-text/TextStreamingEvent.d.ts +0 -7
- package/model-function/generate-text/TextStreamingEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisEvent.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +0 -21
- package/model-function/synthesize-speech/SpeechSynthesisEvent.js +0 -1
- package/model-function/synthesize-speech/SpeechSynthesisModel.cjs +0 -2
- package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +0 -15
- package/model-function/synthesize-speech/SpeechSynthesisModel.js +0 -1
- package/model-function/synthesize-speech/synthesizeSpeech.cjs +0 -67
- package/model-function/synthesize-speech/synthesizeSpeech.d.ts +0 -14
- package/model-function/synthesize-speech/synthesizeSpeech.js +0 -63
- package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +0 -26
- /package/model-function/{describe-image/ImageDescriptionEvent.cjs → generate-speech/SpeechGenerationEvent.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionEvent.js → generate-speech/SpeechGenerationEvent.js} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.cjs → generate-speech/SpeechGenerationModel.cjs} +0 -0
- /package/model-function/{describe-image/ImageDescriptionModel.js → generate-speech/SpeechGenerationModel.js} +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.js +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.cjs +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.d.ts +0 -0
- /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.js +0 -0
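The dominant theme of this release is renaming: text-to-speech moves from `synthesizeSpeech` to `generateSpeech`/`streamSpeech`, transcription from `transcribe` to `generateTranscription`, and the OpenAI text model class to `OpenAICompletionModel`, while the describe-image function is removed. As a quick orientation, here is a sketch derived from the file list above (not official migration notes, and assuming the names are re-exported from the package root, as the `index.*` changes suggest):

```ts
import {
  generateTranscription, // 0.48.0: transcribe
  generateSpeech,        // 0.48.0: synthesizeSpeech
  streamSpeech,          // 0.48.0: synthesizeSpeech with { mode: "stream-duplex" }
  OpenAICompletionModel, // 0.48.0: OpenAITextGenerationModel
  ElevenLabsSpeechModel, // 0.48.0: ElevenLabsSpeechSynthesisModel
  LmntSpeechModel,       // 0.48.0: LmntSpeechSynthesisModel
} from "modelfusion";
// Removed in 0.49.0: describeImage and the describe-image model types.
```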
package/README.md
CHANGED
@@ -35,17 +35,17 @@ Or use a template: [ModelFusion terminal app starter](https://github.com/lgramme
 
 You can provide API keys for the different [integrations](https://modelfusion.dev/integration/model-provider/) using environment variables (e.g., `OPENAI_API_KEY`) or pass them into the model constructors as options.
 
-### [Generate
+### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
 
 Generate text using a language model and a prompt.
 You can stream the text if it is supported by the model.
-You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text
+You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text#prompt-format) to change the prompt format of a model.
 
 #### generateText
 
 ```ts
 const text = await generateText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
   }),
   "Write a short story about a robot learning to love:\n\n"
@@ -58,7 +58,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 
 ```ts
 const textStream = await streamText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
   }),
   "Write a short story about a robot learning to love:\n\n"
@@ -71,7 +71,7 @@ for await (const textFragment of textStream) {
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
 
-### [Generate
+### [Generate Structure](https://modelfusion.dev/guide/function/generate-structure#generatestructure)
 
 Generate typed objects using a language model and a schema.
 
@@ -256,12 +256,12 @@ const { tool, parameters, result, text } = await useToolOrGenerateText(
 );
 ```
 
-### [
+### [Generate Transcription](https://modelfusion.dev/guide/function/generate-transcription)
 
-
+Transcribe speech (audio) data into text. Also called speech-to-text (STT).
 
 ```ts
-const transcription = await transcribe(
+const transcription = await generateTranscription(
   new OpenAITranscriptionModel({ model: "whisper-1" }),
   {
     type: "mp3",
@@ -272,18 +272,20 @@ const transcription = await transcribe(
 
 Providers: [OpenAI (Whisper)](https://modelfusion.dev/integration/model-provider/openai)
 
-### [
+### [Generate Speech](https://modelfusion.dev/guide/function/generate-speech)
 
-
+Synthesize speech (audio) from text. Also called TTS (text-to-speech).
 
 Providers: [Eleven Labs](https://modelfusion.dev/integration/model-provider/elevenlabs), [LMNT](https://modelfusion.dev/integration/model-provider/lmnt)
 
-####
+#### generateSpeech
+
+`generateSpeech` synthesizes speech from text.
 
 ```ts
 // `speech` is a Buffer with MP3 audio data
-const speech = await synthesizeSpeech(
-  new LmntSpeechSynthesisModel({
+const speech = await generateSpeech(
+  new LmntSpeechModel({
     voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
   }),
   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
@@ -293,13 +295,15 @@ const speech = await synthesizeSpeech(
 );
 ```
 
-####
+#### streamSpeech
+
+`generateSpeech` generates a stream of speech chunks from text or from a text stream. Depending on the model, this can be fully duplex.
 
 ```ts
 const textStream = await streamText(/* ... */);
 
-const speechStream = await synthesizeSpeech(
-  new ElevenLabsSpeechSynthesisModel({
+const speechStream = await streamSpeech(
+  new ElevenLabsSpeechModel({
     voice: "pNInz6obpgDQGcFmaJgB", // Adam
     model: "eleven_monolingual_v1",
     voiceSettings: { stability: 1, similarityBoost: 0.35 },
@@ -307,8 +311,7 @@ const speechStream = await synthesizeSpeech(
       chunkLengthSchedule: [50, 90, 120, 150, 200],
     },
   }),
-  textStream,
-  { mode: "stream-duplex" }
+  textStream
 );
 
 for await (const part of speechStream) {
@@ -316,21 +319,6 @@ for await (const part of speechStream) {
 }
 ```
 
-### [Describe Image](https://modelfusion.dev/guide/function/describe-image)
-
-Describe an image as text, e.g. for image captioning or OCR.
-
-```ts
-const text = await describeImage(
-  new HuggingFaceImageDescriptionModel({
-    model: "nlpconnect/vit-gpt2-image-captioning",
-  }),
-  data // buffer with image data
-);
-```
-
-Providers: [HuggingFace](/integration/model-provider/huggingface)
-
 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
 
 Generate an image from a prompt.
@@ -460,7 +448,7 @@ Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-ind
 
 Prompt formats let you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
-#### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text
+#### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text#prompt-format)
 
 ```ts
 const text = await generateText(
@@ -525,7 +513,7 @@ ModelFusion model functions return rich results that include the original respon
 ```ts
 // access the full response (needs to be typed) and the metadata:
 const { value, response, metadata } = await generateText(
-  new OpenAITextGenerationModel({
+  new OpenAICompletionModel({
     model: "gpt-3.5-turbo-instruct",
     maxCompletionTokens: 1000,
     n: 2, // generate 2 completions
@@ -536,7 +524,7 @@ const { value, response, metadata } = await generateText(
 console.log(metadata);
 
 // cast to the response type:
-for (const choice of (response as
+for (const choice of (response as OpenAICompletionResponse).choices) {
   console.log(choice.text);
 }
 ```
@@ -550,17 +538,14 @@ Integrations: [Helicone](https://modelfusion.dev/integration/observability/helic
 ### [Guide](https://modelfusion.dev/guide)
 
 - [Model Functions](https://modelfusion.dev/guide/function/)
-  - [Generate
-
-  - [Generate
+  - [Generate text](https://modelfusion.dev/guide/function/generate-text)
+  - [Generate image](https://modelfusion.dev/guide/function/generate-image)
+  - [Generate speech](https://modelfusion.dev/guide/function/generate-speech)
+  - [Generate transcription](https://modelfusion.dev/guide/function/generation-transcription)
+  - [Generate structure](https://modelfusion.dev/guide/function/generate-structure)
   - [Generate structure or text](https://modelfusion.dev/guide/function/generate-structure-or-text)
   - [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
   - [Embed Value](https://modelfusion.dev/guide/function/embed)
-  - [Transcribe Speech](https://modelfusion.dev/guide/function/transcribe-speech)
-  - [Synthesize Speech](https://modelfusion.dev/guide/function/synthesize-speech)
-  - [Describe Image](https://modelfusion.dev/guide/function/describe-image)
-  - [Generate Image](https://modelfusion.dev/guide/function/generate-image)
-  - [Prompt Format](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 - [Guards](https://modelfusion.dev/guide/guard)
 - [Tools](https://modelfusion.dev/guide/tools)
 - [Vector Indices](https://modelfusion.dev/guide/vector-index)
@@ -628,7 +613,7 @@ Create an 19th century painting image for your input.
 
 Record audio with push-to-talk and transcribe it using Whisper, implemented as a Next.js app. The app shows a list of the transcriptions.
 
-### [
+### [Speech Streaming (Vite(React) + Fastify))](https://github.com/lgrammel/modelfusion/tree/main/examples/speech-streaming-vite-react-fastify)
 
 > _Speech Streaming_, _OpenAI_, _Elevenlabs_ _streaming_, _Vite_, _Fastify_
 
package/model-function/AsyncIterableResultPromise.cjs
CHANGED
@@ -13,25 +13,25 @@ class AsyncIterableResultPromise extends Promise {
             writable: true,
             value: fullPromise
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "valuePromise", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.
+        this.valuePromise = fullPromise.then((result) => result.value);
     }
     asFullResponse() {
         return this.fullPromise;
     }
     then(onfulfilled, onrejected) {
-        return this.
+        return this.valuePromise.then(onfulfilled, onrejected);
     }
     catch(onrejected) {
-        return this.
+        return this.valuePromise.catch(onrejected);
     }
     finally(onfinally) {
-        return this.
+        return this.valuePromise.finally(onfinally);
     }
 }
 exports.AsyncIterableResultPromise = AsyncIterableResultPromise;
package/model-function/AsyncIterableResultPromise.d.ts
CHANGED
@@ -1,13 +1,13 @@
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
 export declare class AsyncIterableResultPromise<T> extends Promise<AsyncIterable<T>> {
     private fullPromise;
-    private
+    private valuePromise;
     constructor(fullPromise: Promise<{
-
+        value: AsyncIterable<T>;
         metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
     }>);
     asFullResponse(): Promise<{
-
+        value: AsyncIterable<T>;
         metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
     }>;
     then<TResult1 = AsyncIterable<T>, TResult2 = never>(onfulfilled?: ((value: AsyncIterable<T>) => TResult1 | PromiseLike<TResult1>) | undefined | null, onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | undefined | null): Promise<TResult1 | TResult2>;
package/model-function/AsyncIterableResultPromise.js
CHANGED
@@ -10,24 +10,24 @@ export class AsyncIterableResultPromise extends Promise {
             writable: true,
             value: fullPromise
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "valuePromise", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.
+        this.valuePromise = fullPromise.then((result) => result.value);
     }
     asFullResponse() {
         return this.fullPromise;
     }
     then(onfulfilled, onrejected) {
-        return this.
+        return this.valuePromise.then(onfulfilled, onrejected);
     }
     catch(onrejected) {
-        return this.
+        return this.valuePromise.catch(onrejected);
     }
     finally(onfinally) {
-        return this.
+        return this.valuePromise.finally(onfinally);
     }
 }
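The rename to `valuePromise` keeps the class's dual behavior: awaiting the promise yields only the `AsyncIterable`, while `asFullResponse()` resolves to the full `{ value, metadata }` pair. A usage sketch (the `streamText` call and the model are illustrative stand-ins, not part of this diff):

```ts
import { streamText } from "modelfusion";

declare const model: Parameters<typeof streamText>[0]; // stand-in model

// Awaiting the AsyncIterableResultPromise resolves valuePromise:
const textStream = await streamText(model, "Hello!");

// asFullResponse() exposes the underlying fullPromise with call metadata:
const { value: stream, metadata } = await streamText(model, "Hello!").asFullResponse();
for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```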
package/model-function/Model.d.ts
CHANGED
@@ -18,7 +18,7 @@ export interface Model<SETTINGS extends ModelSettings> {
      * The `withSettings` method creates a new model with the same configuration as the original model, but with the specified settings changed.
      *
      * @example
-     * const model = new OpenAITextGenerationModel({
+     * const model = new OpenAICompletionModel({
      *   model: "gpt-3.5-turbo-instruct",
      *   maxCompletionTokens: 500,
      * });
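The doc comment now points at the renamed completion model. Per the contract described here, `withSettings` returns a new model and leaves the original untouched:

```ts
import { OpenAICompletionModel } from "modelfusion";

const base = new OpenAICompletionModel({
  model: "gpt-3.5-turbo-instruct",
  maxCompletionTokens: 500,
});

// New model with the same configuration but different settings; `base` is unchanged:
const longer = base.withSettings({ maxCompletionTokens: 1000 });
```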
package/model-function/ModelCallEvent.d.ts
CHANGED
@@ -1,14 +1,12 @@
 import { BaseFunctionFinishedEvent, BaseFunctionStartedEvent } from "../core/FunctionEvent.js";
 import { ModelInformation } from "./ModelInformation.js";
-import { ImageDescriptionFinishedEvent, ImageDescriptionStartedEvent } from "./describe-image/ImageDescriptionEvent.js";
 import { EmbeddingFinishedEvent, EmbeddingStartedEvent } from "./embed/EmbeddingEvent.js";
 import { ImageGenerationFinishedEvent, ImageGenerationStartedEvent } from "./generate-image/ImageGenerationEvent.js";
+import { SpeechGenerationFinishedEvent, SpeechGenerationStartedEvent, SpeechStreamingFinishedEvent, SpeechStreamingStartedEvent } from "./generate-speech/SpeechGenerationEvent.js";
 import { StructureGenerationFinishedEvent, StructureGenerationStartedEvent } from "./generate-structure/StructureGenerationEvent.js";
 import { StructureStreamingFinishedEvent, StructureStreamingStartedEvent } from "./generate-structure/StructureStreamingEvent.js";
-import { TextGenerationFinishedEvent, TextGenerationStartedEvent } from "./generate-text/TextGenerationEvent.js";
-import {
-import { SpeechSynthesisFinishedEvent, SpeechSynthesisStartedEvent } from "./synthesize-speech/SpeechSynthesisEvent.js";
-import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./transcribe-speech/TranscriptionEvent.js";
+import { TextGenerationFinishedEvent, TextGenerationStartedEvent, TextStreamingFinishedEvent, TextStreamingStartedEvent } from "./generate-text/TextGenerationEvent.js";
+import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./generate-transcription/TranscriptionEvent.js";
 export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
     model: ModelInformation;
     /**
@@ -49,5 +47,5 @@ export interface BaseModelCallFinishedEvent extends BaseFunctionFinishedEvent {
      */
     result: BaseModelCallFinishedEventResult;
 }
-export type ModelCallStartedEvent = EmbeddingStartedEvent |
-export type ModelCallFinishedEvent = EmbeddingFinishedEvent |
+export type ModelCallStartedEvent = EmbeddingStartedEvent | ImageGenerationStartedEvent | SpeechGenerationStartedEvent | SpeechStreamingStartedEvent | StructureGenerationStartedEvent | StructureStreamingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | TranscriptionStartedEvent;
+export type ModelCallFinishedEvent = EmbeddingFinishedEvent | ImageGenerationFinishedEvent | SpeechGenerationFinishedEvent | SpeechStreamingFinishedEvent | StructureGenerationFinishedEvent | StructureStreamingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | TranscriptionFinishedEvent;
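The unions now cover speech generation, speech streaming, and text streaming. A handler can narrow on `result.status`; a minimal observer sketch (assuming `ModelCallFinishedEvent` is exported from the package root, and using the `durationInMs`/`functionType` fields that the `finishMetadata` objects later in this diff carry):

```ts
import type { ModelCallFinishedEvent } from "modelfusion";

function onModelCallFinished(event: ModelCallFinishedEvent) {
  if (event.result.status === "success") {
    console.log(`${event.functionType} finished in ${event.durationInMs} ms`);
  } else {
    console.log(`${event.functionType} ended with status: ${event.result.status}`);
  }
}
```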
package/model-function/embed/embed.cjs
CHANGED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.embed = exports.embedMany = void 0;
-const executeCall_js_1 = require("../executeCall.cjs");
+const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
 const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
 /**
  * Generate embeddings for multiple values.
@@ -16,7 +16,7 @@ const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
  * );
  */
 function embedMany(model, values, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeCall_js_1.executeCall)({
+    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
         functionType: "embedding",
         input: values,
         model,
@@ -57,7 +57,7 @@ exports.embedMany = embedMany;
  * );
  */
 function embed(model, value, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeCall_js_1.executeCall)({
+    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
         functionType: "embedding",
         input: value,
         model,
package/model-function/embed/embed.js
CHANGED
@@ -1,4 +1,4 @@
-import { executeCall } from "../executeCall.js";
+import { executeStandardCall } from "../executeStandardCall.js";
 import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
 /**
  * Generate embeddings for multiple values.
@@ -13,7 +13,7 @@ import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
  * );
  */
 export function embedMany(model, values, options) {
-    return new ModelFunctionPromise(executeCall({
+    return new ModelFunctionPromise(executeStandardCall({
         functionType: "embedding",
         input: values,
         model,
@@ -53,7 +53,7 @@ export function embedMany(model, values, options) {
  * );
  */
 export function embed(model, value, options) {
-    return new ModelFunctionPromise(executeCall({
+    return new ModelFunctionPromise(executeStandardCall({
         functionType: "embedding",
         input: value,
         model,
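Only the import path and helper name change here; the `embed`/`embedMany` call shape from the doc comments is unchanged. For reference, a usage sketch (the embedding model class is from the package's OpenAI integration; treat its exact name as an assumption, since it does not appear in this diff):

```ts
import { embed, embedMany, OpenAITextEmbeddingModel } from "modelfusion";

const embeddings = await embedMany(
  new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
  ["sunny day at the beach", "rainy afternoon in the city"]
);

const embedding = await embed(
  new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
  "At first, Nox didn't know what to do with the bundle."
);
```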
package/model-function/{executeCall.cjs → executeStandardCall.cjs}
RENAMED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.executeCall = void 0;
+exports.executeStandardCall = void 0;
 const nanoid_1 = require("nanoid");
 const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
 const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
@@ -10,7 +10,7 @@ const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
 const getRun_js_1 = require("../core/getRun.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
-async function executeCall({ model, options, input, functionType, generateResponse, }) {
+async function executeStandardCall({ model, options, input, functionType, generateResponse, }) {
     const run = await (0, getRun_js_1.getRun)(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
@@ -104,4 +104,4 @@ async function executeCall({ model, options, input, functionType, generateRespon
         },
     };
 }
-exports.executeCall = executeCall;
+exports.executeStandardCall = executeStandardCall;
package/model-function/{executeCall.d.ts → executeStandardCall.d.ts}
RENAMED
@@ -2,7 +2,7 @@ import { FunctionOptions } from "../core/FunctionOptions.js";
 import { Model, ModelSettings } from "./Model.js";
 import { ModelCallStartedEvent } from "./ModelCallEvent.js";
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
-export declare function executeCall<VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, generateResponse, }: {
+export declare function executeStandardCall<VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, generateResponse, }: {
     model: MODEL;
     options?: FunctionOptions;
     input: unknown;
package/model-function/{executeCall.js → executeStandardCall.js}
RENAMED
@@ -7,7 +7,7 @@ import { getFunctionCallLogger } from "../core/getFunctionCallLogger.js";
 import { getRun } from "../core/getRun.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { runSafe } from "../util/runSafe.js";
-export async function executeCall({ model, options, input, functionType, generateResponse, }) {
+export async function executeStandardCall({ model, options, input, functionType, generateResponse, }) {
     const run = await getRun(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource({
package/model-function/executeStreamCall.cjs
ADDED
@@ -0,0 +1,132 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.executeStreamCall = void 0;
+const nanoid_1 = require("nanoid");
+const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
+const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
+const GlobalFunctionObservers_js_1 = require("../core/GlobalFunctionObservers.cjs");
+const AbortError_js_1 = require("../core/api/AbortError.cjs");
+const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
+const getRun_js_1 = require("../core/getRun.cjs");
+const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
+const runSafe_js_1 = require("../util/runSafe.cjs");
+async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }) {
+    const run = await (0, getRun_js_1.getRun)(options?.run);
+    const settings = model.settings;
+    const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
+        observers: [
+            ...(0, getFunctionCallLogger_js_1.getFunctionCallLogger)(options?.logging ?? (0, GlobalFunctionLogging_js_1.getGlobalFunctionLogging)()),
+            ...(0, GlobalFunctionObservers_js_1.getGlobalFunctionObservers)(),
+            ...(settings.observers ?? []),
+            ...(run?.functionObserver != null ? [run.functionObserver] : []),
+            ...(options?.observers ?? []),
+        ],
+        errorHandler: run?.errorHandler,
+    });
+    const durationMeasurement = (0, DurationMeasurement_js_1.startDurationMeasurement)();
+    const startMetadata = {
+        functionType,
+        callId: `call-${(0, nanoid_1.nanoid)()}`,
+        runId: run?.runId,
+        sessionId: run?.sessionId,
+        userId: run?.userId,
+        functionId: options?.functionId,
+        model: model.modelInformation,
+        settings: model.settingsForEvent,
+        input,
+        timestamp: durationMeasurement.startDate,
+        startTimestamp: durationMeasurement.startDate,
+    };
+    eventSource.notify({
+        eventType: "started",
+        ...startMetadata,
+    });
+    const result = await (0, runSafe_js_1.runSafe)(async () => {
+        const deltaIterable = await startStream({
+            functionId: options?.functionId,
+            logging: options?.logging,
+            observers: options?.observers,
+            run,
+        });
+        return (async function* () {
+            for await (const event of deltaIterable) {
+                if (event?.type === "error") {
+                    const error = event.error;
+                    const finishMetadata = {
+                        eventType: "finished",
+                        ...startMetadata,
+                        finishTimestamp: new Date(),
+                        durationInMs: durationMeasurement.durationInMs,
+                    };
+                    eventSource.notify(error instanceof AbortError_js_1.AbortError
+                        ? {
+                            ...finishMetadata,
+                            result: { status: "abort" },
+                        }
+                        : {
+                            ...finishMetadata,
+                            result: { status: "error", error },
+                        });
+                    throw error;
+                }
+                if (event?.type === "delta") {
+                    const value = processDelta(event);
+                    if (value !== undefined) {
+                        yield value;
+                    }
+                }
+            }
+            if (processFinished != null) {
+                const value = processFinished();
+                if (value !== undefined) {
+                    yield value;
+                }
+            }
+            const finishMetadata = {
+                eventType: "finished",
+                ...startMetadata,
+                finishTimestamp: new Date(),
+                durationInMs: durationMeasurement.durationInMs,
+            };
+            eventSource.notify({
+                ...finishMetadata,
+                result: {
+                    status: "success",
+                    ...getResult(),
+                },
+            });
+        })();
+    });
+    if (!result.ok) {
+        const finishMetadata = {
+            eventType: "finished",
+            ...startMetadata,
+            finishTimestamp: new Date(),
+            durationInMs: durationMeasurement.durationInMs,
+        };
+        if (result.isAborted) {
+            eventSource.notify({
+                ...finishMetadata,
+                eventType: "finished",
+                result: {
+                    status: "abort",
+                },
+            });
+            throw new AbortError_js_1.AbortError();
+        }
+        eventSource.notify({
+            ...finishMetadata,
+            eventType: "finished",
+            result: {
+                status: "error",
+                error: result.error,
+            },
+        });
+        throw result.error;
+    }
+    return {
+        value: result.value,
+        metadata: startMetadata,
+    };
+}
+exports.executeStreamCall = executeStreamCall;
package/model-function/executeStreamCall.d.ts
ADDED
@@ -0,0 +1,20 @@
+import { FunctionOptions } from "../core/FunctionOptions.js";
+import { Delta } from "./Delta.js";
+import { Model, ModelSettings } from "./Model.js";
+import { ModelCallStartedEvent } from "./ModelCallEvent.js";
+import { ModelCallMetadata } from "./ModelCallMetadata.js";
+export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }: {
+    model: MODEL;
+    options?: FunctionOptions;
+    input: unknown;
+    functionType: ModelCallStartedEvent["functionType"];
+    startStream: (options?: FunctionOptions) => PromiseLike<AsyncIterable<Delta<DELTA_VALUE>>>;
+    processDelta: (delta: Delta<DELTA_VALUE> & {
+        type: "delta";
+    }) => VALUE | undefined;
+    processFinished?: () => VALUE | undefined;
+    getResult: () => Record<string, unknown>;
+}): Promise<{
+    value: AsyncIterable<VALUE>;
+    metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
+}>;