modelfusion 0.53.0 → 0.53.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/core/FunctionOptions.d.ts +4 -3
  2. package/model-function/embed/embed.cjs +16 -0
  3. package/model-function/embed/embed.d.ts +16 -0
  4. package/model-function/embed/embed.js +16 -0
  5. package/model-function/executeStreamCall.cjs +72 -35
  6. package/model-function/executeStreamCall.js +72 -35
  7. package/model-function/generate-image/generateImage.cjs +12 -3
  8. package/model-function/generate-image/generateImage.d.ts +12 -3
  9. package/model-function/generate-image/generateImage.js +12 -3
  10. package/model-function/generate-speech/generateSpeech.cjs +16 -1
  11. package/model-function/generate-speech/generateSpeech.d.ts +16 -1
  12. package/model-function/generate-speech/generateSpeech.js +16 -1
  13. package/model-function/generate-speech/streamSpeech.cjs +22 -1
  14. package/model-function/generate-speech/streamSpeech.d.ts +22 -1
  15. package/model-function/generate-speech/streamSpeech.js +22 -1
  16. package/model-function/generate-structure/generateStructure.cjs +41 -0
  17. package/model-function/generate-structure/generateStructure.d.ts +41 -0
  18. package/model-function/generate-structure/generateStructure.js +41 -0
  19. package/model-function/generate-structure/generateStructureOrText.cjs +62 -0
  20. package/model-function/generate-structure/generateStructureOrText.d.ts +62 -0
  21. package/model-function/generate-structure/generateStructureOrText.js +62 -0
  22. package/model-function/generate-structure/streamStructure.cjs +72 -1
  23. package/model-function/generate-structure/streamStructure.d.ts +68 -1
  24. package/model-function/generate-structure/streamStructure.js +72 -1
  25. package/model-function/generate-text/generateText.cjs +14 -6
  26. package/model-function/generate-text/generateText.d.ts +14 -6
  27. package/model-function/generate-text/generateText.js +14 -6
  28. package/model-function/generate-text/streamText.cjs +25 -0
  29. package/model-function/generate-text/streamText.d.ts +25 -0
  30. package/model-function/generate-text/streamText.js +25 -0
  31. package/model-function/generate-transcription/generateTranscription.cjs +10 -5
  32. package/model-function/generate-transcription/generateTranscription.d.ts +10 -5
  33. package/model-function/generate-transcription/generateTranscription.js +10 -5
  34. package/model-function/tokenize-text/Tokenizer.d.ts +27 -3
  35. package/package.json +1 -1
  36. package/util/AsyncQueue.cjs +26 -11
  37. package/util/AsyncQueue.d.ts +2 -0
  38. package/util/AsyncQueue.js +26 -11
  39. package/util/AsyncQueue.test.cjs +1 -1
  40. package/util/AsyncQueue.test.js +1 -1
  41. package/util/delay.cjs +2 -2
  42. package/util/delay.d.ts +1 -1
  43. package/util/delay.js +2 -2
  44. package/util/index.cjs +1 -0
  45. package/util/index.d.ts +1 -0
  46. package/util/index.js +1 -0
@@ -5,7 +5,7 @@ import { FunctionObserver } from "./FunctionObserver.js";
5
5
  */
6
6
  export type FunctionOptions = {
7
7
  /**
8
- * Optional function identifier that is used in events to identify the function.
8
+ * Optional function identifier. Used in events and logging.
9
9
  */
10
10
  functionId?: string;
11
11
  /**
@@ -18,11 +18,12 @@ export type FunctionOptions = {
18
18
  */
19
19
  observers?: Array<FunctionObserver>;
20
20
  /**
21
- * Optional run as part of which this function is called.
21
+ * Optional run as part of which this function is called. Used in events and logging.
22
+ * Run callbacks are invoked when it is provided.
22
23
  */
23
24
  run?: Run;
24
25
  /**
25
- * Unique identifier of the call id of the parent function.
26
+ * Unique identifier of the call id of the parent function. Used in events and logging.
26
27
  */
27
28
  parentCallId?: string | undefined;
28
29
  };
@@ -6,6 +6,8 @@ const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
6
6
  /**
7
7
  * Generate embeddings for multiple values.
8
8
  *
9
+ * @see https://modelfusion.dev/guide/function/embed
10
+ *
9
11
  * @example
10
12
  * const embeddings = await embedMany(
11
13
  * new OpenAITextEmbeddingModel(...),
@@ -14,6 +16,12 @@ const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
14
16
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
15
17
  * ]
16
18
  * );
19
+ *
20
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating embeddings.
21
+ * @param {VALUE[]} values - The values to generate embeddings for.
22
+ * @param {FunctionOptions} [options] - Optional settings for the function.
23
+ *
24
+ * @returns {ModelFunctionPromise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
17
25
  */
18
26
  function embedMany(model, values, options) {
19
27
  return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
@@ -50,11 +58,19 @@ exports.embedMany = embedMany;
50
58
  /**
51
59
  * Generate an embedding for a single value.
52
60
  *
61
+ * @see https://modelfusion.dev/guide/function/embed
62
+ *
53
63
  * @example
54
64
  * const embedding = await embed(
55
65
  * new OpenAITextEmbeddingModel(...),
56
66
  * "At first, Nox didn't know what to do with the pup."
57
67
  * );
68
+ *
69
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating the embedding.
70
+ * @param {VALUE} value - The value to generate an embedding for.
71
+ * @param {FunctionOptions} [options] - Optional settings for the function.
72
+ *
73
+ * @returns {ModelFunctionPromise<Vector>} - A promise that resolves to a vector representing the embedding.
58
74
  */
59
75
  function embed(model, value, options) {
60
76
  return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
@@ -5,6 +5,8 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
5
5
  /**
6
6
  * Generate embeddings for multiple values.
7
7
  *
8
+ * @see https://modelfusion.dev/guide/function/embed
9
+ *
8
10
  * @example
9
11
  * const embeddings = await embedMany(
10
12
  * new OpenAITextEmbeddingModel(...),
@@ -13,15 +15,29 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
13
15
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
14
16
  * ]
15
17
  * );
18
+ *
19
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating embeddings.
20
+ * @param {VALUE[]} values - The values to generate embeddings for.
21
+ * @param {FunctionOptions} [options] - Optional settings for the function.
22
+ *
23
+ * @returns {ModelFunctionPromise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
16
24
  */
17
25
  export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options?: FunctionOptions): ModelFunctionPromise<Vector[]>;
18
26
  /**
19
27
  * Generate an embedding for a single value.
20
28
  *
29
+ * @see https://modelfusion.dev/guide/function/embed
30
+ *
21
31
  * @example
22
32
  * const embedding = await embed(
23
33
  * new OpenAITextEmbeddingModel(...),
24
34
  * "At first, Nox didn't know what to do with the pup."
25
35
  * );
36
+ *
37
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating the embedding.
38
+ * @param {VALUE} value - The value to generate an embedding for.
39
+ * @param {FunctionOptions} [options] - Optional settings for the function.
40
+ *
41
+ * @returns {ModelFunctionPromise<Vector>} - A promise that resolves to a vector representing the embedding.
26
42
  */
27
43
  export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options?: FunctionOptions): ModelFunctionPromise<Vector>;
@@ -3,6 +3,8 @@ import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
3
3
  /**
4
4
  * Generate embeddings for multiple values.
5
5
  *
6
+ * @see https://modelfusion.dev/guide/function/embed
7
+ *
6
8
  * @example
7
9
  * const embeddings = await embedMany(
8
10
  * new OpenAITextEmbeddingModel(...),
@@ -11,6 +13,12 @@ import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
11
13
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
12
14
  * ]
13
15
  * );
16
+ *
17
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating embeddings.
18
+ * @param {VALUE[]} values - The values to generate embeddings for.
19
+ * @param {FunctionOptions} [options] - Optional settings for the function.
20
+ *
21
+ * @returns {ModelFunctionPromise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
14
22
  */
15
23
  export function embedMany(model, values, options) {
16
24
  return new ModelFunctionPromise(executeStandardCall({
@@ -46,11 +54,19 @@ export function embedMany(model, values, options) {
46
54
  /**
47
55
  * Generate an embedding for a single value.
48
56
  *
57
+ * @see https://modelfusion.dev/guide/function/embed
58
+ *
49
59
  * @example
50
60
  * const embedding = await embed(
51
61
  * new OpenAITextEmbeddingModel(...),
52
62
  * "At first, Nox didn't know what to do with the pup."
53
63
  * );
64
+ *
65
+ * @param {EmbeddingModel<VALUE, EmbeddingModelSettings>} model - The model to use for generating the embedding.
66
+ * @param {VALUE} value - The value to generate an embedding for.
67
+ * @param {FunctionOptions} [options] - Optional settings for the function.
68
+ *
69
+ * @returns {ModelFunctionPromise<Vector>} - A promise that resolves to a vector representing the embedding.
54
70
  */
55
71
  export function embed(model, value, options) {
56
72
  return new ModelFunctionPromise(executeStandardCall({
@@ -55,53 +55,90 @@ async function executeStreamCall({ model, options, input, functionType, startStr
55
55
  const responseQueue = new AsyncQueue_js_1.AsyncQueue();
56
56
  // run async:
57
57
  (async function () {
58
- for await (const event of deltaIterable) {
59
- if (event?.type === "error") {
60
- const error = event.error;
58
+ try {
59
+ const loopResult = await (0, runSafe_js_1.runSafe)(async () => {
60
+ for await (const event of deltaIterable) {
61
+ if (event?.type === "error") {
62
+ const error = event.error;
63
+ const finishMetadata = {
64
+ eventType: "finished",
65
+ ...startMetadata,
66
+ finishTimestamp: new Date(),
67
+ durationInMs: durationMeasurement.durationInMs,
68
+ };
69
+ eventSource.notify(error instanceof AbortError_js_1.AbortError
70
+ ? {
71
+ ...finishMetadata,
72
+ result: { status: "abort" },
73
+ }
74
+ : {
75
+ ...finishMetadata,
76
+ result: { status: "error", error },
77
+ });
78
+ throw error;
79
+ }
80
+ if (event?.type === "delta") {
81
+ const value = processDelta(event);
82
+ if (value !== undefined) {
83
+ responseQueue.push(value);
84
+ }
85
+ }
86
+ }
87
+ if (processFinished != null) {
88
+ const value = processFinished();
89
+ if (value !== undefined) {
90
+ responseQueue.push(value);
91
+ }
92
+ }
93
+ });
94
+ // deal with abort or errors that happened during streaming:
95
+ if (!loopResult.ok) {
61
96
  const finishMetadata = {
62
97
  eventType: "finished",
63
98
  ...startMetadata,
64
99
  finishTimestamp: new Date(),
65
100
  durationInMs: durationMeasurement.durationInMs,
66
101
  };
67
- eventSource.notify(error instanceof AbortError_js_1.AbortError
68
- ? {
69
- ...finishMetadata,
70
- result: { status: "abort" },
71
- }
72
- : {
102
+ if (loopResult.isAborted) {
103
+ eventSource.notify({
73
104
  ...finishMetadata,
74
- result: { status: "error", error },
105
+ eventType: "finished",
106
+ result: {
107
+ status: "abort",
108
+ },
75
109
  });
76
- throw error;
77
- }
78
- if (event?.type === "delta") {
79
- const value = processDelta(event);
80
- if (value !== undefined) {
81
- responseQueue.push(value);
110
+ responseQueue.error(new AbortError_js_1.AbortError());
111
+ return; // error is handled through queue
82
112
  }
113
+ eventSource.notify({
114
+ ...finishMetadata,
115
+ eventType: "finished",
116
+ result: {
117
+ status: "error",
118
+ error: loopResult.error,
119
+ },
120
+ });
121
+ responseQueue.error(loopResult.error);
122
+ return; // error is handled through queue
83
123
  }
124
+ const finishMetadata = {
125
+ eventType: "finished",
126
+ ...startMetadata,
127
+ finishTimestamp: new Date(),
128
+ durationInMs: durationMeasurement.durationInMs,
129
+ };
130
+ eventSource.notify({
131
+ ...finishMetadata,
132
+ result: {
133
+ status: "success",
134
+ ...getResult(),
135
+ },
136
+ });
84
137
  }
85
- if (processFinished != null) {
86
- const value = processFinished();
87
- if (value !== undefined) {
88
- responseQueue.push(value);
89
- }
138
+ finally {
139
+ // always close the queue when done, no matter where a potential error happened:
140
+ responseQueue.close();
90
141
  }
91
- responseQueue.close();
92
- const finishMetadata = {
93
- eventType: "finished",
94
- ...startMetadata,
95
- finishTimestamp: new Date(),
96
- durationInMs: durationMeasurement.durationInMs,
97
- };
98
- eventSource.notify({
99
- ...finishMetadata,
100
- result: {
101
- status: "success",
102
- ...getResult(),
103
- },
104
- });
105
142
  })();
106
143
  return responseQueue;
107
144
  });
@@ -52,53 +52,90 @@ export async function executeStreamCall({ model, options, input, functionType, s
52
52
  const responseQueue = new AsyncQueue();
53
53
  // run async:
54
54
  (async function () {
55
- for await (const event of deltaIterable) {
56
- if (event?.type === "error") {
57
- const error = event.error;
55
+ try {
56
+ const loopResult = await runSafe(async () => {
57
+ for await (const event of deltaIterable) {
58
+ if (event?.type === "error") {
59
+ const error = event.error;
60
+ const finishMetadata = {
61
+ eventType: "finished",
62
+ ...startMetadata,
63
+ finishTimestamp: new Date(),
64
+ durationInMs: durationMeasurement.durationInMs,
65
+ };
66
+ eventSource.notify(error instanceof AbortError
67
+ ? {
68
+ ...finishMetadata,
69
+ result: { status: "abort" },
70
+ }
71
+ : {
72
+ ...finishMetadata,
73
+ result: { status: "error", error },
74
+ });
75
+ throw error;
76
+ }
77
+ if (event?.type === "delta") {
78
+ const value = processDelta(event);
79
+ if (value !== undefined) {
80
+ responseQueue.push(value);
81
+ }
82
+ }
83
+ }
84
+ if (processFinished != null) {
85
+ const value = processFinished();
86
+ if (value !== undefined) {
87
+ responseQueue.push(value);
88
+ }
89
+ }
90
+ });
91
+ // deal with abort or errors that happened during streaming:
92
+ if (!loopResult.ok) {
58
93
  const finishMetadata = {
59
94
  eventType: "finished",
60
95
  ...startMetadata,
61
96
  finishTimestamp: new Date(),
62
97
  durationInMs: durationMeasurement.durationInMs,
63
98
  };
64
- eventSource.notify(error instanceof AbortError
65
- ? {
66
- ...finishMetadata,
67
- result: { status: "abort" },
68
- }
69
- : {
99
+ if (loopResult.isAborted) {
100
+ eventSource.notify({
70
101
  ...finishMetadata,
71
- result: { status: "error", error },
102
+ eventType: "finished",
103
+ result: {
104
+ status: "abort",
105
+ },
72
106
  });
73
- throw error;
74
- }
75
- if (event?.type === "delta") {
76
- const value = processDelta(event);
77
- if (value !== undefined) {
78
- responseQueue.push(value);
107
+ responseQueue.error(new AbortError());
108
+ return; // error is handled through queue
79
109
  }
110
+ eventSource.notify({
111
+ ...finishMetadata,
112
+ eventType: "finished",
113
+ result: {
114
+ status: "error",
115
+ error: loopResult.error,
116
+ },
117
+ });
118
+ responseQueue.error(loopResult.error);
119
+ return; // error is handled through queue
80
120
  }
121
+ const finishMetadata = {
122
+ eventType: "finished",
123
+ ...startMetadata,
124
+ finishTimestamp: new Date(),
125
+ durationInMs: durationMeasurement.durationInMs,
126
+ };
127
+ eventSource.notify({
128
+ ...finishMetadata,
129
+ result: {
130
+ status: "success",
131
+ ...getResult(),
132
+ },
133
+ });
81
134
  }
82
- if (processFinished != null) {
83
- const value = processFinished();
84
- if (value !== undefined) {
85
- responseQueue.push(value);
86
- }
135
+ finally {
136
+ // always close the queue when done, no matter where a potential error happened:
137
+ responseQueue.close();
87
138
  }
88
- responseQueue.close();
89
- const finishMetadata = {
90
- eventType: "finished",
91
- ...startMetadata,
92
- finishTimestamp: new Date(),
93
- durationInMs: durationMeasurement.durationInMs,
94
- };
95
- eventSource.notify({
96
- ...finishMetadata,
97
- result: {
98
- status: "success",
99
- ...getResult(),
100
- },
101
- });
102
139
  })();
103
140
  return responseQueue;
104
141
  });
@@ -4,11 +4,13 @@ exports.generateImage = void 0;
4
4
  const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
5
5
  const ImageGenerationPromise_js_1 = require("./ImageGenerationPromise.cjs");
6
6
  /**
7
- * Generates a base64-encoded image using a prompt.
8
- * The prompt format depends on the model.
9
- * For example, OpenAI image models expect a string prompt,
7
+ * Generates an image using a prompt.
8
+ *
9
+ * The prompt depends on the model. For example, OpenAI image models expect a string prompt,
10
10
  * and Stability AI models expect an array of text prompts with optional weights.
11
11
  *
12
+ * @see https://modelfusion.dev/guide/function/generate-image
13
+ *
12
14
  * @example
13
15
  * const image = await generateImage(
14
16
  * new StabilityImageGenerationModel(...),
@@ -17,6 +19,13 @@ const ImageGenerationPromise_js_1 = require("./ImageGenerationPromise.cjs");
17
19
  * { text: "style of early 19th century painting", weight: 0.5 },
18
20
  * ]
19
21
  * );
22
+ *
23
+ * @param {ImageGenerationModel<PROMPT, ImageGenerationModelSettings>} model - The image generation model to be used.
24
+ * @param {PROMPT} prompt - The prompt to be used for image generation.
25
+ * @param {FunctionOptions} [options] - Optional settings for the function.
26
+ *
27
+ * @returns {ImageGenerationPromise} - Returns a promise that resolves to the generated image.
28
+ * The image is a Buffer containing the image data in PNG format.
20
29
  */
21
30
  function generateImage(model, prompt, options) {
22
31
  return new ImageGenerationPromise_js_1.ImageGenerationPromise((0, executeStandardCall_js_1.executeStandardCall)({
@@ -2,11 +2,13 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
2
2
  import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGenerationModel.js";
3
3
  import { ImageGenerationPromise } from "./ImageGenerationPromise.js";
4
4
  /**
5
- * Generates a base64-encoded image using a prompt.
6
- * The prompt format depends on the model.
7
- * For example, OpenAI image models expect a string prompt,
5
+ * Generates an image using a prompt.
6
+ *
7
+ * The prompt depends on the model. For example, OpenAI image models expect a string prompt,
8
8
  * and Stability AI models expect an array of text prompts with optional weights.
9
9
  *
10
+ * @see https://modelfusion.dev/guide/function/generate-image
11
+ *
10
12
  * @example
11
13
  * const image = await generateImage(
12
14
  * new StabilityImageGenerationModel(...),
@@ -15,5 +17,12 @@ import { ImageGenerationPromise } from "./ImageGenerationPromise.js";
15
17
  * { text: "style of early 19th century painting", weight: 0.5 },
16
18
  * ]
17
19
  * );
20
+ *
21
+ * @param {ImageGenerationModel<PROMPT, ImageGenerationModelSettings>} model - The image generation model to be used.
22
+ * @param {PROMPT} prompt - The prompt to be used for image generation.
23
+ * @param {FunctionOptions} [options] - Optional settings for the function.
24
+ *
25
+ * @returns {ImageGenerationPromise} - Returns a promise that resolves to the generated image.
26
+ * The image is a Buffer containing the image data in PNG format.
18
27
  */
19
28
  export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions): ImageGenerationPromise;
@@ -1,11 +1,13 @@
1
1
  import { executeStandardCall } from "../executeStandardCall.js";
2
2
  import { ImageGenerationPromise } from "./ImageGenerationPromise.js";
3
3
  /**
4
- * Generates a base64-encoded image using a prompt.
5
- * The prompt format depends on the model.
6
- * For example, OpenAI image models expect a string prompt,
4
+ * Generates an image using a prompt.
5
+ *
6
+ * The prompt depends on the model. For example, OpenAI image models expect a string prompt,
7
7
  * and Stability AI models expect an array of text prompts with optional weights.
8
8
  *
9
+ * @see https://modelfusion.dev/guide/function/generate-image
10
+ *
9
11
  * @example
10
12
  * const image = await generateImage(
11
13
  * new StabilityImageGenerationModel(...),
@@ -14,6 +16,13 @@ import { ImageGenerationPromise } from "./ImageGenerationPromise.js";
14
16
  * { text: "style of early 19th century painting", weight: 0.5 },
15
17
  * ]
16
18
  * );
19
+ *
20
+ * @param {ImageGenerationModel<PROMPT, ImageGenerationModelSettings>} model - The image generation model to be used.
21
+ * @param {PROMPT} prompt - The prompt to be used for image generation.
22
+ * @param {FunctionOptions} [options] - Optional settings for the function.
23
+ *
24
+ * @returns {ImageGenerationPromise} - Returns a promise that resolves to the generated image.
25
+ * The image is a Buffer containing the image data in PNG format.
17
26
  */
18
27
  export function generateImage(model, prompt, options) {
19
28
  return new ImageGenerationPromise(executeStandardCall({
@@ -4,7 +4,22 @@ exports.generateSpeech = void 0;
4
4
  const ModelFunctionPromise_js_1 = require("../ModelFunctionPromise.cjs");
5
5
  const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
6
6
  /**
7
- * Synthesizes speech from text.
7
+ * Synthesizes speech from text. Also called text-to-speech (TTS).
8
+ *
9
+ * @see https://modelfusion.dev/guide/function/generate-speech
10
+ *
11
+ * @example
12
+ * const speech = await generateSpeech(
13
+ * new LmntSpeechModel(...),
14
+ * "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
15
+ * "as The Rolling Stones unveil 'Hackney Diamonds.'"
16
+ * );
17
+ *
18
+ * @param {SpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
19
+ * @param {string} text - The text to be converted to speech.
20
+ * @param {FunctionOptions} [options] - Optional function options.
21
+ *
22
+ * @returns {ModelFunctionPromise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
8
23
  */
9
24
  function generateSpeech(model, text, options) {
10
25
  return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
@@ -3,6 +3,21 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
3
3
  import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
4
4
  import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGenerationModel.js";
5
5
  /**
6
- * Synthesizes speech from text.
6
+ * Synthesizes speech from text. Also called text-to-speech (TTS).
7
+ *
8
+ * @see https://modelfusion.dev/guide/function/generate-speech
9
+ *
10
+ * @example
11
+ * const speech = await generateSpeech(
12
+ * new LmntSpeechModel(...),
13
+ * "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
14
+ * "as The Rolling Stones unveil 'Hackney Diamonds.'"
15
+ * );
16
+ *
17
+ * @param {SpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
18
+ * @param {string} text - The text to be converted to speech.
19
+ * @param {FunctionOptions} [options] - Optional function options.
20
+ *
21
+ * @returns {ModelFunctionPromise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
7
22
  */
8
23
  export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenerationModelSettings>, text: string, options?: FunctionOptions): ModelFunctionPromise<Buffer>;
@@ -1,7 +1,22 @@
1
1
  import { ModelFunctionPromise } from "../ModelFunctionPromise.js";
2
2
  import { executeStandardCall } from "../executeStandardCall.js";
3
3
  /**
4
- * Synthesizes speech from text.
4
+ * Synthesizes speech from text. Also called text-to-speech (TTS).
5
+ *
6
+ * @see https://modelfusion.dev/guide/function/generate-speech
7
+ *
8
+ * @example
9
+ * const speech = await generateSpeech(
10
+ * new LmntSpeechModel(...),
11
+ * "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
12
+ * "as The Rolling Stones unveil 'Hackney Diamonds.'"
13
+ * );
14
+ *
15
+ * @param {SpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
16
+ * @param {string} text - The text to be converted to speech.
17
+ * @param {FunctionOptions} [options] - Optional function options.
18
+ *
19
+ * @returns {ModelFunctionPromise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
5
20
  */
6
21
  export function generateSpeech(model, text, options) {
7
22
  return new ModelFunctionPromise(executeStandardCall({
@@ -5,7 +5,28 @@ const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
5
5
  const AsyncIterableResultPromise_js_1 = require("../AsyncIterableResultPromise.cjs");
6
6
  const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
7
7
  /**
8
- * Synthesizes speech from text.
8
+ * Stream synthesized speech from text. Also called text-to-speech (TTS).
9
+ * Duplex streaming where both the input and output are streamed is supported.
10
+ *
11
+ * @see https://modelfusion.dev/guide/function/generate-speech
12
+ *
13
+ * @example
14
+ * const textStream = await streamText(...);
15
+ *
16
+ * const speechStream = await streamSpeech(
17
+ * new ElevenLabsSpeechModel(...),
18
+ * textStream
19
+ * );
20
+ *
21
+ * for await (const speechPart of speechStream) {
22
+ * // ...
23
+ * }
24
+ *
25
+ * @param {StreamingSpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
26
+ * @param {AsyncIterable<string> | string} text - The text to be converted to speech. Can be a string or an async iterable of strings.
27
+ * @param {FunctionOptions} [options] - Optional function options.
28
+ *
29
+ * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
9
30
  */
10
31
  function streamSpeech(model, text, options) {
11
32
  let textStream;
@@ -3,6 +3,27 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
3
3
  import { AsyncIterableResultPromise } from "../AsyncIterableResultPromise.js";
4
4
  import { StreamingSpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGenerationModel.js";
5
5
  /**
6
- * Synthesizes speech from text.
6
+ * Stream synthesized speech from text. Also called text-to-speech (TTS).
7
+ * Duplex streaming where both the input and output are streamed is supported.
8
+ *
9
+ * @see https://modelfusion.dev/guide/function/generate-speech
10
+ *
11
+ * @example
12
+ * const textStream = await streamText(...);
13
+ *
14
+ * const speechStream = await streamSpeech(
15
+ * new ElevenLabsSpeechModel(...),
16
+ * textStream
17
+ * );
18
+ *
19
+ * for await (const speechPart of speechStream) {
20
+ * // ...
21
+ * }
22
+ *
23
+ * @param {StreamingSpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
24
+ * @param {AsyncIterable<string> | string} text - The text to be converted to speech. Can be a string or an async iterable of strings.
25
+ * @param {FunctionOptions} [options] - Optional function options.
26
+ *
27
+ * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
7
28
  */
8
29
  export declare function streamSpeech(model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>, text: AsyncIterable<string> | string, options?: FunctionOptions): AsyncIterableResultPromise<Buffer>;
@@ -2,7 +2,28 @@ import { AsyncQueue } from "../../util/AsyncQueue.js";
2
2
  import { AsyncIterableResultPromise } from "../AsyncIterableResultPromise.js";
3
3
  import { executeStreamCall } from "../executeStreamCall.js";
4
4
  /**
5
- * Synthesizes speech from text.
5
+ * Stream synthesized speech from text. Also called text-to-speech (TTS).
6
+ * Duplex streaming where both the input and output are streamed is supported.
7
+ *
8
+ * @see https://modelfusion.dev/guide/function/generate-speech
9
+ *
10
+ * @example
11
+ * const textStream = await streamText(...);
12
+ *
13
+ * const speechStream = await streamSpeech(
14
+ * new ElevenLabsSpeechModel(...),
15
+ * textStream
16
+ * );
17
+ *
18
+ * for await (const speechPart of speechStream) {
19
+ * // ...
20
+ * }
21
+ *
22
+ * @param {StreamingSpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
23
+ * @param {AsyncIterable<string> | string} text - The text to be converted to speech. Can be a string or an async iterable of strings.
24
+ * @param {FunctionOptions} [options] - Optional function options.
25
+ *
26
+ * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
6
27
  */
7
28
  export function streamSpeech(model, text, options) {
8
29
  let textStream;