modelfusion 0.50.0 → 0.52.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -8
- package/core/api/loadApiKey.cjs +2 -2
- package/core/api/loadApiKey.js +2 -2
- package/event-source/index.cjs +0 -1
- package/event-source/index.d.ts +0 -1
- package/event-source/index.js +0 -1
- package/event-source/readEventSourceStream.cjs +1 -1
- package/event-source/readEventSourceStream.js +1 -1
- package/model-function/executeStreamCall.cjs +9 -3
- package/model-function/executeStreamCall.js +9 -3
- package/model-function/generate-speech/streamSpeech.cjs +1 -1
- package/model-function/generate-speech/streamSpeech.js +1 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +1 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
- package/model-provider/elevenlabs/ElevenLabsError.cjs +0 -1
- package/model-provider/elevenlabs/ElevenLabsError.js +0 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +34 -6
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +6 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +34 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +1 -1
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +1 -1
- package/model-provider/lmnt/LmntError.cjs +0 -1
- package/model-provider/lmnt/LmntError.js +0 -1
- package/model-provider/openai/OpenAICompletionModel.cjs +1 -1
- package/model-provider/openai/OpenAICompletionModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +1 -1
- package/package.json +2 -2
- package/util/AsyncQueue.cjs +106 -0
- package/util/AsyncQueue.d.ts +49 -0
- package/util/AsyncQueue.js +102 -0
- package/util/AsyncQueue.test.cjs +138 -0
- package/util/AsyncQueue.test.d.ts +1 -0
- package/util/AsyncQueue.test.js +136 -0
- package/util/delay.cjs +7 -0
- package/util/delay.d.ts +1 -0
- package/util/delay.js +3 -0
- package/util/index.cjs +1 -0
- package/util/index.d.ts +1 -0
- package/util/index.js +1 -0
- package/event-source/AsyncQueue.cjs +0 -67
- package/event-source/AsyncQueue.d.ts +0 -8
- package/event-source/AsyncQueue.js +0 -63
package/README.md
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
# ModelFusion
|
2
2
|
|
3
|
-
> ###
|
3
|
+
> ### The TypeScript library for building multi-modal AI applications.
|
4
4
|
|
5
5
|
[](https://www.npmjs.com/package/modelfusion)
|
6
6
|
[](https://opensource.org/licenses/MIT)
|
@@ -10,12 +10,9 @@
|
|
10
10
|
|
11
11
|
[Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Documentation](#documentation) | [Examples](#more-examples) | [Contributing](#contributing) | [modelfusion.dev](https://modelfusion.dev)
|
12
12
|
|
13
|
-
> [!NOTE]
|
14
|
-
> ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes, because I am still exploring the API design. Feedback and suggestions are welcome.
|
15
|
-
|
16
13
|
## Introduction
|
17
14
|
|
18
|
-
ModelFusion is a library for building AI applications, chatbots, and agents.
|
15
|
+
**ModelFusion** is a TypeScript library for building AI applications, chatbots, and agents.
|
19
16
|
|
20
17
|
- **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, text-to-speech, speech-to-text, and embedding models.
|
21
18
|
- **Streaming**: ModelFusion supports streaming for many generation models, e.g. text streaming, structure streaming, and full duplex speech streaming.
|
@@ -26,6 +23,9 @@ ModelFusion is a library for building AI applications, chatbots, and agents. Her
|
|
26
23
|
|
27
24
|
## Quick Install
|
28
25
|
|
26
|
+
> [!NOTE]
|
27
|
+
> ModelFusion is in its initial development phase. The main API is now mostly stable, but until version 1.0 there may be minor breaking changes. Feedback and suggestions are welcome.
|
28
|
+
|
29
29
|
```sh
|
30
30
|
npm install modelfusion
|
31
31
|
```
|
@@ -65,8 +65,8 @@ const textStream = await streamText(
|
|
65
65
|
"Write a short story about a robot learning to love:\n\n"
|
66
66
|
);
|
67
67
|
|
68
|
-
for await (const
|
69
|
-
process.stdout.write(
|
68
|
+
for await (const textPart of textStream) {
|
69
|
+
process.stdout.write(textPart);
|
70
70
|
}
|
71
71
|
```
|
72
72
|
|
@@ -118,7 +118,7 @@ const textStream = await streamText(/* ... */);
|
|
118
118
|
const speechStream = await streamSpeech(
|
119
119
|
new ElevenLabsSpeechModel({
|
120
120
|
voice: "pNInz6obpgDQGcFmaJgB", // Adam
|
121
|
-
|
121
|
+
optimizeStreamingLatency: 1,
|
122
122
|
voiceSettings: { stability: 1, similarityBoost: 0.35 },
|
123
123
|
generationConfig: {
|
124
124
|
chunkLengthSchedule: [50, 90, 120, 150, 200],
|
package/core/api/loadApiKey.cjs
CHANGED
@@ -6,11 +6,11 @@ function loadApiKey({ apiKey, environmentVariableName, apiKeyParameterName = "ap
|
|
6
6
|
return apiKey;
|
7
7
|
}
|
8
8
|
if (typeof process === "undefined") {
|
9
|
-
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`);
|
9
|
+
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter into the API configuration. Environment variables is not supported in this environment.`);
|
10
10
|
}
|
11
11
|
apiKey = process.env[environmentVariableName];
|
12
12
|
if (apiKey == null) {
|
13
|
-
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or set it as an environment variable named ${environmentVariableName}.`);
|
13
|
+
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter into the API configuration or set it as an environment variable named ${environmentVariableName}.`);
|
14
14
|
}
|
15
15
|
return apiKey;
|
16
16
|
}
|
package/core/api/loadApiKey.js
CHANGED
@@ -3,11 +3,11 @@ export function loadApiKey({ apiKey, environmentVariableName, apiKeyParameterNam
|
|
3
3
|
return apiKey;
|
4
4
|
}
|
5
5
|
if (typeof process === "undefined") {
|
6
|
-
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`);
|
6
|
+
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter into the API configuration. Environment variables is not supported in this environment.`);
|
7
7
|
}
|
8
8
|
apiKey = process.env[environmentVariableName];
|
9
9
|
if (apiKey == null) {
|
10
|
-
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or set it as an environment variable named ${environmentVariableName}.`);
|
10
|
+
throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter into the API configuration or set it as an environment variable named ${environmentVariableName}.`);
|
11
11
|
}
|
12
12
|
return apiKey;
|
13
13
|
}
|
package/event-source/index.cjs
CHANGED
@@ -14,7 +14,6 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
14
14
|
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
15
15
|
};
|
16
16
|
Object.defineProperty(exports, "__esModule", { value: true });
|
17
|
-
__exportStar(require("./AsyncQueue.cjs"), exports);
|
18
17
|
__exportStar(require("./createEventSourceStream.cjs"), exports);
|
19
18
|
__exportStar(require("./readEventSource.cjs"), exports);
|
20
19
|
__exportStar(require("./readEventSourceStream.cjs"), exports);
|
package/event-source/index.d.ts
CHANGED
package/event-source/index.js
CHANGED
@@ -2,7 +2,7 @@
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
3
3
|
exports.readEventSourceStream = void 0;
|
4
4
|
const parseJSON_js_1 = require("../util/parseJSON.cjs");
|
5
|
-
const AsyncQueue_js_1 = require("./AsyncQueue.cjs");
|
5
|
+
const AsyncQueue_js_1 = require("../util/AsyncQueue.cjs");
|
6
6
|
const parseEventSourceStream_js_1 = require("./parseEventSourceStream.cjs");
|
7
7
|
function readEventSourceStream({ stream, schema, errorHandler, }) {
|
8
8
|
const queue = new AsyncQueue_js_1.AsyncQueue();
|
@@ -1,5 +1,5 @@
|
|
1
1
|
import { safeParseJsonWithSchema } from "../util/parseJSON.js";
|
2
|
-
import { AsyncQueue } from "./AsyncQueue.js";
|
2
|
+
import { AsyncQueue } from "../util/AsyncQueue.js";
|
3
3
|
import { parseEventSourceStream } from "./parseEventSourceStream.js";
|
4
4
|
export function readEventSourceStream({ stream, schema, errorHandler, }) {
|
5
5
|
const queue = new AsyncQueue();
|
@@ -8,6 +8,7 @@ const GlobalFunctionObservers_js_1 = require("../core/GlobalFunctionObservers.cj
|
|
8
8
|
const AbortError_js_1 = require("../core/api/AbortError.cjs");
|
9
9
|
const getFunctionCallLogger_js_1 = require("../core/getFunctionCallLogger.cjs");
|
10
10
|
const getRun_js_1 = require("../core/getRun.cjs");
|
11
|
+
const AsyncQueue_js_1 = require("../util/AsyncQueue.cjs");
|
11
12
|
const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
|
12
13
|
const runSafe_js_1 = require("../util/runSafe.cjs");
|
13
14
|
async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }) {
|
@@ -50,7 +51,10 @@ async function executeStreamCall({ model, options, input, functionType, startStr
|
|
50
51
|
run,
|
51
52
|
parentCallId: startMetadata.callId,
|
52
53
|
});
|
53
|
-
|
54
|
+
// Return a queue that can be iterated over several times:
|
55
|
+
const responseQueue = new AsyncQueue_js_1.AsyncQueue();
|
56
|
+
// run async:
|
57
|
+
(async function () {
|
54
58
|
for await (const event of deltaIterable) {
|
55
59
|
if (event?.type === "error") {
|
56
60
|
const error = event.error;
|
@@ -74,16 +78,17 @@ async function executeStreamCall({ model, options, input, functionType, startStr
|
|
74
78
|
if (event?.type === "delta") {
|
75
79
|
const value = processDelta(event);
|
76
80
|
if (value !== undefined) {
|
77
|
-
|
81
|
+
responseQueue.push(value);
|
78
82
|
}
|
79
83
|
}
|
80
84
|
}
|
81
85
|
if (processFinished != null) {
|
82
86
|
const value = processFinished();
|
83
87
|
if (value !== undefined) {
|
84
|
-
|
88
|
+
responseQueue.push(value);
|
85
89
|
}
|
86
90
|
}
|
91
|
+
responseQueue.close();
|
87
92
|
const finishMetadata = {
|
88
93
|
eventType: "finished",
|
89
94
|
...startMetadata,
|
@@ -98,6 +103,7 @@ async function executeStreamCall({ model, options, input, functionType, startStr
|
|
98
103
|
},
|
99
104
|
});
|
100
105
|
})();
|
106
|
+
return responseQueue;
|
101
107
|
});
|
102
108
|
if (!result.ok) {
|
103
109
|
const finishMetadata = {
|
@@ -5,6 +5,7 @@ import { getGlobalFunctionObservers } from "../core/GlobalFunctionObservers.js";
|
|
5
5
|
import { AbortError } from "../core/api/AbortError.js";
|
6
6
|
import { getFunctionCallLogger } from "../core/getFunctionCallLogger.js";
|
7
7
|
import { getRun } from "../core/getRun.js";
|
8
|
+
import { AsyncQueue } from "../util/AsyncQueue.js";
|
8
9
|
import { startDurationMeasurement } from "../util/DurationMeasurement.js";
|
9
10
|
import { runSafe } from "../util/runSafe.js";
|
10
11
|
export async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, getResult, }) {
|
@@ -47,7 +48,10 @@ export async function executeStreamCall({ model, options, input, functionType, s
|
|
47
48
|
run,
|
48
49
|
parentCallId: startMetadata.callId,
|
49
50
|
});
|
50
|
-
|
51
|
+
// Return a queue that can be iterated over several times:
|
52
|
+
const responseQueue = new AsyncQueue();
|
53
|
+
// run async:
|
54
|
+
(async function () {
|
51
55
|
for await (const event of deltaIterable) {
|
52
56
|
if (event?.type === "error") {
|
53
57
|
const error = event.error;
|
@@ -71,16 +75,17 @@ export async function executeStreamCall({ model, options, input, functionType, s
|
|
71
75
|
if (event?.type === "delta") {
|
72
76
|
const value = processDelta(event);
|
73
77
|
if (value !== undefined) {
|
74
|
-
|
78
|
+
responseQueue.push(value);
|
75
79
|
}
|
76
80
|
}
|
77
81
|
}
|
78
82
|
if (processFinished != null) {
|
79
83
|
const value = processFinished();
|
80
84
|
if (value !== undefined) {
|
81
|
-
|
85
|
+
responseQueue.push(value);
|
82
86
|
}
|
83
87
|
}
|
88
|
+
responseQueue.close();
|
84
89
|
const finishMetadata = {
|
85
90
|
eventType: "finished",
|
86
91
|
...startMetadata,
|
@@ -95,6 +100,7 @@ export async function executeStreamCall({ model, options, input, functionType, s
|
|
95
100
|
},
|
96
101
|
});
|
97
102
|
})();
|
103
|
+
return responseQueue;
|
98
104
|
});
|
99
105
|
if (!result.ok) {
|
100
106
|
const finishMetadata = {
|
@@ -1,7 +1,7 @@
|
|
1
1
|
"use strict";
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
3
3
|
exports.streamSpeech = void 0;
|
4
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
4
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
5
5
|
const AsyncIterableResultPromise_js_1 = require("../AsyncIterableResultPromise.cjs");
|
6
6
|
const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
|
7
7
|
/**
|
@@ -4,10 +4,10 @@ exports.AnthropicTextGenerationResponseFormat = exports.AnthropicTextGenerationM
|
|
4
4
|
const zod_1 = require("zod");
|
5
5
|
const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
|
6
6
|
const postToApi_js_1 = require("../../core/api/postToApi.cjs");
|
7
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
8
7
|
const parseEventSourceStream_js_1 = require("../../event-source/parseEventSourceStream.cjs");
|
9
8
|
const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
|
10
9
|
const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
|
10
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
11
11
|
const parseJSON_js_1 = require("../../util/parseJSON.cjs");
|
12
12
|
const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
|
13
13
|
const AnthropicError_js_1 = require("./AnthropicError.cjs");
|
@@ -1,10 +1,10 @@
|
|
1
1
|
import { z } from "zod";
|
2
2
|
import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
|
3
3
|
import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
|
4
|
-
import { AsyncQueue } from "../../event-source/AsyncQueue.js";
|
5
4
|
import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
|
6
5
|
import { AbstractModel } from "../../model-function/AbstractModel.js";
|
7
6
|
import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
|
7
|
+
import { AsyncQueue } from "../../util/AsyncQueue.js";
|
8
8
|
import { parseJsonWithZod } from "../../util/parseJSON.js";
|
9
9
|
import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
|
10
10
|
import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
|
@@ -4,7 +4,7 @@ exports.CohereTextGenerationResponseFormat = exports.CohereTextGenerationModel =
|
|
4
4
|
const zod_1 = require("zod");
|
5
5
|
const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
|
6
6
|
const postToApi_js_1 = require("../../core/api/postToApi.cjs");
|
7
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
7
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
8
8
|
const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
|
9
9
|
const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
|
10
10
|
const TextPromptFormat_js_1 = require("../../model-function/generate-text/TextPromptFormat.cjs");
|
@@ -1,7 +1,7 @@
|
|
1
1
|
import { z } from "zod";
|
2
2
|
import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
|
3
3
|
import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
|
4
|
-
import { AsyncQueue } from "../../event-source/AsyncQueue.js";
|
4
|
+
import { AsyncQueue } from "../../util/AsyncQueue.js";
|
5
5
|
import { AbstractModel } from "../../model-function/AbstractModel.js";
|
6
6
|
import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
|
7
7
|
import { mapChatPromptToTextFormat, mapInstructionPromptToTextFormat, } from "../../model-function/generate-text/TextPromptFormat.js";
|
@@ -5,7 +5,6 @@ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
|
|
5
5
|
const failedElevenLabsCallResponseHandler = async ({ response, url, requestBodyValues }) => {
|
6
6
|
const responseBody = await response.text();
|
7
7
|
try {
|
8
|
-
// TODO implement ElevenLabsError
|
9
8
|
return new ApiCallError_js_1.ApiCallError({
|
10
9
|
message: responseBody,
|
11
10
|
statusCode: response.status,
|
@@ -2,7 +2,6 @@ import { ApiCallError } from "../../core/api/ApiCallError.js";
|
|
2
2
|
export const failedElevenLabsCallResponseHandler = async ({ response, url, requestBodyValues }) => {
|
3
3
|
const responseBody = await response.text();
|
4
4
|
try {
|
5
|
-
// TODO implement ElevenLabsError
|
6
5
|
return new ApiCallError({
|
7
6
|
message: responseBody,
|
8
7
|
statusCode: response.status,
|
@@ -4,7 +4,7 @@ exports.ElevenLabsSpeechModel = void 0;
|
|
4
4
|
const zod_1 = require("zod");
|
5
5
|
const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
|
6
6
|
const postToApi_js_1 = require("../../core/api/postToApi.cjs");
|
7
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
7
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
8
8
|
const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
|
9
9
|
const SimpleWebSocket_js_1 = require("../../util/SimpleWebSocket.cjs");
|
10
10
|
const parseJSON_js_1 = require("../../util/parseJSON.cjs");
|
@@ -15,11 +15,14 @@ const elevenLabsModels = [
|
|
15
15
|
"eleven_multilingual_v1",
|
16
16
|
"eleven_monolingual_v1",
|
17
17
|
];
|
18
|
-
const defaultModel = "
|
18
|
+
const defaultModel = "eleven_monolingual_v1";
|
19
19
|
/**
|
20
20
|
* Synthesize speech using the ElevenLabs Text to Speech API.
|
21
21
|
*
|
22
|
-
*
|
22
|
+
* Both regular text-to-speech and full duplex text-to-speech streaming are supported.
|
23
|
+
*
|
24
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech
|
25
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
|
23
26
|
*/
|
24
27
|
class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
|
25
28
|
constructor(settings) {
|
@@ -84,7 +87,11 @@ class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
|
|
84
87
|
]);
|
85
88
|
const queue = new AsyncQueue_js_1.AsyncQueue();
|
86
89
|
const model = this.settings.model ?? defaultModel;
|
87
|
-
const socket = await (0, SimpleWebSocket_js_1.createSimpleWebSocket)(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input
|
90
|
+
const socket = await (0, SimpleWebSocket_js_1.createSimpleWebSocket)(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input${assembleQuery({
|
91
|
+
model_id: model,
|
92
|
+
optimize_streaming_latency: this.settings.optimizeStreamingLatency,
|
93
|
+
output_format: this.settings.outputFormat,
|
94
|
+
})}`);
|
88
95
|
socket.onopen = async () => {
|
89
96
|
const api = this.settings.api ?? new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration();
|
90
97
|
// send begin-of-stream (BOS) message:
|
@@ -158,9 +165,12 @@ class ElevenLabsSpeechModel extends AbstractModel_js_1.AbstractModel {
|
|
158
165
|
}
|
159
166
|
}
|
160
167
|
exports.ElevenLabsSpeechModel = ElevenLabsSpeechModel;
|
161
|
-
async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, voiceSettings, }) {
|
168
|
+
async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration_js_1.ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, optimizeStreamingLatency, outputFormat, voiceSettings, }) {
|
162
169
|
return (0, postToApi_js_1.postJsonToApi)({
|
163
|
-
url: api.assembleUrl(`/text-to-speech/${voiceId}
|
170
|
+
url: api.assembleUrl(`/text-to-speech/${voiceId}${assembleQuery({
|
171
|
+
optimize_streaming_latency: optimizeStreamingLatency,
|
172
|
+
output_format: outputFormat,
|
173
|
+
})}`),
|
164
174
|
headers: api.headers,
|
165
175
|
body: {
|
166
176
|
text,
|
@@ -172,6 +182,24 @@ async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfigurat
|
|
172
182
|
abortSignal,
|
173
183
|
});
|
174
184
|
}
|
185
|
+
function assembleQuery(parameters) {
|
186
|
+
let query = "";
|
187
|
+
let hasQuestionMark = false;
|
188
|
+
for (const [key, value] of Object.entries(parameters)) {
|
189
|
+
if (value == null) {
|
190
|
+
continue;
|
191
|
+
}
|
192
|
+
if (!hasQuestionMark) {
|
193
|
+
query += "?";
|
194
|
+
hasQuestionMark = true;
|
195
|
+
}
|
196
|
+
else {
|
197
|
+
query += "&";
|
198
|
+
}
|
199
|
+
query += `${key}=${value}`;
|
200
|
+
}
|
201
|
+
return query;
|
202
|
+
}
|
175
203
|
function toApiVoiceSettings(voiceSettings) {
|
176
204
|
return voiceSettings != null
|
177
205
|
? {
|
@@ -11,6 +11,8 @@ export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSett
|
|
11
11
|
};
|
12
12
|
voice: string;
|
13
13
|
model?: (typeof elevenLabsModels)[number] | (string & {});
|
14
|
+
optimizeStreamingLatency?: 0 | 1 | 2 | 3 | 4;
|
15
|
+
outputFormat?: "mp3_44100" | "pcm_16000" | "pcm_22050" | "pcm_24000" | "pcm_44100";
|
14
16
|
voiceSettings?: {
|
15
17
|
stability: number;
|
16
18
|
similarityBoost: number;
|
@@ -24,7 +26,10 @@ export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSett
|
|
24
26
|
/**
|
25
27
|
* Synthesize speech using the ElevenLabs Text to Speech API.
|
26
28
|
*
|
27
|
-
*
|
29
|
+
* Both regular text-to-speech and full duplex text-to-speech streaming are supported.
|
30
|
+
*
|
31
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech
|
32
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
|
28
33
|
*/
|
29
34
|
export declare class ElevenLabsSpeechModel extends AbstractModel<ElevenLabsSpeechModelSettings> implements StreamingSpeechGenerationModel<ElevenLabsSpeechModelSettings> {
|
30
35
|
constructor(settings: ElevenLabsSpeechModelSettings);
|
@@ -1,7 +1,7 @@
|
|
1
1
|
import { z } from "zod";
|
2
2
|
import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
|
3
3
|
import { createAudioMpegResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
|
4
|
-
import { AsyncQueue } from "../../event-source/AsyncQueue.js";
|
4
|
+
import { AsyncQueue } from "../../util/AsyncQueue.js";
|
5
5
|
import { AbstractModel } from "../../model-function/AbstractModel.js";
|
6
6
|
import { createSimpleWebSocket } from "../../util/SimpleWebSocket.js";
|
7
7
|
import { safeParseJsonWithZod } from "../../util/parseJSON.js";
|
@@ -12,11 +12,14 @@ const elevenLabsModels = [
|
|
12
12
|
"eleven_multilingual_v1",
|
13
13
|
"eleven_monolingual_v1",
|
14
14
|
];
|
15
|
-
const defaultModel = "
|
15
|
+
const defaultModel = "eleven_monolingual_v1";
|
16
16
|
/**
|
17
17
|
* Synthesize speech using the ElevenLabs Text to Speech API.
|
18
18
|
*
|
19
|
-
*
|
19
|
+
* Both regular text-to-speech and full duplex text-to-speech streaming are supported.
|
20
|
+
*
|
21
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech
|
22
|
+
* @see https://docs.elevenlabs.io/api-reference/text-to-speech-websockets
|
20
23
|
*/
|
21
24
|
export class ElevenLabsSpeechModel extends AbstractModel {
|
22
25
|
constructor(settings) {
|
@@ -81,7 +84,11 @@ export class ElevenLabsSpeechModel extends AbstractModel {
|
|
81
84
|
]);
|
82
85
|
const queue = new AsyncQueue();
|
83
86
|
const model = this.settings.model ?? defaultModel;
|
84
|
-
const socket = await createSimpleWebSocket(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input
|
87
|
+
const socket = await createSimpleWebSocket(`wss://api.elevenlabs.io/v1/text-to-speech/${this.settings.voice}/stream-input${assembleQuery({
|
88
|
+
model_id: model,
|
89
|
+
optimize_streaming_latency: this.settings.optimizeStreamingLatency,
|
90
|
+
output_format: this.settings.outputFormat,
|
91
|
+
})}`);
|
85
92
|
socket.onopen = async () => {
|
86
93
|
const api = this.settings.api ?? new ElevenLabsApiConfiguration();
|
87
94
|
// send begin-of-stream (BOS) message:
|
@@ -154,9 +161,12 @@ export class ElevenLabsSpeechModel extends AbstractModel {
|
|
154
161
|
});
|
155
162
|
}
|
156
163
|
}
|
157
|
-
async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, voiceSettings, }) {
|
164
|
+
async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfiguration(), abortSignal, text, voiceId, modelId, optimizeStreamingLatency, outputFormat, voiceSettings, }) {
|
158
165
|
return postJsonToApi({
|
159
|
-
url: api.assembleUrl(`/text-to-speech/${voiceId}
|
166
|
+
url: api.assembleUrl(`/text-to-speech/${voiceId}${assembleQuery({
|
167
|
+
optimize_streaming_latency: optimizeStreamingLatency,
|
168
|
+
output_format: outputFormat,
|
169
|
+
})}`),
|
160
170
|
headers: api.headers,
|
161
171
|
body: {
|
162
172
|
text,
|
@@ -168,6 +178,24 @@ async function callElevenLabsTextToSpeechAPI({ api = new ElevenLabsApiConfigurat
|
|
168
178
|
abortSignal,
|
169
179
|
});
|
170
180
|
}
|
181
|
+
function assembleQuery(parameters) {
|
182
|
+
let query = "";
|
183
|
+
let hasQuestionMark = false;
|
184
|
+
for (const [key, value] of Object.entries(parameters)) {
|
185
|
+
if (value == null) {
|
186
|
+
continue;
|
187
|
+
}
|
188
|
+
if (!hasQuestionMark) {
|
189
|
+
query += "?";
|
190
|
+
hasQuestionMark = true;
|
191
|
+
}
|
192
|
+
else {
|
193
|
+
query += "&";
|
194
|
+
}
|
195
|
+
query += `${key}=${value}`;
|
196
|
+
}
|
197
|
+
return query;
|
198
|
+
}
|
171
199
|
function toApiVoiceSettings(voiceSettings) {
|
172
200
|
return voiceSettings != null
|
173
201
|
? {
|
@@ -4,7 +4,7 @@ exports.LlamaCppTextGenerationResponseFormat = exports.LlamaCppTextGenerationMod
|
|
4
4
|
const zod_1 = require("zod");
|
5
5
|
const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
|
6
6
|
const postToApi_js_1 = require("../../core/api/postToApi.cjs");
|
7
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
7
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
8
8
|
const parseEventSourceStream_js_1 = require("../../event-source/parseEventSourceStream.cjs");
|
9
9
|
const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
|
10
10
|
const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
|
@@ -1,7 +1,7 @@
|
|
1
1
|
import { z } from "zod";
|
2
2
|
import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
|
3
3
|
import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
|
4
|
-
import { AsyncQueue } from "../../event-source/AsyncQueue.js";
|
4
|
+
import { AsyncQueue } from "../../util/AsyncQueue.js";
|
5
5
|
import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
|
6
6
|
import { AbstractModel } from "../../model-function/AbstractModel.js";
|
7
7
|
import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
|
@@ -5,7 +5,6 @@ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
|
|
5
5
|
const failedLmntCallResponseHandler = async ({ response, url, requestBodyValues }) => {
|
6
6
|
const responseBody = await response.text();
|
7
7
|
try {
|
8
|
-
// TODO implement LmntError
|
9
8
|
return new ApiCallError_js_1.ApiCallError({
|
10
9
|
message: responseBody,
|
11
10
|
statusCode: response.status,
|
@@ -2,7 +2,6 @@ import { ApiCallError } from "../../core/api/ApiCallError.js";
|
|
2
2
|
export const failedLmntCallResponseHandler = async ({ response, url, requestBodyValues }) => {
|
3
3
|
const responseBody = await response.text();
|
4
4
|
try {
|
5
|
-
// TODO implement LmntError
|
6
5
|
return new ApiCallError({
|
7
6
|
message: responseBody,
|
8
7
|
statusCode: response.status,
|
@@ -4,7 +4,7 @@ exports.OpenAITextResponseFormat = exports.OpenAICompletionModel = exports.calcu
|
|
4
4
|
const zod_1 = require("zod");
|
5
5
|
const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
|
6
6
|
const postToApi_js_1 = require("../../core/api/postToApi.cjs");
|
7
|
-
const AsyncQueue_js_1 = require("../../event-source/AsyncQueue.cjs");
|
7
|
+
const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
|
8
8
|
const parseEventSourceStream_js_1 = require("../../event-source/parseEventSourceStream.cjs");
|
9
9
|
const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
|
10
10
|
const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
|
@@ -1,7 +1,7 @@
|
|
1
1
|
import { z } from "zod";
|
2
2
|
import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
|
3
3
|
import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
|
4
|
-
import { AsyncQueue } from "../../event-source/AsyncQueue.js";
|
4
|
+
import { AsyncQueue } from "../../util/AsyncQueue.js";
|
5
5
|
import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
|
6
6
|
import { AbstractModel } from "../../model-function/AbstractModel.js";
|
7
7
|
import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
|
@@ -2,7 +2,7 @@
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
3
3
|
exports.createOpenAIChatDeltaIterableQueue = void 0;
|
4
4
|
const zod_1 = require("zod");
|
5
|
-
const AsyncQueue_js_1 = require("../../../event-source/AsyncQueue.cjs");
|
5
|
+
const AsyncQueue_js_1 = require("../../../util/AsyncQueue.cjs");
|
6
6
|
const parseEventSourceStream_js_1 = require("../../../event-source/parseEventSourceStream.cjs");
|
7
7
|
const parseJSON_js_1 = require("../../../util/parseJSON.cjs");
|
8
8
|
const chatResponseStreamEventSchema = zod_1.z.object({
|
@@ -1,5 +1,5 @@
|
|
1
1
|
import { z } from "zod";
|
2
|
-
import { AsyncQueue } from "../../../event-source/AsyncQueue.js";
|
2
|
+
import { AsyncQueue } from "../../../util/AsyncQueue.js";
|
3
3
|
import { parseEventSourceStream } from "../../../event-source/parseEventSourceStream.js";
|
4
4
|
import { safeParseJsonWithZod } from "../../../util/parseJSON.js";
|
5
5
|
const chatResponseStreamEventSchema = z.object({
|
package/package.json
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
{
|
2
2
|
"name": "modelfusion",
|
3
3
|
"description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
|
4
|
-
"version": "0.50.0",
|
4
|
+
"version": "0.52.0",
|
5
5
|
"author": "Lars Grammel",
|
6
6
|
"license": "MIT",
|
7
7
|
"keywords": [
|
@@ -48,7 +48,7 @@
|
|
48
48
|
"build:esm": "tsc --outDir dist/",
|
49
49
|
"build:cjs": "tsc --outDir build/cjs/ -p tsconfig.cjs.json && node bin/prepare-cjs.js",
|
50
50
|
"test": "vitest run src",
|
51
|
-
"test-interactive": "vitest
|
51
|
+
"test-interactive": "vitest watch",
|
52
52
|
"dist": "npm run clean && npm run lint && npm run test && npm run build && npm run dist:copy-files",
|
53
53
|
"dist:copy-files": "copyfiles package.json README.md LICENSE dist"
|
54
54
|
},
|