modelfusion 0.121.2 → 0.123.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +78 -1
- package/README.md +127 -85
- package/index.cjs +0 -1
- package/index.d.ts +0 -1
- package/index.js +0 -1
- package/model-function/ModelCallEvent.d.ts +6 -2
- package/model-function/classify/Classifier.cjs +2 -0
- package/model-function/classify/Classifier.d.ts +10 -0
- package/model-function/classify/Classifier.js +1 -0
- package/model-function/classify/ClassifyEvent.cjs +2 -0
- package/model-function/classify/ClassifyEvent.d.ts +20 -0
- package/model-function/classify/ClassifyEvent.js +1 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.cjs +97 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.d.ts +40 -0
- package/model-function/classify/EmbeddingSimilarityClassifier.js +93 -0
- package/model-function/classify/classify.cjs +27 -0
- package/model-function/classify/classify.d.ts +17 -0
- package/model-function/classify/classify.js +23 -0
- package/{classifier → model-function/classify}/index.cjs +4 -1
- package/model-function/classify/index.d.ts +4 -0
- package/model-function/classify/index.js +4 -0
- package/model-function/embed/embed.cjs +14 -14
- package/model-function/embed/embed.d.ts +24 -18
- package/model-function/embed/embed.js +14 -14
- package/model-function/generate-image/generateImage.cjs +6 -6
- package/model-function/generate-image/generateImage.d.ts +12 -9
- package/model-function/generate-image/generateImage.js +6 -6
- package/model-function/generate-speech/generateSpeech.cjs +7 -7
- package/model-function/generate-speech/generateSpeech.d.ts +12 -9
- package/model-function/generate-speech/generateSpeech.js +7 -7
- package/model-function/generate-speech/streamSpeech.cjs +6 -6
- package/model-function/generate-speech/streamSpeech.d.ts +12 -8
- package/model-function/generate-speech/streamSpeech.js +6 -6
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
- package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +8 -8
- package/model-function/generate-structure/generateStructure.d.ts +17 -10
- package/model-function/generate-structure/generateStructure.js +8 -8
- package/model-function/generate-structure/streamStructure.cjs +6 -6
- package/model-function/generate-structure/streamStructure.d.ts +16 -10
- package/model-function/generate-structure/streamStructure.js +6 -6
- package/model-function/generate-text/generateText.cjs +6 -6
- package/model-function/generate-text/generateText.d.ts +12 -9
- package/model-function/generate-text/generateText.js +6 -6
- package/model-function/generate-text/streamText.cjs +6 -6
- package/model-function/generate-text/streamText.d.ts +12 -8
- package/model-function/generate-text/streamText.js +6 -6
- package/model-function/generate-transcription/generateTranscription.cjs +3 -3
- package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
- package/model-function/generate-transcription/generateTranscription.js +3 -3
- package/model-function/index.cjs +1 -0
- package/model-function/index.d.ts +1 -0
- package/model-function/index.js +1 -0
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
- package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
- package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
- package/model-provider/mistral/MistralChatModel.test.js +15 -8
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +13 -13
- package/model-provider/ollama/OllamaChatModel.d.ts +9 -9
- package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
- package/model-provider/ollama/OllamaChatModel.test.js +6 -1
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
- package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
- package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
- package/model-provider/openai/OpenAIChatModel.test.js +21 -14
- package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
- package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -12
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +5 -5
- package/tool/execute-tool/executeTool.d.ts +8 -4
- package/tool/execute-tool/executeTool.js +5 -5
- package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
- package/tool/execute-tool/safeExecuteToolCall.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
- package/tool/generate-tool-call/generateToolCall.cjs +7 -7
- package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
- package/tool/generate-tool-call/generateToolCall.js +7 -7
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
- package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
- package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
- package/tool/generate-tool-calls/generateToolCalls.js +3 -3
- package/tool/use-tool/useTool.cjs +2 -2
- package/tool/use-tool/useTool.d.ts +5 -1
- package/tool/use-tool/useTool.js +2 -2
- package/tool/use-tools/useTools.cjs +8 -2
- package/tool/use-tools/useTools.d.ts +5 -1
- package/tool/use-tools/useTools.js +8 -2
- package/vector-index/VectorIndexRetriever.cjs +5 -1
- package/vector-index/VectorIndexRetriever.js +5 -1
- package/vector-index/upsertIntoVectorIndex.cjs +5 -1
- package/vector-index/upsertIntoVectorIndex.js +5 -1
- package/classifier/SemanticClassifier.cjs +0 -75
- package/classifier/SemanticClassifier.d.ts +0 -25
- package/classifier/SemanticClassifier.js +0 -71
- package/classifier/index.d.ts +0 -1
- package/classifier/index.js +0 -1
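
Taken together, the file list shows the headline feature of this range: semantic classification moved from the standalone `classifier/SemanticClassifier` into the model-function layer as `classify()` backed by the new `EmbeddingSimilarityClassifier`. A minimal sketch of the new API, inferred from the added files and the 0.122.0 docs — option names such as `similarityThreshold` and `clusters` are assumptions, not verified against this exact build:

```ts
import { classify, EmbeddingSimilarityClassifier, openai } from "modelfusion";

// Assumed API shape: named clusters of example values, matched by embedding similarity.
const classifier = new EmbeddingSimilarityClassifier({
  embeddingModel: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
  similarityThreshold: 0.82, // cosine-similarity cutoff (assumed)
  clusters: [
    { name: "refund" as const, values: ["I want my money back"] },
    { name: "other" as const, values: ["What are your opening hours?"] },
  ],
});

// classify() uses the named-parameter call style introduced in this release.
const result = await classify({
  model: classifier,
  value: "Please refund my last order",
});
```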
package/model-provider/ollama/OllamaChatModel.d.ts CHANGED

@@ -40,11 +40,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
     get settingsForEvent(): Partial<OllamaChatModelSettings>;
     doGenerateTexts(prompt: OllamaChatPrompt, options: FunctionCallOptions): Promise<{
         rawResponse: {
-            model: string;
             message: {
                 role: string;
                 content: string;
             };
+            model: string;
             done: true;
             created_at: string;
             total_duration: number;
@@ -61,11 +61,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
         rawResponse: {
-            model: string;
             message: {
                 role: string;
                 content: string;
             };
+            model: string;
             done: true;
             created_at: string;
             total_duration: number;
@@ -82,11 +82,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
     };
     private processTextGenerationResponse;
     doStreamText(prompt: OllamaChatPrompt, options: FunctionCallOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
-        model: string;
         message: {
             role: string;
             content: string;
         };
+        model: string;
         done: false;
         created_at: string;
     } | {
@@ -141,11 +141,11 @@ declare const ollamaChatResponseSchema: z.ZodObject<{
     eval_count: z.ZodNumber;
     eval_duration: z.ZodNumber;
 }, "strip", z.ZodTypeAny, {
-    model: string;
     message: {
         role: string;
         content: string;
     };
+    model: string;
     done: true;
     created_at: string;
     total_duration: number;
@@ -155,11 +155,11 @@ declare const ollamaChatResponseSchema: z.ZodObject<{
     prompt_eval_count?: number | undefined;
     prompt_eval_duration?: number | undefined;
 }, {
-    model: string;
     message: {
         role: string;
         content: string;
     };
+    model: string;
     done: true;
     created_at: string;
     total_duration: number;
@@ -185,19 +185,19 @@ declare const ollamaChatStreamChunkSchema: z.ZodDiscriminatedUnion<"done", [z.Zo
         content: string;
     }>;
 }, "strip", z.ZodTypeAny, {
-    model: string;
     message: {
         role: string;
         content: string;
     };
+    model: string;
     done: false;
     created_at: string;
 }, {
-    model: string;
     message: {
         role: string;
         content: string;
     };
+    model: string;
     done: false;
     created_at: string;
 }>, z.ZodObject<{
@@ -247,11 +247,11 @@ export declare const OllamaChatResponseFormat: {
         requestBodyValues: unknown;
         response: Response;
     }) => Promise<{
-        model: string;
         message: {
             role: string;
             content: string;
         };
+        model: string;
         done: true;
         created_at: string;
         total_duration: number;
@@ -271,11 +271,11 @@ export declare const OllamaChatResponseFormat: {
     handler: ({ response }: {
         response: Response;
     }) => Promise<AsyncIterable<import("../../index.js").Delta<{
-        model: string;
         message: {
             role: string;
             content: string;
         };
+        model: string;
         done: false;
         created_at: string;
     } | {
package/model-provider/ollama/OllamaChatModel.test.cjs CHANGED

@@ -16,7 +16,12 @@ describe("streamText", () => {
             `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
             `"eval_count":317,"eval_duration":4639772000}\n`,
         ];
-        const stream = await (0, streamText_js_1.streamText)(
+        const stream = await (0, streamText_js_1.streamText)({
+            model: new OllamaChatModel_js_1.OllamaChatModel({
+                model: "mistral:text",
+            }).withTextPrompt(),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
             "Hello",
package/model-provider/ollama/OllamaChatModel.test.js CHANGED

@@ -14,7 +14,12 @@ describe("streamText", () => {
             `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
             `"eval_count":317,"eval_duration":4639772000}\n`,
         ];
-        const stream = await streamText(
+        const stream = await streamText({
+            model: new OllamaChatModel({
+                model: "mistral:text",
+            }).withTextPrompt(),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await arrayFromAsync(stream)).toStrictEqual([
             "Hello",
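
The two test hunks above show the breaking change that recurs through the rest of this diff: every model function now takes a single named-options object instead of positional `(model, prompt)` arguments. A hedged before/after sketch — the `ollama.ChatTextGenerator` facade is assumed from the public API; the tests themselves construct `OllamaChatModel` directly:

```ts
import { streamText, ollama } from "modelfusion";

const model = ollama.ChatTextGenerator({ model: "mistral" }).withTextPrompt();

// 0.121.x style (removed): positional arguments.
// const stream = await streamText(model, "hello");

// 0.122+ style (what the tests now exercise): one options object.
const stream = await streamText({ model, prompt: "hello" });

for await (const chunk of stream) {
  process.stdout.write(chunk);
}
```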
package/model-provider/ollama/OllamaCompletionModel.test.cjs CHANGED

@@ -34,9 +34,12 @@ describe("generateText", () => {
             eval_count: 113,
             eval_duration: 1325948000,
         };
-        const result = await (0, generateText_js_1.generateText)(
-            model:
-
+        const result = await (0, generateText_js_1.generateText)({
+            model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+                model: "test-model",
+            }).withTextPrompt(),
+            prompt: "test prompt",
+        });
         expect(result).toEqual("test response");
     });
     it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -47,12 +47,15 @@ describe("generateText", () => {
             done: false,
         };
         try {
-            await (0, generateText_js_1.generateText)(
-
-
-
-
-
+            await (0, generateText_js_1.generateText)({
+                model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+                    api: new OllamaApiConfiguration_js_1.OllamaApiConfiguration({
+                        retry: (0, retryNever_js_1.retryNever)(),
+                    }),
+                    model: "test-model",
+                }).withTextPrompt(),
+                prompt: "test prompt",
+            });
             (0, assert_1.fail)("Should have thrown ApiCallError");
         }
         catch (expectedError) {
@@ -73,7 +73,12 @@ describe("streamText", () => {
             `"done":true,"context":[123,456,789],"total_duration":2165354041,"load_duration":1293958,` +
             `"prompt_eval_count":5,"prompt_eval_duration":193273000,"eval_count":136,"eval_duration":1966852000}\n`,
         ];
-        const stream = await (0, streamText_js_1.streamText)(
+        const stream = await (0, streamText_js_1.streamText)({
+            model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+                model: "mistral:text",
+            }).withTextPrompt(),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
             "Hello",
@@ -103,12 +103,16 @@ describe("streamStructure", () => {
             `"total_duration":521893000,"load_duration":957666,"prompt_eval_count":74,"prompt_eval_duration":302508000,` +
             `"eval_count":12,"eval_duration":215282000}\n`,
         ];
-        const stream = await (0, streamStructure_js_1.streamStructure)(
-            model:
-
-
-
-
+        const stream = await (0, streamStructure_js_1.streamStructure)({
+            model: new OllamaCompletionModel_js_1.OllamaCompletionModel({
+                model: "mistral:text",
+                promptTemplate: OllamaCompletionPrompt_js_1.Text,
+                format: "json",
+                raw: true,
+            }).asStructureGenerationModel(jsonStructurePrompt_js_1.jsonStructurePrompt.text()),
+            schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
+            prompt: "generate a name",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
             { isComplete: false, value: {} },
package/model-provider/ollama/OllamaCompletionModel.test.js CHANGED

@@ -32,9 +32,12 @@ describe("generateText", () => {
             eval_count: 113,
             eval_duration: 1325948000,
         };
-        const result = await generateText(
-            model:
-
+        const result = await generateText({
+            model: new OllamaCompletionModel({
+                model: "test-model",
+            }).withTextPrompt(),
+            prompt: "test prompt",
+        });
         expect(result).toEqual("test response");
     });
     it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -45,12 +45,15 @@ describe("generateText", () => {
             done: false,
         };
         try {
-            await generateText(
-
-
-
-
-
+            await generateText({
+                model: new OllamaCompletionModel({
+                    api: new OllamaApiConfiguration({
+                        retry: retryNever(),
+                    }),
+                    model: "test-model",
+                }).withTextPrompt(),
+                prompt: "test prompt",
+            });
             fail("Should have thrown ApiCallError");
         }
         catch (expectedError) {
@@ -71,7 +71,12 @@ describe("streamText", () => {
             `"done":true,"context":[123,456,789],"total_duration":2165354041,"load_duration":1293958,` +
             `"prompt_eval_count":5,"prompt_eval_duration":193273000,"eval_count":136,"eval_duration":1966852000}\n`,
         ];
-        const stream = await streamText(
+        const stream = await streamText({
+            model: new OllamaCompletionModel({
+                model: "mistral:text",
+            }).withTextPrompt(),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await arrayFromAsync(stream)).toStrictEqual([
             "Hello",
@@ -101,12 +101,16 @@ describe("streamStructure", () => {
             `"total_duration":521893000,"load_duration":957666,"prompt_eval_count":74,"prompt_eval_duration":302508000,` +
             `"eval_count":12,"eval_duration":215282000}\n`,
         ];
-        const stream = await streamStructure(
-            model:
-
-
-
-
+        const stream = await streamStructure({
+            model: new OllamaCompletionModel({
+                model: "mistral:text",
+                promptTemplate: Text,
+                format: "json",
+                raw: true,
+            }).asStructureGenerationModel(jsonStructurePrompt.text()),
+            schema: zodSchema(z.object({ name: z.string() })),
+            prompt: "generate a name",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await arrayFromAsync(stream)).toStrictEqual([
             { isComplete: false, value: {} },
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs CHANGED

@@ -101,7 +101,7 @@ class OpenAIChatFunctionCallStructureGenerationModel {
     async doGenerateStructure(schema, prompt, // first argument of the function
     options) {
         const expandedPrompt = this.promptTemplate.format(prompt);
-        const
+        const rawResponse = await this.model
             .withSettings({
             stopSequences: [
                 ...(this.settings.stopSequences ?? []),
@@ -119,13 +119,13 @@ class OpenAIChatFunctionCallStructureGenerationModel {
                 },
             ],
         });
-        const valueText =
+        const valueText = rawResponse.choices[0].message.function_call.arguments;
         try {
             return {
-
+                rawResponse,
                 valueText,
                 value: secure_json_parse_1.default.parse(valueText),
-                usage: this.model.extractUsage(
+                usage: this.model.extractUsage(rawResponse),
             };
         }
         catch (error) {
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts CHANGED

@@ -127,7 +127,7 @@ OpenAIChatSettings> {
      */
     doGenerateStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: Parameters<PROMPT_TEMPLATE["format"]>[0], // first argument of the function
     options: FunctionCallOptions): Promise<{
-
+        rawResponse: {
             object: "chat.completion";
             model: string;
             usage: {
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js CHANGED

@@ -95,7 +95,7 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
     async doGenerateStructure(schema, prompt, // first argument of the function
     options) {
         const expandedPrompt = this.promptTemplate.format(prompt);
-        const
+        const rawResponse = await this.model
             .withSettings({
             stopSequences: [
                 ...(this.settings.stopSequences ?? []),
@@ -113,13 +113,13 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
                 },
             ],
         });
-        const valueText =
+        const valueText = rawResponse.choices[0].message.function_call.arguments;
         try {
             return {
-
+                rawResponse,
                 valueText,
                 value: SecureJSON.parse(valueText),
-                usage: this.model.extractUsage(
+                usage: this.model.extractUsage(rawResponse),
             };
         }
         catch (error) {
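
Alongside the internal rename in `doGenerateStructure`, these hunks show that the raw provider payload exposed to callers is now consistently named `rawResponse` (previously `response`). A sketch of how that surfaces with `fullResponse: true` — using `generateText` for brevity; the structure-generation result in the hunk above has the same shape:

```ts
import { generateText, openai } from "modelfusion";

// With fullResponse: true, the result object now carries the provider payload
// under `rawResponse` instead of `response` (renamed in this release).
const { text, rawResponse } = await generateText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
  prompt: "Write a haiku about diffs.",
  fullResponse: true,
});
```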
package/model-provider/openai/OpenAIChatModel.test.cjs CHANGED

@@ -27,11 +27,14 @@ describe("streamText", () => {
             `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await (0, streamText_js_1.streamText)(
-
-
-
-
+        const stream = await (0, streamText_js_1.streamText)({
+            model: new OpenAIChatModel_js_1.OpenAIChatModel({
+                api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo",
+                numberOfGenerations: 2,
+            }).withTextPrompt(),
+            prompt: "test prompt",
+        });
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
     });
 });
@@ -73,15 +76,19 @@ describe("streamStructure", () => {
             `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
             `data: [DONE]\n\n`,
         ];
-        const stream = await (0, streamStructure_js_1.streamStructure)(
-
-
-
-
-
-
-
-
+        const stream = await (0, streamStructure_js_1.streamStructure)({
+            model: new OpenAIChatModel_js_1.OpenAIChatModel({
+                api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo",
+            })
+                .asFunctionCallStructureGenerationModel({
+                fnName: "generateCharacter",
+                fnDescription: "Generate character descriptions.",
+            })
+                .withTextPrompt(),
+            schema: (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })),
+            prompt: "generate a name",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
             { isComplete: false, value: {} },
package/model-provider/openai/OpenAIChatModel.test.js CHANGED

@@ -25,11 +25,14 @@ describe("streamText", () => {
             `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await streamText(
-
-
-
-
+        const stream = await streamText({
+            model: new OpenAIChatModel({
+                api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo",
+                numberOfGenerations: 2,
+            }).withTextPrompt(),
+            prompt: "test prompt",
+        });
         expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
     });
 });
@@ -71,15 +74,19 @@ describe("streamStructure", () => {
             `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
             `data: [DONE]\n\n`,
         ];
-        const stream = await streamStructure(
-
-
-
-
-
-
-
-
+        const stream = await streamStructure({
+            model: new OpenAIChatModel({
+                api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo",
+            })
+                .asFunctionCallStructureGenerationModel({
+                fnName: "generateCharacter",
+                fnDescription: "Generate character descriptions.",
+            })
+                .withTextPrompt(),
+            schema: zodSchema(z.object({ name: z.string() })),
+            prompt: "generate a name",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await arrayFromAsync(stream)).toStrictEqual([
             { isComplete: false, value: {} },
package/model-provider/openai/OpenAICompletionModel.test.cjs CHANGED

@@ -20,10 +20,13 @@ describe("streamText", () => {
             `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await (0, streamText_js_1.streamText)(
-
-
-
+        const stream = await (0, streamText_js_1.streamText)({
+            model: new OpenAICompletionModel_js_1.OpenAICompletionModel({
+                api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo-instruct",
+            }),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
             "Hello",
@@ -43,11 +46,14 @@ describe("streamText", () => {
             `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await (0, streamText_js_1.streamText)(
-
-
-
-
+        const stream = await (0, streamText_js_1.streamText)({
+            model: new OpenAICompletionModel_js_1.OpenAICompletionModel({
+                api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo-instruct",
+                numberOfGenerations: 2,
+            }),
+            prompt: "test prompt",
+        });
         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
     });
 });
package/model-provider/openai/OpenAICompletionModel.test.js CHANGED

@@ -18,10 +18,13 @@ describe("streamText", () => {
             `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await streamText(
-
-
-
+        const stream = await streamText({
+            model: new OpenAICompletionModel({
+                api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo-instruct",
+            }),
+            prompt: "hello",
+        });
         // note: space moved to last chunk bc of trimming
         expect(await arrayFromAsync(stream)).toStrictEqual([
             "Hello",
@@ -41,11 +44,14 @@ describe("streamText", () => {
             `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
             "data: [DONE]\n\n",
         ];
-        const stream = await streamText(
-
-
-
-
+        const stream = await streamText({
+            model: new OpenAICompletionModel({
+                api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+                model: "gpt-3.5-turbo-instruct",
+                numberOfGenerations: 2,
+            }),
+            prompt: "test prompt",
+        });
         expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
     });
 });
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts CHANGED

@@ -52,16 +52,16 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     doEmbedValues(texts: string[], callOptions: FunctionCallOptions): Promise<{
         rawResponse: {
             object: "list";
-            model: string;
-            usage: {
-                prompt_tokens: number;
-                total_tokens: number;
-            };
             data: {
                 object: "embedding";
                 embedding: number[];
                 index: number;
             }[];
+            model: string;
+            usage: {
+                prompt_tokens: number;
+                total_tokens: number;
+            };
         };
         embeddings: number[][];
     }>;
@@ -95,28 +95,28 @@ declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
     }>;
 }, "strip", z.ZodTypeAny, {
     object: "list";
-    model: string;
-    usage: {
-        prompt_tokens: number;
-        total_tokens: number;
-    };
     data: {
         object: "embedding";
         embedding: number[];
         index: number;
     }[];
-}, {
-    object: "list";
     model: string;
     usage: {
         prompt_tokens: number;
         total_tokens: number;
     };
+}, {
+    object: "list";
     data: {
         object: "embedding";
         embedding: number[];
         index: number;
     }[];
+    model: string;
+    usage: {
+        prompt_tokens: number;
+        total_tokens: number;
+    };
 }>;
 export type OpenAITextEmbeddingResponse = z.infer<typeof openAITextEmbeddingResponseSchema>;
 export {};
package/package.json CHANGED

package/tool/execute-tool/executeTool.cjs CHANGED
@@ -10,14 +10,14 @@ const getRun_js_1 = require("../../core/getRun.cjs");
 const DurationMeasurement_js_1 = require("../../util/DurationMeasurement.cjs");
 const runSafe_js_1 = require("../../util/runSafe.cjs");
 const ToolExecutionError_js_1 = require("../ToolExecutionError.cjs");
-
-tool, args, options) {
-    const
-    return
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+async function executeTool({ tool, args, fullResponse, ...options }) {
+    const callResponse = await doExecuteTool({ tool, args, ...options });
+    return fullResponse ? callResponse : callResponse.output;
 }
 exports.executeTool = executeTool;
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
-async function doExecuteTool(tool, args, options) {
+async function doExecuteTool({ tool, args, ...options }) {
     const run = await (0, getRun_js_1.getRun)(options?.run);
     const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
         observers: [
package/tool/execute-tool/executeTool.d.ts CHANGED

@@ -14,13 +14,17 @@ export type ExecuteToolMetadata = {
  * `executeTool` executes a tool with the given parameters.
  */
 export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
-
+params: {
+    tool: TOOL;
+    args: TOOL["parameters"]["_type"];
     fullResponse?: false;
-}): Promise<ReturnType<TOOL["execute"]>>;
+} & FunctionOptions): Promise<ReturnType<TOOL["execute"]>>;
 export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
-
+params: {
+    tool: TOOL;
+    args: TOOL["parameters"]["_type"];
     fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
     output: Awaited<ReturnType<TOOL["execute"]>>;
     metadata: ExecuteToolMetadata;
 }>;
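
The reworked overloads make `executeTool` follow the same named-parameter convention, with `fullResponse` switching between the bare output and an `{ output, metadata }` wrapper. A usage sketch — `calculator` is a hypothetical `Tool` instance, not part of this diff:

```ts
import { executeTool } from "modelfusion";
import { calculator } from "./calculator-tool"; // hypothetical Tool instance

// Default: resolves directly to the tool's output.
const output = await executeTool({ tool: calculator, args: { a: 2, b: 3 } });

// fullResponse: true resolves to { output, metadata } per the overload above.
const { output: sum, metadata } = await executeTool({
  tool: calculator,
  args: { a: 2, b: 3 },
  fullResponse: true,
});
```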