modelfusion 0.103.0 → 0.105.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +56 -0
- package/model-function/Delta.d.ts +1 -2
- package/model-function/executeStreamCall.cjs +6 -4
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +6 -4
- package/model-function/generate-speech/streamSpeech.cjs +1 -2
- package/model-function/generate-speech/streamSpeech.js +1 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
- package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
- package/model-function/generate-structure/streamStructure.cjs +7 -8
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.js +7 -8
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
- package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
- package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
- package/model-function/generate-text/prompt-template/index.cjs +1 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -1
- package/model-function/generate-text/prompt-template/index.js +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-function/generate-text/streamText.cjs +27 -28
- package/model-function/generate-text/streamText.d.ts +1 -0
- package/model-function/generate-text/streamText.js +27 -28
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
- package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
- package/model-provider/mistral/MistralChatModel.cjs +30 -104
- package/model-provider/mistral/MistralChatModel.d.ts +49 -16
- package/model-provider/mistral/MistralChatModel.js +30 -104
- package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
- package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
- package/model-provider/mistral/MistralChatModel.test.js +49 -0
- package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +13 -5
- package/model-provider/ollama/OllamaChatModel.cjs +7 -43
- package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
- package/model-provider/ollama/OllamaChatModel.js +7 -43
- package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
- package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
- package/model-provider/ollama/OllamaChatModel.test.js +25 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
- package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
- package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
- package/model-provider/ollama/OllamaCompletionModel.js +23 -44
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
- package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
- package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
- package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
- package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
- package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
- package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
- package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAIChatModel.test.js +92 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
- package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
- package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
- package/model-provider/openai/OpenAICompletionModel.js +33 -85
- package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
- package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
- package/model-provider/openai/OpenAICostCalculator.js +1 -1
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -3
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
- package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
- package/model-provider/openai/index.cjs +6 -6
- package/model-provider/openai/index.d.ts +5 -6
- package/model-provider/openai/index.js +5 -5
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/package.json +5 -5
- package/test/JsonTestServer.cjs +33 -0
- package/test/JsonTestServer.d.ts +7 -0
- package/test/JsonTestServer.js +29 -0
- package/test/StreamingTestServer.cjs +55 -0
- package/test/StreamingTestServer.d.ts +7 -0
- package/test/StreamingTestServer.js +51 -0
- package/test/arrayFromAsync.cjs +13 -0
- package/test/arrayFromAsync.d.ts +1 -0
- package/test/arrayFromAsync.js +9 -0
- package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
- package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
- package/util/streaming/createEventSourceResponseHandler.js +5 -0
- package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
- package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
- package/util/streaming/createJsonStreamResponseHandler.js +5 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
- package/model-function/generate-text/prompt-template/Content.cjs +0 -2
- package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
- package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
- /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
- /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/model-provider/openai/OpenAIChatPromptTemplate.js

@@ -0,0 +1,107 @@
+import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+/**
+ * OpenAIMessage[] identity chat format.
+ */
+export function identity() {
+    return { format: (prompt) => prompt, stopSequences: [] };
+}
+/**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+export function text() {
+    return {
+        format: (prompt) => [OpenAIChatMessage.user(prompt)],
+        stopSequences: [],
+    };
+}
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+export function instruction() {
+    return {
+        format(prompt) {
+            const messages = [];
+            if (prompt.system != null) {
+                messages.push(OpenAIChatMessage.system(prompt.system));
+            }
+            messages.push(OpenAIChatMessage.user(prompt.instruction));
+            return messages;
+        },
+        stopSequences: [],
+    };
+}
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+export function chat() {
+    return {
+        format(prompt) {
+            const messages = [];
+            if (prompt.system != null) {
+                messages.push(OpenAIChatMessage.system(prompt.system));
+            }
+            for (const { role, content } of prompt.messages) {
+                switch (role) {
+                    case "user": {
+                        messages.push(OpenAIChatMessage.user(content));
+                        break;
+                    }
+                    case "assistant": {
+                        if (typeof content === "string") {
+                            messages.push(OpenAIChatMessage.assistant(content));
+                        }
+                        else {
+                            let text = "";
+                            const toolCalls = [];
+                            for (const part of content) {
+                                switch (part.type) {
+                                    case "text": {
+                                        text += part.text;
+                                        break;
+                                    }
+                                    case "tool-call": {
+                                        toolCalls.push({
+                                            id: part.id,
+                                            type: "function",
+                                            function: {
+                                                name: part.name,
+                                                arguments: JSON.stringify(part.args),
+                                            },
+                                        });
+                                        break;
+                                    }
+                                    default: {
+                                        const _exhaustiveCheck = part;
+                                        throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+                                    }
+                                }
+                            }
+                            messages.push({
+                                role: "assistant",
+                                content: text,
+                                tool_calls: toolCalls,
+                            });
+                        }
+                        break;
+                    }
+                    case "tool": {
+                        for (const toolResponse of content) {
+                            messages.push({
+                                role: "tool",
+                                tool_call_id: toolResponse.id,
+                                content: JSON.stringify(toolResponse.response),
+                            });
+                        }
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
+            }
+            return messages;
+        },
+        stopSequences: [],
+    };
+}
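The new module exports four template factories (`identity`, `text`, `instruction`, `chat`), each returning an object with a `format` method and a `stopSequences` array. As a minimal usage sketch (hedged: the instruction prompt shape `{ system?: string; instruction: string }` is taken from this release's prompt-template changes, and the example calls `format` directly rather than wiring the template into a model):

```ts
// Sketch: applying the instruction() template by hand.
import { instruction } from "./OpenAIChatPromptTemplate.js";

const template = instruction();
const messages = template.format({
  system: "You are a concise assistant.",
  instruction: "Summarize the release notes.",
});
// messages is an OpenAI-style message array, roughly:
// [{ role: "system", content: "..." }, { role: "user", content: "..." }]
```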
package/model-provider/openai/OpenAICompletionModel.cjs

@@ -5,14 +5,12 @@ const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
-const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
-const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
+const createEventSourceResponseHandler_js_1 = require("../../util/streaming/createEventSourceResponseHandler.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");

@@ -186,18 +184,17 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
             ? options.run?.userId
             : undefined;
         const abortSignal = options.run?.abortSignal;
-        let { stopSequences } = this.settings;
         const openaiResponseFormat = options.responseFormat;
+        // empty arrays are not allowed for stop:
+        const stopSequences = this.settings.stopSequences != null &&
+            Array.isArray(this.settings.stopSequences) &&
+            this.settings.stopSequences.length === 0
+            ? undefined
+            : this.settings.stopSequences;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => {
-                // empty arrays are not allowed for stop:
-                if (stopSequences != null &&
-                    Array.isArray(stopSequences) &&
-                    stopSequences.length === 0) {
-                    stopSequences = undefined;
-                }
                 return (0, postToApi_js_1.postJsonToApi)({
                     url: api.assembleUrl("/completions"),
                     headers: api.headers,

@@ -212,7 +209,7 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
                     n: this.settings.numberOfGenerations,
                     logprobs: this.settings.logprobs,
                     echo: this.settings.echo,
-                    stop:
+                    stop: stopSequences,
                     seed: this.settings.seed,
                     presence_penalty: this.settings.presencePenalty,
                     frequency_penalty: this.settings.frequencyPenalty,

@@ -281,6 +278,14 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
             responseFormat: exports.OpenAITextResponseFormat.deltaIterable,
         });
     }
+    extractTextDelta(delta) {
+        const chunk = delta;
+        const firstChoice = chunk.choices[0];
+        if (firstChoice.index > 0) {
+            return undefined;
+        }
+        return chunk.choices[0].text;
+    }
     /**
      * Returns this model with an instruction prompt template.
      */

@@ -330,6 +335,21 @@ const OpenAICompletionResponseSchema = zod_1.z.object({
         total_tokens: zod_1.z.number(),
     }),
 });
+const openaiCompletionStreamChunkSchema = (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({
+    choices: zod_1.z.array(zod_1.z.object({
+        text: zod_1.z.string(),
+        finish_reason: zod_1.z
+            .enum(["stop", "length", "content_filter"])
+            .optional()
+            .nullable(),
+        index: zod_1.z.number(),
+    })),
+    created: zod_1.z.number(),
+    id: zod_1.z.string(),
+    model: zod_1.z.string(),
+    system_fingerprint: zod_1.z.string().optional(),
+    object: zod_1.z.literal("text_completion"),
+}));
 exports.OpenAITextResponseFormat = {
     /**
      * Returns the response as a JSON object.

@@ -344,78 +364,6 @@ exports.OpenAITextResponseFormat = {
      */
     deltaIterable: {
         stream: true,
-        handler:
+        handler: (0, createEventSourceResponseHandler_js_1.createEventSourceResponseHandler)(openaiCompletionStreamChunkSchema),
     },
 };
-const textResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
-    choices: zod_1.z.array(zod_1.z.object({
-        text: zod_1.z.string(),
-        finish_reason: zod_1.z
-            .enum(["stop", "length", "content_filter"])
-            .optional()
-            .nullable(),
-        index: zod_1.z.number(),
-    })),
-    created: zod_1.z.number(),
-    id: zod_1.z.string(),
-    model: zod_1.z.string(),
-    system_fingerprint: zod_1.z.string().optional(),
-    object: zod_1.z.literal("text_completion"),
-}));
-async function createOpenAITextFullDeltaIterableQueue(stream) {
-    const queue = new AsyncQueue_js_1.AsyncQueue();
-    const streamDelta = [];
-    // process the stream asynchonously (no 'await' on purpose):
-    (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
-        .then(async (events) => {
-        try {
-            for await (const event of events) {
-                const data = event.data;
-                if (data === "[DONE]") {
-                    queue.close();
-                    return;
-                }
-                const eventData = (0, parseJSON_js_1.parseJSON)({
-                    text: data,
-                    schema: textResponseStreamEventSchema,
-                });
-                for (let i = 0; i < eventData.choices.length; i++) {
-                    const eventChoice = eventData.choices[i];
-                    const delta = eventChoice.text;
-                    if (streamDelta[i] == null) {
-                        streamDelta[i] = {
-                            content: "",
-                            isComplete: false,
-                            delta: "",
-                        };
-                    }
-                    const choice = streamDelta[i];
-                    choice.delta = delta;
-                    if (eventChoice.finish_reason != null) {
-                        choice.isComplete = true;
-                    }
-                    choice.content += delta;
-                }
-                // Since we're mutating the choices array in an async scenario,
-                // we need to make a deep copy:
-                const streamDeltaDeepCopy = JSON.parse(JSON.stringify(streamDelta));
-                queue.push({
-                    type: "delta",
-                    fullDelta: streamDeltaDeepCopy,
-                    valueDelta: streamDeltaDeepCopy[0].delta,
-                });
-            }
-        }
-        catch (error) {
-            queue.push({ type: "error", error });
-            queue.close();
-            return;
-        }
-    })
-        .catch((error) => {
-        queue.push({ type: "error", error });
-        queue.close();
-        return;
-    });
-    return queue;
-}
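The removed `createOpenAITextFullDeltaIterableQueue` logic (manual `AsyncQueue`, `[DONE]` detection, per-choice accumulation) moves behind the shared `createEventSourceResponseHandler` helper added under `package/util/streaming/` in this release. Its body is not part of this excerpt; a hedged sketch of its likely shape, inferred from the call site above and from the new `parseEventSourceStreamAsAsyncIterable` module in the file list:

```ts
// Sketch only: the signature and the Schema import path are assumptions,
// inferred from how the helper is called in the hunks above.
import { parseEventSourceStreamAsAsyncIterable } from "./parseEventSourceStreamAsAsyncIterable.js";
import type { Schema } from "../../core/schema/Schema.js";

export const createEventSourceResponseHandler =
  <T>(schema: Schema<T>) =>
  ({ response }: { response: Response }) =>
    // Parse the SSE body into an async iterable of schema-validated chunks.
    parseEventSourceStreamAsAsyncIterable({ stream: response.body!, schema });
```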
package/model-provider/openai/OpenAICompletionModel.d.ts

@@ -3,7 +3,6 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { Delta } from "../../model-function/Delta.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";

@@ -174,18 +173,30 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
         };
     }>;
     private translateFinishReason;
-    doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<
+    doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
+        object: "text_completion";
+        model: string;
+        id: string;
+        created: number;
+        choices: {
+            text: string;
+            index: number;
+            finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
+        }[];
+        system_fingerprint?: string | undefined;
+    }>>>;
+    extractTextDelta(delta: unknown): string | undefined;
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAICompletionModelSettings, this>;
     /**
      * Returns this model with a chat prompt template.
      */
     withChatPrompt(options?: {
         user?: string;
         assistant?: string;
-    }): PromptTemplateTextStreamingModel<import("../../index.js").
+    }): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
 }

@@ -269,7 +280,7 @@ export declare const OpenAITextResponseFormat: {
      * Returns the response as a JSON object.
      */
     json: {
-        stream:
+        stream: boolean;
         handler: ResponseHandler<{
             object: "text_completion";
             usage: {

@@ -294,15 +305,21 @@ export declare const OpenAITextResponseFormat: {
      * of the response stream.
      */
     deltaIterable: {
-        stream:
+        stream: boolean;
         handler: ({ response }: {
             response: Response;
-        }) => Promise<AsyncIterable<Delta<
+        }) => Promise<AsyncIterable<import("../../index.js").Delta<{
+            object: "text_completion";
+            model: string;
+            id: string;
+            created: number;
+            choices: {
+                text: string;
+                index: number;
+                finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
+            }[];
+            system_fingerprint?: string | undefined;
+        }>>>;
     };
 };
-export type OpenAICompletionDelta = Array<{
-    content: string;
-    isComplete: boolean;
-    delta: string;
-}>;
 export {};
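The key behavioral addition in these typings is `extractTextDelta(delta: unknown): string | undefined`: a stream chunk whose first listed choice has `index > 0` yields `undefined`, so only choice 0 is surfaced by `streamText`. Restated standalone (the chunk type name is illustrative; the shape matches the stream chunk schema above):

```ts
type CompletionStreamChunk = {
  choices: { text: string; index: number }[];
};

// Mirrors the extractTextDelta logic added in this release.
function extractTextDelta(chunk: CompletionStreamChunk): string | undefined {
  const firstChoice = chunk.choices[0];
  if (firstChoice.index > 0) {
    return undefined; // deltas for choices other than the first are dropped
  }
  return firstChoice.text;
}
```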
package/model-provider/openai/OpenAICompletionModel.js

@@ -1,15 +1,13 @@
 import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
-import { ZodSchema } from "../../core/schema/ZodSchema.js";
-import { parseJSON } from "../../core/schema/parseJSON.js";
+import { zodSchema } from "../../core/schema/ZodSchema.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { AsyncQueue } from "../../util/AsyncQueue.js";
-import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
+import { createEventSourceResponseHandler } from "../../util/streaming/createEventSourceResponseHandler.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";

@@ -180,18 +178,17 @@ export class OpenAICompletionModel extends AbstractModel {
             ? options.run?.userId
             : undefined;
         const abortSignal = options.run?.abortSignal;
-        let { stopSequences } = this.settings;
         const openaiResponseFormat = options.responseFormat;
+        // empty arrays are not allowed for stop:
+        const stopSequences = this.settings.stopSequences != null &&
+            Array.isArray(this.settings.stopSequences) &&
+            this.settings.stopSequences.length === 0
+            ? undefined
+            : this.settings.stopSequences;
         return callWithRetryAndThrottle({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => {
-                // empty arrays are not allowed for stop:
-                if (stopSequences != null &&
-                    Array.isArray(stopSequences) &&
-                    stopSequences.length === 0) {
-                    stopSequences = undefined;
-                }
                 return postJsonToApi({
                     url: api.assembleUrl("/completions"),
                     headers: api.headers,

@@ -206,7 +203,7 @@ export class OpenAICompletionModel extends AbstractModel {
                     n: this.settings.numberOfGenerations,
                     logprobs: this.settings.logprobs,
                     echo: this.settings.echo,
-                    stop:
+                    stop: stopSequences,
                     seed: this.settings.seed,
                     presence_penalty: this.settings.presencePenalty,
                     frequency_penalty: this.settings.frequencyPenalty,

@@ -275,6 +272,14 @@ export class OpenAICompletionModel extends AbstractModel {
             responseFormat: OpenAITextResponseFormat.deltaIterable,
         });
     }
+    extractTextDelta(delta) {
+        const chunk = delta;
+        const firstChoice = chunk.choices[0];
+        if (firstChoice.index > 0) {
+            return undefined;
+        }
+        return chunk.choices[0].text;
+    }
     /**
      * Returns this model with an instruction prompt template.
      */

@@ -323,6 +328,21 @@ const OpenAICompletionResponseSchema = z.object({
         total_tokens: z.number(),
     }),
 });
+const openaiCompletionStreamChunkSchema = zodSchema(z.object({
+    choices: z.array(z.object({
+        text: z.string(),
+        finish_reason: z
+            .enum(["stop", "length", "content_filter"])
+            .optional()
+            .nullable(),
+        index: z.number(),
+    })),
+    created: z.number(),
+    id: z.string(),
+    model: z.string(),
+    system_fingerprint: z.string().optional(),
+    object: z.literal("text_completion"),
+}));
 export const OpenAITextResponseFormat = {
     /**
      * Returns the response as a JSON object.

@@ -337,78 +357,6 @@ export const OpenAITextResponseFormat = {
      */
     deltaIterable: {
         stream: true,
-        handler:
+        handler: createEventSourceResponseHandler(openaiCompletionStreamChunkSchema),
     },
 };
-const textResponseStreamEventSchema = new ZodSchema(z.object({
-    choices: z.array(z.object({
-        text: z.string(),
-        finish_reason: z
-            .enum(["stop", "length", "content_filter"])
-            .optional()
-            .nullable(),
-        index: z.number(),
-    })),
-    created: z.number(),
-    id: z.string(),
-    model: z.string(),
-    system_fingerprint: z.string().optional(),
-    object: z.literal("text_completion"),
-}));
-async function createOpenAITextFullDeltaIterableQueue(stream) {
-    const queue = new AsyncQueue();
-    const streamDelta = [];
-    // process the stream asynchonously (no 'await' on purpose):
-    parseEventSourceStream({ stream })
-        .then(async (events) => {
-        try {
-            for await (const event of events) {
-                const data = event.data;
-                if (data === "[DONE]") {
-                    queue.close();
-                    return;
-                }
-                const eventData = parseJSON({
-                    text: data,
-                    schema: textResponseStreamEventSchema,
-                });
-                for (let i = 0; i < eventData.choices.length; i++) {
-                    const eventChoice = eventData.choices[i];
-                    const delta = eventChoice.text;
-                    if (streamDelta[i] == null) {
-                        streamDelta[i] = {
-                            content: "",
-                            isComplete: false,
-                            delta: "",
-                        };
-                    }
-                    const choice = streamDelta[i];
-                    choice.delta = delta;
-                    if (eventChoice.finish_reason != null) {
-                        choice.isComplete = true;
-                    }
-                    choice.content += delta;
-                }
-                // Since we're mutating the choices array in an async scenario,
-                // we need to make a deep copy:
-                const streamDeltaDeepCopy = JSON.parse(JSON.stringify(streamDelta));
-                queue.push({
-                    type: "delta",
-                    fullDelta: streamDeltaDeepCopy,
-                    valueDelta: streamDeltaDeepCopy[0].delta,
-                });
-            }
-        }
-        catch (error) {
-            queue.push({ type: "error", error });
-            queue.close();
-            return;
-        }
-    })
-        .catch((error) => {
-        queue.push({ type: "error", error });
-        queue.close();
-        return;
-    });
-    return queue;
-}
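Both the `.cjs` and `.js` builds make the same fix: the empty-`stop` normalization now runs once, before the retry/throttle wrapper, instead of mutating a local variable inside every retried call. The rule itself, stated in isolation (helper name is illustrative, not from the package):

```ts
// The completions endpoint does not accept `stop: []` ("empty arrays are
// not allowed for stop"), so an empty array becomes undefined and the
// field is omitted from the request body.
const normalizeStopSequences = (stop: string[] | undefined) =>
  Array.isArray(stop) && stop.length === 0 ? undefined : stop;

normalizeStopSequences([]);     // undefined
normalizeStopSequences(["\n"]); // ["\n"]
```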
package/model-provider/openai/OpenAICompletionModel.test.cjs

@@ -0,0 +1,53 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
+const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
+const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
+const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
+const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
+describe("streamText", () => {
+    const server = new StreamingTestServer_js_1.StreamingTestServer("https://api.openai.com/v1/completions");
+    server.setupTestEnvironment();
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"Hello","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":", ","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"world!","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await (0, streamText_js_1.streamText)(new OpenAICompletionModel_js_1.OpenAICompletionModel({
+            api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo-instruct",
+        }), "hello");
+        // note: space moved to last chunk bc of trimming
+        expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
+            "Hello",
+            ",",
+            " world!",
+        ]);
+    });
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"A","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"B","index":1,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await (0, streamText_js_1.streamText)(new OpenAICompletionModel_js_1.OpenAICompletionModel({
+            api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo-instruct",
+            numberOfGenerations: 2,
+        }), "test prompt");
+        expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
+    });
+});
package/model-provider/openai/OpenAICompletionModel.test.d.ts

@@ -0,0 +1 @@
+export {};
package/model-provider/openai/OpenAICompletionModel.test.js

@@ -0,0 +1,51 @@
+import { streamText } from "../../model-function/generate-text/streamText.js";
+import { StreamingTestServer } from "../../test/StreamingTestServer.js";
+import { arrayFromAsync } from "../../test/arrayFromAsync.js";
+import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+import { OpenAICompletionModel } from "./OpenAICompletionModel.js";
+describe("streamText", () => {
+    const server = new StreamingTestServer("https://api.openai.com/v1/completions");
+    server.setupTestEnvironment();
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"Hello","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":", ","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"world!","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await streamText(new OpenAICompletionModel({
+            api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo-instruct",
+        }), "hello");
+        // note: space moved to last chunk bc of trimming
+        expect(await arrayFromAsync(stream)).toStrictEqual([
+            "Hello",
+            ",",
+            " world!",
+        ]);
+    });
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"A","index":0,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"B","index":1,"logprobs":null,"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            `data: {"id":"cmpl-8ZNls6dH7X2jUAJbY5joSWF9L0AD3","object":"text_completion","created":1703443548,` +
+                `"choices":[{"text":"","index":1,"logprobs":null,"finish_reason":"length"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await streamText(new OpenAICompletionModel({
+            api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo-instruct",
+            numberOfGenerations: 2,
+        }), "test prompt");
+        expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
+    });
+});
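These tests lean on two utilities that are new in this release: `StreamingTestServer`, which replays the `responseChunks` fixture as a server-sent-event response, and `arrayFromAsync`, which drains an async iterable. The shipped `arrayFromAsync` body is not shown in this excerpt; a sketch consistent with its usage above:

```ts
// Collects an async iterable into an array (shape inferred from the tests).
export async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
  const result: T[] = [];
  for await (const item of iterable) {
    result.push(item);
  }
  return result;
}
```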
package/model-provider/openai/OpenAICostCalculator.cjs

@@ -6,7 +6,7 @@ const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cj
 const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
 const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
 const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
-const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
+const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
 class OpenAICostCalculator {
     constructor() {
         Object.defineProperty(this, "provider", {
package/model-provider/openai/OpenAICostCalculator.js

@@ -3,7 +3,7 @@ import { calculateOpenAIImageGenerationCostInMillicents, } from "./OpenAIImageGe
 import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
 import { calculateOpenAIEmbeddingCostInMillicents, isOpenAIEmbeddingModel, } from "./OpenAITextEmbeddingModel.js";
 import { calculateOpenAITranscriptionCostInMillicents, } from "./OpenAITranscriptionModel.js";
-import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./chat/OpenAIChatModel.js";
+import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./OpenAIChatModel.js";
 export class OpenAICostCalculator {
     constructor() {
         Object.defineProperty(this, "provider", {