modelfusion 0.104.0 → 0.105.0

This diff compares the contents of publicly available package versions as published to their public registries. It is provided for informational purposes only.
Files changed (164)
  1. package/CHANGELOG.md +49 -0
  2. package/model-function/Delta.d.ts +1 -2
  3. package/model-function/executeStreamCall.cjs +6 -4
  4. package/model-function/executeStreamCall.d.ts +2 -2
  5. package/model-function/executeStreamCall.js +6 -4
  6. package/model-function/generate-speech/streamSpeech.cjs +1 -2
  7. package/model-function/generate-speech/streamSpeech.js +1 -2
  8. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
  9. package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
  10. package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
  11. package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
  12. package/model-function/generate-structure/streamStructure.cjs +7 -8
  13. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  14. package/model-function/generate-structure/streamStructure.js +7 -8
  15. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
  16. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
  17. package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
  18. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
  19. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
  20. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
  21. package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
  22. package/model-function/generate-text/index.cjs +1 -0
  23. package/model-function/generate-text/index.d.ts +1 -0
  24. package/model-function/generate-text/index.js +1 -0
  25. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -2
  26. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +1 -1
  27. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +8 -5
  28. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +7 -4
  29. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +42 -0
  30. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +27 -5
  31. package/model-function/generate-text/prompt-template/ChatPrompt.js +41 -1
  32. package/model-function/generate-text/prompt-template/{Content.cjs → ContentPart.cjs} +1 -1
  33. package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
  34. package/model-function/generate-text/prompt-template/{Content.js → ContentPart.js} +1 -1
  35. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +3 -2
  36. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +7 -4
  37. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +5 -2
  38. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +8 -4
  39. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +6 -2
  40. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -4
  41. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +6 -2
  42. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +7 -3
  43. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +6 -2
  44. package/model-function/generate-text/prompt-template/index.cjs +1 -1
  45. package/model-function/generate-text/prompt-template/index.d.ts +1 -1
  46. package/model-function/generate-text/prompt-template/index.js +1 -1
  47. package/model-function/generate-text/streamText.cjs +27 -28
  48. package/model-function/generate-text/streamText.d.ts +1 -0
  49. package/model-function/generate-text/streamText.js +27 -28
  50. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +7 -3
  51. package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -1
  52. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
  53. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +11 -2
  54. package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
  55. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
  56. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
  57. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
  58. package/model-provider/cohere/CohereTextGenerationModel.d.ts +45 -11
  59. package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
  60. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
  61. package/model-provider/cohere/CohereTextGenerationModel.test.d.ts +1 -0
  62. package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
  63. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
  64. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
  65. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +6 -1
  66. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +6 -1
  67. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
  68. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
  69. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
  70. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
  71. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
  72. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
  73. package/model-provider/mistral/MistralChatModel.cjs +30 -104
  74. package/model-provider/mistral/MistralChatModel.d.ts +47 -14
  75. package/model-provider/mistral/MistralChatModel.js +30 -104
  76. package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
  77. package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
  78. package/model-provider/mistral/MistralChatModel.test.js +49 -0
  79. package/model-provider/mistral/MistralPromptTemplate.cjs +11 -4
  80. package/model-provider/mistral/MistralPromptTemplate.js +9 -2
  81. package/model-provider/ollama/OllamaChatModel.cjs +7 -43
  82. package/model-provider/ollama/OllamaChatModel.d.ts +61 -9
  83. package/model-provider/ollama/OllamaChatModel.js +7 -43
  84. package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
  85. package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
  86. package/model-provider/ollama/OllamaChatModel.test.js +25 -0
  87. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +34 -4
  88. package/model-provider/ollama/OllamaChatPromptTemplate.js +34 -4
  89. package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
  90. package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
  91. package/model-provider/ollama/OllamaCompletionModel.js +23 -44
  92. package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
  93. package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
  94. package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
  95. package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
  96. package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
  97. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
  98. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
  99. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
  100. package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
  101. package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
  102. package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
  103. package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
  104. package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
  105. package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
  106. package/model-provider/openai/OpenAIChatModel.test.js +92 -0
  107. package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
  108. package/model-provider/openai/{chat/OpenAIChatPromptTemplate.d.ts → OpenAIChatPromptTemplate.d.ts} +3 -3
  109. package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
  110. package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
  111. package/model-provider/openai/OpenAICompletionModel.d.ts +27 -10
  112. package/model-provider/openai/OpenAICompletionModel.js +33 -85
  113. package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
  114. package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
  115. package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
  116. package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
  117. package/model-provider/openai/OpenAICostCalculator.js +1 -1
  118. package/model-provider/openai/OpenAIFacade.cjs +2 -2
  119. package/model-provider/openai/OpenAIFacade.d.ts +3 -3
  120. package/model-provider/openai/OpenAIFacade.js +2 -2
  121. package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
  122. package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
  123. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
  124. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
  125. package/model-provider/openai/index.cjs +6 -6
  126. package/model-provider/openai/index.d.ts +5 -6
  127. package/model-provider/openai/index.js +5 -5
  128. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
  129. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
  130. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
  131. package/package.json +5 -5
  132. package/test/JsonTestServer.cjs +33 -0
  133. package/test/JsonTestServer.d.ts +7 -0
  134. package/test/JsonTestServer.js +29 -0
  135. package/test/StreamingTestServer.cjs +55 -0
  136. package/test/StreamingTestServer.d.ts +7 -0
  137. package/test/StreamingTestServer.js +51 -0
  138. package/test/arrayFromAsync.cjs +13 -0
  139. package/test/arrayFromAsync.d.ts +1 -0
  140. package/test/arrayFromAsync.js +9 -0
  141. package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
  142. package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
  143. package/util/streaming/createEventSourceResponseHandler.js +5 -0
  144. package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
  145. package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
  146. package/util/streaming/createJsonStreamResponseHandler.js +5 -0
  147. package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
  148. package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
  149. package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
  150. package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
  151. package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
  152. package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
  153. package/model-function/generate-text/prompt-template/Content.d.ts +0 -25
  154. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
  155. package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
  156. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -70
  157. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -63
  158. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
  159. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
  160. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
  161. /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
  162. /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
  163. /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
  164. /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
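
Most of the churn above is mechanical: the openai/chat/ subdirectory is flattened into openai/ (files 94-127 and 154-164), which is why every relative import in the moved files loses one "../" segment in the hunks below. The package root export is presumably the supported surface, but as a hedged illustration, anyone reaching into the published files directly (a hypothetical deep import, not necessarily supported by the package's exports map) would update paths like this:

// Illustration only: hypothetical deep imports into the published package files.
// 0.104.0:
// import { OpenAIChatMessage } from "modelfusion/model-provider/openai/chat/OpenAIChatMessage.js";
// 0.105.0 (the chat/ directory is flattened away):
import { OpenAIChatMessage } from "modelfusion/model-provider/openai/OpenAIChatMessage.js";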
package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts}
@@ -1,13 +1,13 @@
- import { StructureFromTextPromptTemplate } from "../../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
- import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
- import { PromptTemplateTextStreamingModel } from "../../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../../model-function/generate-text/TextGenerationModel.js";
- import { TextGenerationPromptTemplate } from "../../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { ToolCallGenerationModel } from "../../../tool/generate-tool-call/ToolCallGenerationModel.js";
- import { ToolCallsOrTextGenerationModel } from "../../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
- import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+ import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+ import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+ import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
+ import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
+ import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
  import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt, OpenAIChatResponse } from "./AbstractOpenAIChatModel.js";
  import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
+ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  export declare const OPENAI_CHAT_MODELS: {
      "gpt-4": {
          contextWindowSize: number;
@@ -147,16 +147,16 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
      /**
       * Returns this model with a text prompt template.
       */
-     withTextPrompt(): PromptTemplateTextStreamingModel<string, OpenAIChatPrompt, OpenAIChatSettings, this>;
+     withTextPrompt(): PromptTemplateFullTextModel<string, OpenAIChatPrompt, OpenAIChatSettings, this>;
      /**
       * Returns this model with an instruction prompt template.
       */
-     withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
+     withInstructionPrompt(): PromptTemplateFullTextModel<import("../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
      /**
       * Returns this model with a chat prompt template.
       */
-     withChatPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
-     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
+     withChatPrompt(): PromptTemplateFullTextModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
+     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateFullTextModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
      withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
  }
  export {};
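
The type change above is the visible edge of this release's main feature: withTextPrompt, withInstructionPrompt, withChatPrompt, and withPromptTemplate now return the new PromptTemplateFullTextModel (files 15-17) instead of PromptTemplateTextStreamingModel, evidently so that prompt-templated chat models keep the tool-call capabilities whose interfaces are still imported above. A minimal usage sketch, assuming OpenAIChatModel and streamText are re-exported from the package root (the construction mirrors the tests later in this diff):

// Sketch under stated assumptions; not the package's documented example.
import { OpenAIChatModel, streamText } from "modelfusion";

async function main() {
    const model = new OpenAIChatModel({ model: "gpt-3.5-turbo" })
        .withTextPrompt(); // now typed as PromptTemplateFullTextModel<string, ...>
    const stream = await streamText(model, "Write a one-line greeting.");
    for await (const textPart of stream) {
        process.stdout.write(textPart);
    }
}

main().catch(console.error);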
package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js}
@@ -1,10 +1,10 @@
- import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
- import { PromptTemplateTextStreamingModel } from "../../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { textGenerationModelProperties, } from "../../../model-function/generate-text/TextGenerationModel.js";
- import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+ import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+ import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { AbstractOpenAIChatModel, } from "./AbstractOpenAIChatModel.js";
  import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
  import { chat, identity, instruction, text, } from "./OpenAIChatPromptTemplate.js";
+ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
  /*
   * Available OpenAI chat models, their token limits, and pricing.
@@ -237,7 +237,7 @@ export class OpenAIChatModel extends AbstractOpenAIChatModel {
          return this.withPromptTemplate(chat());
      }
      withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
+         return new PromptTemplateFullTextModel({
              model: this.withSettings({
                  stopSequences: [
                      ...(this.settings.stopSequences ?? []),
package/model-provider/openai/OpenAIChatModel.test.cjs
@@ -0,0 +1,94 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const zod_1 = require("zod");
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+ const streamStructure_js_1 = require("../../model-function/generate-structure/streamStructure.cjs");
+ const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
+ const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
+ const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
+ const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
+ const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
+ describe("streamText", () => {
+     const server = new StreamingTestServer_js_1.StreamingTestServer("https://api.openai.com/v1/chat/completions");
+     server.setupTestEnvironment();
+     it("should return only values from the first choice when using streamText", async () => {
+         server.responseChunks = [
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             "data: [DONE]\n\n",
+         ];
+         const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
+             api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+             model: "gpt-3.5-turbo",
+             numberOfGenerations: 2,
+         }).withTextPrompt(), "test prompt");
+         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
+     });
+ });
+ describe("streamStructure", () => {
+     const server = new StreamingTestServer_js_1.StreamingTestServer("https://api.openai.com/v1/chat/completions");
+     server.setupTestEnvironment();
+     it("should return a text stream", async () => {
+         server.responseChunks = [
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"role":"assistant","content":null,` +
+                 `"function_call":{"name":"generateCharacter","arguments":""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"{\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":" "}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":" \\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"name"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"\\":\\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"M"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"ike\\"\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"}"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+             `data: [DONE]\n\n`,
+         ];
+         const stream = await (0, streamStructure_js_1.streamStructure)(new OpenAIChatModel_js_1.OpenAIChatModel({
+             api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+             model: "gpt-3.5-turbo",
+         })
+             .asFunctionCallStructureGenerationModel({
+             fnName: "generateCharacter",
+             fnDescription: "Generate character descriptions.",
+         })
+             .withTextPrompt(), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
+         // note: space moved to last chunk bc of trimming
+         expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
+             { isComplete: false, value: {} },
+             { isComplete: false, value: { name: "" } },
+             { isComplete: false, value: { name: "M" } },
+             { isComplete: false, value: { name: "Mike" } },
+             { isComplete: true, value: { name: "Mike" } },
+         ]);
+     });
+ });
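
The tests above lean on two new helpers under package/test/ (files 132-140): StreamingTestServer, which intercepts requests to the given URL and replays the canned responseChunks, and arrayFromAsync, which drains an async iterable into an array. A sketch of the latter with the semantics its call sites imply (the shipped implementation is in package/test/arrayFromAsync.js and may differ in detail):

// Collect every value of an async iterable into an array, in order.
export async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
    const result: T[] = [];
    for await (const item of iterable) {
        result.push(item);
    }
    return result;
}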
package/model-provider/openai/OpenAIChatModel.test.d.ts
@@ -0,0 +1 @@
+ export {};
package/model-provider/openai/OpenAIChatModel.test.js
@@ -0,0 +1,92 @@
+ import { z } from "zod";
+ import { zodSchema } from "../../core/schema/ZodSchema.js";
+ import { streamStructure } from "../../model-function/generate-structure/streamStructure.js";
+ import { streamText } from "../../model-function/generate-text/streamText.js";
+ import { StreamingTestServer } from "../../test/StreamingTestServer.js";
+ import { arrayFromAsync } from "../../test/arrayFromAsync.js";
+ import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+ import { OpenAIChatModel } from "./OpenAIChatModel.js";
+ describe("streamText", () => {
+     const server = new StreamingTestServer("https://api.openai.com/v1/chat/completions");
+     server.setupTestEnvironment();
+     it("should return only values from the first choice when using streamText", async () => {
+         server.responseChunks = [
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                 `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+             "data: [DONE]\n\n",
+         ];
+         const stream = await streamText(new OpenAIChatModel({
+             api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+             model: "gpt-3.5-turbo",
+             numberOfGenerations: 2,
+         }).withTextPrompt(), "test prompt");
+         expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
+     });
+ });
+ describe("streamStructure", () => {
+     const server = new StreamingTestServer("https://api.openai.com/v1/chat/completions");
+     server.setupTestEnvironment();
+     it("should return a text stream", async () => {
+         server.responseChunks = [
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"role":"assistant","content":null,` +
+                 `"function_call":{"name":"generateCharacter","arguments":""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"{\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":" "}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":" \\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"name"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"\\":\\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"M"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"ike\\"\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{"function_call":{"arguments":"}"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+             `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                 `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                 `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+             `data: [DONE]\n\n`,
+         ];
+         const stream = await streamStructure(new OpenAIChatModel({
+             api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+             model: "gpt-3.5-turbo",
+         })
+             .asFunctionCallStructureGenerationModel({
+             fnName: "generateCharacter",
+             fnDescription: "Generate character descriptions.",
+         })
+             .withTextPrompt(), zodSchema(z.object({ name: z.string() })), "generate a name");
+         // note: space moved to last chunk bc of trimming
+         expect(await arrayFromAsync(stream)).toStrictEqual([
+             { isComplete: false, value: {} },
+             { isComplete: false, value: { name: "" } },
+             { isComplete: false, value: { name: "M" } },
+             { isComplete: false, value: { name: "Mike" } },
+             { isComplete: true, value: { name: "Mike" } },
+         ]);
+     });
+ });
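
The five expected snapshots follow directly from concatenating the streamed function_call.arguments fragments: streamStructure evidently re-parses the partial JSON after each chunk and emits a snapshot only when the parsed value changes.

// Fragments in arrival order: "{\n", " ", " \"", "name", "\":\"", "M", "ike\"\n", "}"
// Accumulated text            -> parsed partial value (emitted on change only)
// '{\n' ... '{\n  "name'      -> {}                   (key still incomplete)
// '{\n  "name":"'             -> { name: "" }
// '{\n  "name":"M'            -> { name: "M" }
// '{\n  "name":"Mike"\n'      -> { name: "Mike" }
// '{\n  "name":"Mike"\n}'     -> { name: "Mike" }, isComplete: true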
package/model-provider/openai/OpenAIChatPromptTemplate.cjs
@@ -0,0 +1,114 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.chat = exports.instruction = exports.text = exports.identity = void 0;
+ const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
+ /**
+  * OpenAIMessage[] identity chat format.
+  */
+ function identity() {
+     return { format: (prompt) => prompt, stopSequences: [] };
+ }
+ exports.identity = identity;
+ /**
+  * Formats a text prompt as an OpenAI chat prompt.
+  */
+ function text() {
+     return {
+         format: (prompt) => [OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt)],
+         stopSequences: [],
+     };
+ }
+ exports.text = text;
+ /**
+  * Formats an instruction prompt as an OpenAI chat prompt.
+  */
+ function instruction() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
+             }
+             messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt.instruction));
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ exports.instruction = instruction;
+ /**
+  * Formats a chat prompt as an OpenAI chat prompt.
+  */
+ function chat() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
+             }
+             for (const { role, content } of prompt.messages) {
+                 switch (role) {
+                     case "user": {
+                         messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(content));
+                         break;
+                     }
+                     case "assistant": {
+                         if (typeof content === "string") {
+                             messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.assistant(content));
+                         }
+                         else {
+                             let text = "";
+                             const toolCalls = [];
+                             for (const part of content) {
+                                 switch (part.type) {
+                                     case "text": {
+                                         text += part.text;
+                                         break;
+                                     }
+                                     case "tool-call": {
+                                         toolCalls.push({
+                                             id: part.id,
+                                             type: "function",
+                                             function: {
+                                                 name: part.name,
+                                                 arguments: JSON.stringify(part.args),
+                                             },
+                                         });
+                                         break;
+                                     }
+                                     default: {
+                                         const _exhaustiveCheck = part;
+                                         throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+                                     }
+                                 }
+                             }
+                             messages.push({
+                                 role: "assistant",
+                                 content: text,
+                                 tool_calls: toolCalls,
+                             });
+                         }
+                         break;
+                     }
+                     case "tool": {
+                         for (const toolResponse of content) {
+                             messages.push({
+                                 role: "tool",
+                                 tool_call_id: toolResponse.id,
+                                 content: JSON.stringify(toolResponse.response),
+                             });
+                         }
+                         break;
+                     }
+                     default: {
+                         const _exhaustiveCheck = role;
+                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                     }
+                 }
+             }
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ exports.chat = chat;
package/model-provider/openai/{chat/OpenAIChatPromptTemplate.d.ts → OpenAIChatPromptTemplate.d.ts}
@@ -1,6 +1,6 @@
- import { TextGenerationPromptTemplate } from "../../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { ChatPrompt } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { InstructionPrompt } from "../../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
  import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
  /**
   * OpenAIMessage[] identity chat format.
package/model-provider/openai/OpenAIChatPromptTemplate.js
@@ -0,0 +1,107 @@
+ import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+ /**
+  * OpenAIMessage[] identity chat format.
+  */
+ export function identity() {
+     return { format: (prompt) => prompt, stopSequences: [] };
+ }
+ /**
+  * Formats a text prompt as an OpenAI chat prompt.
+  */
+ export function text() {
+     return {
+         format: (prompt) => [OpenAIChatMessage.user(prompt)],
+         stopSequences: [],
+     };
+ }
+ /**
+  * Formats an instruction prompt as an OpenAI chat prompt.
+  */
+ export function instruction() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push(OpenAIChatMessage.system(prompt.system));
+             }
+             messages.push(OpenAIChatMessage.user(prompt.instruction));
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
+ /**
+  * Formats a chat prompt as an OpenAI chat prompt.
+  */
+ export function chat() {
+     return {
+         format(prompt) {
+             const messages = [];
+             if (prompt.system != null) {
+                 messages.push(OpenAIChatMessage.system(prompt.system));
+             }
+             for (const { role, content } of prompt.messages) {
+                 switch (role) {
+                     case "user": {
+                         messages.push(OpenAIChatMessage.user(content));
+                         break;
+                     }
+                     case "assistant": {
+                         if (typeof content === "string") {
+                             messages.push(OpenAIChatMessage.assistant(content));
+                         }
+                         else {
+                             let text = "";
+                             const toolCalls = [];
+                             for (const part of content) {
+                                 switch (part.type) {
+                                     case "text": {
+                                         text += part.text;
+                                         break;
+                                     }
+                                     case "tool-call": {
+                                         toolCalls.push({
+                                             id: part.id,
+                                             type: "function",
+                                             function: {
+                                                 name: part.name,
+                                                 arguments: JSON.stringify(part.args),
+                                             },
+                                         });
+                                         break;
+                                     }
+                                     default: {
+                                         const _exhaustiveCheck = part;
+                                         throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+                                     }
+                                 }
+                             }
+                             messages.push({
+                                 role: "assistant",
+                                 content: text,
+                                 tool_calls: toolCalls,
+                             });
+                         }
+                         break;
+                     }
+                     case "tool": {
+                         for (const toolResponse of content) {
+                             messages.push({
+                                 role: "tool",
+                                 tool_call_id: toolResponse.id,
+                                 content: JSON.stringify(toolResponse.response),
+                             });
+                         }
+                         break;
+                     }
+                     default: {
+                         const _exhaustiveCheck = role;
+                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                     }
+                 }
+             }
+             return messages;
+         },
+         stopSequences: [],
+     };
+ }
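
Taken together, chat() maps modelfusion's provider-neutral ChatPrompt, including multi-part assistant content with tool calls and tool-role responses, onto OpenAI's wire format. A hedged end-to-end sketch (the prompt shape follows the code above; the tool call itself is invented for illustration):

// Illustration only; import path written as if inside the package.
import { chat } from "./OpenAIChatPromptTemplate.js";

const messages = chat().format({
    system: "You are a helpful assistant.",
    messages: [
        { role: "user", content: "What is the weather in Berlin?" },
        {
            role: "assistant",
            content: [
                { type: "text", text: "Let me look that up." },
                { type: "tool-call", id: "call-1", name: "getWeather", args: { city: "Berlin" } },
            ],
        },
        { role: "tool", content: [{ id: "call-1", response: { temperature: 18 } }] },
    ],
});
// Result, in OpenAI's shape:
// [ { role: "system", content: "You are a helpful assistant." },
//   { role: "user", content: "What is the weather in Berlin?" },
//   { role: "assistant", content: "Let me look that up.", tool_calls: [ { id: "call-1", type: "function",
//       function: { name: "getWeather", arguments: "{\"city\":\"Berlin\"}" } } ] },
//   { role: "tool", tool_call_id: "call-1", content: "{\"temperature\":18}" } ]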