modelfusion 0.103.0 → 0.105.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Files changed (181)
  1. package/CHANGELOG.md +56 -0
  2. package/model-function/Delta.d.ts +1 -2
  3. package/model-function/executeStreamCall.cjs +6 -4
  4. package/model-function/executeStreamCall.d.ts +2 -2
  5. package/model-function/executeStreamCall.js +6 -4
  6. package/model-function/generate-speech/streamSpeech.cjs +1 -2
  7. package/model-function/generate-speech/streamSpeech.js +1 -2
  8. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
  9. package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
  10. package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
  11. package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
  12. package/model-function/generate-structure/streamStructure.cjs +7 -8
  13. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  14. package/model-function/generate-structure/streamStructure.js +7 -8
  15. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
  16. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
  17. package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
  18. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
  19. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
  20. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
  21. package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
  22. package/model-function/generate-text/index.cjs +1 -0
  23. package/model-function/generate-text/index.d.ts +1 -0
  24. package/model-function/generate-text/index.js +1 -0
  25. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
  26. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
  27. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
  28. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
  29. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
  30. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
  31. package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
  32. package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
  33. package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
  34. package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
  35. package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
  36. package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
  37. package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
  38. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
  39. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
  40. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
  41. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
  42. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
  43. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
  44. package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
  45. package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
  46. package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
  47. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
  48. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
  49. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
  50. package/model-function/generate-text/prompt-template/index.cjs +1 -1
  51. package/model-function/generate-text/prompt-template/index.d.ts +1 -1
  52. package/model-function/generate-text/prompt-template/index.js +1 -1
  53. package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
  54. package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
  55. package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
  56. package/model-function/generate-text/streamText.cjs +27 -28
  57. package/model-function/generate-text/streamText.d.ts +1 -0
  58. package/model-function/generate-text/streamText.js +27 -28
  59. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
  60. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
  61. package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
  62. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
  63. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
  64. package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
  65. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
  66. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
  67. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  68. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
  69. package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
  70. package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
  71. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
  72. package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
  73. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
  74. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
  75. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
  76. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
  77. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
  78. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
  79. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
  80. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
  81. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
  82. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
  83. package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
  84. package/model-provider/mistral/MistralChatModel.cjs +30 -104
  85. package/model-provider/mistral/MistralChatModel.d.ts +49 -16
  86. package/model-provider/mistral/MistralChatModel.js +30 -104
  87. package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
  88. package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
  89. package/model-provider/mistral/MistralChatModel.test.js +49 -0
  90. package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
  91. package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
  92. package/model-provider/mistral/MistralPromptTemplate.js +13 -5
  93. package/model-provider/ollama/OllamaChatModel.cjs +7 -43
  94. package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
  95. package/model-provider/ollama/OllamaChatModel.js +7 -43
  96. package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
  97. package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
  98. package/model-provider/ollama/OllamaChatModel.test.js +25 -0
  99. package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
  100. package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
  101. package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
  102. package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
  103. package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
  104. package/model-provider/ollama/OllamaCompletionModel.js +23 -44
  105. package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
  106. package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
  107. package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
  108. package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
  109. package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
  110. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
  111. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
  112. package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
  113. package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
  114. package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
  115. package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
  116. package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
  117. package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
  118. package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
  119. package/model-provider/openai/OpenAIChatModel.test.js +92 -0
  120. package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
  121. package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
  122. package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
  123. package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
  124. package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
  125. package/model-provider/openai/OpenAICompletionModel.js +33 -85
  126. package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
  127. package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
  128. package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
  129. package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
  130. package/model-provider/openai/OpenAICostCalculator.js +1 -1
  131. package/model-provider/openai/OpenAIFacade.cjs +2 -2
  132. package/model-provider/openai/OpenAIFacade.d.ts +3 -3
  133. package/model-provider/openai/OpenAIFacade.js +2 -2
  134. package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
  135. package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
  136. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
  137. package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
  138. package/model-provider/openai/index.cjs +6 -6
  139. package/model-provider/openai/index.d.ts +5 -6
  140. package/model-provider/openai/index.js +5 -5
  141. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
  142. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
  143. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
  144. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
  145. package/package.json +5 -5
  146. package/test/JsonTestServer.cjs +33 -0
  147. package/test/JsonTestServer.d.ts +7 -0
  148. package/test/JsonTestServer.js +29 -0
  149. package/test/StreamingTestServer.cjs +55 -0
  150. package/test/StreamingTestServer.d.ts +7 -0
  151. package/test/StreamingTestServer.js +51 -0
  152. package/test/arrayFromAsync.cjs +13 -0
  153. package/test/arrayFromAsync.d.ts +1 -0
  154. package/test/arrayFromAsync.js +9 -0
  155. package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
  156. package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
  157. package/util/streaming/createEventSourceResponseHandler.js +5 -0
  158. package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
  159. package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
  160. package/util/streaming/createJsonStreamResponseHandler.js +5 -0
  161. package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
  162. package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
  163. package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
  164. package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
  165. package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
  166. package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
  167. package/model-function/generate-text/prompt-template/Content.cjs +0 -2
  168. package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
  169. package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
  170. package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
  171. package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
  172. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
  173. package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
  174. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
  175. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
  176. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
  177. /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
  178. /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
  179. /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
  180. /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
  181. /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs
@@ -0,0 +1,52 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.parseEventSourceStreamAsAsyncIterable = void 0;
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
+ const AsyncQueue_js_1 = require("../AsyncQueue.cjs");
+ const parseEventSourceStream_js_1 = require("./parseEventSourceStream.cjs");
+ async function parseEventSourceStreamAsAsyncIterable({ stream, schema, }) {
+     const queue = new AsyncQueue_js_1.AsyncQueue();
+     // process the stream asynchronously (no 'await' on purpose):
+     (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
+         .then(async (events) => {
+         try {
+             for await (const event of events) {
+                 const data = event.data;
+                 if (data === "[DONE]") {
+                     queue.close();
+                     return;
+                 }
+                 const parseResult = (0, parseJSON_js_1.safeParseJSON)({
+                     text: data,
+                     schema,
+                 });
+                 if (!parseResult.success) {
+                     queue.push({
+                         type: "error",
+                         error: parseResult.error,
+                     });
+                     // Note: the queue is not closed on purpose. Some providers might add additional
+                     // chunks that are not parsable, and ModelFusion should be resilient to that.
+                     continue;
+                 }
+                 const completionChunk = parseResult.data;
+                 queue.push({
+                     type: "delta",
+                     deltaValue: completionChunk,
+                 });
+             }
+         }
+         catch (error) {
+             queue.push({ type: "error", error });
+             queue.close();
+             return;
+         }
+     })
+         .catch((error) => {
+         queue.push({ type: "error", error });
+         queue.close();
+         return;
+     });
+     return queue;
+ }
+ exports.parseEventSourceStreamAsAsyncIterable = parseEventSourceStreamAsAsyncIterable;
package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts
@@ -0,0 +1,6 @@
+ import { Schema } from "../../core/schema/Schema.js";
+ import { Delta } from "../../model-function/Delta.js";
+ export declare function parseEventSourceStreamAsAsyncIterable<T>({ stream, schema, }: {
+     stream: ReadableStream<Uint8Array>;
+     schema: Schema<T>;
+ }): Promise<AsyncIterable<Delta<T>>>;
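For orientation, here is a minimal consumption sketch for this new helper. It is an editor's illustration, not part of the diff: the chunk shape is invented, the relative import paths assume a caller inside package/util/streaming/, and only calls visible elsewhere in this diff (ZodSchema, the Delta union) are used.

import { z } from "zod";
import { ZodSchema } from "../../core/schema/ZodSchema.js";
import { parseEventSourceStreamAsAsyncIterable } from "./parseEventSourceStreamAsAsyncIterable.js";

// Illustrative chunk shape -- real callers supply their provider's schema.
const chunkSchema = new ZodSchema(z.object({ text: z.string() }));

async function printTextDeltas(response: Response) {
  // resolves to an AsyncIterable<Delta<T>> fed by the SSE parser:
  const deltas = await parseEventSourceStreamAsAsyncIterable({
    stream: response.body!, // SSE body as ReadableStream<Uint8Array>
    schema: chunkSchema,
  });
  for await (const delta of deltas) {
    if (delta.type === "error") {
      console.error(delta.error); // unparsable chunk; iteration continues
    } else {
      process.stdout.write(delta.deltaValue.text); // parsed, typed chunk
    }
  }
}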
package/util/streaming/parseEventSourceStreamAsAsyncIterable.js
@@ -0,0 +1,48 @@
+ import { safeParseJSON } from "../../core/schema/parseJSON.js";
+ import { AsyncQueue } from "../AsyncQueue.js";
+ import { parseEventSourceStream } from "./parseEventSourceStream.js";
+ export async function parseEventSourceStreamAsAsyncIterable({ stream, schema, }) {
+     const queue = new AsyncQueue();
+     // process the stream asynchronously (no 'await' on purpose):
+     parseEventSourceStream({ stream })
+         .then(async (events) => {
+         try {
+             for await (const event of events) {
+                 const data = event.data;
+                 if (data === "[DONE]") {
+                     queue.close();
+                     return;
+                 }
+                 const parseResult = safeParseJSON({
+                     text: data,
+                     schema,
+                 });
+                 if (!parseResult.success) {
+                     queue.push({
+                         type: "error",
+                         error: parseResult.error,
+                     });
+                     // Note: the queue is not closed on purpose. Some providers might add additional
+                     // chunks that are not parsable, and ModelFusion should be resilient to that.
+                     continue;
+                 }
+                 const completionChunk = parseResult.data;
+                 queue.push({
+                     type: "delta",
+                     deltaValue: completionChunk,
+                 });
+             }
+         }
+         catch (error) {
+             queue.push({ type: "error", error });
+             queue.close();
+             return;
+         }
+     })
+         .catch((error) => {
+         queue.push({ type: "error", error });
+         queue.close();
+         return;
+     });
+     return queue;
+ }
package/util/streaming/parseJsonStreamAsAsyncIterable.cjs
@@ -0,0 +1,21 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.parseJsonStreamAsAsyncIterable = void 0;
+ const AsyncQueue_js_1 = require("../AsyncQueue.cjs");
+ const parseJsonStream_js_1 = require("./parseJsonStream.cjs");
+ async function parseJsonStreamAsAsyncIterable({ stream, schema, }) {
+     const queue = new AsyncQueue_js_1.AsyncQueue();
+     // process the stream asynchronously (no 'await' on purpose):
+     (0, parseJsonStream_js_1.parseJsonStream)({
+         stream,
+         schema,
+         process(event) {
+             queue.push({ type: "delta", deltaValue: event });
+         },
+         onDone() {
+             queue.close();
+         },
+     });
+     return queue;
+ }
+ exports.parseJsonStreamAsAsyncIterable = parseJsonStreamAsAsyncIterable;
package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts
@@ -0,0 +1,6 @@
+ import { Schema } from "../../core/schema/Schema.js";
+ import { Delta } from "../../model-function/Delta.js";
+ export declare function parseJsonStreamAsAsyncIterable<T>({ stream, schema, }: {
+     stream: ReadableStream<Uint8Array>;
+     schema: Schema<T>;
+ }): Promise<AsyncIterable<Delta<T>>>;
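A matching sketch for the JSON-stream variant, which suits newline-delimited JSON responses. Again an illustration only: the endpoint, request body, model name, and chunk shape are assumptions (loosely Ollama-style), and the imports assume a caller inside package/util/streaming/.

import { z } from "zod";
import { ZodSchema } from "../../core/schema/ZodSchema.js";
import { parseJsonStreamAsAsyncIterable } from "./parseJsonStreamAsAsyncIterable.js";

// Illustrative chunk shape for a newline-delimited JSON API (hypothetical).
const chunkSchema = new ZodSchema(
  z.object({ response: z.string(), done: z.boolean() })
);

async function streamCompletion(prompt: string) {
  const response = await fetch("http://localhost:11434/api/generate", {
    method: "POST",
    body: JSON.stringify({ model: "llama2", prompt }), // hypothetical request
  });
  const deltas = await parseJsonStreamAsAsyncIterable({
    stream: response.body!,
    schema: chunkSchema,
  });
  for await (const delta of deltas) {
    // this variant only emits "delta" items; parse errors surface via parseJsonStream
    if (delta.type === "delta" && !delta.deltaValue.done) {
      process.stdout.write(delta.deltaValue.response);
    }
  }
}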
package/util/streaming/parseJsonStreamAsAsyncIterable.js
@@ -0,0 +1,17 @@
+ import { AsyncQueue } from "../AsyncQueue.js";
+ import { parseJsonStream } from "./parseJsonStream.js";
+ export async function parseJsonStreamAsAsyncIterable({ stream, schema, }) {
+     const queue = new AsyncQueue();
+     // process the stream asynchronously (no 'await' on purpose):
+     parseJsonStream({
+         stream,
+         schema,
+         process(event) {
+             queue.push({ type: "delta", deltaValue: event });
+         },
+         onDone() {
+             queue.close();
+         },
+     });
+     return queue;
+ }
package/model-function/generate-text/prompt-template/Content.cjs
@@ -1,2 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
package/model-function/generate-text/prompt-template/Content.d.ts
@@ -1,20 +0,0 @@
- export type MultiModalInput = Array<Content>;
- export type Content = TextContent | ImageContent;
- export interface TextContent {
-     type: "text";
-     /**
-      * The text content.
-      */
-     text: string;
- }
- export interface ImageContent {
-     type: "image";
-     /**
-      * Base-64 encoded image.
-      */
-     base64Image: string;
-     /**
-      * Optional mime type of the image.
-      */
-     mimeType?: string;
- }
package/model-provider/openai/chat/OpenAIChatModel.test.cjs
@@ -1,61 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- const msw_1 = require("msw");
- const node_1 = require("msw/node");
- const streamText_js_1 = require("../../../model-function/generate-text/streamText.cjs");
- const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
- const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- let responseChunks = [];
- const server = (0, node_1.setupServer)(msw_1.http.post("https://api.openai.com/v1/chat/completions", () => {
-     const encoder = new TextEncoder();
-     const stream = new ReadableStream({
-         async start(controller) {
-             try {
-                 for (const chunk of responseChunks) {
-                     controller.enqueue(encoder.encode(chunk));
-                 }
-             }
-             finally {
-                 controller.close();
-             }
-         },
-     });
-     return new msw_1.HttpResponse(stream, {
-         status: 200,
-         headers: {
-             "Content-Type": "text/event-stream",
-             "Cache-Control": "no-cache",
-             Connection: "keep-alive",
-         },
-     });
- }));
- beforeAll(() => server.listen());
- beforeEach(() => {
-     responseChunks = [];
- });
- afterEach(() => server.resetHandlers());
- afterAll(() => server.close());
- describe("streamText", () => {
-     it("should return only values from the first choice when using streamText", async () => {
-         responseChunks = [
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
-             "data: [DONE]\n\n",
-         ];
-         const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
-             api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test" }),
-             model: "gpt-3.5-turbo",
-             numberOfGenerations: 2,
-         }).withTextPrompt(), "test prompt");
-         const chunks = [];
-         for await (const part of stream) {
-             chunks.push(part);
-         }
-         expect(chunks).toStrictEqual(["A"]);
-     });
- });
package/model-provider/openai/chat/OpenAIChatModel.test.js
@@ -1,59 +0,0 @@
- import { HttpResponse, http } from "msw";
- import { setupServer } from "msw/node";
- import { streamText } from "../../../model-function/generate-text/streamText.js";
- import { OpenAIChatModel } from "./OpenAIChatModel.js";
- import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- let responseChunks = [];
- const server = setupServer(http.post("https://api.openai.com/v1/chat/completions", () => {
-     const encoder = new TextEncoder();
-     const stream = new ReadableStream({
-         async start(controller) {
-             try {
-                 for (const chunk of responseChunks) {
-                     controller.enqueue(encoder.encode(chunk));
-                 }
-             }
-             finally {
-                 controller.close();
-             }
-         },
-     });
-     return new HttpResponse(stream, {
-         status: 200,
-         headers: {
-             "Content-Type": "text/event-stream",
-             "Cache-Control": "no-cache",
-             Connection: "keep-alive",
-         },
-     });
- }));
- beforeAll(() => server.listen());
- beforeEach(() => {
-     responseChunks = [];
- });
- afterEach(() => server.resetHandlers());
- afterAll(() => server.close());
- describe("streamText", () => {
-     it("should return only values from the first choice when using streamText", async () => {
-         responseChunks = [
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
-             `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
-             "data: [DONE]\n\n",
-         ];
-         const stream = await streamText(new OpenAIChatModel({
-             api: new OpenAIApiConfiguration({ apiKey: "test" }),
-             model: "gpt-3.5-turbo",
-             numberOfGenerations: 2,
-         }).withTextPrompt(), "test prompt");
-         const chunks = [];
-         for await (const part of stream) {
-             chunks.push(part);
-         }
-         expect(chunks).toStrictEqual(["A"]);
-     });
- });
package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs
@@ -1,72 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = exports.text = exports.identity = void 0;
- const ChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
- const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
- /**
-  * OpenAIMessage[] identity chat format.
-  */
- function identity() {
-     return { format: (prompt) => prompt, stopSequences: [] };
- }
- exports.identity = identity;
- /**
-  * Formats a text prompt as an OpenAI chat prompt.
-  */
- function text() {
-     return {
-         format: (prompt) => [OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt)],
-         stopSequences: [],
-     };
- }
- exports.text = text;
- /**
-  * Formats an instruction prompt as an OpenAI chat prompt.
-  */
- function instruction() {
-     return {
-         format(prompt) {
-             const messages = [];
-             if (prompt.system != null) {
-                 messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
-             }
-             messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt.instruction));
-             return messages;
-         },
-         stopSequences: [],
-     };
- }
- exports.instruction = instruction;
- /**
-  * Formats a chat prompt as an OpenAI chat prompt.
-  */
- function chat() {
-     return {
-         format(prompt) {
-             (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
-             const messages = [];
-             if (prompt.system != null) {
-                 messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
-             }
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(content));
-                         break;
-                     }
-                     case "assistant": {
-                         messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.assistant(content));
-                         break;
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             return messages;
-         },
-         stopSequences: [],
-     };
- }
- exports.chat = chat;
package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts
@@ -1,20 +0,0 @@
- import { TextGenerationPromptTemplate } from "../../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { MultiModalChatPrompt, TextChatPrompt } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { MultiModalInstructionPrompt, TextInstructionPrompt } from "../../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
- /**
-  * OpenAIMessage[] identity chat format.
-  */
- export declare function identity(): TextGenerationPromptTemplate<OpenAIChatPrompt, OpenAIChatPrompt>;
- /**
-  * Formats a text prompt as an OpenAI chat prompt.
-  */
- export declare function text(): TextGenerationPromptTemplate<string, OpenAIChatPrompt>;
- /**
-  * Formats an instruction prompt as an OpenAI chat prompt.
-  */
- export declare function instruction(): TextGenerationPromptTemplate<MultiModalInstructionPrompt | TextInstructionPrompt, OpenAIChatPrompt>;
- /**
-  * Formats a chat prompt as an OpenAI chat prompt.
-  */
- export declare function chat(): TextGenerationPromptTemplate<MultiModalChatPrompt | TextChatPrompt, OpenAIChatPrompt>;
package/model-provider/openai/chat/OpenAIChatPromptTemplate.js
@@ -1,65 +0,0 @@
- import { validateChatPrompt, } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
- /**
-  * OpenAIMessage[] identity chat format.
-  */
- export function identity() {
-     return { format: (prompt) => prompt, stopSequences: [] };
- }
- /**
-  * Formats a text prompt as an OpenAI chat prompt.
-  */
- export function text() {
-     return {
-         format: (prompt) => [OpenAIChatMessage.user(prompt)],
-         stopSequences: [],
-     };
- }
- /**
-  * Formats an instruction prompt as an OpenAI chat prompt.
-  */
- export function instruction() {
-     return {
-         format(prompt) {
-             const messages = [];
-             if (prompt.system != null) {
-                 messages.push(OpenAIChatMessage.system(prompt.system));
-             }
-             messages.push(OpenAIChatMessage.user(prompt.instruction));
-             return messages;
-         },
-         stopSequences: [],
-     };
- }
- /**
-  * Formats a chat prompt as an OpenAI chat prompt.
-  */
- export function chat() {
-     return {
-         format(prompt) {
-             validateChatPrompt(prompt);
-             const messages = [];
-             if (prompt.system != null) {
-                 messages.push(OpenAIChatMessage.system(prompt.system));
-             }
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         messages.push(OpenAIChatMessage.user(content));
-                         break;
-                     }
-                     case "assistant": {
-                         messages.push(OpenAIChatMessage.assistant(content));
-                         break;
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             return messages;
-         },
-         stopSequences: [],
-     };
- }
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs
@@ -1,156 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.createOpenAIChatDeltaIterableQueue = void 0;
- const zod_1 = require("zod");
- const AsyncQueue_js_1 = require("../../../util/AsyncQueue.cjs");
- const parseEventSourceStream_js_1 = require("../../../util/streaming/parseEventSourceStream.cjs");
- const parseJSON_js_1 = require("../../../core/schema/parseJSON.cjs");
- const ZodSchema_js_1 = require("../../../core/schema/ZodSchema.cjs");
- const chatCompletionChunkSchema = zod_1.z.object({
-     object: zod_1.z.literal("chat.completion.chunk"),
-     id: zod_1.z.string(),
-     choices: zod_1.z.array(zod_1.z.object({
-         delta: zod_1.z.object({
-             role: zod_1.z.enum(["assistant", "user"]).optional(),
-             content: zod_1.z.string().nullable().optional(),
-             function_call: zod_1.z
-                 .object({
-                 name: zod_1.z.string().optional(),
-                 arguments: zod_1.z.string().optional(),
-             })
-                 .optional(),
-             tool_calls: zod_1.z
-                 .array(zod_1.z.object({
-                 id: zod_1.z.string(),
-                 type: zod_1.z.literal("function"),
-                 function: zod_1.z.object({
-                     name: zod_1.z.string(),
-                     arguments: zod_1.z.string(),
-                 }),
-             }))
-                 .optional(),
-         }),
-         finish_reason: zod_1.z
-             .enum([
-             "stop",
-             "length",
-             "tool_calls",
-             "content_filter",
-             "function_call",
-         ])
-             .nullable()
-             .optional(),
-         index: zod_1.z.number(),
-     })),
-     created: zod_1.z.number(),
-     model: zod_1.z.string(),
-     system_fingerprint: zod_1.z.string().optional().nullable(),
- });
- const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.union([
-     chatCompletionChunkSchema,
-     zod_1.z.object({
-         object: zod_1.z.string().refine((obj) => obj !== "chat.completion.chunk", {
-             message: "Object must be 'chat.completion.chunk'",
-         }),
-     }),
- ]));
- async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
-     const queue = new AsyncQueue_js_1.AsyncQueue();
-     const streamDelta = [];
-     // process the stream asynchronously (no 'await' on purpose):
-     (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
-         .then(async (events) => {
-         try {
-             for await (const event of events) {
-                 const data = event.data;
-                 if (data === "[DONE]") {
-                     queue.close();
-                     return;
-                 }
-                 const parseResult = (0, parseJSON_js_1.safeParseJSON)({
-                     text: data,
-                     schema: chatResponseStreamEventSchema,
-                 });
-                 if (!parseResult.success) {
-                     queue.push({
-                         type: "error",
-                         error: parseResult.error,
-                     });
-                     // Note: the queue is not closed on purpose. Some providers might add additional
-                     // chunks that are not parsable, and ModelFusion should be resilient to that.
-                     continue;
-                 }
-                 const eventData = parseResult.data;
-                 // ignore objects that are not "chat.completion.chunk" events.
-                 // Such additional objects are e.g. sent by Azure OpenAI.
-                 if (eventData.object !== "chat.completion.chunk") {
-                     continue;
-                 }
-                 const completionChunk = eventData;
-                 // reset delta for all existing streamDeltas
-                 for (const delta of streamDelta) {
-                     delta.delta = undefined;
-                 }
-                 for (let i = 0; i < completionChunk.choices.length; i++) {
-                     const eventChoice = completionChunk.choices[i];
-                     const index = eventChoice.index;
-                     const delta = eventChoice.delta;
-                     if (streamDelta[index] == null) {
-                         streamDelta[index] = {
-                             role: undefined,
-                             content: "",
-                             isComplete: false,
-                             delta,
-                         };
-                     }
-                     const choice = streamDelta[index];
-                     choice.delta = delta;
-                     if (eventChoice.finish_reason != null) {
-                         choice.isComplete = true;
-                     }
-                     if (delta.content != undefined) {
-                         choice.content += delta.content;
-                     }
-                     if (delta.function_call != undefined) {
-                         if (choice.function_call == undefined) {
-                             choice.function_call = {
-                                 name: "",
-                                 arguments: "",
-                             };
-                         }
-                         if (delta.function_call.name != undefined) {
-                             choice.function_call.name += delta.function_call.name;
-                         }
-                         if (delta.function_call.arguments != undefined) {
-                             choice.function_call.arguments +=
-                                 delta.function_call.arguments;
-                         }
-                     }
-                     if (delta.role != undefined) {
-                         choice.role = delta.role;
-                     }
-                 }
-                 // Since we're mutating the choices array in an async scenario,
-                 // we need to make a deep copy:
-                 const streamDeltaDeepCopy = JSON.parse(JSON.stringify(streamDelta));
-                 queue.push({
-                     type: "delta",
-                     fullDelta: streamDeltaDeepCopy,
-                     valueDelta: extractDeltaValue(streamDeltaDeepCopy),
-                 });
-             }
-         }
-         catch (error) {
-             queue.push({ type: "error", error });
-             queue.close();
-             return;
-         }
-     })
-         .catch((error) => {
-         queue.push({ type: "error", error });
-         queue.close();
-         return;
-     });
-     return queue;
- }
- exports.createOpenAIChatDeltaIterableQueue = createOpenAIChatDeltaIterableQueue;
package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts
@@ -1,19 +0,0 @@
- import { Delta } from "../../../model-function/Delta.js";
- export type OpenAIChatDelta = Array<{
-     role: "assistant" | "user" | undefined;
-     content: string;
-     function_call?: {
-         name: string;
-         arguments: string;
-     };
-     isComplete: boolean;
-     delta: {
-         role?: "assistant" | "user";
-         content?: string | null;
-         function_call?: {
-             name?: string;
-             arguments?: string;
-         };
-     } | undefined;
- }>;
- export declare function createOpenAIChatDeltaIterableQueue<VALUE>(stream: ReadableStream<Uint8Array>, extractDeltaValue: (delta: OpenAIChatDelta) => VALUE): Promise<AsyncIterable<Delta<VALUE>>>;
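Taken together, these removals and the new util/streaming helpers show the direction of the refactor: instead of an OpenAI-specific queue that accumulated every choice into an OpenAIChatDelta array (fullDelta plus an extracted valueDelta), streaming now goes through the generic parsers above, which emit each raw chunk as a Delta<T>. The following sketch of the shape consumers now receive is inferred from the queue.push() calls in the new code; the authoritative definition is in package/model-function/Delta.d.ts (+1 -0 / -2 in this diff) and may differ in detail.

// Inferred by the editor from the new parsers' queue.push() calls -- see
// package/model-function/Delta.d.ts for the actual definition.
type Delta<T> =
  | { type: "delta"; deltaValue: T }
  | { type: "error"; error: unknown };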