modelfusion 0.121.2 → 0.122.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/CHANGELOG.md +41 -1
  2. package/README.md +86 -84
  3. package/classifier/SemanticClassifier.cjs +8 -2
  4. package/classifier/SemanticClassifier.js +8 -2
  5. package/model-function/ModelCallEvent.d.ts +3 -0
  6. package/model-function/embed/embed.cjs +14 -14
  7. package/model-function/embed/embed.d.ts +24 -18
  8. package/model-function/embed/embed.js +14 -14
  9. package/model-function/generate-image/generateImage.cjs +6 -6
  10. package/model-function/generate-image/generateImage.d.ts +12 -9
  11. package/model-function/generate-image/generateImage.js +6 -6
  12. package/model-function/generate-speech/generateSpeech.cjs +7 -7
  13. package/model-function/generate-speech/generateSpeech.d.ts +12 -9
  14. package/model-function/generate-speech/generateSpeech.js +7 -7
  15. package/model-function/generate-speech/streamSpeech.cjs +6 -6
  16. package/model-function/generate-speech/streamSpeech.d.ts +12 -8
  17. package/model-function/generate-speech/streamSpeech.js +6 -6
  18. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
  19. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
  20. package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
  21. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
  22. package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
  23. package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
  24. package/model-function/generate-structure/generateStructure.cjs +8 -8
  25. package/model-function/generate-structure/generateStructure.d.ts +17 -10
  26. package/model-function/generate-structure/generateStructure.js +8 -8
  27. package/model-function/generate-structure/streamStructure.cjs +6 -6
  28. package/model-function/generate-structure/streamStructure.d.ts +16 -10
  29. package/model-function/generate-structure/streamStructure.js +6 -6
  30. package/model-function/generate-text/generateText.cjs +6 -6
  31. package/model-function/generate-text/generateText.d.ts +12 -9
  32. package/model-function/generate-text/generateText.js +6 -6
  33. package/model-function/generate-text/streamText.cjs +6 -6
  34. package/model-function/generate-text/streamText.d.ts +12 -8
  35. package/model-function/generate-text/streamText.js +6 -6
  36. package/model-function/generate-transcription/generateTranscription.cjs +3 -3
  37. package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
  38. package/model-function/generate-transcription/generateTranscription.js +3 -3
  39. package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
  40. package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
  41. package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
  42. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
  43. package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
  44. package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
  45. package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
  46. package/model-provider/mistral/MistralChatModel.test.js +15 -8
  47. package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
  48. package/model-provider/ollama/OllamaChatModel.test.js +6 -1
  49. package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
  50. package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
  51. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
  52. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  53. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
  54. package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
  55. package/model-provider/openai/OpenAIChatModel.test.js +21 -14
  56. package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
  57. package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
  58. package/package.json +1 -1
  59. package/tool/execute-tool/executeTool.cjs +5 -5
  60. package/tool/execute-tool/executeTool.d.ts +8 -4
  61. package/tool/execute-tool/executeTool.js +5 -5
  62. package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
  63. package/tool/execute-tool/safeExecuteToolCall.js +1 -1
  64. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
  65. package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
  66. package/tool/generate-tool-call/generateToolCall.cjs +7 -7
  67. package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
  68. package/tool/generate-tool-call/generateToolCall.js +7 -7
  69. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
  70. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
  71. package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
  72. package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
  73. package/tool/generate-tool-calls/generateToolCalls.js +3 -3
  74. package/tool/use-tool/useTool.cjs +2 -2
  75. package/tool/use-tool/useTool.d.ts +5 -1
  76. package/tool/use-tool/useTool.js +2 -2
  77. package/tool/use-tools/useTools.cjs +8 -2
  78. package/tool/use-tools/useTools.d.ts +5 -1
  79. package/tool/use-tools/useTools.js +8 -2
  80. package/vector-index/VectorIndexRetriever.cjs +5 -1
  81. package/vector-index/VectorIndexRetriever.js +5 -1
  82. package/vector-index/upsertIntoVectorIndex.cjs +5 -1
  83. package/vector-index/upsertIntoVectorIndex.js +5 -1

package/model-function/generate-text/generateText.cjs
@@ -2,8 +2,8 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.generateText = void 0;
  const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
- async function generateText(model, prompt, options) {
- const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
+ async function generateText({ model, prompt, fullResponse, ...options }) {
+ const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
  functionType: "generate-text",
  input: prompt,
  model,
@@ -66,16 +66,16 @@ async function generateText(model, prompt, options) {
  };
  },
  });
- const textGenerationResults = fullResponse.value;
+ const textGenerationResults = callResponse.value;
  const firstResult = textGenerationResults[0];
- return options?.fullResponse
+ return fullResponse
  ? {
  text: firstResult.text,
  finishReason: firstResult.finishReason,
  texts: textGenerationResults.map((textGeneration) => textGeneration.text),
  textGenerationResults,
- rawResponse: fullResponse.rawResponse,
- metadata: fullResponse.metadata,
+ rawResponse: callResponse.rawResponse,
+ metadata: callResponse.metadata,
  }
  : firstResult.text;
  }

package/model-function/generate-text/generateText.d.ts
@@ -12,23 +12,26 @@ import { TextGenerationFinishReason, TextGenerationResult } from "./TextGenerati
  * @see https://modelfusion.dev/guide/function/generate-text
  *
  * @example
- * const text = await generateText(
- * openai.CompletionTextGenerator(...),
- * "Write a short story about a robot learning to love:\n\n"
- * );
+ * const text = await generateText({
+ * model: openai.CompletionTextGenerator(...),
+ * prompt: "Write a short story about a robot learning to love:\n\n"
+ * });
  *
  * @param {TextGenerationModel<PROMPT, TextGenerationModelSettings>} model - The text generation model to use.
  * @param {PROMPT} prompt - The prompt to use for text generation.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
  *
  * @returns {Promise<string>} - A promise that resolves to the generated text.
  */
- export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions & {
+ export declare function generateText<PROMPT>(args: {
+ model: TextGenerationModel<PROMPT, TextGenerationModelSettings>;
+ prompt: PROMPT;
  fullResponse?: false;
- }): Promise<string>;
- export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
+ } & FunctionOptions): Promise<string>;
+ export declare function generateText<PROMPT>(args: {
+ model: TextGenerationModel<PROMPT, TextGenerationModelSettings>;
+ prompt: PROMPT;
  fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
  text: string;
  finishReason: TextGenerationFinishReason;
  texts: string[];

package/model-function/generate-text/generateText.js
@@ -1,6 +1,6 @@
  import { executeStandardCall } from "../executeStandardCall.js";
- export async function generateText(model, prompt, options) {
- const fullResponse = await executeStandardCall({
+ export async function generateText({ model, prompt, fullResponse, ...options }) {
+ const callResponse = await executeStandardCall({
  functionType: "generate-text",
  input: prompt,
  model,
@@ -63,16 +63,16 @@ export async function generateText(model, prompt, options) {
  };
  },
  });
- const textGenerationResults = fullResponse.value;
+ const textGenerationResults = callResponse.value;
  const firstResult = textGenerationResults[0];
- return options?.fullResponse
+ return fullResponse
  ? {
  text: firstResult.text,
  finishReason: firstResult.finishReason,
  texts: textGenerationResults.map((textGeneration) => textGeneration.text),
  textGenerationResults,
- rawResponse: fullResponse.rawResponse,
- metadata: fullResponse.metadata,
+ rawResponse: callResponse.rawResponse,
+ metadata: callResponse.metadata,
  }
  : firstResult.text;
  }
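
The generateText hunks above all make the same change: the positional signature generateText(model, prompt, options) becomes a single arguments object, the fullResponse flag moves out of options into that object, and the local variable that used to shadow the flag is renamed to callResponse. A minimal migration sketch, assuming the root "modelfusion" exports referenced in the JSDoc and an OpenAI completion model id of "gpt-3.5-turbo-instruct" (the model id is not part of this diff):

import { generateText, openai } from "modelfusion";

// 0.122.0: one arguments object instead of (model, prompt, options)
const text = await generateText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }), // assumed model id
  prompt: "Write a short story about a robot learning to love:\n\n",
});

// fullResponse: true now sits next to model and prompt and resolves to the full
// result object (text, finishReason, texts, textGenerationResults, rawResponse, metadata).
const { text: story, finishReason, metadata } = await generateText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }),
  prompt: "Write a short story about a robot learning to love:\n\n",
  fullResponse: true,
});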

package/model-function/generate-text/streamText.cjs
@@ -2,7 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.streamText = void 0;
  const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
- async function streamText(model, prompt, options) {
+ async function streamText({ model, prompt, fullResponse, ...options }) {
  const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
  let accumulatedText = "";
  let isFirstDelta = true;
@@ -11,7 +11,7 @@ async function streamText(model, prompt, options) {
  const textPromise = new Promise((resolve) => {
  resolveText = resolve;
  });
- const fullResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
+ const callResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
  functionType: "stream-text",
  input: prompt,
  model,
@@ -43,12 +43,12 @@ async function streamText(model, prompt, options) {
  resolveText(accumulatedText);
  },
  });
- return options?.fullResponse
+ return fullResponse
  ? {
- textStream: fullResponse.value,
+ textStream: callResponse.value,
  text: textPromise,
- metadata: fullResponse.metadata,
+ metadata: callResponse.metadata,
  }
- : fullResponse.value;
+ : callResponse.value;
  }
  exports.streamText = streamText;

package/model-function/generate-text/streamText.d.ts
@@ -11,10 +11,10 @@ import { TextStreamingModel } from "./TextGenerationModel.js";
  * @see https://modelfusion.dev/guide/function/generate-text
  *
  * @example
- * const textStream = await streamText(
- * openai.CompletionTextGenerator(...),
- * "Write a short story about a robot learning to love:\n\n"
- * );
+ * const textStream = await streamText({
+ * model: openai.CompletionTextGenerator(...),
+ * prompt: "Write a short story about a robot learning to love:\n\n"
+ * });
  *
  * for await (const textPart of textStream) {
  * // ...
@@ -26,12 +26,16 @@ import { TextStreamingModel } from "./TextGenerationModel.js";
  *
  * @returns {AsyncIterableResultPromise<string>} An async iterable promise that yields the generated text.
  */
- export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options?: FunctionOptions & {
+ export declare function streamText<PROMPT>(args: {
+ model: TextStreamingModel<PROMPT>;
+ prompt: PROMPT;
  fullResponse?: false;
- }): Promise<AsyncIterable<string>>;
- export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options: FunctionOptions & {
+ } & FunctionOptions): Promise<AsyncIterable<string>>;
+ export declare function streamText<PROMPT>(args: {
+ model: TextStreamingModel<PROMPT>;
+ prompt: PROMPT;
  fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
  textStream: AsyncIterable<string>;
  text: PromiseLike<string>;
  metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;

package/model-function/generate-text/streamText.js
@@ -1,5 +1,5 @@
  import { executeStreamCall } from "../executeStreamCall.js";
- export async function streamText(model, prompt, options) {
+ export async function streamText({ model, prompt, fullResponse, ...options }) {
  const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
  let accumulatedText = "";
  let isFirstDelta = true;
@@ -8,7 +8,7 @@ export async function streamText(model, prompt, options) {
  const textPromise = new Promise((resolve) => {
  resolveText = resolve;
  });
- const fullResponse = await executeStreamCall({
+ const callResponse = await executeStreamCall({
  functionType: "stream-text",
  input: prompt,
  model,
@@ -40,11 +40,11 @@ export async function streamText(model, prompt, options) {
  resolveText(accumulatedText);
  },
  });
- return options?.fullResponse
+ return fullResponse
  ? {
- textStream: fullResponse.value,
+ textStream: callResponse.value,
  text: textPromise,
- metadata: fullResponse.metadata,
+ metadata: callResponse.metadata,
  }
- : fullResponse.value;
+ : callResponse.value;
  }
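
streamText gets the same treatment: model, prompt, and fullResponse now travel in one object, and the call still resolves to a plain AsyncIterable<string> unless fullResponse: true is set. A short usage sketch under the same assumptions as the generateText example above:

import { streamText, openai } from "modelfusion";

const textStream = await streamText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }), // assumed model id
  prompt: "Write a short story about a robot learning to love:\n\n",
});

// Iterate over the streamed text parts as they arrive.
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}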

package/model-function/generate-transcription/generateTranscription.cjs
@@ -2,8 +2,8 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.generateTranscription = void 0;
  const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
- async function generateTranscription(model, data, options) {
- const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
+ async function generateTranscription({ model, data, fullResponse, ...options }) {
+ const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
  functionType: "generate-transcription",
  input: data,
  model,
@@ -16,6 +16,6 @@ async function generateTranscription(model, data, options) {
  };
  },
  });
- return options?.fullResponse ? fullResponse : fullResponse.value;
+ return fullResponse ? callResponse : callResponse.value;
  }
  exports.generateTranscription = generateTranscription;

package/model-function/generate-transcription/generateTranscription.d.ts
@@ -9,23 +9,26 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const transcription = await generateTranscription(
- * openai.Transcriber({ model: "whisper-1" }),
- * { type: "mp3", data }
- * );
+ * const transcription = await generateTranscription({
+ * model: openai.Transcriber({ model: "whisper-1" }),
+ * data: { type: "mp3", data }
+ * });
  *
  * @param {TranscriptionModel<DATA, TranscriptionModelSettings>} model - The model to use for transcription.
  * @param {DATA} data - The data to transcribe.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
  *
  * @returns {Promise<string>} A promise that resolves to the transcribed text.
  */
- export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options?: FunctionOptions & {
+ export declare function generateTranscription<DATA>(args: {
+ model: TranscriptionModel<DATA, TranscriptionModelSettings>;
+ data: DATA;
  fullResponse?: false;
- }): Promise<string>;
- export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options: FunctionOptions & {
+ } & FunctionOptions): Promise<string>;
+ export declare function generateTranscription<DATA>(args: {
+ model: TranscriptionModel<DATA, TranscriptionModelSettings>;
+ data: DATA;
  fullResponse: true;
- }): Promise<{
+ } & FunctionOptions): Promise<{
  value: string;
  rawResponse: unknown;
  metadata: ModelCallMetadata;

package/model-function/generate-transcription/generateTranscription.js
@@ -1,6 +1,6 @@
  import { executeStandardCall } from "../executeStandardCall.js";
- export async function generateTranscription(model, data, options) {
- const fullResponse = await executeStandardCall({
+ export async function generateTranscription({ model, data, fullResponse, ...options }) {
+ const callResponse = await executeStandardCall({
  functionType: "generate-transcription",
  input: data,
  model,
@@ -13,5 +13,5 @@ export async function generateTranscription(model, data, options) {
  };
  },
  });
- return options?.fullResponse ? fullResponse : fullResponse.value;
+ return fullResponse ? callResponse : callResponse.value;
  }
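
generateTranscription follows the same pattern: model and data become named properties, and with fullResponse: true the promise resolves to { value, rawResponse, metadata } instead of just the transcript string. A sketch based on the JSDoc example in the .d.ts hunk above:

import fs from "node:fs";
import { generateTranscription, openai } from "modelfusion";

const data = await fs.promises.readFile("data/test.mp3");

// Resolves to the transcribed text; add fullResponse: true to also get rawResponse and metadata.
const transcription = await generateTranscription({
  model: openai.Transcriber({ model: "whisper-1" }),
  data: { type: "mp3", data },
});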

package/model-provider/cohere/CohereTextGenerationModel.d.ts
@@ -60,8 +60,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
  doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
  rawResponse: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -80,8 +80,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  }>;
  restoreGeneratedTexts(rawResponse: unknown): {
  rawResponse: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -100,8 +100,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  };
  processTextGenerationResponse(rawResponse: CohereTextGenerationResponse): {
  rawResponse: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -124,8 +124,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  is_finished: false;
  } | {
  response: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -190,8 +190,8 @@ declare const cohereTextGenerationResponseSchema: z.ZodObject<{
  };
  }>>;
  }, "strip", z.ZodTypeAny, {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -203,8 +203,8 @@ declare const cohereTextGenerationResponseSchema: z.ZodObject<{
  };
  } | undefined;
  }, {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -263,8 +263,8 @@ declare const cohereTextStreamChunkSchema: z.ZodDiscriminatedUnion<"is_finished"
  };
  }>>;
  }, "strip", z.ZodTypeAny, {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -276,8 +276,8 @@ declare const cohereTextStreamChunkSchema: z.ZodDiscriminatedUnion<"is_finished"
  };
  } | undefined;
  }, {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -291,8 +291,8 @@ declare const cohereTextStreamChunkSchema: z.ZodDiscriminatedUnion<"is_finished"
  }>;
  }, "strip", z.ZodTypeAny, {
  response: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -308,8 +308,8 @@ declare const cohereTextStreamChunkSchema: z.ZodDiscriminatedUnion<"is_finished"
  is_finished: true;
  }, {
  response: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -336,8 +336,8 @@ export declare const CohereTextGenerationResponseFormat: {
  json: {
  stream: boolean;
  handler: ResponseHandler<{
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;
@@ -363,8 +363,8 @@ export declare const CohereTextGenerationResponseFormat: {
  is_finished: false;
  } | {
  response: {
- id: string;
  prompt: string;
+ id: string;
  generations: {
  text: string;
  id: string;

package/model-provider/cohere/CohereTextGenerationModel.test.cjs
@@ -19,10 +19,13 @@ describe("streamText", () => {
  `"text":"Hello, world!","finish_reason":"COMPLETE"}],` +
  `"prompt":"hello"}}\n`,
  ];
- const stream = await (0, streamText_js_1.streamText)(new CohereTextGenerationModel_js_1.CohereTextGenerationModel({
- api: new CohereApiConfiguration_js_1.CohereApiConfiguration({ apiKey: "test-key" }),
- model: "command-light",
- }), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new CohereTextGenerationModel_js_1.CohereTextGenerationModel({
+ api: new CohereApiConfiguration_js_1.CohereApiConfiguration({ apiKey: "test-key" }),
+ model: "command-light",
+ }),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",

package/model-provider/cohere/CohereTextGenerationModel.test.js
@@ -17,10 +17,13 @@ describe("streamText", () => {
  `"text":"Hello, world!","finish_reason":"COMPLETE"}],` +
  `"prompt":"hello"}}\n`,
  ];
- const stream = await streamText(new CohereTextGenerationModel({
- api: new CohereApiConfiguration({ apiKey: "test-key" }),
- model: "command-light",
- }), "hello");
+ const stream = await streamText({
+ model: new CohereTextGenerationModel({
+ api: new CohereApiConfiguration({ apiKey: "test-key" }),
+ model: "command-light",
+ }),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",

package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts
@@ -157,9 +157,9 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
  doGenerateTexts(prompt: LlamaCppCompletionPrompt, options: FunctionCallOptions): Promise<{
  rawResponse: {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -217,9 +217,9 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
  restoreGeneratedTexts(rawResponse: unknown): {
  rawResponse: {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -277,9 +277,9 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
  processTextGenerationResponse(rawResponse: LlamaCppTextGenerationResponse): {
  rawResponse: {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -336,9 +336,9 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
  };
  doStreamText(prompt: LlamaCppCompletionPrompt, options: FunctionCallOptions): Promise<AsyncIterable<Delta<{
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -515,9 +515,9 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  truncated: z.ZodBoolean;
  }, "strip", z.ZodTypeAny, {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -563,9 +563,9 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  truncated: boolean;
  }, {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -736,9 +736,9 @@ declare const llamaCppTextStreamChunkSchema: z.ZodDiscriminatedUnion<"stop", [z.
  truncated: z.ZodBoolean;
  }, "strip", z.ZodTypeAny, {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -784,9 +784,9 @@ declare const llamaCppTextStreamChunkSchema: z.ZodDiscriminatedUnion<"stop", [z.
  truncated: boolean;
  }, {
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -844,9 +844,9 @@ export declare const LlamaCppCompletionResponseFormat: {
  stream: false;
  handler: ResponseHandler<{
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;
@@ -902,9 +902,9 @@ export declare const LlamaCppCompletionResponseFormat: {
  response: Response;
  }) => Promise<AsyncIterable<Delta<{
  model: string;
+ prompt: string;
  stop: true;
  content: string;
- prompt: string;
  generation_settings: {
  model: string;
  stream: boolean;

package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs
@@ -26,7 +26,10 @@ describe("streamText", () => {
  `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
  `"tokens_predicted":69,"truncated":false}\n\n`,
  ];
- const stream = await (0, streamText_js_1.streamText)(new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel().withTextPrompt(), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel().withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",

package/model-provider/llamacpp/LlamaCppCompletionModel.test.js
@@ -24,7 +24,10 @@ describe("streamText", () => {
  `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
  `"tokens_predicted":69,"truncated":false}\n\n`,
  ];
- const stream = await streamText(new LlamaCppCompletionModel().withTextPrompt(), "hello");
+ const stream = await streamText({
+ model: new LlamaCppCompletionModel().withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",

package/model-provider/mistral/MistralChatModel.test.cjs
@@ -29,10 +29,13 @@ describe("streamText", () => {
  ];
  });
  it("should return a text stream", async () => {
- const stream = await (0, streamText_js_1.streamText)(new MistralChatModel_js_1.MistralChatModel({
- api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
- model: "mistral-tiny",
- }).withTextPrompt(), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new MistralChatModel_js_1.MistralChatModel({
+ api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
+ model: "mistral-tiny",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",
@@ -41,10 +41,14 @@ describe("streamText", () => {
  ]);
  });
  it("should return text", async () => {
- const { text } = await (0, streamText_js_1.streamText)(new MistralChatModel_js_1.MistralChatModel({
- api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
- model: "mistral-tiny",
- }).withTextPrompt(), "hello", { fullResponse: true });
+ const { text } = await (0, streamText_js_1.streamText)({
+ model: new MistralChatModel_js_1.MistralChatModel({
+ api: new MistralApiConfiguration_js_1.MistralApiConfiguration({ apiKey: "test-key" }),
+ model: "mistral-tiny",
+ }).withTextPrompt(),
+ prompt: "hello",
+ fullResponse: true,
+ });
  expect(await text).toStrictEqual("Hello, world!");
  });
  });

package/model-provider/mistral/MistralChatModel.test.js
@@ -27,10 +27,13 @@ describe("streamText", () => {
  ];
  });
  it("should return a text stream", async () => {
- const stream = await streamText(new MistralChatModel({
- api: new MistralApiConfiguration({ apiKey: "test-key" }),
- model: "mistral-tiny",
- }).withTextPrompt(), "hello");
+ const stream = await streamText({
+ model: new MistralChatModel({
+ api: new MistralApiConfiguration({ apiKey: "test-key" }),
+ model: "mistral-tiny",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",
@@ -39,10 +39,14 @@ describe("streamText", () => {
  ]);
  });
  it("should return text", async () => {
- const { text } = await streamText(new MistralChatModel({
- api: new MistralApiConfiguration({ apiKey: "test-key" }),
- model: "mistral-tiny",
- }).withTextPrompt(), "hello", { fullResponse: true });
+ const { text } = await streamText({
+ model: new MistralChatModel({
+ api: new MistralApiConfiguration({ apiKey: "test-key" }),
+ model: "mistral-tiny",
+ }).withTextPrompt(),
+ prompt: "hello",
+ fullResponse: true,
+ });
  expect(await text).toStrictEqual("Hello, world!");
  });
  });
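
The second Mistral test exercises the fullResponse: true branch of streamText, which per the streamText.d.ts hunk above resolves to { textStream, text, metadata }, where text is a promise for the fully accumulated text. A minimal sketch of that pattern, reusing the assumed OpenAI completion model for illustration:

import { streamText, openai } from "modelfusion";

const { textStream, text, metadata } = await streamText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }), // assumed model id
  prompt: "Write a short story about a robot learning to love:\n\n",
  fullResponse: true,
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}

// `text` resolves once the stream has finished; `metadata` describes the model call.
console.log(await text);
console.log(metadata);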

package/model-provider/ollama/OllamaChatModel.test.cjs
@@ -16,7 +16,12 @@ describe("streamText", () => {
  `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
  `"eval_count":317,"eval_duration":4639772000}\n`,
  ];
- const stream = await (0, streamText_js_1.streamText)(new OllamaChatModel_js_1.OllamaChatModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+ const stream = await (0, streamText_js_1.streamText)({
+ model: new OllamaChatModel_js_1.OllamaChatModel({
+ model: "mistral:text",
+ }).withTextPrompt(),
+ prompt: "hello",
+ });
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",