modelfusion 0.73.1 → 0.74.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +34 -21
  2. package/core/api/retryWithExponentialBackoff.cjs +3 -2
  3. package/core/api/retryWithExponentialBackoff.js +3 -2
  4. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +17 -1
  5. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +8 -1
  6. package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +15 -0
  7. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +12 -4
  8. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +4 -0
  9. package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +10 -3
  10. package/model-function/generate-text/prompt-format/InstructionPrompt.d.ts +2 -8
  11. package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +19 -2
  12. package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +11 -0
  13. package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +17 -1
  14. package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +9 -4
  15. package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +4 -0
  16. package/model-function/generate-text/prompt-format/TextPromptFormat.js +7 -3
  17. package/model-provider/anthropic/AnthropicPromptFormat.cjs +17 -6
  18. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -0
  19. package/model-provider/anthropic/AnthropicPromptFormat.js +15 -5
  20. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +6 -0
  21. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +4 -0
  22. package/model-provider/anthropic/AnthropicTextGenerationModel.js +7 -1
  23. package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.cjs +0 -3
  24. package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.js +0 -3
  25. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +17 -0
  26. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +7 -0
  27. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +17 -0
  28. package/model-provider/ollama/OllamaTextGenerationModel.cjs +7 -0
  29. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +2 -0
  30. package/model-provider/ollama/OllamaTextGenerationModel.js +7 -0
  31. package/model-provider/openai/chat/OpenAIChatModel.cjs +6 -0
  32. package/model-provider/openai/chat/OpenAIChatModel.d.ts +4 -0
  33. package/model-provider/openai/chat/OpenAIChatModel.js +7 -1
  34. package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +11 -4
  35. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +4 -0
  36. package/model-provider/openai/chat/OpenAIChatPromptFormat.js +9 -3
  37. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +22 -7
  38. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +22 -7
  39. package/package.json +2 -2
  40. package/tool/generate-tool-call/FunctionListToolCallPromptFormat.cjs +70 -0
  41. package/tool/generate-tool-call/FunctionListToolCallPromptFormat.d.ts +14 -0
  42. package/tool/generate-tool-call/FunctionListToolCallPromptFormat.js +65 -0
  43. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +5 -5
  44. package/tool/generate-tool-call/index.cjs +14 -0
  45. package/tool/generate-tool-call/index.d.ts +1 -0
  46. package/tool/generate-tool-call/index.js +1 -0
package/README.md CHANGED
@@ -32,7 +32,11 @@
  npm install modelfusion
  ```
 
- Or use a template: [ModelFusion terminal app starter](https://github.com/lgrammel/modelfusion-terminal-app-starter)
+ Or use a template:
+
+ - [ModelFusion terminal app starter](https://github.com/lgrammel/modelfusion-terminal-app-starter)
+ - [Next.js, Vercel AI SDK, Llama.cpp & ModelFusion starter](https://github.com/lgrammel/modelfusion-llamacpp-nextjs-starter)
+ - [Next.js, Vercel AI SDK, Ollama & ModelFusion starter](https://github.com/lgrammel/modelfusion-ollama-nextjs-starter)
 
  ## Usage Examples
 
@@ -475,11 +479,22 @@ const retrievedTexts = await retrieve(
 
  Available Vector Stores: [Memory](https://modelfusion.dev/integration/vector-index/memory), [SQLite VSS](https://modelfusion.dev/integration/vector-index/sqlite-vss), [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
 
- ### Prompt Formats
+ ### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text#prompt-format)
 
- Prompt formats let you use higher level prompt structures (such as instruction or chat prompts) for different models.
+ Prompt formats let you use higher level prompt structures (such as text, instruction or chat prompts) for different models.
 
- #### [Text Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-text#prompt-format)
+ #### Text Prompt Example
+
+ ```ts
+ const text = await generateText(
+ new AnthropicTextGenerationModel({
+ model: "claude-instant-1",
+ }).withTextPrompt(),
+ "Write a short story about a robot learning to love"
+ );
+ ```
+
+ #### Instruction Prompt Example
 
  ```ts
  // example assumes you are running https://huggingface.co/TheBloke/Llama-2-7B-GGUF with llama.cpp
@@ -487,9 +502,7 @@ const text = await generateText(
  new LlamaCppTextGenerationModel({
  contextWindowSize: 4096, // Llama 2 context window size
  maxCompletionTokens: 1000,
- })
- .withTextPrompt() // pure text prompt (no images)
- .withPromptFormat(Llama2PromptFormat.instruction()),
+ }).withTextPromptFormat(Llama2PromptFormat.instruction()),
  {
  system: "You are a story writer.",
  instruction: "Write a short story about a robot learning to love.",
@@ -497,7 +510,9 @@ const text = await generateText(
  );
  ```
 
- They can also be accessed through the shorthand methods `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
+ They can also be accessed through the shorthand methods `.withTextPrompt()`, `.withChatPrompt()` and `.withInstructionPrompt()` for many models:
+
+ #### Chat Prompt Example
 
  ```ts
  const textStream = await streamText(
@@ -524,19 +539,17 @@ const textStream = await streamText(
  );
  ```
 
- | Prompt Format | Instruction Prompt | Chat Prompt |
- | ------------- | ------------------ | ----------- |
- | OpenAI Chat | ✅ | ✅ |
- | Anthropic | ✅ | ✅ |
- | Llama 2 | ✅ | ✅ |
- | ChatML | ✅ | ✅ |
- | Alpaca | ✅ | ❌ |
- | Vicuna | ❌ | ✅ |
- | Generic Text | ✅ | ✅ |
-
- #### [Vision Prompts]
-
- #### [Image Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-image/prompt-format)
+ | Prompt Format | Text Prompt | Instruction Prompt | Chat Prompt |
+ | ------------- | ----------- | ------------------ | ----------- |
+ | OpenAI Chat | ✅ | ✅ | ✅ |
+ | Anthropic | ✅ | ✅ | ✅ |
+ | Llama 2 | ✅ | ✅ | ✅ |
+ | ChatML | ✅ | ✅ | ✅ |
+ | Alpaca | ✅ | ✅ | ❌ |
+ | Vicuna | ❌ | ❌ | ✅ |
+ | Generic Text | ✅ | ✅ | ✅ |
+
+ ### [Image Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 
  You an use prompt formats with image models as well, e.g. to use a basic text prompt. It is available as a shorthand method:
 
package/core/api/retryWithExponentialBackoff.cjs CHANGED
@@ -2,6 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.retryWithExponentialBackoff = void 0;
  const delay_js_1 = require("../../util/delay.cjs");
+ const getErrorMessage_js_1 = require("../../util/getErrorMessage.cjs");
  const ApiCallError_js_1 = require("./ApiCallError.cjs");
  const RetryError_js_1 = require("./RetryError.cjs");
  /**
@@ -19,11 +20,12 @@ async function _retryWithExponentialBackoff(f, { maxTries, delayInMs, backoffFac
  return await f();
  }
  catch (error) {
+ const errorMessage = (0, getErrorMessage_js_1.getErrorMessage)(error);
  const newErrors = [...errors, error];
  const tryNumber = newErrors.length;
  if (tryNumber >= maxTries) {
  throw new RetryError_js_1.RetryError({
- message: `Failed after ${tryNumber} tries.`,
+ message: `Failed after ${tryNumber} tries. Last error: ${errorMessage}`,
  reason: "maxTriesExceeded",
  errors: newErrors,
  });
@@ -39,7 +41,6 @@ async function _retryWithExponentialBackoff(f, { maxTries, delayInMs, backoffFac
  return _retryWithExponentialBackoff(f, { maxTries, delayInMs: backoffFactor * delayInMs, backoffFactor }, newErrors);
  }
  }
- const errorMessage = error instanceof Error ? error.message : String(error);
  throw new RetryError_js_1.RetryError({
  message: `Failed after ${tryNumber} attempt(s) with non-retryable error: '${errorMessage}'`,
  reason: "errorNotRetryable",
package/core/api/retryWithExponentialBackoff.js CHANGED
@@ -1,4 +1,5 @@
  import { delay } from "../../util/delay.js";
+ import { getErrorMessage } from "../../util/getErrorMessage.js";
  import { ApiCallError } from "./ApiCallError.js";
  import { RetryError } from "./RetryError.js";
  /**
@@ -15,11 +16,12 @@ async function _retryWithExponentialBackoff(f, { maxTries, delayInMs, backoffFac
  return await f();
  }
  catch (error) {
+ const errorMessage = getErrorMessage(error);
  const newErrors = [...errors, error];
  const tryNumber = newErrors.length;
  if (tryNumber >= maxTries) {
  throw new RetryError({
- message: `Failed after ${tryNumber} tries.`,
+ message: `Failed after ${tryNumber} tries. Last error: ${errorMessage}`,
  reason: "maxTriesExceeded",
  errors: newErrors,
  });
@@ -35,7 +37,6 @@ async function _retryWithExponentialBackoff(f, { maxTries, delayInMs, backoffFac
  return _retryWithExponentialBackoff(f, { maxTries, delayInMs: backoffFactor * delayInMs, backoffFactor }, newErrors);
  }
  }
- const errorMessage = error instanceof Error ? error.message : String(error);
  throw new RetryError({
  message: `Failed after ${tryNumber} attempt(s) with non-retryable error: '${errorMessage}'`,
  reason: "errorNotRetryable",
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs CHANGED
@@ -1,8 +1,24 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.instruction = void 0;
+ exports.instruction = exports.text = void 0;
  const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
  const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
+ /**
+ * Formats a text prompt as an Alpaca prompt.
+ */
+ function text() {
+ return {
+ stopSequences: [],
+ format: (instruction) => {
+ let text = DEFAULT_SYSTEM_PROMPT_NO_INPUT;
+ text += "\n\n### Instruction:\n";
+ text += instruction;
+ text += "\n\n### Response:\n";
+ return text;
+ },
+ };
+ }
+ exports.text = text;
  /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts CHANGED
@@ -1,5 +1,9 @@
  import { InstructionPrompt } from "./InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
+ /**
+ * Formats a text prompt as an Alpaca prompt.
+ */
+ export declare function text(): TextGenerationPromptFormat<string, string>;
  /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
@@ -36,4 +40,7 @@ import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
- export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, string>;
+ export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt & {
+ input?: string;
+ }, // optional input supported by Alpaca
+ string>;
package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js CHANGED
@@ -1,5 +1,20 @@
  const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
  const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
+ /**
+ * Formats a text prompt as an Alpaca prompt.
+ */
+ export function text() {
+ return {
+ stopSequences: [],
+ format: (instruction) => {
+ let text = DEFAULT_SYSTEM_PROMPT_NO_INPUT;
+ text += "\n\n### Instruction:\n";
+ text += instruction;
+ text += "\n\n### Response:\n";
+ return text;
+ },
+ };
+ }
  /**
  * Formats an instruction prompt as an Alpaca prompt.
  *
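Traced through the `format` function above, the new Alpaca `text()` format turns a plain string prompt (the sample input "What is a rainbow?" is illustrative, not from this diff) into:

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
What is a rainbow?

### Response:
```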
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = void 0;
+ exports.chat = exports.instruction = exports.text = void 0;
  const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
  const START_SEGMENT = "<|im_start|>";
  const END_SEGMENT = "<|im_end|>";
@@ -10,6 +10,16 @@ function chatMLStart(role) {
  function chatMLSegment(role, text) {
  return text == null ? "" : `${chatMLStart(role)}${text}${END_SEGMENT}\n`;
  }
+ /**
+ * Formats a text prompt using the ChatML format.
+ */
+ function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format: (instruction) => chatMLSegment("user", instruction),
+ };
+ }
+ exports.text = text;
  /**
  * Formats an instruction prompt using the ChatML format.
  *
@@ -27,9 +37,7 @@ function instruction() {
  return {
  stopSequences: [END_SEGMENT],
  format: (instruction) => chatMLSegment("system", instruction.system) +
- chatMLSegment("user", instruction.instruction + instruction.input != null
- ? `\n\n${instruction.input}`
- : ""),
+ chatMLSegment("user", instruction.instruction),
  };
  }
  exports.instruction = instruction;
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts CHANGED
@@ -1,6 +1,10 @@
  import { ChatPrompt } from "./ChatPrompt.js";
  import { InstructionPrompt } from "./InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
+ /**
+ * Formats a text prompt using the ChatML format.
+ */
+ export declare function text(): TextGenerationPromptFormat<string, string>;
  /**
  * Formats an instruction prompt using the ChatML format.
  *
package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js CHANGED
@@ -7,6 +7,15 @@ function chatMLStart(role) {
  function chatMLSegment(role, text) {
  return text == null ? "" : `${chatMLStart(role)}${text}${END_SEGMENT}\n`;
  }
+ /**
+ * Formats a text prompt using the ChatML format.
+ */
+ export function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format: (instruction) => chatMLSegment("user", instruction),
+ };
+ }
  /**
  * Formats an instruction prompt using the ChatML format.
  *
@@ -24,9 +33,7 @@ export function instruction() {
  return {
  stopSequences: [END_SEGMENT],
  format: (instruction) => chatMLSegment("system", instruction.system) +
- chatMLSegment("user", instruction.instruction + instruction.input != null
- ? `\n\n${instruction.input}`
- : ""),
+ chatMLSegment("user", instruction.instruction),
  };
  }
  /**
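The new ChatML `text()` format wraps a plain prompt in a single user segment. Assuming `chatMLStart` emits `<|im_start|>` followed by the role and a newline (the standard ChatML layout; its body falls outside this diff), the illustrative prompt "What is a rainbow?" becomes:

```
<|im_start|>user
What is a rainbow?<|im_end|>
```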
package/model-function/generate-text/prompt-format/InstructionPrompt.d.ts CHANGED
@@ -1,13 +1,11 @@
  /**
- * A single instruction prompt. It can contain an optional system message to define the role and behavior of the language model
- * and an optiona input to provide context for the language model.
+ * A single instruction prompt. It can contain an optional system message to define the role and behavior of the language model.
  *
  * @example
  * ```ts
  * {
  * system: "You are a celebrated poet.", // optional
- * instruction: "Write a short story about:",
- * input: "a robot learning to love.", // optional
+ * instruction: "Write a story about a robot learning to love",
  * }
  * ```
  */
@@ -21,10 +19,6 @@ export type InstructionPrompt = {
  * The instruction for the model.
  */
  instruction: string;
- /**
- * Optional additional input or context, e.g. a the content from which information should be extracted.
- */
- input?: string;
  /**
  * Optional image to provide context for the language model. Only supported by some models.
  */
package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = void 0;
+ exports.chat = exports.instruction = exports.text = void 0;
  const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
  // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
  const BEGIN_SEGMENT = "<s>";
@@ -9,6 +9,23 @@ const BEGIN_INSTRUCTION = "[INST]";
  const END_INSTRUCTION = "[/INST]";
  const BEGIN_SYSTEM = "<<SYS>>\n";
  const END_SYSTEM = "\n<</SYS>>\n\n";
+ /**
+ * Formats a text prompt as a Llama 2 prompt.
+ *
+ * Llama 2 prompt template:
+ * ```
+ * <s>[INST]{ instruction } [/INST]
+ * ```
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+ function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}\n`,
+ };
+ }
+ exports.text = text;
  /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
@@ -28,7 +45,7 @@ function instruction() {
  stopSequences: [END_SEGMENT],
  format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
  ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
- : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
+ : ""}${instruction.instruction}${END_INSTRUCTION}\n`,
  };
  }
  exports.instruction = instruction;
package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts CHANGED
@@ -1,6 +1,17 @@
  import { ChatPrompt } from "./ChatPrompt.js";
  import { InstructionPrompt } from "./InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
+ /**
+ * Formats a text prompt as a Llama 2 prompt.
+ *
+ * Llama 2 prompt template:
+ * ```
+ * <s>[INST]{ instruction } [/INST]
+ * ```
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+ export declare function text(): TextGenerationPromptFormat<string, string>;
  /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
package/model-function/generate-text/prompt-format/Llama2PromptFormat.js CHANGED
@@ -6,6 +6,22 @@ const BEGIN_INSTRUCTION = "[INST]";
  const END_INSTRUCTION = "[/INST]";
  const BEGIN_SYSTEM = "<<SYS>>\n";
  const END_SYSTEM = "\n<</SYS>>\n\n";
+ /**
+ * Formats a text prompt as a Llama 2 prompt.
+ *
+ * Llama 2 prompt template:
+ * ```
+ * <s>[INST]{ instruction } [/INST]
+ * ```
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+ export function text() {
+ return {
+ stopSequences: [END_SEGMENT],
+ format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction}${END_INSTRUCTION}\n`,
+ };
+ }
  /**
  * Formats an instruction prompt as a Llama 2 prompt.
  *
@@ -25,7 +41,7 @@ export function instruction() {
  stopSequences: [END_SEGMENT],
  format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
  ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
- : ""} ${instruction.instruction}${END_INSTRUCTION}\n`,
+ : ""}${instruction.instruction}${END_INSTRUCTION}\n`,
  };
  }
  /**
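Derived directly from the template literals above: `text()` simply yields `<s>[INST]{prompt}[/INST]` followed by a newline, and `instruction()` (with the `input` interpolation now removed) renders a prompt with a system message as shown below, using the sample strings from the README example:

```
<s>[INST] <<SYS>>
You are a story writer.
<</SYS>>

Write a short story about a robot learning to love.[/INST]
```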
package/model-function/generate-text/prompt-format/TextPromptFormat.cjs CHANGED
@@ -1,7 +1,15 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = void 0;
+ exports.chat = exports.instruction = exports.text = void 0;
  const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
+ /**
+ * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
+ */
+ const text = () => ({
+ stopSequences: [],
+ format: (instruction) => instruction,
+ });
+ exports.text = text;
  /**
  * Formats an instruction prompt as a basic text prompt.
  */
@@ -13,9 +21,6 @@ const instruction = () => ({
  text += `${instruction.system}\n\n`;
  }
  text += instruction.instruction;
- if (instruction.input != null) {
- text += `\n\n${instruction.input}`;
- }
  return text;
  },
  });
package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts CHANGED
@@ -1,6 +1,10 @@
  import { ChatPrompt } from "./ChatPrompt.js";
  import { InstructionPrompt } from "./InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../TextGenerationPromptFormat.js";
+ /**
+ * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
+ */
+ export declare const text: () => TextGenerationPromptFormat<string, string>;
  /**
  * Formats an instruction prompt as a basic text prompt.
  */
package/model-function/generate-text/prompt-format/TextPromptFormat.js CHANGED
@@ -1,4 +1,11 @@
  import { validateChatPrompt } from "./validateChatPrompt.js";
+ /**
+ * Formats a text prompt as a basic text prompt. Does not change the text prompt in any way.
+ */
+ export const text = () => ({
+ stopSequences: [],
+ format: (instruction) => instruction,
+ });
  /**
  * Formats an instruction prompt as a basic text prompt.
  */
@@ -10,9 +17,6 @@ export const instruction = () => ({
  text += `${instruction.system}\n\n`;
  }
  text += instruction.instruction;
- if (instruction.input != null) {
- text += `\n\n${instruction.input}`;
- }
  return text;
  },
  });
package/model-provider/anthropic/AnthropicPromptFormat.cjs CHANGED
@@ -1,7 +1,23 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = void 0;
+ exports.chat = exports.instruction = exports.text = void 0;
  const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
+ /**
+ * Formats a text prompt as an Anthropic prompt.
+ */
+ function text() {
+ return {
+ format: (instruction) => {
+ let text = "";
+ text += "\n\nHuman:";
+ text += instruction;
+ text += "\n\nAssistant:";
+ return text;
+ },
+ stopSequences: [],
+ };
+ }
+ exports.text = text;
  /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
@@ -14,11 +30,6 @@ function instruction() {
  }
  text += "\n\nHuman:";
  text += instruction.instruction;
- if (instruction.input != null) {
- // use tags per Anthropic instruction:
- // https://docs.anthropic.com/claude/docs/constructing-a-prompt
- text += `\n\n<data>${instruction.input}</data>`;
- }
  text += "\n\nAssistant:";
  return text;
  },
package/model-provider/anthropic/AnthropicPromptFormat.d.ts CHANGED
@@ -1,6 +1,10 @@
  import { ChatPrompt } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
  import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ /**
+ * Formats a text prompt as an Anthropic prompt.
+ */
+ export declare function text(): TextGenerationPromptFormat<string, string>;
  /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
package/model-provider/anthropic/AnthropicPromptFormat.js CHANGED
@@ -1,4 +1,19 @@
  import { validateChatPrompt } from "../../model-function/generate-text/prompt-format/validateChatPrompt.js";
+ /**
+ * Formats a text prompt as an Anthropic prompt.
+ */
+ export function text() {
+ return {
+ format: (instruction) => {
+ let text = "";
+ text += "\n\nHuman:";
+ text += instruction;
+ text += "\n\nAssistant:";
+ return text;
+ },
+ stopSequences: [],
+ };
+ }
  /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
@@ -11,11 +26,6 @@ export function instruction() {
  }
  text += "\n\nHuman:";
  text += instruction.instruction;
- if (instruction.input != null) {
- // use tags per Anthropic instruction:
- // https://docs.anthropic.com/claude/docs/constructing-a-prompt
- text += `\n\n<data>${instruction.input}</data>`;
- }
  text += "\n\nAssistant:";
  return text;
  },
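Traced through the concatenation above, the Anthropic `text()` format produces the Human/Assistant turn structure of the Claude completion prompt. For an illustrative input it renders as follows (the string begins with two newlines, and there is no space after `Human:` or `Assistant:`, exactly as concatenated):

```


Human:Write a short story about a robot learning to love

Assistant:
```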
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs CHANGED
@@ -106,6 +106,12 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
  });
  }
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt() {
+ return this.withPromptFormat((0, AnthropicPromptFormat_js_1.text)());
+ }
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts CHANGED
@@ -55,6 +55,10 @@ export declare class AnthropicTextGenerationModel extends AbstractModel<Anthropi
  text: string;
  }>;
  doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt(): PromptFormatTextStreamingModel<string, string, AnthropicTextGenerationModelSettings, this>;
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/anthropic/AnthropicTextGenerationModel.js CHANGED
@@ -9,7 +9,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
  import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
- import { instruction, chat } from "./AnthropicPromptFormat.js";
+ import { instruction, chat, text } from "./AnthropicPromptFormat.js";
  export const ANTHROPIC_TEXT_GENERATION_MODELS = {
  "claude-instant-1": {
  contextWindowSize: 100000,
@@ -103,6 +103,12 @@ export class AnthropicTextGenerationModel extends AbstractModel {
  responseFormat: AnthropicTextGenerationResponseFormat.deltaIterable,
  });
  }
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt() {
+ return this.withPromptFormat(text());
+ }
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.cjs CHANGED
@@ -19,9 +19,6 @@ function instruction() {
  text += `[img-1]\n`;
  }
  text += `${instruction.instruction}\n`;
- if (instruction.input != null) {
- text += `${instruction.input}\n`;
- }
  text += `ASSISTANT: `;
  return {
  text,
package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.js CHANGED
@@ -16,9 +16,6 @@ export function instruction() {
  text += `[img-1]\n`;
  }
  text += `${instruction.instruction}\n`;
- if (instruction.input != null) {
- text += `${instruction.input}\n`;
- }
  text += `ASSISTANT: `;
  return {
  text,
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs CHANGED
@@ -108,6 +108,23 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  stopSequences: [],
  });
  }
+ /**
+ * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+ */
+ withTextPromptFormat(promptFormat) {
+ return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+ model: this.withTextPrompt().withSettings({
+ stopSequences: [
+ ...(this.settings.stopSequences ?? []),
+ ...promptFormat.stopSequences,
+ ],
+ }),
+ promptFormat,
+ });
+ }
+ /**
+ * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+ */
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
  model: this.withSettings({
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -111,6 +111,13 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  }>;
  doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ /**
+ * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+ */
+ withTextPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+ /**
+ * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+ */
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -105,6 +105,23 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  stopSequences: [],
  });
  }
+ /**
+ * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+ */
+ withTextPromptFormat(promptFormat) {
+ return new PromptFormatTextStreamingModel({
+ model: this.withTextPrompt().withSettings({
+ stopSequences: [
+ ...(this.settings.stopSequences ?? []),
+ ...promptFormat.stopSequences,
+ ],
+ }),
+ promptFormat,
+ });
+ }
+ /**
+ * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+ */
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel({
  model: this.withSettings({
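Unlike plain `withPromptFormat`, the new `withTextPromptFormat` first applies `withTextPrompt()` (text-only, no image support) and merges the format's stop sequences into the model settings. A usage sketch mirroring the README example above (it assumes a Llama 2 model served by llama.cpp, and that both identifiers are exported from the package root as in the README):

```ts
import { LlamaCppTextGenerationModel, Llama2PromptFormat } from "modelfusion";

// The Llama 2 instruction format contributes its stop sequences
// (END_SEGMENT, presumably "</s>"), which withTextPromptFormat merges
// with any stop sequences already configured on the model.
const model = new LlamaCppTextGenerationModel({
  contextWindowSize: 4096, // Llama 2 context window size
  maxCompletionTokens: 1000,
}).withTextPromptFormat(Llama2PromptFormat.instruction());
```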
package/model-provider/ollama/OllamaTextGenerationModel.cjs CHANGED
@@ -7,6 +7,7 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
+ const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs");
  const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
@@ -94,6 +95,12 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  responseFormat: exports.OllamaTextGenerationResponseFormat.deltaIterable,
  });
  }
+ asToolCallGenerationModel(promptFormat) {
+ return new TextGenerationToolCallModel_js_1.TextGenerationToolCallModel({
+ model: this.withSettings({ format: "json" }),
+ format: promptFormat,
+ });
+ }
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
  model: this.withSettings({
package/model-provider/ollama/OllamaTextGenerationModel.d.ts CHANGED
@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ import { TextGenerationToolCallModel, ToolCallPromptFormat } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  /**
  * @see https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
  */
@@ -135,6 +136,7 @@ export declare class OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE extends numbe
  text: string;
  }>;
  doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+ asToolCallGenerationModel<INPUT_PROMPT>(promptFormat: ToolCallPromptFormat<INPUT_PROMPT, string>): TextGenerationToolCallModel<INPUT_PROMPT, string, this>;
  withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withSettings(additionalSettings: Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
package/model-provider/ollama/OllamaTextGenerationModel.js CHANGED
@@ -4,6 +4,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+ import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
  import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
@@ -91,6 +92,12 @@ export class OllamaTextGenerationModel extends AbstractModel {
  responseFormat: OllamaTextGenerationResponseFormat.deltaIterable,
  });
  }
+ asToolCallGenerationModel(promptFormat) {
+ return new TextGenerationToolCallModel({
+ model: this.withSettings({ format: "json" }),
+ format: promptFormat,
+ });
+ }
  withPromptFormat(promptFormat) {
  return new PromptFormatTextStreamingModel({
  model: this.withSettings({
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -379,6 +379,12 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
  totalTokens: response.usage.total_tokens,
  };
  }
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt() {
+ return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.text)());
+ }
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -375,6 +375,10 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  completionTokens: number;
  totalTokens: number;
  };
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt(): PromptFormatTextStreamingModel<string, OpenAIChatMessage[], OpenAIChatSettings, this>;
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -10,7 +10,7 @@ import { PromptFormatTextStreamingModel } from "../../../model-function/generate
  import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
  import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
  import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
- import { chat, instruction } from "./OpenAIChatPromptFormat.js";
+ import { chat, instruction, text } from "./OpenAIChatPromptFormat.js";
  import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
  import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
  /*
@@ -370,6 +370,12 @@ export class OpenAIChatModel extends AbstractModel {
  totalTokens: response.usage.total_tokens,
  };
  }
+ /**
+ * Returns this model with a text prompt format.
+ */
+ withTextPrompt() {
+ return this.withPromptFormat(text());
+ }
  /**
  * Returns this model with an instruction prompt format.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs CHANGED
@@ -1,8 +1,18 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = void 0;
+ exports.chat = exports.instruction = exports.text = void 0;
  const validateChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
  const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
+ /**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+ function text() {
+ return {
+ format: (instruction) => [OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction)],
+ stopSequences: [],
+ };
+ }
+ exports.text = text;
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -16,9 +26,6 @@ function instruction() {
  messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.instruction, {
  image: instruction.image,
  }));
- if (instruction.input != null) {
- messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.input));
- }
  return messages;
  },
  stopSequences: [],
package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts CHANGED
@@ -2,6 +2,10 @@ import { TextGenerationPromptFormat } from "../../../model-function/generate-tex
  import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
  import { InstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
  import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+ /**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+ export declare function text(): TextGenerationPromptFormat<string, Array<OpenAIChatMessage>>;
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.js CHANGED
@@ -1,5 +1,14 @@
  import { validateChatPrompt } from "../../../model-function/generate-text/prompt-format/validateChatPrompt.js";
  import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
+ /**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+ export function text() {
+ return {
+ format: (instruction) => [OpenAIChatMessage.user(instruction)],
+ stopSequences: [],
+ };
+ }
  /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -13,9 +22,6 @@ export function instruction() {
  messages.push(OpenAIChatMessage.user(instruction.instruction, {
  image: instruction.image,
  }));
- if (instruction.input != null) {
- messages.push(OpenAIChatMessage.user(instruction.input));
- }
  return messages;
  },
  stopSequences: [],
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs CHANGED
@@ -6,7 +6,8 @@ const AsyncQueue_js_1 = require("../../../util/AsyncQueue.cjs");
  const parseEventSourceStream_js_1 = require("../../../util/streaming/parseEventSourceStream.cjs");
  const parseJSON_js_1 = require("../../../core/schema/parseJSON.cjs");
  const ZodSchema_js_1 = require("../../../core/schema/ZodSchema.cjs");
- const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+ const chatCompletionChunkSchema = zod_1.z.object({
+ object: zod_1.z.literal("chat.completion.chunk"),
  id: zod_1.z.string(),
  choices: zod_1.z.array(zod_1.z.object({
  delta: zod_1.z.object({
@@ -44,8 +45,15 @@ const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.objec
  created: zod_1.z.number(),
  model: zod_1.z.string(),
  system_fingerprint: zod_1.z.string().optional(),
- object: zod_1.z.literal("chat.completion.chunk"),
- }));
+ });
+ const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.union([
+ chatCompletionChunkSchema,
+ zod_1.z.object({
+ object: zod_1.z.string().refine((obj) => obj !== "chat.completion.chunk", {
+ message: "Object must not be 'chat.completion.chunk'",
+ }),
+ }),
+ ]));
  async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
  const queue = new AsyncQueue_js_1.AsyncQueue();
  const streamDelta = [];
@@ -68,12 +76,19 @@ async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
  type: "error",
  error: parseResult.error,
  });
- queue.close();
- return;
+ // Note: the queue is not closed on purpose. Some providers might add additional
+ // chunks that are not parsable, and ModelFusion should be resilient to that.
+ continue;
  }
  const eventData = parseResult.data;
- for (let i = 0; i < eventData.choices.length; i++) {
- const eventChoice = eventData.choices[i];
+ // ignore objects that are not "chat.completion.chunk" events.
+ // Such additional objects are e.g. sent by Azure OpenAI.
+ if (eventData.object !== "chat.completion.chunk") {
+ continue;
+ }
+ const completionChunk = eventData;
+ for (let i = 0; i < completionChunk.choices.length; i++) {
+ const eventChoice = completionChunk.choices[i];
  const delta = eventChoice.delta;
  if (streamDelta[i] == null) {
  streamDelta[i] = {
package/model-provider/openai/chat/OpenAIChatStreamIterable.js CHANGED
@@ -3,7 +3,8 @@ import { AsyncQueue } from "../../../util/AsyncQueue.js";
  import { parseEventSourceStream } from "../../../util/streaming/parseEventSourceStream.js";
  import { safeParseJSON } from "../../../core/schema/parseJSON.js";
  import { ZodSchema } from "../../../core/schema/ZodSchema.js";
- const chatResponseStreamEventSchema = new ZodSchema(z.object({
+ const chatCompletionChunkSchema = z.object({
+ object: z.literal("chat.completion.chunk"),
  id: z.string(),
  choices: z.array(z.object({
  delta: z.object({
@@ -41,8 +42,15 @@ const chatResponseStreamEventSchema = new ZodSchema(z.object({
  created: z.number(),
  model: z.string(),
  system_fingerprint: z.string().optional(),
- object: z.literal("chat.completion.chunk"),
- }));
+ });
+ const chatResponseStreamEventSchema = new ZodSchema(z.union([
+ chatCompletionChunkSchema,
+ z.object({
+ object: z.string().refine((obj) => obj !== "chat.completion.chunk", {
+ message: "Object must not be 'chat.completion.chunk'",
+ }),
+ }),
+ ]));
  export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
  const queue = new AsyncQueue();
  const streamDelta = [];
@@ -65,12 +73,19 @@ export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaVal
  type: "error",
  error: parseResult.error,
  });
- queue.close();
- return;
+ // Note: the queue is not closed on purpose. Some providers might add additional
+ // chunks that are not parsable, and ModelFusion should be resilient to that.
+ continue;
  }
  const eventData = parseResult.data;
- for (let i = 0; i < eventData.choices.length; i++) {
- const eventChoice = eventData.choices[i];
+ // ignore objects that are not "chat.completion.chunk" events.
+ // Such additional objects are e.g. sent by Azure OpenAI.
+ if (eventData.object !== "chat.completion.chunk") {
+ continue;
+ }
+ const completionChunk = eventData;
+ for (let i = 0; i < completionChunk.choices.length; i++) {
+ const eventChoice = completionChunk.choices[i];
  const delta = eventChoice.delta;
  if (streamDelta[i] == null) {
  streamDelta[i] = {
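The reworked schema is a union: either a well-formed `chat.completion.chunk`, or any other object, which the loop now skips instead of closing the queue. A small standalone check of the union logic (field list trimmed; the non-chunk object value is illustrative):

```ts
import { z } from "zod";

// Minimal reproduction of the union above: full chunks match the strict
// schema, while any other SSE payload (e.g. Azure OpenAI bookkeeping
// events) still parses and is filtered out by the `object` check.
const chunk = z.object({
  object: z.literal("chat.completion.chunk"),
  id: z.string(),
});
const otherEvent = z.object({
  object: z.string().refine((obj) => obj !== "chat.completion.chunk"),
});
const streamEvent = z.union([chunk, otherEvent]);

streamEvent.parse({ object: "chat.completion.chunk", id: "chunk-1" }); // full chunk
streamEvent.parse({ object: "some.other.event" }); // also parses, skipped later
```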
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "modelfusion",
  "description": "The TypeScript library for building multi-modal AI applications.",
- "version": "0.73.1",
+ "version": "0.74.1",
  "author": "Lars Grammel",
  "license": "MIT",
  "keywords": [
@@ -69,7 +69,7 @@
  "secure-json-parse": "2.7.0",
  "ws": "8.14.2",
  "zod": "3.22.4",
- "zod-to-json-schema": "3.21.4"
+ "zod-to-json-schema": "3.22.0"
  },
  "devDependencies": {
  "@types/node": "18.11.9",
package/tool/generate-tool-call/FunctionListToolCallPromptFormat.cjs ADDED
@@ -0,0 +1,70 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.instruction = exports.text = void 0;
+ const nanoid_1 = require("nanoid");
+ const zod_1 = require("zod");
+ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+ const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
+ const functionSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+ function: zod_1.z.string(),
+ parameters: zod_1.z.any(),
+ }));
+ const DEFAULT_FUNCTION_CALL_PROMPT = [
+ ``,
+ `Select the most suitable function and parameters ` +
+ `from the list of available functions below, based on the user's input. ` +
+ `Provide your response in JSON format.`,
+ ``,
+ `Available functions:`,
+ ].join("\n");
+ function text({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, } = {}) {
+ return {
+ createPrompt(instruction, tool) {
+ const instructionWithFunctionCall = [
+ instruction,
+ functionCallPrompt,
+ `${tool.name}:`,
+ ` description: ${tool.description ?? ""}`,
+ ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
+ ``,
+ ].join("\n");
+ return (baseFormat?.format(instructionWithFunctionCall) ??
+ // handled by signature overloading:
+ instructionWithFunctionCall); // eslint-disable-line @typescript-eslint/no-explicit-any
+ },
+ extractToolCall(response) {
+ const json = (0, parseJSON_js_1.parseJSON)({ text: response, schema: functionSchema });
+ return {
+ id: (0, nanoid_1.nanoid)(),
+ args: json.parameters,
+ };
+ },
+ };
+ }
+ exports.text = text;
+ function instruction({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, }) {
+ return {
+ createPrompt(instruction, tool) {
+ const instructionWithFunctionCall = [
+ instruction.instruction,
+ functionCallPrompt,
+ `${tool.name}:`,
+ ` description: ${tool.description ?? ""}`,
+ ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
+ ``,
+ ].join("\n");
+ return baseFormat.format({
+ ...instruction,
+ instruction: instructionWithFunctionCall,
+ });
+ },
+ extractToolCall(response) {
+ const json = (0, parseJSON_js_1.parseJSON)({ text: response, schema: functionSchema });
+ return {
+ id: (0, nanoid_1.nanoid)(),
+ args: json.parameters,
+ };
+ },
+ };
+ }
+ exports.instruction = instruction;
package/tool/generate-tool-call/FunctionListToolCallPromptFormat.d.ts ADDED
@@ -0,0 +1,14 @@
+ import { PromptFormat } from "../../model-function/PromptFormat.js";
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
+ import { ToolCallPromptFormat } from "./TextGenerationToolCallModel.js";
+ export declare function text(options?: {
+ functionCallPrompt?: string;
+ }): ToolCallPromptFormat<string, string>;
+ export declare function text<TARGET_PROMPT>({ functionCallPrompt, baseFormat, }: {
+ functionCallPrompt?: string;
+ baseFormat: PromptFormat<string, TARGET_PROMPT>;
+ }): ToolCallPromptFormat<string, TARGET_PROMPT>;
+ export declare function instruction<TARGET_PROMPT>({ functionCallPrompt, baseFormat, }: {
+ functionCallPrompt?: string;
+ baseFormat: PromptFormat<InstructionPrompt, TARGET_PROMPT>;
+ }): ToolCallPromptFormat<InstructionPrompt, TARGET_PROMPT>;
package/tool/generate-tool-call/FunctionListToolCallPromptFormat.js ADDED
@@ -0,0 +1,65 @@
+ import { nanoid } from "nanoid";
+ import { z } from "zod";
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
+ import { parseJSON } from "../../core/schema/parseJSON.js";
+ const functionSchema = new ZodSchema(z.object({
+ function: z.string(),
+ parameters: z.any(),
+ }));
+ const DEFAULT_FUNCTION_CALL_PROMPT = [
+ ``,
+ `Select the most suitable function and parameters ` +
+ `from the list of available functions below, based on the user's input. ` +
+ `Provide your response in JSON format.`,
+ ``,
+ `Available functions:`,
+ ].join("\n");
+ export function text({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, } = {}) {
+ return {
+ createPrompt(instruction, tool) {
+ const instructionWithFunctionCall = [
+ instruction,
+ functionCallPrompt,
+ `${tool.name}:`,
+ ` description: ${tool.description ?? ""}`,
+ ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
+ ``,
+ ].join("\n");
+ return (baseFormat?.format(instructionWithFunctionCall) ??
+ // handled by signature overloading:
+ instructionWithFunctionCall); // eslint-disable-line @typescript-eslint/no-explicit-any
+ },
+ extractToolCall(response) {
+ const json = parseJSON({ text: response, schema: functionSchema });
+ return {
+ id: nanoid(),
+ args: json.parameters,
+ };
+ },
+ };
+ }
+ export function instruction({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, }) {
+ return {
+ createPrompt(instruction, tool) {
+ const instructionWithFunctionCall = [
+ instruction.instruction,
+ functionCallPrompt,
+ `${tool.name}:`,
+ ` description: ${tool.description ?? ""}`,
+ ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
+ ``,
+ ].join("\n");
+ return baseFormat.format({
+ ...instruction,
+ instruction: instructionWithFunctionCall,
+ });
+ },
+ extractToolCall(response) {
+ const json = parseJSON({ text: response, schema: functionSchema });
+ return {
+ id: nanoid(),
+ args: json.parameters,
+ };
+ },
+ };
+ }
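Taken together with the new Ollama `asToolCallGenerationModel` above, this format turns a plain completion model into a tool-call generator: `createPrompt` appends the JSON-function instructions and the tool's JSON schema to the prompt, and `extractToolCall` parses the model's `{ "function": ..., "parameters": ... }` answer. A hedged usage sketch (the model name and tool definition are illustrative, and the exact shape of the `generateToolCall` call is assumed from this module's API, not shown in this diff):

```ts
import {
  FunctionListToolCallPromptFormat,
  OllamaTextGenerationModel,
  ZodSchema,
  generateToolCall, // assumed export of the generate-tool-call module
} from "modelfusion";
import { z } from "zod";

// asToolCallGenerationModel forces Ollama into JSON mode (format: "json").
const model = new OllamaTextGenerationModel({
  model: "mistral", // illustrative model name
}).asToolCallGenerationModel(FunctionListToolCallPromptFormat.text());

// The tool supplies the name, description, and a schema whose
// getJsonSchema() output is embedded under "Available functions:".
const toolCall = await generateToolCall(
  model,
  {
    name: "getTemperature", // illustrative tool
    description: "Get the temperature of a room.",
    parameters: new ZodSchema(z.object({ room: z.string() })),
  },
  "What is the temperature in the kitchen?"
);
```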
package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts CHANGED
@@ -2,24 +2,24 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
  import { ToolDefinition } from "../ToolDefinition.js";
  import { ToolCallGenerationModel } from "./ToolCallGenerationModel.js";
- export interface ToolCallTextPromptFormat<PROMPT> {
- createPrompt: (prompt: PROMPT, tool: ToolDefinition<string, unknown>) => string;
+ export interface ToolCallPromptFormat<SOURCE_PROMPT, TARGET_PROMPT> {
+ createPrompt: (prompt: SOURCE_PROMPT, tool: ToolDefinition<string, unknown>) => TARGET_PROMPT;
  extractToolCall: (response: string) => {
  id: string;
  args: unknown;
  } | null;
  }
- export declare class TextGenerationToolCallModel<PROMPT, MODEL extends TextGenerationModel<string, TextGenerationModelSettings>> implements ToolCallGenerationModel<PROMPT, MODEL["settings"]> {
+ export declare class TextGenerationToolCallModel<SOURCE_PROMPT, TARGET_PROMPT, MODEL extends TextGenerationModel<TARGET_PROMPT, TextGenerationModelSettings>> implements ToolCallGenerationModel<SOURCE_PROMPT, MODEL["settings"]> {
  private readonly model;
  private readonly format;
  constructor({ model, format, }: {
  model: MODEL;
- format: ToolCallTextPromptFormat<PROMPT>;
+ format: ToolCallPromptFormat<SOURCE_PROMPT, TARGET_PROMPT>;
  });
  get modelInformation(): import("../../index.js").ModelInformation;
  get settings(): TextGenerationModelSettings;
  get settingsForEvent(): Partial<MODEL["settings"]>;
- doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: PROMPT, options?: FunctionOptions): Promise<{
+ doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: SOURCE_PROMPT, options?: FunctionOptions): Promise<{
  response: unknown;
  toolCall: {
  id: string;
package/tool/generate-tool-call/index.cjs CHANGED
@@ -10,10 +10,24 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
  if (k2 === undefined) k2 = k;
  o[k2] = m[k];
  }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+ __setModuleDefault(result, mod);
+ return result;
+ };
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FunctionListToolCallPromptFormat = void 0;
+ exports.FunctionListToolCallPromptFormat = __importStar(require("./FunctionListToolCallPromptFormat.cjs"));
  __exportStar(require("./TextGenerationToolCallModel.cjs"), exports);
  __exportStar(require("./ToolCallGenerationEvent.cjs"), exports);
  __exportStar(require("./ToolCallGenerationModel.cjs"), exports);
package/tool/generate-tool-call/index.d.ts CHANGED
@@ -1,3 +1,4 @@
+ export * as FunctionListToolCallPromptFormat from "./FunctionListToolCallPromptFormat.js";
  export * from "./TextGenerationToolCallModel.js";
  export * from "./ToolCallGenerationEvent.js";
  export * from "./ToolCallGenerationModel.js";
package/tool/generate-tool-call/index.js CHANGED
@@ -1,3 +1,4 @@
+ export * as FunctionListToolCallPromptFormat from "./FunctionListToolCallPromptFormat.js";
  export * from "./TextGenerationToolCallModel.js";
  export * from "./ToolCallGenerationEvent.js";
  export * from "./ToolCallGenerationModel.js";