modelfusion 0.106.0 → 0.108.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/CHANGELOG.md +59 -0
  2. package/README.md +19 -59
  3. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +11 -0
  4. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +11 -0
  5. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +9 -7
  6. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +9 -7
  7. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +11 -0
  8. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +11 -0
  9. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +150 -0
  10. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +62 -0
  11. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +143 -0
  12. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +60 -0
  13. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +58 -0
  14. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +11 -0
  15. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +11 -0
  16. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +11 -0
  17. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +11 -0
  18. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +11 -0
  19. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +11 -0
  20. package/model-function/generate-text/prompt-template/index.cjs +2 -1
  21. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  22. package/model-function/generate-text/prompt-template/index.js +1 -0
  23. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +3 -3
  24. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs} +25 -11
  25. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts} +125 -38
  26. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js} +23 -9
  27. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs} +3 -3
  28. package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +1 -0
  29. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js} +3 -3
  30. package/model-provider/llamacpp/LlamaCppFacade.cjs +2 -2
  31. package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -2
  32. package/model-provider/llamacpp/LlamaCppFacade.js +2 -2
  33. package/model-provider/llamacpp/index.cjs +1 -1
  34. package/model-provider/llamacpp/index.d.ts +1 -1
  35. package/model-provider/llamacpp/index.js +1 -1
  36. package/model-provider/mistral/MistralChatModel.cjs +4 -4
  37. package/model-provider/mistral/MistralChatModel.d.ts +6 -6
  38. package/model-provider/mistral/MistralChatModel.js +1 -1
  39. package/model-provider/mistral/index.cjs +3 -3
  40. package/model-provider/mistral/index.d.ts +2 -2
  41. package/model-provider/mistral/index.js +2 -2
  42. package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -10
  43. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +13 -195
  44. package/model-provider/openai/AbstractOpenAIChatModel.js +2 -10
  45. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +167 -0
  46. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +199 -0
  47. package/model-provider/openai/AbstractOpenAICompletionModel.js +163 -0
  48. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -3
  49. package/model-provider/openai/OpenAIChatModel.d.ts +3 -6
  50. package/model-provider/openai/OpenAICompletionModel.cjs +4 -156
  51. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -191
  52. package/model-provider/openai/OpenAICompletionModel.js +3 -155
  53. package/model-provider/openai/index.cjs +1 -0
  54. package/model-provider/openai/index.d.ts +1 -0
  55. package/model-provider/openai/index.js +1 -0
  56. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -5
  57. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +74 -0
  58. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +27 -0
  59. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +70 -0
  60. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +37 -6
  61. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +33 -5
  62. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +35 -5
  63. package/model-provider/openai-compatible/OpenAICompatibleProviderName.cjs +2 -0
  64. package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +1 -0
  65. package/model-provider/openai-compatible/OpenAICompatibleProviderName.js +1 -0
  66. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +29 -0
  67. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +18 -0
  68. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +25 -0
  69. package/model-provider/openai-compatible/index.cjs +4 -1
  70. package/model-provider/openai-compatible/index.d.ts +4 -1
  71. package/model-provider/openai-compatible/index.js +4 -1
  72. package/package.json +16 -16
  73. package/tool/generate-tool-call/index.cjs +1 -0
  74. package/tool/generate-tool-call/index.d.ts +1 -0
  75. package/tool/generate-tool-call/index.js +1 -0
  76. package/tool/generate-tool-call/jsonToolCallPrompt.cjs +30 -0
  77. package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +5 -0
  78. package/tool/generate-tool-call/jsonToolCallPrompt.js +27 -0
  79. /package/{model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts → model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts} +0 -0
  80. /package/model-provider/mistral/{MistralPromptTemplate.cjs → MistralChatPromptTemplate.cjs} +0 -0
  81. /package/model-provider/mistral/{MistralPromptTemplate.d.ts → MistralChatPromptTemplate.d.ts} +0 -0
  82. /package/model-provider/mistral/{MistralPromptTemplate.js → MistralChatPromptTemplate.js} +0 -0
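
Beyond the individual diffs below, the file list points to three themes in this release: the llama.cpp model rename (LlamaCppTextGenerationModel → LlamaCppCompletionModel), a new Mistral instruct prompt template, and Together AI support in the OpenAI-compatible provider. Only the file names for the Together AI pieces appear in this diff; a minimal sketch of how they might be wired up — assuming the facade exposes a CompletionTextGenerator factory and that TogetherAIApiConfiguration reads TOGETHER_API_KEY from the environment (both assumptions) — could look like:

import { openaicompatible, streamText } from "modelfusion";

// Hypothetical usage -- the factory name and configuration defaults are
// assumptions; only the new file names are visible in this diff.
const model = openaicompatible.CompletionTextGenerator({
  api: new openaicompatible.TogetherAIApiConfiguration(), // assumed to read TOGETHER_API_KEY
  model: "mistralai/Mixtral-8x7B-v0.1", // any Together AI completion model id
});

const textStream = await streamText(model, "Write a haiku about package diffs:");
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
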
@@ -0,0 +1,60 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const MistralInstructPromptTemplate_js_1 = require("./MistralInstructPromptTemplate.cjs");
+ describe("text prompt", () => {
+   it("should format prompt", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.text)().format("prompt");
+     expect(prompt).toMatchSnapshot();
+   });
+ });
+ describe("instruction prompt", () => {
+   it("should format prompt with instruction", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+       instruction: "instruction",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with system and instruction", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+       system: "system",
+       instruction: "instruction",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with instruction and response prefix", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.instruction)().format({
+       instruction: "instruction",
+       responsePrefix: "response prefix",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+ });
+ describe("chat prompt", () => {
+   it("should format prompt with user message", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+       messages: [{ role: "user", content: "user message" }],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with user-assistant-user messages", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = (0, MistralInstructPromptTemplate_js_1.chat)().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+ });
@@ -0,0 +1,58 @@
+ import { chat, instruction, text } from "./MistralInstructPromptTemplate.js";
+ describe("text prompt", () => {
+   it("should format prompt", () => {
+     const prompt = text().format("prompt");
+     expect(prompt).toMatchSnapshot();
+   });
+ });
+ describe("instruction prompt", () => {
+   it("should format prompt with instruction", () => {
+     const prompt = instruction().format({
+       instruction: "instruction",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with system and instruction", () => {
+     const prompt = instruction().format({
+       system: "system",
+       instruction: "instruction",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with instruction and response prefix", () => {
+     const prompt = instruction().format({
+       instruction: "instruction",
+       responsePrefix: "response prefix",
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+ });
+ describe("chat prompt", () => {
+   it("should format prompt with user message", () => {
+     const prompt = chat().format({
+       messages: [{ role: "user", content: "user message" }],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with user-assistant-user messages", () => {
+     const prompt = chat().format({
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = chat().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
+ });
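
The snapshot values themselves are not included in the diff, but the two test files above pin down the new template's API: text(), instruction() (with optional system and responsePrefix fields), and chat(). A sketch of calling the template directly, assuming the MistralInstructPrompt namespace added to the prompt-template index below is re-exported from the package root:

import { MistralInstructPrompt } from "modelfusion";

// Mirrors the chat test above. Mistral's documented instruct format wraps
// each user turn in <s>[INST] ... [/INST]; the exact string this template
// emits is defined by the (unshown) snapshots.
const prompt = MistralInstructPrompt.chat().format({
  system: "you are a chatbot",
  messages: [
    { role: "user", content: "1st user message" },
    { role: "assistant", content: "assistant message" },
    { role: "user", content: "2nd user message" },
  ],
});

console.log(prompt); // a single formatted prompt string
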
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = (0, NeuralChatPromptTemplate_js_1.chat)().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = chat().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -46,4 +46,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = (0, TextPromptTemplate_js_1.chat)().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -44,4 +44,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = chat().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -18,4 +18,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = (0, VicunaPromptTemplate_js_1.chat)().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -16,4 +16,15 @@ describe("chat prompt", () => {
      });
      expect(prompt).toMatchSnapshot();
    });
+   it("should format prompt with system message and user-assistant-user messages", () => {
+     const prompt = chat().format({
+       system: "you are a chatbot",
+       messages: [
+         { role: "user", content: "1st user message" },
+         { role: "assistant", content: "assistant message" },
+         { role: "user", content: "2nd user message" },
+       ],
+     });
+     expect(prompt).toMatchSnapshot();
+   });
  });
@@ -26,7 +26,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
      for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.VicunaPrompt = exports.TextPrompt = exports.NeuralChatPrompt = exports.Llama2Prompt = exports.ChatMLPrompt = exports.AlpacaPrompt = void 0;
+ exports.VicunaPrompt = exports.TextPrompt = exports.NeuralChatPrompt = exports.MistralInstructPrompt = exports.Llama2Prompt = exports.ChatMLPrompt = exports.AlpacaPrompt = void 0;
  exports.AlpacaPrompt = __importStar(require("./AlpacaPromptTemplate.cjs"));
  exports.ChatMLPrompt = __importStar(require("./ChatMLPromptTemplate.cjs"));
  __exportStar(require("./ChatPrompt.cjs"), exports);
@@ -34,6 +34,7 @@ __exportStar(require("./ContentPart.cjs"), exports);
  __exportStar(require("./InstructionPrompt.cjs"), exports);
  __exportStar(require("./InvalidPromptError.cjs"), exports);
  exports.Llama2Prompt = __importStar(require("./Llama2PromptTemplate.cjs"));
+ exports.MistralInstructPrompt = __importStar(require("./MistralInstructPromptTemplate.cjs"));
  exports.NeuralChatPrompt = __importStar(require("./NeuralChatPromptTemplate.cjs"));
  exports.TextPrompt = __importStar(require("./TextPromptTemplate.cjs"));
  exports.VicunaPrompt = __importStar(require("./VicunaPromptTemplate.cjs"));
@@ -5,6 +5,7 @@ export * from "./ContentPart.js";
  export * from "./InstructionPrompt.js";
  export * from "./InvalidPromptError.js";
  export * as Llama2Prompt from "./Llama2PromptTemplate.js";
+ export * as MistralInstructPrompt from "./MistralInstructPromptTemplate.js";
  export * as NeuralChatPrompt from "./NeuralChatPromptTemplate.js";
  export * as TextPrompt from "./TextPromptTemplate.js";
  export * as VicunaPrompt from "./VicunaPromptTemplate.js";
@@ -5,6 +5,7 @@ export * from "./ContentPart.js";
  export * from "./InstructionPrompt.js";
  export * from "./InvalidPromptError.js";
  export * as Llama2Prompt from "./Llama2PromptTemplate.js";
+ export * as MistralInstructPrompt from "./MistralInstructPromptTemplate.js";
  export * as NeuralChatPrompt from "./NeuralChatPromptTemplate.js";
  export * as TextPrompt from "./TextPromptTemplate.js";
  export * as VicunaPrompt from "./VicunaPromptTemplate.js";
@@ -1,11 +1,11 @@
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
  import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+ import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
  /**
   * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
   *
   * @see https://github.com/SkunkworksAI/BakLLaVA
   */
- export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, LlamaCppTextGenerationPrompt>;
- export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, LlamaCppTextGenerationPrompt>;
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, LlamaCppCompletionPrompt>;
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, LlamaCppCompletionPrompt>;
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.LlamaCppTextGenerationResponseFormat = exports.LlamaCppTextGenerationModel = void 0;
+ exports.LlamaCppCompletionResponseFormat = exports.LlamaCppCompletionModel = void 0;
  const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -14,7 +14,7 @@ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSour
  const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
  const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
  const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
- class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
    constructor(settings = {}) {
      super({ settings });
      Object.defineProperty(this, "provider", {
@@ -56,10 +56,10 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
            data,
          }))
        : undefined,
-     cache_prompt: this.settings.cachePrompt,
      temperature: this.settings.temperature,
      top_k: this.settings.topK,
      top_p: this.settings.topP,
+     min_p: this.settings.minP,
      n_predict: this.settings.maxGenerationTokens,
      n_keep: this.settings.nKeep,
      stop: this.settings.stopSequences,
@@ -68,12 +68,19 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      repeat_penalty: this.settings.repeatPenalty,
      repeat_last_n: this.settings.repeatLastN,
      penalize_nl: this.settings.penalizeNl,
+     presence_penalty: this.settings.presencePenalty,
+     frequency_penalty: this.settings.frequencyPenalty,
+     penalty_prompt: this.settings.penaltyPrompt,
      mirostat: this.settings.mirostat,
      mirostat_tau: this.settings.mirostatTau,
      mirostat_eta: this.settings.mirostatEta,
+     grammar: this.settings.grammar,
      seed: this.settings.seed,
      ignore_eos: this.settings.ignoreEos,
      logit_bias: this.settings.logitBias,
+     n_probs: this.settings.nProbs,
+     cache_prompt: this.settings.cachePrompt,
+     slot_id: this.settings.slotId,
    },
    failedResponseHandler: LlamaCppError_js_1.failedLlamaCppCallResponseHandler,
    successfulResponseHandler: responseFormat.handler,
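
The hunk above forwards several new llama.cpp server parameters (min_p, presence_penalty, frequency_penalty, penalty_prompt, grammar, n_probs, slot_id) from camelCase settings, and moves cache_prompt to the end of the request body. A sketch of setting them on the renamed model class, assuming the class is re-exported from the package root (the values are illustrative, not defaults from this diff):

import { LlamaCppCompletionModel } from "modelfusion";

const model = new LlamaCppCompletionModel({
  temperature: 0.7,
  minP: 0.05, // new: min-p sampling
  presencePenalty: 0.1, // new
  frequencyPenalty: 0.1, // new
  grammar: 'root ::= "yes" | "no"', // new: GBNF grammar for constrained output
  nProbs: 5, // new: return token probabilities
  cachePrompt: true, // still supported, now sent alongside slotId
  slotId: 0, // new: prompt-cache slot
});
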
@@ -85,22 +92,29 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      const eventSettingProperties = [
        ...TextGenerationModel_js_1.textGenerationModelProperties,
        "contextWindowSize",
-       "cachePrompt",
        "temperature",
        "topK",
        "topP",
+       "minP",
        "nKeep",
        "tfsZ",
        "typicalP",
        "repeatPenalty",
        "repeatLastN",
        "penalizeNl",
+       "presencePenalty",
+       "frequencyPenalty",
+       "penaltyPrompt",
        "mirostat",
        "mirostatTau",
        "mirostatEta",
+       "grammar",
        "seed",
        "ignoreEos",
        "logitBias",
+       "nProbs",
+       "cachePrompt",
+       "slotId",
      ];
      return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
    }
@@ -111,7 +125,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
    async doGenerateTexts(prompt, options) {
      const response = await this.callAPI(prompt, {
        ...options,
-       responseFormat: exports.LlamaCppTextGenerationResponseFormat.json,
+       responseFormat: exports.LlamaCppCompletionResponseFormat.json,
      });
      return {
        response,
@@ -135,7 +149,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
    doStreamText(prompt, options) {
      return this.callAPI(prompt, {
        ...options,
-       responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
+       responseFormat: exports.LlamaCppCompletionResponseFormat.deltaIterable,
      });
    }
    extractTextDelta(delta) {
@@ -178,10 +192,10 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      });
    }
    withSettings(additionalSettings) {
-     return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+     return new LlamaCppCompletionModel(Object.assign({}, this.settings, additionalSettings));
    }
  }
- exports.LlamaCppTextGenerationModel = LlamaCppTextGenerationModel;
+ exports.LlamaCppCompletionModel = LlamaCppCompletionModel;
  const llamaCppTextGenerationResponseSchema = zod_1.z.object({
    content: zod_1.z.string(),
    stop: zod_1.z.literal(true),
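
For consumers, the class and response-format renames above are mechanical. A migration sketch, assuming both names are re-exported from the package root as the provider index changes suggest:

// Before (0.106.x):
// import { LlamaCppTextGenerationModel, LlamaCppTextGenerationResponseFormat } from "modelfusion";

// After (0.108.0):
import { LlamaCppCompletionModel, LlamaCppCompletionResponseFormat } from "modelfusion";

const model = new LlamaCppCompletionModel({ maxGenerationTokens: 256 });
const responseFormat = LlamaCppCompletionResponseFormat.json; // renamed in lockstep
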
@@ -204,7 +218,7 @@ const llamaCppTextGenerationResponseSchema = zod_1.z.object({
    seed: zod_1.z.number(),
    stop: zod_1.z.array(zod_1.z.string()),
    stream: zod_1.z.boolean(),
-   temp: zod_1.z.number(),
+   temperature: zod_1.z.number().optional(), // optional for backwards compatibility
    tfs_z: zod_1.z.number(),
    top_k: zod_1.z.number(),
    top_p: zod_1.z.number(),
@@ -221,7 +235,7 @@ const llamaCppTextGenerationResponseSchema = zod_1.z.object({
    predicted_n: zod_1.z.number(),
    predicted_per_second: zod_1.z.number().nullable(),
    predicted_per_token_ms: zod_1.z.number().nullable(),
-   prompt_ms: zod_1.z.number().nullable(),
+   prompt_ms: zod_1.z.number().nullable().optional(),
    prompt_n: zod_1.z.number(),
    prompt_per_second: zod_1.z.number().nullable(),
    prompt_per_token_ms: zod_1.z.number().nullable(),
@@ -267,7 +281,7 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
    });
    return queue;
  }
- exports.LlamaCppTextGenerationResponseFormat = {
+ exports.LlamaCppCompletionResponseFormat = {
    /**
     * Returns the response as a JSON object.
     */