modelfusion 0.105.0 → 0.107.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (199)
  1. package/CHANGELOG.md +26 -0
  2. package/README.md +16 -59
  3. package/core/DefaultRun.cjs +0 -4
  4. package/core/DefaultRun.d.ts +0 -2
  5. package/core/DefaultRun.js +0 -4
  6. package/core/ExtensionFunctionEvent.d.ts +11 -0
  7. package/core/FunctionEvent.d.ts +2 -2
  8. package/extension/index.cjs +22 -3
  9. package/extension/index.d.ts +5 -1
  10. package/extension/index.js +4 -1
  11. package/index.cjs +0 -3
  12. package/index.d.ts +0 -3
  13. package/index.js +0 -3
  14. package/model-function/generate-structure/jsonStructurePrompt.cjs +42 -6
  15. package/model-function/generate-structure/jsonStructurePrompt.d.ts +12 -1
  16. package/model-function/generate-structure/jsonStructurePrompt.js +42 -5
  17. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -1
  18. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +1 -1
  19. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +11 -0
  20. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +11 -0
  21. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +10 -8
  22. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +1 -1
  23. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +10 -8
  24. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +11 -0
  25. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +11 -0
  26. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +150 -0
  27. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +62 -0
  28. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +143 -0
  29. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +60 -0
  30. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +58 -0
  31. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +11 -0
  32. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +11 -0
  33. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +11 -0
  34. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +11 -0
  35. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +11 -0
  36. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +11 -0
  37. package/model-function/generate-text/prompt-template/index.cjs +2 -1
  38. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  39. package/model-function/generate-text/prompt-template/index.js +1 -0
  40. package/model-function/index.cjs +0 -1
  41. package/model-function/index.d.ts +0 -1
  42. package/model-function/index.js +0 -1
  43. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  44. package/model-provider/cohere/CohereTextGenerationModel.d.ts +6 -6
  45. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +3 -3
  46. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs} +8 -8
  47. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts} +49 -49
  48. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js} +6 -6
  49. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs} +3 -3
  50. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js} +3 -3
  51. package/model-provider/llamacpp/LlamaCppFacade.cjs +2 -2
  52. package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -2
  53. package/model-provider/llamacpp/LlamaCppFacade.js +2 -2
  54. package/model-provider/llamacpp/index.cjs +1 -1
  55. package/model-provider/llamacpp/index.d.ts +1 -1
  56. package/model-provider/llamacpp/index.js +1 -1
  57. package/model-provider/mistral/MistralChatModel.cjs +4 -4
  58. package/model-provider/mistral/MistralChatModel.d.ts +6 -6
  59. package/model-provider/mistral/MistralChatModel.js +1 -1
  60. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +13 -13
  61. package/model-provider/mistral/index.cjs +3 -3
  62. package/model-provider/mistral/index.d.ts +2 -2
  63. package/model-provider/mistral/index.js +2 -2
  64. package/model-provider/ollama/OllamaChatModel.d.ts +9 -8
  65. package/model-provider/ollama/OllamaChatModel.js +1 -1
  66. package/model-provider/ollama/OllamaCompletionModel.d.ts +2 -1
  67. package/model-provider/ollama/OllamaCompletionModel.js +1 -1
  68. package/model-provider/ollama/OllamaCompletionModel.test.cjs +1 -7
  69. package/model-provider/ollama/OllamaCompletionModel.test.js +1 -7
  70. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +8 -8
  71. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  72. package/model-provider/openai/OpenAICompletionModel.d.ts +6 -6
  73. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -12
  74. package/model-provider/openai/OpenAITranscriptionModel.d.ts +11 -11
  75. package/model-provider/openai/index.cjs +0 -1
  76. package/model-provider/openai/index.d.ts +0 -1
  77. package/model-provider/openai/index.js +0 -1
  78. package/model-provider/stability/StabilityImageGenerationModel.d.ts +5 -5
  79. package/package.json +9 -20
  80. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +1 -1
  81. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
  82. package/tool/generate-tool-call/TextGenerationToolCallModel.js +1 -1
  83. package/tool/generate-tool-call/index.cjs +1 -0
  84. package/tool/generate-tool-call/index.d.ts +1 -0
  85. package/tool/generate-tool-call/index.js +1 -0
  86. package/tool/generate-tool-call/jsonToolCallPrompt.cjs +30 -0
  87. package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +5 -0
  88. package/tool/generate-tool-call/jsonToolCallPrompt.js +27 -0
  89. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.d.ts +1 -11
  90. package/tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.d.ts +12 -0
  91. package/tool/generate-tool-calls-or-text/index.cjs +1 -0
  92. package/tool/generate-tool-calls-or-text/index.d.ts +1 -0
  93. package/tool/generate-tool-calls-or-text/index.js +1 -0
  94. package/util/index.cjs +0 -1
  95. package/util/index.d.ts +0 -1
  96. package/util/index.js +0 -1
  97. package/browser/MediaSourceAppender.cjs +0 -54
  98. package/browser/MediaSourceAppender.d.ts +0 -11
  99. package/browser/MediaSourceAppender.js +0 -50
  100. package/browser/convertAudioChunksToBase64.cjs +0 -8
  101. package/browser/convertAudioChunksToBase64.d.ts +0 -4
  102. package/browser/convertAudioChunksToBase64.js +0 -4
  103. package/browser/convertBlobToBase64.cjs +0 -23
  104. package/browser/convertBlobToBase64.d.ts +0 -1
  105. package/browser/convertBlobToBase64.js +0 -19
  106. package/browser/index.cjs +0 -22
  107. package/browser/index.d.ts +0 -6
  108. package/browser/index.js +0 -6
  109. package/browser/invokeFlow.cjs +0 -23
  110. package/browser/invokeFlow.d.ts +0 -8
  111. package/browser/invokeFlow.js +0 -19
  112. package/browser/readEventSource.cjs +0 -29
  113. package/browser/readEventSource.d.ts +0 -9
  114. package/browser/readEventSource.js +0 -25
  115. package/browser/readEventSourceStream.cjs +0 -35
  116. package/browser/readEventSourceStream.d.ts +0 -7
  117. package/browser/readEventSourceStream.js +0 -31
  118. package/composed-function/index.cjs +0 -19
  119. package/composed-function/index.d.ts +0 -3
  120. package/composed-function/index.js +0 -3
  121. package/composed-function/summarize/SummarizationFunction.d.ts +0 -4
  122. package/composed-function/summarize/summarizeRecursively.cjs +0 -19
  123. package/composed-function/summarize/summarizeRecursively.d.ts +0 -11
  124. package/composed-function/summarize/summarizeRecursively.js +0 -15
  125. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +0 -25
  126. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +0 -24
  127. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +0 -21
  128. package/cost/Cost.cjs +0 -38
  129. package/cost/Cost.d.ts +0 -16
  130. package/cost/Cost.js +0 -34
  131. package/cost/CostCalculator.d.ts +0 -8
  132. package/cost/calculateCost.cjs +0 -28
  133. package/cost/calculateCost.d.ts +0 -7
  134. package/cost/calculateCost.js +0 -24
  135. package/cost/index.cjs +0 -19
  136. package/cost/index.d.ts +0 -3
  137. package/cost/index.js +0 -3
  138. package/guard/GuardEvent.cjs +0 -2
  139. package/guard/GuardEvent.d.ts +0 -7
  140. package/guard/fixStructure.cjs +0 -75
  141. package/guard/fixStructure.d.ts +0 -64
  142. package/guard/fixStructure.js +0 -71
  143. package/guard/guard.cjs +0 -79
  144. package/guard/guard.d.ts +0 -29
  145. package/guard/guard.js +0 -75
  146. package/guard/index.cjs +0 -19
  147. package/guard/index.d.ts +0 -3
  148. package/guard/index.js +0 -3
  149. package/model-function/SuccessfulModelCall.cjs +0 -10
  150. package/model-function/SuccessfulModelCall.d.ts +0 -12
  151. package/model-function/SuccessfulModelCall.js +0 -6
  152. package/model-provider/openai/OpenAICostCalculator.cjs +0 -89
  153. package/model-provider/openai/OpenAICostCalculator.d.ts +0 -6
  154. package/model-provider/openai/OpenAICostCalculator.js +0 -85
  155. package/server/fastify/AssetStorage.cjs +0 -2
  156. package/server/fastify/AssetStorage.d.ts +0 -17
  157. package/server/fastify/AssetStorage.js +0 -1
  158. package/server/fastify/DefaultFlow.cjs +0 -22
  159. package/server/fastify/DefaultFlow.d.ts +0 -16
  160. package/server/fastify/DefaultFlow.js +0 -18
  161. package/server/fastify/FileSystemAssetStorage.cjs +0 -60
  162. package/server/fastify/FileSystemAssetStorage.d.ts +0 -19
  163. package/server/fastify/FileSystemAssetStorage.js +0 -56
  164. package/server/fastify/FileSystemLogger.cjs +0 -49
  165. package/server/fastify/FileSystemLogger.d.ts +0 -18
  166. package/server/fastify/FileSystemLogger.js +0 -45
  167. package/server/fastify/Flow.cjs +0 -2
  168. package/server/fastify/Flow.d.ts +0 -9
  169. package/server/fastify/Flow.js +0 -1
  170. package/server/fastify/FlowRun.cjs +0 -71
  171. package/server/fastify/FlowRun.d.ts +0 -28
  172. package/server/fastify/FlowRun.js +0 -67
  173. package/server/fastify/FlowSchema.cjs +0 -2
  174. package/server/fastify/FlowSchema.d.ts +0 -5
  175. package/server/fastify/FlowSchema.js +0 -1
  176. package/server/fastify/Logger.cjs +0 -2
  177. package/server/fastify/Logger.d.ts +0 -13
  178. package/server/fastify/Logger.js +0 -1
  179. package/server/fastify/PathProvider.cjs +0 -34
  180. package/server/fastify/PathProvider.d.ts +0 -12
  181. package/server/fastify/PathProvider.js +0 -30
  182. package/server/fastify/index.cjs +0 -24
  183. package/server/fastify/index.d.ts +0 -8
  184. package/server/fastify/index.js +0 -8
  185. package/server/fastify/modelFusionFlowPlugin.cjs +0 -103
  186. package/server/fastify/modelFusionFlowPlugin.d.ts +0 -12
  187. package/server/fastify/modelFusionFlowPlugin.js +0 -99
  188. package/util/getAudioFileExtension.cjs +0 -29
  189. package/util/getAudioFileExtension.d.ts +0 -1
  190. package/util/getAudioFileExtension.js +0 -25
  191. /package/{composed-function/summarize/SummarizationFunction.cjs → core/ExtensionFunctionEvent.cjs} +0 -0
  192. /package/{composed-function/summarize/SummarizationFunction.js → core/ExtensionFunctionEvent.js} +0 -0
  193. /package/{cost/CostCalculator.js → model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts} +0 -0
  194. /package/{guard/GuardEvent.js → model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts} +0 -0
  195. /package/model-provider/mistral/{MistralPromptTemplate.cjs → MistralChatPromptTemplate.cjs} +0 -0
  196. /package/model-provider/mistral/{MistralPromptTemplate.d.ts → MistralChatPromptTemplate.d.ts} +0 -0
  197. /package/model-provider/mistral/{MistralPromptTemplate.js → MistralChatPromptTemplate.js} +0 -0
  198. /package/{cost/CostCalculator.cjs → tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.cjs} +0 -0
  199. /package/{model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts → tool/generate-tool-calls-or-text/ToolCallsOrGenerateTextPromptTemplate.js} +0 -0

package/model-provider/cohere/CohereTextEmbeddingModel.d.ts
@@ -74,8 +74,8 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
  get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
  doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
  response: {
- texts: string[];
  embeddings: number[][];
+ texts: string[];
  id: string;
  meta: {
  api_version: {
@@ -109,8 +109,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
  };
  }>;
  }, "strip", z.ZodTypeAny, {
- texts: string[];
  embeddings: number[][];
+ texts: string[];
  id: string;
  meta: {
  api_version: {
@@ -118,8 +118,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
  };
  };
  }, {
- texts: string[];
  embeddings: number[][];
+ texts: string[];
  id: string;
  meta: {
  api_version: {

package/model-provider/cohere/CohereTextGenerationModel.d.ts
@@ -66,8 +66,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
  doGenerateTexts(prompt: string, options?: FunctionOptions): Promise<{
  response: {
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;
@@ -90,8 +90,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
  is_finished: false;
  } | {
  response: {
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;
@@ -155,8 +155,8 @@ declare const cohereTextGenerationResponseSchema: z.ZodObject<{
  };
  }>>;
  }, "strip", z.ZodTypeAny, {
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;
@@ -168,8 +168,8 @@ declare const cohereTextGenerationResponseSchema: z.ZodObject<{
  };
  } | undefined;
  }, {
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;
@@ -193,8 +193,8 @@ export declare const CohereTextGenerationResponseFormat: {
  json: {
  stream: boolean;
  handler: ResponseHandler<{
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;
@@ -220,8 +220,8 @@ export declare const CohereTextGenerationResponseFormat: {
  is_finished: false;
  } | {
  response: {
- prompt: string;
  id: string;
+ prompt: string;
  generations: {
  text: string;
  id: string;

package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts
@@ -1,11 +1,11 @@
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
  import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+ import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
  /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
  * @see https://github.com/SkunkworksAI/BakLLaVA
  */
- export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, LlamaCppTextGenerationPrompt>;
- export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, LlamaCppTextGenerationPrompt>;
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, LlamaCppCompletionPrompt>;
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, LlamaCppCompletionPrompt>;

package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs}
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.LlamaCppTextGenerationResponseFormat = exports.LlamaCppTextGenerationModel = void 0;
+ exports.LlamaCppCompletionResponseFormat = exports.LlamaCppCompletionModel = void 0;
  const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -14,7 +14,7 @@ const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSour
  const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
  const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
  const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
- class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
+ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
  constructor(settings = {}) {
  super({ settings });
  Object.defineProperty(this, "provider", {
@@ -111,7 +111,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  async doGenerateTexts(prompt, options) {
  const response = await this.callAPI(prompt, {
  ...options,
- responseFormat: exports.LlamaCppTextGenerationResponseFormat.json,
+ responseFormat: exports.LlamaCppCompletionResponseFormat.json,
  });
  return {
  response,
@@ -135,7 +135,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
- responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
+ responseFormat: exports.LlamaCppCompletionResponseFormat.deltaIterable,
  });
  }
  extractTextDelta(delta) {
@@ -178,10 +178,10 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
  });
  }
  withSettings(additionalSettings) {
- return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+ return new LlamaCppCompletionModel(Object.assign({}, this.settings, additionalSettings));
  }
  }
- exports.LlamaCppTextGenerationModel = LlamaCppTextGenerationModel;
+ exports.LlamaCppCompletionModel = LlamaCppCompletionModel;
  const llamaCppTextGenerationResponseSchema = zod_1.z.object({
  content: zod_1.z.string(),
  stop: zod_1.z.literal(true),
@@ -204,7 +204,7 @@ const llamaCppTextGenerationResponseSchema = zod_1.z.object({
  seed: zod_1.z.number(),
  stop: zod_1.z.array(zod_1.z.string()),
  stream: zod_1.z.boolean(),
- temp: zod_1.z.number(),
+ temperature: zod_1.z.number().optional(), // optional for backwards compatibility
  tfs_z: zod_1.z.number(),
  top_k: zod_1.z.number(),
  top_p: zod_1.z.number(),
@@ -267,7 +267,7 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
  });
  return queue;
  }
- exports.LlamaCppTextGenerationResponseFormat = {
+ exports.LlamaCppCompletionResponseFormat = {
  /**
  * Returns the response as a JSON object.
  */

package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts}
@@ -8,7 +8,7 @@ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
- export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
+ export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
  api?: ApiConfiguration;
  /**
  * Specify the context window size of the model that you have loaded in your
@@ -35,7 +35,7 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
  ignoreEos?: boolean;
  logitBias?: Array<[number, number | false]>;
  }
- export interface LlamaCppTextGenerationPrompt {
+ export interface LlamaCppCompletionPrompt {
  /**
  * Text prompt. Images can be included through references such as `[img-ID]`, e.g. `[img-1]`.
  */
@@ -45,27 +45,27 @@ export interface LlamaCppTextGenerationPrompt {
  */
  images?: Record<number, string>;
  }
- export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
- constructor(settings?: LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
+ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
+ constructor(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
  readonly provider = "llamacpp";
  get modelName(): null;
  get contextWindowSize(): CONTEXT_WINDOW_SIZE;
  readonly tokenizer: LlamaCppTokenizer;
- callAPI<RESPONSE>(prompt: LlamaCppTextGenerationPrompt, options: {
- responseFormat: LlamaCppTextGenerationResponseFormatType<RESPONSE>;
+ callAPI<RESPONSE>(prompt: LlamaCppCompletionPrompt, options: {
+ responseFormat: LlamaCppCompletionResponseFormatType<RESPONSE>;
  } & FunctionOptions): Promise<RESPONSE>;
- get settingsForEvent(): Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
- countPromptTokens(prompt: LlamaCppTextGenerationPrompt): Promise<number>;
- doGenerateTexts(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<{
+ get settingsForEvent(): Partial<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>>;
+ countPromptTokens(prompt: LlamaCppCompletionPrompt): Promise<number>;
+ doGenerateTexts(prompt: LlamaCppCompletionPrompt, options?: FunctionOptions): Promise<{
  response: {
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -81,11 +81,11 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -116,15 +116,15 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  totalTokens: number;
  };
  }>;
- doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<{
- stop: true;
+ doStreamText(prompt: LlamaCppCompletionPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<{
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -140,11 +140,11 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -169,16 +169,16 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  content: string;
  }>>>;
  extractTextDelta(delta: unknown): string;
- withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  /**
  * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
  */
- withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+ withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
  /**
  * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
  */
- withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
- withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, LlamaCppCompletionPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ withSettings(additionalSettings: Partial<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
  declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  content: z.ZodString;
@@ -202,15 +202,15 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  seed: z.ZodNumber;
  stop: z.ZodArray<z.ZodString, "many">;
  stream: z.ZodBoolean;
- temp: z.ZodNumber;
+ temperature: z.ZodOptional<z.ZodNumber>;
  tfs_z: z.ZodNumber;
  top_k: z.ZodNumber;
  top_p: z.ZodNumber;
  typical_p: z.ZodNumber;
  }, "strip", z.ZodTypeAny, {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -226,15 +226,15 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  }, {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -250,11 +250,11 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  }>;
  model: z.ZodString;
  prompt: z.ZodString;
@@ -295,14 +295,14 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  tokens_predicted: z.ZodNumber;
  truncated: z.ZodBoolean;
  }, "strip", z.ZodTypeAny, {
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -318,11 +318,11 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -343,14 +343,14 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  tokens_predicted: number;
  truncated: boolean;
  }, {
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -366,11 +366,11 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -393,14 +393,14 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
  }>;
  export type LlamaCppTextGenerationResponse = z.infer<typeof llamaCppTextGenerationResponseSchema>;
  declare const llamaCppTextStreamChunkSchema: import("../../core/schema/ZodSchema.js").ZodSchema<{
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -416,11 +416,11 @@ declare const llamaCppTextStreamChunkSchema: import("../../core/schema/ZodSchema
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -445,25 +445,25 @@ declare const llamaCppTextStreamChunkSchema: import("../../core/schema/ZodSchema
  content: string;
  }>;
  export type LlamaCppTextStreamChunk = (typeof llamaCppTextStreamChunkSchema)["_type"];
- export type LlamaCppTextGenerationResponseFormatType<T> = {
+ export type LlamaCppCompletionResponseFormatType<T> = {
  stream: boolean;
  handler: ResponseHandler<T>;
  };
- export declare const LlamaCppTextGenerationResponseFormat: {
+ export declare const LlamaCppCompletionResponseFormat: {
  /**
  * Returns the response as a JSON object.
  */
  json: {
  stream: false;
  handler: ResponseHandler<{
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -479,11 +479,11 @@ export declare const LlamaCppTextGenerationResponseFormat: {
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;
@@ -514,14 +514,14 @@ export declare const LlamaCppTextGenerationResponseFormat: {
  handler: ({ response }: {
  response: Response;
  }) => Promise<AsyncIterable<Delta<{
- stop: true;
  model: string;
- prompt: string;
+ stop: true;
  content: string;
+ prompt: string;
  generation_settings: {
- stop: string[];
  model: string;
  stream: boolean;
+ stop: string[];
  seed: number;
  mirostat: number;
  frequency_penalty: number;
@@ -537,11 +537,11 @@ export declare const LlamaCppTextGenerationResponseFormat: {
  presence_penalty: number;
  repeat_last_n: number;
  repeat_penalty: number;
- temp: number;
  tfs_z: number;
  top_k: number;
  top_p: number;
  typical_p: number;
+ temperature?: number | undefined;
  };
  stopped_eos: boolean;
  stopped_limit: boolean;

package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js}
@@ -11,7 +11,7 @@ import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStr
  import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
  import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
- export class LlamaCppTextGenerationModel extends AbstractModel {
+ export class LlamaCppCompletionModel extends AbstractModel {
  constructor(settings = {}) {
  super({ settings });
  Object.defineProperty(this, "provider", {
@@ -108,7 +108,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  async doGenerateTexts(prompt, options) {
  const response = await this.callAPI(prompt, {
  ...options,
- responseFormat: LlamaCppTextGenerationResponseFormat.json,
+ responseFormat: LlamaCppCompletionResponseFormat.json,
  });
  return {
  response,
@@ -132,7 +132,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
- responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
+ responseFormat: LlamaCppCompletionResponseFormat.deltaIterable,
  });
  }
  extractTextDelta(delta) {
@@ -175,7 +175,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  });
  }
  withSettings(additionalSettings) {
- return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
+ return new LlamaCppCompletionModel(Object.assign({}, this.settings, additionalSettings));
  }
  }
  const llamaCppTextGenerationResponseSchema = z.object({
@@ -200,7 +200,7 @@ const llamaCppTextGenerationResponseSchema = z.object({
  seed: z.number(),
  stop: z.array(z.string()),
  stream: z.boolean(),
- temp: z.number(),
+ temperature: z.number().optional(), // optional for backwards compatibility
  tfs_z: z.number(),
  top_k: z.number(),
  top_p: z.number(),
@@ -263,7 +263,7 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
  });
  return queue;
  }
- export const LlamaCppTextGenerationResponseFormat = {
+ export const LlamaCppCompletionResponseFormat = {
  /**
  * Returns the response as a JSON object.
  */

package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs}
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
  const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
  const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
  const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
- const LlamaCppTextGenerationModel_js_1 = require("./LlamaCppTextGenerationModel.cjs");
+ const LlamaCppCompletionModel_js_1 = require("./LlamaCppCompletionModel.cjs");
  describe("streamText", () => {
  const server = new StreamingTestServer_js_1.StreamingTestServer("http://127.0.0.1:8080/completion");
  server.setupTestEnvironment();
@@ -17,7 +17,7 @@ describe("streamText", () => {
  `"mirostat_eta":0.10000000149011612,"mirostat_tau":5.0,"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
  `"n_ctx":4096,"n_keep":0,"n_predict":-1,"n_probs":0,"penalize_nl":true,"penalty_prompt_tokens":[],` +
  `"presence_penalty":0.0,"repeat_last_n":64,"repeat_penalty":1.100000023841858,"seed":4294967295,` +
- `"stop":[],"stream":true,"temp":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
+ `"stop":[],"stream":true,"temperature":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
  `"typical_p":1.0,"use_penalty_prompt_tokens":false},"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
  `"prompt":"hello","slot_id":0,"stop":true,"stopped_eos":true,"stopped_limit":false,` +
  `"stopped_word":false,"stopping_word":"","timings":{"predicted_ms":1054.704,"predicted_n":69,` +
@@ -26,7 +26,7 @@ describe("streamText", () => {
  `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
  `"tokens_predicted":69,"truncated":false}\n\n`,
  ];
- const stream = await (0, streamText_js_1.streamText)(new LlamaCppTextGenerationModel_js_1.LlamaCppTextGenerationModel().withTextPrompt(), "hello");
+ const stream = await (0, streamText_js_1.streamText)(new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel().withTextPrompt(), "hello");
  // note: space moved to last chunk bc of trimming
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
  "Hello",

package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js}
@@ -1,7 +1,7 @@
  import { streamText } from "../../model-function/generate-text/streamText.js";
  import { StreamingTestServer } from "../../test/StreamingTestServer.js";
  import { arrayFromAsync } from "../../test/arrayFromAsync.js";
- import { LlamaCppTextGenerationModel } from "./LlamaCppTextGenerationModel.js";
+ import { LlamaCppCompletionModel } from "./LlamaCppCompletionModel.js";
  describe("streamText", () => {
  const server = new StreamingTestServer("http://127.0.0.1:8080/completion");
  server.setupTestEnvironment();
@@ -15,7 +15,7 @@ describe("streamText", () => {
  `"mirostat_eta":0.10000000149011612,"mirostat_tau":5.0,"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
  `"n_ctx":4096,"n_keep":0,"n_predict":-1,"n_probs":0,"penalize_nl":true,"penalty_prompt_tokens":[],` +
  `"presence_penalty":0.0,"repeat_last_n":64,"repeat_penalty":1.100000023841858,"seed":4294967295,` +
- `"stop":[],"stream":true,"temp":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
+ `"stop":[],"stream":true,"temperature":0.800000011920929,"tfs_z":1.0,"top_k":40,"top_p":0.949999988079071,` +
  `"typical_p":1.0,"use_penalty_prompt_tokens":false},"model":"models/llama-2-7b-chat.Q4_K_M.gguf",` +
  `"prompt":"hello","slot_id":0,"stop":true,"stopped_eos":true,"stopped_limit":false,` +
  `"stopped_word":false,"stopping_word":"","timings":{"predicted_ms":1054.704,"predicted_n":69,` +
@@ -24,7 +24,7 @@ describe("streamText", () => {
  `"prompt_per_token_ms":48.845600000000005},"tokens_cached":74,"tokens_evaluated":5,` +
  `"tokens_predicted":69,"truncated":false}\n\n`,
  ];
- const stream = await streamText(new LlamaCppTextGenerationModel().withTextPrompt(), "hello");
+ const stream = await streamText(new LlamaCppCompletionModel().withTextPrompt(), "hello");
  // note: space moved to last chunk bc of trimming
  expect(await arrayFromAsync(stream)).toStrictEqual([
  "Hello",

package/model-provider/llamacpp/LlamaCppFacade.cjs
@@ -3,10 +3,10 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.Tokenizer = exports.TextEmbedder = exports.TextGenerator = void 0;
  const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
  const LlamaCppTextEmbeddingModel_js_1 = require("./LlamaCppTextEmbeddingModel.cjs");
- const LlamaCppTextGenerationModel_js_1 = require("./LlamaCppTextGenerationModel.cjs");
+ const LlamaCppCompletionModel_js_1 = require("./LlamaCppCompletionModel.cjs");
  const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
  function TextGenerator(settings = {}) {
- return new LlamaCppTextGenerationModel_js_1.LlamaCppTextGenerationModel(settings);
+ return new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel(settings);
  }
  exports.TextGenerator = TextGenerator;
  function TextEmbedder(settings = {}) {

package/model-provider/llamacpp/LlamaCppFacade.d.ts
@@ -1,7 +1,7 @@
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { LlamaCppTextEmbeddingModel, LlamaCppTextEmbeddingModelSettings } from "./LlamaCppTextEmbeddingModel.js";
- import { LlamaCppTextGenerationModel, LlamaCppTextGenerationModelSettings } from "./LlamaCppTextGenerationModel.js";
+ import { LlamaCppCompletionModel, LlamaCppCompletionModelSettings } from "./LlamaCppCompletionModel.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
- export declare function TextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings?: LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>): LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE>;
+ export declare function TextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>): LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE>;
  export declare function TextEmbedder(settings?: LlamaCppTextEmbeddingModelSettings): LlamaCppTextEmbeddingModel;
  export declare function Tokenizer(api?: ApiConfiguration): LlamaCppTokenizer;

package/model-provider/llamacpp/LlamaCppFacade.js
@@ -1,9 +1,9 @@
  import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
  import { LlamaCppTextEmbeddingModel, } from "./LlamaCppTextEmbeddingModel.js";
- import { LlamaCppTextGenerationModel, } from "./LlamaCppTextGenerationModel.js";
+ import { LlamaCppCompletionModel, } from "./LlamaCppCompletionModel.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
  export function TextGenerator(settings = {}) {
- return new LlamaCppTextGenerationModel(settings);
+ return new LlamaCppCompletionModel(settings);
  }
  export function TextEmbedder(settings = {}) {
  return new LlamaCppTextEmbeddingModel(settings);

package/model-provider/llamacpp/index.cjs
@@ -33,5 +33,5 @@ var LlamaCppError_js_1 = require("./LlamaCppError.cjs");
  Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: function () { return LlamaCppError_js_1.LlamaCppError; } });
  exports.llamacpp = __importStar(require("./LlamaCppFacade.cjs"));
  __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
- __exportStar(require("./LlamaCppTextGenerationModel.cjs"), exports);
+ __exportStar(require("./LlamaCppCompletionModel.cjs"), exports);
  __exportStar(require("./LlamaCppTokenizer.cjs"), exports);