modelfusion 0.106.0 → 0.108.0

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (82)
  1. package/CHANGELOG.md +59 -0
  2. package/README.md +19 -59
  3. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +11 -0
  4. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +11 -0
  5. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +9 -7
  6. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +9 -7
  7. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +11 -0
  8. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +11 -0
  9. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs +150 -0
  10. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.d.ts +62 -0
  11. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js +143 -0
  12. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.cjs +60 -0
  13. package/model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.js +58 -0
  14. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.cjs +11 -0
  15. package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.test.js +11 -0
  16. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +11 -0
  17. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +11 -0
  18. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +11 -0
  19. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +11 -0
  20. package/model-function/generate-text/prompt-template/index.cjs +2 -1
  21. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  22. package/model-function/generate-text/prompt-template/index.js +1 -0
  23. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +3 -3
  24. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.cjs → LlamaCppCompletionModel.cjs} +25 -11
  25. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.d.ts → LlamaCppCompletionModel.d.ts} +125 -38
  26. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.js → LlamaCppCompletionModel.js} +23 -9
  27. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.cjs → LlamaCppCompletionModel.test.cjs} +3 -3
  28. package/model-provider/llamacpp/LlamaCppCompletionModel.test.d.ts +1 -0
  29. package/model-provider/llamacpp/{LlamaCppTextGenerationModel.test.js → LlamaCppCompletionModel.test.js} +3 -3
  30. package/model-provider/llamacpp/LlamaCppFacade.cjs +2 -2
  31. package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -2
  32. package/model-provider/llamacpp/LlamaCppFacade.js +2 -2
  33. package/model-provider/llamacpp/index.cjs +1 -1
  34. package/model-provider/llamacpp/index.d.ts +1 -1
  35. package/model-provider/llamacpp/index.js +1 -1
  36. package/model-provider/mistral/MistralChatModel.cjs +4 -4
  37. package/model-provider/mistral/MistralChatModel.d.ts +6 -6
  38. package/model-provider/mistral/MistralChatModel.js +1 -1
  39. package/model-provider/mistral/index.cjs +3 -3
  40. package/model-provider/mistral/index.d.ts +2 -2
  41. package/model-provider/mistral/index.js +2 -2
  42. package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -10
  43. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +13 -195
  44. package/model-provider/openai/AbstractOpenAIChatModel.js +2 -10
  45. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +167 -0
  46. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +199 -0
  47. package/model-provider/openai/AbstractOpenAICompletionModel.js +163 -0
  48. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -3
  49. package/model-provider/openai/OpenAIChatModel.d.ts +3 -6
  50. package/model-provider/openai/OpenAICompletionModel.cjs +4 -156
  51. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -191
  52. package/model-provider/openai/OpenAICompletionModel.js +3 -155
  53. package/model-provider/openai/index.cjs +1 -0
  54. package/model-provider/openai/index.d.ts +1 -0
  55. package/model-provider/openai/index.js +1 -0
  56. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -5
  57. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +74 -0
  58. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +27 -0
  59. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +70 -0
  60. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +37 -6
  61. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +33 -5
  62. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +35 -5
  63. package/model-provider/openai-compatible/OpenAICompatibleProviderName.cjs +2 -0
  64. package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +1 -0
  65. package/model-provider/openai-compatible/OpenAICompatibleProviderName.js +1 -0
  66. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +29 -0
  67. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +18 -0
  68. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +25 -0
  69. package/model-provider/openai-compatible/index.cjs +4 -1
  70. package/model-provider/openai-compatible/index.d.ts +4 -1
  71. package/model-provider/openai-compatible/index.js +4 -1
  72. package/package.json +16 -16
  73. package/tool/generate-tool-call/index.cjs +1 -0
  74. package/tool/generate-tool-call/index.d.ts +1 -0
  75. package/tool/generate-tool-call/index.js +1 -0
  76. package/tool/generate-tool-call/jsonToolCallPrompt.cjs +30 -0
  77. package/tool/generate-tool-call/jsonToolCallPrompt.d.ts +5 -0
  78. package/tool/generate-tool-call/jsonToolCallPrompt.js +27 -0
  79. /package/{model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts → model-function/generate-text/prompt-template/MistralInstructPromptTemplate.test.d.ts} +0 -0
  80. /package/model-provider/mistral/{MistralPromptTemplate.cjs → MistralChatPromptTemplate.cjs} +0 -0
  81. /package/model-provider/mistral/{MistralPromptTemplate.d.ts → MistralChatPromptTemplate.d.ts} +0 -0
  82. /package/model-provider/mistral/{MistralPromptTemplate.js → MistralChatPromptTemplate.js} +0 -0
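
Files 24–35 rename LlamaCppTextGenerationModel to LlamaCppCompletionModel across the class, tests, facade, and index files, so imports by the old class name break. A minimal migration sketch; the root-level re-export of the class is assumed from the index.* updates, not shown in this diff:

// Before (0.106.0):
// import { LlamaCppTextGenerationModel } from "modelfusion";
// const model = new LlamaCppTextGenerationModel({ temperature: 0.7 });

// After (0.108.0) — same settings, renamed class:
import { LlamaCppCompletionModel } from "modelfusion";

const model = new LlamaCppCompletionModel({ temperature: 0.7 });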
package/model-provider/openai/OpenAICompletionModel.d.ts

@@ -1,12 +1,7 @@
- import { z } from "zod";
- import { FunctionOptions } from "../../core/FunctionOptions.js";
- import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
- import { ResponseHandler } from "../../core/api/postToApi.js";
- import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
+ import { AbstractOpenAICompletionModel, AbstractOpenAICompletionModelSettings, OpenAICompletionResponse } from "./AbstractOpenAICompletionModel.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  /**
  * @see https://platform.openai.com/docs/models/
@@ -97,22 +92,8 @@ export declare const calculateOpenAICompletionCostInMillicents: ({ model, respon
  model: OpenAICompletionModelType;
  response: OpenAICompletionResponse;
  }) => number;
- export interface OpenAICompletionCallSettings {
- api?: ApiConfiguration;
+ export interface OpenAICompletionModelSettings extends AbstractOpenAICompletionModelSettings {
  model: OpenAICompletionModelType;
- suffix?: string;
- temperature?: number;
- topP?: number;
- logprobs?: number;
- echo?: boolean;
- presencePenalty?: number;
- frequencyPenalty?: number;
- bestOf?: number;
- logitBias?: Record<number, number>;
- seed?: number | null;
- }
- export interface OpenAICompletionModelSettings extends TextGenerationModelSettings, Omit<OpenAICompletionCallSettings, "stop" | "maxTokens"> {
- isUserIdForwardingEnabled?: boolean;
  }
  /**
  * Create a text generation model that calls the OpenAI text completion API.
@@ -132,60 +113,14 @@ export interface OpenAICompletionModelSettings extends TextGenerationModelSettin
  * "Write a short story about a robot learning to love:\n\n"
  * );
  */
- export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
+ export declare class OpenAICompletionModel extends AbstractOpenAICompletionModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
  constructor(settings: OpenAICompletionModelSettings);
  readonly provider: "openai";
  get modelName(): OpenAICompletionModelType;
  readonly contextWindowSize: number;
  readonly tokenizer: TikTokenTokenizer;
  countPromptTokens(input: string): Promise<number>;
- callAPI<RESULT>(prompt: string, options: {
- responseFormat: OpenAITextResponseFormatType<RESULT>;
- } & FunctionOptions): Promise<RESULT>;
  get settingsForEvent(): Partial<OpenAICompletionModelSettings>;
- doGenerateTexts(prompt: string, options?: FunctionOptions): Promise<{
- response: {
- object: "text_completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }[];
- system_fingerprint?: string | undefined;
- };
- textGenerationResults: {
- finishReason: TextGenerationFinishReason;
- text: string;
- }[];
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- }>;
- private translateFinishReason;
- doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
- object: "text_completion";
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- }[];
- system_fingerprint?: string | undefined;
- }>>>;
- extractTextDelta(delta: unknown): string | undefined;
  /**
  * Returns this model with an instruction prompt template.
  */
@@ -200,126 +135,4 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
  withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
  }
- declare const OpenAICompletionResponseSchema: z.ZodObject<{
- id: z.ZodString;
- choices: z.ZodArray<z.ZodObject<{
- finish_reason: z.ZodNullable<z.ZodOptional<z.ZodEnum<["stop", "length", "content_filter"]>>>;
- index: z.ZodNumber;
- logprobs: z.ZodNullable<z.ZodAny>;
- text: z.ZodString;
- }, "strip", z.ZodTypeAny, {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }, {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }>, "many">;
- created: z.ZodNumber;
- model: z.ZodString;
- system_fingerprint: z.ZodOptional<z.ZodString>;
- object: z.ZodLiteral<"text_completion">;
- usage: z.ZodObject<{
- prompt_tokens: z.ZodNumber;
- completion_tokens: z.ZodNumber;
- total_tokens: z.ZodNumber;
- }, "strip", z.ZodTypeAny, {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- }, {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- }>;
- }, "strip", z.ZodTypeAny, {
- object: "text_completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }[];
- system_fingerprint?: string | undefined;
- }, {
- object: "text_completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }[];
- system_fingerprint?: string | undefined;
- }>;
- export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
- export type OpenAITextResponseFormatType<T> = {
- stream: boolean;
- handler: ResponseHandler<T>;
- };
- export declare const OpenAITextResponseFormat: {
- /**
- * Returns the response as a JSON object.
- */
- json: {
- stream: boolean;
- handler: ResponseHandler<{
- object: "text_completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- logprobs?: any;
- }[];
- system_fingerprint?: string | undefined;
- }>;
- };
- /**
- * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
- * of the response stream.
- */
- deltaIterable: {
- stream: boolean;
- handler: ({ response }: {
- response: Response;
- }) => Promise<AsyncIterable<import("../../index.js").Delta<{
- object: "text_completion";
- model: string;
- id: string;
- created: number;
- choices: {
- text: string;
- index: number;
- finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
- }[];
- system_fingerprint?: string | undefined;
- }>>>;
- };
- };
  export {};
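
The doc-comment context retained above ends with the tail of a usage example. For orientation, a minimal sketch of the typical call; the openai.CompletionTextGenerator facade and the positional generateText signature are assumptions based on this package's documented usage, not part of this diff:

import { generateText, openai } from "modelfusion";

// The public surface of OpenAICompletionModel is unchanged by the refactor;
// only the shared internals moved into AbstractOpenAICompletionModel.
const text = await generateText(
  openai.CompletionTextGenerator({
    model: "gpt-3.5-turbo-instruct",
    temperature: 0.7,
    maxGenerationTokens: 500,
  }),
  "Write a short story about a robot learning to love:\n\n"
);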
package/model-provider/openai/OpenAICompletionModel.js

@@ -1,15 +1,8 @@
- import { z } from "zod";
- import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
- import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
- import { zodSchema } from "../../core/schema/ZodSchema.js";
- import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
  import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
- import { createEventSourceResponseHandler } from "../../util/streaming/createEventSourceResponseHandler.js";
- import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
- import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
+ import { AbstractOpenAICompletionModel, } from "./AbstractOpenAICompletionModel.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  /**
  * @see https://platform.openai.com/docs/models/
@@ -139,9 +132,9 @@ export const calculateOpenAICompletionCostInMillicents = ({ model, response, })
  * "Write a short story about a robot learning to love:\n\n"
  * );
  */
- export class OpenAICompletionModel extends AbstractModel {
+ export class OpenAICompletionModel extends AbstractOpenAICompletionModel {
  constructor(settings) {
- super({ settings });
+ super(settings);
  Object.defineProperty(this, "provider", {
  enumerable: true,
  configurable: true,
@@ -172,52 +165,6 @@ export class OpenAICompletionModel extends AbstractModel {
  async countPromptTokens(input) {
  return countTokens(this.tokenizer, input);
  }
- async callAPI(prompt, options) {
- const api = this.settings.api ?? new OpenAIApiConfiguration();
- const user = this.settings.isUserIdForwardingEnabled
- ? options.run?.userId
- : undefined;
- const abortSignal = options.run?.abortSignal;
- const openaiResponseFormat = options.responseFormat;
- // empty arrays are not allowed for stop:
- const stopSequences = this.settings.stopSequences != null &&
- Array.isArray(this.settings.stopSequences) &&
- this.settings.stopSequences.length === 0
- ? undefined
- : this.settings.stopSequences;
- return callWithRetryAndThrottle({
- retry: api.retry,
- throttle: api.throttle,
- call: async () => {
- return postJsonToApi({
- url: api.assembleUrl("/completions"),
- headers: api.headers,
- body: {
- stream: openaiResponseFormat.stream,
- model: this.settings.model,
- prompt,
- suffix: this.settings.suffix,
- max_tokens: this.settings.maxGenerationTokens,
- temperature: this.settings.temperature,
- top_p: this.settings.topP,
- n: this.settings.numberOfGenerations,
- logprobs: this.settings.logprobs,
- echo: this.settings.echo,
- stop: stopSequences,
- seed: this.settings.seed,
- presence_penalty: this.settings.presencePenalty,
- frequency_penalty: this.settings.frequencyPenalty,
- best_of: this.settings.bestOf,
- logit_bias: this.settings.logitBias,
- user,
- },
- failedResponseHandler: failedOpenAICallResponseHandler,
- successfulResponseHandler: openaiResponseFormat.handler,
- abortSignal,
- });
- },
- });
- }
  get settingsForEvent() {
  const eventSettingProperties = [
  ...textGenerationModelProperties,
@@ -234,52 +181,6 @@ export class OpenAICompletionModel extends AbstractModel {
  ];
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
  }
- async doGenerateTexts(prompt, options) {
- const response = await this.callAPI(prompt, {
- ...options,
- responseFormat: OpenAITextResponseFormat.json,
- });
- return {
- response,
- textGenerationResults: response.choices.map((choice) => {
- return {
- finishReason: this.translateFinishReason(choice.finish_reason),
- text: choice.text,
- };
- }),
- usage: {
- promptTokens: response.usage.prompt_tokens,
- completionTokens: response.usage.completion_tokens,
- totalTokens: response.usage.total_tokens,
- },
- };
- }
- translateFinishReason(finishReason) {
- switch (finishReason) {
- case "stop":
- return "stop";
- case "length":
- return "length";
- case "content_filter":
- return "content-filter";
- default:
- return "unknown";
- }
- }
- doStreamText(prompt, options) {
- return this.callAPI(prompt, {
- ...options,
- responseFormat: OpenAITextResponseFormat.deltaIterable,
- });
- }
- extractTextDelta(delta) {
- const chunk = delta;
- const firstChoice = chunk.choices[0];
- if (firstChoice.index > 0) {
- return undefined;
- }
- return chunk.choices[0].text;
- }
  /**
  * Returns this model with an instruction prompt template.
  */
@@ -307,56 +208,3 @@ export class OpenAICompletionModel extends AbstractModel {
  return new OpenAICompletionModel(Object.assign({}, this.settings, additionalSettings));
  }
  }
- const OpenAICompletionResponseSchema = z.object({
- id: z.string(),
- choices: z.array(z.object({
- finish_reason: z
- .enum(["stop", "length", "content_filter"])
- .optional()
- .nullable(),
- index: z.number(),
- logprobs: z.nullable(z.any()),
- text: z.string(),
- })),
- created: z.number(),
- model: z.string(),
- system_fingerprint: z.string().optional(),
- object: z.literal("text_completion"),
- usage: z.object({
- prompt_tokens: z.number(),
- completion_tokens: z.number(),
- total_tokens: z.number(),
- }),
- });
- const openaiCompletionStreamChunkSchema = zodSchema(z.object({
- choices: z.array(z.object({
- text: z.string(),
- finish_reason: z
- .enum(["stop", "length", "content_filter"])
- .optional()
- .nullable(),
- index: z.number(),
- })),
- created: z.number(),
- id: z.string(),
- model: z.string(),
- system_fingerprint: z.string().optional(),
- object: z.literal("text_completion"),
- }));
- export const OpenAITextResponseFormat = {
- /**
- * Returns the response as a JSON object.
- */
- json: {
- stream: false,
- handler: createJsonResponseHandler(OpenAICompletionResponseSchema),
- },
- /**
- * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
- * of the response stream.
- */
- deltaIterable: {
- stream: true,
- handler: createEventSourceResponseHandler(openaiCompletionStreamChunkSchema),
- },
- };
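
Together, the .d.ts and .js hunks above show one extraction: callAPI, doGenerateTexts, translateFinishReason, doStreamText, extractTextDelta, and the Zod response/stream-chunk schemas move out of OpenAICompletionModel into the new AbstractOpenAICompletionModel (files 45–47), leaving only provider metadata, tokenizer, and event settings in the subclass. A self-contained sketch of the pattern with invented names, not the library's real types:

// Shared transport logic lives in an abstract base; subclasses contribute
// provider-specific details (name, tokenizer, context window).
interface CompletionSettings {
  model: string;
  temperature?: number;
}

abstract class AbstractCompletionBase<S extends CompletionSettings> {
  constructor(readonly settings: S) {}

  // Stand-in for the request construction that AbstractOpenAICompletionModel
  // now owns (callAPI / doGenerateTexts / doStreamText in the real code).
  protected buildRequestBody(prompt: string) {
    return {
      model: this.settings.model,
      prompt,
      temperature: this.settings.temperature,
    };
  }

  abstract get provider(): string;
}

class OpenAILikeCompletionModel extends AbstractCompletionBase<CompletionSettings> {
  get provider() {
    return "openai";
  }
}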
package/model-provider/openai/index.cjs

@@ -28,6 +28,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.openai = exports.OpenAIChatPrompt = void 0;
  __exportStar(require("./AbstractOpenAIChatModel.cjs"), exports);
+ __exportStar(require("./AbstractOpenAICompletionModel.cjs"), exports);
  __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
  __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
  __exportStar(require("./OpenAIChatMessage.cjs"), exports);
package/model-provider/openai/index.d.ts

@@ -1,4 +1,5 @@
  export * from "./AbstractOpenAIChatModel.js";
+ export * from "./AbstractOpenAICompletionModel.js";
  export * from "./AzureOpenAIApiConfiguration.js";
  export * from "./OpenAIApiConfiguration.js";
  export * from "./OpenAIChatMessage.js";
package/model-provider/openai/index.js

@@ -1,4 +1,5 @@
  export * from "./AbstractOpenAIChatModel.js";
+ export * from "./AbstractOpenAICompletionModel.js";
  export * from "./AzureOpenAIApiConfiguration.js";
  export * from "./OpenAIApiConfiguration.js";
  export * from "./OpenAIChatMessage.js";
package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts

@@ -1,15 +1,14 @@
  import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
  import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
  import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
- import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt } from "../openai/AbstractOpenAIChatModel.js";
- export type OpenAICompatibleProviderName = `openaicompatible` | `openaicompatible-${string}`;
- export interface OpenAICompatibleChatSettings extends TextGenerationModelSettings, Omit<AbstractOpenAIChatCallSettings, "stop" | "maxTokens"> {
+ import { AbstractOpenAIChatModel, AbstractOpenAIChatSettings, OpenAIChatPrompt } from "../openai/AbstractOpenAIChatModel.js";
+ import { OpenAICompatibleProviderName } from "./OpenAICompatibleProviderName.js";
+ export interface OpenAICompatibleChatSettings extends AbstractOpenAIChatSettings {
  provider?: OpenAICompatibleProviderName;
- isUserIdForwardingEnabled?: boolean;
  }
  /**
  * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
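
With provider now typed by the shared OpenAICompatibleProviderName module (files 63–65) and the settings flattened into AbstractOpenAIChatSettings, chat and completion models carry the same provider tag. A hedged construction sketch; the openaicompatible.ChatTextGenerator facade name, the "openaicompatible-togetherai" tag, and the model id are assumptions, not taken from this diff:

import { openaicompatible, TogetherAIApiConfiguration } from "modelfusion";

const chatModel = openaicompatible.ChatTextGenerator({
  api: new TogetherAIApiConfiguration(), // configuration added in files 66–68
  provider: "openaicompatible-togetherai", // assumed provider name
  model: "mistralai/Mixtral-8x7B-Instruct-v0.1", // illustrative model id
});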
package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs

@@ -0,0 +1,74 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.OpenAICompatibleCompletionModel = void 0;
+ const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
+ const AbstractOpenAICompletionModel_js_1 = require("../openai/AbstractOpenAICompletionModel.cjs");
+ /**
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/completions/create
+ */
+ class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel_js_1.AbstractOpenAICompletionModel {
+ constructor(settings) {
+ super(settings);
+ Object.defineProperty(this, "contextWindowSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ Object.defineProperty(this, "tokenizer", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ Object.defineProperty(this, "countPromptTokens", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ }
+ get provider() {
+ return this.settings.provider ?? "openaicompatible";
+ }
+ get modelName() {
+ return this.settings.model;
+ }
+ get settingsForEvent() {
+ const eventSettingProperties = [
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
+ "suffix",
+ "temperature",
+ "topP",
+ "logprobs",
+ "echo",
+ "presencePenalty",
+ "frequencyPenalty",
+ "bestOf",
+ "logitBias",
+ "seed",
+ ];
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+ }
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
+ model: this.withSettings({
+ stopSequences: [
+ ...(this.settings.stopSequences ?? []),
+ ...promptTemplate.stopSequences,
+ ],
+ }),
+ promptTemplate,
+ });
+ }
+ withSettings(additionalSettings) {
+ return new OpenAICompatibleCompletionModel(Object.assign({}, this.settings, additionalSettings));
+ }
+ }
+ exports.OpenAICompatibleCompletionModel = OpenAICompatibleCompletionModel;
package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts

@@ -0,0 +1,27 @@
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { AbstractOpenAICompletionModel, AbstractOpenAICompletionModelSettings } from "../openai/AbstractOpenAICompletionModel.js";
+ import { OpenAICompatibleProviderName } from "./OpenAICompatibleProviderName.js";
+ export interface OpenAICompatibleCompletionModelSettings extends AbstractOpenAICompletionModelSettings {
+ provider?: OpenAICompatibleProviderName;
+ }
+ /**
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/completions/create
+ */
+ export declare class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel<OpenAICompatibleCompletionModelSettings> implements TextStreamingModel<string, OpenAICompatibleCompletionModelSettings> {
+ constructor(settings: OpenAICompatibleCompletionModelSettings);
+ get provider(): OpenAICompatibleProviderName;
+ get modelName(): string;
+ readonly contextWindowSize: undefined;
+ readonly tokenizer: undefined;
+ readonly countPromptTokens: undefined;
+ get settingsForEvent(): Partial<OpenAICompatibleCompletionModelSettings>;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompatibleCompletionModelSettings, this>;
+ withSettings(additionalSettings: Partial<OpenAICompatibleCompletionModelSettings>): this;
+ }
package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js

@@ -0,0 +1,70 @@
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { AbstractOpenAICompletionModel, } from "../openai/AbstractOpenAICompletionModel.js";
+ /**
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/completions/create
+ */
+ export class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel {
+ constructor(settings) {
+ super(settings);
+ Object.defineProperty(this, "contextWindowSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ Object.defineProperty(this, "tokenizer", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ Object.defineProperty(this, "countPromptTokens", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: undefined
+ });
+ }
+ get provider() {
+ return this.settings.provider ?? "openaicompatible";
+ }
+ get modelName() {
+ return this.settings.model;
+ }
+ get settingsForEvent() {
+ const eventSettingProperties = [
+ ...textGenerationModelProperties,
+ "suffix",
+ "temperature",
+ "topP",
+ "logprobs",
+ "echo",
+ "presencePenalty",
+ "frequencyPenalty",
+ "bestOf",
+ "logitBias",
+ "seed",
+ ];
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+ }
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateTextStreamingModel({
+ model: this.withSettings({
+ stopSequences: [
+ ...(this.settings.stopSequences ?? []),
+ ...promptTemplate.stopSequences,
+ ],
+ }),
+ promptTemplate,
+ });
+ }
+ withSettings(additionalSettings) {
+ return new OpenAICompatibleCompletionModel(Object.assign({}, this.settings, additionalSettings));
+ }
+ }
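
To close the loop on the new completion model, a hedged end-to-end sketch wiring it to the TogetherAI configuration from files 66–68; the openaicompatible.CompletionTextGenerator facade and the positional streamText signature are assumptions based on this package's documented usage, not part of the diff:

import {
  openaicompatible,
  streamText,
  TogetherAIApiConfiguration,
} from "modelfusion";

// tokenizer, contextWindowSize, and countPromptTokens are undefined on this
// model, as declared in the .d.ts above.
const textStream = await streamText(
  openaicompatible.CompletionTextGenerator({
    api: new TogetherAIApiConfiguration(), // assumed to read the TogetherAI API key from the environment
    model: "mistralai/Mixtral-8x7B-Instruct-v0.1", // illustrative model id
  }),
  "Write a short story about a robot learning to love:\n\n"
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}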