modelfusion 0.107.0 → 0.108.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/CHANGELOG.md +44 -0
  2. package/README.md +11 -10
  3. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +17 -3
  4. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +99 -12
  5. package/model-provider/llamacpp/LlamaCppCompletionModel.js +17 -3
  6. package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -10
  7. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -187
  8. package/model-provider/openai/AbstractOpenAIChatModel.js +2 -10
  9. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +167 -0
  10. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +199 -0
  11. package/model-provider/openai/AbstractOpenAICompletionModel.js +163 -0
  12. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +0 -2
  13. package/model-provider/openai/OpenAIChatModel.d.ts +3 -6
  14. package/model-provider/openai/OpenAICompletionModel.cjs +4 -156
  15. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -191
  16. package/model-provider/openai/OpenAICompletionModel.js +3 -155
  17. package/model-provider/openai/index.cjs +1 -0
  18. package/model-provider/openai/index.d.ts +1 -0
  19. package/model-provider/openai/index.js +1 -0
  20. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -5
  21. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +74 -0
  22. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +27 -0
  23. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +70 -0
  24. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +37 -6
  25. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +33 -5
  26. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +35 -5
  27. package/model-provider/openai-compatible/OpenAICompatibleProviderName.cjs +2 -0
  28. package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +1 -0
  29. package/model-provider/openai-compatible/OpenAICompatibleProviderName.js +1 -0
  30. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +29 -0
  31. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +18 -0
  32. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +25 -0
  33. package/model-provider/openai-compatible/index.cjs +4 -1
  34. package/model-provider/openai-compatible/index.d.ts +4 -1
  35. package/model-provider/openai-compatible/index.js +4 -1
  36. package/package.json +16 -16
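
Besides the OpenAI refactoring shown in the hunks below, the file list reveals the other half of this release: completion support for OpenAI-compatible providers, including a Together AI API configuration (files 21 through 32). A minimal sketch of how these pieces fit together; the facade call openaicompatible.CompletionTextGenerator and the root re-export of TogetherAIApiConfiguration are inferred from the file names above, not confirmed by this diff:

import { streamText, openaicompatible, TogetherAIApiConfiguration } from "modelfusion";

// Hypothetical usage of the new OpenAI-compatible completion path.
// The model id is illustrative; any Together AI completion model should work.
const textStream = await streamText(
  openaicompatible.CompletionTextGenerator({
    api: new TogetherAIApiConfiguration(), // assumed to read TOGETHER_API_KEY from the environment
    model: "mistralai/Mixtral-8x7B-v0.1",
  }),
  "Write a short story about a robot learning to love:\n\n"
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}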
package/model-provider/openai/AbstractOpenAICompletionModel.js
@@ -0,0 +1,163 @@
+import { z } from "zod";
+import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { zodSchema } from "../../core/schema/ZodSchema.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { createEventSourceResponseHandler } from "../../util/streaming/createEventSourceResponseHandler.js";
+import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
+/**
+ * Abstract completion model that calls an API that is compatible with the OpenAI completions API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/completions/create
+ */
+export class AbstractOpenAICompletionModel extends AbstractModel {
+    constructor(settings) {
+        super({ settings });
+    }
+    async callAPI(prompt, options) {
+        const api = this.settings.api ?? new OpenAIApiConfiguration();
+        const user = this.settings.isUserIdForwardingEnabled
+            ? options.run?.userId
+            : undefined;
+        const abortSignal = options.run?.abortSignal;
+        const openaiResponseFormat = options.responseFormat;
+        // empty arrays are not allowed for stop:
+        const stopSequences = this.settings.stopSequences != null &&
+            Array.isArray(this.settings.stopSequences) &&
+            this.settings.stopSequences.length === 0
+            ? undefined
+            : this.settings.stopSequences;
+        return callWithRetryAndThrottle({
+            retry: api.retry,
+            throttle: api.throttle,
+            call: async () => {
+                return postJsonToApi({
+                    url: api.assembleUrl("/completions"),
+                    headers: api.headers,
+                    body: {
+                        stream: openaiResponseFormat.stream,
+                        model: this.settings.model,
+                        prompt,
+                        suffix: this.settings.suffix,
+                        max_tokens: this.settings.maxGenerationTokens,
+                        temperature: this.settings.temperature,
+                        top_p: this.settings.topP,
+                        n: this.settings.numberOfGenerations,
+                        logprobs: this.settings.logprobs,
+                        echo: this.settings.echo,
+                        stop: stopSequences,
+                        seed: this.settings.seed,
+                        presence_penalty: this.settings.presencePenalty,
+                        frequency_penalty: this.settings.frequencyPenalty,
+                        best_of: this.settings.bestOf,
+                        logit_bias: this.settings.logitBias,
+                        user,
+                    },
+                    failedResponseHandler: failedOpenAICallResponseHandler,
+                    successfulResponseHandler: openaiResponseFormat.handler,
+                    abortSignal,
+                });
+            },
+        });
+    }
+    async doGenerateTexts(prompt, options) {
+        const response = await this.callAPI(prompt, {
+            ...options,
+            responseFormat: OpenAITextResponseFormat.json,
+        });
+        return {
+            response,
+            textGenerationResults: response.choices.map((choice) => {
+                return {
+                    finishReason: this.translateFinishReason(choice.finish_reason),
+                    text: choice.text,
+                };
+            }),
+            usage: {
+                promptTokens: response.usage.prompt_tokens,
+                completionTokens: response.usage.completion_tokens,
+                totalTokens: response.usage.total_tokens,
+            },
+        };
+    }
+    translateFinishReason(finishReason) {
+        switch (finishReason) {
+            case "stop":
+                return "stop";
+            case "length":
+                return "length";
+            case "content_filter":
+                return "content-filter";
+            default:
+                return "unknown";
+        }
+    }
+    doStreamText(prompt, options) {
+        return this.callAPI(prompt, {
+            ...options,
+            responseFormat: OpenAITextResponseFormat.deltaIterable,
+        });
+    }
+    extractTextDelta(delta) {
+        const chunk = delta;
+        const firstChoice = chunk.choices[0];
+        if (firstChoice.index > 0) {
+            return undefined;
+        }
+        return chunk.choices[0].text;
+    }
+}
+const OpenAICompletionResponseSchema = z.object({
+    id: z.string(),
+    choices: z.array(z.object({
+        finish_reason: z
+            .enum(["stop", "length", "content_filter"])
+            .optional()
+            .nullable(),
+        index: z.number(),
+        logprobs: z.nullable(z.any()),
+        text: z.string(),
+    })),
+    created: z.number(),
+    model: z.string(),
+    system_fingerprint: z.string().optional(),
+    object: z.literal("text_completion"),
+    usage: z.object({
+        prompt_tokens: z.number(),
+        completion_tokens: z.number(),
+        total_tokens: z.number(),
+    }),
+});
+const openaiCompletionStreamChunkSchema = zodSchema(z.object({
+    choices: z.array(z.object({
+        text: z.string(),
+        finish_reason: z
+            .enum(["stop", "length", "content_filter"])
+            .optional()
+            .nullable(),
+        index: z.number(),
+    })),
+    created: z.number(),
+    id: z.string(),
+    model: z.string(),
+    system_fingerprint: z.string().optional(),
+    object: z.literal("text_completion"),
+}));
+export const OpenAITextResponseFormat = {
+    /**
+     * Returns the response as a JSON object.
+     */
+    json: {
+        stream: false,
+        handler: createJsonResponseHandler(OpenAICompletionResponseSchema),
+    },
+    /**
+     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
+     * of the response stream.
+     */
+    deltaIterable: {
+        stream: true,
+        handler: createEventSourceResponseHandler(openaiCompletionStreamChunkSchema),
+    },
+};
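
The point of this extraction shows in the hunks that follow: request assembly, retry/throttle handling, the zod response schemas, and the streaming format are now written once. A hypothetical minimal subclass, to illustrate how little a concrete completion model is left to implement; every name other than the imported base class and its settings type is invented for illustration:

import {
  AbstractOpenAICompletionModel,
  AbstractOpenAICompletionModelSettings,
} from "./AbstractOpenAICompletionModel.js";

// Invented settings type for illustration.
interface ExampleCompletionModelSettings
  extends AbstractOpenAICompletionModelSettings {
  model: string;
}

// callAPI, doGenerateTexts, doStreamText, and extractTextDelta are all
// inherited; the subclass only supplies provider metadata and settings
// bookkeeping.
class ExampleCompletionModel extends AbstractOpenAICompletionModel<ExampleCompletionModelSettings> {
  readonly provider = "example";

  get modelName() {
    return this.settings.model;
  }

  get settingsForEvent() {
    return { model: this.settings.model };
  }

  withSettings(additionalSettings: Partial<ExampleCompletionModelSettings>) {
    return new ExampleCompletionModel({
      ...this.settings,
      ...additionalSettings,
    }) as this;
  }
}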
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts
@@ -111,8 +111,6 @@ OpenAIChatSettings> {
             finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
         }[];
         system_fingerprint?: string | null | undefined;
-    } | {
-        object: string;
     }>>>;
     extractStructureTextDelta(delta: unknown): string | undefined;
     parseAccumulatedStructureText(accumulatedText: string): unknown;
package/model-provider/openai/OpenAIChatModel.d.ts
@@ -1,11 +1,11 @@
 import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
-import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
 import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
-import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt, OpenAIChatResponse } from "./AbstractOpenAIChatModel.js";
+import { AbstractOpenAIChatModel, AbstractOpenAIChatSettings, OpenAIChatPrompt, OpenAIChatResponse } from "./AbstractOpenAIChatModel.js";
 import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 export declare const OPENAI_CHAT_MODELS: {
@@ -102,12 +102,9 @@ export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }:
     model: OpenAIChatModelType;
     response: OpenAIChatResponse;
 }) => number | null;
-export interface OpenAIChatCallSettings extends AbstractOpenAIChatCallSettings {
+export interface OpenAIChatSettings extends AbstractOpenAIChatSettings {
     model: OpenAIChatModelType;
 }
-export interface OpenAIChatSettings extends TextGenerationModelSettings, Omit<OpenAIChatCallSettings, "stop" | "maxTokens"> {
-    isUserIdForwardingEnabled?: boolean;
-}
 /**
  * Create a text generation model that calls the OpenAI chat API.
  *
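
For downstream code the interface consolidation is a one-line change; a hypothetical before/after, assuming these types are re-exported from the package root like the rest of the provider types:

// 0.107.0: separate call settings and model settings interfaces
import { OpenAIChatCallSettings } from "modelfusion";

// 0.108.0: one merged interface (isUserIdForwardingEnabled now comes from
// AbstractOpenAIChatSettings)
import { OpenAIChatSettings } from "modelfusion";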
package/model-provider/openai/OpenAICompletionModel.cjs
@@ -1,18 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.OpenAITextResponseFormat = exports.OpenAICompletionModel = exports.calculateOpenAICompletionCostInMillicents = exports.isOpenAICompletionModel = exports.getOpenAICompletionModelInformation = exports.OPENAI_TEXT_GENERATION_MODELS = void 0;
-const zod_1 = require("zod");
-const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
-const postToApi_js_1 = require("../../core/api/postToApi.cjs");
-const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
-const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+exports.OpenAICompletionModel = exports.calculateOpenAICompletionCostInMillicents = exports.isOpenAICompletionModel = exports.getOpenAICompletionModelInformation = exports.OPENAI_TEXT_GENERATION_MODELS = void 0;
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const createEventSourceResponseHandler_js_1 = require("../../util/streaming/createEventSourceResponseHandler.cjs");
-const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
-const OpenAIError_js_1 = require("./OpenAIError.cjs");
+const AbstractOpenAICompletionModel_js_1 = require("./AbstractOpenAICompletionModel.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
 /**
  * @see https://platform.openai.com/docs/models/
@@ -145,9 +138,9 @@ exports.calculateOpenAICompletionCostInMillicents = calculateOpenAICompletionCos
  * "Write a short story about a robot learning to love:\n\n"
  * );
  */
-class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
+class OpenAICompletionModel extends AbstractOpenAICompletionModel_js_1.AbstractOpenAICompletionModel {
     constructor(settings) {
-        super({ settings });
+        super(settings);
         Object.defineProperty(this, "provider", {
             enumerable: true,
             configurable: true,
@@ -178,52 +171,6 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
     async countPromptTokens(input) {
         return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
     }
-    async callAPI(prompt, options) {
-        const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
-        const user = this.settings.isUserIdForwardingEnabled
-            ? options.run?.userId
-            : undefined;
-        const abortSignal = options.run?.abortSignal;
-        const openaiResponseFormat = options.responseFormat;
-        // empty arrays are not allowed for stop:
-        const stopSequences = this.settings.stopSequences != null &&
-            Array.isArray(this.settings.stopSequences) &&
-            this.settings.stopSequences.length === 0
-            ? undefined
-            : this.settings.stopSequences;
-        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-            retry: api.retry,
-            throttle: api.throttle,
-            call: async () => {
-                return (0, postToApi_js_1.postJsonToApi)({
-                    url: api.assembleUrl("/completions"),
-                    headers: api.headers,
-                    body: {
-                        stream: openaiResponseFormat.stream,
-                        model: this.settings.model,
-                        prompt,
-                        suffix: this.settings.suffix,
-                        max_tokens: this.settings.maxGenerationTokens,
-                        temperature: this.settings.temperature,
-                        top_p: this.settings.topP,
-                        n: this.settings.numberOfGenerations,
-                        logprobs: this.settings.logprobs,
-                        echo: this.settings.echo,
-                        stop: stopSequences,
-                        seed: this.settings.seed,
-                        presence_penalty: this.settings.presencePenalty,
-                        frequency_penalty: this.settings.frequencyPenalty,
-                        best_of: this.settings.bestOf,
-                        logit_bias: this.settings.logitBias,
-                        user,
-                    },
-                    failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
-                    successfulResponseHandler: openaiResponseFormat.handler,
-                    abortSignal,
-                });
-            },
-        });
-    }
     get settingsForEvent() {
         const eventSettingProperties = [
             ...TextGenerationModel_js_1.textGenerationModelProperties,
@@ -240,52 +187,6 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-    async doGenerateTexts(prompt, options) {
-        const response = await this.callAPI(prompt, {
-            ...options,
-            responseFormat: exports.OpenAITextResponseFormat.json,
-        });
-        return {
-            response,
-            textGenerationResults: response.choices.map((choice) => {
-                return {
-                    finishReason: this.translateFinishReason(choice.finish_reason),
-                    text: choice.text,
-                };
-            }),
-            usage: {
-                promptTokens: response.usage.prompt_tokens,
-                completionTokens: response.usage.completion_tokens,
-                totalTokens: response.usage.total_tokens,
-            },
-        };
-    }
-    translateFinishReason(finishReason) {
-        switch (finishReason) {
-            case "stop":
-                return "stop";
-            case "length":
-                return "length";
-            case "content_filter":
-                return "content-filter";
-            default:
-                return "unknown";
-        }
-    }
-    doStreamText(prompt, options) {
-        return this.callAPI(prompt, {
-            ...options,
-            responseFormat: exports.OpenAITextResponseFormat.deltaIterable,
-        });
-    }
-    extractTextDelta(delta) {
-        const chunk = delta;
-        const firstChoice = chunk.choices[0];
-        if (firstChoice.index > 0) {
-            return undefined;
-        }
-        return chunk.choices[0].text;
-    }
     /**
      * Returns this model with an instruction prompt template.
      */
@@ -314,56 +215,3 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
     }
 }
 exports.OpenAICompletionModel = OpenAICompletionModel;
-const OpenAICompletionResponseSchema = zod_1.z.object({
-    id: zod_1.z.string(),
-    choices: zod_1.z.array(zod_1.z.object({
-        finish_reason: zod_1.z
-            .enum(["stop", "length", "content_filter"])
-            .optional()
-            .nullable(),
-        index: zod_1.z.number(),
-        logprobs: zod_1.z.nullable(zod_1.z.any()),
-        text: zod_1.z.string(),
-    })),
-    created: zod_1.z.number(),
-    model: zod_1.z.string(),
-    system_fingerprint: zod_1.z.string().optional(),
-    object: zod_1.z.literal("text_completion"),
-    usage: zod_1.z.object({
-        prompt_tokens: zod_1.z.number(),
-        completion_tokens: zod_1.z.number(),
-        total_tokens: zod_1.z.number(),
-    }),
-});
-const openaiCompletionStreamChunkSchema = (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({
-    choices: zod_1.z.array(zod_1.z.object({
-        text: zod_1.z.string(),
-        finish_reason: zod_1.z
-            .enum(["stop", "length", "content_filter"])
-            .optional()
-            .nullable(),
-        index: zod_1.z.number(),
-    })),
-    created: zod_1.z.number(),
-    id: zod_1.z.string(),
-    model: zod_1.z.string(),
-    system_fingerprint: zod_1.z.string().optional(),
-    object: zod_1.z.literal("text_completion"),
-}));
-exports.OpenAITextResponseFormat = {
-    /**
-     * Returns the response as a JSON object.
-     */
-    json: {
-        stream: false,
-        handler: (0, postToApi_js_1.createJsonResponseHandler)(OpenAICompletionResponseSchema),
-    },
-    /**
-     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
-     * of the response stream.
-     */
-    deltaIterable: {
-        stream: true,
-        handler: (0, createEventSourceResponseHandler_js_1.createEventSourceResponseHandler)(openaiCompletionStreamChunkSchema),
-    },
-};
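
None of this changes the public call pattern for OpenAI users; the jsdoc example that these hunks leave in place still applies. Reconstructed here for reference from the fragment visible above (the option values are illustrative, and openai.CompletionTextGenerator is assumed to remain the facade that constructs this class):

import { generateText, openai } from "modelfusion";

// Same call pattern before and after the refactor.
const text = await generateText(
  openai.CompletionTextGenerator({
    model: "gpt-3.5-turbo-instruct",
    temperature: 0.7,
    maxGenerationTokens: 500,
  }),
  "Write a short story about a robot learning to love:\n\n"
);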
package/model-provider/openai/OpenAICompletionModel.d.ts
@@ -1,12 +1,7 @@
-import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
-import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
-import { ResponseHandler } from "../../core/api/postToApi.js";
-import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
-import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
-import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
+import { AbstractOpenAICompletionModel, AbstractOpenAICompletionModelSettings, OpenAICompletionResponse } from "./AbstractOpenAICompletionModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 /**
  * @see https://platform.openai.com/docs/models/
@@ -97,22 +92,8 @@ export declare const calculateOpenAICompletionCostInMillicents: ({ model, respon
     model: OpenAICompletionModelType;
     response: OpenAICompletionResponse;
 }) => number;
-export interface OpenAICompletionCallSettings {
-    api?: ApiConfiguration;
+export interface OpenAICompletionModelSettings extends AbstractOpenAICompletionModelSettings {
     model: OpenAICompletionModelType;
-    suffix?: string;
-    temperature?: number;
-    topP?: number;
-    logprobs?: number;
-    echo?: boolean;
-    presencePenalty?: number;
-    frequencyPenalty?: number;
-    bestOf?: number;
-    logitBias?: Record<number, number>;
-    seed?: number | null;
-}
-export interface OpenAICompletionModelSettings extends TextGenerationModelSettings, Omit<OpenAICompletionCallSettings, "stop" | "maxTokens"> {
-    isUserIdForwardingEnabled?: boolean;
 }
 /**
  * Create a text generation model that calls the OpenAI text completion API.
@@ -132,60 +113,14 @@ export interface OpenAICompletionModelSettings extends TextGenerationModelSettin
  * "Write a short story about a robot learning to love:\n\n"
  * );
  */
-export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
+export declare class OpenAICompletionModel extends AbstractOpenAICompletionModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
     constructor(settings: OpenAICompletionModelSettings);
     readonly provider: "openai";
     get modelName(): OpenAICompletionModelType;
     readonly contextWindowSize: number;
     readonly tokenizer: TikTokenTokenizer;
    countPromptTokens(input: string): Promise<number>;
-    callAPI<RESULT>(prompt: string, options: {
-        responseFormat: OpenAITextResponseFormatType<RESULT>;
-    } & FunctionOptions): Promise<RESULT>;
     get settingsForEvent(): Partial<OpenAICompletionModelSettings>;
-    doGenerateTexts(prompt: string, options?: FunctionOptions): Promise<{
-        response: {
-            object: "text_completion";
-            usage: {
-                prompt_tokens: number;
-                completion_tokens: number;
-                total_tokens: number;
-            };
-            model: string;
-            id: string;
-            created: number;
-            choices: {
-                text: string;
-                index: number;
-                finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-                logprobs?: any;
-            }[];
-            system_fingerprint?: string | undefined;
-        };
-        textGenerationResults: {
-            finishReason: TextGenerationFinishReason;
-            text: string;
-        }[];
-        usage: {
-            promptTokens: number;
-            completionTokens: number;
-            totalTokens: number;
-        };
-    }>;
-    private translateFinishReason;
-    doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
-        object: "text_completion";
-        model: string;
-        id: string;
-        created: number;
-        choices: {
-            text: string;
-            index: number;
-            finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-        }[];
-        system_fingerprint?: string | undefined;
-    }>>>;
-    extractTextDelta(delta: unknown): string | undefined;
     /**
      * Returns this model with an instruction prompt template.
      */
@@ -200,126 +135,4 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
 }
-declare const OpenAICompletionResponseSchema: z.ZodObject<{
-    id: z.ZodString;
-    choices: z.ZodArray<z.ZodObject<{
-        finish_reason: z.ZodNullable<z.ZodOptional<z.ZodEnum<["stop", "length", "content_filter"]>>>;
-        index: z.ZodNumber;
-        logprobs: z.ZodNullable<z.ZodAny>;
-        text: z.ZodString;
-    }, "strip", z.ZodTypeAny, {
-        text: string;
-        index: number;
-        finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-        logprobs?: any;
-    }, {
-        text: string;
-        index: number;
-        finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-        logprobs?: any;
-    }>, "many">;
-    created: z.ZodNumber;
-    model: z.ZodString;
-    system_fingerprint: z.ZodOptional<z.ZodString>;
-    object: z.ZodLiteral<"text_completion">;
-    usage: z.ZodObject<{
-        prompt_tokens: z.ZodNumber;
-        completion_tokens: z.ZodNumber;
-        total_tokens: z.ZodNumber;
-    }, "strip", z.ZodTypeAny, {
-        prompt_tokens: number;
-        completion_tokens: number;
-        total_tokens: number;
-    }, {
-        prompt_tokens: number;
-        completion_tokens: number;
-        total_tokens: number;
-    }>;
-}, "strip", z.ZodTypeAny, {
-    object: "text_completion";
-    usage: {
-        prompt_tokens: number;
-        completion_tokens: number;
-        total_tokens: number;
-    };
-    model: string;
-    id: string;
-    created: number;
-    choices: {
-        text: string;
-        index: number;
-        finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-        logprobs?: any;
-    }[];
-    system_fingerprint?: string | undefined;
-}, {
-    object: "text_completion";
-    usage: {
-        prompt_tokens: number;
-        completion_tokens: number;
-        total_tokens: number;
-    };
-    model: string;
-    id: string;
-    created: number;
-    choices: {
-        text: string;
-        index: number;
-        finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-        logprobs?: any;
-    }[];
-    system_fingerprint?: string | undefined;
-}>;
-export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
-export type OpenAITextResponseFormatType<T> = {
-    stream: boolean;
-    handler: ResponseHandler<T>;
-};
-export declare const OpenAITextResponseFormat: {
-    /**
-     * Returns the response as a JSON object.
-     */
-    json: {
-        stream: boolean;
-        handler: ResponseHandler<{
-            object: "text_completion";
-            usage: {
-                prompt_tokens: number;
-                completion_tokens: number;
-                total_tokens: number;
-            };
-            model: string;
-            id: string;
-            created: number;
-            choices: {
-                text: string;
-                index: number;
-                finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-                logprobs?: any;
-            }[];
-            system_fingerprint?: string | undefined;
-        }>;
-    };
-    /**
-     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
-     * of the response stream.
-     */
-    deltaIterable: {
-        stream: boolean;
-        handler: ({ response }: {
-            response: Response;
-        }) => Promise<AsyncIterable<import("../../index.js").Delta<{
-            object: "text_completion";
-            model: string;
-            id: string;
-            created: number;
-            choices: {
-                text: string;
-                index: number;
-                finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
-            }[];
-            system_fingerprint?: string | undefined;
-        }>>>;
-    };
-};
 export {};
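
The schema and response-format declarations removed in this final hunk were not deleted outright: per the file list above, they reappear in the new AbstractOpenAICompletionModel.d.ts (+199 lines), where OpenAICompletionModel and the new OpenAICompatibleCompletionModel now share them.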