modelfusion 0.107.0 → 0.109.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/CHANGELOG.md +44 -0
  2. package/README.md +11 -10
  3. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +17 -3
  4. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +99 -12
  5. package/model-provider/llamacpp/LlamaCppCompletionModel.js +17 -3
  6. package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -10
  7. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -187
  8. package/model-provider/openai/AbstractOpenAIChatModel.js +2 -10
  9. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +167 -0
  10. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +199 -0
  11. package/model-provider/openai/AbstractOpenAICompletionModel.js +163 -0
  12. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +0 -2
  13. package/model-provider/openai/OpenAIChatModel.d.ts +3 -6
  14. package/model-provider/openai/OpenAICompletionModel.cjs +4 -156
  15. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -191
  16. package/model-provider/openai/OpenAICompletionModel.js +3 -155
  17. package/model-provider/openai/index.cjs +1 -0
  18. package/model-provider/openai/index.d.ts +1 -0
  19. package/model-provider/openai/index.js +1 -0
  20. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -5
  21. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +74 -0
  22. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +27 -0
  23. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +70 -0
  24. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +37 -6
  25. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +33 -5
  26. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +35 -5
  27. package/model-provider/openai-compatible/OpenAICompatibleProviderName.cjs +2 -0
  28. package/model-provider/openai-compatible/OpenAICompatibleProviderName.d.ts +1 -0
  29. package/model-provider/openai-compatible/OpenAICompatibleProviderName.js +1 -0
  30. package/model-provider/openai-compatible/TogetherAIApiConfiguration.cjs +29 -0
  31. package/model-provider/openai-compatible/TogetherAIApiConfiguration.d.ts +18 -0
  32. package/model-provider/openai-compatible/TogetherAIApiConfiguration.js +25 -0
  33. package/model-provider/openai-compatible/index.cjs +4 -1
  34. package/model-provider/openai-compatible/index.d.ts +4 -1
  35. package/model-provider/openai-compatible/index.js +4 -1
  36. package/package.json +1 -1
@@ -1,15 +1,8 @@
1
- import { z } from "zod";
2
- import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
- import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
- import { zodSchema } from "../../core/schema/ZodSchema.js";
5
- import { AbstractModel } from "../../model-function/AbstractModel.js";
6
1
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
7
2
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
8
3
  import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
9
4
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
10
- import { createEventSourceResponseHandler } from "../../util/streaming/createEventSourceResponseHandler.js";
11
- import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
12
- import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
5
+ import { AbstractOpenAICompletionModel, } from "./AbstractOpenAICompletionModel.js";
13
6
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
14
7
  /**
15
8
  * @see https://platform.openai.com/docs/models/
@@ -139,9 +132,9 @@ export const calculateOpenAICompletionCostInMillicents = ({ model, response, })
139
132
  * "Write a short story about a robot learning to love:\n\n"
140
133
  * );
141
134
  */
142
- export class OpenAICompletionModel extends AbstractModel {
135
+ export class OpenAICompletionModel extends AbstractOpenAICompletionModel {
143
136
  constructor(settings) {
144
- super({ settings });
137
+ super(settings);
145
138
  Object.defineProperty(this, "provider", {
146
139
  enumerable: true,
147
140
  configurable: true,
@@ -172,52 +165,6 @@ export class OpenAICompletionModel extends AbstractModel {
172
165
  async countPromptTokens(input) {
173
166
  return countTokens(this.tokenizer, input);
174
167
  }
175
- async callAPI(prompt, options) {
176
- const api = this.settings.api ?? new OpenAIApiConfiguration();
177
- const user = this.settings.isUserIdForwardingEnabled
178
- ? options.run?.userId
179
- : undefined;
180
- const abortSignal = options.run?.abortSignal;
181
- const openaiResponseFormat = options.responseFormat;
182
- // empty arrays are not allowed for stop:
183
- const stopSequences = this.settings.stopSequences != null &&
184
- Array.isArray(this.settings.stopSequences) &&
185
- this.settings.stopSequences.length === 0
186
- ? undefined
187
- : this.settings.stopSequences;
188
- return callWithRetryAndThrottle({
189
- retry: api.retry,
190
- throttle: api.throttle,
191
- call: async () => {
192
- return postJsonToApi({
193
- url: api.assembleUrl("/completions"),
194
- headers: api.headers,
195
- body: {
196
- stream: openaiResponseFormat.stream,
197
- model: this.settings.model,
198
- prompt,
199
- suffix: this.settings.suffix,
200
- max_tokens: this.settings.maxGenerationTokens,
201
- temperature: this.settings.temperature,
202
- top_p: this.settings.topP,
203
- n: this.settings.numberOfGenerations,
204
- logprobs: this.settings.logprobs,
205
- echo: this.settings.echo,
206
- stop: stopSequences,
207
- seed: this.settings.seed,
208
- presence_penalty: this.settings.presencePenalty,
209
- frequency_penalty: this.settings.frequencyPenalty,
210
- best_of: this.settings.bestOf,
211
- logit_bias: this.settings.logitBias,
212
- user,
213
- },
214
- failedResponseHandler: failedOpenAICallResponseHandler,
215
- successfulResponseHandler: openaiResponseFormat.handler,
216
- abortSignal,
217
- });
218
- },
219
- });
220
- }
221
168
  get settingsForEvent() {
222
169
  const eventSettingProperties = [
223
170
  ...textGenerationModelProperties,
@@ -234,52 +181,6 @@ export class OpenAICompletionModel extends AbstractModel {
234
181
  ];
235
182
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
236
183
  }
237
- async doGenerateTexts(prompt, options) {
238
- const response = await this.callAPI(prompt, {
239
- ...options,
240
- responseFormat: OpenAITextResponseFormat.json,
241
- });
242
- return {
243
- response,
244
- textGenerationResults: response.choices.map((choice) => {
245
- return {
246
- finishReason: this.translateFinishReason(choice.finish_reason),
247
- text: choice.text,
248
- };
249
- }),
250
- usage: {
251
- promptTokens: response.usage.prompt_tokens,
252
- completionTokens: response.usage.completion_tokens,
253
- totalTokens: response.usage.total_tokens,
254
- },
255
- };
256
- }
257
- translateFinishReason(finishReason) {
258
- switch (finishReason) {
259
- case "stop":
260
- return "stop";
261
- case "length":
262
- return "length";
263
- case "content_filter":
264
- return "content-filter";
265
- default:
266
- return "unknown";
267
- }
268
- }
269
- doStreamText(prompt, options) {
270
- return this.callAPI(prompt, {
271
- ...options,
272
- responseFormat: OpenAITextResponseFormat.deltaIterable,
273
- });
274
- }
275
- extractTextDelta(delta) {
276
- const chunk = delta;
277
- const firstChoice = chunk.choices[0];
278
- if (firstChoice.index > 0) {
279
- return undefined;
280
- }
281
- return chunk.choices[0].text;
282
- }
283
184
  /**
284
185
  * Returns this model with an instruction prompt template.
285
186
  */
@@ -307,56 +208,3 @@ export class OpenAICompletionModel extends AbstractModel {
307
208
  return new OpenAICompletionModel(Object.assign({}, this.settings, additionalSettings));
308
209
  }
309
210
  }
310
- const OpenAICompletionResponseSchema = z.object({
311
- id: z.string(),
312
- choices: z.array(z.object({
313
- finish_reason: z
314
- .enum(["stop", "length", "content_filter"])
315
- .optional()
316
- .nullable(),
317
- index: z.number(),
318
- logprobs: z.nullable(z.any()),
319
- text: z.string(),
320
- })),
321
- created: z.number(),
322
- model: z.string(),
323
- system_fingerprint: z.string().optional(),
324
- object: z.literal("text_completion"),
325
- usage: z.object({
326
- prompt_tokens: z.number(),
327
- completion_tokens: z.number(),
328
- total_tokens: z.number(),
329
- }),
330
- });
331
- const openaiCompletionStreamChunkSchema = zodSchema(z.object({
332
- choices: z.array(z.object({
333
- text: z.string(),
334
- finish_reason: z
335
- .enum(["stop", "length", "content_filter"])
336
- .optional()
337
- .nullable(),
338
- index: z.number(),
339
- })),
340
- created: z.number(),
341
- id: z.string(),
342
- model: z.string(),
343
- system_fingerprint: z.string().optional(),
344
- object: z.literal("text_completion"),
345
- }));
346
- export const OpenAITextResponseFormat = {
347
- /**
348
- * Returns the response as a JSON object.
349
- */
350
- json: {
351
- stream: false,
352
- handler: createJsonResponseHandler(OpenAICompletionResponseSchema),
353
- },
354
- /**
355
- * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
356
- * of the response stream.
357
- */
358
- deltaIterable: {
359
- stream: true,
360
- handler: createEventSourceResponseHandler(openaiCompletionStreamChunkSchema),
361
- },
362
- };
@@ -28,6 +28,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
28
28
  Object.defineProperty(exports, "__esModule", { value: true });
29
29
  exports.openai = exports.OpenAIChatPrompt = void 0;
30
30
  __exportStar(require("./AbstractOpenAIChatModel.cjs"), exports);
31
+ __exportStar(require("./AbstractOpenAICompletionModel.cjs"), exports);
31
32
  __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
32
33
  __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
33
34
  __exportStar(require("./OpenAIChatMessage.cjs"), exports);
@@ -1,4 +1,5 @@
1
1
  export * from "./AbstractOpenAIChatModel.js";
2
+ export * from "./AbstractOpenAICompletionModel.js";
2
3
  export * from "./AzureOpenAIApiConfiguration.js";
3
4
  export * from "./OpenAIApiConfiguration.js";
4
5
  export * from "./OpenAIChatMessage.js";
@@ -1,4 +1,5 @@
1
1
  export * from "./AbstractOpenAIChatModel.js";
2
+ export * from "./AbstractOpenAICompletionModel.js";
2
3
  export * from "./AzureOpenAIApiConfiguration.js";
3
4
  export * from "./OpenAIApiConfiguration.js";
4
5
  export * from "./OpenAIChatMessage.js";
@@ -1,15 +1,14 @@
1
1
  import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
2
2
  import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
3
3
  import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
4
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
4
+ import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
5
5
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
6
6
  import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
7
7
  import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
8
- import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt } from "../openai/AbstractOpenAIChatModel.js";
9
- export type OpenAICompatibleProviderName = `openaicompatible` | `openaicompatible-${string}`;
10
- export interface OpenAICompatibleChatSettings extends TextGenerationModelSettings, Omit<AbstractOpenAIChatCallSettings, "stop" | "maxTokens"> {
8
+ import { AbstractOpenAIChatModel, AbstractOpenAIChatSettings, OpenAIChatPrompt } from "../openai/AbstractOpenAIChatModel.js";
9
+ import { OpenAICompatibleProviderName } from "./OpenAICompatibleProviderName.js";
10
+ export interface OpenAICompatibleChatSettings extends AbstractOpenAIChatSettings {
11
11
  provider?: OpenAICompatibleProviderName;
12
- isUserIdForwardingEnabled?: boolean;
13
12
  }
14
13
  /**
15
14
  * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
@@ -0,0 +1,74 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.OpenAICompatibleCompletionModel = void 0;
4
+ const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
5
+ const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
6
+ const AbstractOpenAICompletionModel_js_1 = require("../openai/AbstractOpenAICompletionModel.cjs");
7
+ /**
8
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
9
+ *
10
+ * Please note that many providers implement the API with slight differences, which can cause
11
+ * unexpected errors and different behavior in less common scenarios.
12
+ *
13
+ * @see https://platform.openai.com/docs/api-reference/completions/create
14
+ */
15
+ class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel_js_1.AbstractOpenAICompletionModel {
16
+ constructor(settings) {
17
+ super(settings);
18
+ Object.defineProperty(this, "contextWindowSize", {
19
+ enumerable: true,
20
+ configurable: true,
21
+ writable: true,
22
+ value: undefined
23
+ });
24
+ Object.defineProperty(this, "tokenizer", {
25
+ enumerable: true,
26
+ configurable: true,
27
+ writable: true,
28
+ value: undefined
29
+ });
30
+ Object.defineProperty(this, "countPromptTokens", {
31
+ enumerable: true,
32
+ configurable: true,
33
+ writable: true,
34
+ value: undefined
35
+ });
36
+ }
37
+ get provider() {
38
+ return this.settings.provider ?? "openaicompatible";
39
+ }
40
+ get modelName() {
41
+ return this.settings.model;
42
+ }
43
+ get settingsForEvent() {
44
+ const eventSettingProperties = [
45
+ ...TextGenerationModel_js_1.textGenerationModelProperties,
46
+ "suffix",
47
+ "temperature",
48
+ "topP",
49
+ "logprobs",
50
+ "echo",
51
+ "presencePenalty",
52
+ "frequencyPenalty",
53
+ "bestOf",
54
+ "logitBias",
55
+ "seed",
56
+ ];
57
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
58
+ }
59
+ withPromptTemplate(promptTemplate) {
60
+ return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
61
+ model: this.withSettings({
62
+ stopSequences: [
63
+ ...(this.settings.stopSequences ?? []),
64
+ ...promptTemplate.stopSequences,
65
+ ],
66
+ }),
67
+ promptTemplate,
68
+ });
69
+ }
70
+ withSettings(additionalSettings) {
71
+ return new OpenAICompatibleCompletionModel(Object.assign({}, this.settings, additionalSettings));
72
+ }
73
+ }
74
+ exports.OpenAICompatibleCompletionModel = OpenAICompatibleCompletionModel;
@@ -0,0 +1,27 @@
1
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
2
+ import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
3
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
4
+ import { AbstractOpenAICompletionModel, AbstractOpenAICompletionModelSettings } from "../openai/AbstractOpenAICompletionModel.js";
5
+ import { OpenAICompatibleProviderName } from "./OpenAICompatibleProviderName.js";
6
+ export interface OpenAICompatibleCompletionModelSettings extends AbstractOpenAICompletionModelSettings {
7
+ provider?: OpenAICompatibleProviderName;
8
+ }
9
+ /**
10
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
11
+ *
12
+ * Please note that many providers implement the API with slight differences, which can cause
13
+ * unexpected errors and different behavior in less common scenarios.
14
+ *
15
+ * @see https://platform.openai.com/docs/api-reference/completions/create
16
+ */
17
+ export declare class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel<OpenAICompatibleCompletionModelSettings> implements TextStreamingModel<string, OpenAICompatibleCompletionModelSettings> {
18
+ constructor(settings: OpenAICompatibleCompletionModelSettings);
19
+ get provider(): OpenAICompatibleProviderName;
20
+ get modelName(): string;
21
+ readonly contextWindowSize: undefined;
22
+ readonly tokenizer: undefined;
23
+ readonly countPromptTokens: undefined;
24
+ get settingsForEvent(): Partial<OpenAICompatibleCompletionModelSettings>;
25
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompatibleCompletionModelSettings, this>;
26
+ withSettings(additionalSettings: Partial<OpenAICompatibleCompletionModelSettings>): this;
27
+ }
@@ -0,0 +1,70 @@
1
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
2
+ import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
3
+ import { AbstractOpenAICompletionModel, } from "../openai/AbstractOpenAICompletionModel.js";
4
+ /**
5
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
6
+ *
7
+ * Please note that many providers implement the API with slight differences, which can cause
8
+ * unexpected errors and different behavior in less common scenarios.
9
+ *
10
+ * @see https://platform.openai.com/docs/api-reference/completions/create
11
+ */
12
+ export class OpenAICompatibleCompletionModel extends AbstractOpenAICompletionModel {
13
+ constructor(settings) {
14
+ super(settings);
15
+ Object.defineProperty(this, "contextWindowSize", {
16
+ enumerable: true,
17
+ configurable: true,
18
+ writable: true,
19
+ value: undefined
20
+ });
21
+ Object.defineProperty(this, "tokenizer", {
22
+ enumerable: true,
23
+ configurable: true,
24
+ writable: true,
25
+ value: undefined
26
+ });
27
+ Object.defineProperty(this, "countPromptTokens", {
28
+ enumerable: true,
29
+ configurable: true,
30
+ writable: true,
31
+ value: undefined
32
+ });
33
+ }
34
+ get provider() {
35
+ return this.settings.provider ?? "openaicompatible";
36
+ }
37
+ get modelName() {
38
+ return this.settings.model;
39
+ }
40
+ get settingsForEvent() {
41
+ const eventSettingProperties = [
42
+ ...textGenerationModelProperties,
43
+ "suffix",
44
+ "temperature",
45
+ "topP",
46
+ "logprobs",
47
+ "echo",
48
+ "presencePenalty",
49
+ "frequencyPenalty",
50
+ "bestOf",
51
+ "logitBias",
52
+ "seed",
53
+ ];
54
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
55
+ }
56
+ withPromptTemplate(promptTemplate) {
57
+ return new PromptTemplateTextStreamingModel({
58
+ model: this.withSettings({
59
+ stopSequences: [
60
+ ...(this.settings.stopSequences ?? []),
61
+ ...promptTemplate.stopSequences,
62
+ ],
63
+ }),
64
+ promptTemplate,
65
+ });
66
+ }
67
+ withSettings(additionalSettings) {
68
+ return new OpenAICompatibleCompletionModel(Object.assign({}, this.settings, additionalSettings));
69
+ }
70
+ }
@@ -1,7 +1,34 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ChatTextGenerator = void 0;
3
+ exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
4
4
  const OpenAICompatibleChatModel_js_1 = require("./OpenAICompatibleChatModel.cjs");
5
+ const OpenAICompatibleCompletionModel_js_1 = require("./OpenAICompatibleCompletionModel.cjs");
6
+ /**
7
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
8
+ *
9
+ * Please note that many providers implement the API with slight differences, which can cause
10
+ * unexpected errors and different behavior in less common scenarios.
11
+ *
12
+ * @see https://platform.openai.com/docs/api-reference/completions/create
13
+ *
14
+ * @example
15
+ * ```ts
16
+ * const model = openaicompatible.CompletionTextGenerator({
17
+ * model: "provider-specific-model-name",
18
+ * temperature: 0.7,
19
+ * maxGenerationTokens: 500,
20
+ * });
21
+ *
22
+ * const text = await generateText(
23
+ * model,
24
+ * "Write a short story about a robot learning to love:"
25
+ * );
26
+ * ```
27
+ */
28
+ function CompletionTextGenerator(settings) {
29
+ return new OpenAICompatibleCompletionModel_js_1.OpenAICompatibleCompletionModel(settings);
30
+ }
31
+ exports.CompletionTextGenerator = CompletionTextGenerator;
5
32
  /**
6
33
  * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
7
34
  *
@@ -11,18 +38,22 @@ const OpenAICompatibleChatModel_js_1 = require("./OpenAICompatibleChatModel.cjs"
11
38
  * @see https://platform.openai.com/docs/api-reference/chat/create
12
39
  *
13
40
  * @example
41
+ * ```ts
14
42
  * const model = openaicompatible.ChatTextGenerator({
15
43
  * model: "provider-specific-model-name",
16
44
  * temperature: 0.7,
17
45
  * maxGenerationTokens: 500,
18
46
  * });
19
47
  *
20
- * const text = await generateText([
48
+ * const text = await generateText(
21
49
  * model,
22
- * openai.ChatMessage.system(
23
- * "Write a short story about a robot learning to love:"
24
- * ),
25
- * ]);
50
+ * [
51
+ * openai.ChatMessage.user(
52
+ * "Write a short story about a robot learning to love:"
53
+ * ),
54
+ * ]
55
+ * );
56
+ * ```
26
57
  */
27
58
  function ChatTextGenerator(settings) {
28
59
  return new OpenAICompatibleChatModel_js_1.OpenAICompatibleChatModel(settings);
@@ -1,4 +1,28 @@
1
1
  import { OpenAICompatibleChatModel, OpenAICompatibleChatSettings } from "./OpenAICompatibleChatModel.js";
2
+ import { OpenAICompatibleCompletionModel, OpenAICompatibleCompletionModelSettings } from "./OpenAICompatibleCompletionModel.js";
3
+ /**
4
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
5
+ *
6
+ * Please note that many providers implement the API with slight differences, which can cause
7
+ * unexpected errors and different behavior in less common scenarios.
8
+ *
9
+ * @see https://platform.openai.com/docs/api-reference/completions/create
10
+ *
11
+ * @example
12
+ * ```ts
13
+ * const model = openaicompatible.CompletionTextGenerator({
14
+ * model: "provider-specific-model-name",
15
+ * temperature: 0.7,
16
+ * maxGenerationTokens: 500,
17
+ * });
18
+ *
19
+ * const text = await generateText(
20
+ * model,
21
+ * "Write a short story about a robot learning to love:"
22
+ * );
23
+ * ```
24
+ */
25
+ export declare function CompletionTextGenerator(settings: OpenAICompatibleCompletionModelSettings): OpenAICompatibleCompletionModel;
2
26
  /**
3
27
  * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
4
28
  *
@@ -8,17 +32,21 @@ import { OpenAICompatibleChatModel, OpenAICompatibleChatSettings } from "./OpenA
8
32
  * @see https://platform.openai.com/docs/api-reference/chat/create
9
33
  *
10
34
  * @example
35
+ * ```ts
11
36
  * const model = openaicompatible.ChatTextGenerator({
12
37
  * model: "provider-specific-model-name",
13
38
  * temperature: 0.7,
14
39
  * maxGenerationTokens: 500,
15
40
  * });
16
41
  *
17
- * const text = await generateText([
42
+ * const text = await generateText(
18
43
  * model,
19
- * openai.ChatMessage.system(
20
- * "Write a short story about a robot learning to love:"
21
- * ),
22
- * ]);
44
+ * [
45
+ * openai.ChatMessage.user(
46
+ * "Write a short story about a robot learning to love:"
47
+ * ),
48
+ * ]
49
+ * );
50
+ * ```
23
51
  */
24
52
  export declare function ChatTextGenerator(settings: OpenAICompatibleChatSettings): OpenAICompatibleChatModel;
@@ -1,4 +1,30 @@
1
1
  import { OpenAICompatibleChatModel, } from "./OpenAICompatibleChatModel.js";
2
+ import { OpenAICompatibleCompletionModel } from "./OpenAICompatibleCompletionModel.js";
3
+ /**
4
+ * Create a text generation model that calls an API that is compatible with OpenAI's completion API.
5
+ *
6
+ * Please note that many providers implement the API with slight differences, which can cause
7
+ * unexpected errors and different behavior in less common scenarios.
8
+ *
9
+ * @see https://platform.openai.com/docs/api-reference/completions/create
10
+ *
11
+ * @example
12
+ * ```ts
13
+ * const model = openaicompatible.CompletionTextGenerator({
14
+ * model: "provider-specific-model-name",
15
+ * temperature: 0.7,
16
+ * maxGenerationTokens: 500,
17
+ * });
18
+ *
19
+ * const text = await generateText(
20
+ * model,
21
+ * "Write a short story about a robot learning to love:"
22
+ * );
23
+ * ```
24
+ */
25
+ export function CompletionTextGenerator(settings) {
26
+ return new OpenAICompatibleCompletionModel(settings);
27
+ }
2
28
  /**
3
29
  * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
4
30
  *
@@ -8,18 +34,22 @@ import { OpenAICompatibleChatModel, } from "./OpenAICompatibleChatModel.js";
8
34
  * @see https://platform.openai.com/docs/api-reference/chat/create
9
35
  *
10
36
  * @example
37
+ * ```ts
11
38
  * const model = openaicompatible.ChatTextGenerator({
12
39
  * model: "provider-specific-model-name",
13
40
  * temperature: 0.7,
14
41
  * maxGenerationTokens: 500,
15
42
  * });
16
43
  *
17
- * const text = await generateText([
44
+ * const text = await generateText(
18
45
  * model,
19
- * openai.ChatMessage.system(
20
- * "Write a short story about a robot learning to love:"
21
- * ),
22
- * ]);
46
+ * [
47
+ * openai.ChatMessage.user(
48
+ * "Write a short story about a robot learning to love:"
49
+ * ),
50
+ * ]
51
+ * );
52
+ * ```
23
53
  */
24
54
  export function ChatTextGenerator(settings) {
25
55
  return new OpenAICompatibleChatModel(settings);
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1 @@
1
+ export type OpenAICompatibleProviderName = `openaicompatible` | `openaicompatible-${string}`;
@@ -0,0 +1,29 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.TogetherAIApiConfiguration = void 0;
4
+ const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
5
+ const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
6
+ /**
7
+ * Configuration for the Together.ai API.
8
+ *
9
+ * It uses the `TOGETHER_API_KEY` api key environment variable.
10
+ *
11
+ * @see https://docs.together.ai/docs/openai-api-compatibility
12
+ */
13
+ class TogetherAIApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
14
+ constructor({ baseUrl = "https://api.together.xyz/v1", apiKey, retry, throttle, } = {}) {
15
+ super({
16
+ baseUrl,
17
+ headers: {
18
+ Authorization: `Bearer ${(0, loadApiKey_js_1.loadApiKey)({
19
+ apiKey,
20
+ environmentVariableName: "TOGETHER_API_KEY",
21
+ description: "Together AI",
22
+ })}`,
23
+ },
24
+ retry,
25
+ throttle,
26
+ });
27
+ }
28
+ }
29
+ exports.TogetherAIApiConfiguration = TogetherAIApiConfiguration;
@@ -0,0 +1,18 @@
1
+ import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
2
+ import { RetryFunction } from "../../core/api/RetryFunction.js";
3
+ import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
4
+ /**
5
+ * Configuration for the Together.ai API.
6
+ *
7
+ * It uses the `TOGETHER_API_KEY` api key environment variable.
8
+ *
9
+ * @see https://docs.together.ai/docs/openai-api-compatibility
10
+ */
11
+ export declare class TogetherAIApiConfiguration extends BaseUrlApiConfiguration {
12
+ constructor({ baseUrl, apiKey, retry, throttle, }?: {
13
+ baseUrl?: string;
14
+ apiKey?: string;
15
+ retry?: RetryFunction;
16
+ throttle?: ThrottleFunction;
17
+ });
18
+ }