modelfusion 0.22.0 → 0.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/README.md +3 -0
  2. package/core/FunctionOptions.d.ts +14 -0
  3. package/core/GlobalFunctionLogging.cjs +12 -0
  4. package/core/GlobalFunctionLogging.d.ts +3 -0
  5. package/core/GlobalFunctionLogging.js +7 -0
  6. package/core/getFunctionCallLogger.cjs +74 -0
  7. package/core/getFunctionCallLogger.d.ts +3 -0
  8. package/core/getFunctionCallLogger.js +70 -0
  9. package/core/index.cjs +1 -1
  10. package/core/index.d.ts +1 -1
  11. package/core/index.js +1 -1
  12. package/model-function/AbstractModel.d.ts +1 -0
  13. package/model-function/Model.d.ts +6 -1
  14. package/model-function/ModelCallEvent.d.ts +21 -2
  15. package/model-function/embed-text/embedText.d.ts +2 -2
  16. package/model-function/executeCall.cjs +24 -17
  17. package/model-function/executeCall.d.ts +15 -13
  18. package/model-function/executeCall.js +22 -15
  19. package/model-function/generate-image/generateImage.d.ts +1 -1
  20. package/model-function/generate-json/JsonGenerationEvent.d.ts +16 -0
  21. package/model-function/generate-json/JsonGenerationModel.d.ts +13 -0
  22. package/model-function/generate-json/JsonOrTextGenerationModel.d.ts +23 -0
  23. package/model-function/generate-json/JsonTextGenerationModel.cjs +3 -0
  24. package/model-function/generate-json/JsonTextGenerationModel.d.ts +6 -5
  25. package/model-function/generate-json/JsonTextGenerationModel.js +3 -0
  26. package/model-function/generate-json/generateJson.cjs +1 -0
  27. package/model-function/generate-json/generateJson.d.ts +2 -2
  28. package/model-function/generate-json/generateJson.js +1 -0
  29. package/model-function/generate-json/generateJsonOrText.cjs +1 -0
  30. package/model-function/generate-json/generateJsonOrText.d.ts +2 -2
  31. package/model-function/generate-json/generateJsonOrText.js +1 -0
  32. package/model-function/generate-text/TextGenerationEvent.d.ts +5 -2
  33. package/model-function/generate-text/TextGenerationModel.d.ts +5 -0
  34. package/model-function/generate-text/generateText.cjs +1 -0
  35. package/model-function/generate-text/generateText.d.ts +1 -1
  36. package/model-function/generate-text/generateText.js +1 -0
  37. package/model-function/generate-text/streamText.cjs +9 -6
  38. package/model-function/generate-text/streamText.d.ts +5 -5
  39. package/model-function/generate-text/streamText.js +9 -6
  40. package/model-function/index.cjs +3 -2
  41. package/model-function/index.d.ts +3 -2
  42. package/model-function/index.js +3 -2
  43. package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +2 -2
  44. package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +1 -1
  45. package/model-function/synthesize-speech/synthesizeSpeech.d.ts +1 -1
  46. package/model-function/transcribe-speech/TranscriptionModel.d.ts +1 -1
  47. package/model-function/transcribe-speech/transcribe.d.ts +1 -1
  48. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +9 -0
  49. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -0
  50. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +9 -0
  51. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +6 -0
  52. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -0
  53. package/model-provider/cohere/CohereTextEmbeddingModel.js +6 -0
  54. package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -0
  55. package/model-provider/cohere/CohereTextGenerationModel.d.ts +2 -1
  56. package/model-provider/cohere/CohereTextGenerationModel.js +20 -0
  57. package/model-provider/cohere/CohereTokenizer.d.ts +1 -1
  58. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +8 -0
  59. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +1 -0
  60. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +8 -0
  61. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +7 -0
  62. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -0
  63. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +7 -0
  64. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +16 -0
  65. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -0
  66. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +16 -0
  67. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +6 -0
  68. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -0
  69. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +6 -0
  70. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +31 -0
  71. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +6 -0
  72. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +31 -0
  73. package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -0
  74. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -0
  75. package/model-provider/openai/OpenAIImageGenerationModel.js +8 -0
  76. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +5 -0
  77. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -11
  78. package/model-provider/openai/OpenAITextEmbeddingModel.js +5 -0
  79. package/model-provider/openai/OpenAITextGenerationModel.cjs +62 -6
  80. package/model-provider/openai/OpenAITextGenerationModel.d.ts +34 -17
  81. package/model-provider/openai/OpenAITextGenerationModel.js +60 -5
  82. package/model-provider/openai/OpenAITranscriptionModel.cjs +7 -0
  83. package/model-provider/openai/OpenAITranscriptionModel.d.ts +2 -0
  84. package/model-provider/openai/OpenAITranscriptionModel.js +7 -0
  85. package/model-provider/openai/TikTokenTokenizer.d.ts +4 -2
  86. package/model-provider/openai/chat/OpenAIChatModel.cjs +68 -9
  87. package/model-provider/openai/chat/OpenAIChatModel.d.ts +40 -20
  88. package/model-provider/openai/chat/OpenAIChatModel.js +66 -8
  89. package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +2 -2
  90. package/model-provider/openai/chat/countOpenAIChatMessageTokens.cjs +5 -2
  91. package/model-provider/openai/chat/countOpenAIChatMessageTokens.js +5 -2
  92. package/model-provider/stability/StabilityImageGenerationModel.cjs +15 -0
  93. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -0
  94. package/model-provider/stability/StabilityImageGenerationModel.js +15 -0
  95. package/package.json +1 -1
  96. package/prompt/PromptFormatTextGenerationModel.cjs +3 -0
  97. package/prompt/PromptFormatTextGenerationModel.d.ts +1 -0
  98. package/prompt/PromptFormatTextGenerationModel.js +3 -0
  99. package/tool/executeTool.cjs +3 -0
  100. package/tool/executeTool.js +3 -0
  101. package/tool/useTool.d.ts +2 -2
  102. package/tool/useToolOrGenerateText.d.ts +2 -2
  103. package/core/ConsoleLogger.cjs +0 -9
  104. package/core/ConsoleLogger.d.ts +0 -5
  105. package/core/ConsoleLogger.js +0 -5
  106. package/model-function/generate-json/GenerateJsonModel.d.ts +0 -8
  107. package/model-function/generate-json/GenerateJsonOrTextModel.d.ts +0 -18
  108. /package/model-function/generate-json/{GenerateJsonModel.cjs → JsonGenerationModel.cjs} +0 -0
  109. /package/model-function/generate-json/{GenerateJsonModel.js → JsonGenerationModel.js} +0 -0
  110. /package/model-function/generate-json/{GenerateJsonOrTextModel.cjs → JsonOrTextGenerationModel.cjs} +0 -0
  111. /package/model-function/generate-json/{GenerateJsonOrTextModel.js → JsonOrTextGenerationModel.js} +0 -0
@@ -19,10 +19,12 @@ export declare const OPENAI_TEXT_GENERATION_MODELS: {
19
19
  "davinci-002": {
20
20
  contextWindowSize: number;
21
21
  tokenCostInMillicents: number;
22
+ fineTunedTokenCostInMillicents: number;
22
23
  };
23
24
  "babbage-002": {
24
25
  contextWindowSize: number;
25
26
  tokenCostInMillicents: number;
27
+ fineTunedTokenCostInMillicents: number;
26
28
  };
27
29
  "text-davinci-003": {
28
30
  contextWindowSize: number;
@@ -65,8 +67,17 @@ export declare const OPENAI_TEXT_GENERATION_MODELS: {
65
67
  tokenCostInMillicents: number;
66
68
  };
67
69
  };
68
- export type OpenAITextGenerationModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
69
- export declare const isOpenAITextGenerationModel: (model: string) => model is "davinci-002" | "babbage-002" | "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
70
+ export declare function getOpenAITextGenerationModelInformation(model: OpenAITextGenerationModelType): {
71
+ baseModel: OpenAITextGenerationBaseModelType;
72
+ isFineTuned: boolean;
73
+ contextWindowSize: number;
74
+ tokenCostInMillicents: number;
75
+ };
76
+ type FineTuneableOpenAITextGenerationModelType = "davinci-002" | "babbage-002";
77
+ type FineTunedOpenAITextGenerationModelType = `ft:${FineTuneableOpenAITextGenerationModelType}:${string}:${string}:${string}`;
78
+ export type OpenAITextGenerationBaseModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
79
+ export type OpenAITextGenerationModelType = OpenAITextGenerationBaseModelType | FineTunedOpenAITextGenerationModelType;
80
+ export declare const isOpenAITextGenerationModel: (model: string) => model is OpenAITextGenerationModelType;
70
81
  export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, response, }: {
71
82
  model: OpenAITextGenerationModelType;
72
83
  response: OpenAITextGenerationResponse;
@@ -110,7 +121,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
110
121
  export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextGenerationModelSettings> implements TextGenerationModel<string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings> {
111
122
  constructor(settings: OpenAITextGenerationModelSettings);
112
123
  readonly provider: "openai";
113
- get modelName(): "davinci-002" | "babbage-002" | "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
124
+ get modelName(): OpenAITextGenerationModelType;
114
125
  readonly contextWindowSize: number;
115
126
  readonly tokenizer: TikTokenTokenizer;
116
127
  private get apiKey();
@@ -120,16 +131,17 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
120
131
  } & ModelFunctionOptions<Partial<OpenAIImageGenerationCallSettings & OpenAIModelSettings & {
121
132
  user?: string;
122
133
  }>>): Promise<RESULT>;
134
+ get settingsForEvent(): Partial<OpenAITextGenerationModelSettings>;
123
135
  generateTextResponse(prompt: string, options?: ModelFunctionOptions<OpenAITextGenerationModelSettings>): Promise<{
124
136
  object: "text_completion";
125
137
  model: string;
126
- id: string;
127
- created: number;
128
138
  usage: {
129
139
  prompt_tokens: number;
130
- total_tokens: number;
131
140
  completion_tokens: number;
141
+ total_tokens: number;
132
142
  };
143
+ id: string;
144
+ created: number;
133
145
  choices: {
134
146
  text: string;
135
147
  finish_reason: string;
@@ -141,6 +153,11 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
141
153
  generateDeltaStreamResponse(prompt: string, options?: ModelFunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
142
154
  extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
143
155
  withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
156
+ extractUsage(response: OpenAITextGenerationResponse): {
157
+ promptTokens: number;
158
+ completionTokens: number;
159
+ totalTokens: number;
160
+ };
144
161
  withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
145
162
  }
146
163
  declare const openAITextGenerationResponseSchema: z.ZodObject<{
@@ -170,23 +187,23 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
170
187
  total_tokens: z.ZodNumber;
171
188
  }, "strip", z.ZodTypeAny, {
172
189
  prompt_tokens: number;
173
- total_tokens: number;
174
190
  completion_tokens: number;
191
+ total_tokens: number;
175
192
  }, {
176
193
  prompt_tokens: number;
177
- total_tokens: number;
178
194
  completion_tokens: number;
195
+ total_tokens: number;
179
196
  }>;
180
197
  }, "strip", z.ZodTypeAny, {
181
198
  object: "text_completion";
182
199
  model: string;
183
- id: string;
184
- created: number;
185
200
  usage: {
186
201
  prompt_tokens: number;
187
- total_tokens: number;
188
202
  completion_tokens: number;
203
+ total_tokens: number;
189
204
  };
205
+ id: string;
206
+ created: number;
190
207
  choices: {
191
208
  text: string;
192
209
  finish_reason: string;
@@ -196,13 +213,13 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
196
213
  }, {
197
214
  object: "text_completion";
198
215
  model: string;
199
- id: string;
200
- created: number;
201
216
  usage: {
202
217
  prompt_tokens: number;
203
- total_tokens: number;
204
218
  completion_tokens: number;
219
+ total_tokens: number;
205
220
  };
221
+ id: string;
222
+ created: number;
206
223
  choices: {
207
224
  text: string;
208
225
  finish_reason: string;
@@ -224,13 +241,13 @@ export declare const OpenAITextResponseFormat: {
224
241
  handler: ResponseHandler<{
225
242
  object: "text_completion";
226
243
  model: string;
227
- id: string;
228
- created: number;
229
244
  usage: {
230
245
  prompt_tokens: number;
231
- total_tokens: number;
232
246
  completion_tokens: number;
247
+ total_tokens: number;
233
248
  };
249
+ id: string;
250
+ created: number;
234
251
  choices: {
235
252
  text: string;
236
253
  finish_reason: string;
@@ -17,10 +17,12 @@ export const OPENAI_TEXT_GENERATION_MODELS = {
17
17
  "davinci-002": {
18
18
  contextWindowSize: 16384,
19
19
  tokenCostInMillicents: 0.2,
20
+ fineTunedTokenCostInMillicents: 1.2,
20
21
  },
21
22
  "babbage-002": {
22
23
  contextWindowSize: 16384,
23
24
  tokenCostInMillicents: 0.04,
25
+ fineTunedTokenCostInMillicents: 0.16,
24
26
  },
25
27
  "text-davinci-003": {
26
28
  contextWindowSize: 4096,
@@ -63,9 +65,36 @@ export const OPENAI_TEXT_GENERATION_MODELS = {
63
65
  tokenCostInMillicents: 0.04,
64
66
  },
65
67
  };
66
- export const isOpenAITextGenerationModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS;
68
+ export function getOpenAITextGenerationModelInformation(model) {
69
+ // Model is already a base model:
70
+ if (model in OPENAI_TEXT_GENERATION_MODELS) {
71
+ const baseModelInformation = OPENAI_TEXT_GENERATION_MODELS[model];
72
+ return {
73
+ baseModel: model,
74
+ isFineTuned: false,
75
+ contextWindowSize: baseModelInformation.contextWindowSize,
76
+ tokenCostInMillicents: baseModelInformation.tokenCostInMillicents,
77
+ };
78
+ }
79
+ // Extract the base model from the fine-tuned model:
80
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
81
+ const [_, baseModel, ___, ____, _____] = model.split(":");
82
+ if (["davinci-002", "babbage-002"].includes(baseModel)) {
83
+ const baseModelInformation = OPENAI_TEXT_GENERATION_MODELS[baseModel];
84
+ return {
85
+ baseModel: baseModel,
86
+ isFineTuned: true,
87
+ contextWindowSize: baseModelInformation.contextWindowSize,
88
+ tokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
89
+ };
90
+ }
91
+ throw new Error(`Unknown OpenAI text generation base model ${baseModel}.`);
92
+ }
93
+ export const isOpenAITextGenerationModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS ||
94
+ model.startsWith("ft:davinci-002:") ||
95
+ model.startsWith("ft:babbage-002:");
67
96
  export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => response.usage.total_tokens *
68
- OPENAI_TEXT_GENERATION_MODELS[model].tokenCostInMillicents;
97
+ getOpenAITextGenerationModelInformation(model).tokenCostInMillicents;
69
98
  /**
70
99
  * Create a text generation model that calls the OpenAI text completion API.
71
100
  *
@@ -105,9 +134,11 @@ export class OpenAITextGenerationModel extends AbstractModel {
105
134
  writable: true,
106
135
  value: void 0
107
136
  });
108
- this.tokenizer = new TikTokenTokenizer({ model: settings.model });
109
- this.contextWindowSize =
110
- OPENAI_TEXT_GENERATION_MODELS[settings.model].contextWindowSize;
137
+ const modelInformation = getOpenAITextGenerationModelInformation(this.settings.model);
138
+ this.tokenizer = new TikTokenTokenizer({
139
+ model: modelInformation.baseModel,
140
+ });
141
+ this.contextWindowSize = modelInformation.contextWindowSize;
111
142
  }
112
143
  get modelName() {
113
144
  return this.settings.model;
@@ -147,6 +178,23 @@ export class OpenAITextGenerationModel extends AbstractModel {
147
178
  call: async () => callOpenAITextGenerationAPI(callSettings),
148
179
  });
149
180
  }
181
+ get settingsForEvent() {
182
+ const eventSettingProperties = [
183
+ "maxCompletionTokens",
184
+ "stopSequences",
185
+ "baseUrl",
186
+ "suffix",
187
+ "temperature",
188
+ "topP",
189
+ "n",
190
+ "logprobs",
191
+ "echo",
192
+ "presencePenalty",
193
+ "frequencyPenalty",
194
+ "bestOf",
195
+ ];
196
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
197
+ }
150
198
  generateTextResponse(prompt, options) {
151
199
  return this.callAPI(prompt, {
152
200
  ...options,
@@ -173,6 +221,13 @@ export class OpenAITextGenerationModel extends AbstractModel {
173
221
  promptFormat,
174
222
  });
175
223
  }
224
+ extractUsage(response) {
225
+ return {
226
+ promptTokens: response.usage.prompt_tokens,
227
+ completionTokens: response.usage.completion_tokens,
228
+ totalTokens: response.usage.total_tokens,
229
+ };
230
+ }
176
231
  withSettings(additionalSettings) {
177
232
  return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
178
233
  }
@@ -93,6 +93,13 @@ class OpenAITranscriptionModel extends AbstractModel_js_1.AbstractModel {
93
93
  call: async () => callOpenAITranscriptionAPI(callSettings),
94
94
  });
95
95
  }
96
+ getEventSettingProperties() {
97
+ return ["baseUrl"];
98
+ }
99
+ get settingsForEvent() {
100
+ const eventSettingProperties = ["baseUrl"];
101
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
102
+ }
96
103
  withSettings(additionalSettings) {
97
104
  return new OpenAITranscriptionModel(Object.assign({}, this.settings, additionalSettings));
98
105
  }
@@ -57,6 +57,8 @@ export declare class OpenAITranscriptionModel extends AbstractModel<OpenAITransc
57
57
  callAPI<RESULT>(data: OpenAITranscriptionInput, options: {
58
58
  responseFormat: OpenAITranscriptionResponseFormatType<RESULT>;
59
59
  } & ModelFunctionOptions<Partial<OpenAITranscriptionModelSettings & OpenAIModelSettings>>): Promise<RESULT>;
60
+ getEventSettingProperties(): (keyof OpenAITranscriptionModelSettings)[];
61
+ get settingsForEvent(): Partial<OpenAITranscriptionModelSettings>;
60
62
  withSettings(additionalSettings: OpenAITranscriptionModelSettings): this;
61
63
  }
62
64
  declare const openAITranscriptionJsonSchema: z.ZodObject<{
@@ -86,6 +86,13 @@ export class OpenAITranscriptionModel extends AbstractModel {
86
86
  call: async () => callOpenAITranscriptionAPI(callSettings),
87
87
  });
88
88
  }
89
+ getEventSettingProperties() {
90
+ return ["baseUrl"];
91
+ }
92
+ get settingsForEvent() {
93
+ const eventSettingProperties = ["baseUrl"];
94
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
95
+ }
89
96
  withSettings(additionalSettings) {
90
97
  return new OpenAITranscriptionModel(Object.assign({}, this.settings, additionalSettings));
91
98
  }
@@ -1,6 +1,8 @@
1
1
  import { TiktokenEncoding } from "js-tiktoken";
2
2
  import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
3
- import { OpenAIChatModelType, OpenAITextEmbeddingModelType, OpenAITextGenerationModelType } from "./index.js";
3
+ import { OpenAITextEmbeddingModelType } from "./OpenAITextEmbeddingModel.js";
4
+ import { OpenAITextGenerationBaseModelType } from "./OpenAITextGenerationModel.js";
5
+ import { OpenAIChatBaseModelType } from "./chat/OpenAIChatModel.js";
4
6
  /**
5
7
  * TikToken tokenizer for OpenAI language models.
6
8
  *
@@ -21,7 +23,7 @@ export declare class TikTokenTokenizer implements FullTokenizer {
21
23
  * Get a TikToken tokenizer for a specific model or encoding.
22
24
  */
23
25
  constructor(options: {
24
- model: OpenAIChatModelType | OpenAITextEmbeddingModelType | OpenAITextGenerationModelType;
26
+ model: OpenAIChatBaseModelType | OpenAITextGenerationBaseModelType | OpenAITextEmbeddingModelType;
25
27
  } | {
26
28
  encoding: TiktokenEncoding;
27
29
  });
@@ -3,7 +3,7 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
3
3
  return (mod && mod.__esModule) ? mod : { "default": mod };
4
4
  };
5
5
  Object.defineProperty(exports, "__esModule", { value: true });
6
- exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.OPENAI_CHAT_MODELS = void 0;
6
+ exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.getOpenAIChatModelInformation = exports.OPENAI_CHAT_MODELS = void 0;
7
7
  const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
8
8
  const zod_1 = __importDefault(require("zod"));
9
9
  const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
@@ -55,6 +55,8 @@ exports.OPENAI_CHAT_MODELS = {
55
55
  contextWindowSize: 4096,
56
56
  promptTokenCostInMillicents: 0.15,
57
57
  completionTokenCostInMillicents: 0.2,
58
+ fineTunedPromptTokenCostInMillicents: 1.2,
59
+ fineTunedCompletionTokenCostInMillicents: 1.6,
58
60
  },
59
61
  "gpt-3.5-turbo-0301": {
60
62
  contextWindowSize: 4096,
@@ -65,6 +67,8 @@ exports.OPENAI_CHAT_MODELS = {
65
67
  contextWindowSize: 4096,
66
68
  promptTokenCostInMillicents: 0.15,
67
69
  completionTokenCostInMillicents: 0.2,
70
+ fineTunedPromptTokenCostInMillicents: 1.2,
71
+ fineTunedCompletionTokenCostInMillicents: 1.6,
68
72
  },
69
73
  "gpt-3.5-turbo-16k": {
70
74
  contextWindowSize: 16384,
@@ -77,12 +81,45 @@ exports.OPENAI_CHAT_MODELS = {
77
81
  completionTokenCostInMillicents: 0.4,
78
82
  },
79
83
  };
80
- const isOpenAIChatModel = (model) => model in exports.OPENAI_CHAT_MODELS;
84
+ function getOpenAIChatModelInformation(model) {
85
+ // Model is already a base model:
86
+ if (model in exports.OPENAI_CHAT_MODELS) {
87
+ const baseModelInformation = exports.OPENAI_CHAT_MODELS[model];
88
+ return {
89
+ baseModel: model,
90
+ isFineTuned: false,
91
+ contextWindowSize: baseModelInformation.contextWindowSize,
92
+ promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
93
+ completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
94
+ };
95
+ }
96
+ // Extract the base model from the fine-tuned model:
97
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
98
+ const [_, baseModel, ___, ____, _____] = model.split(":");
99
+ if (["gpt-3.5-turbo", "gpt-3.5-turbo-0613"].includes(baseModel)) {
100
+ const baseModelInformation = exports.OPENAI_CHAT_MODELS[baseModel];
101
+ return {
102
+ baseModel: baseModel,
103
+ isFineTuned: true,
104
+ contextWindowSize: baseModelInformation.contextWindowSize,
105
+ promptTokenCostInMillicents: baseModelInformation.fineTunedPromptTokenCostInMillicents,
106
+ completionTokenCostInMillicents: baseModelInformation.fineTunedCompletionTokenCostInMillicents,
107
+ };
108
+ }
109
+ throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
110
+ }
111
+ exports.getOpenAIChatModelInformation = getOpenAIChatModelInformation;
112
+ const isOpenAIChatModel = (model) => model in exports.OPENAI_CHAT_MODELS ||
113
+ model.startsWith("ft:gpt-3.5-turbo-0613:") ||
114
+ model.startsWith("ft:gpt-3.5-turbo:");
81
115
  exports.isOpenAIChatModel = isOpenAIChatModel;
82
- const calculateOpenAIChatCostInMillicents = ({ model, response, }) => response.usage.prompt_tokens *
83
- exports.OPENAI_CHAT_MODELS[model].promptTokenCostInMillicents +
84
- response.usage.completion_tokens *
85
- exports.OPENAI_CHAT_MODELS[model].completionTokenCostInMillicents;
116
+ const calculateOpenAIChatCostInMillicents = ({ model, response, }) => {
117
+ const modelInformation = getOpenAIChatModelInformation(model);
118
+ return (response.usage.prompt_tokens *
119
+ modelInformation.promptTokenCostInMillicents +
120
+ response.usage.completion_tokens *
121
+ modelInformation.completionTokenCostInMillicents);
122
+ };
86
123
  exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicents;
87
124
  /**
88
125
  * Create a text generation model that calls the OpenAI chat completion API.
@@ -124,9 +161,11 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
124
161
  writable: true,
125
162
  value: void 0
126
163
  });
127
- this.tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({ model: this.settings.model });
128
- this.contextWindowSize =
129
- exports.OPENAI_CHAT_MODELS[this.settings.model].contextWindowSize;
164
+ const modelInformation = getOpenAIChatModelInformation(this.settings.model);
165
+ this.tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({
166
+ model: modelInformation.baseModel,
167
+ });
168
+ this.contextWindowSize = modelInformation.contextWindowSize;
130
169
  }
131
170
  get modelName() {
132
171
  return this.settings.model;
@@ -170,6 +209,19 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
170
209
  call: async () => callOpenAIChatCompletionAPI(callSettings),
171
210
  });
172
211
  }
212
+ get settingsForEvent() {
213
+ const eventSettingProperties = [
214
+ "stopSequences",
215
+ "maxCompletionTokens",
216
+ "baseUrl",
217
+ "functions",
218
+ "functionCall",
219
+ "temperature",
220
+ "topP",
221
+ "n",
222
+ ];
223
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
224
+ }
173
225
  generateTextResponse(prompt, options) {
174
226
  return this.callAPI(prompt, {
175
227
  ...options,
@@ -211,6 +263,13 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
211
263
  const jsonText = response.choices[0].message.function_call.arguments;
212
264
  return secure_json_parse_1.default.parse(jsonText);
213
265
  }
266
+ extractUsage(response) {
267
+ return {
268
+ promptTokens: response.usage.prompt_tokens,
269
+ completionTokens: response.usage.completion_tokens,
270
+ totalTokens: response.usage.total_tokens,
271
+ };
272
+ }
214
273
  withPromptFormat(promptFormat) {
215
274
  return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
216
275
  model: this.withSettings({ stopSequences: promptFormat.stopSequences }),
@@ -1,8 +1,8 @@
1
1
  import z from "zod";
2
2
  import { AbstractModel } from "../../../model-function/AbstractModel.js";
3
3
  import { ModelFunctionOptions } from "../../../model-function/ModelFunctionOptions.js";
4
- import { GenerateJsonModel } from "../../../model-function/generate-json/GenerateJsonModel.js";
5
- import { GenerateJsonOrTextModel } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
4
+ import { JsonGenerationModel } from "../../../model-function/generate-json/JsonGenerationModel.js";
5
+ import { JsonOrTextGenerationModel } from "../../../model-function/generate-json/JsonOrTextGenerationModel.js";
6
6
  import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
7
7
  import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
8
8
  import { PromptFormat } from "../../../prompt/PromptFormat.js";
@@ -48,6 +48,8 @@ export declare const OPENAI_CHAT_MODELS: {
48
48
  contextWindowSize: number;
49
49
  promptTokenCostInMillicents: number;
50
50
  completionTokenCostInMillicents: number;
51
+ fineTunedPromptTokenCostInMillicents: number;
52
+ fineTunedCompletionTokenCostInMillicents: number;
51
53
  };
52
54
  "gpt-3.5-turbo-0301": {
53
55
  contextWindowSize: number;
@@ -58,6 +60,8 @@ export declare const OPENAI_CHAT_MODELS: {
58
60
  contextWindowSize: number;
59
61
  promptTokenCostInMillicents: number;
60
62
  completionTokenCostInMillicents: number;
63
+ fineTunedPromptTokenCostInMillicents: number;
64
+ fineTunedCompletionTokenCostInMillicents: number;
61
65
  };
62
66
  "gpt-3.5-turbo-16k": {
63
67
  contextWindowSize: number;
@@ -70,8 +74,18 @@ export declare const OPENAI_CHAT_MODELS: {
70
74
  completionTokenCostInMillicents: number;
71
75
  };
72
76
  };
73
- export type OpenAIChatModelType = keyof typeof OPENAI_CHAT_MODELS;
74
- export declare const isOpenAIChatModel: (model: string) => model is "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613";
77
+ export declare function getOpenAIChatModelInformation(model: OpenAIChatModelType): {
78
+ baseModel: OpenAIChatBaseModelType;
79
+ isFineTuned: boolean;
80
+ contextWindowSize: number;
81
+ promptTokenCostInMillicents: number;
82
+ completionTokenCostInMillicents: number;
83
+ };
84
+ type FineTuneableOpenAIChatModelType = `gpt-3.5-turbo` | `gpt-3.5-turbo-0613`;
85
+ type FineTunedOpenAIChatModelType = `ft:${FineTuneableOpenAIChatModelType}:${string}:${string}:${string}`;
86
+ export type OpenAIChatBaseModelType = keyof typeof OPENAI_CHAT_MODELS;
87
+ export type OpenAIChatModelType = OpenAIChatBaseModelType | FineTunedOpenAIChatModelType;
88
+ export declare const isOpenAIChatModel: (model: string) => model is OpenAIChatModelType;
75
89
  export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }: {
76
90
  model: OpenAIChatModelType;
77
91
  response: OpenAIChatResponse;
@@ -117,10 +131,10 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIM
117
131
  * ),
118
132
  * ]);
119
133
  */
120
- export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings>, GenerateJsonModel<OpenAIChatSingleFunctionPrompt<unknown>, OpenAIChatResponse, OpenAIChatSettings>, GenerateJsonOrTextModel<OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, OpenAIChatResponse, OpenAIChatSettings> {
134
+ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextGenerationModel<OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings>, JsonGenerationModel<OpenAIChatSingleFunctionPrompt<unknown>, OpenAIChatResponse, OpenAIChatSettings>, JsonOrTextGenerationModel<OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, OpenAIChatResponse, OpenAIChatSettings> {
121
135
  constructor(settings: OpenAIChatSettings);
122
136
  readonly provider: "openai";
123
- get modelName(): "gpt-4" | "gpt-4-0314" | "gpt-4-0613" | "gpt-4-32k" | "gpt-4-32k-0314" | "gpt-4-32k-0613" | "gpt-3.5-turbo" | "gpt-3.5-turbo-0301" | "gpt-3.5-turbo-0613" | "gpt-3.5-turbo-16k" | "gpt-3.5-turbo-16k-0613";
137
+ get modelName(): OpenAIChatModelType;
124
138
  readonly contextWindowSize: number;
125
139
  readonly tokenizer: TikTokenTokenizer;
126
140
  private get apiKey();
@@ -134,16 +148,17 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
134
148
  } & ModelFunctionOptions<Partial<OpenAIChatCallSettings & OpenAIModelSettings & {
135
149
  user?: string;
136
150
  }>>): Promise<RESULT>;
151
+ get settingsForEvent(): Partial<OpenAIChatSettings>;
137
152
  generateTextResponse(prompt: OpenAIChatMessage[], options?: ModelFunctionOptions<OpenAIChatSettings>): Promise<{
138
153
  object: "chat.completion";
139
154
  model: string;
140
- id: string;
141
- created: number;
142
155
  usage: {
143
156
  prompt_tokens: number;
144
- total_tokens: number;
145
157
  completion_tokens: number;
158
+ total_tokens: number;
146
159
  };
160
+ id: string;
161
+ created: number;
147
162
  choices: {
148
163
  message: {
149
164
  content: string | null;
@@ -170,6 +185,11 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
170
185
  */
171
186
  generateJsonResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: ModelFunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
172
187
  extractJson(response: OpenAIChatResponse): unknown;
188
+ extractUsage(response: OpenAIChatResponse): {
189
+ promptTokens: number;
190
+ completionTokens: number;
191
+ totalTokens: number;
192
+ };
173
193
  withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
174
194
  withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
175
195
  }
@@ -241,23 +261,23 @@ declare const openAIChatResponseSchema: z.ZodObject<{
241
261
  total_tokens: z.ZodNumber;
242
262
  }, "strip", z.ZodTypeAny, {
243
263
  prompt_tokens: number;
244
- total_tokens: number;
245
264
  completion_tokens: number;
265
+ total_tokens: number;
246
266
  }, {
247
267
  prompt_tokens: number;
248
- total_tokens: number;
249
268
  completion_tokens: number;
269
+ total_tokens: number;
250
270
  }>;
251
271
  }, "strip", z.ZodTypeAny, {
252
272
  object: "chat.completion";
253
273
  model: string;
254
- id: string;
255
- created: number;
256
274
  usage: {
257
275
  prompt_tokens: number;
258
- total_tokens: number;
259
276
  completion_tokens: number;
277
+ total_tokens: number;
260
278
  };
279
+ id: string;
280
+ created: number;
261
281
  choices: {
262
282
  message: {
263
283
  content: string | null;
@@ -274,13 +294,13 @@ declare const openAIChatResponseSchema: z.ZodObject<{
274
294
  }, {
275
295
  object: "chat.completion";
276
296
  model: string;
277
- id: string;
278
- created: number;
279
297
  usage: {
280
298
  prompt_tokens: number;
281
- total_tokens: number;
282
299
  completion_tokens: number;
300
+ total_tokens: number;
283
301
  };
302
+ id: string;
303
+ created: number;
284
304
  choices: {
285
305
  message: {
286
306
  content: string | null;
@@ -309,13 +329,13 @@ export declare const OpenAIChatResponseFormat: {
309
329
  handler: ResponseHandler<{
310
330
  object: "chat.completion";
311
331
  model: string;
312
- id: string;
313
- created: number;
314
332
  usage: {
315
333
  prompt_tokens: number;
316
- total_tokens: number;
317
334
  completion_tokens: number;
335
+ total_tokens: number;
318
336
  };
337
+ id: string;
338
+ created: number;
319
339
  choices: {
320
340
  message: {
321
341
  content: string | null;