modelfusion 0.123.0 → 0.125.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/CHANGELOG.md +47 -1
  2. package/README.md +9 -22
  3. package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -11
  4. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -1
  5. package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -11
  6. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -11
  7. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -1
  8. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -11
  9. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -11
  10. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -1
  11. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -11
  12. package/model-function/generate-text/TextGenerationModel.d.ts +31 -1
  13. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  14. package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -9
  15. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
  16. package/model-provider/cohere/CohereTextGenerationModel.js +7 -10
  17. package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
  18. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +2 -2
  19. package/model-provider/mistral/MistralChatModel.cjs +0 -9
  20. package/model-provider/mistral/MistralChatModel.d.ts +2 -11
  21. package/model-provider/mistral/MistralChatModel.js +0 -9
  22. package/model-provider/mistral/index.cjs +1 -2
  23. package/model-provider/mistral/index.d.ts +0 -1
  24. package/model-provider/mistral/index.js +0 -1
  25. package/model-provider/ollama/OllamaChatModel.cjs +0 -9
  26. package/model-provider/ollama/OllamaChatModel.d.ts +2 -11
  27. package/model-provider/ollama/OllamaChatModel.js +0 -9
  28. package/model-provider/ollama/OllamaCompletionModel.d.ts +2 -2
  29. package/model-provider/ollama/index.cjs +0 -1
  30. package/model-provider/ollama/index.d.ts +0 -1
  31. package/model-provider/ollama/index.js +0 -1
  32. package/model-provider/openai/AbstractOpenAIChatModel.cjs +5 -3
  33. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -5
  34. package/model-provider/openai/AbstractOpenAIChatModel.js +5 -3
  35. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs +82 -0
  36. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts +91 -0
  37. package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js +78 -0
  38. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  39. package/model-provider/openai/OpenAIChatModel.cjs +0 -9
  40. package/model-provider/openai/OpenAIChatModel.d.ts +2 -11
  41. package/model-provider/openai/OpenAIChatModel.js +0 -9
  42. package/model-provider/openai/OpenAICompletionModel.cjs +3 -6
  43. package/model-provider/openai/OpenAICompletionModel.d.ts +3 -8
  44. package/model-provider/openai/OpenAICompletionModel.js +4 -7
  45. package/model-provider/openai/OpenAIFacade.cjs +18 -18
  46. package/model-provider/openai/OpenAIFacade.d.ts +18 -18
  47. package/model-provider/openai/OpenAIFacade.js +18 -18
  48. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -68
  49. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +4 -82
  50. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -68
  51. package/model-provider/openai/index.cjs +2 -2
  52. package/model-provider/openai/index.d.ts +1 -1
  53. package/model-provider/openai/index.js +1 -1
  54. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -9
  55. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -11
  56. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -9
  57. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +10 -0
  58. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +10 -2
  59. package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +10 -0
  60. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +40 -7
  61. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +35 -6
  62. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +37 -6
  63. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +27 -0
  64. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +18 -0
  65. package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +23 -0
  66. package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +33 -0
  67. package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +13 -0
  68. package/model-provider/openai-compatible/PerplexityApiConfiguration.js +29 -0
  69. package/model-provider/openai-compatible/index.cjs +2 -0
  70. package/model-provider/openai-compatible/index.d.ts +2 -0
  71. package/model-provider/openai-compatible/index.js +2 -0
  72. package/package.json +1 -1
@@ -1,6 +1,6 @@
1
1
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
2
2
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
3
- import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
3
+ import { chat, instruction, text, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
4
4
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
5
5
  import { AbstractOpenAICompletionModel, } from "./AbstractOpenAICompletionModel.js";
6
6
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
@@ -93,15 +93,12 @@ export class OpenAICompletionModel extends AbstractOpenAICompletionModel {
93
93
  ];
94
94
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
95
95
  }
96
- /**
97
- * Returns this model with an instruction prompt template.
98
- */
96
+ withTextPrompt() {
97
+ return this.withPromptTemplate(text());
98
+ }
99
99
  withInstructionPrompt() {
100
100
  return this.withPromptTemplate(instruction());
101
101
  }
102
- /**
103
- * Returns this model with a chat prompt template.
104
- */
105
102
  withChatPrompt(options) {
106
103
  return this.withPromptTemplate(chat(options));
107
104
  }
@@ -42,10 +42,10 @@ exports.AzureApi = AzureApi;
42
42
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
43
43
  * });
44
44
  *
45
- * const text = await generateText(
45
+ * const text = await generateText({
46
46
  * model,
47
- * "Write a short story about a robot learning to love:\n\n"
48
- * );
47
+ * prompt: "Write a short story about a robot learning to love:\n\n"
48
+ * });
49
49
  *
50
50
  * @return A new instance of {@link OpenAICompletionModel}.
51
51
  */
@@ -65,14 +65,14 @@ exports.CompletionTextGenerator = CompletionTextGenerator;
65
65
  * maxGenerationTokens: 500,
66
66
  * });
67
67
  *
68
- * const text = await generateText(
68
+ * const text = await generateText({
69
69
  * model,
70
- * [
70
+ * prompt: [
71
71
  * openai.ChatMessage.system(
72
72
  * "Write a short story about a robot learning to love:"
73
73
  * ),
74
74
  * ]
75
- * );
75
+ * });
76
76
  */
77
77
  function ChatTextGenerator(settings) {
78
78
  return new OpenAIChatModel_js_1.OpenAIChatModel(settings);
@@ -84,13 +84,13 @@ exports.ChatTextGenerator = ChatTextGenerator;
84
84
  * @see https://platform.openai.com/docs/api-reference/embeddings
85
85
  *
86
86
  * @example
87
- * const embeddings = await embedMany(
88
- * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
89
- * [
87
+ * const embeddings = await embedMany({
88
+ * model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
89
+ * values: [
90
90
  * "At first, Nox didn't know what to do with the pup.",
91
91
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
92
92
  * ]
93
- * );
93
+ * });
94
94
  *
95
95
  * @returns A new instance of {@link OpenAITextEmbeddingModel}.
96
96
  */
@@ -117,13 +117,13 @@ exports.SpeechGenerator = SpeechGenerator;
117
117
  * @example
118
118
  * const data = await fs.promises.readFile("data/test.mp3");
119
119
  *
120
- * const transcription = await transcribe(
121
- * openai.Transcriber({ model: "whisper-1" }),
122
- * {
120
+ * const transcription = await transcribe({
121
+ * model: openai.Transcriber({ model: "whisper-1" }),
122
+ * data: {
123
123
  * type: "mp3",
124
124
  * data,
125
125
  * }
126
- * );
126
+ * });
127
127
  *
128
128
  * @returns A new instance of {@link OpenAITranscriptionModel}.
129
129
  */
@@ -137,10 +137,10 @@ exports.Transcriber = Transcriber;
137
137
  * @see https://platform.openai.com/docs/api-reference/images/create
138
138
  *
139
139
  * @example
140
- * const image = await generateImage(
141
- * new OpenAIImageGenerationModel({ size: "512x512" }),
142
- * "the wicked witch of the west in the style of early 19th century painting"
143
- * );
140
+ * const image = await generateImage({
141
+ * model: new OpenAIImageGenerationModel({ size: "512x512" }),
142
+ * prompt: "the wicked witch of the west in the style of early 19th century painting"
143
+ * });
144
144
  *
145
145
  * @returns A new instance of {@link OpenAIImageGenerationModel}.
146
146
  */
@@ -36,10 +36,10 @@ export declare function AzureApi(settings: AzureOpenAIApiConfigurationOptions):
36
36
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
37
37
  * });
38
38
  *
39
- * const text = await generateText(
39
+ * const text = await generateText({
40
40
  * model,
41
- * "Write a short story about a robot learning to love:\n\n"
42
- * );
41
+ * prompt: "Write a short story about a robot learning to love:\n\n"
42
+ * });
43
43
  *
44
44
  * @return A new instance of {@link OpenAICompletionModel}.
45
45
  */
@@ -56,14 +56,14 @@ export declare function CompletionTextGenerator(settings: OpenAICompletionModelS
56
56
  * maxGenerationTokens: 500,
57
57
  * });
58
58
  *
59
- * const text = await generateText(
59
+ * const text = await generateText({
60
60
  * model,
61
- * [
61
+ * prompt: [
62
62
  * openai.ChatMessage.system(
63
63
  * "Write a short story about a robot learning to love:"
64
64
  * ),
65
65
  * ]
66
- * );
66
+ * });
67
67
  */
68
68
  export declare function ChatTextGenerator(settings: OpenAIChatSettings): OpenAIChatModel;
69
69
  /**
@@ -72,13 +72,13 @@ export declare function ChatTextGenerator(settings: OpenAIChatSettings): OpenAIC
72
72
  * @see https://platform.openai.com/docs/api-reference/embeddings
73
73
  *
74
74
  * @example
75
- * const embeddings = await embedMany(
76
- * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
77
- * [
75
+ * const embeddings = await embedMany({
76
+ * model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
77
+ * values: [
78
78
  * "At first, Nox didn't know what to do with the pup.",
79
79
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
80
80
  * ]
81
- * );
81
+ * });
82
82
  *
83
83
  * @returns A new instance of {@link OpenAITextEmbeddingModel}.
84
84
  */
@@ -99,13 +99,13 @@ export declare function SpeechGenerator(settings: OpenAISpeechModelSettings): Op
99
99
  * @example
100
100
  * const data = await fs.promises.readFile("data/test.mp3");
101
101
  *
102
- * const transcription = await transcribe(
103
- * openai.Transcriber({ model: "whisper-1" }),
104
- * {
102
+ * const transcription = await transcribe({
103
+ * model: openai.Transcriber({ model: "whisper-1" }),
104
+ * data: {
105
105
  * type: "mp3",
106
106
  * data,
107
107
  * }
108
- * );
108
+ * });
109
109
  *
110
110
  * @returns A new instance of {@link OpenAITranscriptionModel}.
111
111
  */
@@ -116,10 +116,10 @@ export declare function Transcriber(settings: OpenAITranscriptionModelSettings):
116
116
  * @see https://platform.openai.com/docs/api-reference/images/create
117
117
  *
118
118
  * @example
119
- * const image = await generateImage(
120
- * new OpenAIImageGenerationModel({ size: "512x512" }),
121
- * "the wicked witch of the west in the style of early 19th century painting"
122
- * );
119
+ * const image = await generateImage({
120
+ * model: new OpenAIImageGenerationModel({ size: "512x512" }),
121
+ * prompt: "the wicked witch of the west in the style of early 19th century painting"
122
+ * });
123
123
  *
124
124
  * @returns A new instance of {@link OpenAIImageGenerationModel}.
125
125
  */
@@ -37,10 +37,10 @@ export function AzureApi(settings) {
37
37
  * retry: retryWithExponentialBackoff({ maxTries: 5 }),
38
38
  * });
39
39
  *
40
- * const text = await generateText(
40
+ * const text = await generateText({
41
41
  * model,
42
- * "Write a short story about a robot learning to love:\n\n"
43
- * );
42
+ * prompt: "Write a short story about a robot learning to love:\n\n"
43
+ * });
44
44
  *
45
45
  * @return A new instance of {@link OpenAICompletionModel}.
46
46
  */
@@ -59,14 +59,14 @@ export function CompletionTextGenerator(settings) {
59
59
  * maxGenerationTokens: 500,
60
60
  * });
61
61
  *
62
- * const text = await generateText(
62
+ * const text = await generateText({
63
63
  * model,
64
- * [
64
+ * prompt: [
65
65
  * openai.ChatMessage.system(
66
66
  * "Write a short story about a robot learning to love:"
67
67
  * ),
68
68
  * ]
69
- * );
69
+ * });
70
70
  */
71
71
  export function ChatTextGenerator(settings) {
72
72
  return new OpenAIChatModel(settings);
@@ -77,13 +77,13 @@ export function ChatTextGenerator(settings) {
77
77
  * @see https://platform.openai.com/docs/api-reference/embeddings
78
78
  *
79
79
  * @example
80
- * const embeddings = await embedMany(
81
- * openai.TextEmbedder({ model: "text-embedding-ada-002" }),
82
- * [
80
+ * const embeddings = await embedMany({
81
+ * model: openai.TextEmbedder({ model: "text-embedding-ada-002" }),
82
+ * values: [
83
83
  * "At first, Nox didn't know what to do with the pup.",
84
84
  * "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
85
85
  * ]
86
- * );
86
+ * });
87
87
  *
88
88
  * @returns A new instance of {@link OpenAITextEmbeddingModel}.
89
89
  */
@@ -108,13 +108,13 @@ export function SpeechGenerator(settings) {
108
108
  * @example
109
109
  * const data = await fs.promises.readFile("data/test.mp3");
110
110
  *
111
- * const transcription = await transcribe(
112
- * openai.Transcriber({ model: "whisper-1" }),
113
- * {
111
+ * const transcription = await transcribe({
112
+ * model: openai.Transcriber({ model: "whisper-1" }),
113
+ * data: {
114
114
  * type: "mp3",
115
115
  * data,
116
116
  * }
117
- * );
117
+ * });
118
118
  *
119
119
  * @returns A new instance of {@link OpenAITranscriptionModel}.
120
120
  */
@@ -127,10 +127,10 @@ export function Transcriber(settings) {
127
127
  * @see https://platform.openai.com/docs/api-reference/images/create
128
128
  *
129
129
  * @example
130
- * const image = await generateImage(
131
- * new OpenAIImageGenerationModel({ size: "512x512" }),
132
- * "the wicked witch of the west in the style of early 19th century painting"
133
- * );
130
+ * const image = await generateImage({
131
+ * model: new OpenAIImageGenerationModel({ size: "512x512" }),
132
+ * prompt: "the wicked witch of the west in the style of early 19th century painting"
133
+ * });
134
134
  *
135
135
  * @returns A new instance of {@link OpenAIImageGenerationModel}.
136
136
  */
@@ -1,14 +1,8 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.OpenAITextEmbeddingModel = exports.calculateOpenAIEmbeddingCostInMillicents = exports.isOpenAIEmbeddingModel = exports.OPENAI_TEXT_EMBEDDING_MODELS = void 0;
4
- const zod_1 = require("zod");
5
- const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
- const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
- const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
9
4
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
10
- const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
11
- const OpenAIError_js_1 = require("./OpenAIError.cjs");
5
+ const AbstractOpenAITextEmbeddingModel_js_1 = require("./AbstractOpenAITextEmbeddingModel.cjs");
12
6
  const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
13
7
  exports.OPENAI_TEXT_EMBEDDING_MODELS = {
14
8
  "text-embedding-ada-002": {
@@ -43,21 +37,15 @@ exports.calculateOpenAIEmbeddingCostInMillicents = calculateOpenAIEmbeddingCostI
43
37
  * ]
44
38
  * );
45
39
  */
46
- class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
40
+ class OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel_js_1.AbstractOpenAITextEmbeddingModel {
47
41
  constructor(settings) {
48
- super({ settings });
42
+ super(settings);
49
43
  Object.defineProperty(this, "provider", {
50
44
  enumerable: true,
51
45
  configurable: true,
52
46
  writable: true,
53
47
  value: "openai"
54
48
  });
55
- Object.defineProperty(this, "isParallelizable", {
56
- enumerable: true,
57
- configurable: true,
58
- writable: true,
59
- value: true
60
- });
61
49
  Object.defineProperty(this, "embeddingDimensions", {
62
50
  enumerable: true,
63
51
  configurable: true,
@@ -85,67 +73,14 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
85
73
  get modelName() {
86
74
  return this.settings.model;
87
75
  }
88
- get maxValuesPerCall() {
89
- return this.settings.maxValuesPerCall ?? 2048;
90
- }
91
76
  async countTokens(input) {
92
77
  return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
93
78
  }
94
- async callAPI(texts, callOptions) {
95
- const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
96
- const abortSignal = callOptions.run?.abortSignal;
97
- return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
98
- retry: api.retry,
99
- throttle: api.throttle,
100
- call: async () => (0, postToApi_js_1.postJsonToApi)({
101
- url: api.assembleUrl("/embeddings"),
102
- headers: api.headers({
103
- functionType: callOptions.functionType,
104
- functionId: callOptions.functionId,
105
- run: callOptions.run,
106
- callId: callOptions.callId,
107
- }),
108
- body: {
109
- model: this.modelName,
110
- input: texts,
111
- user: this.settings.isUserIdForwardingEnabled
112
- ? callOptions.run?.userId
113
- : undefined,
114
- },
115
- failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
116
- successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)((0, ZodSchema_js_1.zodSchema)(openAITextEmbeddingResponseSchema)),
117
- abortSignal,
118
- }),
119
- });
120
- }
121
79
  get settingsForEvent() {
122
80
  return {};
123
81
  }
124
- async doEmbedValues(texts, callOptions) {
125
- if (texts.length > this.maxValuesPerCall) {
126
- throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
127
- }
128
- const rawResponse = await this.callAPI(texts, callOptions);
129
- return {
130
- rawResponse,
131
- embeddings: rawResponse.data.map((data) => data.embedding),
132
- };
133
- }
134
82
  withSettings(additionalSettings) {
135
83
  return new OpenAITextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
136
84
  }
137
85
  }
138
86
  exports.OpenAITextEmbeddingModel = OpenAITextEmbeddingModel;
139
- const openAITextEmbeddingResponseSchema = zod_1.z.object({
140
- object: zod_1.z.literal("list"),
141
- data: zod_1.z.array(zod_1.z.object({
142
- object: zod_1.z.literal("embedding"),
143
- embedding: zod_1.z.array(zod_1.z.number()),
144
- index: zod_1.z.number(),
145
- })),
146
- model: zod_1.z.string(),
147
- usage: zod_1.z.object({
148
- prompt_tokens: zod_1.z.number(),
149
- total_tokens: zod_1.z.number(),
150
- }),
151
- });
@@ -1,8 +1,5 @@
1
- import { z } from "zod";
2
- import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
- import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
- import { AbstractModel } from "../../model-function/AbstractModel.js";
5
- import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
1
+ import { EmbeddingModel } from "../../model-function/embed/EmbeddingModel.js";
2
+ import { AbstractOpenAITextEmbeddingModel, AbstractOpenAITextEmbeddingModelSettings, OpenAITextEmbeddingResponse } from "./AbstractOpenAITextEmbeddingModel.js";
6
3
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
7
4
  export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
8
5
  "text-embedding-ada-002": {
@@ -17,11 +14,8 @@ export declare const calculateOpenAIEmbeddingCostInMillicents: ({ model, respons
17
14
  model: OpenAITextEmbeddingModelType;
18
15
  responses: OpenAITextEmbeddingResponse[];
19
16
  }) => number;
20
- export interface OpenAITextEmbeddingModelSettings extends EmbeddingModelSettings {
21
- api?: ApiConfiguration;
22
- maxValuesPerCall?: number | undefined;
17
+ export interface OpenAITextEmbeddingModelSettings extends AbstractOpenAITextEmbeddingModelSettings {
23
18
  model: OpenAITextEmbeddingModelType;
24
- isUserIdForwardingEnabled?: boolean;
25
19
  }
26
20
  /**
27
21
  * Create a text embedding model that calls the OpenAI embedding API.
@@ -37,86 +31,14 @@ export interface OpenAITextEmbeddingModelSettings extends EmbeddingModelSettings
37
31
  * ]
38
32
  * );
39
33
  */
40
- export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
34
+ export declare class OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
41
35
  constructor(settings: OpenAITextEmbeddingModelSettings);
42
36
  readonly provider: "openai";
43
37
  get modelName(): "text-embedding-ada-002";
44
- get maxValuesPerCall(): number;
45
- readonly isParallelizable = true;
46
38
  readonly embeddingDimensions: number;
47
39
  readonly tokenizer: TikTokenTokenizer;
48
40
  readonly contextWindowSize: number;
49
41
  countTokens(input: string): Promise<number>;
50
- callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<OpenAITextEmbeddingResponse>;
51
42
  get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
52
- doEmbedValues(texts: string[], callOptions: FunctionCallOptions): Promise<{
53
- rawResponse: {
54
- object: "list";
55
- data: {
56
- object: "embedding";
57
- embedding: number[];
58
- index: number;
59
- }[];
60
- model: string;
61
- usage: {
62
- prompt_tokens: number;
63
- total_tokens: number;
64
- };
65
- };
66
- embeddings: number[][];
67
- }>;
68
43
  withSettings(additionalSettings: OpenAITextEmbeddingModelSettings): this;
69
44
  }
70
- declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
71
- object: z.ZodLiteral<"list">;
72
- data: z.ZodArray<z.ZodObject<{
73
- object: z.ZodLiteral<"embedding">;
74
- embedding: z.ZodArray<z.ZodNumber, "many">;
75
- index: z.ZodNumber;
76
- }, "strip", z.ZodTypeAny, {
77
- object: "embedding";
78
- embedding: number[];
79
- index: number;
80
- }, {
81
- object: "embedding";
82
- embedding: number[];
83
- index: number;
84
- }>, "many">;
85
- model: z.ZodString;
86
- usage: z.ZodObject<{
87
- prompt_tokens: z.ZodNumber;
88
- total_tokens: z.ZodNumber;
89
- }, "strip", z.ZodTypeAny, {
90
- prompt_tokens: number;
91
- total_tokens: number;
92
- }, {
93
- prompt_tokens: number;
94
- total_tokens: number;
95
- }>;
96
- }, "strip", z.ZodTypeAny, {
97
- object: "list";
98
- data: {
99
- object: "embedding";
100
- embedding: number[];
101
- index: number;
102
- }[];
103
- model: string;
104
- usage: {
105
- prompt_tokens: number;
106
- total_tokens: number;
107
- };
108
- }, {
109
- object: "list";
110
- data: {
111
- object: "embedding";
112
- embedding: number[];
113
- index: number;
114
- }[];
115
- model: string;
116
- usage: {
117
- prompt_tokens: number;
118
- total_tokens: number;
119
- };
120
- }>;
121
- export type OpenAITextEmbeddingResponse = z.infer<typeof openAITextEmbeddingResponseSchema>;
122
- export {};
@@ -1,11 +1,5 @@
1
- import { z } from "zod";
2
- import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
- import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
- import { zodSchema } from "../../core/schema/ZodSchema.js";
5
- import { AbstractModel } from "../../model-function/AbstractModel.js";
6
1
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
7
- import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
8
- import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
2
+ import { AbstractOpenAITextEmbeddingModel, } from "./AbstractOpenAITextEmbeddingModel.js";
9
3
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
10
4
  export const OPENAI_TEXT_EMBEDDING_MODELS = {
11
5
  "text-embedding-ada-002": {
@@ -38,21 +32,15 @@ export const calculateOpenAIEmbeddingCostInMillicents = ({ model, responses, })
38
32
  * ]
39
33
  * );
40
34
  */
41
- export class OpenAITextEmbeddingModel extends AbstractModel {
35
+ export class OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel {
42
36
  constructor(settings) {
43
- super({ settings });
37
+ super(settings);
44
38
  Object.defineProperty(this, "provider", {
45
39
  enumerable: true,
46
40
  configurable: true,
47
41
  writable: true,
48
42
  value: "openai"
49
43
  });
50
- Object.defineProperty(this, "isParallelizable", {
51
- enumerable: true,
52
- configurable: true,
53
- writable: true,
54
- value: true
55
- });
56
44
  Object.defineProperty(this, "embeddingDimensions", {
57
45
  enumerable: true,
58
46
  configurable: true,
@@ -80,66 +68,13 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
80
68
  get modelName() {
81
69
  return this.settings.model;
82
70
  }
83
- get maxValuesPerCall() {
84
- return this.settings.maxValuesPerCall ?? 2048;
85
- }
86
71
  async countTokens(input) {
87
72
  return countTokens(this.tokenizer, input);
88
73
  }
89
- async callAPI(texts, callOptions) {
90
- const api = this.settings.api ?? new OpenAIApiConfiguration();
91
- const abortSignal = callOptions.run?.abortSignal;
92
- return callWithRetryAndThrottle({
93
- retry: api.retry,
94
- throttle: api.throttle,
95
- call: async () => postJsonToApi({
96
- url: api.assembleUrl("/embeddings"),
97
- headers: api.headers({
98
- functionType: callOptions.functionType,
99
- functionId: callOptions.functionId,
100
- run: callOptions.run,
101
- callId: callOptions.callId,
102
- }),
103
- body: {
104
- model: this.modelName,
105
- input: texts,
106
- user: this.settings.isUserIdForwardingEnabled
107
- ? callOptions.run?.userId
108
- : undefined,
109
- },
110
- failedResponseHandler: failedOpenAICallResponseHandler,
111
- successfulResponseHandler: createJsonResponseHandler(zodSchema(openAITextEmbeddingResponseSchema)),
112
- abortSignal,
113
- }),
114
- });
115
- }
116
74
  get settingsForEvent() {
117
75
  return {};
118
76
  }
119
- async doEmbedValues(texts, callOptions) {
120
- if (texts.length > this.maxValuesPerCall) {
121
- throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
122
- }
123
- const rawResponse = await this.callAPI(texts, callOptions);
124
- return {
125
- rawResponse,
126
- embeddings: rawResponse.data.map((data) => data.embedding),
127
- };
128
- }
129
77
  withSettings(additionalSettings) {
130
78
  return new OpenAITextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
131
79
  }
132
80
  }
133
- const openAITextEmbeddingResponseSchema = z.object({
134
- object: z.literal("list"),
135
- data: z.array(z.object({
136
- object: z.literal("embedding"),
137
- embedding: z.array(z.number()),
138
- index: z.number(),
139
- })),
140
- model: z.string(),
141
- usage: z.object({
142
- prompt_tokens: z.number(),
143
- total_tokens: z.number(),
144
- }),
145
- });
@@ -26,14 +26,14 @@ var __importStar = (this && this.__importStar) || function (mod) {
26
26
  return result;
27
27
  };
28
28
  Object.defineProperty(exports, "__esModule", { value: true });
29
- exports.openai = exports.OpenAIChatPrompt = void 0;
29
+ exports.openai = void 0;
30
30
  __exportStar(require("./AbstractOpenAIChatModel.cjs"), exports);
31
31
  __exportStar(require("./AbstractOpenAICompletionModel.cjs"), exports);
32
+ __exportStar(require("./AbstractOpenAITextEmbeddingModel.cjs"), exports);
32
33
  __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
33
34
  __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
34
35
  __exportStar(require("./OpenAIChatMessage.cjs"), exports);
35
36
  __exportStar(require("./OpenAIChatModel.cjs"), exports);
36
- exports.OpenAIChatPrompt = __importStar(require("./OpenAIChatPromptTemplate.cjs"));
37
37
  __exportStar(require("./OpenAICompletionModel.cjs"), exports);
38
38
  exports.openai = __importStar(require("./OpenAIFacade.cjs"));
39
39
  __exportStar(require("./OpenAIImageGenerationModel.cjs"), exports);
@@ -1,10 +1,10 @@
1
1
  export * from "./AbstractOpenAIChatModel.js";
2
2
  export * from "./AbstractOpenAICompletionModel.js";
3
+ export * from "./AbstractOpenAITextEmbeddingModel.js";
3
4
  export * from "./AzureOpenAIApiConfiguration.js";
4
5
  export * from "./OpenAIApiConfiguration.js";
5
6
  export * from "./OpenAIChatMessage.js";
6
7
  export * from "./OpenAIChatModel.js";
7
- export * as OpenAIChatPrompt from "./OpenAIChatPromptTemplate.js";
8
8
  export * from "./OpenAICompletionModel.js";
9
9
  export { OpenAIErrorData } from "./OpenAIError.js";
10
10
  export * as openai from "./OpenAIFacade.js";
@@ -1,10 +1,10 @@
1
1
  export * from "./AbstractOpenAIChatModel.js";
2
2
  export * from "./AbstractOpenAICompletionModel.js";
3
+ export * from "./AbstractOpenAITextEmbeddingModel.js";
3
4
  export * from "./AzureOpenAIApiConfiguration.js";
4
5
  export * from "./OpenAIApiConfiguration.js";
5
6
  export * from "./OpenAIChatMessage.js";
6
7
  export * from "./OpenAIChatModel.js";
7
- export * as OpenAIChatPrompt from "./OpenAIChatPromptTemplate.js";
8
8
  export * from "./OpenAICompletionModel.js";
9
9
  export * as openai from "./OpenAIFacade.js";
10
10
  export * from "./OpenAIImageGenerationModel.js";
@@ -68,21 +68,12 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
68
68
  template: promptTemplate,
69
69
  });
70
70
  }
71
- /**
72
- * Returns this model with a text prompt template.
73
- */
74
71
  withTextPrompt() {
75
72
  return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.text)());
76
73
  }
77
- /**
78
- * Returns this model with an instruction prompt template.
79
- */
80
74
  withInstructionPrompt() {
81
75
  return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.instruction)());
82
76
  }
83
- /**
84
- * Returns this model with a chat prompt template.
85
- */
86
77
  withChatPrompt() {
87
78
  return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.chat)());
88
79
  }