modelfusion 0.29.0 → 0.29.1

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.
@@ -12,7 +12,10 @@ export interface TextEmbeddingModel<RESPONSE, SETTINGS extends TextEmbeddingMode
12
12
  * The size of the embedding vector.
13
13
  */
14
14
  readonly embeddingDimensions: number | undefined;
15
- readonly maxTextsPerCall: number;
15
+ /**
16
+ * Limit of how many texts can be sent in a single API call.
17
+ */
18
+ readonly maxTextsPerCall: number | undefined;
16
19
  generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<SETTINGS>): PromiseLike<RESPONSE>;
17
20
  extractEmbeddings(response: RESPONSE): Vector[];
18
21
  }
@@ -24,8 +24,13 @@ function embedTexts(model, texts, options) {
24
24
  // split the texts into groups that are small enough to be sent in one call:
25
25
  const maxTextsPerCall = model.maxTextsPerCall;
26
26
  const textGroups = [];
27
- for (let i = 0; i < texts.length; i += maxTextsPerCall) {
28
- textGroups.push(texts.slice(i, i + maxTextsPerCall));
27
+ if (maxTextsPerCall == null) {
28
+ textGroups.push(texts);
29
+ }
30
+ else {
31
+ for (let i = 0; i < texts.length; i += maxTextsPerCall) {
32
+ textGroups.push(texts.slice(i, i + maxTextsPerCall));
33
+ }
29
34
  }
30
35
  return Promise.all(textGroups.map((textGroup) => model.generateEmbeddingResponse(textGroup, options)));
31
36
  },
@@ -21,8 +21,13 @@ export function embedTexts(model, texts, options) {
21
21
  // split the texts into groups that are small enough to be sent in one call:
22
22
  const maxTextsPerCall = model.maxTextsPerCall;
23
23
  const textGroups = [];
24
- for (let i = 0; i < texts.length; i += maxTextsPerCall) {
25
- textGroups.push(texts.slice(i, i + maxTextsPerCall));
24
+ if (maxTextsPerCall == null) {
25
+ textGroups.push(texts);
26
+ }
27
+ else {
28
+ for (let i = 0; i < texts.length; i += maxTextsPerCall) {
29
+ textGroups.push(texts.slice(i, i + maxTextsPerCall));
30
+ }
26
31
  }
27
32
  return Promise.all(textGroups.map((textGroup) => model.generateEmbeddingResponse(textGroup, options)));
28
33
  },
@@ -58,7 +58,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
58
58
  enumerable: true,
59
59
  configurable: true,
60
60
  writable: true,
61
- value: 1
61
+ value: 2048
62
62
  });
63
63
  Object.defineProperty(this, "embeddingDimensions", {
64
64
  enumerable: true,
@@ -90,7 +90,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
90
90
  async countTokens(input) {
91
91
  return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
92
92
  }
93
- async callAPI(text, options) {
93
+ async callAPI(texts, options) {
94
94
  const run = options?.run;
95
95
  const settings = options?.settings;
96
96
  const combinedSettings = {
@@ -103,7 +103,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
103
103
  ...combinedSettings,
104
104
  // other settings:
105
105
  abortSignal: run?.abortSignal,
106
- input: text,
106
+ input: texts,
107
107
  };
108
108
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
109
109
  retry: callSettings.api?.retry,
@@ -118,7 +118,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
118
118
  if (texts.length > this.maxTextsPerCall) {
119
119
  throw new Error(`The OpenAI embedding API only supports ${this.maxTextsPerCall} texts per API call.`);
120
120
  }
121
- return this.callAPI(texts[0], options);
121
+ return this.callAPI(texts, options);
122
122
  }
123
123
  extractEmbeddings(response) {
124
124
  return [response.data[0].embedding];
@@ -130,13 +130,11 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
130
130
  exports.OpenAITextEmbeddingModel = OpenAITextEmbeddingModel;
131
131
  const openAITextEmbeddingResponseSchema = zod_1.default.object({
132
132
  object: zod_1.default.literal("list"),
133
- data: zod_1.default
134
- .array(zod_1.default.object({
133
+ data: zod_1.default.array(zod_1.default.object({
135
134
  object: zod_1.default.literal("embedding"),
136
135
  embedding: zod_1.default.array(zod_1.default.number()),
137
136
  index: zod_1.default.number(),
138
- }))
139
- .length(1),
137
+ })),
140
138
  model: zod_1.default.string(),
141
139
  usage: zod_1.default.object({
142
140
  prompt_tokens: zod_1.default.number(),
@@ -40,12 +40,12 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
40
40
  constructor(settings: OpenAITextEmbeddingModelSettings);
41
41
  readonly provider: "openai";
42
42
  get modelName(): "text-embedding-ada-002";
43
- readonly maxTextsPerCall = 1;
43
+ readonly maxTextsPerCall = 2048;
44
44
  readonly embeddingDimensions: number;
45
45
  readonly tokenizer: TikTokenTokenizer;
46
46
  readonly contextWindowSize: number;
47
47
  countTokens(input: string): Promise<number>;
48
- callAPI(text: string, options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<OpenAITextEmbeddingResponse>;
48
+ callAPI(texts: Array<string>, options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<OpenAITextEmbeddingResponse>;
49
49
  get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
50
50
  generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<{
51
51
  object: "list";
@@ -50,7 +50,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
50
50
  enumerable: true,
51
51
  configurable: true,
52
52
  writable: true,
53
- value: 1
53
+ value: 2048
54
54
  });
55
55
  Object.defineProperty(this, "embeddingDimensions", {
56
56
  enumerable: true,
@@ -82,7 +82,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
82
82
  async countTokens(input) {
83
83
  return countTokens(this.tokenizer, input);
84
84
  }
85
- async callAPI(text, options) {
85
+ async callAPI(texts, options) {
86
86
  const run = options?.run;
87
87
  const settings = options?.settings;
88
88
  const combinedSettings = {
@@ -95,7 +95,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
95
95
  ...combinedSettings,
96
96
  // other settings:
97
97
  abortSignal: run?.abortSignal,
98
- input: text,
98
+ input: texts,
99
99
  };
100
100
  return callWithRetryAndThrottle({
101
101
  retry: callSettings.api?.retry,
@@ -110,7 +110,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
110
110
  if (texts.length > this.maxTextsPerCall) {
111
111
  throw new Error(`The OpenAI embedding API only supports ${this.maxTextsPerCall} texts per API call.`);
112
112
  }
113
- return this.callAPI(texts[0], options);
113
+ return this.callAPI(texts, options);
114
114
  }
115
115
  extractEmbeddings(response) {
116
116
  return [response.data[0].embedding];
@@ -121,13 +121,11 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
121
121
  }
122
122
  const openAITextEmbeddingResponseSchema = z.object({
123
123
  object: z.literal("list"),
124
- data: z
125
- .array(z.object({
124
+ data: z.array(z.object({
126
125
  object: z.literal("embedding"),
127
126
  embedding: z.array(z.number()),
128
127
  index: z.number(),
129
- }))
130
- .length(1),
128
+ })),
131
129
  model: z.string(),
132
130
  usage: z.object({
133
131
  prompt_tokens: z.number(),
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
4
- "version": "0.29.0",
4
+ "version": "0.29.1",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [