modelfusion 0.55.1 → 0.57.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -290,7 +290,7 @@ const embeddings = await embedMany(
 );
 ```
 
-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface)
 
 ### [Tokenize Text](https://modelfusion.dev/guide/function/tokenize-text)
 
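The README change above adds Ollama to the list of embedding providers. A minimal sketch of what that looks like with the embedding helpers, assuming the `embedMany(model, values)` call style used in the README and a local Ollama server with a pulled `llama2` model (both are assumptions, not part of this diff):

```ts
import { OllamaTextEmbeddingModel, embedMany } from "modelfusion";

// Sketch only: the model name is a placeholder and Ollama must be running locally.
const embeddings = await embedMany(
  new OllamaTextEmbeddingModel({ model: "llama2" }),
  [
    "At first, Nemo was scared to leave the reef.",
    "Marlin crossed the ocean to find him.",
  ]
);
```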
@@ -10,17 +10,33 @@ const CohereError_js_1 = require("./CohereError.cjs");
 const CohereTokenizer_js_1 = require("./CohereTokenizer.cjs");
 exports.COHERE_TEXT_EMBEDDING_MODELS = {
     "embed-english-light-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 1024,
     },
     "embed-english-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 4096,
     },
     "embed-multilingual-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 768,
     },
+    "embed-english-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 1024,
+    },
+    "embed-english-light-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 384,
+    },
+    "embed-multilingual-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 1024,
+    },
+    "embed-multilingual-light-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 384,
+    },
 };
 /**
  * Create a text embedding model that calls the Cohere Co.Embed API.
@@ -137,13 +153,14 @@ const cohereTextEmbeddingResponseSchema = zod_1.z.object({
         }),
     }),
 });
-async function callCohereEmbeddingAPI({ api = new CohereApiConfiguration_js_1.CohereApiConfiguration(), abortSignal, model, texts, truncate, }) {
+async function callCohereEmbeddingAPI({ api = new CohereApiConfiguration_js_1.CohereApiConfiguration(), abortSignal, model, texts, inputType, truncate, }) {
     return (0, postToApi_js_1.postJsonToApi)({
         url: api.assembleUrl(`/embed`),
         headers: api.headers,
         body: {
             model,
             texts,
+            input_type: inputType,
             truncate,
         },
         failedResponseHandler: CohereError_js_1.failedCohereCallResponseHandler,
@@ -17,11 +17,28 @@ export declare const COHERE_TEXT_EMBEDDING_MODELS: {
         contextWindowSize: number;
         embeddingDimensions: number;
     };
+    "embed-english-v3.0": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+    };
+    "embed-english-light-v3.0": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+    };
+    "embed-multilingual-v3.0": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+    };
+    "embed-multilingual-light-v3.0": {
+        contextWindowSize: number;
+        embeddingDimensions: number;
+    };
 };
 export type CohereTextEmbeddingModelType = keyof typeof COHERE_TEXT_EMBEDDING_MODELS;
 export interface CohereTextEmbeddingModelSettings extends EmbeddingModelSettings {
     api?: ApiConfiguration;
     model: CohereTextEmbeddingModelType;
+    inputType?: "search_document" | "search_query" | "classification" | "clustering";
     truncate?: "NONE" | "START" | "END";
 }
 /**
@@ -41,7 +58,7 @@ export interface CohereTextEmbeddingModelSettings extends EmbeddingModelSettings
 export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEmbeddingModelSettings> implements EmbeddingModel<string, CohereTextEmbeddingModelSettings>, FullTokenizer {
     constructor(settings: CohereTextEmbeddingModelSettings);
     readonly provider: "cohere";
-    get modelName(): "embed-english-light-v2.0" | "embed-english-v2.0" | "embed-multilingual-v2.0";
+    get modelName(): "embed-english-light-v2.0" | "embed-english-v2.0" | "embed-multilingual-v2.0" | "embed-english-v3.0" | "embed-english-light-v3.0" | "embed-multilingual-v3.0" | "embed-multilingual-light-v3.0";
     readonly maxValuesPerCall = 96;
     readonly isParallizable = true;
     readonly embeddingDimensions: number;
@@ -7,17 +7,33 @@ import { failedCohereCallResponseHandler } from "./CohereError.js";
 import { CohereTokenizer } from "./CohereTokenizer.js";
 export const COHERE_TEXT_EMBEDDING_MODELS = {
     "embed-english-light-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 1024,
     },
     "embed-english-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 4096,
     },
     "embed-multilingual-v2.0": {
-        contextWindowSize: 4096,
+        contextWindowSize: 512,
         embeddingDimensions: 768,
     },
+    "embed-english-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 1024,
+    },
+    "embed-english-light-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 384,
+    },
+    "embed-multilingual-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 1024,
+    },
+    "embed-multilingual-light-v3.0": {
+        contextWindowSize: 512,
+        embeddingDimensions: 384,
+    },
 };
 /**
  * Create a text embedding model that calls the Cohere Co.Embed API.
@@ -133,13 +149,14 @@ const cohereTextEmbeddingResponseSchema = z.object({
         }),
     }),
 });
-async function callCohereEmbeddingAPI({ api = new CohereApiConfiguration(), abortSignal, model, texts, truncate, }) {
+async function callCohereEmbeddingAPI({ api = new CohereApiConfiguration(), abortSignal, model, texts, inputType, truncate, }) {
     return postJsonToApi({
         url: api.assembleUrl(`/embed`),
         headers: api.headers,
         body: {
             model,
             texts,
+            input_type: inputType,
             truncate,
         },
         failedResponseHandler: failedCohereCallResponseHandler,
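The Cohere hunks above correct the v2 context window sizes to 512 tokens, register the v3 embedding models, and thread a new optional `inputType` setting through to the API's `input_type` field. A hedged sketch of selecting a v3 model with an input type, based on the settings interface in the typings; the `embedMany(model, values)` call style is an assumption taken from the README:

```ts
import { CohereTextEmbeddingModel, embedMany } from "modelfusion";

// Sketch: per Cohere's documentation, v3 embedding models expect an input type;
// "search_document" marks texts that will be stored for later retrieval.
const model = new CohereTextEmbeddingModel({
  model: "embed-english-v3.0",
  inputType: "search_document",
});

const embeddings = await embedMany(model, [
  "A document to index.",
  "Another document to index.",
]);
```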
@@ -29,12 +29,6 @@ class LlamaCppTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             writable: true,
             value: undefined
         });
-        Object.defineProperty(this, "embeddingDimensions", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
         Object.defineProperty(this, "tokenizer", {
             enumerable: true,
             configurable: true,
@@ -42,7 +36,6 @@ class LlamaCppTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             value: void 0
         });
         this.tokenizer = new LlamaCppTokenizer_js_1.LlamaCppTokenizer(this.settings.api);
-        this.embeddingDimensions = this.settings.embeddingDimensions;
     }
     get modelName() {
         return null;
@@ -50,6 +43,9 @@ class LlamaCppTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
     get isParallizable() {
         return this.settings.isParallizable ?? false;
     }
+    get embeddingDimensions() {
+        return this.settings.embeddingDimensions;
+    }
     async tokenize(text) {
         return this.tokenizer.tokenize(text);
     }
@@ -15,7 +15,7 @@ export declare class LlamaCppTextEmbeddingModel extends AbstractModel<LlamaCppTe
     readonly maxValuesPerCall = 1;
     get isParallizable(): boolean;
     readonly contextWindowSize: undefined;
-    readonly embeddingDimensions: number | undefined;
+    get embeddingDimensions(): number | undefined;
     private readonly tokenizer;
     tokenize(text: string): Promise<number[]>;
     callAPI(texts: Array<string>, options?: FunctionOptions): Promise<LlamaCppTextEmbeddingResponse>;
@@ -26,12 +26,6 @@ export class LlamaCppTextEmbeddingModel extends AbstractModel {
             writable: true,
             value: undefined
         });
-        Object.defineProperty(this, "embeddingDimensions", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
         Object.defineProperty(this, "tokenizer", {
             enumerable: true,
             configurable: true,
@@ -39,7 +33,6 @@ export class LlamaCppTextEmbeddingModel extends AbstractModel {
             value: void 0
         });
         this.tokenizer = new LlamaCppTokenizer(this.settings.api);
-        this.embeddingDimensions = this.settings.embeddingDimensions;
     }
     get modelName() {
         return null;
@@ -47,6 +40,9 @@ export class LlamaCppTextEmbeddingModel extends AbstractModel {
     get isParallizable() {
         return this.settings.isParallizable ?? false;
     }
+    get embeddingDimensions() {
+        return this.settings.embeddingDimensions;
+    }
     async tokenize(text) {
         return this.tokenizer.tokenize(text);
     }
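The Llama.cpp change above moves `embeddingDimensions` from a constructor-assigned field to a getter that reads the settings object on access. A minimal sketch of supplying the setting; the dimension count is an assumption that has to match the model loaded into the llama.cpp server:

```ts
import { LlamaCppTextEmbeddingModel } from "modelfusion";

// Hypothetical dimension count (e.g. 4096 for a Llama 2 7B model).
const model = new LlamaCppTextEmbeddingModel({ embeddingDimensions: 4096 });

console.log(model.embeddingDimensions); // 4096, now read from settings on access
```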
@@ -1,14 +1,9 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.failedOllamaCallResponseHandler = exports.OllamaError = exports.ollamaErrorDataSchema = void 0;
-const zod_1 = require("zod");
+exports.failedOllamaCallResponseHandler = exports.OllamaError = void 0;
 const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
-const parseJSON_js_1 = require("../../util/parseJSON.cjs");
-exports.ollamaErrorDataSchema = zod_1.z.object({
-    error: zod_1.z.string(),
-});
 class OllamaError extends ApiCallError_js_1.ApiCallError {
-    constructor({ data, statusCode, url, requestBodyValues, message = data.error, }) {
+    constructor({ statusCode, url, requestBodyValues, message, }) {
         super({ message, statusCode, requestBodyValues, url });
         Object.defineProperty(this, "data", {
             enumerable: true,
@@ -16,7 +11,6 @@ class OllamaError extends ApiCallError_js_1.ApiCallError {
             writable: true,
             value: void 0
         });
-        this.data = data;
     }
 }
 exports.OllamaError = OllamaError;
@@ -24,6 +18,6 @@ const failedOllamaCallResponseHandler = async ({ response, url, requestBodyValue
     url,
     requestBodyValues,
     statusCode: response.status,
-    data: (0, parseJSON_js_1.parseJsonWithZod)(await response.text(), exports.ollamaErrorDataSchema),
+    message: await response.text(),
 });
 exports.failedOllamaCallResponseHandler = failedOllamaCallResponseHandler;
@@ -1,22 +1,12 @@
-import { z } from "zod";
 import { ApiCallError } from "../../core/api/ApiCallError.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
-export declare const ollamaErrorDataSchema: z.ZodObject<{
-    error: z.ZodString;
-}, "strip", z.ZodTypeAny, {
-    error: string;
-}, {
-    error: string;
-}>;
-export type OllamaErrorData = z.infer<typeof ollamaErrorDataSchema>;
 export declare class OllamaError extends ApiCallError {
-    readonly data: OllamaErrorData;
-    constructor({ data, statusCode, url, requestBodyValues, message, }: {
-        message?: string;
+    readonly data: string;
+    constructor({ statusCode, url, requestBodyValues, message, }: {
+        message: string;
         statusCode: number;
         url: string;
         requestBodyValues: unknown;
-        data: OllamaErrorData;
     });
 }
 export declare const failedOllamaCallResponseHandler: ResponseHandler<ApiCallError>;
@@ -1,11 +1,6 @@
-import { z } from "zod";
 import { ApiCallError } from "../../core/api/ApiCallError.js";
-import { parseJsonWithZod } from "../../util/parseJSON.js";
-export const ollamaErrorDataSchema = z.object({
-    error: z.string(),
-});
 export class OllamaError extends ApiCallError {
-    constructor({ data, statusCode, url, requestBodyValues, message = data.error, }) {
+    constructor({ statusCode, url, requestBodyValues, message, }) {
         super({ message, statusCode, requestBodyValues, url });
         Object.defineProperty(this, "data", {
             enumerable: true,
@@ -13,12 +8,11 @@ export class OllamaError extends ApiCallError {
             writable: true,
             value: void 0
         });
-        this.data = data;
     }
 }
 export const failedOllamaCallResponseHandler = async ({ response, url, requestBodyValues }) => new OllamaError({
     url,
     requestBodyValues,
     statusCode: response.status,
-    data: parseJsonWithZod(await response.text(), ollamaErrorDataSchema),
+    message: await response.text(),
 });
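The Ollama error handling above no longer parses the response body against a Zod `{ error: string }` schema; the raw body text now becomes the error message. A hedged sketch of what a caller sees; the `embedMany` usage and the `ApiCallError` root export are assumptions about the surrounding API, not shown in this diff:

```ts
import { ApiCallError, OllamaTextEmbeddingModel, embedMany } from "modelfusion";

try {
  await embedMany(new OllamaTextEmbeddingModel({ model: "llama2" }), ["hello"]);
} catch (error) {
  if (error instanceof ApiCallError) {
    // The message is the unparsed response body returned by the Ollama server.
    console.error(error.message);
  }
}
```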
@@ -0,0 +1,84 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OllamaTextEmbeddingModel = void 0;
+const zod_1 = require("zod");
+const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
+const OllamaError_js_1 = require("./OllamaError.cjs");
+class OllamaTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "ollama"
+        });
+        Object.defineProperty(this, "maxValuesPerCall", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1
+        });
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+    }
+    get modelName() {
+        return null;
+    }
+    get isParallizable() {
+        return this.settings.isParallizable ?? false;
+    }
+    get embeddingDimensions() {
+        return this.settings.embeddingDimensions;
+    }
+    async callAPI(texts, options) {
+        if (texts.length > this.maxValuesPerCall) {
+            throw new Error(`The Llama.cpp embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+        }
+        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOllamaEmbeddingAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                prompt: texts[0],
+            }),
+        });
+    }
+    get settingsForEvent() {
+        return {
+            embeddingDimensions: this.settings.embeddingDimensions,
+        };
+    }
+    async doEmbedValues(texts, options) {
+        const response = await this.callAPI(texts, options);
+        return {
+            response,
+            embeddings: [response.embedding],
+        };
+    }
+    withSettings(additionalSettings) {
+        return new OllamaTextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
+exports.OllamaTextEmbeddingModel = OllamaTextEmbeddingModel;
+const ollamaTextEmbeddingResponseSchema = zod_1.z.object({
+    embedding: zod_1.z.array(zod_1.z.number()),
+});
+async function callOllamaEmbeddingAPI({ api = new OllamaApiConfiguration_js_1.OllamaApiConfiguration(), abortSignal, model, prompt, }) {
+    return (0, postToApi_js_1.postJsonToApi)({
+        url: api.assembleUrl(`/api/embeddings`),
+        headers: api.headers,
+        body: { model, prompt },
+        failedResponseHandler: OllamaError_js_1.failedOllamaCallResponseHandler,
+        successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(ollamaTextEmbeddingResponseSchema),
+        abortSignal,
+    });
+}
@@ -0,0 +1,38 @@
+import { z } from "zod";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
+export interface OllamaTextEmbeddingModelSettings extends EmbeddingModelSettings {
+    api?: ApiConfiguration;
+    model: string;
+    embeddingDimensions?: number;
+    isParallizable?: boolean;
+}
+export declare class OllamaTextEmbeddingModel extends AbstractModel<OllamaTextEmbeddingModelSettings> implements EmbeddingModel<string, OllamaTextEmbeddingModelSettings> {
+    constructor(settings: OllamaTextEmbeddingModelSettings);
+    readonly provider: "ollama";
+    get modelName(): null;
+    readonly maxValuesPerCall = 1;
+    get isParallizable(): boolean;
+    readonly contextWindowSize: undefined;
+    get embeddingDimensions(): number | undefined;
+    callAPI(texts: Array<string>, options?: FunctionOptions): Promise<OllamaTextEmbeddingResponse>;
+    get settingsForEvent(): Partial<OllamaTextEmbeddingModelSettings>;
+    doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
+        response: {
+            embedding: number[];
+        };
+        embeddings: number[][];
+    }>;
+    withSettings(additionalSettings: Partial<OllamaTextEmbeddingModelSettings>): this;
+}
+declare const ollamaTextEmbeddingResponseSchema: z.ZodObject<{
+    embedding: z.ZodArray<z.ZodNumber, "many">;
+}, "strip", z.ZodTypeAny, {
+    embedding: number[];
+}, {
+    embedding: number[];
+}>;
+export type OllamaTextEmbeddingResponse = z.infer<typeof ollamaTextEmbeddingResponseSchema>;
+export {};
@@ -0,0 +1,80 @@
+import { z } from "zod";
+import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
+import { failedOllamaCallResponseHandler } from "./OllamaError.js";
+export class OllamaTextEmbeddingModel extends AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "provider", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "ollama"
+        });
+        Object.defineProperty(this, "maxValuesPerCall", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1
+        });
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+    }
+    get modelName() {
+        return null;
+    }
+    get isParallizable() {
+        return this.settings.isParallizable ?? false;
+    }
+    get embeddingDimensions() {
+        return this.settings.embeddingDimensions;
+    }
+    async callAPI(texts, options) {
+        if (texts.length > this.maxValuesPerCall) {
+            throw new Error(`The Llama.cpp embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+        }
+        return callWithRetryAndThrottle({
+            retry: this.settings.api?.retry,
+            throttle: this.settings.api?.throttle,
+            call: async () => callOllamaEmbeddingAPI({
+                ...this.settings,
+                abortSignal: options?.run?.abortSignal,
+                prompt: texts[0],
+            }),
+        });
+    }
+    get settingsForEvent() {
+        return {
+            embeddingDimensions: this.settings.embeddingDimensions,
+        };
+    }
+    async doEmbedValues(texts, options) {
+        const response = await this.callAPI(texts, options);
+        return {
+            response,
+            embeddings: [response.embedding],
+        };
+    }
+    withSettings(additionalSettings) {
+        return new OllamaTextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
+const ollamaTextEmbeddingResponseSchema = z.object({
+    embedding: z.array(z.number()),
+});
+async function callOllamaEmbeddingAPI({ api = new OllamaApiConfiguration(), abortSignal, model, prompt, }) {
+    return postJsonToApi({
+        url: api.assembleUrl(`/api/embeddings`),
+        headers: api.headers,
+        body: { model, prompt },
+        failedResponseHandler: failedOllamaCallResponseHandler,
+        successfulResponseHandler: createJsonResponseHandler(ollamaTextEmbeddingResponseSchema),
+        abortSignal,
+    });
+}
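The new files above add an Ollama text embedding model that posts `{ model, prompt }` to the local server's `/api/embeddings` endpoint and accepts one value per call (`maxValuesPerCall = 1`). A usage sketch based on the settings interface and `doEmbedValues` signature in the new typings; the model name and dimension count are placeholders:

```ts
import { OllamaTextEmbeddingModel } from "modelfusion";

// Placeholder model name and dimensions; both depend on the model pulled into Ollama.
const model = new OllamaTextEmbeddingModel({
  model: "llama2",
  embeddingDimensions: 4096,
});

// doEmbedValues is the low-level method declared above; it accepts at most
// maxValuesPerCall (= 1) texts per call.
const { embeddings } = await model.doEmbedValues([
  "Through the velvet of night, the bioluminescent jellyfish drifted.",
]);
console.log(embeddings[0].length);
```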
@@ -18,4 +18,5 @@ exports.OllamaError = void 0;
 __exportStar(require("./OllamaApiConfiguration.cjs"), exports);
 var OllamaError_js_1 = require("./OllamaError.cjs");
 Object.defineProperty(exports, "OllamaError", { enumerable: true, get: function () { return OllamaError_js_1.OllamaError; } });
+__exportStar(require("./OllamaTextEmbeddingModel.cjs"), exports);
 __exportStar(require("./OllamaTextGenerationModel.cjs"), exports);
@@ -1,3 +1,4 @@
 export * from "./OllamaApiConfiguration.js";
-export { OllamaError, OllamaErrorData } from "./OllamaError.js";
+export { OllamaError } from "./OllamaError.js";
+export * from "./OllamaTextEmbeddingModel.js";
 export * from "./OllamaTextGenerationModel.js";
@@ -1,3 +1,4 @@
 export * from "./OllamaApiConfiguration.js";
 export { OllamaError } from "./OllamaError.js";
+export * from "./OllamaTextEmbeddingModel.js";
 export * from "./OllamaTextGenerationModel.js";
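The index updates above re-export the new embedding model next to the existing Ollama exports, so (assuming the package root re-exports the provider index as in previous releases) it can be imported directly from `modelfusion`:

```ts
import { OllamaTextEmbeddingModel, OllamaTextGenerationModel } from "modelfusion";
```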
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.55.1",
+  "version": "0.57.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [