modelfusion 0.131.1 → 0.132.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,12 @@
  # Changelog

+ ## v0.132.0 - 2024-02-15
+
+ ### Added
+
+ - Support for OpenAI `text-embedding-3-small` and `text-embedding-3-large` embedding models.
+ - Support for OpenAI `gpt-4-turbo-preview`, `gpt-4-0125-preview`, and `gpt-3.5-turbo-0125` chat models.
+
  ## v0.131.1 - 2024-01-25

  ### Fixed
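Usage note (not part of the diff): the new model names can be passed anywhere modelfusion already accepts an OpenAI model string. A minimal sketch for the new embedding models, assuming the `openai` facade and `embed` function exported by this version and an `OPENAI_API_KEY` in the environment:

```ts
import { embed, openai } from "modelfusion";

// Sketch: embed a string with one of the newly supported models.
const embedding = await embed({
  model: openai.TextEmbedder({ model: "text-embedding-3-small" }),
  value: "modelfusion 0.132.0 adds the third-generation embedding models.",
});

console.log(embedding.length); // 1536 dimensions for text-embedding-3-small
```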
package/core/getRun.cjs CHANGED
@@ -24,16 +24,10 @@ var __importStar = (this && this.__importStar) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.withRun = exports.getRun = void 0;
+ const detectRuntime_js_1 = require("../util/detectRuntime.cjs");
  let runStorage;
  async function ensureLoaded() {
-     // Note: using process[versions] instead of process.versions to avoid Next.js edge runtime warnings.
-     const versions = "versions";
-     const isNode = typeof process !== "undefined" &&
-         process[versions] != null &&
-         process[versions].node != null;
-     if (!isNode)
-         return Promise.resolve();
-     if (!runStorage) {
+     if ((0, detectRuntime_js_1.detectRuntime)() === "node" && !runStorage) {
          // Note: using "async_hooks" instead of "node:async_hooks" to avoid webpack fallback problems.
          const { AsyncLocalStorage } = await Promise.resolve().then(() => __importStar(require("async_hooks")));
          runStorage = new AsyncLocalStorage();
package/core/getRun.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { Run } from "./Run";
+ import { Run } from "./Run.js";
  /**
   * Returns the run stored in an AsyncLocalStorage if running in Node.js. It can be set with `withRun()`.
   */
package/core/getRun.js CHANGED
@@ -1,13 +1,7 @@
+ import { detectRuntime } from "../util/detectRuntime.js";
  let runStorage;
  async function ensureLoaded() {
-     // Note: using process[versions] instead of process.versions to avoid Next.js edge runtime warnings.
-     const versions = "versions";
-     const isNode = typeof process !== "undefined" &&
-         process[versions] != null &&
-         process[versions].node != null;
-     if (!isNode)
-         return Promise.resolve();
-     if (!runStorage) {
+     if (detectRuntime() === "node" && !runStorage) {
          // Note: using "async_hooks" instead of "node:async_hooks" to avoid webpack fallback problems.
          const { AsyncLocalStorage } = await import("async_hooks");
          runStorage = new AsyncLocalStorage();
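Both builds now delegate environment detection to a `util/detectRuntime` helper that is not itself included in this diff. A plausible reconstruction based on the inline checks it replaces; only the "node" branch is implied by the diff, the other return values are assumptions:

```ts
// Hypothetical sketch of util/detectRuntime.ts (not shown in this diff).
export function detectRuntime(): "node" | "browser" | "unknown" {
  // Same trick as the inline check it replaces: index process with a
  // variable so Next.js edge-runtime static analysis does not warn about
  // direct access to process.versions.
  const versions = "versions";
  const proc = (globalThis as any).process;
  if (proc != null && proc[versions] != null && proc[versions].node != null) {
    return "node";
  }
  if (typeof (globalThis as any).window !== "undefined") {
    return "browser";
  }
  return "unknown";
}
```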
package/model-provider/openai/OpenAIChatModel.cjs CHANGED
@@ -33,11 +33,21 @@ exports.OPENAI_CHAT_MODELS = {
      fineTunedPromptTokenCostInMillicents: null,
      fineTunedCompletionTokenCostInMillicents: null,
  },
+ "gpt-4-turbo-preview": {
+     contextWindowSize: 128000,
+     promptTokenCostInMillicents: 1,
+     completionTokenCostInMillicents: 3,
+ },
  "gpt-4-1106-preview": {
      contextWindowSize: 128000,
      promptTokenCostInMillicents: 1,
      completionTokenCostInMillicents: 3,
  },
+ "gpt-4-0125-preview": {
+     contextWindowSize: 128000,
+     promptTokenCostInMillicents: 1,
+     completionTokenCostInMillicents: 3,
+ },
  "gpt-4-vision-preview": {
      contextWindowSize: 128000,
      promptTokenCostInMillicents: 1,
@@ -65,6 +75,11 @@ exports.OPENAI_CHAT_MODELS = {
      fineTunedPromptTokenCostInMillicents: 0.3,
      fineTunedCompletionTokenCostInMillicents: 0.6,
  },
+ "gpt-3.5-turbo-0125": {
+     contextWindowSize: 16385,
+     promptTokenCostInMillicents: 0.05,
+     completionTokenCostInMillicents: 0.15,
+ },
  "gpt-3.5-turbo-1106": {
      contextWindowSize: 16385,
      promptTokenCostInMillicents: 0.1,
package/model-provider/openai/OpenAIChatModel.d.ts CHANGED
@@ -26,11 +26,21 @@ export declare const OPENAI_CHAT_MODELS: {
      fineTunedPromptTokenCostInMillicents: null;
      fineTunedCompletionTokenCostInMillicents: null;
  };
+ "gpt-4-turbo-preview": {
+     contextWindowSize: number;
+     promptTokenCostInMillicents: number;
+     completionTokenCostInMillicents: number;
+ };
  "gpt-4-1106-preview": {
      contextWindowSize: number;
      promptTokenCostInMillicents: number;
      completionTokenCostInMillicents: number;
  };
+ "gpt-4-0125-preview": {
+     contextWindowSize: number;
+     promptTokenCostInMillicents: number;
+     completionTokenCostInMillicents: number;
+ };
  "gpt-4-vision-preview": {
      contextWindowSize: number;
      promptTokenCostInMillicents: number;
@@ -58,6 +68,11 @@ export declare const OPENAI_CHAT_MODELS: {
      fineTunedPromptTokenCostInMillicents: number;
      fineTunedCompletionTokenCostInMillicents: number;
  };
+ "gpt-3.5-turbo-0125": {
+     contextWindowSize: number;
+     promptTokenCostInMillicents: number;
+     completionTokenCostInMillicents: number;
+ };
  "gpt-3.5-turbo-1106": {
      contextWindowSize: number;
      promptTokenCostInMillicents: number;
package/model-provider/openai/OpenAIChatModel.js CHANGED
@@ -30,11 +30,21 @@ export const OPENAI_CHAT_MODELS = {
      fineTunedPromptTokenCostInMillicents: null,
      fineTunedCompletionTokenCostInMillicents: null,
  },
+ "gpt-4-turbo-preview": {
+     contextWindowSize: 128000,
+     promptTokenCostInMillicents: 1,
+     completionTokenCostInMillicents: 3,
+ },
  "gpt-4-1106-preview": {
      contextWindowSize: 128000,
      promptTokenCostInMillicents: 1,
      completionTokenCostInMillicents: 3,
  },
+ "gpt-4-0125-preview": {
+     contextWindowSize: 128000,
+     promptTokenCostInMillicents: 1,
+     completionTokenCostInMillicents: 3,
+ },
  "gpt-4-vision-preview": {
      contextWindowSize: 128000,
      promptTokenCostInMillicents: 1,
@@ -62,6 +72,11 @@ export const OPENAI_CHAT_MODELS = {
      fineTunedPromptTokenCostInMillicents: 0.3,
      fineTunedCompletionTokenCostInMillicents: 0.6,
  },
+ "gpt-3.5-turbo-0125": {
+     contextWindowSize: 16385,
+     promptTokenCostInMillicents: 0.05,
+     completionTokenCostInMillicents: 0.15,
+ },
  "gpt-3.5-turbo-1106": {
      contextWindowSize: 16385,
      promptTokenCostInMillicents: 0.1,
package/model-provider/openai/OpenAITextEmbeddingModel.cjs CHANGED
@@ -5,6 +5,16 @@ const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens
  const AbstractOpenAITextEmbeddingModel_js_1 = require("./AbstractOpenAITextEmbeddingModel.cjs");
  const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
  exports.OPENAI_TEXT_EMBEDDING_MODELS = {
+ "text-embedding-3-small": {
+     contextWindowSize: 8192,
+     embeddingDimensions: 1536,
+     tokenCostInMillicents: 0.002,
+ },
+ "text-embedding-3-large": {
+     contextWindowSize: 8192,
+     embeddingDimensions: 3072,
+     tokenCostInMillicents: 0.013,
+ },
  "text-embedding-ada-002": {
      contextWindowSize: 8192,
      embeddingDimensions: 1536,
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts CHANGED
@@ -2,6 +2,16 @@ import { EmbeddingModel } from "../../model-function/embed/EmbeddingModel.js";
  import { AbstractOpenAITextEmbeddingModel, AbstractOpenAITextEmbeddingModelSettings, OpenAITextEmbeddingResponse } from "./AbstractOpenAITextEmbeddingModel.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
+ "text-embedding-3-small": {
+     contextWindowSize: number;
+     embeddingDimensions: number;
+     tokenCostInMillicents: number;
+ };
+ "text-embedding-3-large": {
+     contextWindowSize: number;
+     embeddingDimensions: number;
+     tokenCostInMillicents: number;
+ };
  "text-embedding-ada-002": {
      contextWindowSize: number;
      embeddingDimensions: number;
@@ -9,7 +19,7 @@ export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
      };
  };
  export type OpenAITextEmbeddingModelType = keyof typeof OPENAI_TEXT_EMBEDDING_MODELS;
- export declare const isOpenAIEmbeddingModel: (model: string) => model is "text-embedding-ada-002";
+ export declare const isOpenAIEmbeddingModel: (model: string) => model is "text-embedding-3-small" | "text-embedding-3-large" | "text-embedding-ada-002";
  export declare const calculateOpenAIEmbeddingCostInMillicents: ({ model, responses, }: {
      model: OpenAITextEmbeddingModelType;
      responses: OpenAITextEmbeddingResponse[];
@@ -34,7 +44,7 @@ export interface OpenAITextEmbeddingModelSettings extends AbstractOpenAITextEmbe
  export declare class OpenAITextEmbeddingModel extends AbstractOpenAITextEmbeddingModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
      constructor(settings: OpenAITextEmbeddingModelSettings);
      readonly provider: "openai";
-     get modelName(): "text-embedding-ada-002";
+     get modelName(): "text-embedding-3-small" | "text-embedding-3-large" | "text-embedding-ada-002";
      readonly embeddingDimensions: number;
      readonly tokenizer: TikTokenTokenizer;
      readonly contextWindowSize: number;
package/model-provider/openai/OpenAITextEmbeddingModel.js CHANGED
@@ -2,6 +2,16 @@ import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AbstractOpenAITextEmbeddingModel, } from "./AbstractOpenAITextEmbeddingModel.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  export const OPENAI_TEXT_EMBEDDING_MODELS = {
+ "text-embedding-3-small": {
+     contextWindowSize: 8192,
+     embeddingDimensions: 1536,
+     tokenCostInMillicents: 0.002,
+ },
+ "text-embedding-3-large": {
+     contextWindowSize: 8192,
+     embeddingDimensions: 3072,
+     tokenCostInMillicents: 0.013,
+ },
  "text-embedding-ada-002": {
      contextWindowSize: 8192,
      embeddingDimensions: 1536,
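The embedding tables use the same millicent-per-token convention. Comparing the two new entries directly (standalone sketch using the literal values above, not library code):

```ts
const small = { embeddingDimensions: 1536, tokenCostInMillicents: 0.002 };
const large = { embeddingDimensions: 3072, tokenCostInMillicents: 0.013 };

// text-embedding-3-large returns vectors twice as wide as small's...
console.log(large.embeddingDimensions / small.embeddingDimensions); // 2
// ...at 6.5x the per-token price: $0.13 vs $0.02 per 1M tokens.
console.log(large.tokenCostInMillicents / small.tokenCostInMillicents); // 6.5
```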
package/model-provider/openai/TikTokenTokenizer.cjs CHANGED
@@ -58,17 +58,22 @@ function getTiktokenBPE(model) {
      case "gpt-3.5-turbo-0301":
      case "gpt-3.5-turbo-0613":
      case "gpt-3.5-turbo-1106":
+     case "gpt-3.5-turbo-0125":
      case "gpt-3.5-turbo-16k":
      case "gpt-3.5-turbo-16k-0613":
      case "gpt-3.5-turbo-instruct":
      case "gpt-4":
      case "gpt-4-0314":
      case "gpt-4-0613":
+     case "gpt-4-turbo-preview":
      case "gpt-4-1106-preview":
+     case "gpt-4-0125-preview":
      case "gpt-4-vision-preview":
      case "gpt-4-32k":
      case "gpt-4-32k-0314":
      case "gpt-4-32k-0613":
+     case "text-embedding-3-small":
+     case "text-embedding-3-large":
      case "text-embedding-ada-002": {
          return cl100k_base_1.default;
      }
package/model-provider/openai/TikTokenTokenizer.js CHANGED
@@ -51,17 +51,22 @@ function getTiktokenBPE(model) {
      case "gpt-3.5-turbo-0301":
      case "gpt-3.5-turbo-0613":
      case "gpt-3.5-turbo-1106":
+     case "gpt-3.5-turbo-0125":
      case "gpt-3.5-turbo-16k":
      case "gpt-3.5-turbo-16k-0613":
      case "gpt-3.5-turbo-instruct":
      case "gpt-4":
      case "gpt-4-0314":
      case "gpt-4-0613":
+     case "gpt-4-turbo-preview":
      case "gpt-4-1106-preview":
+     case "gpt-4-0125-preview":
      case "gpt-4-vision-preview":
      case "gpt-4-32k":
      case "gpt-4-32k-0314":
      case "gpt-4-32k-0613":
+     case "text-embedding-3-small":
+     case "text-embedding-3-large":
      case "text-embedding-ada-002": {
          return cl100k_base;
      }
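Mapping the new model names to `cl100k_base` means tokenization and token counting work for them immediately. A minimal sketch, assuming the `openai.Tokenizer` facade and `countTokens` helper exported by this version:

```ts
import { countTokens, openai } from "modelfusion";

// Sketch: the new chat model names now resolve to the cl100k_base encoding.
const tokenizer = openai.Tokenizer({ model: "gpt-4-turbo-preview" });
const tokenCount = await countTokens(tokenizer, "Hello, modelfusion!");

console.log(tokenCount);
```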
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
      "name": "modelfusion",
      "description": "The TypeScript library for building AI applications.",
-     "version": "0.131.1",
+     "version": "0.132.0",
      "author": "Lars Grammel",
      "license": "MIT",
      "keywords": [