modelfusion 0.47.2 → 0.47.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -17,7 +17,7 @@
17
17
 
18
18
  ModelFusion is a library for building AI apps, chatbots, and agents. It provides abstractions for AI models, vector indices, and tools.
19
19
 
20
- - **Multimodal Support**: Beyond just LLMs, ModelFusion encompasses a diverse array of models including text generation, text-to-speech, speech-to-text, and image generation, allowing you to build multifaceted AI applications with ease.
20
+ - **Multimodal Support**: Beyond just LLMs, ModelFusion encompasses a diverse array of models including text generation, text-to-speech, speech-to-text, and image generation, allowing you to build multi-modal AI applications with ease.
21
21
  - **Flexibility and control**: AI application development can be complex and unique to each project. With ModelFusion, you have complete control over the prompts and model settings, and you can access the raw responses from the models quickly to build what you need.
22
22
  - **Type inference and validation**: ModelFusion uses TypeScript to infer types wherever possible and to validate model responses. By default, [Zod](https://github.com/colinhacks/zod) is used for type validation, but you can also use other libraries.
23
23
  - **Guards**: ModelFusion provides a guard function that you can use to implement retry on error, redacting and changing responses, etc.
@@ -2,7 +2,13 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.loadApiKey = void 0;
4
4
  function loadApiKey({ apiKey, environmentVariableName, apiKeyParameterName = "apiKey", description, }) {
5
- apiKey ??= process.env[environmentVariableName];
5
+ if (apiKey != null) {
6
+ return apiKey;
7
+ }
8
+ if (typeof process === "undefined") {
9
+ throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`);
10
+ }
11
+ apiKey = process.env[environmentVariableName];
6
12
  if (apiKey == null) {
7
13
  throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or set it as an environment variable named ${environmentVariableName}.`);
8
14
  }
@@ -1,5 +1,11 @@
1
1
  export function loadApiKey({ apiKey, environmentVariableName, apiKeyParameterName = "apiKey", description, }) {
2
- apiKey ??= process.env[environmentVariableName];
2
+ if (apiKey != null) {
3
+ return apiKey;
4
+ }
5
+ if (typeof process === "undefined") {
6
+ throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter. Environment variables is not supported in this environment.`);
7
+ }
8
+ apiKey = process.env[environmentVariableName];
3
9
  if (apiKey == null) {
4
10
  throw new Error(`${description} API key is missing. Pass it using the '${apiKeyParameterName}' parameter or set it as an environment variable named ${environmentVariableName}.`);
5
11
  }
@@ -1,7 +1,13 @@
1
1
  "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
2
5
  Object.defineProperty(exports, "__esModule", { value: true });
3
6
  exports.TikTokenTokenizer = void 0;
4
- const js_tiktoken_1 = require("js-tiktoken");
7
+ const lite_1 = require("js-tiktoken/lite");
8
+ const cl100k_base_1 = __importDefault(require("js-tiktoken/ranks/cl100k_base"));
9
+ const p50k_base_1 = __importDefault(require("js-tiktoken/ranks/p50k_base"));
10
+ const r50k_base_1 = __importDefault(require("js-tiktoken/ranks/r50k_base"));
5
11
  const never_js_1 = require("../../util/never.cjs");
6
12
  /**
7
13
  * TikToken tokenizer for OpenAI language models.
@@ -29,9 +35,7 @@ class TikTokenTokenizer {
29
35
  writable: true,
30
36
  value: void 0
31
37
  });
32
- this.tiktoken = (0, js_tiktoken_1.getEncoding)("model" in options
33
- ? getEncodingNameForModel(options.model)
34
- : options.encoding);
38
+ this.tiktoken = new lite_1.Tiktoken(getTiktokenBPE(options.model));
35
39
  }
36
40
  async tokenize(text) {
37
41
  return this.tiktoken.encode(text);
@@ -50,12 +54,12 @@ class TikTokenTokenizer {
50
54
  exports.TikTokenTokenizer = TikTokenTokenizer;
51
55
  // implemented here (instead of using js-tiktoken) to be able to quickly update it
52
56
  // when new models are released
53
- function getEncodingNameForModel(model) {
57
+ function getTiktokenBPE(model) {
54
58
  switch (model) {
55
59
  case "code-davinci-002":
56
60
  case "text-davinci-002":
57
61
  case "text-davinci-003": {
58
- return "p50k_base";
62
+ return p50k_base_1.default;
59
63
  }
60
64
  case "ada":
61
65
  case "babbage":
@@ -64,7 +68,7 @@ function getEncodingNameForModel(model) {
64
68
  case "text-ada-001":
65
69
  case "text-babbage-001":
66
70
  case "text-curie-001": {
67
- return "r50k_base";
71
+ return r50k_base_1.default;
68
72
  }
69
73
  case "babbage-002":
70
74
  case "davinci-002":
@@ -81,7 +85,7 @@ function getEncodingNameForModel(model) {
81
85
  case "gpt-4-32k-0314":
82
86
  case "gpt-4-32k-0613":
83
87
  case "text-embedding-ada-002": {
84
- return "cl100k_base";
88
+ return cl100k_base_1.default;
85
89
  }
86
90
  default: {
87
91
  (0, never_js_1.never)(model);
@@ -1,4 +1,3 @@
1
- import { TiktokenEncoding } from "js-tiktoken";
2
1
  import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
3
2
  import { OpenAITextEmbeddingModelType } from "./OpenAITextEmbeddingModel.js";
4
3
  import { OpenAITextGenerationBaseModelType } from "./OpenAITextGenerationModel.js";
@@ -24,8 +23,6 @@ export declare class TikTokenTokenizer implements FullTokenizer {
24
23
  */
25
24
  constructor(options: {
26
25
  model: OpenAIChatBaseModelType | OpenAITextGenerationBaseModelType | OpenAITextEmbeddingModelType;
27
- } | {
28
- encoding: TiktokenEncoding;
29
26
  });
30
27
  private readonly tiktoken;
31
28
  tokenize(text: string): Promise<number[]>;
@@ -1,4 +1,7 @@
1
- import { getEncoding } from "js-tiktoken";
1
+ import { Tiktoken } from "js-tiktoken/lite";
2
+ import cl100k_base from "js-tiktoken/ranks/cl100k_base";
3
+ import p50k_base from "js-tiktoken/ranks/p50k_base";
4
+ import r50k_base from "js-tiktoken/ranks/r50k_base";
2
5
  import { never } from "../../util/never.js";
3
6
  /**
4
7
  * TikToken tokenizer for OpenAI language models.
@@ -26,9 +29,7 @@ export class TikTokenTokenizer {
26
29
  writable: true,
27
30
  value: void 0
28
31
  });
29
- this.tiktoken = getEncoding("model" in options
30
- ? getEncodingNameForModel(options.model)
31
- : options.encoding);
32
+ this.tiktoken = new Tiktoken(getTiktokenBPE(options.model));
32
33
  }
33
34
  async tokenize(text) {
34
35
  return this.tiktoken.encode(text);
@@ -46,12 +47,12 @@ export class TikTokenTokenizer {
46
47
  }
47
48
  // implemented here (instead of using js-tiktoken) to be able to quickly update it
48
49
  // when new models are released
49
- function getEncodingNameForModel(model) {
50
+ function getTiktokenBPE(model) {
50
51
  switch (model) {
51
52
  case "code-davinci-002":
52
53
  case "text-davinci-002":
53
54
  case "text-davinci-003": {
54
- return "p50k_base";
55
+ return p50k_base;
55
56
  }
56
57
  case "ada":
57
58
  case "babbage":
@@ -60,7 +61,7 @@ function getEncodingNameForModel(model) {
60
61
  case "text-ada-001":
61
62
  case "text-babbage-001":
62
63
  case "text-curie-001": {
63
- return "r50k_base";
64
+ return r50k_base;
64
65
  }
65
66
  case "babbage-002":
66
67
  case "davinci-002":
@@ -77,7 +78,7 @@ function getEncodingNameForModel(model) {
77
78
  case "gpt-4-32k-0314":
78
79
  case "gpt-4-32k-0613":
79
80
  case "text-embedding-ada-002": {
80
- return "cl100k_base";
81
+ return cl100k_base;
81
82
  }
82
83
  default: {
83
84
  never(model);
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
4
- "version": "0.47.2",
4
+ "version": "0.47.3",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [