modelfusion 0.18.0 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -20,6 +20,14 @@ const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
  * @see https://openai.com/pricing
  */
 exports.OPENAI_TEXT_GENERATION_MODELS = {
+    "davinci-002": {
+        contextWindowSize: 16384,
+        tokenCostInMillicents: 0.2,
+    },
+    "babbage-002": {
+        contextWindowSize: 16384,
+        tokenCostInMillicents: 0.04,
+    },
     "text-davinci-003": {
         contextWindowSize: 4096,
         tokenCostInMillicents: 2,
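
For reference, tokenCostInMillicents is a per-token price in thousandths of a cent, so the new entries correspond to $0.002 per 1K tokens for davinci-002 and $0.0004 per 1K tokens for babbage-002. A minimal arithmetic sketch of that conversion (plain TypeScript, not code from the package):

// Per-token prices as declared above for the two new base models.
const tokenCostInMillicents = { "davinci-002": 0.2, "babbage-002": 0.04 } as const;

// 1 millicent = 1/1000 cent = 1/100_000 dollar.
const usdPer1kTokens = (model: keyof typeof tokenCostInMillicents) =>
  (tokenCostInMillicents[model] * 1000) / 100_000;

usdPer1kTokens("davinci-002"); // 0.002  -> $0.002 per 1K tokens
usdPer1kTokens("babbage-002"); // 0.0004 -> $0.0004 per 1K tokens
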
@@ -16,6 +16,14 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export declare const OPENAI_TEXT_GENERATION_MODELS: {
+    "davinci-002": {
+        contextWindowSize: number;
+        tokenCostInMillicents: number;
+    };
+    "babbage-002": {
+        contextWindowSize: number;
+        tokenCostInMillicents: number;
+    };
     "text-davinci-003": {
         contextWindowSize: number;
         tokenCostInMillicents: number;
@@ -58,7 +66,7 @@ export declare const OPENAI_TEXT_GENERATION_MODELS: {
     };
 };
 export type OpenAITextGenerationModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
-export declare const isOpenAITextGenerationModel: (model: string) => model is "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
+export declare const isOpenAITextGenerationModel: (model: string) => model is "davinci-002" | "babbage-002" | "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
 export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, response, }: {
     model: OpenAITextGenerationModelType;
     response: OpenAITextGenerationResponse;
@@ -102,7 +110,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
 export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextGenerationModelSettings> implements TextGenerationModel<string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings> {
     constructor(settings: OpenAITextGenerationModelSettings);
     readonly provider: "openai";
-    get modelName(): "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
+    get modelName(): "davinci-002" | "babbage-002" | "text-davinci-003" | "text-davinci-002" | "code-davinci-002" | "davinci" | "text-curie-001" | "curie" | "text-babbage-001" | "babbage" | "text-ada-001" | "ada";
     readonly contextWindowSize: number;
     readonly tokenizer: TikTokenTokenizer;
     private get apiKey();
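
Because modelName and isOpenAITextGenerationModel now include the two new identifiers, the base models plug into the existing class unchanged. A hedged sketch against the declarations above, assuming both symbols are re-exported from the package root and that the API key is supplied via settings or the OPENAI_API_KEY environment variable:

import { OpenAITextGenerationModel, isOpenAITextGenerationModel } from "modelfusion";

const requested: string = "davinci-002";

// The updated type guard narrows the string to the widened model-name union.
if (isOpenAITextGenerationModel(requested)) {
  // Settings beyond `model` follow OpenAITextGenerationModelSettings; only
  // members visible in this diff are used here.
  const model = new OpenAITextGenerationModel({ model: requested });
  model.modelName;         // "davinci-002"
  model.contextWindowSize; // 16384
}
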
@@ -14,6 +14,14 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export const OPENAI_TEXT_GENERATION_MODELS = {
+    "davinci-002": {
+        contextWindowSize: 16384,
+        tokenCostInMillicents: 0.2,
+    },
+    "babbage-002": {
+        contextWindowSize: 16384,
+        tokenCostInMillicents: 0.04,
+    },
     "text-davinci-003": {
         contextWindowSize: 4096,
         tokenCostInMillicents: 2,
@@ -57,13 +57,17 @@ function getEncodingNameForModel(model) {
         case "text-davinci-003": {
             return "p50k_base";
         }
+        case "babbage-002":
+        case "davinci-002":
         case "ada":
         case "babbage":
         case "curie":
         case "davinci":
         case "text-ada-001":
         case "text-babbage-001":
-        case "text-curie-001":
+        case "text-curie-001": {
+            return "r50k_base";
+        }
         case "gpt-3.5-turbo":
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
@@ -53,13 +53,17 @@ function getEncodingNameForModel(model) {
         case "text-davinci-003": {
             return "p50k_base";
         }
+        case "babbage-002":
+        case "davinci-002":
         case "ada":
         case "babbage":
         case "curie":
         case "davinci":
         case "text-ada-001":
         case "text-babbage-001":
-        case "text-curie-001":
+        case "text-curie-001": {
+            return "r50k_base";
+        }
         case "gpt-3.5-turbo":
         case "gpt-3.5-turbo-0301":
         case "gpt-3.5-turbo-0613":
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.18.0",
+  "version": "0.19.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [