@node-llm/core 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +46 -7
  2. package/dist/chat/Chat.d.ts +5 -0
  3. package/dist/chat/Chat.d.ts.map +1 -1
  4. package/dist/chat/Chat.js +26 -4
  5. package/dist/chat/ChatOptions.d.ts +3 -0
  6. package/dist/chat/ChatOptions.d.ts.map +1 -1
  7. package/dist/chat/ChatResponse.d.ts +3 -0
  8. package/dist/chat/ChatResponse.d.ts.map +1 -1
  9. package/dist/chat/ChatResponse.js +3 -0
  10. package/dist/llm.d.ts +5 -1
  11. package/dist/llm.d.ts.map +1 -1
  12. package/dist/llm.js +18 -6
  13. package/dist/models/ModelRegistry.d.ts +39 -12
  14. package/dist/models/ModelRegistry.d.ts.map +1 -1
  15. package/dist/models/ModelRegistry.js +50 -40
  16. package/dist/models/models.d.ts +972 -0
  17. package/dist/models/models.d.ts.map +1 -0
  18. package/dist/models/models.js +7026 -0
  19. package/dist/models/types.d.ts +50 -0
  20. package/dist/models/types.d.ts.map +1 -0
  21. package/dist/models/types.js +1 -0
  22. package/dist/providers/Provider.d.ts +4 -0
  23. package/dist/providers/Provider.d.ts.map +1 -1
  24. package/dist/providers/anthropic/AnthropicProvider.d.ts.map +1 -1
  25. package/dist/providers/anthropic/AnthropicProvider.js +1 -3
  26. package/dist/providers/anthropic/Capabilities.d.ts +1 -37
  27. package/dist/providers/anthropic/Capabilities.d.ts.map +1 -1
  28. package/dist/providers/anthropic/Capabilities.js +59 -130
  29. package/dist/providers/anthropic/Chat.d.ts.map +1 -1
  30. package/dist/providers/anthropic/Chat.js +6 -2
  31. package/dist/providers/anthropic/Models.d.ts +1 -0
  32. package/dist/providers/anthropic/Models.d.ts.map +1 -1
  33. package/dist/providers/anthropic/Models.js +36 -41
  34. package/dist/providers/anthropic/Streaming.d.ts.map +1 -1
  35. package/dist/providers/anthropic/Streaming.js +10 -1
  36. package/dist/providers/gemini/Capabilities.d.ts +28 -7
  37. package/dist/providers/gemini/Capabilities.d.ts.map +1 -1
  38. package/dist/providers/gemini/Capabilities.js +32 -20
  39. package/dist/providers/gemini/Chat.d.ts.map +1 -1
  40. package/dist/providers/gemini/Chat.js +9 -11
  41. package/dist/providers/gemini/Models.d.ts +1 -0
  42. package/dist/providers/gemini/Models.d.ts.map +1 -1
  43. package/dist/providers/gemini/Models.js +46 -26
  44. package/dist/providers/openai/Capabilities.d.ts +3 -11
  45. package/dist/providers/openai/Capabilities.d.ts.map +1 -1
  46. package/dist/providers/openai/Capabilities.js +119 -122
  47. package/dist/providers/openai/Chat.d.ts.map +1 -1
  48. package/dist/providers/openai/Chat.js +19 -17
  49. package/dist/providers/openai/Embedding.d.ts.map +1 -1
  50. package/dist/providers/openai/Embedding.js +2 -1
  51. package/dist/providers/openai/Image.d.ts.map +1 -1
  52. package/dist/providers/openai/Image.js +2 -1
  53. package/dist/providers/openai/ModelDefinitions.d.ts +1 -24
  54. package/dist/providers/openai/ModelDefinitions.d.ts.map +1 -1
  55. package/dist/providers/openai/ModelDefinitions.js +1 -211
  56. package/dist/providers/openai/Models.d.ts +1 -0
  57. package/dist/providers/openai/Models.d.ts.map +1 -1
  58. package/dist/providers/openai/Models.js +46 -22
  59. package/dist/providers/openai/Moderation.d.ts.map +1 -1
  60. package/dist/providers/openai/Moderation.js +2 -1
  61. package/dist/providers/openai/Streaming.d.ts.map +1 -1
  62. package/dist/providers/openai/Streaming.js +5 -1
  63. package/dist/providers/openai/Transcription.d.ts.map +1 -1
  64. package/dist/providers/openai/Transcription.js +3 -2
  65. package/dist/providers/openai/index.d.ts.map +1 -1
  66. package/dist/providers/openai/index.js +2 -1
  67. package/dist/providers/openai/utils.d.ts +20 -0
  68. package/dist/providers/openai/utils.d.ts.map +1 -0
  69. package/dist/providers/openai/utils.js +25 -0
  70. package/package.json +1 -1
package/dist/providers/gemini/Capabilities.js
@@ -1,5 +1,9 @@
+ import { ModelRegistry } from "../../models/ModelRegistry.js";
  export class Capabilities {
      static getContextWindow(modelId) {
+         const val = ModelRegistry.getContextWindow(modelId, "gemini");
+         if (val !== undefined && val !== null)
+             return val;
          const id = this.normalizeModelId(modelId);
          if (id.match(/gemini-2\.5-pro-exp-03-25|gemini-2\.0-flash|gemini-2\.0-flash-lite|gemini-1\.5-flash|gemini-1\.5-flash-8b/)) {
              return 1_048_576;
@@ -19,6 +23,9 @@ export class Capabilities {
          return 32_768;
      }
      static getMaxOutputTokens(modelId) {
+         const val = ModelRegistry.getMaxOutputTokens(modelId, "gemini");
+         if (val !== undefined && val !== null)
+             return val;
          const id = this.normalizeModelId(modelId);
          if (id.match(/gemini-2\.5-pro-exp-03-25/)) {
              return 64_000;
@@ -35,6 +42,9 @@ export class Capabilities {
          return 4_096;
      }
      static supportsVision(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.modalities?.input?.includes("image"))
+             return true;
          const id = this.normalizeModelId(modelId);
          if (id.match(/text-embedding|embedding-001|aqa/)) {
              return false;
@@ -42,6 +52,9 @@ export class Capabilities {
          return !!id.match(/gemini|flash|pro|imagen/);
      }
      static supportsTools(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.capabilities?.includes("function_calling"))
+             return true;
          const id = this.normalizeModelId(modelId);
          if (id.match(/text-embedding|embedding-001|aqa|flash-lite|imagen|gemini-2\.0-flash-lite/)) {
              return false;
@@ -49,49 +62,48 @@ export class Capabilities {
          return !!id.match(/gemini|pro|flash/);
      }
      static supportsStructuredOutput(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.capabilities?.includes("structured_output"))
+             return true;
          const id = this.normalizeModelId(modelId);
          if (id.match(/text-embedding|embedding-001|aqa|imagen/)) {
              return false;
          }
          return true;
      }
+     static supportsSystemInstructions(modelId) {
+         return true;
+     }
      static supportsJsonMode(modelId) {
          return this.supportsStructuredOutput(modelId);
      }
      static supportsEmbeddings(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.modalities?.output?.includes("embeddings"))
+             return true;
          const id = this.normalizeModelId(modelId);
          return !!id.match(/text-embedding|embedding|gemini-embedding/);
      }
      static supportsImageGeneration(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.capabilities?.includes("image_generation") || model?.modalities?.output?.includes("image"))
+             return true;
          const id = this.normalizeModelId(modelId);
          return !!id.match(/imagen/);
      }
      static supportsTranscription(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.modalities?.input?.includes("audio"))
+             return true;
          const id = this.normalizeModelId(modelId);
          return !!id.match(/gemini|flash|pro/);
      }
      static supportsModeration(modelId) {
          return false;
      }
-     static normalizeTemperature(temperature, _modelId) {
+     static normalizeTemperature(temperature, model) {
          return temperature;
      }
-     static getFamily(modelId) {
-         const id = this.normalizeModelId(modelId);
-         if (id.startsWith("gemini-1.5-pro"))
-             return "gemini-1.5-pro";
-         if (id.startsWith("gemini-1.5-flash"))
-             return "gemini-1.5-flash";
-         if (id.startsWith("gemini-2.0-flash"))
-             return "gemini-2.0-flash";
-         if (id.startsWith("gemini-2.0-flash-lite"))
-             return "gemini-2.0-flash-lite";
-         if (id.startsWith("text-embedding"))
-             return "text-embedding";
-         if (id.startsWith("imagen"))
-             return "imagen";
-         return "other";
-     }
      static getModalities(modelId) {
          const input = ["text"];
          const output = ["text"];
@@ -118,6 +130,9 @@ export class Capabilities {
          return caps;
      }
      static getPricing(modelId) {
+         const model = ModelRegistry.find(modelId, "gemini");
+         if (model?.pricing)
+             return model.pricing;
          const id = this.normalizeModelId(modelId);
          let input = 0;
          let output = 0;
@@ -142,9 +157,6 @@
              }
          };
      }
-     static formatDisplayName(modelId) {
-         return modelId.replace("models/", "").replace(/-/g, " ").replace(/\b\w/g, l => l.toUpperCase());
-     }
      static normalizeModelId(modelId) {
          return modelId.replace("models/", "");
      }
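The recurring change in this file is a registry-first lookup: each capability check now consults the generated ModelRegistry and only falls back to the old regex heuristics when the registry has no entry. A minimal sketch of that pattern, assuming the same relative import path the dist code uses (whether ModelRegistry is also re-exported from the package root is not shown in this diff); the helper name and fallback value below are illustrative:

```ts
import { ModelRegistry } from "../../models/ModelRegistry.js";

// Registry-first lookup with a heuristic fallback, mirroring the updated
// Capabilities.getContextWindow. contextWindowFor and the hard-coded
// fallback are placeholders, not part of the package.
function contextWindowFor(modelId: string): number {
  const fromRegistry = ModelRegistry.getContextWindow(modelId, "gemini");
  if (fromRegistry !== undefined && fromRegistry !== null) {
    return fromRegistry; // generated registry data wins when present
  }
  return 1_048_576; // the real class keeps its regex-based defaults here
}
```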
package/dist/providers/gemini/Chat.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../../src/providers/gemini/Chat.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAM3D,qBAAa,UAAU;IACT,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IA0F1D,OAAO,CAAC,cAAc;CAwBvB"}
+ {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../../src/providers/gemini/Chat.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAS,MAAM,gBAAgB,CAAC;AAOlE,qBAAa,UAAU;IACT,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;IAyF1D,OAAO,CAAC,cAAc;CAwBvB"}
package/dist/providers/gemini/Chat.js
@@ -1,6 +1,7 @@
  import { Capabilities } from "./Capabilities.js";
  import { handleGeminiError } from "./Errors.js";
  import { GeminiChatUtils } from "./ChatUtils.js";
+ import { ModelRegistry } from "../../models/ModelRegistry.js";
  export class GeminiChat {
      baseUrl;
      apiKey;
@@ -19,24 +20,20 @@ export class GeminiChat {
          if (request.response_format?.type === "json_object") {
              generationConfig.responseMimeType = "application/json";
          }
-         else if (request.response_format?.type === "json_schema") {
-             generationConfig.responseMimeType = "application/json";
-             if (request.response_format.json_schema?.schema) {
-                 generationConfig.responseSchema = request.response_format.json_schema.schema;
-             }
-         }
-         if (request.response_format?.type === "json_object") {
-             generationConfig.responseMimeType = "application/json";
-         }
          else if (request.response_format?.type === "json_schema") {
              generationConfig.responseMimeType = "application/json";
              if (request.response_format.json_schema?.schema) {
                  generationConfig.responseSchema = this.sanitizeSchema(request.response_format.json_schema.schema);
              }
          }
+         const { model: _model, messages: _messages, tools: _tools, temperature: _temp, max_tokens: _max, response_format: _format, headers: _headers, ...rest } = request;
          const payload = {
              contents,
-             generationConfig,
+             generationConfig: {
+                 ...generationConfig,
+                 ...(rest.generationConfig || {})
+             },
+             ...rest
          };
          if (systemInstructionParts.length > 0) {
              payload.systemInstruction = { parts: systemInstructionParts };
@@ -83,7 +80,8 @@ export class GeminiChat {
              output_tokens: json.usageMetadata.candidatesTokenCount,
              total_tokens: json.usageMetadata.totalTokenCount,
          } : undefined;
-         return { content, tool_calls, usage };
+         const calculatedUsage = usage ? ModelRegistry.calculateCost(usage, request.model, "gemini") : undefined;
+         return { content, tool_calls, usage: calculatedUsage };
      }
      sanitizeSchema(schema) {
          if (typeof schema !== "object" || schema === null)
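GeminiChat.execute now runs the raw token counts from usageMetadata through ModelRegistry.calculateCost before returning them. A hedged sketch of that wiring; the exact shape calculateCost returns is defined in models/ModelRegistry.js, which this diff does not show, and the counts and model id below are made up:

```ts
import { ModelRegistry } from "../../models/ModelRegistry.js";

// Raw counts in the shape GeminiChat builds from json.usageMetadata.
const usage = {
  input_tokens: 1200,
  output_tokens: 350,
  total_tokens: 1550,
};

// The registry prices the usage for the given model/provider pair; any extra
// fields on the returned object are not visible in this diff.
const pricedUsage = ModelRegistry.calculateCost(usage, "gemini-2.0-flash", "gemini");
```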
package/dist/providers/gemini/Models.d.ts
@@ -4,5 +4,6 @@ export declare class GeminiModels {
      private readonly apiKey;
      constructor(baseUrl: string, apiKey: string);
      execute(): Promise<ModelInfo[]>;
+     find(modelId: string): import("../../models/types.js").Model | undefined;
  }
  //# sourceMappingURL=Models.d.ts.map
package/dist/providers/gemini/Models.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"Models.d.ts","sourceRoot":"","sources":["../../../src/providers/gemini/Models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAC;AAI3C,qBAAa,YAAY;IACX,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,IAAI,OAAO,CAAC,SAAS,EAAE,CAAC;CAgCtC"}
+ {"version":3,"file":"Models.d.ts","sourceRoot":"","sources":["../../../src/providers/gemini/Models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAC;AAK3C,qBAAa,YAAY;IACX,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,IAAI,OAAO,CAAC,SAAS,EAAE,CAAC;IAkDrC,IAAI,CAAC,OAAO,EAAE,MAAM;CAGrB"}
package/dist/providers/gemini/Models.js
@@ -1,4 +1,5 @@
  import { Capabilities } from "./Capabilities.js";
+ import { ModelRegistry } from "../../models/ModelRegistry.js";
  export class GeminiModels {
      baseUrl;
      apiKey;
@@ -7,32 +8,51 @@ export class GeminiModels {
          this.apiKey = apiKey;
      }
      async execute() {
-         const url = `${this.baseUrl}/models?key=${this.apiKey}`;
-         const response = await fetch(url);
-         if (!response.ok) {
-             const errorText = await response.text();
-             throw new Error(`Gemini error (${response.status}): ${errorText}`);
-         }
-         const json = (await response.json());
-         return json.models
-             .filter(m => m.supportedGenerationMethods.includes("generateContent"))
-             .map((model) => {
-             const id = model.name.replace("models/", "");
-             return {
-                 id: id,
-                 name: model.displayName || Capabilities.formatDisplayName(id),
-                 provider: "gemini",
-                 family: Capabilities.getFamily(id),
-                 context_window: model.inputTokenLimit || Capabilities.getContextWindow(id),
-                 max_output_tokens: model.outputTokenLimit || Capabilities.getMaxOutputTokens(id),
-                 modalities: Capabilities.getModalities(id),
-                 capabilities: Capabilities.getCapabilities(id),
-                 pricing: Capabilities.getPricing(id),
-                 metadata: {
-                     description: model.description,
-                     version: model.version,
+         try {
+             const response = await fetch(`${this.baseUrl}/models?key=${this.apiKey}`, {
+                 method: "GET",
+                 headers: {
+                     "Content-Type": "application/json",
                  },
-             };
-         });
+             });
+             if (response.ok) {
+                 const { models } = await response.json();
+                 return models.map(m => {
+                     const modelId = m.name.replace("models/", "");
+                     const registryModel = ModelRegistry.find(modelId, "gemini");
+                     const info = {
+                         id: modelId,
+                         name: registryModel?.name || m.displayName || modelId,
+                         provider: "gemini",
+                         family: registryModel?.family || modelId,
+                         context_window: registryModel?.context_window || Capabilities.getContextWindow(modelId),
+                         max_output_tokens: registryModel?.max_output_tokens || Capabilities.getMaxOutputTokens(modelId),
+                         modalities: registryModel?.modalities || Capabilities.getModalities(modelId),
+                         capabilities: Capabilities.getCapabilities(modelId),
+                         pricing: registryModel?.pricing || Capabilities.getPricing(modelId),
+                         metadata: {
+                             ...(registryModel?.metadata || {}),
+                             description: m.description,
+                             input_token_limit: m.inputTokenLimit,
+                             output_token_limit: m.outputTokenLimit,
+                             supported_generation_methods: m.supportedGenerationMethods
+                         }
+                     };
+                     return info;
+                 });
+             }
+         }
+         catch (_error) {
+             // Fallback
+         }
+         return ModelRegistry.all()
+             .filter(m => m.provider === "gemini")
+             .map(m => ({
+             ...m,
+             capabilities: Capabilities.getCapabilities(m.id)
+         }));
+     }
+     find(modelId) {
+         return ModelRegistry.find(modelId, "gemini");
      }
  }
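GeminiModels.execute now treats the live models endpoint as an enrichment source and the bundled registry as a safety net: per-model fields prefer the registry entry when one exists, and if the fetch throws or returns a non-OK status the method falls back to ModelRegistry.all() instead of throwing. A condensed sketch of that fetch-then-fallback shape; listGeminiModels and the bare fallback descriptor are illustrative, not package API, and the import path mirrors the dist-relative one shown above:

```ts
import { ModelRegistry } from "../../models/ModelRegistry.js";

async function listGeminiModels(baseUrl: string, apiKey: string) {
  try {
    const response = await fetch(`${baseUrl}/models?key=${apiKey}`);
    if (response.ok) {
      const { models } = await response.json();
      return models.map((m: { name: string }) => {
        const id = m.name.replace("models/", "");
        // Prefer the registry entry; fall back to a bare descriptor otherwise.
        return ModelRegistry.find(id, "gemini") ?? { id, provider: "gemini" };
      });
    }
  } catch {
    // Network or parsing failure: fall through to the bundled registry.
  }
  return ModelRegistry.all().filter((m) => m.provider === "gemini");
}
```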
package/dist/providers/openai/Capabilities.d.ts
@@ -1,7 +1,4 @@
- import { ModelFamilyDefinition } from "./ModelDefinitions.js";
  export declare class Capabilities {
-     static getFamily(modelId: string): string;
-     static getDefinition(modelId: string): ModelFamilyDefinition;
      static getContextWindow(modelId: string): number | null;
      static getMaxOutputTokens(modelId: string): number | null;
      static supportsVision(modelId: string): boolean;
@@ -12,19 +9,14 @@ export declare class Capabilities {
      static supportsImageGeneration(modelId: string): boolean;
      static supportsTranscription(modelId: string): boolean;
      static supportsModeration(modelId: string): boolean;
-     static getInputPrice(modelId: string): number;
-     static getCachedInputPrice(modelId: string): number | undefined;
-     static getOutputPrice(modelId: string): number;
-     static getModelType(modelId: string): "embedding" | "audio" | "moderation" | "image" | "chat";
-     static formatDisplayName(modelId: string): string;
-     private static applySpecialFormatting;
-     private static specialPrefixFormat;
-     static normalizeTemperature(temperature: number | undefined, modelId: string): number | undefined | null;
+     static getModelType(modelId: string): "embedding" | "audio" | "moderation" | "image" | "chat" | "audio_transcription" | "audio_speech";
      static getModalities(modelId: string): {
          input: string[];
          output: string[];
      };
      static getCapabilities(modelId: string): string[];
+     static normalizeTemperature(temperature: number | undefined, modelId: string): number | undefined | null;
+     static formatDisplayName(modelId: string): string;
      static getPricing(modelId: string): any;
  }
  //# sourceMappingURL=Capabilities.d.ts.map
package/dist/providers/openai/Capabilities.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"Capabilities.d.ts","sourceRoot":"","sources":["../../../src/providers/openai/Capabilities.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,qBAAqB,EAAE,MAAM,uBAAuB,CAAC;AAE7E,qBAAa,YAAY;IACvB,MAAM,CAAC,SAAS,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM;IAUzC,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,qBAAqB;IAK5D,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI;IAIvD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI;IAIzD,MAAM,CAAC,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAI/C,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAI9C,MAAM,CAAC,wBAAwB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAIzD,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAIjD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAInD,MAAM,CAAC,uBAAuB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAIxD,MAAM,CAAC,qBAAqB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAMtD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAInD,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM;IAK7C,MAAM,CAAC,mBAAmB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS;IAI/D,MAAM,CAAC,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM;IAK9C,MAAM,CAAC,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,WAAW,GAAG,OAAO,GAAG,YAAY,GAAG,OAAO,GAAG,MAAM;IAI7F,MAAM,CAAC,iBAAiB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM;IAKjD,OAAO,CAAC,MAAM,CAAC,sBAAsB;IAarC,OAAO,CAAC,MAAM,CAAC,mBAAmB;IAUlC,MAAM,CAAC,oBAAoB,CAAC,WAAW,EAAE,MAAM,GAAG,SAAS,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,GAAG,IAAI;IAUxG,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG;QAAE,KAAK,EAAE,MAAM,EAAE,CAAC;QAAC,MAAM,EAAE,MAAM,EAAE,CAAA;KAAE;IAqB5E,MAAM,CAAC,eAAe,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE;IAiBjD,MAAM,CAAC,UAAU,CAAC,OAAO,EAAE,MAAM;CAyBlC"}
+ {"version":3,"file":"Capabilities.d.ts","sourceRoot":"","sources":["../../../src/providers/openai/Capabilities.ts"],"names":[],"mappings":"AAEA,qBAAa,YAAY;IACvB,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI;IAUvD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,IAAI;IASzD,MAAM,CAAC,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAO/C,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAO9C,MAAM,CAAC,wBAAwB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAOzD,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAIjD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAOnD,MAAM,CAAC,uBAAuB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAOxD,MAAM,CAAC,qBAAqB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAOtD,MAAM,CAAC,kBAAkB,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO;IAOnD,MAAM,CAAC,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,WAAW,GAAG,OAAO,GAAG,YAAY,GAAG,OAAO,GAAG,MAAM,GAAG,qBAAqB,GAAG,cAAc;IAUtI,MAAM,CAAC,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG;QAAE,KAAK,EAAE,MAAM,EAAE,CAAC;QAAC,MAAM,EAAE,MAAM,EAAE,CAAA;KAAE;IAiB5E,MAAM,CAAC,eAAe,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE;IAoBjD,MAAM,CAAC,oBAAoB,CAAC,WAAW,EAAE,MAAM,GAAG,SAAS,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,GAAG,IAAI;IAMxG,MAAM,CAAC,iBAAiB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM;IAOjD,MAAM,CAAC,UAAU,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG;CAcxC"}
package/dist/providers/openai/Capabilities.js
@@ -1,160 +1,157 @@
- import { OPENAI_MODELS } from "./ModelDefinitions.js";
+ import { ModelRegistry } from "../../models/ModelRegistry.js";
  export class Capabilities {
-     static getFamily(modelId) {
-         for (const [key, def] of Object.entries(OPENAI_MODELS)) {
-             if (key === "other")
-                 continue;
-             if (def.pattern.test(modelId)) {
-                 return key;
-             }
-         }
-         return "other";
-     }
-     static getDefinition(modelId) {
-         const family = this.getFamily(modelId);
-         return OPENAI_MODELS[family];
-     }
      static getContextWindow(modelId) {
-         return this.getDefinition(modelId).contextWindow;
+         const val = ModelRegistry.getContextWindow(modelId, "openai");
+         if (val)
+             return val;
+         if (/gpt-4.*(preview|turbo|vision|o)/.test(modelId) || /o1|o3/.test(modelId))
+             return 128_000;
+         if (/gpt-4/.test(modelId))
+             return 8_192;
+         if (/gpt-3\.5/.test(modelId))
+             return 16_385;
+         return 128_000;
      }
      static getMaxOutputTokens(modelId) {
-         return this.getDefinition(modelId).maxOutputTokens;
+         const val = ModelRegistry.getMaxOutputTokens(modelId, "openai");
+         if (val)
+             return val;
+         if (/o1.*(pro|mini)|o3/.test(modelId))
+             return 65_536;
+         if (/gpt-4o/.test(modelId))
+             return 16_384;
+         return 4_096;
      }
      static supportsVision(modelId) {
-         return !!this.getDefinition(modelId).features.vision;
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.modalities?.input?.includes("image"))
+             return true;
+         return /gpt-4(?!-3)|o1/.test(modelId) && !/audio|realtime|voice/.test(modelId);
      }
      static supportsTools(modelId) {
-         return !!this.getDefinition(modelId).features.tools;
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.capabilities?.includes("function_calling"))
+             return true;
+         return !/embedding|moderation|dall-e|tts|whisper/.test(modelId);
      }
      static supportsStructuredOutput(modelId) {
-         return !!this.getDefinition(modelId).features.structuredOutput;
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.capabilities?.includes("structured_output"))
+             return true;
+         return /gpt-4|o1|o3/.test(modelId);
      }
      static supportsJsonMode(modelId) {
          return this.supportsStructuredOutput(modelId);
      }
      static supportsEmbeddings(modelId) {
-         return this.getDefinition(modelId).type === "embedding";
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.modalities?.output?.includes("embeddings"))
+             return true;
+         return /embedding/.test(modelId);
      }
      static supportsImageGeneration(modelId) {
-         return this.getDefinition(modelId).type === "image";
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.capabilities?.includes("image_generation") || model?.modalities?.output?.includes("image"))
+             return true;
+         return /dall-e|image/.test(modelId);
      }
      static supportsTranscription(modelId) {
-         // Transcription is supported by audio models or specific models like gpt-4o-audio
-         const def = this.getDefinition(modelId);
-         return def.type === "audio" || (def.type === "chat" && /audio|transcribe/.test(modelId));
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.modalities?.input?.includes("audio"))
+             return true;
+         return /whisper|audio|transcribe/.test(modelId);
      }
      static supportsModeration(modelId) {
-         return this.getDefinition(modelId).type === "moderation";
-     }
-     static getInputPrice(modelId) {
-         const prices = this.getDefinition(modelId).pricing;
-         return prices.input || prices.price || 0.5;
-     }
-     static getCachedInputPrice(modelId) {
-         return this.getDefinition(modelId).pricing.cached_input;
-     }
-     static getOutputPrice(modelId) {
-         const prices = this.getDefinition(modelId).pricing;
-         return prices.output || prices.price || 1.5;
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.modalities?.output?.includes("moderation"))
+             return true;
+         return /moderation/.test(modelId);
      }
      static getModelType(modelId) {
-         return this.getDefinition(modelId).type;
+         if (/moderation/.test(modelId))
+             return "moderation";
+         if (/embedding/.test(modelId))
+             return "embedding";
+         if (/dall-e|image/.test(modelId))
+             return "image";
+         if (/whisper|transcribe/.test(modelId))
+             return "audio_transcription";
+         if (/tts|speech/.test(modelId))
+             return "audio_speech";
+         if (/audio/.test(modelId))
+             return "audio";
+         return "chat";
      }
-     static formatDisplayName(modelId) {
-         const humanized = modelId.replace(/-/g, " ").split(" ").map(s => s.charAt(0).toUpperCase() + s.slice(1)).join(" ");
-         return this.applySpecialFormatting(humanized);
-     }
-     static applySpecialFormatting(name) {
-         return name
-             .replace(/(\d{4}) (\d{2}) (\d{2})/, "$1$2$3")
-             .replace(/^(?:Gpt|Chatgpt|Tts|Dall E) /g, (m) => this.specialPrefixFormat(m.trim()))
-             .replace(/^O([13]) /g, "O$1-")
-             .replace(/^O[13] Mini/g, (m) => m.replace(" ", "-"))
-             .replace(/\d\.\d /g, (m) => m.replace(" ", "-"))
-             .replace(/4o (?=Mini|Preview|Turbo|Audio|Realtime|Transcribe|Tts)/g, "4o-")
-             .replace(/\bHd\b/g, "HD")
-             .replace(/(?:Omni|Text) Moderation/g, (m) => m.replace(" ", "-"))
-             .replace("Text Embedding", "text-embedding-");
-     }
-     static specialPrefixFormat(prefix) {
-         switch (prefix) {
-             case "Gpt": return "GPT-";
-             case "Chatgpt": return "ChatGPT-";
-             case "Tts": return "TTS-";
-             case "Dall E": return "DALL-E-";
-             default: return prefix + "-";
+     static getModalities(modelId) {
+         const input = ["text"];
+         const output = ["text"];
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.modalities)
+             return model.modalities;
+         if (this.supportsVision(modelId))
+             input.push("image", "pdf");
+         if (this.supportsTranscription(modelId))
+             input.push("audio");
+         if (this.supportsImageGeneration(modelId))
+             output.push("image");
+         if (this.supportsEmbeddings(modelId))
+             output.push("embeddings");
+         if (this.supportsModeration(modelId))
+             output.push("moderation");
+         return { input, output };
+     }
+     static getCapabilities(modelId) {
+         const caps = ["streaming"];
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model) {
+             model.capabilities.forEach(c => { if (!caps.includes(c))
+                 caps.push(c); });
+             return caps;
          }
+         if (this.supportsTools(modelId))
+             caps.push("function_calling");
+         if (this.supportsStructuredOutput(modelId))
+             caps.push("structured_output");
+         if (this.supportsEmbeddings(modelId))
+             caps.push("batch");
+         if (/o\d|gpt-5/.test(modelId))
+             caps.push("reasoning");
+         if (this.supportsImageGeneration(modelId))
+             caps.push("image_generation");
+         if (this.supportsTranscription(modelId))
+             caps.push("transcription");
+         return caps;
      }
      static normalizeTemperature(temperature, modelId) {
-         if (/^(o\d|gpt-5)/.test(modelId)) {
+         if (/^(o\d|gpt-5)/.test(modelId))
              return 1.0;
-         }
-         if (/-search/.test(modelId)) {
+         if (/-search/.test(modelId))
              return null;
-         }
          return temperature;
      }
-     static getModalities(modelId) {
-         const type = this.getModelType(modelId);
-         const features = this.getDefinition(modelId).features;
-         const modalities = {
-             input: ["text"],
-             output: ["text"]
-         };
-         if (features.vision)
-             modalities.input.push("image", "pdf");
-         if (type === "audio") {
-             modalities.input.push("audio");
-             modalities.output.push("audio");
-         }
-         if (type === "image")
-             modalities.output.push("image");
-         if (type === "embedding")
-             modalities.output.push("embeddings");
-         if (type === "moderation")
-             modalities.output.push("moderation");
-         return modalities;
-     }
-     static getCapabilities(modelId) {
-         const capabilities = [];
-         const type = this.getModelType(modelId);
-         const features = this.getDefinition(modelId).features;
-         if (type !== "moderation" && type !== "embedding")
-             capabilities.push("streaming");
-         if (features.tools)
-             capabilities.push("function_calling");
-         if (features.structuredOutput)
-             capabilities.push("structured_output");
-         if (type === "embedding")
-             capabilities.push("batch");
-         if (/o\d|gpt-5|codex/.test(modelId))
-             capabilities.push("reasoning");
-         if (type === "image")
-             capabilities.push("image_generation");
-         if (type === "audio")
-             capabilities.push("speech_generation", "transcription");
-         return capabilities;
+     static formatDisplayName(modelId) {
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.name && model.name !== modelId)
+             return model.name;
+         return modelId.replace(/-/g, " ").replace(/\b\w/g, c => c.toUpperCase());
      }
      static getPricing(modelId) {
-         const standardPricing = {
-             input_per_million: this.getInputPrice(modelId),
-             output_per_million: this.getOutputPrice(modelId)
-         };
-         const cachedPrice = this.getCachedInputPrice(modelId);
-         const pricing = {
+         const model = ModelRegistry.find(modelId, "openai");
+         if (model?.pricing)
+             return model.pricing;
+         let input = 2.5, output = 10.0;
+         if (/gpt-3/.test(modelId)) {
+             input = 0.5;
+             output = 1.5;
+         }
+         if (/mini/.test(modelId)) {
+             input = 0.15;
+             output = 0.6;
+         }
+         return {
              text_tokens: {
-                 standard: {
-                     ...standardPricing,
-                     ...(cachedPrice ? { cached_input_per_million: cachedPrice } : {})
-                 }
+                 standard: { input_per_million: input, output_per_million: output }
              }
          };
-         if (this.getModelType(modelId) === "embedding") {
-             pricing.text_tokens.batch = {
-                 input_per_million: standardPricing.input_per_million * 0.5,
-                 output_per_million: standardPricing.output_per_million * 0.5
-             };
-         }
-         return pricing;
      }
  }
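With ModelDefinitions.js gone, an OpenAI id that the registry does not know falls through to three coarse pricing tiers: 2.5/10.0 per million tokens by default, 0.5/1.5 for gpt-3*, and 0.15/0.6 for anything matching "mini". A worked example of that fallback branch; the model id is a made-up placeholder and the helper merely mirrors the logic in the diff above:

```ts
// Reproduces getPricing's fallback branch for ids missing from the registry.
function fallbackPricing(modelId: string) {
  let input = 2.5, output = 10.0;                          // default tier
  if (/gpt-3/.test(modelId)) { input = 0.5; output = 1.5; }
  if (/mini/.test(modelId)) { input = 0.15; output = 0.6; }
  return { text_tokens: { standard: { input_per_million: input, output_per_million: output } } };
}

// "gpt-4o-mini-custom" is hypothetical: no registry entry, matches /mini/.
fallbackPricing("gpt-4o-mini-custom");
// => { text_tokens: { standard: { input_per_million: 0.15, output_per_million: 0.6 } } }
```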
package/dist/providers/openai/Chat.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../../src/providers/openai/Chat.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAK3D,qBAAa,UAAU;IACT,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;CA0D3D"}
+ {"version":3,"file":"Chat.d.ts","sourceRoot":"","sources":["../../../src/providers/openai/Chat.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,WAAW,EAAE,YAAY,EAAS,MAAM,gBAAgB,CAAC;AAOlE,qBAAa,UAAU;IACT,OAAO,CAAC,QAAQ,CAAC,OAAO;IAAU,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAxC,OAAO,EAAE,MAAM,EAAmB,MAAM,EAAE,MAAM;IAEvE,OAAO,CAAC,OAAO,EAAE,WAAW,GAAG,OAAO,CAAC,YAAY,CAAC;CAsD3D"}