@chainfuse/types 1.6.0 → 1.6.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,24 +3,18 @@ export interface RawCoordinate {
  lat: string;
  lon: string;
  }
+ export declare const enabledCloudflareLlmImageProviders: cloudflareModelPossibilities<'Text-to-Image'>[];
  export type CloudflareModelsEnum<M extends cloudflareModelTypes = cloudflareModelTypes> = {
  [K in cloudflareModelPossibilities<M>]: `workersai:${K}`;
  };
  export type CloudflareFunctionModelsEnum = {
  [K in cloudflareFilteredModelPossibilities<'Text Generation', 'function_calling', true>]: `workersai:${K}`;
  };
- export type AzureChatModels = 'gpt-35-turbo' | 'gpt-4-turbo' | 'gpt-4o-mini' | 'gpt-4o';
+ export type AzureChatModels = 'gpt-35-turbo' | 'gpt-4-turbo' | 'gpt-4o' | 'gpt-4o-mini' | 'o1' | 'o3-mini';
+ export type AzureImageModels = 'dall-e-3';
  export type AzureEmbeddingModels = 'text-embedding-3-small' | 'text-embedding-3-large';
  export declare namespace AiModels {
  namespace LanguageModels {
- enum OpenAi {
- gpt3 = "openai:gpt-3.5-turbo",
- gpt4o_mini = "openai:gpt-4o-mini",
- o1_mini = "openai:o1-mini",
- gpt4 = "openai:gpt-4-turbo",
- gpt4o = "openai:gpt-4o",
- o1 = "openai:o1-preview"
- }
  enum Azure {
  gpt3 = "azure:gpt-35-turbo",
  gpt4o_mini = "azure:gpt-4o-mini",
@@ -41,12 +35,31 @@ export declare namespace AiModels {
  gemini_pro = "google.generative-ai:gemini-1.5-pro",
  gemini_pro_search = "google.generative-ai:gemini-1.5-pro:search"
  }
+ enum OpenAi {
+ gpt3 = "openai:gpt-3.5-turbo",
+ gpt4o_mini = "openai:gpt-4o-mini",
+ o1_mini = "openai:o1-mini",
+ gpt4 = "openai:gpt-4-turbo",
+ gpt4o = "openai:gpt-4o",
+ o1 = "openai:o1-preview"
+ }
  }
- namespace TextEmbeddingModels {
+ namespace ImageModels {
+ enum Azure {
+ dalle3 = "azure:dall-e-3",
+ dalle2 = "azure:dall-e-2"
+ }
+ const Cloudflare: Readonly<CloudflareModelsEnum<"Text-to-Image">>;
+ enum GoogleGenerativeAi {
+ imagen = "google.generative-ai:imagen-3.0-generate-002",
+ imagen_fast = "google.generative-ai:imagen-3.0-fast-generate-001"
+ }
  enum OpenAi {
- te3_large = "openai:text-embedding-3-large",
- te3_small = "openai:text-embedding-3-small"
+ dalle3 = "openai:dall-e-3",
+ dalle2 = "openai:dall-e-2"
  }
+ }
+ namespace TextEmbeddingModels {
  enum Azure {
  te3_large = "azure:text-embedding-3-large",
  te3_small = "azure:text-embedding-3-small"
@@ -55,10 +68,15 @@ export declare namespace AiModels {
  enum GoogleGenerativeAi {
  te4 = "google.generative-ai:text-embedding-004"
  }
+ enum OpenAi {
+ te3_large = "openai:text-embedding-3-large",
+ te3_small = "openai:text-embedding-3-small"
+ }
  }
  }
  export type EnumOrEnumLike<T> = T extends Record<string, infer V> ? V : T extends Readonly<Record<string, infer V>> ? V : never;
  export type LanguageModelValues = EnumOrEnumLike<(typeof AiModels.LanguageModels)[keyof typeof AiModels.LanguageModels]>;
+ export type ImageModelValues = EnumOrEnumLike<(typeof AiModels.ImageModels)[keyof typeof AiModels.ImageModels]>;
  export type TextEmbeddingModelValues = EnumOrEnumLike<(typeof AiModels.TextEmbeddingModels)[keyof typeof AiModels.TextEmbeddingModels]>;
  export declare const default_mc_generic: LanguageModelValues;
  export declare const default_mc_summary: LanguageModelValues;
@@ -66,4 +84,5 @@ export declare const default_mc_extraction: LanguageModelValues;
  export declare const default_mc_tagging: LanguageModelValues;
  export declare const default_mc_sentiment: LanguageModelValues;
  export declare const default_mc_safety: LanguageModelValues;
+ export declare const default_mc_image: ImageModelValues;
  export declare const default_mc_embedding: TextEmbeddingModelValues;
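
For orientation (not part of the published diff), a minimal TypeScript sketch of how the new image-model surface declared above might be consumed. The bare '@chainfuse/types' import specifier and the pickImageModel helper are assumptions for illustration only.

import { AiModels, default_mc_image, type ImageModelValues } from '@chainfuse/types'; // assumed entry point

// ImageModelValues is the union of every value under AiModels.ImageModels,
// e.g. "azure:dall-e-3", "openai:dall-e-2", or a "workersai:..." identifier.
const azureDalle3: ImageModelValues = AiModels.ImageModels.Azure.dalle3;

// Hypothetical helper: fall back to the package default when no override is given.
function pickImageModel(override?: ImageModelValues): ImageModelValues {
    return override ?? default_mc_image; // default resolves to the Cloudflare SDXL entry
}

pickImageModel();            // "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0"
pickImageModel(azureDalle3); // "azure:dall-e-3"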
@@ -1,17 +1,10 @@
  import { enabledCloudflareLlmEmbeddingProviders, enabledCloudflareLlmFunctionProviders, enabledCloudflareLlmProviders } from '../super-ai/index.js';
+ import { workersAiCatalog } from './workers-ai-catalog.js';
+ export const enabledCloudflareLlmImageProviders = workersAiCatalog.modelGroups['Text-to-Image'].models.map((model) => model.name);
  export var AiModels;
  (function (AiModels) {
  let LanguageModels;
  (function (LanguageModels) {
- let OpenAi;
- (function (OpenAi) {
- OpenAi["gpt3"] = "openai:gpt-3.5-turbo";
- OpenAi["gpt4o_mini"] = "openai:gpt-4o-mini";
- OpenAi["o1_mini"] = "openai:o1-mini";
- OpenAi["gpt4"] = "openai:gpt-4-turbo";
- OpenAi["gpt4o"] = "openai:gpt-4o";
- OpenAi["o1"] = "openai:o1-preview";
- })(OpenAi = LanguageModels.OpenAi || (LanguageModels.OpenAi = {}));
  let Azure;
  (function (Azure) {
  Azure["gpt3"] = "azure:gpt-35-turbo";
@@ -37,14 +30,37 @@ export var AiModels;
  GoogleGenerativeAi["gemini_pro"] = "google.generative-ai:gemini-1.5-pro";
  GoogleGenerativeAi["gemini_pro_search"] = "google.generative-ai:gemini-1.5-pro:search";
  })(GoogleGenerativeAi = LanguageModels.GoogleGenerativeAi || (LanguageModels.GoogleGenerativeAi = {}));
+ let OpenAi;
+ (function (OpenAi) {
+ OpenAi["gpt3"] = "openai:gpt-3.5-turbo";
+ OpenAi["gpt4o_mini"] = "openai:gpt-4o-mini";
+ OpenAi["o1_mini"] = "openai:o1-mini";
+ OpenAi["gpt4"] = "openai:gpt-4-turbo";
+ OpenAi["gpt4o"] = "openai:gpt-4o";
+ OpenAi["o1"] = "openai:o1-preview";
+ })(OpenAi = LanguageModels.OpenAi || (LanguageModels.OpenAi = {}));
  })(LanguageModels = AiModels.LanguageModels || (AiModels.LanguageModels = {}));
- let TextEmbeddingModels;
- (function (TextEmbeddingModels) {
+ let ImageModels;
+ (function (ImageModels) {
+ let Azure;
+ (function (Azure) {
+ Azure["dalle3"] = "azure:dall-e-3";
+ Azure["dalle2"] = "azure:dall-e-2";
+ })(Azure = ImageModels.Azure || (ImageModels.Azure = {}));
+ ImageModels.Cloudflare = Object.freeze(Object.fromEntries(enabledCloudflareLlmImageProviders.map((model) => [model, `workersai:${model}`])));
+ let GoogleGenerativeAi;
+ (function (GoogleGenerativeAi) {
+ GoogleGenerativeAi["imagen"] = "google.generative-ai:imagen-3.0-generate-002";
+ GoogleGenerativeAi["imagen_fast"] = "google.generative-ai:imagen-3.0-fast-generate-001";
+ })(GoogleGenerativeAi = ImageModels.GoogleGenerativeAi || (ImageModels.GoogleGenerativeAi = {}));
  let OpenAi;
  (function (OpenAi) {
- OpenAi["te3_large"] = "openai:text-embedding-3-large";
- OpenAi["te3_small"] = "openai:text-embedding-3-small";
- })(OpenAi = TextEmbeddingModels.OpenAi || (TextEmbeddingModels.OpenAi = {}));
+ OpenAi["dalle3"] = "openai:dall-e-3";
+ OpenAi["dalle2"] = "openai:dall-e-2";
+ })(OpenAi = ImageModels.OpenAi || (ImageModels.OpenAi = {}));
+ })(ImageModels = AiModels.ImageModels || (AiModels.ImageModels = {}));
+ let TextEmbeddingModels;
+ (function (TextEmbeddingModels) {
  let Azure;
  (function (Azure) {
  Azure["te3_large"] = "azure:text-embedding-3-large";
@@ -55,12 +71,18 @@ export var AiModels;
  (function (GoogleGenerativeAi) {
  GoogleGenerativeAi["te4"] = "google.generative-ai:text-embedding-004";
  })(GoogleGenerativeAi = TextEmbeddingModels.GoogleGenerativeAi || (TextEmbeddingModels.GoogleGenerativeAi = {}));
+ let OpenAi;
+ (function (OpenAi) {
+ OpenAi["te3_large"] = "openai:text-embedding-3-large";
+ OpenAi["te3_small"] = "openai:text-embedding-3-small";
+ })(OpenAi = TextEmbeddingModels.OpenAi || (TextEmbeddingModels.OpenAi = {}));
  })(TextEmbeddingModels = AiModels.TextEmbeddingModels || (AiModels.TextEmbeddingModels = {}));
  })(AiModels || (AiModels = {}));
  export const default_mc_generic = AiModels.LanguageModels.Azure.gpt4o_mini;
- export const default_mc_summary = AiModels.LanguageModels.Azure.gpt4o_mini;
+ export const default_mc_summary = AiModels.LanguageModels.Cloudflare['@cf/meta/llama-3.3-70b-instruct-fp8-fast'];
  export const default_mc_extraction = AiModels.LanguageModels.Azure.gpt4o_mini;
  export const default_mc_tagging = AiModels.LanguageModels.Azure.gpt4o_mini;
  export const default_mc_sentiment = AiModels.LanguageModels.Azure.gpt4o_mini;
  export const default_mc_safety = AiModels.LanguageModels.Cloudflare['@hf/thebloke/llamaguard-7b-awq'];
+ export const default_mc_image = AiModels.ImageModels.Cloudflare['@cf/stabilityai/stable-diffusion-xl-base-1.0'];
  export const default_mc_embedding = AiModels.TextEmbeddingModels.Cloudflare['@cf/baai/bge-large-en-v1.5'];
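
To make the new Cloudflare image-provider wiring above easier to follow (again, not part of the diff itself), a self-contained TypeScript sketch of the same derivation using a mocked catalog; the second model name is a placeholder, not a real workers-ai-catalog entry.

// Mocked shape of workersAiCatalog.modelGroups['Text-to-Image'] as used above.
const mockCatalog = {
    modelGroups: {
        'Text-to-Image': {
            models: [
                { name: '@cf/stabilityai/stable-diffusion-xl-base-1.0' },
                { name: '@cf/example/placeholder-model' }, // hypothetical entry for illustration
            ],
        },
    },
} as const;

// 1) Collect the enabled Text-to-Image model names from the catalog.
const imageProviders = mockCatalog.modelGroups['Text-to-Image'].models.map((model) => model.name);

// 2) Freeze a lookup table mapping each model name to its `workersai:`-prefixed identifier,
//    mirroring what the compiled module assigns to ImageModels.Cloudflare.
const Cloudflare = Object.freeze(Object.fromEntries(imageProviders.map((model) => [model, `workersai:${model}`])));

Cloudflare['@cf/stabilityai/stable-diffusion-xl-base-1.0']; // "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0"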
@@ -138,6 +138,15 @@ export declare const workersAiCatalog: {
  readonly properties: {
  readonly beta: true;
  };
+ }, {
+ readonly id: "ad01ab83-baf8-4e7b-8fed-a0a219d4eb45";
+ readonly source: 1;
+ readonly name: "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b";
+ readonly description: "DeepSeek-R1-Distill-Qwen-32B is a model distilled from DeepSeek-R1 based on Qwen2.5. It outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly terms: "https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE";
+ };
  }, {
  readonly id: "9d2ab560-065e-4d0d-a789-d4bc7468d33e";
  readonly source: 1;
@@ -152,6 +152,16 @@ export const workersAiCatalog = {
  beta: true,
  },
  },
+ {
+ id: 'ad01ab83-baf8-4e7b-8fed-a0a219d4eb45',
+ source: 1,
+ name: '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
+ description: 'DeepSeek-R1-Distill-Qwen-32B is a model distilled from DeepSeek-R1 based on Qwen2.5. It outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.',
+ tags: [],
+ properties: {
+ terms: 'https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE',
+ },
+ },
  {
  id: '9d2ab560-065e-4d0d-a789-d4bc7468d33e',
  source: 1,
@@ -250,6 +250,15 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly properties: {
  readonly beta: true;
  };
+ }, {
+ readonly id: "ad01ab83-baf8-4e7b-8fed-a0a219d4eb45";
+ readonly source: 1;
+ readonly name: "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b";
+ readonly description: "DeepSeek-R1-Distill-Qwen-32B is a model distilled from DeepSeek-R1 based on Qwen2.5. It outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly terms: "https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE";
+ };
  }, {
  readonly id: "9d2ab560-065e-4d0d-a789-d4bc7468d33e";
  readonly source: 1;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@chainfuse/types",
- "version": "1.6.0",
+ "version": "1.6.2",
  "description": "",
  "author": "ChainFuse",
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -86,8 +86,8 @@
  },
  "prettier": "@demosjarco/prettier-config",
  "devDependencies": {
- "@cloudflare/workers-types": "^4.20250124.3",
+ "@cloudflare/workers-types": "^4.20250129.0",
  "@types/json-schema": "^7.0.15"
  },
- "gitHead": "388e77364a079bd5b9c4048504cb72e2dc0efb57"
+ "gitHead": "5c8d059af28f814e48549d079a7b7f01dc3ad1db"
  }