@chainfuse/types 2.11.7 → 2.11.8

This diff shows the differences between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents between versions as they appear in their respective public registries.
@@ -561,6 +561,8 @@ export declare const azureCatalog: readonly [{
561
561
  };
562
562
  readonly languageModelAvailability: readonly [{
563
563
  readonly name: "gpt-4.1-nano";
564
+ readonly inputTokenCost: 1e-7;
565
+ readonly outputTokenCost: 4e-7;
564
566
  }, {
565
567
  readonly name: "o4-mini";
566
568
  }];
@@ -738,6 +738,8 @@ export const azureCatalog = [
738
738
  languageModelAvailability: [
739
739
  {
740
740
  name: 'gpt-4.1-nano',
741
+ inputTokenCost: 1e-7,
742
+ outputTokenCost: 4e-7,
741
743
  },
742
744
  {
743
745
  name: 'o4-mini',
@@ -155,26 +155,27 @@ export declare const ZodLanguageModelValues: z4.ZodEnum<{
155
155
  "openai:o3": AiModels.LanguageModels.OpenAi.o3;
156
156
  }>;
157
157
  export type LanguageModelValues = z4.infer<typeof ZodLanguageModelValues>;
158
- export declare const ZodImageModelValues3: z3.ZodEnum<["workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting" | "workersai:@cf/black-forest-labs/flux-1-schnell" | "workersai:@cf/bytedance/stable-diffusion-xl-lightning" | "workersai:@cf/lykon/dreamshaper-8-lcm" | "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0" | "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img" | AiModels.ImageModels.Azure | AiModels.ImageModels.GoogleGenerativeAi | AiModels.ImageModels.OpenAi, ...("workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting" | "workersai:@cf/black-forest-labs/flux-1-schnell" | "workersai:@cf/bytedance/stable-diffusion-xl-lightning" | "workersai:@cf/lykon/dreamshaper-8-lcm" | "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0" | "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img" | AiModels.ImageModels.Azure | AiModels.ImageModels.GoogleGenerativeAi | AiModels.ImageModels.OpenAi)[]]>;
158
+ export declare const ZodImageModelValues3: z3.ZodEnum<["workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting" | "workersai:@cf/black-forest-labs/flux-1-schnell" | "workersai:@cf/bytedance/stable-diffusion-xl-lightning" | "workersai:@cf/lykon/dreamshaper-8-lcm" | "workersai:@cf/leonardo/phoenix-1.0" | "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0" | "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img" | "workersai:@cf/leonardo/lucid-origin" | AiModels.ImageModels.Azure | AiModels.ImageModels.GoogleGenerativeAi | AiModels.ImageModels.OpenAi, ...("workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting" | "workersai:@cf/black-forest-labs/flux-1-schnell" | "workersai:@cf/bytedance/stable-diffusion-xl-lightning" | "workersai:@cf/lykon/dreamshaper-8-lcm" | "workersai:@cf/leonardo/phoenix-1.0" | "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0" | "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img" | "workersai:@cf/leonardo/lucid-origin" | AiModels.ImageModels.Azure | AiModels.ImageModels.GoogleGenerativeAi | AiModels.ImageModels.OpenAi)[]]>;
159
159
  export declare const ZodImageModelValues: z4.ZodEnum<{
160
160
  "workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting": "workersai:@cf/runwayml/stable-diffusion-v1-5-inpainting";
161
161
  "workersai:@cf/black-forest-labs/flux-1-schnell": "workersai:@cf/black-forest-labs/flux-1-schnell";
162
162
  "workersai:@cf/bytedance/stable-diffusion-xl-lightning": "workersai:@cf/bytedance/stable-diffusion-xl-lightning";
163
163
  "workersai:@cf/lykon/dreamshaper-8-lcm": "workersai:@cf/lykon/dreamshaper-8-lcm";
164
+ "workersai:@cf/leonardo/phoenix-1.0": "workersai:@cf/leonardo/phoenix-1.0";
164
165
  "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0": "workersai:@cf/stabilityai/stable-diffusion-xl-base-1.0";
165
166
  "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img": "workersai:@cf/runwayml/stable-diffusion-v1-5-img2img";
167
+ "workersai:@cf/leonardo/lucid-origin": "workersai:@cf/leonardo/lucid-origin";
166
168
  "azure:dall-e-3": AiModels.ImageModels.Azure.dalle3;
167
169
  "google.generative-ai:imagen-3.0-generate-002": AiModels.ImageModels.GoogleGenerativeAi.imagen;
168
170
  "google.generative-ai:imagen-3.0-fast-generate-001": AiModels.ImageModels.GoogleGenerativeAi.imagen_fast;
169
171
  "openai:dall-e-3": AiModels.ImageModels.OpenAi.dalle3;
170
172
  }>;
171
173
  export type ImageModelValues = z4.infer<typeof ZodImageModelValues>;
172
- export declare const ZodTextEmbeddingModelValues3: z3.ZodEnum<["workersai:@cf/baai/bge-m3" | "workersai:@cf/baai/bge-small-en-v1.5" | "workersai:@cf/baai/bge-base-en-v1.5" | "workersai:@cf/baai/omni-bge-base-en-v1.5" | "workersai:@cf/baai/bge-large-en-v1.5" | AiModels.TextEmbeddingModels.Azure | AiModels.TextEmbeddingModels.GoogleGenerativeAi.te4 | AiModels.TextEmbeddingModels.OpenAi, ...("workersai:@cf/baai/bge-m3" | "workersai:@cf/baai/bge-small-en-v1.5" | "workersai:@cf/baai/bge-base-en-v1.5" | "workersai:@cf/baai/omni-bge-base-en-v1.5" | "workersai:@cf/baai/bge-large-en-v1.5" | AiModels.TextEmbeddingModels.Azure | AiModels.TextEmbeddingModels.GoogleGenerativeAi.te4 | AiModels.TextEmbeddingModels.OpenAi)[]]>;
174
+ export declare const ZodTextEmbeddingModelValues3: z3.ZodEnum<["workersai:@cf/baai/bge-m3" | "workersai:@cf/baai/bge-small-en-v1.5" | "workersai:@cf/baai/bge-base-en-v1.5" | "workersai:@cf/baai/bge-large-en-v1.5" | AiModels.TextEmbeddingModels.Azure | AiModels.TextEmbeddingModels.GoogleGenerativeAi.te4 | AiModels.TextEmbeddingModels.OpenAi, ...("workersai:@cf/baai/bge-m3" | "workersai:@cf/baai/bge-small-en-v1.5" | "workersai:@cf/baai/bge-base-en-v1.5" | "workersai:@cf/baai/bge-large-en-v1.5" | AiModels.TextEmbeddingModels.Azure | AiModels.TextEmbeddingModels.GoogleGenerativeAi.te4 | AiModels.TextEmbeddingModels.OpenAi)[]]>;
173
175
  export declare const ZodTextEmbeddingModelValues: z4.ZodEnum<{
174
176
  "workersai:@cf/baai/bge-m3": "workersai:@cf/baai/bge-m3";
175
177
  "workersai:@cf/baai/bge-small-en-v1.5": "workersai:@cf/baai/bge-small-en-v1.5";
176
178
  "workersai:@cf/baai/bge-base-en-v1.5": "workersai:@cf/baai/bge-base-en-v1.5";
177
- "workersai:@cf/baai/omni-bge-base-en-v1.5": "workersai:@cf/baai/omni-bge-base-en-v1.5";
178
179
  "workersai:@cf/baai/bge-large-en-v1.5": "workersai:@cf/baai/bge-large-en-v1.5";
179
180
  "azure:text-embedding-3-large": AiModels.TextEmbeddingModels.Azure.te3_large;
180
181
  "azure:text-embedding-3-small": AiModels.TextEmbeddingModels.Azure.te3_small;
@@ -1,5 +1,26 @@
1
1
  export declare const workersAiCatalog: {
2
2
  readonly modelGroups: {
3
+ readonly 'Dumb Pipe': {
4
+ readonly id: "ccb1ca5a-043d-41a7-8a3b-61017b2796fd";
5
+ readonly description: "Internal - Dumb Pipe models don't use tensors";
6
+ readonly models: readonly [{
7
+ readonly id: "fe8904cf-e20e-4884-b829-ed7cec0a01cb";
8
+ readonly source: 1;
9
+ readonly name: "@cf/pipecat-ai/smart-turn-v2";
10
+ readonly description: "An open source, community-driven, native audio turn detection model in 2nd version";
11
+ readonly created_at: "2025-08-04 10:08:04.219";
12
+ readonly tags: readonly [];
13
+ readonly properties: {
14
+ readonly async_queue: true;
15
+ readonly price: readonly [{
16
+ readonly unit: "per audio minute";
17
+ readonly price: 0.00034;
18
+ readonly currency: "USD";
19
+ }];
20
+ readonly realtime: true;
21
+ };
22
+ }];
23
+ };
3
24
  readonly 'Text Generation': {
4
25
  readonly id: "c329a1f9-323d-4e91-b2aa-582dd4188d34";
5
26
  readonly description: "Family of generative text models, such as large language models (LLM), that can be adapted for a variety of natural language tasks.";
@@ -643,6 +664,7 @@ export declare const workersAiCatalog: {
643
664
  readonly created_at: "2025-08-05 10:49:53.265";
644
665
  readonly tags: readonly [];
645
666
  readonly properties: {
667
+ readonly async_queue: true;
646
668
  readonly context_window: 128000;
647
669
  readonly price: readonly [{
648
670
  readonly unit: "per M input tokens";
@@ -810,19 +832,6 @@ export declare const workersAiCatalog: {
810
832
  readonly max_input_tokens: 512;
811
833
  readonly output_dimensions: 768;
812
834
  };
813
- }, {
814
- readonly id: "09e0e83d-b055-49c9-81a5-c13250a176a7";
815
- readonly source: 1;
816
- readonly name: "@cf/baai/omni-bge-base-en-v1.5";
817
- readonly description: "BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector";
818
- readonly created_at: "2025-08-24 11:06:58.799";
819
- readonly tags: readonly [];
820
- readonly properties: {
821
- readonly async_queue: true;
822
- readonly info: "https://huggingface.co/BAAI/bge-base-en-v1.5";
823
- readonly max_input_tokens: 512;
824
- readonly output_dimensions: 768;
825
- };
826
835
  }, {
827
836
  readonly id: "01bc2fb0-4bca-4598-b985-d2584a3f46c0";
828
837
  readonly source: 1;
@@ -894,6 +903,23 @@ export declare const workersAiCatalog: {
894
903
  readonly currency: "USD";
895
904
  }];
896
905
  };
906
+ }, {
907
+ readonly id: "1f55679f-009e-4456-aa4f-049a62b4b6a0";
908
+ readonly source: 1;
909
+ readonly name: "@cf/deepgram/aura-1";
910
+ readonly description: "Aura is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.";
911
+ readonly created_at: "2025-08-27 01:18:18.880";
912
+ readonly tags: readonly [];
913
+ readonly properties: {
914
+ readonly async_queue: true;
915
+ readonly price: readonly [{
916
+ readonly unit: "per audio minute";
917
+ readonly price: 0;
918
+ readonly currency: "USD";
919
+ }];
920
+ readonly partner: true;
921
+ readonly realtime: true;
922
+ };
897
923
  }];
898
924
  };
899
925
  readonly 'Automatic Speech Recognition': {
@@ -914,6 +940,23 @@ export declare const workersAiCatalog: {
914
940
  }];
915
941
  readonly info: "https://openai.com/research/whisper";
916
942
  };
943
+ }, {
944
+ readonly id: "a226909f-eef8-4265-a3a0-90db0422762e";
945
+ readonly source: 1;
946
+ readonly name: "@cf/deepgram/nova-3";
947
+ readonly description: "Transcribe audio using Deepgram’s speech-to-text model";
948
+ readonly created_at: "2025-06-05 16:05:15.199";
949
+ readonly tags: readonly [];
950
+ readonly properties: {
951
+ readonly async_queue: true;
952
+ readonly price: readonly [{
953
+ readonly unit: "per audio minute";
954
+ readonly price: 0.0052;
955
+ readonly currency: "USD";
956
+ }];
957
+ readonly partner: true;
958
+ readonly realtime: true;
959
+ };
917
960
  }, {
918
961
  readonly id: "2169496d-9c0e-4e49-8399-c44ee66bff7d";
919
962
  readonly source: 1;
@@ -1030,6 +1073,25 @@ export declare const workersAiCatalog: {
1030
1073
  readonly properties: {
1031
1074
  readonly info: "https://huggingface.co/Lykon/DreamShaper";
1032
1075
  };
1076
+ }, {
1077
+ readonly id: "724608fa-983e-495d-b95c-340d6b7e78be";
1078
+ readonly source: 1;
1079
+ readonly name: "@cf/leonardo/phoenix-1.0";
1080
+ readonly description: "Phoenix 1.0 is a model by Leonardo.Ai that generates images with exceptional prompt adherence and coherent text.";
1081
+ readonly created_at: "2025-08-25 18:12:18.073";
1082
+ readonly tags: readonly [];
1083
+ readonly properties: {
1084
+ readonly price: readonly [{
1085
+ readonly unit: "per 512 by 512 tile";
1086
+ readonly price: 0.0058;
1087
+ readonly currency: "USD";
1088
+ }, {
1089
+ readonly unit: "per step";
1090
+ readonly price: 0.00011;
1091
+ readonly currency: "USD";
1092
+ }];
1093
+ readonly partner: true;
1094
+ };
1033
1095
  }, {
1034
1096
  readonly id: "6d52253a-b731-4a03-b203-cde2d4fae871";
1035
1097
  readonly source: 1;
@@ -1064,6 +1126,25 @@ export declare const workersAiCatalog: {
1064
1126
  readonly info: "https://huggingface.co/runwayml/stable-diffusion-v1-5";
1065
1127
  readonly terms: "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE";
1066
1128
  };
1129
+ }, {
1130
+ readonly id: "0e372c11-8720-46c9-a02d-666188a22dae";
1131
+ readonly source: 1;
1132
+ readonly name: "@cf/leonardo/lucid-origin";
1133
+ readonly description: "Lucid Origin from Leonardo.AI is their most adaptable and prompt-responsive model to date. Whether you're generating images with sharp graphic design, stunning full-HD renders, or highly specific creative direction, it adheres closely to your prompts, renders text with accuracy, and supports a wide array of visual styles and aesthetics – from stylized concept art to crisp product mockups.\n";
1134
+ readonly created_at: "2025-08-25 19:21:28.770";
1135
+ readonly tags: readonly [];
1136
+ readonly properties: {
1137
+ readonly price: readonly [{
1138
+ readonly unit: "per 512 by 512 tile";
1139
+ readonly price: 0.007;
1140
+ readonly currency: "USD";
1141
+ }, {
1142
+ readonly unit: "per step";
1143
+ readonly price: 0.00013;
1144
+ readonly currency: "USD";
1145
+ }];
1146
+ readonly partner: true;
1147
+ };
1067
1148
  }];
1068
1149
  };
1069
1150
  readonly 'Image Classification': {
@@ -1,5 +1,30 @@
1
1
  export const workersAiCatalog = {
2
2
  modelGroups: {
3
+ 'Dumb Pipe': {
4
+ id: 'ccb1ca5a-043d-41a7-8a3b-61017b2796fd',
5
+ description: "Internal - Dumb Pipe models don't use tensors",
6
+ models: [
7
+ {
8
+ id: 'fe8904cf-e20e-4884-b829-ed7cec0a01cb',
9
+ source: 1,
10
+ name: '@cf/pipecat-ai/smart-turn-v2',
11
+ description: 'An open source, community-driven, native audio turn detection model in 2nd version',
12
+ created_at: '2025-08-04 10:08:04.219',
13
+ tags: [],
14
+ properties: {
15
+ async_queue: true,
16
+ price: [
17
+ {
18
+ unit: 'per audio minute',
19
+ price: 0.00034,
20
+ currency: 'USD',
21
+ },
22
+ ],
23
+ realtime: true,
24
+ },
25
+ },
26
+ ],
27
+ },
3
28
  'Text Generation': {
4
29
  id: 'c329a1f9-323d-4e91-b2aa-582dd4188d34',
5
30
  description: 'Family of generative text models, such as large language models (LLM), that can be adapted for a variety of natural language tasks.',
@@ -731,6 +756,7 @@ export const workersAiCatalog = {
731
756
  created_at: '2025-08-05 10:49:53.265',
732
757
  tags: [],
733
758
  properties: {
759
+ async_queue: true,
734
760
  context_window: 128000,
735
761
  price: [
736
762
  {
@@ -927,20 +953,6 @@ export const workersAiCatalog = {
927
953
  output_dimensions: 768,
928
954
  },
929
955
  },
930
- {
931
- id: '09e0e83d-b055-49c9-81a5-c13250a176a7',
932
- source: 1,
933
- name: '@cf/baai/omni-bge-base-en-v1.5',
934
- description: 'BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector',
935
- created_at: '2025-08-24 11:06:58.799',
936
- tags: [],
937
- properties: {
938
- async_queue: true,
939
- info: 'https://huggingface.co/BAAI/bge-base-en-v1.5',
940
- max_input_tokens: 512,
941
- output_dimensions: 768,
942
- },
943
- },
944
956
  {
945
957
  id: '01bc2fb0-4bca-4598-b985-d2584a3f46c0',
946
958
  source: 1,
@@ -1026,6 +1038,26 @@ export const workersAiCatalog = {
1026
1038
  ],
1027
1039
  },
1028
1040
  },
1041
+ {
1042
+ id: '1f55679f-009e-4456-aa4f-049a62b4b6a0',
1043
+ source: 1,
1044
+ name: '@cf/deepgram/aura-1',
1045
+ description: 'Aura is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.',
1046
+ created_at: '2025-08-27 01:18:18.880',
1047
+ tags: [],
1048
+ properties: {
1049
+ async_queue: true,
1050
+ price: [
1051
+ {
1052
+ unit: 'per audio minute',
1053
+ price: 0,
1054
+ currency: 'USD',
1055
+ },
1056
+ ],
1057
+ partner: true,
1058
+ realtime: true,
1059
+ },
1060
+ },
1029
1061
  ],
1030
1062
  },
1031
1063
  'Automatic Speech Recognition': {
@@ -1050,6 +1082,26 @@ export const workersAiCatalog = {
1050
1082
  info: 'https://openai.com/research/whisper',
1051
1083
  },
1052
1084
  },
1085
+ {
1086
+ id: 'a226909f-eef8-4265-a3a0-90db0422762e',
1087
+ source: 1,
1088
+ name: '@cf/deepgram/nova-3',
1089
+ description: 'Transcribe audio using Deepgram’s speech-to-text model',
1090
+ created_at: '2025-06-05 16:05:15.199',
1091
+ tags: [],
1092
+ properties: {
1093
+ async_queue: true,
1094
+ price: [
1095
+ {
1096
+ unit: 'per audio minute',
1097
+ price: 0.0052,
1098
+ currency: 'USD',
1099
+ },
1100
+ ],
1101
+ partner: true,
1102
+ realtime: true,
1103
+ },
1104
+ },
1053
1105
  {
1054
1106
  id: '2169496d-9c0e-4e49-8399-c44ee66bff7d',
1055
1107
  source: 1,
@@ -1185,6 +1237,29 @@ export const workersAiCatalog = {
1185
1237
  info: 'https://huggingface.co/Lykon/DreamShaper',
1186
1238
  },
1187
1239
  },
1240
+ {
1241
+ id: '724608fa-983e-495d-b95c-340d6b7e78be',
1242
+ source: 1,
1243
+ name: '@cf/leonardo/phoenix-1.0',
1244
+ description: 'Phoenix 1.0 is a model by Leonardo.Ai that generates images with exceptional prompt adherence and coherent text.',
1245
+ created_at: '2025-08-25 18:12:18.073',
1246
+ tags: [],
1247
+ properties: {
1248
+ price: [
1249
+ {
1250
+ unit: 'per 512 by 512 tile',
1251
+ price: 0.0058,
1252
+ currency: 'USD',
1253
+ },
1254
+ {
1255
+ unit: 'per step',
1256
+ price: 0.00011,
1257
+ currency: 'USD',
1258
+ },
1259
+ ],
1260
+ partner: true,
1261
+ },
1262
+ },
1188
1263
  {
1189
1264
  id: '6d52253a-b731-4a03-b203-cde2d4fae871',
1190
1265
  source: 1,
@@ -1225,6 +1300,29 @@ export const workersAiCatalog = {
1225
1300
  terms: 'https://github.com/runwayml/stable-diffusion/blob/main/LICENSE',
1226
1301
  },
1227
1302
  },
1303
+ {
1304
+ id: '0e372c11-8720-46c9-a02d-666188a22dae',
1305
+ source: 1,
1306
+ name: '@cf/leonardo/lucid-origin',
1307
+ description: "Lucid Origin from Leonardo.AI is their most adaptable and prompt-responsive model to date. Whether you're generating images with sharp graphic design, stunning full-HD renders, or highly specific creative direction, it adheres closely to your prompts, renders text with accuracy, and supports a wide array of visual styles and aesthetics – from stylized concept art to crisp product mockups.\n",
1308
+ created_at: '2025-08-25 19:21:28.770',
1309
+ tags: [],
1310
+ properties: {
1311
+ price: [
1312
+ {
1313
+ unit: 'per 512 by 512 tile',
1314
+ price: 0.007,
1315
+ currency: 'USD',
1316
+ },
1317
+ {
1318
+ unit: 'per step',
1319
+ price: 0.00013,
1320
+ currency: 'USD',
1321
+ },
1322
+ ],
1323
+ partner: true,
1324
+ },
1325
+ },
1228
1326
  ],
1229
1327
  },
1230
1328
  'Image Classification': {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@chainfuse/types",
3
- "version": "2.11.7",
3
+ "version": "2.11.8",
4
4
  "description": "",
5
5
  "author": "ChainFuse",
6
6
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -95,11 +95,11 @@
95
95
  "prettier": "@demosjarco/prettier-config",
96
96
  "dependencies": {
97
97
  "validator": "^13.15.15",
98
- "zod": "^4.1.3"
98
+ "zod": "^4.1.5"
99
99
  },
100
100
  "devDependencies": {
101
- "@cloudflare/workers-types": "^4.20250826.0",
102
- "@types/validator": "^13.15.2"
101
+ "@cloudflare/workers-types": "^4.20250903.0",
102
+ "@types/validator": "^13.15.3"
103
103
  },
104
- "gitHead": "f71bbfa34fcb4096bd830f2bbf17528c02d4be55"
104
+ "gitHead": "83cf299d3a269f53051512f24470cdb80e167209"
105
105
  }