@chainfuse/types 4.0.7 → 4.0.8

@@ -104,6 +104,7 @@ export declare const ZodLanguageModelValues: zm.ZodMiniEnum<{
  "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq": "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq";
  "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora": "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora";
  "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast": "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast";
+ "workersai:@cf/ibm-granite/granite-4.0-h-micro": "workersai:@cf/ibm-granite/granite-4.0-h-micro";
  "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq": "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq";
  "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq": "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq";
  "workersai:@cf/qwen/qwen2.5-coder-32b-instruct": "workersai:@cf/qwen/qwen2.5-coder-32b-instruct";
@@ -180,6 +181,7 @@ export declare const ZodLanguageModelValues4: z4.ZodEnum<{
  "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq": "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq";
  "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora": "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora";
  "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast": "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast";
+ "workersai:@cf/ibm-granite/granite-4.0-h-micro": "workersai:@cf/ibm-granite/granite-4.0-h-micro";
  "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq": "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq";
  "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq": "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq";
  "workersai:@cf/qwen/qwen2.5-coder-32b-instruct": "workersai:@cf/qwen/qwen2.5-coder-32b-instruct";
@@ -433,6 +433,25 @@ export declare const workersAiCatalog: {
  readonly function_calling: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE";
  };
+ }, {
+ readonly id: "7952d0cc-cb00-4e10-be02-667565c2ee0f";
+ readonly source: 1;
+ readonly name: "@cf/ibm-granite/granite-4.0-h-micro";
+ readonly description: "Granite 4.0 instruct models deliver strong performance across benchmarks, achieving industry-leading results in key agentic tasks like instruction following and function calling. These efficiencies make the models well-suited for a wide range of use cases like retrieval-augmented generation (RAG), multi-agent workflows, and edge deployments.";
+ readonly created_at: "2025-10-07 18:46:29.436";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 131000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.017;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 0.11;
+ readonly currency: "USD";
+ }];
+ };
  }, {
  readonly id: "673c56cc-8553-49a1-b179-dd549ec9209a";
  readonly source: 2;
@@ -818,6 +837,7 @@ export declare const workersAiCatalog: {
  readonly tags: readonly [];
  readonly properties: {
  readonly async_queue: true;
+ readonly context_window: 60000;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.012;
@@ -953,6 +973,23 @@ export declare const workersAiCatalog: {
  readonly currency: "USD";
  }];
  };
+ }, {
+ readonly id: "c5255b94-2161-4779-bd25-54f061829a2a";
+ readonly source: 1;
+ readonly name: "@cf/deepgram/aura-2-es";
+ readonly description: "Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.";
+ readonly created_at: "2025-10-09 22:42:37.002";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly async_queue: true;
+ readonly price: readonly [{
+ readonly unit: "per 1k characters";
+ readonly price: 0.03;
+ readonly currency: "USD";
+ }];
+ readonly partner: true;
+ readonly realtime: true;
+ };
  }, {
  readonly id: "1f55679f-009e-4456-aa4f-049a62b4b6a0";
  readonly source: 1;
@@ -963,8 +1000,25 @@ export declare const workersAiCatalog: {
  readonly properties: {
  readonly async_queue: true;
  readonly price: readonly [{
- readonly unit: "per audio minute";
- readonly price: 0;
+ readonly unit: "per 1k characters";
+ readonly price: 0.015;
+ readonly currency: "USD";
+ }];
+ readonly partner: true;
+ readonly realtime: true;
+ };
+ }, {
+ readonly id: "01564c52-8717-47dc-8efd-907a2ca18301";
+ readonly source: 1;
+ readonly name: "@cf/deepgram/aura-2-en";
+ readonly description: "Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.";
+ readonly created_at: "2025-10-09 22:19:34.483";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly async_queue: true;
+ readonly price: readonly [{
+ readonly unit: "per 1k characters";
+ readonly price: 0.03;
  readonly currency: "USD";
  }];
  readonly partner: true;
@@ -990,6 +1044,17 @@ export declare const workersAiCatalog: {
  }];
  readonly info: "https://openai.com/research/whisper";
  };
+ }, {
+ readonly id: "a2a2afba-b609-4325-8c41-5791ce962239";
+ readonly source: 1;
+ readonly name: "@cf/deepgram/flux";
+ readonly description: "Flux is the first conversational speech recognition model built specifically for voice agents.";
+ readonly created_at: "2025-09-29 21:07:55.114";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly partner: true;
+ readonly realtime: true;
+ };
  }, {
  readonly id: "a226909f-eef8-4265-a3a0-90db0422762e";
  readonly source: 1;
@@ -1003,6 +1068,10 @@ export declare const workersAiCatalog: {
  readonly unit: "per audio minute";
  readonly price: 0.0052;
  readonly currency: "USD";
+ }, {
+ readonly unit: "per audio minute (websocket)";
+ readonly price: 0.0092;
+ readonly currency: "USD";
  }];
  readonly partner: true;
  readonly realtime: true;
@@ -494,6 +494,29 @@ export const workersAiCatalog = {
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE',
  },
  },
+ {
+ id: '7952d0cc-cb00-4e10-be02-667565c2ee0f',
+ source: 1,
+ name: '@cf/ibm-granite/granite-4.0-h-micro',
+ description: 'Granite 4.0 instruct models deliver strong performance across benchmarks, achieving industry-leading results in key agentic tasks like instruction following and function calling. These efficiencies make the models well-suited for a wide range of use cases like retrieval-augmented generation (RAG), multi-agent workflows, and edge deployments.',
+ created_at: '2025-10-07 18:46:29.436',
+ tags: [],
+ properties: {
+ context_window: 131000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.017,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 0.11,
+ currency: 'USD',
+ },
+ ],
+ },
+ },
  {
  id: '673c56cc-8553-49a1-b179-dd549ec9209a',
  source: 2,
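The new catalog entry exposes per-million-token pricing and a 131,000-token context window. A rough cost estimate based on the price-array shape shown above; the helper and its name are illustrative, not part of the package:

// Hypothetical helper: estimates USD cost for one request against an entry
// shaped like the catalog objects above ({ properties: { price: [...] } }).
interface PriceEntry { unit: string; price: number; currency: string; }

function estimateTokenCost(prices: readonly PriceEntry[], inputTokens: number, outputTokens: number): number {
  const input = prices.find((p) => p.unit === 'per M input tokens')?.price ?? 0;
  const output = prices.find((p) => p.unit === 'per M output tokens')?.price ?? 0;
  return (inputTokens / 1_000_000) * input + (outputTokens / 1_000_000) * output;
}

// Using the Granite 4.0 prices above: 50k input + 2k output tokens
// = (0.05 * 0.017) + (0.002 * 0.11) = $0.00107.
const usd = estimateTokenCost(
  [
    { unit: 'per M input tokens', price: 0.017, currency: 'USD' },
    { unit: 'per M output tokens', price: 0.11, currency: 'USD' },
  ],
  50_000,
  2_000,
);
console.log(usd);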
@@ -933,6 +956,7 @@ export const workersAiCatalog = {
  tags: [],
  properties: {
  async_queue: true,
+ context_window: 60000,
  price: [
  {
  unit: 'per M input tokens',
@@ -1095,6 +1119,26 @@ export const workersAiCatalog = {
  ],
  },
  },
+ {
+ id: 'c5255b94-2161-4779-bd25-54f061829a2a',
+ source: 1,
+ name: '@cf/deepgram/aura-2-es',
+ description: 'Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.',
+ created_at: '2025-10-09 22:42:37.002',
+ tags: [],
+ properties: {
+ async_queue: true,
+ price: [
+ {
+ unit: 'per 1k characters',
+ price: 0.03,
+ currency: 'USD',
+ },
+ ],
+ partner: true,
+ realtime: true,
+ },
+ },
  {
  id: '1f55679f-009e-4456-aa4f-049a62b4b6a0',
  source: 1,
@@ -1106,8 +1150,28 @@ export const workersAiCatalog = {
  async_queue: true,
  price: [
  {
- unit: 'per audio minute',
- price: 0,
+ unit: 'per 1k characters',
+ price: 0.015,
+ currency: 'USD',
+ },
+ ],
+ partner: true,
+ realtime: true,
+ },
+ },
+ {
+ id: '01564c52-8717-47dc-8efd-907a2ca18301',
+ source: 1,
+ name: '@cf/deepgram/aura-2-en',
+ description: 'Aura-2 is a context-aware text-to-speech (TTS) model that applies natural pacing, expressiveness, and fillers based on the context of the provided text. The quality of your text input directly impacts the naturalness of the audio output.',
+ created_at: '2025-10-09 22:19:34.483',
+ tags: [],
+ properties: {
+ async_queue: true,
+ price: [
+ {
+ unit: 'per 1k characters',
+ price: 0.03,
  currency: 'USD',
  },
  ],
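Both Aura-2 entries, and the repriced entry directly above them, bill per 1k characters of input text rather than per audio minute. A quick sketch of what that unit implies for synthesis cost; the helper is hypothetical and not part of the package:

// Hypothetical helper: cost of synthesizing `text` at a 'per 1k characters' rate,
// e.g. 0.03 USD for @cf/deepgram/aura-2-en or 0.015 USD for the repriced entry above.
function estimateTtsCost(text: string, pricePer1kChars: number): number {
  return (text.length / 1_000) * pricePer1kChars;
}

// A 5,000-character script at $0.03 per 1k characters = $0.15.
const cost = estimateTtsCost('a'.repeat(5_000), 0.03);
console.log(cost);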
@@ -1139,6 +1203,18 @@ export const workersAiCatalog = {
  info: 'https://openai.com/research/whisper',
  },
  },
+ {
+ id: 'a2a2afba-b609-4325-8c41-5791ce962239',
+ source: 1,
+ name: '@cf/deepgram/flux',
+ description: 'Flux is the first conversational speech recognition model built specifically for voice agents.',
+ created_at: '2025-09-29 21:07:55.114',
+ tags: [],
+ properties: {
+ partner: true,
+ realtime: true,
+ },
+ },
  {
  id: 'a226909f-eef8-4265-a3a0-90db0422762e',
  source: 1,
@@ -1154,6 +1230,11 @@ export const workersAiCatalog = {
  price: 0.0052,
  currency: 'USD',
  },
+ {
+ unit: 'per audio minute (websocket)',
+ price: 0.0092,
+ currency: 'USD',
+ },
  ],
  partner: true,
  realtime: true,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@chainfuse/types",
- "version": "4.0.7",
+ "version": "4.0.8",
  "description": "",
  "author": "ChainFuse",
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -98,12 +98,12 @@
  },
  "prettier": "@demosjarco/prettier-config",
  "dependencies": {
- "validator": "^13.15.15",
- "zod": "^4.1.11"
+ "validator": "^13.15.20",
+ "zod": "^4.1.12"
  },
  "devDependencies": {
- "@cloudflare/workers-types": "^4.20250927.0",
+ "@cloudflare/workers-types": "^4.20251014.0",
  "@types/validator": "^13.15.3"
  },
- "gitHead": "dffad00676bba58bc4e92b0d868738f7092b2da5"
+ "gitHead": "8090cba39c0d4d7c769a1f291fca14e992cfeb43"
  }