@chainfuse/types 2.2.0 → 2.2.2
This diff compares publicly available versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those released versions.
- package/dist/ai-tools/azure/catalog.d.ts +239 -237
- package/dist/ai-tools/azure/catalog.js +267 -265
- package/dist/ai-tools/workers-ai/catalog.d.ts +150 -3
- package/dist/ai-tools/workers-ai/catalog.js +185 -3
- package/dist/wf/index.js +1 -1
- package/package.json +6 -6
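Every hunk in the catalog diff below makes one or both of the same two changes: each model entry in workersAiCatalog gains a created_at timestamp, and metered models gain a price array under properties. As a reading aid, here is a minimal TypeScript sketch of the entry shape the hunks imply; the interface names and the optionality of each field are assumptions, while the field names and value formats come straight from the diff.

// A reading aid, not part of the package: a sketch of the model-entry
// shape implied by the hunks below. Interface names and field optionality
// are assumptions; the actual declarations live in
// package/dist/ai-tools/workers-ai/catalog.d.ts.
interface PriceEntry {
    unit: string;  // e.g. 'per M input tokens', 'per audio minute'
    price: string; // display string, e.g. '$0.28'
}

interface WorkersAiModelEntry {
    id: string;         // UUID, e.g. 'ca54bcd6-0d98-4739-9b3b-5c8b4402193d'
    source: 1 | 2;      // 1 on @cf/* entries, 2 on @hf/* entries in this diff
    name: string;       // e.g. '@cf/meta/llama-3-8b-instruct'
    description: string;
    created_at: string; // added in 2.2.x, e.g. '2024-04-18 20:31:47.273'
    tags: string[];
    properties: {
        beta?: boolean;
        price?: PriceEntry[]; // added in 2.2.x on metered models
        context_window?: number;
        function_calling?: boolean;
        info?: string;
        terms?: string;
    };
}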
@@ -9,6 +9,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/qwen/qwen1.5-0.5b-chat',
             description: 'Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.',
+            created_at: '2024-02-27 18:23:37.344',
             tags: [],
             properties: {
                 beta: true,
@@ -21,6 +22,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/google/gemma-2b-it-lora',
             description: 'This is a Gemma-2B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.',
+            created_at: '2024-04-02 00:19:34.669',
             tags: [],
             properties: {
                 beta: true,
@@ -33,6 +35,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/nexusflow/starling-lm-7b-beta',
             description: 'We introduce Starling-LM-7B-beta, an open large language model (LLM) trained by Reinforcement Learning from AI Feedback (RLAIF). Starling-LM-7B-beta is trained from Openchat-3.5-0106 with our new reward model Nexusflow/Starling-RM-34B and policy optimization method Fine-Tuning Language Models from Human Preferences (PPO).',
+            created_at: '2024-04-01 23:49:31.797',
             tags: [],
             properties: {
                 beta: true,
@@ -48,8 +51,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3-8b-instruct',
             description: 'Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.',
+            created_at: '2024-04-18 20:31:47.273',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.28',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.83',
+                    },
+                ],
                 context_window: 7968,
                 info: 'https://llama.meta.com',
                 terms: 'https://llama.meta.com/llama3/license/#',
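The hunk above is the first of several that attach list prices, stored as display strings rather than numbers. Anything that wants to do arithmetic with them has to parse the strings first; the sketch below shows one way, under stated assumptions (estimateCostUSD is a hypothetical helper, not part of the package, and it assumes the '$X.YZ' and 'per M ... tokens' unit formats seen in this diff).

// Hypothetical helper, not part of @chainfuse/types: estimate request
// cost from a price array like the ones added in this diff. Assumes
// prices are '$X.YZ' strings quoted per million tokens.
function estimateCostUSD(
    price: { unit: string; price: string }[],
    inputTokens: number,
    outputTokens: number,
): number {
    let totalUSD = 0;
    for (const entry of price) {
        const perMillion = Number(entry.price.replace('$', '')); // '$0.28' -> 0.28
        if (entry.unit === 'per M input tokens') {
            totalUSD += (inputTokens / 1_000_000) * perMillion;
        } else if (entry.unit === 'per M output tokens') {
            totalUSD += (outputTokens / 1_000_000) * perMillion;
        }
    }
    return totalUSD;
}

// For @cf/meta/llama-3-8b-instruct above ($0.28 in / $0.83 out),
// 10,000 input tokens + 2,000 output tokens comes to about $0.0045.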
@@ -60,8 +74,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.2-3b-instruct',
             description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
+            created_at: '2024-09-25 20:05:43.986',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.051',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.34',
+                    },
+                ],
                 context_window: 128000,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
             },
@@ -71,6 +96,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/llamaguard-7b-awq',
             description: 'Llama Guard is a model for classifying the safety of LLM prompts and responses, using a taxonomy of safety risks.\n',
+            created_at: '2024-02-06 18:13:59.060',
             tags: [],
             properties: {
                 beta: true,
@@ -82,6 +108,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/neural-chat-7b-v3-1-awq',
             description: 'This model is a fine-tuned 7B parameter LLM on the Intel Gaudi 2 processor from the mistralai/Mistral-7B-v0.1 on the open source dataset Open-Orca/SlimOrca.',
+            created_at: '2024-02-06 18:12:30.722',
             tags: [],
             properties: {
                 beta: true,
@@ -93,14 +120,27 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-guard-3-8b',
             description: 'Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.',
+            created_at: '2025-01-22 23:26:23.495',
             tags: [],
-            properties: {},
+            properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.48',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.030',
+                    },
+                ],
+            },
         },
         {
             id: 'ca54bcd6-0d98-4739-9b3b-5c8b4402193d',
             source: 1,
             name: '@cf/meta/llama-2-7b-chat-fp16',
             description: 'Full precision (fp16) generative text model with 7 billion parameters from Meta',
+            created_at: '2023-11-07 11:54:20.229',
             tags: [],
             properties: {
                 beta: true,
@@ -114,6 +154,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/mistral/mistral-7b-instruct-v0.1',
             description: 'Instruct fine-tuned version of the Mistral-7b generative text model with 7 billion parameters',
+            created_at: '2023-11-07 11:54:20.229',
             tags: [],
             properties: {
                 beta: true,
@@ -127,6 +168,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/mistral/mistral-7b-instruct-v0.2-lora',
             description: 'The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2.',
+            created_at: '2024-04-01 22:14:40.529',
             tags: [],
             properties: {
                 beta: true,
@@ -139,6 +181,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
             description: 'The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. This is the chat model finetuned on top of TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T.',
+            created_at: '2024-02-27 18:25:37.524',
             tags: [],
             properties: {
                 beta: true,
@@ -151,6 +194,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/mistral/mistral-7b-instruct-v0.2',
             description: 'The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2. Mistral-7B-v0.2 has the following changes compared to Mistral-7B-v0.1: 32k context window (vs 8k context in v0.1), rope-theta = 1e6, and no Sliding-Window Attention.',
+            created_at: '2024-04-02 13:00:59.244',
             tags: [],
             properties: {
                 beta: true,
@@ -167,6 +211,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/fblgit/una-cybertron-7b-v2-bf16',
             description: "Cybertron 7B v2 is a 7B MistralAI based model, best on it's series. It was trained with SFT, DPO and UNA (Unified Neural Alignment) on multiple datasets.",
+            created_at: '2024-04-24 14:37:19.494',
             tags: [],
             properties: {
                 beta: true,
@@ -178,9 +223,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b',
             description: 'DeepSeek-R1-Distill-Qwen-32B is a model distilled from DeepSeek-R1 based on Qwen2.5. It outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.',
+            created_at: '2025-01-22 19:48:55.776',
             tags: [],
             properties: {
-
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.50',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$4.88',
+                    },
+                ],
                 terms: 'https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE',
             },
@@ -189,6 +244,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/thebloke/discolm-german-7b-v1-awq',
             description: 'DiscoLM German 7b is a Mistral-based large language model with a focus on German-language applications. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.',
+            created_at: '2024-02-27 18:23:05.178',
             tags: [],
             properties: {
                 beta: true,
@@ -201,6 +257,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-2-7b-chat-int8',
             description: 'Quantized (int8) generative text model with 7 billion parameters from Meta',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 context_window: 8192,
@@ -211,8 +268,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.1-8b-instruct-fp8',
             description: 'Llama 3.1 8B quantized to FP8 precision',
+            created_at: '2024-07-25 17:28:43.328',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.15',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.29',
+                    },
+                ],
                 context_window: 32000,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
             },
@@ -222,6 +290,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/mistral-7b-instruct-v0.1-awq',
             description: 'Mistral 7B Instruct v0.1 AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Mistral variant.',
+            created_at: '2023-11-24 00:27:15.869',
             tags: [],
             properties: {
                 beta: true,
@@ -234,6 +303,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/qwen/qwen1.5-7b-chat-awq',
             description: 'Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.',
+            created_at: '2024-02-27 18:24:11.709',
             tags: [],
             properties: {
                 beta: true,
@@ -246,8 +316,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.2-1b-instruct',
             description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
+            created_at: '2024-09-25 21:36:32.050',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.027',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.20',
+                    },
+                ],
                 context_window: 60000,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
             },
@@ -257,6 +338,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/llama-2-13b-chat-awq',
             description: 'Llama 2 13B Chat AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Llama 2 variant.',
+            created_at: '2023-11-24 00:27:15.869',
             tags: [],
             properties: {
                 beta: true,
@@ -269,6 +351,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/deepseek-coder-6.7b-base-awq',
             description: 'Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.',
+            created_at: '2024-02-06 18:16:27.183',
             tags: [],
             properties: {
                 beta: true,
@@ -281,6 +364,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta-llama/llama-2-7b-chat-hf-lora',
             description: 'This is a Llama2 base model that Cloudflare dedicated for inference with LoRA adapters. Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. ',
+            created_at: '2024-04-02 00:17:18.579',
             tags: [],
             properties: {
                 beta: true,
@@ -293,8 +377,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.3-70b-instruct-fp8-fast',
             description: 'Llama 3.3 70B quantized to fp8 precision, optimized to be faster.',
+            created_at: '2024-12-06 17:09:18.338',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.29',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$2.25',
+                    },
+                ],
                 context_window: 24000,
                 function_calling: true,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE',
@@ -305,6 +400,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/openhermes-2.5-mistral-7b-awq',
             description: 'OpenHermes 2.5 Mistral 7B is a state of the art Mistral Fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.',
+            created_at: '2024-02-06 18:04:22.846',
             tags: [],
             properties: {
                 beta: true,
@@ -316,6 +412,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/deepseek-coder-6.7b-instruct-awq',
             description: 'Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.',
+            created_at: '2024-02-06 18:18:27.462',
             tags: [],
             properties: {
                 beta: true,
@@ -328,6 +425,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/deepseek-ai/deepseek-math-7b-instruct',
             description: 'DeepSeekMath-Instruct 7B is a mathematically instructed tuning model derived from DeepSeekMath-Base 7B. DeepSeekMath is initialized with DeepSeek-Coder-v1.5 7B and continues pre-training on math-related tokens sourced from Common Crawl, together with natural language and code data for 500B tokens.',
+            created_at: '2024-02-27 17:54:17.459',
             tags: [],
             properties: {
                 beta: true,
@@ -341,6 +439,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/tiiuae/falcon-7b-instruct',
             description: 'Falcon-7B-Instruct is a 7B parameters causal decoder-only model built by TII based on Falcon-7B and finetuned on a mixture of chat/instruct datasets.',
+            created_at: '2024-02-27 18:21:15.796',
             tags: [],
             properties: {
                 beta: true,
@@ -353,6 +452,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/nousresearch/hermes-2-pro-mistral-7b',
             description: 'Hermes 2 Pro on Mistral 7B is the new flagship 7B Hermes! Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.',
+            created_at: '2024-04-01 23:45:53.800',
             tags: [],
             properties: {
                 beta: true,
@@ -366,8 +466,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.1-8b-instruct',
             description: 'The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models. The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
+            created_at: '2024-07-18 22:53:33.746',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.28',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.83',
+                    },
+                ],
                 context_window: 7968,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
             },
@@ -377,8 +488,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.1-8b-instruct-awq',
             description: 'Quantized (int4) generative text model with 8 billion parameters from Meta.\n',
+            created_at: '2024-07-25 17:46:04.304',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.12',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.27',
+                    },
+                ],
                 context_window: 8192,
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
             },
@@ -388,6 +510,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/thebloke/zephyr-7b-beta-awq',
             description: 'Zephyr 7B Beta AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Zephyr model variant.',
+            created_at: '2023-11-24 00:27:15.869',
             tags: [],
             properties: {
                 beta: true,
@@ -400,6 +523,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/google/gemma-7b-it-lora',
             description: ' This is a Gemma-7B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.',
+            created_at: '2024-04-02 00:20:19.633',
             tags: [],
             properties: {
                 beta: true,
@@ -412,6 +536,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/qwen/qwen1.5-1.8b-chat',
             description: 'Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.',
+            created_at: '2024-02-27 18:30:31.723',
             tags: [],
             properties: {
                 beta: true,
@@ -424,8 +549,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3-8b-instruct-awq',
             description: 'Quantized (int4) generative text model with 8 billion parameters from Meta.',
+            created_at: '2024-05-09 23:32:47.584',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.12',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.27',
+                    },
+                ],
                 context_window: 8192,
                 info: 'https://llama.meta.com',
                 terms: 'https://llama.meta.com/llama3/license/#',
@@ -436,8 +572,19 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/llama-3.2-11b-vision-instruct',
             description: ' The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.',
+            created_at: '2024-09-25 05:36:04.547',
             tags: [],
             properties: {
+                price: [
+                    {
+                        unit: 'per M input tokens',
+                        price: '$0.049',
+                    },
+                    {
+                        unit: 'per M output tokens',
+                        price: '$0.68',
+                    },
+                ],
                 terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
             },
         },
@@ -446,6 +593,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/defog/sqlcoder-7b-2',
             description: 'This model is intended to be used by non-technical users to understand data inside their SQL databases. ',
+            created_at: '2024-02-27 18:18:46.095',
             tags: [],
             properties: {
                 beta: true,
@@ -459,6 +607,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/microsoft/phi-2',
             description: 'Phi-2 is a Transformer-based model with a next-word prediction objective, trained on 1.4T tokens from multiple passes on a mixture of Synthetic and Web datasets for NLP and coding.',
+            created_at: '2024-02-27 18:26:21.126',
             tags: [],
             properties: {
                 beta: true,
@@ -471,6 +620,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/meta-llama/meta-llama-3-8b-instruct',
             description: 'Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.\t',
+            created_at: '2024-05-22 18:21:04.371',
             tags: [],
             properties: {
                 context_window: 8192,
@@ -481,6 +631,7 @@ export const workersAiCatalog = {
             source: 2,
             name: '@hf/google/gemma-7b-it',
             description: 'Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants.',
+            created_at: '2024-04-01 23:51:35.866',
             tags: [],
             properties: {
                 beta: true,
@@ -495,6 +646,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/qwen/qwen1.5-14b-chat-awq',
             description: 'Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.',
+            created_at: '2024-02-27 18:24:45.316',
             tags: [],
             properties: {
                 beta: true,
@@ -507,6 +659,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/openchat/openchat-3.5-0106',
             description: 'OpenChat is an innovative library of open-source language models, fine-tuned with C-RLFT - a strategy inspired by offline reinforcement learning.',
+            created_at: '2024-02-27 18:20:39.169',
             tags: [],
             properties: {
                 beta: true,
@@ -525,6 +678,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/baai/bge-m3',
             description: 'Multi-Functionality, Multi-Linguality, and Multi-Granularity embeddings model.',
+            created_at: '2024-05-22 19:27:09.781',
             tags: [],
             properties: {},
         },
@@ -533,6 +687,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/baai/bge-small-en-v1.5',
             description: 'BAAI general embedding (Small) model that transforms any given text into a 384-dimensional vector',
+            created_at: '2023-11-07 15:43:58.042',
             tags: [],
             properties: {
                 beta: true,
@@ -546,6 +701,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/baai/bge-base-en-v1.5',
             description: 'BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 beta: true,
@@ -559,6 +715,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/baai/bge-large-en-v1.5',
             description: 'BAAI general embedding (Large) model that transforms any given text into a 1024-dimensional vector',
+            created_at: '2023-11-07 15:43:58.042',
             tags: [],
             properties: {
                 beta: true,
@@ -578,6 +735,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/huggingface/distilbert-sst-2-int8',
             description: 'Distilled BERT model that was finetuned on SST-2 for sentiment classification',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 beta: true,
@@ -589,6 +747,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/baai/bge-reranker-base',
             description: 'Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. You can get a relevance score by inputting query and passage to the reranker. And the score can be mapped to a float value in [0,1] by sigmoid function.\n\n',
+            created_at: '2025-02-14 12:28:19.009',
             tags: [],
             properties: {},
         },
@@ -603,6 +762,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/facebook/detr-resnet-50',
             description: 'DEtection TRansformer (DETR) model trained end-to-end on COCO 2017 object detection (118k annotated images).',
+            created_at: '2024-02-27 17:43:51.922',
             tags: [],
             properties: {
                 beta: true,
@@ -619,6 +779,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/myshell-ai/melotts',
             description: 'MeloTTS is a high-quality multi-lingual text-to-speech library by MyShell.ai.',
+            created_at: '2024-07-19 15:51:04.819',
             tags: [],
             properties: {},
         },
@@ -633,6 +794,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/openai/whisper',
             description: 'Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multitasking model that can perform multilingual speech recognition, speech translation, and language identification.',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 beta: true,
@@ -644,6 +806,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/openai/whisper-tiny-en',
             description: 'Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalize to many datasets and domains without the need for fine-tuning. This is the English-only version of the Whisper Tiny model which was trained on the task of speech recognition.',
+            created_at: '2024-04-22 20:59:02.731',
             tags: [],
             properties: {
                 beta: true,
@@ -654,8 +817,16 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/openai/whisper-large-v3-turbo',
             description: 'Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. ',
+            created_at: '2024-05-22 00:02:18.656',
             tags: [],
-            properties: {},
+            properties: {
+                price: [
+                    {
+                        unit: 'per audio minute',
+                        price: '$0.00051',
+                    },
+                ],
+            },
         },
     ],
 },
@@ -668,6 +839,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/llava-hf/llava-1.5-7b-hf',
             description: 'LLaVA is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture.',
+            created_at: '2024-05-01 18:00:39.971',
             tags: [],
             properties: {
                 beta: true,
@@ -678,6 +850,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/unum/uform-gen2-qwen-500m',
             description: 'UForm-Gen is a small generative vision-language model primarily designed for Image Captioning and Visual Question Answering. The model was pre-trained on the internal image captioning dataset and fine-tuned on public instructions datasets: SVIT, LVIS, VQAs datasets.',
+            created_at: '2024-02-27 18:28:52.485',
             tags: [],
             properties: {
                 beta: true,
@@ -695,6 +868,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/runwayml/stable-diffusion-v1-5-inpainting',
             description: 'Stable Diffusion Inpainting is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask.',
+            created_at: '2024-02-27 17:23:57.528',
             tags: [],
             properties: {
                 beta: true,
@@ -707,6 +881,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/black-forest-labs/flux-1-schnell',
             description: 'FLUX.1 [schnell] is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. ',
+            created_at: '2024-08-29 16:37:39.541',
             tags: [],
             properties: {},
         },
@@ -715,6 +890,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/bytedance/stable-diffusion-xl-lightning',
             description: 'SDXL-Lightning is a lightning-fast text-to-image generation model. It can generate high-quality 1024px images in a few steps.',
+            created_at: '2024-02-27 17:41:29.578',
             tags: [],
             properties: {
                 beta: true,
@@ -726,6 +902,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/lykon/dreamshaper-8-lcm',
             description: 'Stable Diffusion model that has been fine-tuned to be better at photorealism without sacrificing range.',
+            created_at: '2024-02-27 17:40:38.881',
             tags: [],
             properties: {
                 beta: true,
@@ -737,6 +914,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/stabilityai/stable-diffusion-xl-base-1.0',
             description: 'Diffusion-based text-to-image generative model by Stability AI. Generates and modify images based on text prompts.',
+            created_at: '2023-11-10 10:54:43.694',
             tags: [],
             properties: {
                 beta: true,
@@ -749,6 +927,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/runwayml/stable-diffusion-v1-5-img2img',
             description: 'Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images. Img2img generate a new image from an input image with Stable Diffusion. ',
+            created_at: '2024-02-27 17:32:28.581',
             tags: [],
             properties: {
                 beta: true,
@@ -767,6 +946,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/microsoft/resnet-50',
             description: '50 layers deep image classification CNN trained on more than 1M images from ImageNet',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 beta: true,
@@ -784,6 +964,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/meta/m2m100-1.2b',
             description: 'Multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation',
+            created_at: '2023-09-25 19:21:11.898',
             tags: [],
             properties: {
                 beta: true,
@@ -803,6 +984,7 @@ export const workersAiCatalog = {
             source: 1,
             name: '@cf/facebook/bart-large-cnn',
             description: 'BART is a transformer encoder-encoder (seq2seq) model with a bidirectional (BERT-like) encoder and an autoregressive (GPT-like) decoder. You can use this model for text summarization.',
+            created_at: '2024-02-27 18:28:11.833',
             tags: [],
             properties: {
                 beta: true,
package/dist/wf/index.js (CHANGED)