@chainfuse/types 0.0.5 → 0.1.0
This diff shows the contents of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- package/README.md +6 -0
- package/dist/d1/index.js +2 -0
- package/dist/d1/tenants/index.js +1 -0
- package/dist/d1/users/index.js +1 -0
- package/dist/discourse/index.js +2 -0
- package/dist/discourse/post/index.js +7 -0
- package/dist/discourse/topic/index.js +8 -0
- package/dist/index.js +2 -0
- package/dist/super-ai/index.d.ts +546 -0
- package/dist/super-ai/index.js +34 -0
- package/dist/super-ai/workers-ai-catalog.d.ts +635 -0
- package/dist/super-ai/workers-ai-catalog.js +703 -0
- package/package.json +20 -4
package/dist/super-ai/workers-ai-catalog.d.ts
@@ -0,0 +1,635 @@
+export declare const workersAiCatalog: {
+    readonly modelGroups: {
+        readonly 'Text Generation': {
+            readonly id: "c329a1f9-323d-4e91-b2aa-582dd4188d34";
+            readonly description: "Family of generative text models, such as large language models (LLM), that can be adapted for a variety of natural language tasks.";
+            readonly models: readonly [{
+                readonly id: "f8703a00-ed54-4f98-bdc3-cd9a813286f3";
+                readonly source: 1;
+                readonly name: "@cf/qwen/qwen1.5-0.5b-chat";
+                readonly description: "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/qwen/qwen1.5-0.5b-chat";
+                };
+            }, {
+                readonly id: "e8e8abe4-a372-4c13-815f-4688ba655c8e";
+                readonly source: 1;
+                readonly name: "@cf/google/gemma-2b-it-lora";
+                readonly description: "This is a Gemma-2B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly lora: true;
+                };
+            }, {
+                readonly id: "e5ca943b-720f-4e66-aa8f-40e3d2770933";
+                readonly source: 2;
+                readonly name: "@hf/nexusflow/starling-lm-7b-beta";
+                readonly description: "We introduce Starling-LM-7B-beta, an open large language model (LLM) trained by Reinforcement Learning from AI Feedback (RLAIF). Starling-LM-7B-beta is trained from Openchat-3.5-0106 with our new reward model Nexusflow/Starling-RM-34B and policy optimization method Fine-Tuning Language Models from Human Preferences (PPO).";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/Nexusflow/Starling-LM-7B-beta";
+                    readonly max_batch_prefill_tokens: 8192;
+                    readonly max_input_length: 3072;
+                    readonly max_total_tokens: 4096;
+                };
+            }, {
+                readonly id: "e11d8f45-7b08-499a-9eeb-71d4d3c8cbf9";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-3-8b-instruct";
+                readonly description: "Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://llama.meta.com";
+                    readonly terms: "https://llama.meta.com/llama3/license/#";
+                };
+            }, {
+                readonly id: "d9b7a55c-cefa-4208-8ab3-11497a2b046c";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/llamaguard-7b-awq";
+                readonly description: "Llama Guard is a model for classifying the safety of LLM prompts and responses, using a taxonomy of safety risks.\n";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }, {
+                readonly id: "d2ba5c6b-bbb7-49d6-b466-900654870cd6";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/neural-chat-7b-v3-1-awq";
+                readonly description: "This model is a fine-tuned 7B parameter LLM on the Intel Gaudi 2 processor from the mistralai/Mistral-7B-v0.1 on the open source dataset Open-Orca/SlimOrca.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }, {
+                readonly id: "ca54bcd6-0d98-4739-9b3b-5c8b4402193d";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-2-7b-chat-fp16";
+                readonly description: "Full precision (fp16) generative text model with 7 billion parameters from Meta";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://ai.meta.com/llama/";
+                    readonly terms: "https://ai.meta.com/resources/models-and-libraries/llama-downloads/";
+                };
+            }, {
+                readonly id: "c907d0f9-d69d-4e93-b501-4daeb4fd69eb";
+                readonly source: 1;
+                readonly name: "@cf/mistral/mistral-7b-instruct-v0.1";
+                readonly description: "Instruct fine-tuned version of the Mistral-7b generative text model with 7 billion parameters";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://mistral.ai/news/announcing-mistral-7b/";
+                    readonly lora: true;
+                };
+            }, {
+                readonly id: "c58c317b-0c15-4bda-abb6-93e275f282d9";
+                readonly source: 1;
+                readonly name: "@cf/mistral/mistral-7b-instruct-v0.2-lora";
+                readonly description: "The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly lora: true;
+                };
+            }, {
+                readonly id: "bf6ddd21-6477-4681-bbbe-24c3d5423e78";
+                readonly source: 1;
+                readonly name: "@cf/tinyllama/tinyllama-1.1b-chat-v1.0";
+                readonly description: "The TinyLlama project aims to pretrain a 1.1B Llama model on 3 trillion tokens. This is the chat model finetuned on top of TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0";
+                };
+            }, {
+                readonly id: "b97d7069-48d9-461c-80dd-445d20a632eb";
+                readonly source: 2;
+                readonly name: "@hf/mistral/mistral-7b-instruct-v0.2";
+                readonly description: "The Mistral-7B-Instruct-v0.2 Large Language Model (LLM) is an instruct fine-tuned version of the Mistral-7B-v0.2. Mistral-7B-v0.2 has the following changes compared to Mistral-7B-v0.1: 32k context window (vs 8k context in v0.1), rope-theta = 1e6, and no Sliding-Window Attention.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2";
+                    readonly lora: true;
+                    readonly max_batch_prefill_tokens: 8192;
+                    readonly max_input_length: 3072;
+                    readonly max_total_tokens: 4096;
+                };
+            }, {
+                readonly id: "b7fe7ad2-aeaf-47d2-8bfa-7a5ae22a2ab4";
+                readonly source: 1;
+                readonly name: "@cf/fblgit/una-cybertron-7b-v2-bf16";
+                readonly description: "Cybertron 7B v2 is a 7B MistralAI based model, best on it's series. It was trained with SFT, DPO and UNA (Unified Neural Alignment) on multiple datasets.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }, {
+                readonly id: "9d2ab560-065e-4d0d-a789-d4bc7468d33e";
+                readonly source: 1;
+                readonly name: "@cf/thebloke/discolm-german-7b-v1-awq";
+                readonly description: "DiscoLM German 7b is a Mistral-based large language model with a focus on German-language applications. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-AWQ";
+                };
+            }, {
+                readonly id: "9c95c39d-45b3-4163-9631-22f0c0dc3b14";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-2-7b-chat-int8";
+                readonly description: "Quantized (int8) generative text model with 7 billion parameters from Meta";
+                readonly tags: readonly [];
+                readonly properties: {};
+            }, {
+                readonly id: "9b9c87c6-d4b7-494c-b177-87feab5904db";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-3.1-8b-instruct-fp8";
+                readonly description: "Llama 3.1 8B quantized to FP8 precision";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
+                };
+            }, {
+                readonly id: "980ec5e9-33c2-483a-a2d8-cd092fdf273f";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/mistral-7b-instruct-v0.1-awq";
+                readonly description: "Mistral 7B Instruct v0.1 AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Mistral variant.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-AWQ";
+                };
+            }, {
+                readonly id: "90a20ae7-7cf4-4eb3-8672-8fc4ee580635";
+                readonly source: 1;
+                readonly name: "@cf/qwen/qwen1.5-7b-chat-awq";
+                readonly description: "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/qwen/qwen1.5-7b-chat-awq";
+                };
+            }, {
+                readonly id: "85c5a3c6-24b0-45e7-b23a-023182578822";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/llama-2-13b-chat-awq";
+                readonly description: "Llama 2 13B Chat AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Llama 2 variant.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/TheBloke/Llama-2-13B-chat-AWQ";
+                };
+            }, {
+                readonly id: "7f180530-2e16-4116-9d26-f49fbed9d372";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/deepseek-coder-6.7b-base-awq";
+                readonly description: "Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly terms: "https://huggingface.co/TheBloke/deepseek-coder-6.7B-base-AWQ";
+                };
+            }, {
+                readonly id: "7ed8d8e8-6040-4680-843a-aef402d6b013";
+                readonly source: 1;
+                readonly name: "@cf/meta-llama/llama-2-7b-chat-hf-lora";
+                readonly description: "This is a Llama2 base model that Cloudflare dedicated for inference with LoRA adapters. Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. ";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly lora: true;
+                };
+            }, {
+                readonly id: "673c56cc-8553-49a1-b179-dd549ec9209a";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/openhermes-2.5-mistral-7b-awq";
+                readonly description: "OpenHermes 2.5 Mistral 7B is a state of the art Mistral Fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }, {
+                readonly id: "60474554-f03b-4ff4-8ecc-c1b7c71d7b29";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/deepseek-coder-6.7b-instruct-awq";
+                readonly description: "Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly terms: "https://huggingface.co/TheBloke/deepseek-coder-6.7B-instruct-AWQ";
+                };
+            }, {
+                readonly id: "4c3a544e-da47-4336-9cea-c7cbfab33f16";
+                readonly source: 1;
+                readonly name: "@cf/deepseek-ai/deepseek-math-7b-instruct";
+                readonly description: "DeepSeekMath-Instruct 7B is a mathematically instructed tuning model derived from DeepSeekMath-Base 7B. DeepSeekMath is initialized with DeepSeek-Coder-v1.5 7B and continues pre-training on math-related tokens sourced from Common Crawl, together with natural language and code data for 500B tokens.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/deepseek-ai/deepseek-math-7b-instruct";
+                    readonly terms: "https://github.com/deepseek-ai/DeepSeek-Math/blob/main/LICENSE-MODEL";
+                };
+            }, {
+                readonly id: "48dd2443-0c61-43b2-8894-22abddf1b081";
+                readonly source: 1;
+                readonly name: "@cf/tiiuae/falcon-7b-instruct";
+                readonly description: "Falcon-7B-Instruct is a 7B parameters causal decoder-only model built by TII based on Falcon-7B and finetuned on a mixture of chat/instruct datasets.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/tiiuae/falcon-7b-instruct";
+                };
+            }, {
+                readonly id: "44774b85-08c8-4bb8-8d2a-b06ebc538a79";
+                readonly source: 2;
+                readonly name: "@hf/nousresearch/hermes-2-pro-mistral-7b";
+                readonly description: "Hermes 2 Pro on Mistral 7B is the new flagship 7B Hermes! Hermes 2 Pro is an upgraded, retrained version of Nous Hermes 2, consisting of an updated and cleaned version of the OpenHermes 2.5 Dataset, as well as a newly introduced Function Calling and JSON Mode dataset developed in-house.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly function_calling: true;
+                    readonly info: "https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B";
+                };
+            }, {
+                readonly id: "41975cc2-c82e-4e98-b7b8-88ffb186a545";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-3.1-8b-instruct";
+                readonly description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models. The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
+                };
+            }, {
+                readonly id: "3dcb4f2d-26a8-412b-b6e3-2a368beff66b";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-3.1-8b-instruct-awq";
+                readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.\n";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
+                };
+            }, {
+                readonly id: "3976bab8-3810-4ad8-8580-ab1e22de7823";
+                readonly source: 2;
+                readonly name: "@hf/thebloke/zephyr-7b-beta-awq";
+                readonly description: "Zephyr 7B Beta AWQ is an efficient, accurate and blazing-fast low-bit weight quantized Zephyr model variant.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/TheBloke/zephyr-7B-beta-AWQ";
+                };
+            }, {
+                readonly id: "337170b7-bd2f-4631-9a57-688b579cf6d3";
+                readonly source: 1;
+                readonly name: "@cf/google/gemma-7b-it-lora";
+                readonly description: " This is a Gemma-7B base model that Cloudflare dedicates for inference with LoRA adapters. Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly lora: true;
+                };
+            }, {
+                readonly id: "3222ddb3-e211-4fd9-9a6d-79a80e47b3a6";
+                readonly source: 1;
+                readonly name: "@cf/qwen/qwen1.5-1.8b-chat";
+                readonly description: "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/qwen/qwen1.5-1.8b-chat";
+                };
+            }, {
+                readonly id: "31097538-a3ff-4e6e-bb56-ad0e1f428b61";
+                readonly source: 1;
+                readonly name: "@cf/meta/llama-3-8b-instruct-awq";
+                readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://llama.meta.com";
+                    readonly terms: "https://llama.meta.com/llama3/license/#";
+                };
+            }, {
+                readonly id: "1dc9e589-df6b-4e66-ac9f-ceff42d64983";
+                readonly source: 1;
+                readonly name: "@cf/defog/sqlcoder-7b-2";
+                readonly description: "This model is intended to be used by non-technical users to understand data inside their SQL databases. ";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/defog/sqlcoder-7b-2";
+                    readonly terms: "https://creativecommons.org/licenses/by-sa/4.0/deed.en";
+                };
+            }, {
+                readonly id: "1d933df3-680f-4280-940d-da87435edb07";
+                readonly source: 1;
+                readonly name: "@cf/microsoft/phi-2";
+                readonly description: "Phi-2 is a Transformer-based model with a next-word prediction objective, trained on 1.4T tokens from multiple passes on a mixture of Synthetic and Web datasets for NLP and coding.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/microsoft/phi-2";
+                };
+            }, {
+                readonly id: "1a7b6ad6-9987-4bd3-a329-20ee8de93296";
+                readonly source: 2;
+                readonly name: "@hf/meta-llama/meta-llama-3-8b-instruct";
+                readonly description: "Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.\t";
+                readonly tags: readonly [];
+                readonly properties: {};
+            }, {
+                readonly id: "0f002249-7d86-4698-aabf-8529ed86cefb";
+                readonly source: 2;
+                readonly name: "@hf/google/gemma-7b-it";
+                readonly description: "Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://ai.google.dev/gemma/docs";
+                    readonly lora: true;
+                    readonly max_batch_prefill_tokens: 2048;
+                    readonly max_input_length: 1512;
+                    readonly max_total_tokens: 2048;
+                    readonly terms: "https://ai.google.dev/gemma/terms";
+                };
+            }, {
+                readonly id: "09d113a9-03c4-420e-b6f2-52ad4b3bed45";
+                readonly source: 1;
+                readonly name: "@cf/qwen/qwen1.5-14b-chat-awq";
+                readonly description: "Qwen1.5 is the improved version of Qwen, the large language model series developed by Alibaba Cloud. AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/qwen/qwen1.5-14b-chat-awq";
+                };
+            }, {
+                readonly id: "081054cd-a254-4349-855e-6dc0996277fa";
+                readonly source: 1;
+                readonly name: "@cf/openchat/openchat-3.5-0106";
+                readonly description: "OpenChat is an innovative library of open-source language models, fine-tuned with C-RLFT - a strategy inspired by offline reinforcement learning.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/openchat/openchat-3.5-0106";
+                };
+            }];
+        };
+        readonly 'Text Classification': {
+            readonly id: "19606750-23ed-4371-aab2-c20349b53a60";
+            readonly description: "Sentiment analysis or text classification is a common NLP task that classifies a text input into labels or classes.";
+            readonly models: readonly [{
+                readonly id: "eaf31752-a074-441f-8b70-d593255d2811";
+                readonly source: 1;
+                readonly name: "@cf/huggingface/distilbert-sst-2-int8";
+                readonly description: "Distilled BERT model that was finetuned on SST-2 for sentiment classification";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/Intel/distilbert-base-uncased-finetuned-sst-2-english-int8-static";
+                };
+            }];
+        };
+        readonly 'Object Detection': {
+            readonly id: "9c178979-90d9-49d8-9e2c-0f1cf01815d4";
+            readonly description: "Object detection models can detect instances of objects like persons, faces, license plates, or others in an image. This task takes an image as input and returns a list of detected objects, each one containing a label, a probability score, and its surrounding box coordinates.";
+            readonly models: readonly [{
+                readonly id: "cc34ce52-3059-415f-9a48-12aa919d37ee";
+                readonly source: 1;
+                readonly name: "@cf/facebook/detr-resnet-50";
+                readonly description: "DEtection TRansformer (DETR) model trained end-to-end on COCO 2017 object detection (118k annotated images).";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }];
+        };
+        readonly 'Automatic Speech Recognition': {
+            readonly id: "dfce1c48-2a81-462e-a7fd-de97ce985207";
+            readonly description: "Automatic speech recognition (ASR) models convert a speech signal, typically an audio input, to text.";
+            readonly models: readonly [{
+                readonly id: "c1c12ce4-c36a-4aa6-8da4-f63ba4b8984d";
+                readonly source: 1;
+                readonly name: "@cf/openai/whisper";
+                readonly description: "Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multitasking model that can perform multilingual speech recognition, speech translation, and language identification.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://openai.com/research/whisper";
+                };
+            }, {
+                readonly id: "2169496d-9c0e-4e49-8399-c44ee66bff7d";
+                readonly source: 1;
+                readonly name: "@cf/openai/whisper-tiny-en";
+                readonly description: "Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalize to many datasets and domains without the need for fine-tuning. This is the English-only version of the Whisper Tiny model which was trained on the task of speech recognition.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }];
+        };
+        readonly 'Image-to-Text': {
+            readonly id: "882a91d1-c331-4eec-bdad-834c919942a8";
+            readonly description: "Image to text models output a text from a given image. Image captioning or optical character recognition can be considered as the most common applications of image to text.";
+            readonly models: readonly [{
+                readonly id: "af274959-cb47-4ba8-9d8e-5a0a58b6b402";
+                readonly source: 1;
+                readonly name: "@cf/llava-hf/llava-1.5-7b-hf";
+                readonly description: "LLaVA is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }, {
+                readonly id: "3dca5889-db3e-4973-aa0c-3a4a6bd22d29";
+                readonly source: 1;
+                readonly name: "@cf/unum/uform-gen2-qwen-500m";
+                readonly description: "UForm-Gen is a small generative vision-language model primarily designed for Image Captioning and Visual Question Answering. The model was pre-trained on the internal image captioning dataset and fine-tuned on public instructions datasets: SVIT, LVIS, VQAs datasets.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://www.unum.cloud/";
+                };
+            }];
+        };
+        readonly 'Text-to-Image': {
+            readonly id: "3d6e1f35-341b-4915-a6c8-9a7142a9033a";
+            readonly description: "Generates images from input text. These models can be used to generate and modify images based on text prompts.";
+            readonly models: readonly [{
+                readonly id: "a9abaef0-3031-47ad-8790-d311d8684c6c";
+                readonly source: 1;
+                readonly name: "@cf/runwayml/stable-diffusion-v1-5-inpainting";
+                readonly description: "Stable Diffusion Inpainting is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/runwayml/stable-diffusion-inpainting";
+                    readonly terms: "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE";
+                };
+            }, {
+                readonly id: "7f797b20-3eb0-44fd-b571-6cbbaa3c423b";
+                readonly source: 1;
+                readonly name: "@cf/bytedance/stable-diffusion-xl-lightning";
+                readonly description: "SDXL-Lightning is a lightning-fast text-to-image generation model. It can generate high-quality 1024px images in a few steps.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/ByteDance/SDXL-Lightning";
+                };
+            }, {
+                readonly id: "7912c0ab-542e-44b9-b9ee-3113d226a8b5";
+                readonly source: 1;
+                readonly name: "@cf/lykon/dreamshaper-8-lcm";
+                readonly description: "Stable Diffusion model that has been fine-tuned to be better at photorealism without sacrificing range.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/Lykon/DreamShaper";
+                };
+            }, {
+                readonly id: "6d52253a-b731-4a03-b203-cde2d4fae871";
+                readonly source: 1;
+                readonly name: "@cf/stabilityai/stable-diffusion-xl-base-1.0";
+                readonly description: "Diffusion-based text-to-image generative model by Stability AI. Generates and modify images based on text prompts.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://stability.ai/stable-diffusion";
+                    readonly terms: "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md";
+                };
+            }, {
+                readonly id: "19547f04-7a6a-4f87-bf2c-f5e32fb12dc5";
+                readonly source: 1;
+                readonly name: "@cf/runwayml/stable-diffusion-v1-5-img2img";
+                readonly description: "Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images. Img2img generate a new image from an input image with Stable Diffusion. ";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/runwayml/stable-diffusion-v1-5";
+                    readonly terms: "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE";
+                };
+            }];
+        };
+        readonly 'Image Classification': {
+            readonly id: "00cd182b-bf30-4fc4-8481-84a3ab349657";
+            readonly description: "Image classification models take an image input and assigns it labels or classes.";
+            readonly models: readonly [{
+                readonly id: "7f9a76e1-d120-48dd-a565-101d328bbb02";
+                readonly source: 1;
+                readonly name: "@cf/microsoft/resnet-50";
+                readonly description: "50 layers deep image classification CNN trained on more than 1M images from ImageNet";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://www.microsoft.com/en-us/research/blog/microsoft-vision-model-resnet-50-combines-web-scale-data-and-multi-task-learning-to-achieve-state-of-the-art/";
+                };
+            }];
+        };
+        readonly Translation: {
+            readonly id: "f57d07cb-9087-487a-bbbf-bc3e17fecc4b";
+            readonly description: "Translation models convert a sequence of text from one language to another.";
+            readonly models: readonly [{
+                readonly id: "617e7ec3-bf8d-4088-a863-4f89582d91b5";
+                readonly source: 1;
+                readonly name: "@cf/meta/m2m100-1.2b";
+                readonly description: "Multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://github.com/facebookresearch/fairseq/tree/main/examples/m2m_100";
+                    readonly languages: "english, chinese, french, spanish, arabic, russian, german, japanese, portuguese, hindi";
+                    readonly terms: "https://github.com/facebookresearch/fairseq/blob/main/LICENSE";
+                };
+            }];
+        };
+        readonly 'Text Embeddings': {
+            readonly id: "0137cdcf-162a-4108-94f2-1ca59e8c65ee";
+            readonly description: "Feature extraction models transform raw data into numerical features that can be processed while preserving the information in the original dataset. These models are ideal as part of building vector search applications or Retrieval Augmented Generation workflows with Large Language Models (LLM).";
+            readonly models: readonly [{
+                readonly id: "57fbd08a-a4c4-411c-910d-b9459ff36c20";
+                readonly source: 1;
+                readonly name: "@cf/baai/bge-small-en-v1.5";
+                readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/BAAI/bge-base-en-v1.5";
+                    readonly max_input_tokens: 512;
+                    readonly output_dimensions: 384;
+                };
+            }, {
+                readonly id: "429b9e8b-d99e-44de-91ad-706cf8183658";
+                readonly source: 1;
+                readonly name: "@cf/baai/bge-base-en-v1.5";
+                readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/BAAI/bge-base-en-v1.5";
+                    readonly max_input_tokens: 512;
+                    readonly output_dimensions: 768;
+                };
+            }, {
+                readonly id: "01bc2fb0-4bca-4598-b985-d2584a3f46c0";
+                readonly source: 1;
+                readonly name: "@cf/baai/bge-large-en-v1.5";
+                readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                    readonly info: "https://huggingface.co/BAAI/bge-base-en-v1.5";
+                    readonly max_input_tokens: 512;
+                    readonly output_dimensions: 1024;
+                };
+            }];
+        };
+        readonly Summarization: {
+            readonly id: "6f4e65d8-da0f-40d2-9aa4-db582a5a04fd";
+            readonly description: "Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.";
+            readonly models: readonly [{
+                readonly id: "19bd38eb-bcda-4e53-bec2-704b4689b43a";
+                readonly source: 1;
+                readonly name: "@cf/facebook/bart-large-cnn";
+                readonly description: "BART is a transformer encoder-encoder (seq2seq) model with a bidirectional (BERT-like) encoder and an autoregressive (GPT-like) decoder. You can use this model for text summarization.";
+                readonly tags: readonly [];
+                readonly properties: {
+                    readonly beta: true;
+                };
+            }];
+        };
+    };
+    readonly loras: readonly [{
+        readonly public: true;
+        readonly id: "39fb185c-762a-4633-a2ad-7a4462940608";
+        readonly name: "cf-public-magicoder";
+        readonly description: "LoRA adapter that enables Mistral to generate code";
+        readonly created_at: "2024-05-08 02:23:55.897";
+        readonly modified_at: "2024-05-08 02:23:55.897";
+        readonly model: "@cf/mistral/mistral-7b-instruct-v0.2-lora";
+    }, {
+        readonly public: true;
+        readonly id: "911a83cf-d947-4c96-b4d2-a86c2c6d2b7f";
+        readonly name: "cf-public-cnn-summarization";
+        readonly description: "LoRA adapter that enables Mistral to summarize articles. https://huggingface.co/predibase/cnn";
+        readonly created_at: "2024-05-09 02:11:12.386";
+        readonly modified_at: "2024-05-09 02:11:12.386";
+        readonly model: "@cf/mistral/mistral-7b-instruct-v0.2-lora";
+    }, {
+        readonly public: true;
+        readonly id: "c0b52d28-530b-4751-b7d9-afbdb4795990";
+        readonly name: "cf-public-jigsaw-classification";
+        readonly description: "LoRA adapter that enables Mistral to detect and classify toxic comments. https://huggingface.co/predibase/jigsaw";
+        readonly created_at: "2024-05-09 02:19:48.750";
+        readonly modified_at: "2024-05-09 02:19:48.750";
+        readonly model: "@cf/mistral/mistral-7b-instruct-v0.2-lora";
+    }];
+};
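
For orientation, below is a minimal sketch of how the new catalog export might be consumed from TypeScript. The `workersAiCatalog` constant and its deeply `readonly` shape come from the declarations above; the exact import specifier is an assumption, since the package.json "exports" map is not shown in this diff.

// Minimal consumption sketch (the import path is an assumption).
import { workersAiCatalog } from '@chainfuse/types/super-ai';

// Union of every model name in the "Text Generation" group,
// e.g. "@cf/meta/llama-3-8b-instruct" | "@cf/microsoft/phi-2" | ...
type TextGenerationModel = (typeof workersAiCatalog.modelGroups)['Text Generation']['models'][number]['name'];

// Narrow the catalog at runtime: list the text-generation models whose
// properties advertise LoRA support.
const loraCapable = workersAiCatalog.modelGroups['Text Generation'].models
    .filter(model => 'lora' in model.properties)
    .map(model => model.name);

console.log(loraCapable);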