@chainfuse/types 0.1.2 → 1.0.0
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
package/dist/super-ai/index.d.ts
CHANGED
@@ -55,7 +55,6 @@ export declare const possibilities_mc_generic: readonly [...{
     };
 }];
 export type type_mc_generic = aiFunctionProviders;
-export declare const default_mc_generic: type_mc_generic;
 export declare const possibilities_mc_summary: readonly [...{
     name: "Azure_OpenAi_Gpt3" | "Azure_OpenAi_Gpt4o_mini" | "Azure_OpenAi_Gpt4" | "Azure_OpenAi_Gpt4o";
 }[], {
@@ -69,7 +68,6 @@ export declare const possibilities_mc_summary: readonly [...{
     };
 }];
 export type type_mc_summary = aiProviders<'Summarization'>;
-export declare const default_mc_summary: type_mc_summary;
 export declare const possibilities_mc_extraction: readonly [...{
     name: "Azure_OpenAi_Gpt3" | "Azure_OpenAi_Gpt4o_mini" | "Azure_OpenAi_Gpt4" | "Azure_OpenAi_Gpt4o";
 }[], {
@@ -85,7 +83,6 @@ export declare const possibilities_mc_extraction: readonly [...{
     };
 }];
 export type type_mc_extraction = aiFunctionProviders;
-export declare const default_mc_extraction: type_mc_extraction;
 export declare const possibilities_mc_tagging: readonly [...{
     name: "Azure_OpenAi_Gpt3" | "Azure_OpenAi_Gpt4o_mini" | "Azure_OpenAi_Gpt4" | "Azure_OpenAi_Gpt4o";
 }[], {
@@ -101,7 +98,6 @@ export declare const possibilities_mc_tagging: readonly [...{
     };
 }];
 export type type_mc_tagging = aiFunctionProviders;
-export declare const default_mc_tagging: type_mc_tagging;
 export declare const possibilities_mc_sentiment: readonly [...{
     name: "Azure_OpenAi_Gpt3" | "Azure_OpenAi_Gpt4o_mini" | "Azure_OpenAi_Gpt4" | "Azure_OpenAi_Gpt4o";
 }[], {
@@ -117,7 +113,6 @@ export declare const possibilities_mc_sentiment: readonly [...{
     };
 }];
 export type type_mc_sentiment = aiFunctionProviders;
-export declare const default_mc_sentiment: type_mc_sentiment;
 export declare const possibilities_mc_safety: readonly [...{
     name: "Azure_OpenAi_Gpt3" | "Azure_OpenAi_Gpt4o_mini" | "Azure_OpenAi_Gpt4" | "Azure_OpenAi_Gpt4o";
 }[], {
@@ -164,6 +159,16 @@ export declare const possibilities_mc_safety: readonly [...{
         readonly info: "https://llama.meta.com";
         readonly terms: "https://llama.meta.com/llama3/license/#";
     };
+}, {
+    readonly id: "d9dc8363-66f4-4bb0-8641-464ee7bfc131";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-3b-instruct";
+    readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "d9b7a55c-cefa-4208-8ab3-11497a2b046c";
     readonly source: 2;
@@ -294,6 +299,16 @@ export declare const possibilities_mc_safety: readonly [...{
         readonly beta: true;
         readonly info: "https://huggingface.co/qwen/qwen1.5-7b-chat-awq";
     };
+}, {
+    readonly id: "906a57fd-b018-4d6c-a43e-a296d4cc5839";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-1b-instruct";
+    readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "85c5a3c6-24b0-45e7-b23a-023182578822";
     readonly source: 2;
@@ -436,6 +451,16 @@ export declare const possibilities_mc_safety: readonly [...{
         readonly info: "https://llama.meta.com";
         readonly terms: "https://llama.meta.com/llama3/license/#";
     };
+}, {
+    readonly id: "2cbc033b-ded8-4e02-bbb2-47cf05d5cfe5";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-11b-vision-instruct";
+    readonly description: " The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "1dc9e589-df6b-4e66-ac9f-ceff42d64983";
     readonly source: 1;
@@ -501,7 +526,6 @@ export declare const possibilities_mc_safety: readonly [...{
     };
 }];
 export type type_mc_safety = aiProviders<'Text Generation'>;
-export declare const default_mc_safety: type_mc_safety;
 export declare const possibilities_mc_embedding: readonly [...{
     name: "Azure_OpenAi_Embed3_Large" | "Azure_OpenAi_Embed3_Small";
 }[], {
@@ -542,5 +566,4 @@ export declare const possibilities_mc_embedding: readonly [...{
     };
 }];
 export type type_mc_embedding = aiEmbeddingProviders;
-export declare const default_mc_embedding: type_mc_embedding;
 export {};
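The hunks above remove every `default_mc_*` declaration while keeping the matching `possibilities_mc_*` arrays, so 1.0.0 consumers select a model themselves rather than relying on a package-provided default. A minimal TypeScript sketch of that selection, assuming a `super-ai` entry point matching the dist path shown above (the import specifier and the fallback logic are illustrative, not part of the package):

import { possibilities_mc_generic } from '@chainfuse/types/super-ai'; // assumed entry point

// 0.1.2 shipped default_mc_generic ('@hf/nousresearch/hermes-2-pro-mistral-7b');
// 1.0.0 does not, so pin a preference and fall back to the first possibility.
const preferred = '@hf/nousresearch/hermes-2-pro-mistral-7b';
const model = possibilities_mc_generic.find(({ name }) => name === preferred) ?? possibilities_mc_generic[0];

console.log(`Selected model: ${model.name}`);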
package/dist/super-ai/index.js
CHANGED
@@ -19,16 +19,9 @@ export const enabledCloudflareLlmFunctionProviders = workersAiCatalog.modelGroup
 const possibilities_base = [...Object.values(enabledAzureLlmProviders)];
 const possibilities_embeddings = [...Object.values(enabledAzureLlmEmbeddingProviders)];
 export const possibilities_mc_generic = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Generation'].models.filter((model) => 'function_calling' in model.properties && model.properties.function_calling)];
-export const default_mc_generic = '@hf/nousresearch/hermes-2-pro-mistral-7b';
 export const possibilities_mc_summary = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups.Summarization.models];
-export const default_mc_summary = enabledAzureLlmProviders.Azure_OpenAi_Gpt3;
 export const possibilities_mc_extraction = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Generation'].models.filter((model) => 'function_calling' in model.properties && model.properties.function_calling)];
-export const default_mc_extraction = '@hf/nousresearch/hermes-2-pro-mistral-7b';
 export const possibilities_mc_tagging = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Generation'].models.filter((model) => 'function_calling' in model.properties && model.properties.function_calling)];
-export const default_mc_tagging = enabledAzureLlmProviders.Azure_OpenAi_Gpt4o;
 export const possibilities_mc_sentiment = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Generation'].models.filter((model) => 'function_calling' in model.properties && model.properties.function_calling)];
-export const default_mc_sentiment = '@hf/nousresearch/hermes-2-pro-mistral-7b';
 export const possibilities_mc_safety = [...possibilities_base.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Generation'].models];
-export const default_mc_safety = '@hf/thebloke/llamaguard-7b-awq';
 export const possibilities_mc_embedding = [...possibilities_embeddings.map((modelName) => ({ name: modelName })), ...workersAiCatalog.modelGroups['Text Embeddings'].models];
-export const default_mc_embedding = '@cf/baai/bge-large-en-v1.5';
@@ -47,6 +47,16 @@ export declare const workersAiCatalog: {
         readonly info: "https://llama.meta.com";
         readonly terms: "https://llama.meta.com/llama3/license/#";
     };
+}, {
+    readonly id: "d9dc8363-66f4-4bb0-8641-464ee7bfc131";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-3b-instruct";
+    readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "d9b7a55c-cefa-4208-8ab3-11497a2b046c";
     readonly source: 2;
@@ -177,6 +187,16 @@ export declare const workersAiCatalog: {
         readonly beta: true;
         readonly info: "https://huggingface.co/qwen/qwen1.5-7b-chat-awq";
     };
+}, {
+    readonly id: "906a57fd-b018-4d6c-a43e-a296d4cc5839";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-1b-instruct";
+    readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "85c5a3c6-24b0-45e7-b23a-023182578822";
     readonly source: 2;
@@ -319,6 +339,16 @@ export declare const workersAiCatalog: {
         readonly info: "https://llama.meta.com";
         readonly terms: "https://llama.meta.com/llama3/license/#";
     };
+}, {
+    readonly id: "2cbc033b-ded8-4e02-bbb2-47cf05d5cfe5";
+    readonly source: 1;
+    readonly name: "@cf/meta/llama-3.2-11b-vision-instruct";
+    readonly description: " The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+        readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
+    };
 }, {
     readonly id: "1dc9e589-df6b-4e66-ac9f-ceff42d64983";
     readonly source: 1;
@@ -475,6 +505,15 @@ export declare const workersAiCatalog: {
         readonly info: "https://huggingface.co/runwayml/stable-diffusion-inpainting";
         readonly terms: "https://github.com/runwayml/stable-diffusion/blob/main/LICENSE";
     };
+}, {
+    readonly id: "9e087485-23dc-47fa-997d-f5bfafc0c7cc";
+    readonly source: 1;
+    readonly name: "@cf/black-forest-labs/flux-1-schnell";
+    readonly description: "FLUX.1 [schnell] is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. ";
+    readonly tags: readonly [];
+    readonly properties: {
+        readonly beta: true;
+    };
 }, {
     readonly id: "7f797b20-3eb0-44fd-b571-6cbbaa3c423b";
     readonly source: 1;
@@ -52,6 +52,17 @@ export const workersAiCatalog = {
             terms: 'https://llama.meta.com/llama3/license/#',
         },
     },
+    {
+        id: 'd9dc8363-66f4-4bb0-8641-464ee7bfc131',
+        source: 1,
+        name: '@cf/meta/llama-3.2-3b-instruct',
+        description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
+        tags: [],
+        properties: {
+            beta: true,
+            terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
+        },
+    },
     {
         id: 'd9b7a55c-cefa-4208-8ab3-11497a2b046c',
         source: 2,
@@ -195,6 +206,17 @@ export const workersAiCatalog = {
             info: 'https://huggingface.co/qwen/qwen1.5-7b-chat-awq',
         },
     },
+    {
+        id: '906a57fd-b018-4d6c-a43e-a296d4cc5839',
+        source: 1,
+        name: '@cf/meta/llama-3.2-1b-instruct',
+        description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
+        tags: [],
+        properties: {
+            beta: true,
+            terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
+        },
+    },
     {
         id: '85c5a3c6-24b0-45e7-b23a-023182578822',
         source: 2,
@@ -351,6 +373,17 @@ export const workersAiCatalog = {
             terms: 'https://llama.meta.com/llama3/license/#',
         },
    },
+    {
+        id: '2cbc033b-ded8-4e02-bbb2-47cf05d5cfe5',
+        source: 1,
+        name: '@cf/meta/llama-3.2-11b-vision-instruct',
+        description: ' The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.',
+        tags: [],
+        properties: {
+            beta: true,
+            terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
+        },
+    },
     {
         id: '1dc9e589-df6b-4e66-ac9f-ceff42d64983',
         source: 1,
@@ -525,6 +558,16 @@ export const workersAiCatalog = {
             terms: 'https://github.com/runwayml/stable-diffusion/blob/main/LICENSE',
         },
     },
+    {
+        id: '9e087485-23dc-47fa-997d-f5bfafc0c7cc',
+        source: 1,
+        name: '@cf/black-forest-labs/flux-1-schnell',
+        description: 'FLUX.1 [schnell] is a 12 billion parameter rectified flow transformer capable of generating images from text descriptions. ',
+        tags: [],
+        properties: {
+            beta: true,
+        },
+    },
     {
         id: '7f797b20-3eb0-44fd-b571-6cbbaa3c423b',
         source: 1,
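Each catalog hunk registers the same four additions (the Llama 3.2 instruct and vision models plus FLUX.1 [schnell]) in both the declaration and runtime copies of `workersAiCatalog`. A short TypeScript sketch of querying the updated catalog, assuming `workersAiCatalog` is importable from the package root (the import specifier is illustrative; the filter mirrors the one `possibilities_mc_generic` uses above):

import { workersAiCatalog } from '@chainfuse/types'; // assumed entry point

// The new Llama 3.2 entries land in the 'Text Generation' model group.
const llama32 = workersAiCatalog.modelGroups['Text Generation'].models
    .filter((model) => model.name.startsWith('@cf/meta/llama-3.2'))
    .map((model) => model.name);

// Same shape as the possibilities_mc_generic filter in the diff above:
// keep only models whose properties advertise function_calling.
const functionCallers = workersAiCatalog.modelGroups['Text Generation'].models
    .filter((model) => 'function_calling' in model.properties && model.properties.function_calling);

console.log(llama32, functionCallers.map((model) => model.name));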
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@chainfuse/types",
-  "version": "0.1.2",
+  "version": "1.0.0",
   "description": "",
   "author": "ChainFuse",
   "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -80,8 +80,8 @@
   },
   "prettier": "@demosjarco/prettier-config",
   "devDependencies": {
-    "@cloudflare/workers-types": "^4.
+    "@cloudflare/workers-types": "^4.20240925.0",
     "@types/json-schema": "^7.0.15"
   },
-  "gitHead": "
+  "gitHead": "39eacc48c44f8bcb3f2599404af48e7f20db7e33"
 }
@@ -1,15 +0,0 @@
-/**
- * It is used to carry over the types when using the `Object.values()` method.
- */
-export type ObjectValues<T> = {
-    [K in keyof T]: T[K];
-} extends Record<string, infer U> ? U[] : never;
-export type ExtractKeysWithPrefix<T, Prefix extends string> = {
-    [K in keyof T]: K extends `${Prefix}${string}` ? K : never;
-}[keyof T];
-export type FilterKeysWithPrefix<T, Prefix extends string> = {
-    [K in keyof T as K extends `${Prefix}${string}` ? K : never]: T[K];
-};
-export type FilterKeysWithPrefixAndPrepend<T, Prefix extends string, Prepend extends string> = {
-    [K in keyof T as K extends `${Prefix}${string}` ? `${Prepend}${K}` : never]: T[K];
-};
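The final hunk deletes a declaration file of mapped-type utilities (the viewer omits its path, so the file name is not recoverable here). The definitions are self-contained, so code that imported them can carry local copies instead. A short TypeScript sketch using two of the removed helpers verbatim, against a hypothetical provider map (the `providers` object exists only for illustration):

// Local copies of two helpers removed in 1.0.0, taken verbatim from the hunk above.
export type ObjectValues<T> = {
    [K in keyof T]: T[K];
} extends Record<string, infer U> ? U[] : never;
export type FilterKeysWithPrefix<T, Prefix extends string> = {
    [K in keyof T as K extends `${Prefix}${string}` ? K : never]: T[K];
};

// Hypothetical provider map, used only to show what the helpers compute.
const providers = {
    Azure_OpenAi_Gpt4o: 'Azure_OpenAi_Gpt4o',
    Azure_OpenAi_Embed3_Small: 'Azure_OpenAi_Embed3_Small',
} as const;

// ObjectValues names the element type Object.values() yields for a const map,
// per the removed doc comment ("carry over the types when using Object.values()").
const names: ObjectValues<typeof providers> = [...Object.values(providers)];

// FilterKeysWithPrefix keeps only the keys that share a prefix.
type EmbeddingProviders = FilterKeysWithPrefix<typeof providers, 'Azure_OpenAi_Embed'>;
const embeddings: EmbeddingProviders = { Azure_OpenAi_Embed3_Small: 'Azure_OpenAi_Embed3_Small' };

console.log(names, embeddings);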