@chainfuse/types 1.3.0 → 1.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,4 +1,4 @@
- [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/ChainFuse/packages/badge)](https://securityscorecards.dev/viewer/?uri=github.com/ChainFuse/packages)[![Socket Badge](https://socket.dev/api/badge/npm/package/@chainfuse/types)](https://socket.dev/npm/package/@chainFuse/types)
+ [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/ChainFuse/packages/badge)](https://securityscorecards.dev/viewer/?uri=github.com/ChainFuse/packages)[![Socket Badge](https://socket.dev/api/badge/npm/package/@chainfuse/types)](https://socket.dev/npm/package/@chainfuse/types)
 
  ![NPM Downloads](https://img.shields.io/npm/dw/@chainfuse/types)![npm bundle size](https://img.shields.io/bundlephobia/min/@chainfuse/types)![NPM Unpacked Size](https://img.shields.io/npm/unpacked-size/@chainfuse/types)
 
@@ -0,0 +1,59 @@
+ import { type cloudflareFilteredModelPossibilities, type cloudflareModelPossibilities, type cloudflareModelTypes } from '../super-ai/index.js';
+ export interface RawCoordinate {
+     lat: string;
+     lon: string;
+ }
+ export type CloudflareModelsEnum<M extends cloudflareModelTypes = cloudflareModelTypes> = {
+     [K in cloudflareModelPossibilities<M>]: `workersai:${K}`;
+ };
+ export type CloudflareFunctionModelsEnum = {
+     [K in cloudflareFilteredModelPossibilities<'Text Generation', 'function_calling', true>]: `workersai:${K}`;
+ };
+ export type AzureChatModels = 'gpt-35-turbo' | 'gpt-4-turbo' | 'gpt-4o-mini' | 'gpt-4o';
+ export type AzureEmbeddingModels = 'text-embedding-3-small' | 'text-embedding-3-large';
+ export declare namespace AiModels {
+     namespace LanguageModels {
+         enum OpenAi {
+             gpt3 = "openai:gpt-3.5-turbo",
+             gpt4o_mini = "openai:gpt-4o-mini",
+             o1_mini = "openai:o1-mini",
+             gpt4 = "openai:gpt-4-turbo",
+             gpt4o = "openai:gpt-4o",
+             o1 = "openai:o1-preview"
+         }
+         enum Azure {
+             gpt3 = "azure:gpt-35-turbo",
+             gpt4o_mini = "azure:gpt-4o-mini",
+             gpt4 = "azure:gpt-4-turbo",
+             gpt4o = "azure:gpt-4o"
+         }
+         enum Anthropic {
+             haiku = "anthropic:claude-3-5-haiku-latest",
+             sonnet = "anthropic:claude-3-5-sonnet-latest"
+         }
+         const Cloudflare: Readonly<CloudflareModelsEnum<"Text Generation">>;
+         const CloudflareFunctions: Readonly<CloudflareFunctionModelsEnum>;
+         enum GoogleGenerativeAi {
+             gemini_flash_beta = "google.generative-ai:gemini-2.0-flash-exp",
+             gemini_flash_beta_search = "google.generative-ai:gemini-2.0-flash-exp:search",
+             gemini_flash = "google.generative-ai:gemini-1.5-flash",
+             gemini_flash_search = "google.generative-ai:gemini-1.5-flash:search",
+             gemini_pro = "google.generative-ai:gemini-1.5-pro",
+             gemini_pro_search = "google.generative-ai:gemini-1.5-pro:search"
+         }
+     }
+     namespace TextEmbeddingModels {
+         enum OpenAi {
+             te3_large = "openai:text-embedding-3-large",
+             te3_small = "openai:text-embedding-3-small"
+         }
+         enum Azure {
+             te3_large = "azure:text-embedding-3-large",
+             te3_small = "azure:text-embedding-3-small"
+         }
+         const Cloudflare: Readonly<CloudflareModelsEnum<"Text Embeddings">>;
+         enum GoogleGenerativeAi {
+             te4 = "google.generative-ai:text-embedding-004"
+         }
+     }
+ }
@@ -0,0 +1,59 @@
+ import { enabledCloudflareLlmEmbeddingProviders, enabledCloudflareLlmFunctionProviders, enabledCloudflareLlmProviders } from '../super-ai/index.js';
+ export var AiModels;
+ (function (AiModels) {
+     let LanguageModels;
+     (function (LanguageModels) {
+         let OpenAi;
+         (function (OpenAi) {
+             OpenAi["gpt3"] = "openai:gpt-3.5-turbo";
+             OpenAi["gpt4o_mini"] = "openai:gpt-4o-mini";
+             OpenAi["o1_mini"] = "openai:o1-mini";
+             OpenAi["gpt4"] = "openai:gpt-4-turbo";
+             OpenAi["gpt4o"] = "openai:gpt-4o";
+             OpenAi["o1"] = "openai:o1-preview";
+         })(OpenAi = LanguageModels.OpenAi || (LanguageModels.OpenAi = {}));
+         let Azure;
+         (function (Azure) {
+             Azure["gpt3"] = "azure:gpt-35-turbo";
+             Azure["gpt4o_mini"] = "azure:gpt-4o-mini";
+             Azure["gpt4"] = "azure:gpt-4-turbo";
+             Azure["gpt4o"] = "azure:gpt-4o";
+         })(Azure = LanguageModels.Azure || (LanguageModels.Azure = {}));
+         let Anthropic;
+         (function (Anthropic) {
+             Anthropic["haiku"] = "anthropic:claude-3-5-haiku-latest";
+             Anthropic["sonnet"] = "anthropic:claude-3-5-sonnet-latest";
+         })(Anthropic = LanguageModels.Anthropic || (LanguageModels.Anthropic = {}));
+         // export const CloudflareSummary = Object.freeze(Object.fromEntries(enabledCloudflareLlmSummaryProviders.map((model) => [model, `workersai:${model}`])) as unknown as CloudflareModelsEnum<'Summarization'>);
+         // export const CloudflareClassification = Object.freeze(Object.fromEntries(enabledCloudflareLlmClassificationProviders.map((model) => [model, `workersai:${model}`])) as unknown as CloudflareModelsEnum<'Text Classification'>);
+         LanguageModels.Cloudflare = Object.freeze(Object.fromEntries(enabledCloudflareLlmProviders.map((model) => [model, `workersai:${model}`])));
+         LanguageModels.CloudflareFunctions = Object.freeze(Object.fromEntries(enabledCloudflareLlmFunctionProviders.map((model) => [model, `workersai:${model}`])));
+         let GoogleGenerativeAi;
+         (function (GoogleGenerativeAi) {
+             GoogleGenerativeAi["gemini_flash_beta"] = "google.generative-ai:gemini-2.0-flash-exp";
+             GoogleGenerativeAi["gemini_flash_beta_search"] = "google.generative-ai:gemini-2.0-flash-exp:search";
+             GoogleGenerativeAi["gemini_flash"] = "google.generative-ai:gemini-1.5-flash";
+             GoogleGenerativeAi["gemini_flash_search"] = "google.generative-ai:gemini-1.5-flash:search";
+             GoogleGenerativeAi["gemini_pro"] = "google.generative-ai:gemini-1.5-pro";
+             GoogleGenerativeAi["gemini_pro_search"] = "google.generative-ai:gemini-1.5-pro:search";
+         })(GoogleGenerativeAi = LanguageModels.GoogleGenerativeAi || (LanguageModels.GoogleGenerativeAi = {}));
+     })(LanguageModels = AiModels.LanguageModels || (AiModels.LanguageModels = {}));
+     let TextEmbeddingModels;
+     (function (TextEmbeddingModels) {
+         let OpenAi;
+         (function (OpenAi) {
+             OpenAi["te3_large"] = "openai:text-embedding-3-large";
+             OpenAi["te3_small"] = "openai:text-embedding-3-small";
+         })(OpenAi = TextEmbeddingModels.OpenAi || (TextEmbeddingModels.OpenAi = {}));
+         let Azure;
+         (function (Azure) {
+             Azure["te3_large"] = "azure:text-embedding-3-large";
+             Azure["te3_small"] = "azure:text-embedding-3-small";
+         })(Azure = TextEmbeddingModels.Azure || (TextEmbeddingModels.Azure = {}));
+         TextEmbeddingModels.Cloudflare = Object.freeze(Object.fromEntries(enabledCloudflareLlmEmbeddingProviders.map((model) => [model, `workersai:${model}`])));
+         let GoogleGenerativeAi;
+         (function (GoogleGenerativeAi) {
+             GoogleGenerativeAi["te4"] = "google.generative-ai:text-embedding-004";
+         })(GoogleGenerativeAi = TextEmbeddingModels.GoogleGenerativeAi || (TextEmbeddingModels.GoogleGenerativeAi = {}));
+     })(TextEmbeddingModels = AiModels.TextEmbeddingModels || (AiModels.TextEmbeddingModels = {}));
+ })(AiModels || (AiModels = {}));
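For orientation, the two hunks above add the new ai-tools module (type declarations plus the compiled enum namespaces). A minimal consumption sketch, assuming the ./ai-tools subpath export declared in the package.json changes below; the variable names are illustrative only:

import { AiModels } from '@chainfuse/types/ai-tools';

// Enum members resolve to provider-prefixed model IDs, e.g. "openai:gpt-4o".
const chatModel = AiModels.LanguageModels.OpenAi.gpt4o;
const embeddingModel = AiModels.TextEmbeddingModels.OpenAi.te3_small;
// The frozen Cloudflare maps key enabled Workers AI model names to workersai:-prefixed strings.
const workersAiIds = Object.values(AiModels.LanguageModels.Cloudflare);
console.log(chatModel, embeddingModel, workersAiIds.length);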
@@ -43,7 +43,6 @@ export declare const workersAiCatalog: {
  readonly description: "Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly info: "https://llama.meta.com";
  readonly terms: "https://llama.meta.com/llama3/license/#";
  };
@@ -54,7+53,6 @@ export declare const workersAiCatalog: {
  readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -164,7 +162,6 @@ export declare const workersAiCatalog: {
  readonly description: "Llama 3.1 8B quantized to FP8 precision";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -194,7 +191,6 @@ export declare const workersAiCatalog: {
  readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -227,6 +223,15 @@ export declare const workersAiCatalog: {
  readonly beta: true;
  readonly lora: true;
  };
+ }, {
+ readonly id: "7a143886-c9bb-4a1c-be95-377b1973bc3b";
+ readonly source: 1;
+ readonly name: "@cf/meta/llama-3.3-70b-instruct-fp8-fast";
+ readonly description: "Llama 3.3 70B quantized to fp8 precision, optimized to be faster.";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE";
+ };
  }, {
  readonly id: "673c56cc-8553-49a1-b179-dd549ec9209a";
  readonly source: 2;
@@ -285,7 +290,6 @@ export declare const workersAiCatalog: {
  readonly description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models. The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -295,7 +299,6 @@ export declare const workersAiCatalog: {
  readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.\n";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -335,7 +338,6 @@ export declare const workersAiCatalog: {
  readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly info: "https://llama.meta.com";
  readonly terms: "https://llama.meta.com/llama3/license/#";
  };
@@ -346,7 +348,6 @@ export declare const workersAiCatalog: {
  readonly description: " The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -465,6 +466,15 @@ export declare const workersAiCatalog: {
  readonly properties: {
  readonly beta: true;
  };
+ }, {
+ readonly id: "200f0812-148c-48c1-915d-fb3277a94a08";
+ readonly source: 1;
+ readonly name: "@cf/openai/whisper-large-v3-turbo";
+ readonly description: "Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. ";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly beta: true;
+ };
  }];
  };
  readonly 'Image-to-Text': {
@@ -597,7 +607,7 @@ export declare const workersAiCatalog: {
  readonly id: "57fbd08a-a4c4-411c-910d-b9459ff36c20";
  readonly source: 1;
  readonly name: "@cf/baai/bge-small-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Small) model that transforms any given text into a 384-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -609,7 +619,7 @@ export declare const workersAiCatalog: {
  readonly id: "429b9e8b-d99e-44de-91ad-706cf8183658";
  readonly source: 1;
  readonly name: "@cf/baai/bge-base-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -621,7 +631,7 @@ export declare const workersAiCatalog: {
  readonly id: "01bc2fb0-4bca-4598-b985-d2584a3f46c0";
  readonly source: 1;
  readonly name: "@cf/baai/bge-large-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Large) model that transforms any given text into a 1024-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -47,7 +47,6 @@ export const workersAiCatalog = {
  description: 'Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.',
  tags: [],
  properties: {
- beta: true,
  info: 'https://llama.meta.com',
  terms: 'https://llama.meta.com/llama3/license/#',
  },
@@ -59,7 +58,6 @@ export const workersAiCatalog = {
  description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
  },
  },
@@ -180,7 +178,6 @@ export const workersAiCatalog = {
  description: 'Llama 3.1 8B quantized to FP8 precision',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
  },
  },
@@ -213,7 +210,6 @@ export const workersAiCatalog = {
  description: 'The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
  },
  },
@@ -250,6 +246,16 @@ export const workersAiCatalog = {
  lora: true,
  },
  },
+ {
+ id: '7a143886-c9bb-4a1c-be95-377b1973bc3b',
+ source: 1,
+ name: '@cf/meta/llama-3.3-70b-instruct-fp8-fast',
+ description: 'Llama 3.3 70B quantized to fp8 precision, optimized to be faster.',
+ tags: [],
+ properties: {
+ terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE',
+ },
+ },
  {
  id: '673c56cc-8553-49a1-b179-dd549ec9209a',
  source: 2,
@@ -313,7 +319,6 @@ export const workersAiCatalog = {
  description: 'The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models. The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
  },
  },
@@ -324,7 +329,6 @@ export const workersAiCatalog = {
  description: 'Quantized (int4) generative text model with 8 billion parameters from Meta.\n',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE',
  },
  },
@@ -368,7 +372,6 @@ export const workersAiCatalog = {
  description: 'Quantized (int4) generative text model with 8 billion parameters from Meta.',
  tags: [],
  properties: {
- beta: true,
  info: 'https://llama.meta.com',
  terms: 'https://llama.meta.com/llama3/license/#',
  },
@@ -380,7 +383,6 @@ export const workersAiCatalog = {
  description: ' The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.',
  tags: [],
  properties: {
- beta: true,
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE',
  },
  },
@@ -513,6 +515,16 @@ export const workersAiCatalog = {
  beta: true,
  },
  },
+ {
+ id: '200f0812-148c-48c1-915d-fb3277a94a08',
+ source: 1,
+ name: '@cf/openai/whisper-large-v3-turbo',
+ description: 'Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. ',
+ tags: [],
+ properties: {
+ beta: true,
+ },
+ },
  ],
  },
  'Image-to-Text': {
@@ -660,7 +672,7 @@ export const workersAiCatalog = {
  id: '57fbd08a-a4c4-411c-910d-b9459ff36c20',
  source: 1,
  name: '@cf/baai/bge-small-en-v1.5',
- description: 'BAAI general embedding (bge) models transform any given text into a compact vector',
+ description: 'BAAI general embedding (Small) model that transforms any given text into a 384-dimensional vector',
  tags: [],
  properties: {
  beta: true,
@@ -673,7 +685,7 @@ export const workersAiCatalog = {
  id: '429b9e8b-d99e-44de-91ad-706cf8183658',
  source: 1,
  name: '@cf/baai/bge-base-en-v1.5',
- description: 'BAAI general embedding (bge) models transform any given text into a compact vector',
+ description: 'BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector',
  tags: [],
  properties: {
  beta: true,
@@ -686,7 +698,7 @@ export const workersAiCatalog = {
  id: '01bc2fb0-4bca-4598-b985-d2584a3f46c0',
  source: 1,
  name: '@cf/baai/bge-large-en-v1.5',
- description: 'BAAI general embedding (bge) models transform any given text into a compact vector',
+ description: 'BAAI general embedding (Large) model that transforms any given text into a 1024-dimensional vector',
  tags: [],
  properties: {
  beta: true,
package/dist/index.d.ts CHANGED
@@ -1,3 +1,4 @@
+ export * from './ai-tools/index.js';
  export * from './d1/index.js';
  export * from './discourse/index.js';
  export * from './super-ai/index.js';
package/dist/index.js CHANGED
@@ -1,3 +1,4 @@
+ export * from './ai-tools/index.js';
  export * from './d1/index.js';
  export * from './discourse/index.js';
  export * from './super-ai/index.js';
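Because dist/index now re-exports the ai-tools module alongside d1, discourse, and super-ai, the new names should also be reachable from the package root; a short sketch under that assumption (identifiers used here are illustrative):

import { AiModels, type AzureChatModels } from '@chainfuse/types';

// AzureChatModels is the new union of Azure deployment names added in ai-tools.
const azureModel: AzureChatModels = 'gpt-4o';
console.log(azureModel, AiModels.LanguageModels.Azure.gpt4o); // "gpt-4o" "azure:gpt-4o"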
@@ -1,5 +1,5 @@
  import type { JSONSchema7 } from 'json-schema';
- import { workersAiCatalog } from './workers-ai-catalog.js';
+ import { workersAiCatalog } from '../ai-tools/workers-ai-catalog.js';
  export interface Coordinate {
  lat: number;
  lon: number;
@@ -155,7 +155,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly info: "https://llama.meta.com";
  readonly terms: "https://llama.meta.com/llama3/license/#";
  };
@@ -166,7 +165,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -276,7 +274,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "Llama 3.1 8B quantized to FP8 precision";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -306,7 +303,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -339,6 +335,15 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly beta: true;
  readonly lora: true;
  };
+ }, {
+ readonly id: "7a143886-c9bb-4a1c-be95-377b1973bc3b";
+ readonly source: 1;
+ readonly name: "@cf/meta/llama-3.3-70b-instruct-fp8-fast";
+ readonly description: "Llama 3.3 70B quantized to fp8 precision, optimized to be faster.";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_3/LICENSE";
+ };
  }, {
  readonly id: "673c56cc-8553-49a1-b179-dd549ec9209a";
  readonly source: 2;
@@ -397,7 +402,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models. The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -407,7 +411,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.\n";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE";
  };
  }, {
@@ -447,7 +450,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: "Quantized (int4) generative text model with 8 billion parameters from Meta.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly info: "https://llama.meta.com";
  readonly terms: "https://llama.meta.com/llama3/license/#";
  };
@@ -458,7 +460,6 @@ export declare const possibilities_mc_safety: readonly [...{
  readonly description: " The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.";
  readonly tags: readonly [];
  readonly properties: {
- readonly beta: true;
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/LICENSE";
  };
  }, {
@@ -532,7 +533,7 @@ export declare const possibilities_mc_embedding: readonly [...{
  readonly id: "57fbd08a-a4c4-411c-910d-b9459ff36c20";
  readonly source: 1;
  readonly name: "@cf/baai/bge-small-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Small) model that transforms any given text into a 384-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -544,7 +545,7 @@ export declare const possibilities_mc_embedding: readonly [...{
  readonly id: "429b9e8b-d99e-44de-91ad-706cf8183658";
  readonly source: 1;
  readonly name: "@cf/baai/bge-base-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Base) model that transforms any given text into a 768-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -556,7 +557,7 @@ export declare const possibilities_mc_embedding: readonly [...{
  readonly id: "01bc2fb0-4bca-4598-b985-d2584a3f46c0";
  readonly source: 1;
  readonly name: "@cf/baai/bge-large-en-v1.5";
- readonly description: "BAAI general embedding (bge) models transform any given text into a compact vector";
+ readonly description: "BAAI general embedding (Large) model that transforms any given text into a 1024-dimensional vector";
  readonly tags: readonly [];
  readonly properties: {
  readonly beta: true;
@@ -1,4 +1,4 @@
- import { workersAiCatalog } from './workers-ai-catalog.js';
+ import { workersAiCatalog } from '../ai-tools/workers-ai-catalog.js';
  export var enabledAzureLlmProviders;
  (function (enabledAzureLlmProviders) {
  enabledAzureLlmProviders["Azure_OpenAi_Gpt3"] = "azure_openai_gpt3";
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@chainfuse/types",
- "version": "1.3.0",
+ "version": "1.4.1",
  "description": "",
  "author": "ChainFuse",
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -28,10 +28,7 @@
  "lint:fix": "npm run lint -- --fix",
  "clean": "npx -y rimraf@latest ./dist ./.tsbuildinfo",
  "build": "tsc",
- "build:clean": "npm run build -- --build --clean && npm run build",
- "pretest": "npm -w @chainfuse/types i -D @chainfuse/super-ai @chainfuse/types-internal && tsc --project tsconfig.tests.json",
- "test": "node --enable-source-maps --test --experimental-test-coverage --test-reporter=spec --test-reporter-destination=stdout",
- "test:local": "npm run test -- --env-file=.dev.vars"
+ "build:clean": "npm run build -- --build --clean && npm run build"
  },
  "type": "module",
  "bugs": {
@@ -75,14 +72,22 @@
  "types": "./dist/super-ai/index.d.ts"
  },
  "./super-ai/catalog/workers-ai": {
- "import": "./dist/super-ai/workers-ai-catalog.js",
- "types": "./dist/super-ai/workers-ai-catalog.d.ts"
+ "import": "./dist/ai-tools/workers-ai-catalog.js",
+ "types": "./dist/ai-tools/workers-ai-catalog.d.ts"
+ },
+ "./ai-tools": {
+ "import": "./dist/ai-tools/index.js",
+ "types": "./dist/ai-tools/index.d.ts"
+ },
+ "./ai-tools/catalog/workers-ai": {
+ "import": "./dist/ai-tools/workers-ai-catalog.js",
+ "types": "./dist/ai-tools/workers-ai-catalog.d.ts"
  }
  },
  "prettier": "@demosjarco/prettier-config",
  "devDependencies": {
- "@cloudflare/workers-types": "^4.20241112.0",
+ "@cloudflare/workers-types": "^4.20250109.0",
  "@types/json-schema": "^7.0.15"
  },
- "gitHead": "ea93b05607f7e9687526434591370bfad1ad8605"
+ "gitHead": "75406cd04aedccc51d9972a79dfbbd5ce7fe6945"
  }
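Note on the exports map above: the legacy ./super-ai/catalog/workers-ai specifier is kept but now points at the ai-tools build output, and two new ./ai-tools specifiers are added. A hedged sketch of what that implies for consumers; the equality check assumes Node resolves both specifiers to the same module file, as the map indicates:

import { workersAiCatalog } from '@chainfuse/types/ai-tools/catalog/workers-ai';
import { workersAiCatalog as legacyCatalog } from '@chainfuse/types/super-ai/catalog/workers-ai';

// Both entries resolve to ./dist/ai-tools/workers-ai-catalog.js, so the same object should come back.
console.log(workersAiCatalog === legacyCatalog); // expected: true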