@chainfuse/types 2.9.0 → 2.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -235,7 +235,6 @@ export declare const workersAiCatalog: {
  readonly price: 4.88;
  readonly currency: "USD";
  }];
- readonly lora: true;
  readonly terms: "https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE";
  };
  }, {
@@ -368,6 +367,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2024-12-06 17:09:18.338";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly context_window: 24000;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
@@ -404,6 +404,26 @@ export declare const workersAiCatalog: {
  readonly context_window: 4096;
  readonly terms: "https://huggingface.co/TheBloke/deepseek-coder-6.7B-instruct-AWQ";
  };
+ }, {
+ readonly id: "51b71d5b-8bc0-4489-a107-95e542b69914";
+ readonly source: 1;
+ readonly name: "@cf/qwen/qwen2.5-coder-32b-instruct";
+ readonly description: "Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). As of now, Qwen2.5-Coder has covered six mainstream model sizes, 0.5, 1.5, 3, 7, 14, 32 billion parameters, to meet the needs of different developers. Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:";
+ readonly created_at: "2025-02-27 00:31:43.829";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 32768;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.66;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 1;
+ readonly currency: "USD";
+ }];
+ readonly lora: true;
+ };
  }, {
  readonly id: "4c3a544e-da47-4336-9cea-c7cbfab33f16";
  readonly source: 1;
@@ -518,6 +538,25 @@ export declare const workersAiCatalog: {
  readonly context_window: 32000;
  readonly info: "https://huggingface.co/qwen/qwen1.5-1.8b-chat";
  };
+ }, {
+ readonly id: "31690291-ebdc-4f98-bcfc-a44844e215b7";
+ readonly source: 1;
+ readonly name: "@cf/mistralai/mistral-small-3.1-24b-instruct";
+ readonly description: "Building upon Mistral Small 3 (2501), Mistral Small 3.1 (2503) adds state-of-the-art vision understanding and enhances long context capabilities up to 128k tokens without compromising text performance. With 24 billion parameters, this model achieves top-tier capabilities in both text and vision tasks.";
+ readonly created_at: "2025-03-18 03:28:37.890";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 128000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.35;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 0.56;
+ readonly currency: "USD";
+ }];
+ };
  }, {
  readonly id: "31097538-a3ff-4e6e-bb56-ad0e1f428b61";
  readonly source: 1;
@@ -652,6 +691,46 @@ export declare const workersAiCatalog: {
  }];
  readonly terms: "https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE";
  };
+ }, {
+ readonly id: "053d5ac0-861b-4d3b-8501-e58d00417ef8";
+ readonly source: 1;
+ readonly name: "@cf/google/gemma-3-12b-it";
+ readonly description: "Gemma 3 models are well-suited for a variety of text generation and image understanding tasks, including question answering, summarization, and reasoning. Gemma 3 models are multimodal, handling text and image input and generating text output, with a large, 128K context window, multilingual support in over 140 languages, and is available in more sizes than previous versions.";
+ readonly created_at: "2025-03-18 03:58:02.423";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 1280000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.35;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 0.56;
+ readonly currency: "USD";
+ }];
+ readonly lora: true;
+ };
+ }, {
+ readonly id: "02c16efa-29f5-4304-8e6c-3d188889f875";
+ readonly source: 1;
+ readonly name: "@cf/qwen/qwq-32b";
+ readonly description: "QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.";
+ readonly created_at: "2025-03-05 21:52:40.974";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 24000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.66;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 1;
+ readonly currency: "USD";
+ }];
+ readonly lora: true;
+ };
  }];
  };
  readonly 'Text Embeddings': {
@@ -665,6 +744,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2024-05-22 19:27:09.781";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.012;
@@ -679,6 +759,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2023-11-07 15:43:58.042";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.02;
@@ -696,6 +777,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2023-09-25 19:21:11.898";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.067;
@@ -713,6 +795,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2023-11-07 15:43:58.042";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.2;
@@ -999,6 +1082,7 @@ export declare const workersAiCatalog: {
  readonly created_at: "2023-09-25 19:21:11.898";
  readonly tags: readonly [];
  readonly properties: {
+ readonly async_queue: true;
  readonly price: readonly [{
  readonly unit: "per M input tokens";
  readonly price: 0.34;
@@ -268,7 +268,6 @@ export const workersAiCatalog = {
  currency: 'USD',
  },
  ],
- lora: true,
  terms: 'https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE',
  },
  },
@@ -417,6 +416,7 @@ export const workersAiCatalog = {
  created_at: '2024-12-06 17:09:18.338',
  tags: [],
  properties: {
+ async_queue: true,
  context_window: 24000,
  price: [
  {
@@ -459,6 +459,30 @@ export const workersAiCatalog = {
  terms: 'https://huggingface.co/TheBloke/deepseek-coder-6.7B-instruct-AWQ',
  },
  },
+ {
+ id: '51b71d5b-8bc0-4489-a107-95e542b69914',
+ source: 1,
+ name: '@cf/qwen/qwen2.5-coder-32b-instruct',
+ description: 'Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). As of now, Qwen2.5-Coder has covered six mainstream model sizes, 0.5, 1.5, 3, 7, 14, 32 billion parameters, to meet the needs of different developers. Qwen2.5-Coder brings the following improvements upon CodeQwen1.5:',
+ created_at: '2025-02-27 00:31:43.829',
+ tags: [],
+ properties: {
+ context_window: 32768,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.66,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 1,
+ currency: 'USD',
+ },
+ ],
+ lora: true,
+ },
+ },
  {
  id: '4c3a544e-da47-4336-9cea-c7cbfab33f16',
  source: 1,
@@ -587,6 +611,29 @@ export const workersAiCatalog = {
  info: 'https://huggingface.co/qwen/qwen1.5-1.8b-chat',
  },
  },
+ {
+ id: '31690291-ebdc-4f98-bcfc-a44844e215b7',
+ source: 1,
+ name: '@cf/mistralai/mistral-small-3.1-24b-instruct',
+ description: 'Building upon Mistral Small 3 (2501), Mistral Small 3.1 (2503) adds state-of-the-art vision understanding and enhances long context capabilities up to 128k tokens without compromising text performance. With 24 billion parameters, this model achieves top-tier capabilities in both text and vision tasks.',
+ created_at: '2025-03-18 03:28:37.890',
+ tags: [],
+ properties: {
+ context_window: 128000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.35,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 0.56,
+ currency: 'USD',
+ },
+ ],
+ },
+ },
  {
  id: '31097538-a3ff-4e6e-bb56-ad0e1f428b61',
  source: 1,
@@ -739,6 +786,54 @@ export const workersAiCatalog = {
  terms: 'https://github.com/meta-llama/llama-models/blob/main/models/llama4/LICENSE',
  },
  },
+ {
+ id: '053d5ac0-861b-4d3b-8501-e58d00417ef8',
+ source: 1,
+ name: '@cf/google/gemma-3-12b-it',
+ description: 'Gemma 3 models are well-suited for a variety of text generation and image understanding tasks, including question answering, summarization, and reasoning. Gemma 3 models are multimodal, handling text and image input and generating text output, with a large, 128K context window, multilingual support in over 140 languages, and is available in more sizes than previous versions.',
+ created_at: '2025-03-18 03:58:02.423',
+ tags: [],
+ properties: {
+ context_window: 1280000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.35,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 0.56,
+ currency: 'USD',
+ },
+ ],
+ lora: true,
+ },
+ },
+ {
+ id: '02c16efa-29f5-4304-8e6c-3d188889f875',
+ source: 1,
+ name: '@cf/qwen/qwq-32b',
+ description: 'QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks, especially hard problems. QwQ-32B is the medium-sized reasoning model, which is capable of achieving competitive performance against state-of-the-art reasoning models, e.g., DeepSeek-R1, o1-mini.',
+ created_at: '2025-03-05 21:52:40.974',
+ tags: [],
+ properties: {
+ context_window: 24000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.66,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 1,
+ currency: 'USD',
+ },
+ ],
+ lora: true,
+ },
+ },
  ],
  },
  'Text Embeddings': {
@@ -753,6 +848,7 @@ export const workersAiCatalog = {
  created_at: '2024-05-22 19:27:09.781',
  tags: [],
  properties: {
+ async_queue: true,
  price: [
  {
  unit: 'per M input tokens',
@@ -770,6 +866,7 @@ export const workersAiCatalog = {
  created_at: '2023-11-07 15:43:58.042',
  tags: [],
  properties: {
+ async_queue: true,
  price: [
  {
  unit: 'per M input tokens',
@@ -790,6 +887,7 @@ export const workersAiCatalog = {
  created_at: '2023-09-25 19:21:11.898',
  tags: [],
  properties: {
+ async_queue: true,
  price: [
  {
  unit: 'per M input tokens',
@@ -810,6 +908,7 @@ export const workersAiCatalog = {
  created_at: '2023-11-07 15:43:58.042',
  tags: [],
  properties: {
+ async_queue: true,
  price: [
  {
  unit: 'per M input tokens',
@@ -1148,6 +1247,7 @@ export const workersAiCatalog = {
  created_at: '2023-09-25 19:21:11.898',
  tags: [],
  properties: {
+ async_queue: true,
  price: [
  {
  unit: 'per M input tokens',
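The catalog changes above add new model entries and an `async_queue` flag; each entry carries `properties` with an optional `context_window`, per-token `price` list, and `lora` flag. As an illustration only, here is a minimal TypeScript sketch of filtering entries shaped like these; the `CatalogModelEntry` interface and helper functions below are hypothetical and not part of the package's exported API.

```ts
// Hypothetical shape mirroring the catalog entries added in this release.
// The real `workersAiCatalog` is a deeply nested `as const` object, so this
// interface is an illustrative simplification, not the package's own type.
interface CatalogModelEntry {
	readonly id: string;
	readonly name: string;
	readonly properties: {
		readonly context_window?: number;
		readonly async_queue?: boolean;
		readonly lora?: boolean;
		readonly price?: readonly { unit: string; price: number; currency: string }[];
	};
}

// List models that advertise the new `async_queue` capability.
function asyncQueueModelNames(models: readonly CatalogModelEntry[]): string[] {
	return models.filter((model) => model.properties.async_queue === true).map((model) => model.name);
}

// List models that accept LoRA adapters, e.g. the new `@cf/qwen/qwq-32b` entry.
function loraModelNames(models: readonly CatalogModelEntry[]): string[] {
	return models.filter((model) => model.properties.lora === true).map((model) => model.name);
}
```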
package/dist/index.d.ts CHANGED
@@ -50,7 +50,7 @@ export interface ExternallyResolvablePromise<T> {
  export type UndefinedProperties<T extends object> = {
  [P in keyof T]: undefined;
  };
- export type CustomLogCallback = (message?: any, ...optionalParams: any[]) => void;
+ export type CustomLogCallback = (message?: any, ...optionalParams: any[]) => PromiseLike<void> | void;
  export type CustomLoging = boolean | CustomLogCallback;
  /**
  * @link https://developers.cloudflare.com/durable-objects/reference/data-location/#restrict-durable-objects-to-a-jurisdiction
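The `CustomLogCallback` return type widens from `void` to `PromiseLike<void> | void`, so async logging callbacks are now assignable. A minimal sketch, assuming both types are exported from the package entry point; the ingest URL is a placeholder.

```ts
import type { CustomLogCallback, CustomLoging } from '@chainfuse/types';

// A synchronous callback satisfies the type exactly as before.
const syncLogger: CustomLogCallback = (message, ...optionalParams) => {
	console.log(message, ...optionalParams);
};

// As of 2.10.0 an async callback also type-checks, since an `async` function
// returns a `Promise<void>`, which is a `PromiseLike<void>`.
const asyncLogger: CustomLogCallback = async (message) => {
	await fetch('https://logs.example.com/ingest', {
		method: 'POST',
		body: JSON.stringify({ message }),
	});
};

// `CustomLoging` still accepts a boolean or either style of callback.
const logging: CustomLoging = asyncLogger;
```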
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@chainfuse/types",
- "version": "2.9.0",
+ "version": "2.10.0",
  "description": "",
  "author": "ChainFuse",
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -94,8 +94,8 @@
  "zod": "^3.24.2"
  },
  "devDependencies": {
- "@cloudflare/workers-types": "^4.20250409.0",
+ "@cloudflare/workers-types": "^4.20250410.0",
  "@types/validator": "^13.12.3"
  },
- "gitHead": "cf5c05bde60fd803afc1dc3031d655f643b503c6"
+ "gitHead": "c480462d073b3d89c915eb003b0ba789fe79d414"
  }