@chainfuse/types 2.10.27 → 2.10.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,11 +31,8 @@ export declare namespace AiModels {
  */
  enum GoogleGenerativeAi {
  gemini_flash_lite = "google.generative-ai:gemini-2.0-flash-lite",
- gemini_flash_lite_search = "google.generative-ai:gemini-2.0-flash-lite:search",
  gemini_flash = "google.generative-ai:gemini-2.5-flash-preview",
- gemini_flash_search = "google.generative-ai:gemini-2.5-flash-preview:search",
- gemini_pro = "google.generative-ai:gemini-2.5-pro-preview",
- gemini_pro_search = "google.generative-ai:gemini-2.5-pro-preview:search"
+ gemini_pro = "google.generative-ai:gemini-2.5-pro-preview"
  }
  enum OpenAi {
  gpt41_nano = "openai:gpt-4.1-nano",
@@ -80,8 +77,9 @@ export declare namespace AiModels {
  }
  }
  }
- export declare const ZodLanguageModelValues3: z3.ZodEnum<["workersai:@cf/qwen/qwen1.5-0.5b-chat" | "workersai:@cf/google/gemma-2b-it-lora" | "workersai:@hf/nexusflow/starling-lm-7b-beta" | "workersai:@cf/meta/llama-3-8b-instruct" | "workersai:@cf/meta/llama-3.2-3b-instruct" | "workersai:@hf/thebloke/llamaguard-7b-awq" | "workersai:@hf/thebloke/neural-chat-7b-v3-1-awq" | "workersai:@cf/meta/llama-guard-3-8b" | "workersai:@cf/meta/llama-2-7b-chat-fp16" | "workersai:@cf/mistral/mistral-7b-instruct-v0.1" | "workersai:@cf/mistral/mistral-7b-instruct-v0.2-lora" | "workersai:@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "workersai:@hf/mistral/mistral-7b-instruct-v0.2" | "workersai:@cf/fblgit/una-cybertron-7b-v2-bf16" | "workersai:@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "workersai:@cf/thebloke/discolm-german-7b-v1-awq" | "workersai:@cf/meta/llama-2-7b-chat-int8" | "workersai:@cf/meta/llama-3.1-8b-instruct-fp8" | "workersai:@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "workersai:@cf/qwen/qwen1.5-7b-chat-awq" | "workersai:@cf/meta/llama-3.2-1b-instruct" | "workersai:@hf/thebloke/llama-2-13b-chat-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq" | "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora" | "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "workersai:@cf/qwen/qwen2.5-coder-32b-instruct" | "workersai:@cf/deepseek-ai/deepseek-math-7b-instruct" | "workersai:@cf/tiiuae/falcon-7b-instruct" | "workersai:@hf/nousresearch/hermes-2-pro-mistral-7b" | "workersai:@cf/meta/llama-3.1-8b-instruct-awq" | "workersai:@hf/thebloke/zephyr-7b-beta-awq" | "workersai:@cf/google/gemma-7b-it-lora" | "workersai:@cf/qwen/qwen1.5-1.8b-chat" | "workersai:@cf/mistralai/mistral-small-3.1-24b-instruct" | "workersai:@cf/meta/llama-3-8b-instruct-awq" | "workersai:@cf/meta/llama-3.2-11b-vision-instruct" | "workersai:@cf/defog/sqlcoder-7b-2" | "workersai:@cf/microsoft/phi-2" | "workersai:@hf/meta-llama/meta-llama-3-8b-instruct" | "workersai:@hf/google/gemma-7b-it" | "workersai:@cf/qwen/qwen1.5-14b-chat-awq" | "workersai:@cf/openchat/openchat-3.5-0106" | "workersai:@cf/meta/llama-4-scout-17b-16e-instruct" | "workersai:@cf/google/gemma-3-12b-it" | "workersai:@cf/qwen/qwq-32b" | AiModels.LanguageModels.Azure | AiModels.LanguageModels.Anthropic | AiModels.LanguageModels.GoogleGenerativeAi | AiModels.LanguageModels.OpenAi, ...("workersai:@cf/qwen/qwen1.5-0.5b-chat" | "workersai:@cf/google/gemma-2b-it-lora" | "workersai:@hf/nexusflow/starling-lm-7b-beta" | "workersai:@cf/meta/llama-3-8b-instruct" | "workersai:@cf/meta/llama-3.2-3b-instruct" | "workersai:@hf/thebloke/llamaguard-7b-awq" | "workersai:@hf/thebloke/neural-chat-7b-v3-1-awq" | "workersai:@cf/meta/llama-guard-3-8b" | "workersai:@cf/meta/llama-2-7b-chat-fp16" | "workersai:@cf/mistral/mistral-7b-instruct-v0.1" | "workersai:@cf/mistral/mistral-7b-instruct-v0.2-lora" | "workersai:@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "workersai:@hf/mistral/mistral-7b-instruct-v0.2" | "workersai:@cf/fblgit/una-cybertron-7b-v2-bf16" | "workersai:@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "workersai:@cf/thebloke/discolm-german-7b-v1-awq" | "workersai:@cf/meta/llama-2-7b-chat-int8" | "workersai:@cf/meta/llama-3.1-8b-instruct-fp8" | "workersai:@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "workersai:@cf/qwen/qwen1.5-7b-chat-awq" | "workersai:@cf/meta/llama-3.2-1b-instruct" | "workersai:@hf/thebloke/llama-2-13b-chat-awq" | 
"workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq" | "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora" | "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "workersai:@cf/qwen/qwen2.5-coder-32b-instruct" | "workersai:@cf/deepseek-ai/deepseek-math-7b-instruct" | "workersai:@cf/tiiuae/falcon-7b-instruct" | "workersai:@hf/nousresearch/hermes-2-pro-mistral-7b" | "workersai:@cf/meta/llama-3.1-8b-instruct-awq" | "workersai:@hf/thebloke/zephyr-7b-beta-awq" | "workersai:@cf/google/gemma-7b-it-lora" | "workersai:@cf/qwen/qwen1.5-1.8b-chat" | "workersai:@cf/mistralai/mistral-small-3.1-24b-instruct" | "workersai:@cf/meta/llama-3-8b-instruct-awq" | "workersai:@cf/meta/llama-3.2-11b-vision-instruct" | "workersai:@cf/defog/sqlcoder-7b-2" | "workersai:@cf/microsoft/phi-2" | "workersai:@hf/meta-llama/meta-llama-3-8b-instruct" | "workersai:@hf/google/gemma-7b-it" | "workersai:@cf/qwen/qwen1.5-14b-chat-awq" | "workersai:@cf/openchat/openchat-3.5-0106" | "workersai:@cf/meta/llama-4-scout-17b-16e-instruct" | "workersai:@cf/google/gemma-3-12b-it" | "workersai:@cf/qwen/qwq-32b" | AiModels.LanguageModels.Azure | AiModels.LanguageModels.Anthropic | AiModels.LanguageModels.GoogleGenerativeAi | AiModels.LanguageModels.OpenAi)[]]>;
+ export declare const ZodLanguageModelValues3: z3.ZodEnum<["workersai:@cf/openai/gpt-oss-120b" | "workersai:@cf/qwen/qwen1.5-0.5b-chat" | "workersai:@cf/google/gemma-2b-it-lora" | "workersai:@hf/nexusflow/starling-lm-7b-beta" | "workersai:@cf/meta/llama-3-8b-instruct" | "workersai:@cf/meta/llama-3.2-3b-instruct" | "workersai:@hf/thebloke/llamaguard-7b-awq" | "workersai:@hf/thebloke/neural-chat-7b-v3-1-awq" | "workersai:@cf/meta/llama-guard-3-8b" | "workersai:@cf/meta/llama-2-7b-chat-fp16" | "workersai:@cf/mistral/mistral-7b-instruct-v0.1" | "workersai:@cf/mistral/mistral-7b-instruct-v0.2-lora" | "workersai:@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "workersai:@hf/mistral/mistral-7b-instruct-v0.2" | "workersai:@cf/fblgit/una-cybertron-7b-v2-bf16" | "workersai:@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "workersai:@cf/thebloke/discolm-german-7b-v1-awq" | "workersai:@cf/meta/llama-2-7b-chat-int8" | "workersai:@cf/meta/llama-3.1-8b-instruct-fp8" | "workersai:@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "workersai:@cf/qwen/qwen1.5-7b-chat-awq" | "workersai:@cf/meta/llama-3.2-1b-instruct" | "workersai:@hf/thebloke/llama-2-13b-chat-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq" | "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora" | "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "workersai:@cf/qwen/qwen2.5-coder-32b-instruct" | "workersai:@cf/deepseek-ai/deepseek-math-7b-instruct" | "workersai:@cf/tiiuae/falcon-7b-instruct" | "workersai:@hf/nousresearch/hermes-2-pro-mistral-7b" | "workersai:@cf/meta/llama-3.1-8b-instruct-awq" | "workersai:@hf/thebloke/zephyr-7b-beta-awq" | "workersai:@cf/google/gemma-7b-it-lora" | "workersai:@cf/qwen/qwen1.5-1.8b-chat" | "workersai:@cf/mistralai/mistral-small-3.1-24b-instruct" | "workersai:@cf/meta/llama-3-8b-instruct-awq" | "workersai:@cf/meta/llama-3.2-11b-vision-instruct" | "workersai:@cf/defog/sqlcoder-7b-2" | "workersai:@cf/microsoft/phi-2" | "workersai:@hf/meta-llama/meta-llama-3-8b-instruct" | "workersai:@cf/openai/gpt-oss-20b" | "workersai:@hf/google/gemma-7b-it" | "workersai:@cf/qwen/qwen1.5-14b-chat-awq" | "workersai:@cf/openchat/openchat-3.5-0106" | "workersai:@cf/meta/llama-4-scout-17b-16e-instruct" | "workersai:@cf/google/gemma-3-12b-it" | "workersai:@cf/qwen/qwq-32b" | AiModels.LanguageModels.Azure | AiModels.LanguageModels.Anthropic | AiModels.LanguageModels.GoogleGenerativeAi | AiModels.LanguageModels.OpenAi, ...("workersai:@cf/openai/gpt-oss-120b" | "workersai:@cf/qwen/qwen1.5-0.5b-chat" | "workersai:@cf/google/gemma-2b-it-lora" | "workersai:@hf/nexusflow/starling-lm-7b-beta" | "workersai:@cf/meta/llama-3-8b-instruct" | "workersai:@cf/meta/llama-3.2-3b-instruct" | "workersai:@hf/thebloke/llamaguard-7b-awq" | "workersai:@hf/thebloke/neural-chat-7b-v3-1-awq" | "workersai:@cf/meta/llama-guard-3-8b" | "workersai:@cf/meta/llama-2-7b-chat-fp16" | "workersai:@cf/mistral/mistral-7b-instruct-v0.1" | "workersai:@cf/mistral/mistral-7b-instruct-v0.2-lora" | "workersai:@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "workersai:@hf/mistral/mistral-7b-instruct-v0.2" | "workersai:@cf/fblgit/una-cybertron-7b-v2-bf16" | "workersai:@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "workersai:@cf/thebloke/discolm-german-7b-v1-awq" | "workersai:@cf/meta/llama-2-7b-chat-int8" | "workersai:@cf/meta/llama-3.1-8b-instruct-fp8" | "workersai:@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "workersai:@cf/qwen/qwen1.5-7b-chat-awq" | 
"workersai:@cf/meta/llama-3.2-1b-instruct" | "workersai:@hf/thebloke/llama-2-13b-chat-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-base-awq" | "workersai:@cf/meta-llama/llama-2-7b-chat-hf-lora" | "workersai:@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "workersai:@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "workersai:@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "workersai:@cf/qwen/qwen2.5-coder-32b-instruct" | "workersai:@cf/deepseek-ai/deepseek-math-7b-instruct" | "workersai:@cf/tiiuae/falcon-7b-instruct" | "workersai:@hf/nousresearch/hermes-2-pro-mistral-7b" | "workersai:@cf/meta/llama-3.1-8b-instruct-awq" | "workersai:@hf/thebloke/zephyr-7b-beta-awq" | "workersai:@cf/google/gemma-7b-it-lora" | "workersai:@cf/qwen/qwen1.5-1.8b-chat" | "workersai:@cf/mistralai/mistral-small-3.1-24b-instruct" | "workersai:@cf/meta/llama-3-8b-instruct-awq" | "workersai:@cf/meta/llama-3.2-11b-vision-instruct" | "workersai:@cf/defog/sqlcoder-7b-2" | "workersai:@cf/microsoft/phi-2" | "workersai:@hf/meta-llama/meta-llama-3-8b-instruct" | "workersai:@cf/openai/gpt-oss-20b" | "workersai:@hf/google/gemma-7b-it" | "workersai:@cf/qwen/qwen1.5-14b-chat-awq" | "workersai:@cf/openchat/openchat-3.5-0106" | "workersai:@cf/meta/llama-4-scout-17b-16e-instruct" | "workersai:@cf/google/gemma-3-12b-it" | "workersai:@cf/qwen/qwq-32b" | AiModels.LanguageModels.Azure | AiModels.LanguageModels.Anthropic | AiModels.LanguageModels.GoogleGenerativeAi | AiModels.LanguageModels.OpenAi)[]]>;
  export declare const ZodLanguageModelValues: z4.ZodEnum<{
+ "workersai:@cf/openai/gpt-oss-120b": "workersai:@cf/openai/gpt-oss-120b";
  "workersai:@cf/qwen/qwen1.5-0.5b-chat": "workersai:@cf/qwen/qwen1.5-0.5b-chat";
  "workersai:@cf/google/gemma-2b-it-lora": "workersai:@cf/google/gemma-2b-it-lora";
  "workersai:@hf/nexusflow/starling-lm-7b-beta": "workersai:@hf/nexusflow/starling-lm-7b-beta";
@@ -123,6 +121,7 @@ export declare const ZodLanguageModelValues: z4.ZodEnum<{
  "workersai:@cf/defog/sqlcoder-7b-2": "workersai:@cf/defog/sqlcoder-7b-2";
  "workersai:@cf/microsoft/phi-2": "workersai:@cf/microsoft/phi-2";
  "workersai:@hf/meta-llama/meta-llama-3-8b-instruct": "workersai:@hf/meta-llama/meta-llama-3-8b-instruct";
+ "workersai:@cf/openai/gpt-oss-20b": "workersai:@cf/openai/gpt-oss-20b";
  "workersai:@hf/google/gemma-7b-it": "workersai:@hf/google/gemma-7b-it";
  "workersai:@cf/qwen/qwen1.5-14b-chat-awq": "workersai:@cf/qwen/qwen1.5-14b-chat-awq";
  "workersai:@cf/openchat/openchat-3.5-0106": "workersai:@cf/openchat/openchat-3.5-0106";
@@ -142,11 +141,8 @@ export declare const ZodLanguageModelValues: z4.ZodEnum<{
  "anthropic:claude-4-sonnet-latest": AiModels.LanguageModels.Anthropic.sonnet;
  "anthropic:claude-4-opus-latest": AiModels.LanguageModels.Anthropic.haiku;
  "google.generative-ai:gemini-2.0-flash-lite": AiModels.LanguageModels.GoogleGenerativeAi.gemini_flash_lite;
- "google.generative-ai:gemini-2.0-flash-lite:search": AiModels.LanguageModels.GoogleGenerativeAi.gemini_flash_lite_search;
  "google.generative-ai:gemini-2.5-flash-preview": AiModels.LanguageModels.GoogleGenerativeAi.gemini_flash;
- "google.generative-ai:gemini-2.5-flash-preview:search": AiModels.LanguageModels.GoogleGenerativeAi.gemini_flash_search;
  "google.generative-ai:gemini-2.5-pro-preview": AiModels.LanguageModels.GoogleGenerativeAi.gemini_pro;
- "google.generative-ai:gemini-2.5-pro-preview:search": AiModels.LanguageModels.GoogleGenerativeAi.gemini_pro_search;
  "openai:gpt-4.1-nano": AiModels.LanguageModels.OpenAi.gpt41_nano;
  "openai:gpt-4o-mini": AiModels.LanguageModels.OpenAi.gpt4o_mini;
  "openai:gpt-4.1-mini": AiModels.LanguageModels.OpenAi.gpt41_mini;
@@ -33,11 +33,8 @@ export var AiModels;
  let GoogleGenerativeAi;
  (function (GoogleGenerativeAi) {
  GoogleGenerativeAi["gemini_flash_lite"] = "google.generative-ai:gemini-2.0-flash-lite";
- GoogleGenerativeAi["gemini_flash_lite_search"] = "google.generative-ai:gemini-2.0-flash-lite:search";
  GoogleGenerativeAi["gemini_flash"] = "google.generative-ai:gemini-2.5-flash-preview";
- GoogleGenerativeAi["gemini_flash_search"] = "google.generative-ai:gemini-2.5-flash-preview:search";
  GoogleGenerativeAi["gemini_pro"] = "google.generative-ai:gemini-2.5-pro-preview";
- GoogleGenerativeAi["gemini_pro_search"] = "google.generative-ai:gemini-2.5-pro-preview:search";
  })(GoogleGenerativeAi = LanguageModels.GoogleGenerativeAi || (LanguageModels.GoogleGenerativeAi = {}));
  let OpenAi;
  (function (OpenAi) {
@@ -4,6 +4,25 @@ export declare const workersAiCatalog: {
  readonly id: "c329a1f9-323d-4e91-b2aa-582dd4188d34";
  readonly description: "Family of generative text models, such as large language models (LLM), that can be adapted for a variety of natural language tasks.";
  readonly models: readonly [{
+ readonly id: "f9f2250b-1048-4a52-9910-d0bf976616a1";
+ readonly source: 1;
+ readonly name: "@cf/openai/gpt-oss-120b";
+ readonly description: "OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-120b is for production, general purpose, high reasoning use-cases.";
+ readonly created_at: "2025-08-05 10:27:29.131";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 128000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.35;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 0.75;
+ readonly currency: "USD";
+ }];
+ };
+ }, {
  readonly id: "f8703a00-ed54-4f98-bdc3-cd9a813286f3";
  readonly source: 1;
  readonly name: "@cf/qwen/qwen1.5-0.5b-chat";
@@ -615,6 +634,25 @@ export declare const workersAiCatalog: {
  readonly properties: {
  readonly context_window: 8192;
  };
+ }, {
+ readonly id: "188a4e1e-253e-46d0-9616-0bf8c149763f";
+ readonly source: 1;
+ readonly name: "@cf/openai/gpt-oss-20b";
+ readonly description: "OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-20b is for lower latency, and local or specialized use-cases.";
+ readonly created_at: "2025-08-05 10:49:53.265";
+ readonly tags: readonly [];
+ readonly properties: {
+ readonly context_window: 128000;
+ readonly price: readonly [{
+ readonly unit: "per M input tokens";
+ readonly price: 0.2;
+ readonly currency: "USD";
+ }, {
+ readonly unit: "per M output tokens";
+ readonly price: 0.3;
+ readonly currency: "USD";
+ }];
+ };
  }, {
  readonly id: "0f002249-7d86-4698-aabf-8529ed86cefb";
  readonly source: 2;
@@ -4,6 +4,29 @@ export const workersAiCatalog = {
  id: 'c329a1f9-323d-4e91-b2aa-582dd4188d34',
  description: 'Family of generative text models, such as large language models (LLM), that can be adapted for a variety of natural language tasks.',
  models: [
+ {
+ id: 'f9f2250b-1048-4a52-9910-d0bf976616a1',
+ source: 1,
+ name: '@cf/openai/gpt-oss-120b',
+ description: 'OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-120b is for production, general purpose, high reasoning use-cases.',
+ created_at: '2025-08-05 10:27:29.131',
+ tags: [],
+ properties: {
+ context_window: 128000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.35,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 0.75,
+ currency: 'USD',
+ },
+ ],
+ },
+ },
  {
  id: 'f8703a00-ed54-4f98-bdc3-cd9a813286f3',
  source: 1,
@@ -699,6 +722,29 @@ export const workersAiCatalog = {
  context_window: 8192,
  },
  },
+ {
+ id: '188a4e1e-253e-46d0-9616-0bf8c149763f',
+ source: 1,
+ name: '@cf/openai/gpt-oss-20b',
+ description: 'OpenAI’s open-weight models designed for powerful reasoning, agentic tasks, and versatile developer use cases – gpt-oss-20b is for lower latency, and local or specialized use-cases.',
+ created_at: '2025-08-05 10:49:53.265',
+ tags: [],
+ properties: {
+ context_window: 128000,
+ price: [
+ {
+ unit: 'per M input tokens',
+ price: 0.2,
+ currency: 'USD',
+ },
+ {
+ unit: 'per M output tokens',
+ price: 0.3,
+ currency: 'USD',
+ },
+ ],
+ },
+ },
  {
  id: '0f002249-7d86-4698-aabf-8529ed86cefb',
  source: 2,
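Both new catalog entries carry per-million-token pricing. A hypothetical helper (not part of the package) showing how that shape could be used to estimate a request cost; PriceEntry, CatalogModel, and estimateCostUsd are illustrative names, and because the nesting of workersAiCatalog above `models` is not shown in this diff, the sketch takes a model entry directly:

    interface PriceEntry { unit: string; price: number; currency: string }
    interface CatalogModel { name: string; properties: { context_window: number; price?: readonly PriceEntry[] } }

    // Estimate USD cost for one request against an entry shaped like the gpt-oss additions above.
    function estimateCostUsd(model: CatalogModel, inputTokens: number, outputTokens: number): number {
        const rate = (unit: string) => model.properties.price?.find((p) => p.unit === unit)?.price ?? 0;
        return (inputTokens / 1_000_000) * rate('per M input tokens') + (outputTokens / 1_000_000) * rate('per M output tokens');
    }

    // e.g. @cf/openai/gpt-oss-20b at 10,000 input + 2,000 output tokens:
    // 0.01 * 0.2 + 0.002 * 0.3 = 0.0026 USD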
@@ -3,9 +3,9 @@ import { z } from 'zod/v3';
  * @link https://zod.dev/?id=json-type
  */
  declare const literalSchema: z.ZodUnion<[z.ZodString, z.ZodNumber, z.ZodBoolean, z.ZodNull]>;
- type Json = z.infer<typeof literalSchema> | {
- [key: string]: Json;
- } | Json[];
- export declare const jsonSchema: z.ZodType<Json>;
+ export type JSON = z.infer<typeof literalSchema> | {
+ [key: string]: JSON;
+ } | JSON[];
+ export declare const jsonSchema: z.ZodType<JSON>;
  export declare const ZodCoordinate: import("zod/v4").ZodString;
  export {};
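The recursive JSON value type that jsonSchema validates is now exported as JSON (previously the unexported internal Json alias). A minimal usage sketch, assuming both are re-exported from the package root; aliasing the type on import avoids confusion with the global JSON object:

    import { jsonSchema, type JSON as JsonValue } from '@chainfuse/types';

    // Accepts any JSON value: strings, numbers, booleans, null, arrays, and nested objects.
    const value: JsonValue = jsonSchema.parse({ ok: true, nested: [1, 'two', null] });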
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@chainfuse/types",
- "version": "2.10.27",
+ "version": "2.10.29",
  "description": "",
  "author": "ChainFuse",
  "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -99,11 +99,11 @@
  "prettier": "@demosjarco/prettier-config",
  "dependencies": {
  "validator": "^13.15.15",
- "zod": "^4.0.5"
+ "zod": "^4.0.15"
  },
  "devDependencies": {
- "@cloudflare/workers-types": "^4.20250731.0",
+ "@cloudflare/workers-types": "^4.20250807.0",
  "@types/validator": "^13.15.2"
  },
- "gitHead": "b19632b3f562dcc4f6bb3d5805eac83486d5357d"
+ "gitHead": "541693735b92bd92dc0d6c77235bcebe4ac62741"
  }