@chainfuse/types 4.2.5 → 4.2.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/ai-tools/index.js
CHANGED
@@ -97,20 +97,20 @@ const ZodLanguageModelValuesRaw = Object.entries(AiModels.LanguageModels).reduce
     acc.push(value);
     return acc;
 }, []);
-export const ZodLanguageModelValues = zm.enum(Object.values(
-export const ZodLanguageModelValues4 = z4.enum(Object.values(
+export const ZodLanguageModelValues = zm.enum(ZodLanguageModelValuesRaw.map((provider) => Object.values(provider)).flat());
+export const ZodLanguageModelValues4 = z4.enum(ZodLanguageModelValuesRaw.map((provider) => Object.values(provider)).flat());
 const ZodImageModelValuesRaw = Object.entries(AiModels.ImageModels).reduce((acc, [, value]) => {
     acc.push(value);
     return acc;
 }, []);
-export const ZodImageModelValues = zm.enum(Object.values(
-export const ZodImageModelValues4 = z4.enum(Object.values(
+export const ZodImageModelValues = zm.enum(ZodImageModelValuesRaw.map((provider) => Object.values(provider)).flat());
+export const ZodImageModelValues4 = z4.enum(ZodImageModelValuesRaw.map((provider) => Object.values(provider)).flat());
 const ZodTextEmbeddingModelValuesRaw = Object.entries(AiModels.TextEmbeddingModels).reduce((acc, [, value]) => {
     acc.push(value);
     return acc;
 }, []);
-export const ZodTextEmbeddingModelValues = zm.enum(Object.values(
-export const ZodTextEmbeddingModelValues4 = z4.enum(Object.values(
+export const ZodTextEmbeddingModelValues = zm.enum(ZodTextEmbeddingModelValuesRaw.map((provider) => Object.values(provider)).flat());
+export const ZodTextEmbeddingModelValues4 = z4.enum(ZodTextEmbeddingModelValuesRaw.map((provider) => Object.values(provider)).flat());
 export const default_mc_generic = AiModels.LanguageModels.Azure.gpt5_mini;
 export const default_mc_summary = AiModels.LanguageModels.Azure.gpt5_nano;
 export const default_mc_extraction = AiModels.LanguageModels.Azure.gpt5_nano;
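For context, the hunk above swaps enums built from the raw per-provider objects for enums built from the flattened model ids, so each enum covers every concrete model value across providers. Below is a minimal sketch of that pattern, using plain zod and a hypothetical two-provider catalog in place of the package's AiModels.LanguageModels and its zm/z4 imports; it is not the package's actual implementation.

import { z } from 'zod';

// Hypothetical two-provider catalog standing in for AiModels.LanguageModels.
const LanguageModels = {
    Azure: { gpt5_mini: 'azure:gpt-5-mini', gpt5_nano: 'azure:gpt-5-nano' },
    OpenAi: { gpt4o: 'openai:gpt-4o' },
};

// Same shape the *Raw reducers above produce: an array of per-provider model maps.
const languageModelValuesRaw = Object.values(LanguageModels);

// The new pattern from the diff: flatten every provider map into one flat list
// of model ids, so the enum accepts each concrete model value.
const flatModelIds = languageModelValuesRaw.map((provider) => Object.values(provider)).flat();

const ZodLanguageModelValues = z.enum(flatModelIds as [string, ...string[]]);

console.log(ZodLanguageModelValues.safeParse('azure:gpt-5-mini').success); // true
console.log(ZodLanguageModelValues.safeParse('not-a-model').success); // false

The `as [string, ...string[]]` cast only reflects that z.enum expects a non-empty list of strings; the real package derives its values from its generated model catalog.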
@@ -131,7 +131,7 @@ export declare const workersAiCatalog: {
 readonly name: "@hf/thebloke/llamaguard-7b-awq";
 readonly description: "Llama Guard is a model for classifying the safety of LLM prompts and responses, using a taxonomy of safety risks.\n";
 readonly created_at: "2024-02-06 18:13:59.060";
-readonly tags: readonly [];
+readonly tags: readonly ["moderation", "safety", "content-filtering", "guardrails"];
 readonly properties: {
     readonly beta: true;
     readonly context_window: 4096;
@@ -155,7 +155,7 @@ export declare const workersAiCatalog: {
 readonly name: "@cf/meta/llama-guard-3-8b";
 readonly description: "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.";
 readonly created_at: "2025-01-22 23:26:23.495";
-readonly tags: readonly [];
+readonly tags: readonly ["moderation", "safety", "content-filtering", "guardrails"];
 readonly properties: {
     readonly context_window: 131072;
     readonly price: readonly [{
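The declaration change above narrows tags from an always-empty tuple to a tuple of literal strings. A small sketch of what that buys consumers at the type level, using a hypothetical, trimmed-down entry rather than the real workersAiCatalog declaration:

// Hypothetical, trimmed-down stand-in for one catalog entry's declared type.
declare const llamaGuardEntry: {
    readonly name: '@cf/meta/llama-guard-3-8b';
    readonly tags: readonly ['moderation', 'safety', 'content-filtering', 'guardrails'];
};

// With the old `readonly []` declaration this index type was `never`;
// now it is the union of the declared tag literals.
type GuardTag = (typeof llamaGuardEntry)['tags'][number];
// => 'moderation' | 'safety' | 'content-filtering' | 'guardrails'

const ok: GuardTag = 'moderation';   // compiles
// const bad: GuardTag = 'vision';   // would be a type error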
@@ -151,7 +151,7 @@ export const workersAiCatalog = {
 name: '@hf/thebloke/llamaguard-7b-awq',
 description: 'Llama Guard is a model for classifying the safety of LLM prompts and responses, using a taxonomy of safety risks.\n',
 created_at: '2024-02-06 18:13:59.060',
-tags: [],
+tags: ['moderation', 'safety', 'content-filtering', 'guardrails'],
 properties: {
     beta: true,
     context_window: 4096,
@@ -177,7 +177,7 @@ export const workersAiCatalog = {
 name: '@cf/meta/llama-guard-3-8b',
 description: 'Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.',
 created_at: '2025-01-22 23:26:23.495',
-tags: [],
+tags: ['moderation', 'safety', 'content-filtering', 'guardrails'],
 properties: {
     context_window: 131072,
     price: [
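At runtime the same two Llama Guard entries in workersAiCatalog now carry non-empty tags arrays. As a hedged sketch (not an API of @chainfuse/types), a consumer could filter catalog entries by tag roughly like this, with a hypothetical cut-down catalog standing in for the real one:

// Hypothetical cut-down stand-in for the catalog; not the real workersAiCatalog shape.
interface CatalogModel {
    name: string;
    description: string;
    tags: readonly string[];
}

const catalog: CatalogModel[] = [
    {
        name: '@hf/thebloke/llamaguard-7b-awq',
        description: 'Llama Guard is a model for classifying the safety of LLM prompts and responses.',
        tags: ['moderation', 'safety', 'content-filtering', 'guardrails'],
    },
    {
        name: '@cf/some-provider/general-chat-model',
        description: 'A general-purpose chat model with no moderation tags.',
        tags: [],
    },
];

// Return every model whose tags include the requested tag.
function modelsWithTag(models: CatalogModel[], tag: string): CatalogModel[] {
    return models.filter((model) => model.tags.includes(tag));
}

console.log(modelsWithTag(catalog, 'guardrails').map((model) => model.name));
// -> ['@hf/thebloke/llamaguard-7b-awq']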
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@chainfuse/types",
-  "version": "4.2.5",
+  "version": "4.2.6",
   "description": "",
   "author": "ChainFuse",
   "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/types#readme",
@@ -98,8 +98,8 @@
     "zod": "^4.3.5"
   },
   "devDependencies": {
-    "@cloudflare/workers-types": "^4.
+    "@cloudflare/workers-types": "^4.20260113.0",
     "@types/validator": "^13.15.10"
   },
-  "gitHead": "
+  "gitHead": "de32e37b69010a4cf409fbef5112ec75223f7c4c"
 }