@chainfuse/ai-tools 0.5.1 → 0.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/providers/customProviders.d.mts +2 -2
- package/dist/providers/customProviders.mjs +40 -12
- package/dist/providers/rawProviders.d.mts +2 -0
- package/dist/providers/rawProviders.mjs +27 -0
- package/dist/providers/types.d.mts +17 -1
- package/dist/registry.d.mts +1 -1
- package/dist/types.d.mts +1 -4
- package/package.json +12 -12

package/dist/providers/customProviders.d.mts
CHANGED

@@ -2,13 +2,13 @@ import type { GoogleGenerativeAIProvider } from '@ai-sdk/google';
 import type { OpenAICompatibleProvider } from '@ai-sdk/openai-compatible';
 import { AiBase } from '../base.mjs';
 import type { AiRequestConfig } from '../types.mjs';
-import type { AzureOpenAIProvider } from './types.mjs';
+import type { AzureOpenAIProvider, WorkersAIProvider } from './types.mjs';
 export declare class AiCustomProviders extends AiBase {
     oaiOpenai(args: AiRequestConfig): Promise<import("@ai-sdk/openai").OpenAIProvider>;
     azOpenai(args: AiRequestConfig, [server, ...servers]?: import("../serverSelector/types.mts").Server[]): Promise<AzureOpenAIProvider>;
     anthropic(args: AiRequestConfig): Promise<import("@ai-sdk/anthropic").AnthropicProvider>;
     private static workersAiIsRest;
-    cfWorkersAi(args: AiRequestConfig): Promise<OpenAICompatibleProvider<"@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/baai/bge-small-en-v1.5" | "@cf/baai/bge-base-en-v1.5" | "@cf/baai/bge-large-en-v1.5">>;
+    cfWorkersAi(args: AiRequestConfig): Promise<OpenAICompatibleProvider<"@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-guard-3-8b" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-guard-3-8b" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/baai/bge-small-en-v1.5" | "@cf/baai/bge-base-en-v1.5" | "@cf/baai/bge-large-en-v1.5"> | WorkersAIProvider>;
     custom(args: AiRequestConfig): Promise<OpenAICompatibleProvider<string, string, string>>;
     googleAi(args: AiRequestConfig): Promise<GoogleGenerativeAIProvider>;
 }
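
Note on the hunk above: `cfWorkersAi()` can now resolve to either the REST-backed `OpenAICompatibleProvider` or the new binding-backed `WorkersAIProvider`; which branch is taken is decided internally by `workersAiIsRest`. A minimal consumption sketch follows (the public import path is an assumption, not confirmed by this diff):

```ts
import { generateText } from 'ai';
import type { AiCustomProviders } from '@chainfuse/ai-tools'; // assumed export path

declare const providers: AiCustomProviders;
declare const args: Parameters<AiCustomProviders['cfWorkersAi']>[0];

// Both union members are callable provider factories, so existing call sites keep working;
// the REST vs. binding decision happens inside cfWorkersAi().
const provider = await providers.cfWorkersAi(args);
const { text } = await generateText({
  model: provider('@cf/meta/llama-3.1-8b-instruct'),
  prompt: 'ping',
});
console.log(text);
```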

package/dist/providers/customProviders.mjs
CHANGED

@@ -178,19 +178,47 @@ export class AiCustomProviders extends AiBase {
             });
         }
         else {
-
-            /*return customProvider({
+            return customProvider({
                 // @ts-expect-error override for types
-                languageModels: await enabledCloudflareLlmProviders.reduce(
-
-
-
-
-
-
-
-
-
+                languageModels: await enabledCloudflareLlmProviders.reduce(async (accPromise, model) => {
+                    const acc = await accPromise;
+                    /**
+                     * Intercept and add in missing index property to be OpenAI compatible
+                     */
+                    // @ts-expect-error override for types
+                    acc[model] = wrapLanguageModel({
+                        model: (await raw.bindingWorkersAi(args))(model),
+                        middleware: [
+                            // Fix output generation where it's correct, but encapsulated in a code fence
+                            {
+                                wrapGenerate: async ({ doGenerate, model }) => {
+                                    const result = await doGenerate();
+                                    /**
+                                     * `chunkSchema` is undocumented but always present in `model` regardless of model
+                                     * Can't use `responseFormat` (in `params`) because it isn't always present because some models don't support that part of openai api spec.
+                                     */
+                                    if ('chunkSchema' in model) {
+                                        const codeFenceStart = new RegExp(/^`{1,3}\w*\s*(?=[\[{])/i);
+                                        const codefenceEnd = new RegExp(/(?![\]}])\s*`{1,3}$/i);
+                                        return {
+                                            ...result,
+                                            /**
+                                             * 1. trim initially to remove any leading/trailing whitespace
+                                             * 2. Remove start and end
+                                             * 3. Trim again to remove any leading/trailing whitespace
+                                             */
+                                            text: result.text?.trim().replace(codeFenceStart, '').replace(codefenceEnd, '').trim(),
+                                        };
+                                    }
+                                    return result;
+                                },
+                            },
+                        ],
+                    });
+                    return acc;
+                }, Promise.resolve({})),
+                fallbackProvider: await new AiRawProviders(this.config).bindingWorkersAi(args),
+            });
         }
     }
     custom(args) {
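
The added branch wraps each binding-backed model with AI SDK middleware whose only job is to strip a Markdown code fence from otherwise-valid structured output; in the diff this is gated on the undocumented `chunkSchema` marker. A standalone sketch of that middleware, with the two regexes copied from the diff, the `chunkSchema` gate omitted, and the underlying model left abstract:

```ts
import { wrapLanguageModel, type LanguageModelV1 } from 'ai';

// From the diff: opening fence (with optional language tag) directly before a JSON
// object/array, and the matching closing fence after it.
const codeFenceStart = /^`{1,3}\w*\s*(?=[\[{])/i;
const codeFenceEnd = /(?![\]}])\s*`{1,3}$/i;

declare const baseModel: LanguageModelV1; // e.g. a model handed out by the Workers AI binding provider

export const model = wrapLanguageModel({
  model: baseModel,
  middleware: [
    {
      wrapGenerate: async ({ doGenerate }) => {
        const result = await doGenerate();
        return {
          ...result,
          // 1. trim, 2. drop the opening/closing fence, 3. trim again:
          // '```json\n{"ok":true}\n```' becomes '{"ok":true}'
          text: result.text?.trim().replace(codeFenceStart, '').replace(codeFenceEnd, '').trim(),
        };
      },
    },
  ],
});
```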

package/dist/providers/rawProviders.d.mts
CHANGED

@@ -3,6 +3,7 @@ import type { cloudflareModelPossibilities } from '@chainfuse/types';
 import { AiBase } from '../base.mjs';
 import type { Server } from '../serverSelector/types.mjs';
 import type { AiRequestConfig } from '../types.mjs';
+import type { WorkersAIProvider } from './types.mts';
 export declare class AiRawProviders extends AiBase {
     private readonly cacheTtl;
     private updateGatewayLog;
@@ -12,4 +13,5 @@ export declare class AiRawProviders extends AiBase {
     custom(args: AiRequestConfig): Promise<OpenAICompatibleProvider<string, string, string>>;
     googleAi(args: AiRequestConfig): Promise<import("@ai-sdk/google").GoogleGenerativeAIProvider>;
     restWorkersAi(args: AiRequestConfig): Promise<OpenAICompatibleProvider<cloudflareModelPossibilities<'Text Generation'>, cloudflareModelPossibilities<'Text Generation'>, cloudflareModelPossibilities<'Text Embeddings'>>>;
+    bindingWorkersAi(args: AiRequestConfig): Promise<WorkersAIProvider>;
 }

package/dist/providers/rawProviders.mjs
CHANGED

@@ -423,4 +423,31 @@ export class AiRawProviders extends AiBase {
             },
         }));
     }
+    async bindingWorkersAi(args) {
+        return import('workers-ai-provider').then(async ({ createWorkersAI }) => createWorkersAI({
+            binding: this.config.providers.workersAi,
+            gateway: {
+                id: this.config.environment,
+                ...(args.cache && { cacheTtl: typeof args.cache === 'boolean' ? (args.cache ? this.cacheTtl : 0) : args.cache }),
+                ...(args.skipCache && { skipCache: true }),
+                metadata: {
+                    dbInfo: JSON.stringify({
+                        messageId: (await BufferHelpers.uuidConvert(args.messageId)).utf8,
+                        dataspaceId: (await BufferHelpers.uuidConvert(args.dataspaceId)).utf8,
+                    }),
+                    executor: JSON.stringify(args.executor),
+                    // Generate incomplete id because we don't have the body to hash yet. Fill it in in the `fetch()`
+                    idempotencyId: args.idempotencyId ?? (await BufferHelpers.generateUuid).utf8.slice(0, 23),
+                    serverInfo: JSON.stringify({
+                        name: 'cloudflare',
+                    }),
+                    /**
+                     * Blank at first, add after request finishes
+                     * CF AI Gateway allows only editing existing metadata not creating new ones after the request is made
+                     */
+                    timing: JSON.stringify({}),
+                },
+            },
+        }));
+    }
 }
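
The new `bindingWorkersAi()` lazily imports `workers-ai-provider`, builds a provider from the Workers AI binding, and routes it through Cloudflare AI Gateway with per-request cache and metadata options. A minimal Worker-side sketch of the same call, assuming an `AI` binding and a gateway id of 'production' (binding name, gateway id, and metadata values are illustrative; the gateway option shape mirrors what the diff passes through):

```ts
import { createWorkersAI } from 'workers-ai-provider';
import { generateText } from 'ai';

interface Env {
  AI: Ai; // Workers AI binding declared in the wrangler configuration
}

export default {
  async fetch(_request: Request, env: Env): Promise<Response> {
    const workersai = createWorkersAI({
      binding: env.AI,
      gateway: {
        id: 'production',
        skipCache: false,
        metadata: { serverInfo: JSON.stringify({ name: 'cloudflare' }) },
      },
    });

    const { text } = await generateText({
      model: workersai('@cf/meta/llama-3.1-8b-instruct'),
      prompt: 'Reply with a single short sentence.',
    });

    return new Response(text);
  },
} satisfies ExportedHandler<Env>;
```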

package/dist/providers/types.d.mts
CHANGED

@@ -1,7 +1,8 @@
 import type { OpenAIChatSettings, OpenAIEmbeddingSettings } from '@ai-sdk/openai/internal';
 import type { EmbeddingModelV1, LanguageModelV1 } from '@ai-sdk/provider';
-import type { AzureChatModels, AzureEmbeddingModels } from '@chainfuse/types';
+import type { AzureChatModels, AzureEmbeddingModels, cloudflareModelPossibilities } from '@chainfuse/types';
 import type { Provider } from 'ai';
+import type { createWorkersAI, WorkersAI } from 'workers-ai-provider';
 export interface AzureOpenAIProvider extends Provider {
     (deploymentId: AzureChatModels, settings?: OpenAIChatSettings): LanguageModelV1;
     /**
@@ -13,3 +14,18 @@ export interface AzureOpenAIProvider extends Provider {
      */
     textEmbeddingModel(deploymentId: AzureEmbeddingModels, settings?: OpenAIEmbeddingSettings): EmbeddingModelV1<string>;
 }
+export interface WorkersAIProvider extends WorkersAI {
+    (modelId: cloudflareModelPossibilities<'Text Generation'>, settings?: Parameters<typeof createWorkersAI>[0]): ReturnType<ReturnType<typeof createWorkersAI>>;
+    /**
+     * Creates a model for text generation.
+     **/
+    chat(modelId: cloudflareModelPossibilities<'Text Generation'>, settings?: Parameters<typeof createWorkersAI>[0]): ReturnType<ReturnType<typeof createWorkersAI>['chat']>;
+    /**
+     * Creates an Azure OpenAI chat model for text generation.
+     */
+    languageModel(modelId: cloudflareModelPossibilities<'Text Generation'>, settings?: Parameters<typeof createWorkersAI>[0]): ReturnType<ReturnType<typeof createWorkersAI>>;
+    /**
+     * Creates an Azure OpenAI model for text embeddings.
+     */
+    textEmbeddingModel(modelId: cloudflareModelPossibilities<'Text Embeddings'>, settings?: Parameters<typeof createWorkersAI>[0]): EmbeddingModelV1<string>;
+}
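
The new `WorkersAIProvider` interface narrows `workers-ai-provider`'s `WorkersAI` type to Cloudflare's published model ids (via `cloudflareModelPossibilities`), so model names are checked at compile time. A consumption sketch, with provider acquisition elided and the dist import path assumed rather than confirmed by this diff:

```ts
import { embed, generateText } from 'ai';
import type { WorkersAIProvider } from '@chainfuse/ai-tools/dist/providers/types.mjs'; // path assumed

// Stands in for the value resolved by AiRawProviders#bindingWorkersAi() or the binding
// branch of AiCustomProviders#cfWorkersAi().
declare const workersAi: WorkersAIProvider;

// 'Text Generation' ids type-check against the interface:
const { text } = await generateText({
  model: workersAi.languageModel('@cf/meta/llama-3.1-8b-instruct'),
  prompt: 'One-line summary of Workers AI bindings.',
});

// As do 'Text Embeddings' ids:
const { embedding } = await embed({
  model: workersAi.textEmbeddingModel('@cf/baai/bge-base-en-v1.5'),
  value: text,
});
console.log(embedding.length);
```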
package/dist/registry.d.mts
CHANGED

@@ -7,7 +7,7 @@ export declare class AiRegistry extends AiBase {
     anthropic: import("@ai-sdk/anthropic").AnthropicProvider;
     custom: import("@ai-sdk/openai-compatible").OpenAICompatibleProvider<string, string, string>;
     'google.generative-ai': import("@ai-sdk/google").GoogleGenerativeAIProvider;
-    workersai: import("@ai-sdk/openai-compatible").OpenAICompatibleProvider<"@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/baai/bge-small-en-v1.5" | "@cf/baai/bge-base-en-v1.5" | "@cf/baai/bge-large-en-v1.5">;
+    workersai: import("@ai-sdk/openai-compatible").OpenAICompatibleProvider<"@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-guard-3-8b" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat" | "@cf/google/gemma-2b-it-lora" | "@hf/nexusflow/starling-lm-7b-beta" | "@cf/meta/llama-3-8b-instruct" | "@cf/meta/llama-3.2-3b-instruct" | "@hf/thebloke/llamaguard-7b-awq" | "@hf/thebloke/neural-chat-7b-v3-1-awq" | "@cf/meta/llama-guard-3-8b" | "@cf/meta/llama-2-7b-chat-fp16" | "@cf/mistral/mistral-7b-instruct-v0.1" | "@cf/mistral/mistral-7b-instruct-v0.2-lora" | "@cf/tinyllama/tinyllama-1.1b-chat-v1.0" | "@hf/mistral/mistral-7b-instruct-v0.2" | "@cf/fblgit/una-cybertron-7b-v2-bf16" | "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b" | "@cf/thebloke/discolm-german-7b-v1-awq" | "@cf/meta/llama-2-7b-chat-int8" | "@cf/meta/llama-3.1-8b-instruct-fp8" | "@hf/thebloke/mistral-7b-instruct-v0.1-awq" | "@cf/qwen/qwen1.5-7b-chat-awq" | "@cf/meta/llama-3.2-1b-instruct" | "@hf/thebloke/llama-2-13b-chat-awq" | "@hf/thebloke/deepseek-coder-6.7b-base-awq" | "@cf/meta-llama/llama-2-7b-chat-hf-lora" | "@cf/meta/llama-3.3-70b-instruct-fp8-fast" | "@hf/thebloke/openhermes-2.5-mistral-7b-awq" | "@hf/thebloke/deepseek-coder-6.7b-instruct-awq" | "@cf/deepseek-ai/deepseek-math-7b-instruct" | "@cf/tiiuae/falcon-7b-instruct" | "@hf/nousresearch/hermes-2-pro-mistral-7b" | "@cf/meta/llama-3.1-8b-instruct" | "@cf/meta/llama-3.1-8b-instruct-awq" | "@hf/thebloke/zephyr-7b-beta-awq" | "@cf/google/gemma-7b-it-lora" | "@cf/qwen/qwen1.5-1.8b-chat" | "@cf/meta/llama-3-8b-instruct-awq" | "@cf/meta/llama-3.2-11b-vision-instruct" | "@cf/defog/sqlcoder-7b-2" | "@cf/microsoft/phi-2" | "@hf/meta-llama/meta-llama-3-8b-instruct" | "@hf/google/gemma-7b-it" | "@cf/qwen/qwen1.5-14b-chat-awq" | "@cf/openchat/openchat-3.5-0106", "@cf/baai/bge-small-en-v1.5" | "@cf/baai/bge-base-en-v1.5" | "@cf/baai/bge-large-en-v1.5"> | import("./providers/types.mts").WorkersAIProvider;
     }>>;
     registry(args: AiRequestConfig): Promise<import("ai").Provider>;
 }
package/dist/types.d.mts
CHANGED

@@ -53,10 +53,7 @@ export type AiConfigWorkersai = AiConfigWorkersaiRest | AiConfigWorkersaiBinding
 export interface AiConfigWorkersaiRest {
     apiToken: string;
 }
-/**
- * @deprecated Not functional. Use REST instead
- */
-export type AiConfigWorkersaiBinding = Ai;
+export type AiConfigWorkersaiBinding<T extends Ai = Ai> = T;
 /**
  * It's a UUID, but the last block is SHA256 of the request body
  */
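
With the binding variant no longer deprecated, a Workers AI binding can be supplied directly instead of a REST `apiToken`; the new generic parameter just lets callers preserve a more specific binding type. A minimal sketch (types re-stated from this file so the example is self-contained; assumes `@cloudflare/workers-types` provides the global `Ai` type):

```ts
// Re-stated from package/dist/types.d.mts for illustration.
type AiConfigWorkersaiRest = { apiToken: string };
type AiConfigWorkersaiBinding<T extends Ai = Ai> = T;
type AiConfigWorkersai = AiConfigWorkersaiRest | AiConfigWorkersaiBinding;

interface Env {
  AI: Ai; // Workers AI binding
}

// REST configuration (unchanged behavior):
export const viaRest: AiConfigWorkersai = { apiToken: 'cf-api-token' };

// Binding configuration (now supported rather than deprecated): pass the binding through as-is.
export function viaBinding(env: Env): AiConfigWorkersai {
  return env.AI;
}
```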
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@chainfuse/ai-tools",
-  "version": "0.5.1",
+  "version": "0.6.1",
   "description": "",
   "author": "ChainFuse",
   "homepage": "https://github.com/ChainFuse/packages/tree/main/packages/ai-tools#readme",
@@ -48,21 +48,21 @@
   },
   "prettier": "@demosjarco/prettier-config",
   "dependencies": {
-    "@ai-sdk/anthropic": "^1.1.
-    "@ai-sdk/azure": "^1.1.
-    "@ai-sdk/google": "^1.1.
+    "@ai-sdk/anthropic": "^1.1.11",
+    "@ai-sdk/azure": "^1.1.14",
+    "@ai-sdk/google": "^1.1.17",
     "@ai-sdk/openai": "^1.0.5",
-    "@ai-sdk/openai-compatible": "^0.1.
-    "@chainfuse/helpers": "^1.1.
-    "@chainfuse/types": "^1.6.
-    "ai": "^4.1.
+    "@ai-sdk/openai-compatible": "^0.1.12",
+    "@chainfuse/helpers": "^1.1.9",
+    "@chainfuse/types": "^1.6.9",
+    "ai": "^4.1.46",
     "chalk": "^5.4.1",
     "haversine-distance": "^1.2.3",
-    "workers-ai-provider": "^0.
+    "workers-ai-provider": "^0.1.1"
   },
   "devDependencies": {
-    "@cloudflare/workers-types": "^4.
-    "openai": "^4.85.
+    "@cloudflare/workers-types": "^4.20250224.0",
+    "openai": "^4.85.4"
   },
-  "gitHead": "
+  "gitHead": "9d842a11f77e62dab485f3965c724827b44f688f"
 }