@ai-sdk/huggingface 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,7 @@
1
+ # @ai-sdk/huggingface
2
+
3
+ ## 0.0.1
4
+
5
+ ### Patch Changes
6
+
7
+ - 3cbfcb4: initial stable release huggingface
package/LICENSE ADDED
@@ -0,0 +1,13 @@
1
+ Copyright 2023 Vercel, Inc.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
package/README.md ADDED
@@ -0,0 +1,35 @@
1
+ # Vercel AI SDK - Hugging Face Provider
2
+
3
+ The **[Hugging Face Inference Providers](https://huggingface.co/docs/inference-providers/index)** integration for the [Vercel AI SDK](https://ai-sdk.dev/docs) contains language model support for thousands of models through multiple inference providers via the Hugging Face router API.
4
+
5
+ ## Setup
6
+
7
+ The Hugging Face provider is available in the `@ai-sdk/huggingface` module. You can install it with:
8
+
9
+ ```bash
10
+ npm i @ai-sdk/huggingface
11
+ ```
12
+
13
+ ## Provider Instance
14
+
15
+ You can import the default provider instance `huggingface` from `@ai-sdk/huggingface`:
16
+
17
+ ```ts
18
+ import { huggingface } from '@ai-sdk/huggingface';
19
+ ```
20
+
21
+ ## Example
22
+
23
+ ```ts
24
+ import { huggingface } from '@ai-sdk/huggingface';
25
+ import { generateText } from 'ai';
26
+
27
+ const { text } = await generateText({
28
+ model: huggingface('meta-llama/Llama-3.1-8B-Instruct'),
29
+ prompt: 'Write a vegetarian lasagna recipe.',
30
+ });
31
+ ```
32
+
33
+ ## Documentation
34
+
35
+ Please check out the **[Hugging Face provider documentation](https://ai-sdk.dev/providers/ai-sdk-providers/huggingface)** for more information.
package/dist/index.d.mts ADDED
@@ -0,0 +1,55 @@
1
+ import { ProviderV2, LanguageModelV2 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ export { OpenAICompatibleErrorData as HuggingFaceErrorData } from '@ai-sdk/openai-compatible';
4
+
5
+ type HuggingFaceResponsesModelId = 'meta-llama/Llama-3.1-8B-Instruct' | 'meta-llama/Llama-3.1-70B-Instruct' | 'meta-llama/Llama-3.1-405B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' | 'meta-llama/Llama-Guard-4-12B' | 'deepseek-ai/DeepSeek-V3.1' | 'deepseek-ai/DeepSeek-V3-0324' | 'deepseek-ai/DeepSeek-R1' | 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-7B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-14B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B' | 'deepseek-ai/DeepSeek-R1-Distill-Llama-8B' | 'deepseek-ai/DeepSeek-R1-Distill-Llama-70B' | 'deepseek-ai/DeepSeek-Prover-V2-671B' | 'Qwen/Qwen3-32B' | 'Qwen/Qwen3-14B' | 'Qwen/Qwen3-8B' | 'Qwen/Qwen3-4B' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8' | 'Qwen/Qwen3-30B-A3B' | 'Qwen/Qwen2.5-VL-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B-Instruct' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'google/gemma-2-9b-it' | 'google/gemma-3-27b-it' | 'moonshotai/Kimi-K2-Instruct' | (string & {});
6
+ interface HuggingFaceResponsesSettings {
7
+ metadata?: Record<string, string>;
8
+ instructions?: string;
9
+ strictJsonSchema?: boolean;
10
+ }
11
+
12
+ interface HuggingFaceProviderSettings {
13
+ /**
14
+ Hugging Face API key.
15
+ */
16
+ apiKey?: string;
17
+ /**
18
+ Base URL for the API calls.
19
+ */
20
+ baseURL?: string;
21
+ /**
22
+ Custom headers to include in the requests.
23
+ */
24
+ headers?: Record<string, string>;
25
+ /**
26
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
27
+ or to provide a custom fetch implementation for e.g. testing.
28
+ */
29
+ fetch?: FetchFunction;
30
+ generateId?: () => string;
31
+ }
32
+ interface HuggingFaceProvider extends ProviderV2 {
33
+ /**
34
+ Creates a Hugging Face responses model for text generation.
35
+ */
36
+ (modelId: HuggingFaceResponsesModelId): LanguageModelV2;
37
+ /**
38
+ Creates a Hugging Face responses model for text generation.
39
+ */
40
+ languageModel(modelId: HuggingFaceResponsesModelId): LanguageModelV2;
41
+ /**
42
+ Creates a Hugging Face responses model for text generation.
43
+ */
44
+ responses(modelId: HuggingFaceResponsesModelId): LanguageModelV2;
45
+ }
46
+ /**
47
+ Create a Hugging Face provider instance.
48
+ */
49
+ declare function createHuggingFace(options?: HuggingFaceProviderSettings): HuggingFaceProvider;
50
+ /**
51
+ Default Hugging Face provider instance.
52
+ */
53
+ declare const huggingface: HuggingFaceProvider;
54
+
55
+ export { type HuggingFaceProvider, type HuggingFaceProviderSettings, type HuggingFaceResponsesModelId, type HuggingFaceResponsesSettings, createHuggingFace, huggingface };
package/dist/index.d.ts ADDED
@@ -0,0 +1,55 @@
1
+ import { ProviderV2, LanguageModelV2 } from '@ai-sdk/provider';
2
+ import { FetchFunction } from '@ai-sdk/provider-utils';
3
+ export { OpenAICompatibleErrorData as HuggingFaceErrorData } from '@ai-sdk/openai-compatible';
4
+
5
+ type HuggingFaceResponsesModelId = 'meta-llama/Llama-3.1-8B-Instruct' | 'meta-llama/Llama-3.1-70B-Instruct' | 'meta-llama/Llama-3.1-405B-Instruct' | 'meta-llama/Llama-3.3-70B-Instruct' | 'meta-llama/Meta-Llama-3-8B-Instruct' | 'meta-llama/Meta-Llama-3-70B-Instruct' | 'meta-llama/Llama-3.2-3B-Instruct' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct' | 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8' | 'meta-llama/Llama-Guard-4-12B' | 'deepseek-ai/DeepSeek-V3.1' | 'deepseek-ai/DeepSeek-V3-0324' | 'deepseek-ai/DeepSeek-R1' | 'deepseek-ai/DeepSeek-R1-0528' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-7B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-14B' | 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B' | 'deepseek-ai/DeepSeek-R1-Distill-Llama-8B' | 'deepseek-ai/DeepSeek-R1-Distill-Llama-70B' | 'deepseek-ai/DeepSeek-Prover-V2-671B' | 'Qwen/Qwen3-32B' | 'Qwen/Qwen3-14B' | 'Qwen/Qwen3-8B' | 'Qwen/Qwen3-4B' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct' | 'Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8' | 'Qwen/Qwen3-30B-A3B' | 'Qwen/Qwen2.5-VL-7B-Instruct' | 'Qwen/Qwen2.5-7B-Instruct' | 'Qwen/Qwen2.5-Coder-7B-Instruct' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'google/gemma-2-9b-it' | 'google/gemma-3-27b-it' | 'moonshotai/Kimi-K2-Instruct' | (string & {});
6
+ interface HuggingFaceResponsesSettings {
7
+ metadata?: Record<string, string>;
8
+ instructions?: string;
9
+ strictJsonSchema?: boolean;
10
+ }
11
+
12
+ interface HuggingFaceProviderSettings {
13
+ /**
14
+ Hugging Face API key.
15
+ */
16
+ apiKey?: string;
17
+ /**
18
+ Base URL for the API calls.
19
+ */
20
+ baseURL?: string;
21
+ /**
22
+ Custom headers to include in the requests.
23
+ */
24
+ headers?: Record<string, string>;
25
+ /**
26
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
27
+ or to provide a custom fetch implementation for e.g. testing.
28
+ */
29
+ fetch?: FetchFunction;
30
+ generateId?: () => string;
31
+ }
32
+ interface HuggingFaceProvider extends ProviderV2 {
33
+ /**
34
+ Creates a Hugging Face responses model for text generation.
35
+ */
36
+ (modelId: HuggingFaceResponsesModelId): LanguageModelV2;
37
+ /**
38
+ Creates a Hugging Face responses model for text generation.
39
+ */
40
+ languageModel(modelId: HuggingFaceResponsesModelId): LanguageModelV2;
41
+ /**
42
+ Creates a Hugging Face responses model for text generation.
43
+ */
44
+ responses(modelId: HuggingFaceResponsesModelId): LanguageModelV2;
45
+ }
46
+ /**
47
+ Create a Hugging Face provider instance.
48
+ */
49
+ declare function createHuggingFace(options?: HuggingFaceProviderSettings): HuggingFaceProvider;
50
+ /**
51
+ Default Hugging Face provider instance.
52
+ */
53
+ declare const huggingface: HuggingFaceProvider;
54
+
55
+ export { type HuggingFaceProvider, type HuggingFaceProviderSettings, type HuggingFaceResponsesModelId, type HuggingFaceResponsesSettings, createHuggingFace, huggingface };