@ai-sdk/huggingface 1.0.16 → 1.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
1
1
  # @ai-sdk/huggingface
2
2
 
3
+ ## 1.0.18
4
+
5
+ ### Patch Changes
6
+
7
+ - 2b8369d: chore: add docs to package dist
8
+
9
+ ## 1.0.17
10
+
11
+ ### Patch Changes
12
+
13
+ - 8dc54db: chore: add src folders to package bundle
14
+ - Updated dependencies [8dc54db]
15
+ - @ai-sdk/openai-compatible@2.0.17
16
+
3
17
  ## 1.0.16
4
18
 
5
19
  ### Patch Changes
@@ -0,0 +1,119 @@
1
+ ---
2
+ title: Hugging Face
3
+ description: Learn how to use Hugging Face Provider.
4
+ ---
5
+
6
+ # Hugging Face Provider
7
+
8
+ The [Hugging Face](https://huggingface.co/) provider offers access to thousands of language models through [Hugging Face Inference Providers](https://huggingface.co/docs/inference-providers/index), including models from Meta, DeepSeek, Qwen, and more.
9
+
10
+ API keys can be obtained from [Hugging Face Settings](https://huggingface.co/settings/tokens).
11
+
12
+ ## Setup
13
+
14
+ The Hugging Face provider is available via the `@ai-sdk/huggingface` module. You can install it with:
15
+
16
+ <Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
17
+ <Tab>
18
+ <Snippet text="pnpm add @ai-sdk/huggingface" dark />
19
+ </Tab>
20
+ <Tab>
21
+ <Snippet text="npm install @ai-sdk/huggingface" dark />
22
+ </Tab>
23
+ <Tab>
24
+ <Snippet text="yarn add @ai-sdk/huggingface" dark />
25
+ </Tab>
26
+
27
+ <Tab>
28
+ <Snippet text="bun add @ai-sdk/huggingface" dark />
29
+ </Tab>
30
+ </Tabs>
31
+
32
+ ## Provider Instance
33
+
34
+ You can import the default provider instance `huggingface` from `@ai-sdk/huggingface`:
35
+
36
+ ```ts
37
+ import { huggingface } from '@ai-sdk/huggingface';
38
+ ```
39
+
40
+ For custom configuration, you can import `createHuggingFace` and create a provider instance with your settings:
41
+
42
+ ```ts
43
+ import { createHuggingFace } from '@ai-sdk/huggingface';
44
+
45
+ const huggingface = createHuggingFace({
46
+ apiKey: process.env.HUGGINGFACE_API_KEY ?? '',
47
+ });
48
+ ```
49
+
50
+ You can use the following optional settings to customize the Hugging Face provider instance:
51
+
52
+ - **baseURL** _string_
53
+
54
+ Use a different URL prefix for API calls, e.g. to use proxy servers.
55
+ The default prefix is `https://router.huggingface.co/v1`.
56
+
57
+ - **apiKey** _string_
58
+
59
+ API key that is sent using the `Authorization` header. It defaults to
60
+ the `HUGGINGFACE_API_KEY` environment variable. You can get your API key
61
+ from [Hugging Face Settings](https://huggingface.co/settings/tokens).
62
+
63
+ - **headers** _Record&lt;string,string&gt;_
64
+
65
+ Custom headers to include in the requests.
66
+
67
+ - **fetch** _(input: RequestInfo, init?: RequestInit) => Promise&lt;Response&gt;_
68
+
69
+ Custom [fetch](https://developer.mozilla.org/en-US/docs/Web/API/fetch) implementation.
70
+
71
+ ## Language Models
72
+
73
+ You can create language models using a provider instance:
74
+
75
+ ```ts
76
+ import { huggingface } from '@ai-sdk/huggingface';
77
+ import { generateText } from 'ai';
78
+
79
+ const { text } = await generateText({
80
+ model: huggingface('deepseek-ai/DeepSeek-V3-0324'),
81
+ prompt: 'Write a vegetarian lasagna recipe for 4 people.',
82
+ });
83
+ ```
84
+
85
+ You can also use the `.responses()` or `.languageModel()` factory methods:
86
+
87
+ ```ts
88
+ const model = huggingface.responses('deepseek-ai/DeepSeek-V3-0324');
89
+ // or
90
+ const kimiModel = huggingface.languageModel('moonshotai/Kimi-K2-Instruct');
91
+ ```
92
+
93
+ Hugging Face language models can be used in the `streamText` function
94
+ (see [AI SDK Core](/docs/ai-sdk-core)).
95
+
96
+ You can explore the latest and trending models with their capabilities, context size, throughput and pricing on the [Hugging Face Inference Models](https://huggingface.co/inference/models) page.
97
+
98
+ ## Model Capabilities
99
+
100
+ | Model | Image Input | Object Generation | Tool Usage | Tool Streaming |
101
+ | ------------------------------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
102
+ | `meta-llama/Llama-3.1-8B-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
103
+ | `meta-llama/Llama-3.1-70B-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
104
+ | `meta-llama/Llama-3.3-70B-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
105
+ | `meta-llama/Llama-4-Scout-17B-16E-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
106
+ | `deepseek-ai/DeepSeek-V3-0324` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
107
+ | `deepseek-ai/DeepSeek-R1` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
108
+ | `deepseek-ai/DeepSeek-R1-Distill-Llama-70B` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
109
+ | `Qwen/Qwen3-235B-A22B-Instruct-2507` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
110
+ | `Qwen/Qwen3-Coder-480B-A35B-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
111
+ | `Qwen/Qwen2.5-VL-7B-Instruct` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
112
+ | `google/gemma-3-27b-it` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
113
+ | `moonshotai/Kimi-K2-Instruct` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
114
+
115
+ <Note>
116
+ The capabilities depend on the specific model you're using. Check the model
117
+ documentation on Hugging Face Hub for detailed information about each model's
118
+ features.
119
+ </Note>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/huggingface",
3
- "version": "1.0.16",
3
+ "version": "1.0.18",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -8,9 +8,14 @@
8
8
  "types": "./dist/index.d.ts",
9
9
  "files": [
10
10
  "dist/**/*",
11
+ "docs/**/*",
12
+ "src",
11
13
  "CHANGELOG.md",
12
14
  "README.md"
13
15
  ],
16
+ "directories": {
17
+ "doc": "./docs"
18
+ },
14
19
  "exports": {
15
20
  "./package.json": "./package.json",
16
21
  ".": {
@@ -20,7 +25,7 @@
20
25
  }
21
26
  },
22
27
  "dependencies": {
23
- "@ai-sdk/openai-compatible": "2.0.16",
28
+ "@ai-sdk/openai-compatible": "2.0.17",
24
29
  "@ai-sdk/provider": "3.0.4",
25
30
  "@ai-sdk/provider-utils": "4.0.8"
26
31
  },
@@ -29,7 +34,7 @@
29
34
  "tsup": "^8",
30
35
  "typescript": "5.8.3",
31
36
  "zod": "3.25.76",
32
- "@ai-sdk/test-server": "1.0.1",
37
+ "@ai-sdk/test-server": "1.0.2",
33
38
  "@vercel/ai-tsconfig": "0.0.0"
34
39
  },
35
40
  "peerDependencies": {
@@ -55,7 +60,7 @@
55
60
  "scripts": {
56
61
  "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
57
62
  "build:watch": "pnpm clean && tsup --watch",
58
- "clean": "rm -rf dist *.tsbuildinfo",
63
+ "clean": "rm -rf dist docs *.tsbuildinfo",
59
64
  "lint": "eslint \"./**/*.ts*\"",
60
65
  "type-check": "tsc --build",
61
66
  "prettier-check": "prettier --check \"./**/*.ts*\"",
@@ -0,0 +1,9 @@
1
+ import { FetchFunction } from '@ai-sdk/provider-utils';
2
+
3
+ export type HuggingFaceConfig = {
4
+ provider: string;
5
+ url: (options: { modelId: string; path: string }) => string;
6
+ headers: () => Record<string, string | undefined>;
7
+ fetch?: FetchFunction;
8
+ generateId?: () => string;
9
+ };
@@ -0,0 +1,17 @@
1
+ import { createJsonErrorResponseHandler } from '@ai-sdk/provider-utils';
2
+ import { z } from 'zod/v4';
3
+
4
+ const huggingfaceErrorDataSchema = z.object({
5
+ error: z.object({
6
+ message: z.string(),
7
+ type: z.string().optional(),
8
+ code: z.string().optional(),
9
+ }),
10
+ });
11
+
12
+ export type HuggingFaceErrorData = z.infer<typeof huggingfaceErrorDataSchema>;
13
+
14
+ export const huggingfaceFailedResponseHandler = createJsonErrorResponseHandler({
15
+ errorSchema: huggingfaceErrorDataSchema,
16
+ errorToMessage: data => data.error.message,
17
+ });
@@ -0,0 +1,62 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { createHuggingFace } from './huggingface-provider';
3
+
4
+ describe('HuggingFaceProvider', () => {
5
+ describe('createHuggingFace', () => {
6
+ it('should create provider with default configuration', () => {
7
+ const provider = createHuggingFace();
8
+
9
+ expect(provider).toMatchInlineSnapshot(`[Function]`);
10
+ expect(typeof provider.responses).toBe('function');
11
+ expect(typeof provider.languageModel).toBe('function');
12
+ });
13
+
14
+ it('should create provider with custom settings', () => {
15
+ const provider = createHuggingFace({
16
+ apiKey: 'custom-key',
17
+ baseURL: 'https://custom.url',
18
+ headers: { 'Custom-Header': 'test' },
19
+ });
20
+
21
+ expect(typeof provider).toBe('function');
22
+ expect(provider).toHaveProperty('responses');
23
+ expect(provider).toHaveProperty('languageModel');
24
+ });
25
+ });
26
+
27
+ describe('model creation methods', () => {
28
+ it('should expose responses method', () => {
29
+ const provider = createHuggingFace();
30
+
31
+ expect(typeof provider.responses).toBe('function');
32
+ });
33
+
34
+ it('should expose languageModel method', () => {
35
+ const provider = createHuggingFace();
36
+
37
+ expect(typeof provider.languageModel).toBe('function');
38
+ });
39
+ });
40
+
41
+ describe('unsupported functionality', () => {
42
+ it('should throw for text embedding models', () => {
43
+ const provider = createHuggingFace();
44
+
45
+ expect(() =>
46
+ provider.embeddingModel('any-model'),
47
+ ).toThrowErrorMatchingInlineSnapshot(
48
+ `[AI_NoSuchModelError: Hugging Face Responses API does not support text embeddings. Use the Hugging Face Inference API directly for embeddings.]`,
49
+ );
50
+ });
51
+
52
+ it('should throw for image models', () => {
53
+ const provider = createHuggingFace();
54
+
55
+ expect(() =>
56
+ provider.imageModel('any-model'),
57
+ ).toThrowErrorMatchingInlineSnapshot(
58
+ `[AI_NoSuchModelError: Hugging Face Responses API does not support image generation. Use the Hugging Face Inference API directly for image models.]`,
59
+ );
60
+ });
61
+ });
62
+ });
@@ -0,0 +1,119 @@
1
+ import {
2
+ LanguageModelV3,
3
+ NoSuchModelError,
4
+ ProviderV3,
5
+ } from '@ai-sdk/provider';
6
+ import {
7
+ FetchFunction,
8
+ generateId,
9
+ loadApiKey,
10
+ withoutTrailingSlash,
11
+ } from '@ai-sdk/provider-utils';
12
+ import { HuggingFaceResponsesLanguageModel } from './responses/huggingface-responses-language-model';
13
+ import { HuggingFaceResponsesModelId } from './responses/huggingface-responses-settings';
14
+
15
+ export interface HuggingFaceProviderSettings {
16
+ /**
17
+ Hugging Face API key.
18
+ */
19
+ apiKey?: string;
20
+ /**
21
+ Base URL for the API calls.
22
+ */
23
+ baseURL?: string;
24
+ /**
25
+ Custom headers to include in the requests.
26
+ */
27
+ headers?: Record<string, string>;
28
+ /**
29
+ Custom fetch implementation. You can use it as a middleware to intercept requests,
30
+ or to provide a custom fetch implementation for e.g. testing.
31
+ */
32
+ fetch?: FetchFunction;
33
+
34
+ generateId?: () => string;
35
+ }
36
+
37
+ export interface HuggingFaceProvider extends ProviderV3 {
38
+ /**
39
+ Creates a Hugging Face responses model for text generation.
40
+ */
41
+ (modelId: HuggingFaceResponsesModelId): LanguageModelV3;
42
+
43
+ /**
44
+ Creates a Hugging Face responses model for text generation.
45
+ */
46
+ languageModel(modelId: HuggingFaceResponsesModelId): LanguageModelV3;
47
+
48
+ /**
49
+ Creates a Hugging Face responses model for text generation.
50
+ */
51
+ responses(modelId: HuggingFaceResponsesModelId): LanguageModelV3;
52
+
53
+ /**
54
+ * @deprecated Use `embeddingModel` instead.
55
+ */
56
+ textEmbeddingModel(modelId: string): never;
57
+ }
58
+
59
+ /**
60
+ Create a Hugging Face provider instance.
61
+ */
62
+ export function createHuggingFace(
63
+ options: HuggingFaceProviderSettings = {},
64
+ ): HuggingFaceProvider {
65
+ const baseURL =
66
+ withoutTrailingSlash(options.baseURL) ?? 'https://router.huggingface.co/v1';
67
+
68
+ const getHeaders = () => ({
69
+ Authorization: `Bearer ${loadApiKey({
70
+ apiKey: options.apiKey,
71
+ environmentVariableName: 'HUGGINGFACE_API_KEY',
72
+ description: 'Hugging Face',
73
+ })}`,
74
+ ...options.headers,
75
+ });
76
+
77
+ const createResponsesModel = (modelId: HuggingFaceResponsesModelId) => {
78
+ return new HuggingFaceResponsesLanguageModel(modelId, {
79
+ provider: 'huggingface.responses',
80
+ url: ({ path }) => `${baseURL}${path}`,
81
+ headers: getHeaders,
82
+ fetch: options.fetch,
83
+ generateId: options.generateId ?? generateId,
84
+ });
85
+ };
86
+
87
+ const provider = (modelId: HuggingFaceResponsesModelId) =>
88
+ createResponsesModel(modelId);
89
+
90
+ provider.specificationVersion = 'v3' as const;
91
+ provider.languageModel = createResponsesModel;
92
+ provider.responses = createResponsesModel;
93
+
94
+ provider.embeddingModel = (modelId: string) => {
95
+ throw new NoSuchModelError({
96
+ modelId,
97
+ modelType: 'embeddingModel',
98
+ message:
99
+ 'Hugging Face Responses API does not support text embeddings. Use the Hugging Face Inference API directly for embeddings.',
100
+ });
101
+ };
102
+ provider.textEmbeddingModel = provider.embeddingModel;
103
+
104
+ provider.imageModel = (modelId: string) => {
105
+ throw new NoSuchModelError({
106
+ modelId,
107
+ modelType: 'imageModel',
108
+ message:
109
+ 'Hugging Face Responses API does not support image generation. Use the Hugging Face Inference API directly for image models.',
110
+ });
111
+ };
112
+
113
+ return provider;
114
+ }
115
+
116
+ /**
117
+ Default Hugging Face provider instance.
118
+ */
119
+ export const huggingface = createHuggingFace();
package/src/index.ts ADDED
@@ -0,0 +1,10 @@
1
+ export { createHuggingFace, huggingface } from './huggingface-provider';
2
+ export type {
3
+ HuggingFaceProvider,
4
+ HuggingFaceProviderSettings,
5
+ } from './huggingface-provider';
6
+ export type {
7
+ HuggingFaceResponsesModelId,
8
+ HuggingFaceResponsesSettings,
9
+ } from './responses/huggingface-responses-settings';
10
+ export type { OpenAICompatibleErrorData as HuggingFaceErrorData } from '@ai-sdk/openai-compatible';
@@ -0,0 +1,54 @@
1
+ import { LanguageModelV3Usage } from '@ai-sdk/provider';
2
+
3
+ export type HuggingFaceResponsesUsage = {
4
+ input_tokens: number;
5
+ input_tokens_details?: {
6
+ cached_tokens?: number;
7
+ };
8
+ output_tokens: number;
9
+ output_tokens_details?: {
10
+ reasoning_tokens?: number;
11
+ };
12
+ total_tokens: number;
13
+ };
14
+
15
+ export function convertHuggingFaceResponsesUsage(
16
+ usage: HuggingFaceResponsesUsage | undefined | null,
17
+ ): LanguageModelV3Usage {
18
+ if (usage == null) {
19
+ return {
20
+ inputTokens: {
21
+ total: undefined,
22
+ noCache: undefined,
23
+ cacheRead: undefined,
24
+ cacheWrite: undefined,
25
+ },
26
+ outputTokens: {
27
+ total: undefined,
28
+ text: undefined,
29
+ reasoning: undefined,
30
+ },
31
+ raw: undefined,
32
+ };
33
+ }
34
+
35
+ const inputTokens = usage.input_tokens;
36
+ const outputTokens = usage.output_tokens;
37
+ const cachedTokens = usage.input_tokens_details?.cached_tokens ?? 0;
38
+ const reasoningTokens = usage.output_tokens_details?.reasoning_tokens ?? 0;
39
+
40
+ return {
41
+ inputTokens: {
42
+ total: inputTokens,
43
+ noCache: inputTokens - cachedTokens,
44
+ cacheRead: cachedTokens,
45
+ cacheWrite: undefined,
46
+ },
47
+ outputTokens: {
48
+ total: outputTokens,
49
+ text: outputTokens - reasoningTokens,
50
+ reasoning: reasoningTokens,
51
+ },
52
+ raw: usage,
53
+ };
54
+ }
@@ -0,0 +1,111 @@
1
+ import {
2
+ SharedV3Warning,
3
+ LanguageModelV3Prompt,
4
+ UnsupportedFunctionalityError,
5
+ } from '@ai-sdk/provider';
6
+
7
+ export async function convertToHuggingFaceResponsesMessages({
8
+ prompt,
9
+ }: {
10
+ prompt: LanguageModelV3Prompt;
11
+ }): Promise<{
12
+ input: string | Array<any>;
13
+ warnings: Array<SharedV3Warning>;
14
+ }> {
15
+ const messages: Array<any> = [];
16
+ const warnings: Array<SharedV3Warning> = [];
17
+
18
+ for (const { role, content } of prompt) {
19
+ switch (role) {
20
+ case 'system': {
21
+ messages.push({ role: 'system', content });
22
+ break;
23
+ }
24
+
25
+ case 'user': {
26
+ messages.push({
27
+ role: 'user',
28
+ content: content.map(part => {
29
+ switch (part.type) {
30
+ case 'text': {
31
+ return { type: 'input_text', text: part.text };
32
+ }
33
+ case 'file': {
34
+ if (part.mediaType.startsWith('image/')) {
35
+ const mediaType =
36
+ part.mediaType === 'image/*'
37
+ ? 'image/jpeg'
38
+ : part.mediaType;
39
+
40
+ return {
41
+ type: 'input_image',
42
+ image_url:
43
+ part.data instanceof URL
44
+ ? part.data.toString()
45
+ : `data:${mediaType};base64,${part.data}`,
46
+ };
47
+ } else {
48
+ throw new UnsupportedFunctionalityError({
49
+ functionality: `file part media type ${part.mediaType}`,
50
+ });
51
+ }
52
+ }
53
+ default: {
54
+ const _exhaustiveCheck: never = part;
55
+ throw new Error(`Unsupported part type: ${_exhaustiveCheck}`);
56
+ }
57
+ }
58
+ }),
59
+ });
60
+
61
+ break;
62
+ }
63
+
64
+ case 'assistant': {
65
+ for (const part of content) {
66
+ switch (part.type) {
67
+ case 'text': {
68
+ messages.push({
69
+ role: 'assistant',
70
+ content: [{ type: 'output_text', text: part.text }],
71
+ });
72
+ break;
73
+ }
74
+ case 'tool-call': {
75
+ // tool calls are handled by the responses API
76
+ break;
77
+ }
78
+
79
+ case 'tool-result': {
80
+ // tool results are handled by the responses API
81
+ break;
82
+ }
83
+
84
+ case 'reasoning': {
85
+ // include reasoning content in the message text
86
+ messages.push({
87
+ role: 'assistant',
88
+ content: [{ type: 'output_text', text: part.text }],
89
+ });
90
+ break;
91
+ }
92
+ }
93
+ }
94
+
95
+ break;
96
+ }
97
+
98
+ case 'tool': {
99
+ warnings.push({ type: 'unsupported', feature: 'tool messages' });
100
+ break;
101
+ }
102
+
103
+ default: {
104
+ const _exhaustiveCheck: never = role;
105
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
106
+ }
107
+ }
108
+ }
109
+
110
+ return { input: messages, warnings };
111
+ }