@huggingface/inference 3.5.0 → 3.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -11
- package/dist/index.cjs +117 -58
- package/dist/index.js +115 -57
- package/dist/src/{HfInference.d.ts → InferenceClient.d.ts} +12 -7
- package/dist/src/InferenceClient.d.ts.map +1 -0
- package/dist/src/index.d.ts +1 -1
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/providers/cohere.d.ts.map +1 -1
- package/dist/src/snippets/python.d.ts +0 -19
- package/dist/src/snippets/python.d.ts.map +1 -1
- package/dist/test/InferenceClient.spec.d.ts +2 -0
- package/dist/test/InferenceClient.spec.d.ts.map +1 -0
- package/package.json +4 -4
- package/src/{HfInference.ts → InferenceClient.ts} +12 -7
- package/src/index.ts +1 -1
- package/src/providers/black-forest-labs.ts +2 -2
- package/src/providers/cohere.ts +0 -1
- package/src/providers/fireworks-ai.ts +3 -3
- package/src/providers/replicate.ts +1 -1
- package/src/snippets/js.ts +12 -12
- package/src/snippets/python.ts +113 -37
- package/dist/src/HfInference.d.ts.map +0 -1
- package/dist/test/HfInference.spec.d.ts +0 -2
- package/dist/test/HfInference.spec.d.ts.map +0 -1
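
The headline change in this range is the rename of the `HfInference` class to `InferenceClient` (and `HfInferenceEndpoint` to `InferenceClientEndpoint`), with `HfInference` kept as an empty subclass of `InferenceClient` so existing code keeps working. A minimal migration sketch; the token value is a placeholder:

```ts
import { InferenceClient, HfInference } from "@huggingface/inference";

// New name introduced in this range
const client = new InferenceClient("hf_xxx");

// Old name still works: the bundle defines HfInference as an empty
// subclass of InferenceClient, so existing code and instanceof checks hold
const legacy: InferenceClient = new HfInference("hf_xxx");
```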
package/README.md
CHANGED
@@ -1,7 +1,7 @@
 # 🤗 Hugging Face Inference
 
-A Typescript powered wrapper for the HF Inference API (serverless), Inference Endpoints (dedicated), and
-It works with [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index), and even with supported third-party Inference Providers.
+A Typescript powered wrapper for the HF Inference API (serverless), Inference Endpoints (dedicated), and all supported Inference Providers.
+It works with [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index), and even with all supported third-party Inference Providers.
 
 Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).
 
@@ -25,24 +25,24 @@ yarn add @huggingface/inference
 
 ```ts
 // esm.sh
-import { HfInference } from "https://esm.sh/@huggingface/inference"
+import { InferenceClient } from "https://esm.sh/@huggingface/inference"
 // or npm:
-import { HfInference } from "npm:@huggingface/inference"
+import { InferenceClient } from "npm:@huggingface/inference"
 ```
 
 ### Initialize
 
 ```typescript
-import { HfInference } from '@huggingface/inference'
+import { InferenceClient } from '@huggingface/inference'
 
-const hf = new HfInference('your access token')
+const hf = new InferenceClient('your access token')
 ```
 
 ❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
 
 Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
 
-###
+### All supported inference providers
 
 You can send inference requests to third-party providers with the inference client.
 
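
As the section renamed above describes, provider routing is selected per request. A short sketch; the provider and model below are illustrative, not prescribed by this diff:

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // HF token or provider API key

const res = await client.chatCompletion({
  provider: "together", // one of the supported INFERENCE_PROVIDERS
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello!" }],
});
console.log(res.choices[0].message.content);
```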
@@ -63,7 +63,7 @@ To send requests to a third-party provider, you have to pass the `provider` para
 ```ts
 const accessToken = "hf_..."; // Either a HF access token, or an API key from the third-party provider (Replicate in this example)
 
-const client = new HfInference(accessToken);
+const client = new InferenceClient(accessToken);
 await client.textToImage({
   provider: "replicate",
   model:"black-forest-labs/Flux.1-dev",
@@ -93,7 +93,7 @@ This is not an issue for LLMs as everyone converged on the OpenAI API anyways, b
 
 ### Tree-shaking
 
-You can import the functions you need directly from the module instead of using the `HfInference` class.
+You can import the functions you need directly from the module instead of using the `InferenceClient` class.
 
 ```ts
 import { textGeneration } from "@huggingface/inference";
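
The tree-shaking pattern referenced here passes the access token per call instead of holding it in a client instance. A sketch; the model choice is illustrative:

```ts
import { textGeneration } from "@huggingface/inference";

// Direct function import, so bundlers can drop unused tasks
const out = await textGeneration({
  accessToken: "hf_xxx",
  model: "gpt2",
  inputs: "The sky is",
});
console.log(out.generated_text);
```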
@@ -165,7 +165,7 @@ for await (const chunk of hf.chatCompletionStream({
 It's also possible to call Mistral or OpenAI endpoints directly:
 
 ```typescript
-const openai = new HfInference(OPENAI_TOKEN).endpoint("https://api.openai.com");
+const openai = new InferenceClient(OPENAI_TOKEN).endpoint("https://api.openai.com");
 
 let out = "";
 for await (const chunk of openai.chatCompletionStream({
@@ -602,7 +602,7 @@ You can use any Chat Completion API-compatible provider with the `chatCompletion
 ```typescript
 // Chat Completion Example
 const MISTRAL_KEY = process.env.MISTRAL_KEY;
-const hf = new HfInference(MISTRAL_KEY);
+const hf = new InferenceClient(MISTRAL_KEY);
 const ep = hf.endpoint("https://api.mistral.ai");
 const stream = ep.chatCompletionStream({
   model: "mistral-tiny",
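
Both endpoint-related hunks update the same pattern: `.endpoint()` returns a client pinned to an OpenAI-compatible base URL. A condensed sketch combining them; the key is a placeholder:

```ts
import { InferenceClient } from "@huggingface/inference";

const ep = new InferenceClient(process.env.MISTRAL_KEY).endpoint("https://api.mistral.ai");

for await (const chunk of ep.chatCompletionStream({
  model: "mistral-tiny",
  messages: [{ role: "user", content: "Say hello" }],
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```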
package/dist/index.cjs
CHANGED
@@ -21,8 +21,9 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 var src_exports = {};
 __export(src_exports, {
   HfInference: () => HfInference,
-  HfInferenceEndpoint: () => HfInferenceEndpoint,
   INFERENCE_PROVIDERS: () => INFERENCE_PROVIDERS,
+  InferenceClient: () => InferenceClient,
+  InferenceClientEndpoint: () => InferenceClientEndpoint,
   InferenceOutputError: () => InferenceOutputError,
   audioClassification: () => audioClassification,
   audioToAudio: () => audioToAudio,
@@ -102,7 +103,7 @@ var HF_HUB_URL = "https://huggingface.co";
 var HF_ROUTER_URL = "https://router.huggingface.co";
 
 // src/providers/black-forest-labs.ts
-var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai/v1";
+var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
 var makeBody = (params) => {
   return params.args;
 };
@@ -114,7 +115,7 @@ var makeHeaders = (params) => {
   }
 };
 var makeUrl = (params) => {
-  return `${params.baseUrl}/${params.model}`;
+  return `${params.baseUrl}/v1/${params.model}`;
 };
 var BLACK_FOREST_LABS_CONFIG = {
   baseUrl: BLACK_FOREST_LABS_AI_API_BASE_URL,
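
Read together, the two Black Forest Labs hunks move the `/v1` path segment out of the base-URL constant and into `makeUrl`; assuming the truncated old constant ended in `/v1` (as the paired `makeUrl` change suggests), the final request URL is unchanged. A sketch of the composition:

```ts
// Sketch of how the constant and makeUrl compose after this change
const BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";

const makeUrl = (params: { baseUrl: string; model: string }): string =>
  `${params.baseUrl}/v1/${params.model}`;

// makeUrl({ baseUrl: BLACK_FOREST_LABS_AI_API_BASE_URL, model: "flux-dev" })
// -> "https://api.us1.bfl.ai/v1/flux-dev" ("flux-dev" is an illustrative model id)
```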
@@ -186,7 +187,7 @@ var FAL_AI_CONFIG = {
 };
 
 // src/providers/fireworks-ai.ts
-var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai/inference";
+var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";
 var makeBody5 = (params) => {
   return {
     ...params.args,
@@ -198,9 +199,9 @@ var makeHeaders5 = (params) => {
 };
 var makeUrl5 = (params) => {
   if (params.task === "text-generation" && params.chatCompletion) {
-    return `${params.baseUrl}/v1/chat/completions`;
+    return `${params.baseUrl}/inference/v1/chat/completions`;
   }
-  return params.baseUrl;
+  return `${params.baseUrl}/inference`;
 };
 var FIREWORKS_AI_CONFIG = {
   baseUrl: FIREWORKS_AI_API_BASE_URL,
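
Same refactor for Fireworks: the base constant is reduced to the bare origin and `makeUrl5` now appends `/inference` itself. A sketch of the resulting URLs:

```ts
const FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";

const makeUrl = (params: { baseUrl: string; task?: string; chatCompletion?: boolean }): string => {
  if (params.task === "text-generation" && params.chatCompletion) {
    return `${params.baseUrl}/inference/v1/chat/completions`;
  }
  return `${params.baseUrl}/inference`;
};

// chat completion -> "https://api.fireworks.ai/inference/v1/chat/completions"
// everything else -> "https://api.fireworks.ai/inference"
```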
@@ -325,7 +326,7 @@ var makeBody10 = (params) => {
   };
 };
 var makeHeaders10 = (params) => {
-  return { Authorization: `Bearer ${params.accessToken}` };
+  return { Authorization: `Bearer ${params.accessToken}`, Prefer: "wait" };
 };
 var makeUrl10 = (params) => {
   if (params.model.includes(":")) {
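
The added `Prefer: "wait"` header asks Replicate to hold the HTTP response open until the prediction finishes, rather than returning a pending prediction to poll. A hypothetical raw request showing where the header lands (the SDK builds this request for you; model and prompt are illustrative):

```ts
// Hypothetical direct call against Replicate's predictions endpoint
const res = await fetch("https://api.replicate.com/v1/models/black-forest-labs/flux-schnell/predictions", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${process.env.REPLICATE_API_TOKEN}`,
    Prefer: "wait", // block until the prediction completes
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ input: { prompt: "an astronaut riding a horse" } }),
});
```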
@@ -429,7 +430,7 @@ function isUrl(modelOrUrl) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.5.0";
+var version = "3.5.2";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
@@ -1582,8 +1583,8 @@ async function tabularClassification(args, options) {
   return res;
 }
 
-// src/HfInference.ts
-var HfInference = class {
+// src/InferenceClient.ts
+var InferenceClient = class {
   accessToken;
   defaultOptions;
   constructor(accessToken = "", defaultOptions = {}) {
@@ -1600,13 +1601,13 @@ var HfInference = class {
     }
   }
   /**
-   * Returns copy of HfInference tied to a specified endpoint.
+   * Returns copy of InferenceClient tied to a specified endpoint.
    */
   endpoint(endpointUrl) {
-    return new HfInferenceEndpoint(endpointUrl, this.accessToken, this.defaultOptions);
+    return new InferenceClientEndpoint(endpointUrl, this.accessToken, this.defaultOptions);
   }
 };
-var HfInferenceEndpoint = class {
+var InferenceClientEndpoint = class {
   constructor(endpointUrl, accessToken = "", defaultOptions = {}) {
     accessToken;
     defaultOptions;
@@ -1621,6 +1622,8 @@ var HfInferenceEndpoint = class {
     }
   }
 };
+var HfInference = class extends InferenceClient {
+};
 
 // src/types.ts
 var INFERENCE_PROVIDERS = [
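
The compatibility shim added here is just an empty subclass, which keeps `instanceof` checks and existing imports working. In isolation:

```ts
// Minimal sketch of the alias pattern used above
class InferenceClient {
  constructor(readonly accessToken = "", readonly defaultOptions: Record<string, unknown> = {}) {}
}

class HfInference extends InferenceClient {}

console.log(new HfInference("hf_xxx") instanceof InferenceClient); // true
```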
@@ -1774,18 +1777,7 @@ function getCurlInferenceSnippet(model, accessToken, provider, providerModelId,
 // src/snippets/python.ts
 var python_exports = {};
 __export(python_exports, {
-  getPythonInferenceSnippet: () => getPythonInferenceSnippet,
-  pythonSnippets: () => pythonSnippets,
-  snippetBasic: () => snippetBasic2,
-  snippetConversational: () => snippetConversational,
-  snippetDocumentQuestionAnswering: () => snippetDocumentQuestionAnswering,
-  snippetFile: () => snippetFile2,
-  snippetTabular: () => snippetTabular,
-  snippetTextToAudio: () => snippetTextToAudio,
-  snippetTextToImage: () => snippetTextToImage,
-  snippetTextToVideo: () => snippetTextToVideo,
-  snippetZeroShotClassification: () => snippetZeroShotClassification2,
-  snippetZeroShotImageClassification: () => snippetZeroShotImageClassification
+  getPythonInferenceSnippet: () => getPythonInferenceSnippet
 });
 var import_tasks3 = require("@huggingface/tasks");
 var import_tasks4 = require("@huggingface/tasks");
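
This hunk narrows the python-snippets module to a single public entry point; the individual snippet builders become internal. Conceptually (a sketch of the shape, not the package's published typings):

```ts
// Shape of the one remaining export, per the bundled code above
interface InferenceSnippet {
  client: string;  // e.g. "huggingface_hub" or "requests"
  content: string; // generated Python source
}

declare function getPythonInferenceSnippet(
  model: { id: string; pipeline_tag?: string },
  accessToken: string,
  provider: string,
  providerModelId?: string,
  opts?: { streaming?: boolean }
): InferenceSnippet[];
```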
@@ -1821,8 +1813,8 @@ var HFH_INFERENCE_CLIENT_METHODS = {
 var snippetImportInferenceClient = (accessToken, provider) => `from huggingface_hub import InferenceClient
 
 client = InferenceClient(
-
-
+    provider="${provider}",
+    api_key="${accessToken || "{API_TOKEN}"}",
 )`;
 var snippetConversational = (model, accessToken, provider, providerModelId, opts) => {
   const streaming = opts?.streaming ?? true;
@@ -1850,7 +1842,7 @@ stream = client.chat.completions.create(
     model="${model.id}",
     messages=messages,
 ${configStr}
-    stream=True
+    stream=True,
 )
 
 for chunk in stream:
@@ -1960,9 +1952,8 @@ var snippetBasic2 = (model, accessToken, provider) => {
       content: `${snippetImportInferenceClient(accessToken, provider)}
 
 result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
-    model="${model.id}",
     inputs=${(0, import_tasks4.getModelInputSnippet)(model)},
-
+    model="${model.id}",
 )
 
 print(result)
@@ -2004,7 +1995,7 @@ var snippetTextToImage = (model, accessToken, provider, providerModelId) => {
 # output is a PIL.Image object
 image = client.text_to_image(
     ${(0, import_tasks4.getModelInputSnippet)(model)},
-    model="${model.id}"
+    model="${model.id}",
 )`
     },
     ...provider === "fal-ai" ? [
@@ -2049,7 +2040,7 @@ var snippetTextToVideo = (model, accessToken, provider) => {
 
 video = client.text_to_video(
     ${(0, import_tasks4.getModelInputSnippet)(model)},
-    model="${model.id}"
+    model="${model.id}",
 )`
     }
   ] : [];
@@ -2103,23 +2094,79 @@ Audio(audio, rate=sampling_rate)`
     ];
   }
 };
-var snippetDocumentQuestionAnswering = (model, accessToken, provider) => {
+var snippetAutomaticSpeechRecognition = (model, accessToken, provider) => {
   return [
+    {
+      client: "huggingface_hub",
+      content: `${snippetImportInferenceClient(accessToken, provider)}
+output = client.automatic_speech_recognition(${(0, import_tasks4.getModelInputSnippet)(model)}, model="${model.id}")`
+    },
+    snippetFile2(model)[0]
+  ];
+};
+var snippetDocumentQuestionAnswering = (model, accessToken, provider) => {
+  const inputsAsStr = (0, import_tasks4.getModelInputSnippet)(model);
+  const inputsAsObj = JSON.parse(inputsAsStr);
+  return [
+    {
+      client: "huggingface_hub",
+      content: `${snippetImportInferenceClient(accessToken, provider)}
+output = client.document_question_answering(
+    "${inputsAsObj.image}",
+    question="${inputsAsObj.question}",
+    model="${model.id}",
+)`
+    },
     {
       client: "requests",
       content: `def query(payload):
     with open(payload["image"], "rb") as f:
         img = f.read()
-        payload["image"] = base64.b64encode(img).decode("utf-8")
+    payload["image"] = base64.b64encode(img).decode("utf-8")
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.json()
 
 output = query({
-    "inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
+    "inputs": ${inputsAsStr},
 })`
     }
   ];
 };
+var snippetImageToImage = (model, accessToken, provider) => {
+  const inputsAsStr = (0, import_tasks4.getModelInputSnippet)(model);
+  const inputsAsObj = JSON.parse(inputsAsStr);
+  return [
+    {
+      client: "huggingface_hub",
+      content: `${snippetImportInferenceClient(accessToken, provider)}
+# output is a PIL.Image object
+image = client.image_to_image(
+    "${inputsAsObj.image}",
+    prompt="${inputsAsObj.prompt}",
+    model="${model.id}",
+)`
+    },
+    {
+      client: "requests",
+      content: `def query(payload):
+    with open(payload["inputs"], "rb") as f:
+        img = f.read()
+    payload["inputs"] = base64.b64encode(img).decode("utf-8")
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content
+
+image_bytes = query({
+    "inputs": "${inputsAsObj.image}",
+    "parameters": {"prompt": "${inputsAsObj.prompt}"},
+})
+
+# You can access the image with PIL.Image for example
+import io
+from PIL import Image
+image = Image.open(io.BytesIO(image_bytes))`
+    }
+  ];
+};
 var pythonSnippets = {
   // Same order as in tasks/src/pipelines.ts
   "text-classification": snippetBasic2,
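
Both new builders (`snippetDocumentQuestionAnswering`'s hub variant and `snippetImageToImage`) parse the task's sample input, a JSON string, to splice named fields into the generated Python. A reduced sketch of that pattern; the sample input and model id are illustrative:

```ts
// Reduced sketch of the JSON.parse-and-splice pattern used above
const inputsAsStr = `{"image": "cat.png", "prompt": "Turn the cat into a tiger."}`;
const inputsAsObj = JSON.parse(inputsAsStr) as { image: string; prompt: string };

const content = [
  "image = client.image_to_image(",
  `    "${inputsAsObj.image}",`,
  `    prompt="${inputsAsObj.prompt}",`,
  `    model="stabilityai/example-model",`, // illustrative model id
  ")",
].join("\n");
```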
@@ -2135,7 +2182,7 @@ var pythonSnippets = {
   "image-text-to-text": snippetConversational,
   "fill-mask": snippetBasic2,
   "sentence-similarity": snippetBasic2,
-  "automatic-speech-recognition": snippetFile2,
+  "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
   "text-to-image": snippetTextToImage,
   "text-to-video": snippetTextToVideo,
   "text-to-speech": snippetTextToAudio,
@@ -2149,6 +2196,7 @@ var pythonSnippets = {
   "image-segmentation": snippetFile2,
   "document-question-answering": snippetDocumentQuestionAnswering,
   "image-to-text": snippetFile2,
+  "image-to-image": snippetImageToImage,
   "zero-shot-image-classification": snippetZeroShotImageClassification
 };
 function getPythonInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
@@ -2159,23 +2207,33 @@ function getPythonInferenceSnippet(model, accessToken, provider, providerModelId
     return snippets.map((snippet) => {
       return {
         ...snippet,
-        content: snippet.client === "requests" ? `import requests
-
-API_URL = "${(0, import_tasks3.openAIbaseUrl)(provider)}"
-headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
-
-${snippet.content}` : snippet.content
+        content: addImportsToSnippet(snippet.content, model, accessToken)
       };
     });
   }
 }
+var addImportsToSnippet = (snippet, model, accessToken) => {
+  if (snippet.includes("requests")) {
+    snippet = `import requests
+
+API_URL = "https://router.huggingface.co/hf-inference/models/${model.id}"
+headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
+
+${snippet}`;
+  }
+  if (snippet.includes("base64")) {
+    snippet = `import base64
+${snippet}`;
+  }
+  return snippet;
+};
 
 // src/snippets/js.ts
 var js_exports = {};
 __export(js_exports, {
   getJsInferenceSnippet: () => getJsInferenceSnippet,
   jsSnippets: () => jsSnippets,
-  snippetAutomaticSpeechRecognition: () => snippetAutomaticSpeechRecognition,
+  snippetAutomaticSpeechRecognition: () => snippetAutomaticSpeechRecognition2,
   snippetBasic: () => snippetBasic3,
   snippetFile: () => snippetFile3,
   snippetTextGeneration: () => snippetTextGeneration2,
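
The new `addImportsToSnippet` helper replaces the old inline ternary: it prepends `import requests` plus routing constants when the snippet uses `requests`, and `import base64` when it encodes files, keyed on simple substring checks. Note the requests variant now always targets the hf-inference router URL rather than the provider-specific base URL. A self-contained sketch mirroring that logic, with the signature simplified to take a model id:

```ts
// Sketch mirroring addImportsToSnippet above (simplified signature)
const addImportsToSnippet = (snippet: string, modelId: string, accessToken?: string): string => {
  if (snippet.includes("requests")) {
    const auth = accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`;
    snippet = [
      "import requests",
      "",
      `API_URL = "https://router.huggingface.co/hf-inference/models/${modelId}"`,
      `headers = {"Authorization": ${auth}}`,
      "",
      snippet,
    ].join("\n");
  }
  if (snippet.includes("base64")) {
    snippet = `import base64\n${snippet}`;
  }
  return snippet;
};
```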
@@ -2204,9 +2262,9 @@ var snippetBasic3 = (model, accessToken, provider) => {
     ...model.pipeline_tag && model.pipeline_tag in HFJS_METHODS ? [
       {
         client: "huggingface.js",
-        content: `import { HfInference } from "@huggingface/inference";
+        content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
   model: "${model.id}",
@@ -2261,9 +2319,9 @@ var snippetTextGeneration2 = (model, accessToken, provider, providerModelId, opt
   return [
     {
       client: "huggingface.js",
-      content: `import { HfInference } from "@huggingface/inference";
+      content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 let out = "";
 
@@ -2313,9 +2371,9 @@ for await (const chunk of stream) {
   return [
     {
       client: "huggingface.js",
-      content: `import { HfInference } from "@huggingface/inference";
+      content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 const chatCompletion = await client.chatCompletion({
   model: "${model.id}",
@@ -2383,9 +2441,9 @@ var snippetTextToImage2 = (model, accessToken, provider) => {
   return [
     {
       client: "huggingface.js",
-      content: `import { HfInference } from "@huggingface/inference";
+      content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 const image = await client.textToImage({
   model: "${model.id}",
@@ -2425,9 +2483,9 @@ var snippetTextToVideo2 = (model, accessToken, provider) => {
   return ["fal-ai", "replicate"].includes(provider) ? [
     {
       client: "huggingface.js",
-      content: `import { HfInference } from "@huggingface/inference";
+      content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 const video = await client.textToVideo({
   model: "${model.id}",
@@ -2485,13 +2543,13 @@ var snippetTextToAudio2 = (model, accessToken, provider) => {
     ];
   }
 };
-var snippetAutomaticSpeechRecognition = (model, accessToken, provider) => {
+var snippetAutomaticSpeechRecognition2 = (model, accessToken, provider) => {
   return [
     {
       client: "huggingface.js",
-      content: `import { HfInference } from "@huggingface/inference";
+      content: `import { InferenceClient } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
 
 const data = fs.readFileSync(${(0, import_tasks6.getModelInputSnippet)(model)});
 
@@ -2552,7 +2610,7 @@ var jsSnippets = {
   "text2text-generation": snippetBasic3,
   "fill-mask": snippetBasic3,
   "sentence-similarity": snippetBasic3,
-  "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
+  "automatic-speech-recognition": snippetAutomaticSpeechRecognition2,
   "text-to-image": snippetTextToImage2,
   "text-to-video": snippetTextToVideo2,
   "text-to-speech": snippetTextToAudio2,
@@ -2570,8 +2628,9 @@ function getJsInferenceSnippet(model, accessToken, provider, providerModelId, op
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   HfInference,
-  HfInferenceEndpoint,
   INFERENCE_PROVIDERS,
+  InferenceClient,
+  InferenceClientEndpoint,
   InferenceOutputError,
   audioClassification,
   audioToAudio,