@huggingface/inference 3.5.2 → 3.6.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +364 -970
- package/dist/index.js +366 -981
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/lib/makeRequestOptions.d.ts +16 -1
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/src/providers/novita.d.ts.map +1 -1
- package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
- package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
- package/dist/src/snippets/index.d.ts +1 -4
- package/dist/src/snippets/index.d.ts.map +1 -1
- package/dist/src/snippets/templates.exported.d.ts +2 -0
- package/dist/src/snippets/templates.exported.d.ts.map +1 -0
- package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
- package/package.json +9 -5
- package/src/index.ts +1 -1
- package/src/lib/makeRequestOptions.ts +37 -10
- package/src/providers/fireworks-ai.ts +1 -1
- package/src/providers/hf-inference.ts +1 -1
- package/src/providers/nebius.ts +3 -3
- package/src/providers/novita.ts +7 -6
- package/src/providers/sambanova.ts +1 -1
- package/src/providers/together.ts +3 -3
- package/src/snippets/getInferenceSnippets.ts +380 -0
- package/src/snippets/index.ts +1 -5
- package/src/snippets/templates.exported.ts +72 -0
- package/src/tasks/cv/textToVideo.ts +25 -5
- package/src/vendor/fetch-event-source/LICENSE +21 -0
- package/dist/src/snippets/curl.d.ts +0 -17
- package/dist/src/snippets/curl.d.ts.map +0 -1
- package/dist/src/snippets/js.d.ts +0 -21
- package/dist/src/snippets/js.d.ts.map +0 -1
- package/dist/src/snippets/python.d.ts +0 -4
- package/dist/src/snippets/python.d.ts.map +0 -1
- package/src/snippets/curl.ts +0 -177
- package/src/snippets/js.ts +0 -475
- package/src/snippets/python.ts +0 -563
package/dist/index.cjs
CHANGED
@@ -198,7 +198,7 @@ var makeHeaders5 = (params) => {
   return { Authorization: `Bearer ${params.accessToken}` };
 };
 var makeUrl5 = (params) => {
-  if (params.
+  if (params.chatCompletion) {
     return `${params.baseUrl}/inference/v1/chat/completions`;
   }
   return `${params.baseUrl}/inference`;
@@ -224,7 +224,7 @@ var makeUrl6 = (params) => {
   if (params.task && ["feature-extraction", "sentence-similarity"].includes(params.task)) {
     return `${params.baseUrl}/pipeline/${params.task}/${params.model}`;
   }
-  if (params.
+  if (params.chatCompletion) {
     return `${params.baseUrl}/models/${params.model}/v1/chat/completions`;
   }
   return `${params.baseUrl}/models/${params.model}`;
@@ -275,10 +275,10 @@ var makeUrl8 = (params) => {
   if (params.task === "text-to-image") {
     return `${params.baseUrl}/v1/images/generations`;
   }
+  if (params.chatCompletion) {
+    return `${params.baseUrl}/v1/chat/completions`;
+  }
   if (params.task === "text-generation") {
-    if (params.chatCompletion) {
-      return `${params.baseUrl}/v1/chat/completions`;
-    }
     return `${params.baseUrl}/v1/completions`;
   }
   return params.baseUrl;
@@ -291,7 +291,7 @@ var NEBIUS_CONFIG = {
 };
 
 // src/providers/novita.ts
-var NOVITA_API_BASE_URL = "https://api.novita.ai
+var NOVITA_API_BASE_URL = "https://api.novita.ai";
 var makeBody9 = (params) => {
   return {
     ...params.args,
@@ -302,11 +302,12 @@ var makeHeaders9 = (params) => {
   return { Authorization: `Bearer ${params.accessToken}` };
 };
 var makeUrl9 = (params) => {
-  if (params.
-
-
-  }
-
+  if (params.chatCompletion) {
+    return `${params.baseUrl}/v3/openai/chat/completions`;
+  } else if (params.task === "text-generation") {
+    return `${params.baseUrl}/v3/openai/completions`;
+  } else if (params.task === "text-to-video") {
+    return `${params.baseUrl}/v3/hf/${params.model}`;
   }
   return params.baseUrl;
 };
@@ -353,7 +354,7 @@ var makeHeaders11 = (params) => {
   return { Authorization: `Bearer ${params.accessToken}` };
 };
 var makeUrl11 = (params) => {
-  if (params.
+  if (params.chatCompletion) {
     return `${params.baseUrl}/v1/chat/completions`;
   }
   return params.baseUrl;
@@ -380,10 +381,10 @@ var makeUrl12 = (params) => {
   if (params.task === "text-to-image") {
     return `${params.baseUrl}/v1/images/generations`;
   }
+  if (params.chatCompletion) {
+    return `${params.baseUrl}/v1/chat/completions`;
+  }
   if (params.task === "text-generation") {
-    if (params.chatCompletion) {
-      return `${params.baseUrl}/v1/chat/completions`;
-    }
     return `${params.baseUrl}/v1/completions`;
   }
   return params.baseUrl;
@@ -430,7 +431,7 @@ function isUrl(modelOrUrl) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.
+var version = "3.6.1";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
@@ -518,11 +519,11 @@ var providerConfigs = {
   together: TOGETHER_CONFIG
 };
 async function makeRequestOptions(args, options) {
-  const {
+  const { provider: maybeProvider, model: maybeModel } = args;
   const provider = maybeProvider ?? "hf-inference";
   const providerConfig = providerConfigs[provider];
-  const {
-  if (endpointUrl && provider !== "hf-inference") {
+  const { task, chatCompletion: chatCompletion2 } = options ?? {};
+  if (args.endpointUrl && provider !== "hf-inference") {
     throw new Error(`Cannot use endpointUrl with a third-party provider.`);
   }
   if (maybeModel && isUrl(maybeModel)) {
@@ -538,17 +539,21 @@ async function makeRequestOptions(args, options) {
     throw new Error(`Provider ${provider} requires a model ID to be passed directly.`);
   }
   const hfModel = maybeModel ?? await loadDefaultModel(task);
-  const
+  const resolvedModel = providerConfig.clientSideRoutingOnly ? (
     // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
     removeProviderPrefix(maybeModel, provider)
-  ) : (
-
-
-
-
-
-
-
+  ) : await getProviderModelId({ model: hfModel, provider }, args, {
+    task,
+    chatCompletion: chatCompletion2,
+    fetch: options?.fetch
+  });
+  return makeRequestOptionsFromResolvedModel(resolvedModel, args, options);
+}
+function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
+  const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
+  const provider = maybeProvider ?? "hf-inference";
+  const providerConfig = providerConfigs[provider];
+  const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
   const authMethod = (() => {
     if (providerConfig.clientSideRoutingOnly) {
       if (accessToken && accessToken.startsWith("hf_")) {
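The hunk above splits request construction in two: an async step that only resolves the provider-side model id, and a synchronous makeRequestOptionsFromResolvedModel that builds the URL, headers, and body. The sketch below only restates that control flow; these are internal functions (not public API), and `resolveModelForProvider` is a hypothetical stand-in for the getProviderModelId call.

```ts
// Shape of the refactor above, shown in isolation (internal to the library).
async function makeRequestOptions(args, options) {
  // async half: only resolves the provider-side model id (may query the Hub)
  const resolvedModel = await resolveModelForProvider(args, options); // hypothetical stand-in for getProviderModelId
  // sync half: builds url, headers and body from the already-resolved model
  return makeRequestOptionsFromResolvedModel(resolvedModel, args, options);
}
```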
@@ -566,7 +571,7 @@ async function makeRequestOptions(args, options) {
   })();
   const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : providerConfig.makeUrl({
     baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.baseUrl,
-    model,
+    model: resolvedModel,
     chatCompletion: chatCompletion2,
     task
   });
@@ -584,7 +589,7 @@ async function makeRequestOptions(args, options) {
   const body = binary ? args.data : JSON.stringify(
     providerConfig.makeBody({
       args: remainingArgs,
-      model,
+      model: resolvedModel,
       task,
       chatCompletion: chatCompletion2
     })
@@ -1218,14 +1223,14 @@ async function zeroShotImageClassification(args, options) {
 }
 
 // src/tasks/cv/textToVideo.ts
-var SUPPORTED_PROVIDERS = ["fal-ai", "replicate"];
+var SUPPORTED_PROVIDERS = ["fal-ai", "novita", "replicate"];
 async function textToVideo(args, options) {
   if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
     throw new Error(
       `textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
     );
   }
-  const payload = args.provider === "fal-ai" || args.provider === "replicate" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
+  const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
   const res = await request(payload, {
     ...options,
     task: "text-to-video"
@@ -1237,6 +1242,13 @@ async function textToVideo(args, options) {
     }
     const urlResponse = await fetch(res.video.url);
     return await urlResponse.blob();
+  } else if (args.provider === "novita") {
+    const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
+    if (!isValidOutput) {
+      throw new InferenceOutputError("Expected { video: { video_url: string } }");
+    }
+    const urlResponse = await fetch(res.video.video_url);
+    return await urlResponse.blob();
   } else {
     const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
     if (!isValidOutput) {
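For reference, the response shape the new Novita branch validates before downloading the result can be written as a type. This mirrors the runtime checks in the hunk above and adds nothing beyond them.

```ts
// Response shape the novita text-to-video branch expects; the library then
// fetches `video.video_url` and returns the downloaded content as a Blob.
interface NovitaTextToVideoOutput {
  video: {
    video_url: string;
  };
}
```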
@@ -1645,985 +1657,367 @@ var INFERENCE_PROVIDERS = [
 // src/snippets/index.ts
 var snippets_exports = {};
 __export(snippets_exports, {
-
-  js: () => js_exports,
-  python: () => python_exports
+  getInferenceSnippets: () => getInferenceSnippets
 });
 
-// src/snippets/
-var curl_exports = {};
-__export(curl_exports, {
-  curlSnippets: () => curlSnippets,
-  getCurlInferenceSnippet: () => getCurlInferenceSnippet,
-  snippetBasic: () => snippetBasic,
-  snippetFile: () => snippetFile,
-  snippetTextGeneration: () => snippetTextGeneration,
-  snippetZeroShotClassification: () => snippetZeroShotClassification
-});
+// src/snippets/getInferenceSnippets.ts
 var import_tasks = require("@huggingface/tasks");
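The snippets namespace now exposes a single getInferenceSnippets entry point in place of the old curl/js/python exports. The call below is a hedged sketch: the exact signature is not visible in this portion of the diff, so the argument list is assumed to match the old per-language helpers (model, accessToken, provider, providerModelId, opts), and the `snippets` namespace re-export is likewise assumed; check dist/src/snippets/getInferenceSnippets.d.ts before relying on it.

```ts
// Hedged sketch of the new single entry point; signature and namespace export
// are assumptions, to be verified against getInferenceSnippets.d.ts.
import { snippets } from "@huggingface/inference";

const model = {
  id: "<model-id>",                 // placeholder
  pipeline_tag: "text-generation",
  tags: ["conversational"],
}; // minimal model description; the generator reads id, pipeline_tag and tags

const generated = snippets.getInferenceSnippets(model, "hf_xxx", "hf-inference");
// Expected to yield one snippet per language/client pair (fetch, huggingface.js,
// openai, huggingface_hub, requests, curl, ...), rendered from the Jinja
// templates that follow in this bundle.
```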
1664
|
-
var
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
-
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1665
|
+
var import_jinja = require("@huggingface/jinja");
|
|
1666
|
+
|
|
1667
|
+
// src/snippets/templates.exported.ts
|
|
1668
|
+
var templates = {
|
|
1669
|
+
"js": {
|
|
1670
|
+
"fetch": {
|
|
1671
|
+
"basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
|
|
1672
|
+
"basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
|
|
1673
|
+
"basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
|
|
1674
|
+
"textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
|
|
1675
|
+
"textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
|
|
1676
|
+
"zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
|
|
1677
|
+
},
|
|
1678
|
+
"huggingface.js": {
|
|
1679
|
+
"basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
|
|
1680
|
+
"basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
|
|
1681
|
+
"basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
|
|
1682
|
+
"conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
|
|
1683
|
+
"conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = await client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}',
|
|
1684
|
+
"textToImage": `import { InferenceClient } from "@huggingface/inference";
|
|
1685
|
+
|
|
1686
|
+
const client = new InferenceClient("{{ accessToken }}");
|
|
1687
|
+
|
|
1688
|
+
const image = await client.textToImage({
|
|
1689
|
+
provider: "{{ provider }}",
|
|
1690
|
+
model: "{{ model.id }}",
|
|
1691
|
+
inputs: {{ inputs.asObj.inputs }},
|
|
1692
|
+
parameters: { num_inference_steps: 5 },
|
|
1693
|
+
});
|
|
1694
|
+
/// Use the generated image (it's a Blob)`,
|
|
1695
|
+
"textToVideo": `import { InferenceClient } from "@huggingface/inference";
|
|
1696
|
+
|
|
1697
|
+
const client = new InferenceClient("{{ accessToken }}");
|
|
1698
|
+
|
|
1699
|
+
const image = await client.textToVideo({
|
|
1700
|
+
provider: "{{ provider }}",
|
|
1701
|
+
model: "{{ model.id }}",
|
|
1702
|
+
inputs: {{ inputs.asObj.inputs }},
|
|
1703
|
+
});
|
|
1704
|
+
// Use the generated video (it's a Blob)`
|
|
1705
|
+
},
|
|
1706
|
+
"openai": {
|
|
1707
|
+
"conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
|
|
1708
|
+
"conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nlet out = "";\n\nconst stream = await client.chat.completions.create({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}'
|
|
1677
1709
|
}
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
1687
|
-
|
|
1688
|
-
|
|
1689
|
-
|
|
1690
|
-
|
|
1691
|
-
}
|
|
1692
|
-
|
|
1693
|
-
{
|
|
1694
|
-
|
|
1695
|
-
|
|
1696
|
-
|
|
1697
|
-
|
|
1698
|
-
|
|
1699
|
-
"
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
})
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
"
|
|
1711
|
-
}'
|
|
1712
|
-
}
|
|
1713
|
-
];
|
|
1714
|
-
} else {
|
|
1715
|
-
return snippetBasic(model, accessToken, provider);
|
|
1716
|
-
}
|
|
1717
|
-
};
|
|
1718
|
-
var snippetZeroShotClassification = (model, accessToken, provider) => {
|
|
1719
|
-
if (provider !== "hf-inference") {
|
|
1720
|
-
return [];
|
|
1721
|
-
}
|
|
1722
|
-
return [
|
|
1723
|
-
{
|
|
1724
|
-
client: "curl",
|
|
1725
|
-
content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
|
|
1726
|
-
-X POST \\
|
|
1727
|
-
-d '{"inputs": ${(0, import_tasks2.getModelInputSnippet)(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
|
|
1728
|
-
-H 'Content-Type: application/json' \\
|
|
1729
|
-
-H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`
|
|
1710
|
+
},
|
|
1711
|
+
"python": {
|
|
1712
|
+
"fal_client": {
|
|
1713
|
+
"textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
|
|
1714
|
+
},
|
|
1715
|
+
"huggingface_hub": {
|
|
1716
|
+
"basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
|
|
1717
|
+
"basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
|
|
1718
|
+
"basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
|
|
1719
|
+
"conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
|
|
1720
|
+
"conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
|
|
1721
|
+
"documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
|
|
1722
|
+
"imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
|
|
1723
|
+
"importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n)',
|
|
1724
|
+
"textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
|
|
1725
|
+
"textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
|
|
1726
|
+
},
|
|
1727
|
+
"openai": {
|
|
1728
|
+
"conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
|
|
1729
|
+
"conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
|
|
1730
|
+
},
|
|
1731
|
+
"requests": {
|
|
1732
|
+
"basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
|
|
1733
|
+
"basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
|
|
1734
|
+
"basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
|
|
1735
|
+
"conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
|
|
1736
|
+
"conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
|
|
1737
|
+
"documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
|
|
1738
|
+
"imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
|
|
1739
|
+
"importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {"Authorization": "{{ authorizationHeader }}"}',
|
|
1740
|
+
"tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
|
|
1741
|
+
"textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
|
|
1742
|
+
"textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
|
|
1743
|
+
"zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
|
|
1744
|
+
"zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
|
|
1730
1745
|
}
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
{
|
|
1739
|
-
|
|
1740
|
-
|
|
1741
|
-
|
|
1742
|
-
|
|
1743
|
-
|
|
1746
|
+
},
|
|
1747
|
+
"sh": {
|
|
1748
|
+
"curl": {
|
|
1749
|
+
"basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
|
|
1750
|
+
"basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
|
|
1751
|
+
"basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
|
|
1752
|
+
"conversational": `curl {{ fullUrl }} \\
|
|
1753
|
+
-H 'Authorization: {{ authorizationHeader }}' \\
|
|
1754
|
+
-H 'Content-Type: application/json' \\
|
|
1755
|
+
-d '{
|
|
1756
|
+
{{ providerInputs.asCurlString }},
|
|
1757
|
+
"stream": false
|
|
1758
|
+
}'`,
|
|
1759
|
+
"conversationalStream": `curl {{ fullUrl }} \\
|
|
1760
|
+
-H 'Authorization: {{ authorizationHeader }}' \\
|
|
1761
|
+
-H 'Content-Type: application/json' \\
|
|
1762
|
+
-d '{
|
|
1763
|
+
{{ providerInputs.asCurlString }},
|
|
1764
|
+
"stream": true
|
|
1765
|
+
}'`,
|
|
1766
|
+
"zeroShotClassification": `curl {{ fullUrl }} \\
|
|
1767
|
+
-X POST \\
|
|
1768
|
+
-d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
|
|
1769
|
+
-H 'Content-Type: application/json' \\
|
|
1770
|
+
-H 'Authorization: {{ authorizationHeader }}'`
|
|
1744
1771
|
}
|
|
1745
|
-
|
|
1746
|
-
};
|
|
1747
|
-
var curlSnippets = {
|
|
1748
|
-
// Same order as in tasks/src/pipelines.ts
|
|
1749
|
-
"text-classification": snippetBasic,
|
|
1750
|
-
"token-classification": snippetBasic,
|
|
1751
|
-
"table-question-answering": snippetBasic,
|
|
1752
|
-
"question-answering": snippetBasic,
|
|
1753
|
-
"zero-shot-classification": snippetZeroShotClassification,
|
|
1754
|
-
translation: snippetBasic,
|
|
1755
|
-
summarization: snippetBasic,
|
|
1756
|
-
"feature-extraction": snippetBasic,
|
|
1757
|
-
"text-generation": snippetTextGeneration,
|
|
1758
|
-
"image-text-to-text": snippetTextGeneration,
|
|
1759
|
-
"text2text-generation": snippetBasic,
|
|
1760
|
-
"fill-mask": snippetBasic,
|
|
1761
|
-
"sentence-similarity": snippetBasic,
|
|
1762
|
-
"automatic-speech-recognition": snippetFile,
|
|
1763
|
-
"text-to-image": snippetBasic,
|
|
1764
|
-
"text-to-speech": snippetBasic,
|
|
1765
|
-
"text-to-audio": snippetBasic,
|
|
1766
|
-
"audio-to-audio": snippetFile,
|
|
1767
|
-
"audio-classification": snippetFile,
|
|
1768
|
-
"image-classification": snippetFile,
|
|
1769
|
-
"image-to-text": snippetFile,
|
|
1770
|
-
"object-detection": snippetFile,
|
|
1771
|
-
"image-segmentation": snippetFile
|
|
1772
|
-
};
|
|
1773
|
-
function getCurlInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
|
|
1774
|
-
return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? [] : [];
|
|
1775
|
-
}
|
|
1772
|
+
}
|
|
1773
|
+
};
|
|
1776
1774
|
|
|
1777
|
-
// src/snippets/
|
|
1778
|
-
var
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
|
|
1775
|
+
// src/snippets/getInferenceSnippets.ts
|
|
1776
|
+
var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
|
|
1777
|
+
var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
|
|
1778
|
+
var SH_CLIENTS = ["curl"];
|
|
1779
|
+
var CLIENTS = {
|
|
1780
|
+
js: [...JS_CLIENTS],
|
|
1781
|
+
python: [...PYTHON_CLIENTS],
|
|
1782
|
+
sh: [...SH_CLIENTS]
|
|
1783
|
+
};
|
|
1784
|
+
var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
|
|
1785
|
+
var loadTemplate = (language, client, templateName) => {
|
|
1786
|
+
const template = templates[language]?.[client]?.[templateName];
|
|
1787
|
+
if (!template) {
|
|
1788
|
+
throw new Error(`Template not found: ${language}/${client}/${templateName}`);
|
|
1789
|
+
}
|
|
1790
|
+
return (data) => new import_jinja.Template(template).render({ ...data });
|
|
1791
|
+
};
|
|
1792
|
+
var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
|
|
1793
|
+
var snippetImportRequests = loadTemplate("python", "requests", "importRequests");
|
|
1794
|
+
var HF_PYTHON_METHODS = {
|
|
1785
1795
|
"audio-classification": "audio_classification",
|
|
1786
1796
|
"audio-to-audio": "audio_to_audio",
|
|
1787
1797
|
"automatic-speech-recognition": "automatic_speech_recognition",
|
|
1788
|
-
"
|
|
1798
|
+
"document-question-answering": "document_question_answering",
|
|
1799
|
+
"feature-extraction": "feature_extraction",
|
|
1800
|
+
"fill-mask": "fill_mask",
|
|
1789
1801
|
"image-classification": "image_classification",
|
|
1790
1802
|
"image-segmentation": "image_segmentation",
|
|
1791
1803
|
"image-to-image": "image_to_image",
|
|
1792
1804
|
"image-to-text": "image_to_text",
|
|
1793
1805
|
"object-detection": "object_detection",
|
|
1794
|
-
"text-to-image": "text_to_image",
|
|
1795
|
-
"text-to-video": "text_to_video",
|
|
1796
|
-
"zero-shot-image-classification": "zero_shot_image_classification",
|
|
1797
|
-
"document-question-answering": "document_question_answering",
|
|
1798
|
-
"visual-question-answering": "visual_question_answering",
|
|
1799
|
-
"feature-extraction": "feature_extraction",
|
|
1800
|
-
"fill-mask": "fill_mask",
|
|
1801
1806
|
"question-answering": "question_answering",
|
|
1802
1807
|
"sentence-similarity": "sentence_similarity",
|
|
1803
1808
|
summarization: "summarization",
|
|
1804
1809
|
"table-question-answering": "table_question_answering",
|
|
1810
|
+
"tabular-classification": "tabular_classification",
|
|
1811
|
+
"tabular-regression": "tabular_regression",
|
|
1805
1812
|
"text-classification": "text_classification",
|
|
1806
1813
|
"text-generation": "text_generation",
|
|
1814
|
+
"text-to-image": "text_to_image",
|
|
1815
|
+
"text-to-speech": "text_to_speech",
|
|
1816
|
+
"text-to-video": "text_to_video",
|
|
1807
1817
|
"token-classification": "token_classification",
|
|
1808
1818
|
translation: "translation",
|
|
1819
|
+
"visual-question-answering": "visual_question_answering",
|
|
1809
1820
|
"zero-shot-classification": "zero_shot_classification",
|
|
1810
|
-
"
|
|
1811
|
-
"tabular-regression": "tabular_regression"
|
|
1812
|
-
};
|
|
1813
|
-
var snippetImportInferenceClient = (accessToken, provider) => `from huggingface_hub import InferenceClient
|
|
1814
|
-
|
|
1815
|
-
client = InferenceClient(
|
|
1816
|
-
provider="${provider}",
|
|
1817
|
-
api_key="${accessToken || "{API_TOKEN}"}",
|
|
1818
|
-
)`;
|
|
1819
|
-
var snippetConversational = (model, accessToken, provider, providerModelId, opts) => {
|
|
1820
|
-
const streaming = opts?.streaming ?? true;
|
|
1821
|
-
const exampleMessages = (0, import_tasks4.getModelInputSnippet)(model);
|
|
1822
|
-
const messages = opts?.messages ?? exampleMessages;
|
|
1823
|
-
const messagesStr = (0, import_tasks4.stringifyMessages)(messages, { attributeKeyQuotes: true });
|
|
1824
|
-
const config = {
|
|
1825
|
-
...opts?.temperature ? { temperature: opts.temperature } : void 0,
|
|
1826
|
-
max_tokens: opts?.max_tokens ?? 500,
|
|
1827
|
-
...opts?.top_p ? { top_p: opts.top_p } : void 0
|
|
1828
|
-
};
|
|
1829
|
-
const configStr = (0, import_tasks4.stringifyGenerationConfig)(config, {
|
|
1830
|
-
indent: "\n ",
|
|
1831
|
-
attributeValueConnector: "="
|
|
1832
|
-
});
|
|
1833
|
-
if (streaming) {
|
|
1834
|
-
return [
|
|
1835
|
-
{
|
|
1836
|
-
client: "huggingface_hub",
|
|
1837
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
1838
|
-
|
|
1839
|
-
messages = ${messagesStr}
|
|
1840
|
-
|
|
1841
|
-
stream = client.chat.completions.create(
|
|
1842
|
-
model="${model.id}",
|
|
1843
|
-
messages=messages,
|
|
1844
|
-
${configStr}
|
|
1845
|
-
stream=True,
|
|
1846
|
-
)
|
|
1847
|
-
|
|
1848
|
-
for chunk in stream:
|
|
1849
|
-
print(chunk.choices[0].delta.content, end="")`
|
|
1850
|
-
},
|
|
1851
|
-
{
|
|
1852
|
-
client: "openai",
|
|
1853
|
-
content: `from openai import OpenAI
|
|
1854
|
-
|
|
1855
|
-
client = OpenAI(
|
|
1856
|
-
base_url="${(0, import_tasks3.openAIbaseUrl)(provider)}",
|
|
1857
|
-
api_key="${accessToken || "{API_TOKEN}"}"
|
|
1858
|
-
)
|
|
1859
|
-
|
|
1860
|
-
messages = ${messagesStr}
|
|
1861
|
-
|
|
1862
|
-
stream = client.chat.completions.create(
|
|
1863
|
-
model="${providerModelId ?? model.id}",
|
|
1864
|
-
messages=messages,
|
|
1865
|
-
${configStr}
|
|
1866
|
-
stream=True
|
|
1867
|
-
)
|
|
1868
|
-
|
|
1869
|
-
for chunk in stream:
|
|
1870
|
-
print(chunk.choices[0].delta.content, end="")`
|
|
1871
|
-
}
|
|
1872
|
-
];
|
|
1873
|
-
} else {
|
|
1874
|
-
return [
|
|
1875
|
-
{
|
|
1876
|
-
client: "huggingface_hub",
|
|
1877
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
1878
|
-
|
|
1879
|
-
messages = ${messagesStr}
|
|
1880
|
-
|
|
1881
|
-
completion = client.chat.completions.create(
|
|
1882
|
-
model="${model.id}",
|
|
1883
|
-
messages=messages,
|
|
1884
|
-
${configStr}
|
|
1885
|
-
)
|
|
1886
|
-
|
|
1887
|
-
print(completion.choices[0].message)`
|
|
1888
|
-
},
|
|
1889
|
-
{
|
|
1890
|
-
client: "openai",
|
|
1891
|
-
content: `from openai import OpenAI
|
|
1892
|
-
|
|
1893
|
-
client = OpenAI(
|
|
1894
|
-
base_url="${(0, import_tasks3.openAIbaseUrl)(provider)}",
|
|
1895
|
-
api_key="${accessToken || "{API_TOKEN}"}"
|
|
1896
|
-
)
|
|
1897
|
-
|
|
1898
|
-
messages = ${messagesStr}
|
|
1899
|
-
|
|
1900
|
-
completion = client.chat.completions.create(
|
|
1901
|
-
model="${providerModelId ?? model.id}",
|
|
1902
|
-
messages=messages,
|
|
1903
|
-
${configStr}
|
|
1904
|
-
)
|
|
1905
|
-
|
|
1906
|
-
print(completion.choices[0].message)`
|
|
1907
|
-
}
|
|
1908
|
-
];
|
|
1909
|
-
}
|
|
1910
|
-
};
|
|
1911
|
-
var snippetZeroShotClassification2 = (model) => {
|
|
1912
|
-
return [
|
|
1913
|
-
{
|
|
1914
|
-
client: "requests",
|
|
1915
|
-
content: `def query(payload):
|
|
1916
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
1917
|
-
return response.json()
|
|
1918
|
-
|
|
1919
|
-
output = query({
|
|
1920
|
-
"inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
1921
|
-
"parameters": {"candidate_labels": ["refund", "legal", "faq"]},
|
|
1922
|
-
})`
|
|
1923
|
-
}
|
|
1924
|
-
];
|
|
1925
|
-
};
|
|
1926
|
-
var snippetZeroShotImageClassification = (model) => {
|
|
1927
|
-
return [
|
|
1928
|
-
{
|
|
1929
|
-
client: "requests",
|
|
1930
|
-
content: `def query(data):
|
|
1931
|
-
with open(data["image_path"], "rb") as f:
|
|
1932
|
-
img = f.read()
|
|
1933
|
-
payload={
|
|
1934
|
-
"parameters": data["parameters"],
|
|
1935
|
-
"inputs": base64.b64encode(img).decode("utf-8")
|
|
1936
|
-
}
|
|
1937
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
1938
|
-
return response.json()
|
|
1939
|
-
|
|
1940
|
-
output = query({
|
|
1941
|
-
"image_path": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
1942
|
-
"parameters": {"candidate_labels": ["cat", "dog", "llama"]},
|
|
1943
|
-
})`
|
|
1944
|
-
}
|
|
1945
|
-
];
|
|
1946
|
-
};
|
|
1947
|
-
var snippetBasic2 = (model, accessToken, provider) => {
|
|
1948
|
-
return [
|
|
1949
|
-
...model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS ? [
|
|
1950
|
-
{
|
|
1951
|
-
client: "huggingface_hub",
|
|
1952
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
1953
|
-
|
|
1954
|
-
result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
|
|
1955
|
-
inputs=${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
1956
|
-
model="${model.id}",
|
|
1957
|
-
)
|
|
1958
|
-
|
|
1959
|
-
print(result)
|
|
1960
|
-
`
|
|
1961
|
-
}
|
|
1962
|
-
] : [],
|
|
1963
|
-
{
|
|
1964
|
-
client: "requests",
|
|
1965
|
-
content: `def query(payload):
|
|
1966
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
1967
|
-
return response.json()
|
|
1968
|
-
|
|
1969
|
-
output = query({
|
|
1970
|
-
"inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
1971
|
-
})`
|
|
1972
|
-
}
|
|
1973
|
-
];
|
|
1974
|
-
};
|
|
1975
|
-
var snippetFile2 = (model) => {
|
|
1976
|
-
return [
|
|
1977
|
-
{
|
|
1978
|
-
client: "requests",
|
|
1979
|
-
content: `def query(filename):
|
|
1980
|
-
with open(filename, "rb") as f:
|
|
1981
|
-
data = f.read()
|
|
1982
|
-
response = requests.post(API_URL, headers=headers, data=data)
|
|
1983
|
-
return response.json()
|
|
1984
|
-
|
|
1985
|
-
output = query(${(0, import_tasks4.getModelInputSnippet)(model)})`
|
|
1986
|
-
}
|
|
1987
|
-
];
|
|
1988
|
-
};
|
|
1989
|
-
var snippetTextToImage = (model, accessToken, provider, providerModelId) => {
|
|
1990
|
-
return [
|
|
1991
|
-
{
|
|
1992
|
-
client: "huggingface_hub",
|
|
1993
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
1994
|
-
|
|
1995
|
-
# output is a PIL.Image object
|
|
1996
|
-
image = client.text_to_image(
|
|
1997
|
-
${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
1998
|
-
model="${model.id}",
|
|
1999
|
-
)`
|
|
2000
|
-
},
|
|
2001
|
-
...provider === "fal-ai" ? [
|
|
2002
|
-
{
|
|
2003
|
-
client: "fal-client",
|
|
2004
|
-
content: `import fal_client
|
|
2005
|
-
|
|
2006
|
-
result = fal_client.subscribe(
|
|
2007
|
-
"${providerModelId ?? model.id}",
|
|
2008
|
-
arguments={
|
|
2009
|
-
"prompt": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
2010
|
-
},
|
|
2011
|
-
)
|
|
2012
|
-
print(result)
|
|
2013
|
-
`
|
|
2014
|
-
}
|
|
2015
|
-
] : [],
|
|
2016
|
-
...provider === "hf-inference" ? [
|
|
2017
|
-
{
|
|
2018
|
-
client: "requests",
|
|
2019
|
-
content: `def query(payload):
|
|
2020
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2021
|
-
return response.content
|
|
2022
|
-
|
|
2023
|
-
image_bytes = query({
|
|
2024
|
-
"inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
2025
|
-
})
|
|
2026
|
-
|
|
2027
|
-
# You can access the image with PIL.Image for example
|
|
2028
|
-
import io
|
|
2029
|
-
from PIL import Image
|
|
2030
|
-
image = Image.open(io.BytesIO(image_bytes))`
|
|
2031
|
-
}
|
|
2032
|
-
] : []
|
|
2033
|
-
];
|
|
2034
|
-
};
|
|
2035
|
-
var snippetTextToVideo = (model, accessToken, provider) => {
|
|
2036
|
-
return ["fal-ai", "replicate"].includes(provider) ? [
|
|
2037
|
-
{
|
|
2038
|
-
client: "huggingface_hub",
|
|
2039
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
2040
|
-
|
|
2041
|
-
video = client.text_to_video(
|
|
2042
|
-
${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
2043
|
-
model="${model.id}",
|
|
2044
|
-
)`
|
|
2045
|
-
}
|
|
2046
|
-
] : [];
|
|
2047
|
-
};
|
|
2048
|
-
var snippetTabular = (model) => {
|
|
2049
|
-
return [
|
|
2050
|
-
{
|
|
2051
|
-
client: "requests",
|
|
2052
|
-
content: `def query(payload):
|
|
2053
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2054
|
-
return response.content
|
|
2055
|
-
|
|
2056
|
-
response = query({
|
|
2057
|
-
"inputs": {"data": ${(0, import_tasks4.getModelInputSnippet)(model)}},
|
|
2058
|
-
})`
|
|
2059
|
-
}
|
|
2060
|
-
];
|
|
2061
|
-
};
|
|
2062
|
-
var snippetTextToAudio = (model) => {
|
|
2063
|
-
if (model.library_name === "transformers") {
|
|
2064
|
-
return [
|
|
2065
|
-
{
|
|
2066
|
-
client: "requests",
|
|
2067
|
-
content: `def query(payload):
|
|
2068
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2069
|
-
return response.content
|
|
2070
|
-
|
|
2071
|
-
audio_bytes = query({
|
|
2072
|
-
"inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
2073
|
-
})
|
|
2074
|
-
# You can access the audio with IPython.display for example
|
|
2075
|
-
from IPython.display import Audio
|
|
2076
|
-
Audio(audio_bytes)`
|
|
2077
|
-
}
|
|
2078
|
-
];
|
|
2079
|
-
} else {
|
|
2080
|
-
return [
|
|
2081
|
-
{
|
|
2082
|
-
client: "requests",
|
|
2083
|
-
content: `def query(payload):
|
|
2084
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2085
|
-
return response.json()
|
|
2086
|
-
|
|
2087
|
-
audio, sampling_rate = query({
|
|
2088
|
-
"inputs": ${(0, import_tasks4.getModelInputSnippet)(model)},
|
|
2089
|
-
})
|
|
2090
|
-
# You can access the audio with IPython.display for example
|
|
2091
|
-
from IPython.display import Audio
|
|
2092
|
-
Audio(audio, rate=sampling_rate)`
|
|
2093
|
-
}
|
|
2094
|
-
];
|
|
2095
|
-
}
|
|
1821
|
+
"zero-shot-image-classification": "zero_shot_image_classification"
|
|
2096
1822
|
};
|
|
2097
|
-
var
|
|
2098
|
-
|
|
2099
|
-
|
|
2100
|
-
|
|
2101
|
-
|
|
2102
|
-
output = client.automatic_speech_recognition(${(0, import_tasks4.getModelInputSnippet)(model)}, model="${model.id}")`
|
|
2103
|
-
},
|
|
2104
|
-
snippetFile2(model)[0]
|
|
2105
|
-
];
|
|
2106
|
-
};
|
|
2107
|
-
var snippetDocumentQuestionAnswering = (model, accessToken, provider) => {
|
|
2108
|
-
const inputsAsStr = (0, import_tasks4.getModelInputSnippet)(model);
|
|
2109
|
-
const inputsAsObj = JSON.parse(inputsAsStr);
|
|
2110
|
-
return [
|
|
2111
|
-
{
|
|
2112
|
-
client: "huggingface_hub",
|
|
2113
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
2114
|
-
output = client.document_question_answering(
|
|
2115
|
-
"${inputsAsObj.image}",
|
|
2116
|
-
question="${inputsAsObj.question}",
|
|
2117
|
-
model="${model.id}",
|
|
2118
|
-
)`
|
|
2119
|
-
},
|
|
2120
|
-
{
|
|
2121
|
-
client: "requests",
|
|
2122
|
-
content: `def query(payload):
|
|
2123
|
-
with open(payload["image"], "rb") as f:
|
|
2124
|
-
img = f.read()
|
|
2125
|
-
payload["image"] = base64.b64encode(img).decode("utf-8")
|
|
2126
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2127
|
-
return response.json()
|
|
2128
|
-
|
|
2129
|
-
output = query({
|
|
2130
|
-
"inputs": ${inputsAsStr},
|
|
2131
|
-
})`
|
|
2132
|
-
}
|
|
2133
|
-
];
|
|
2134
|
-
};
|
|
2135
|
-
var snippetImageToImage = (model, accessToken, provider) => {
|
|
2136
|
-
const inputsAsStr = (0, import_tasks4.getModelInputSnippet)(model);
|
|
2137
|
-
const inputsAsObj = JSON.parse(inputsAsStr);
|
|
2138
|
-
return [
|
|
2139
|
-
{
|
|
2140
|
-
client: "huggingface_hub",
|
|
2141
|
-
content: `${snippetImportInferenceClient(accessToken, provider)}
|
|
2142
|
-
# output is a PIL.Image object
|
|
2143
|
-
image = client.image_to_image(
|
|
2144
|
-
"${inputsAsObj.image}",
|
|
2145
|
-
prompt="${inputsAsObj.prompt}",
|
|
2146
|
-
model="${model.id}",
|
|
2147
|
-
)`
|
|
2148
|
-
},
|
|
2149
|
-
{
|
|
2150
|
-
client: "requests",
|
|
2151
|
-
content: `def query(payload):
|
|
2152
|
-
with open(payload["inputs"], "rb") as f:
|
|
2153
|
-
img = f.read()
|
|
2154
|
-
payload["inputs"] = base64.b64encode(img).decode("utf-8")
|
|
2155
|
-
response = requests.post(API_URL, headers=headers, json=payload)
|
|
2156
|
-
return response.content
|
|
2157
|
-
|
|
2158
|
-
image_bytes = query({
|
|
2159
|
-
"inputs": "${inputsAsObj.image}",
|
|
2160
|
-
"parameters": {"prompt": "${inputsAsObj.prompt}"},
|
|
2161
|
-
})
|
|
2162
|
-
|
|
2163
|
-
# You can access the image with PIL.Image for example
|
|
2164
|
-
import io
|
|
2165
|
-
from PIL import Image
|
|
2166
|
-
image = Image.open(io.BytesIO(image_bytes))`
|
|
2167
|
-
}
|
|
2168
|
-
];
|
|
2169
|
-
};
|
|
2170
|
-
var pythonSnippets = {
|
|
2171
|
-
// Same order as in tasks/src/pipelines.ts
|
|
2172
|
-
"text-classification": snippetBasic2,
|
|
2173
|
-
"token-classification": snippetBasic2,
|
|
2174
|
-
"table-question-answering": snippetBasic2,
|
|
2175
|
-
"question-answering": snippetBasic2,
|
|
2176
|
-
"zero-shot-classification": snippetZeroShotClassification2,
|
|
2177
|
-
translation: snippetBasic2,
|
|
2178
|
-
summarization: snippetBasic2,
|
|
2179
|
-
"feature-extraction": snippetBasic2,
|
|
2180
|
-
"text-generation": snippetBasic2,
|
|
2181
|
-
"text2text-generation": snippetBasic2,
|
|
2182
|
-
"image-text-to-text": snippetConversational,
|
|
2183
|
-
"fill-mask": snippetBasic2,
|
|
2184
|
-
"sentence-similarity": snippetBasic2,
|
|
2185
|
-
"automatic-speech-recognition": snippetAutomaticSpeechRecognition,
|
|
2186
|
-
"text-to-image": snippetTextToImage,
|
|
2187
|
-
"text-to-video": snippetTextToVideo,
|
|
2188
|
-
"text-to-speech": snippetTextToAudio,
|
|
2189
|
-
"text-to-audio": snippetTextToAudio,
|
|
2190
|
-
"audio-to-audio": snippetFile2,
|
|
2191
|
-
"audio-classification": snippetFile2,
|
|
2192
|
-
"image-classification": snippetFile2,
|
|
2193
|
-
"tabular-regression": snippetTabular,
|
|
2194
|
-
"tabular-classification": snippetTabular,
|
|
2195
|
-
"object-detection": snippetFile2,
|
|
2196
|
-
"image-segmentation": snippetFile2,
|
|
2197
|
-
"document-question-answering": snippetDocumentQuestionAnswering,
|
|
2198
|
-
"image-to-text": snippetFile2,
|
|
2199
|
-
"image-to-image": snippetImageToImage,
|
|
2200
|
-
"zero-shot-image-classification": snippetZeroShotImageClassification
|
|
2201
|
-
};
|
|
2202
|
-
function getPythonInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
|
|
2203
|
-
if (model.tags.includes("conversational")) {
|
|
2204
|
-
return snippetConversational(model, accessToken, provider, providerModelId, opts);
|
|
2205
|
-
} else {
|
|
2206
|
-
const snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? [] : [];
|
|
2207
|
-
return snippets.map((snippet) => {
|
|
2208
|
-
return {
|
|
2209
|
-
...snippet,
|
|
2210
|
-
content: addImportsToSnippet(snippet.content, model, accessToken)
|
|
2211
|
-
};
|
|
2212
|
-
});
|
|
2213
|
-
}
|
|
2214
|
-
}
|
|
2215
|
-
var addImportsToSnippet = (snippet, model, accessToken) => {
|
|
2216
|
-
if (snippet.includes("requests")) {
|
|
2217
|
-
snippet = `import requests
|
|
2218
|
-
|
|
2219
|
-
API_URL = "https://router.huggingface.co/hf-inference/models/${model.id}"
|
|
2220
|
-
headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
|
|
2221
|
-
|
|
2222
|
-
${snippet}`;
|
|
2223
|
-
}
|
|
2224
|
-
if (snippet.includes("base64")) {
|
|
2225
|
-
snippet = `import base64
|
|
2226
|
-
${snippet}`;
|
|
2227
|
-
}
|
|
2228
|
-
return snippet;
|
|
2229
|
-
};
|
|
2230
|
-
|
|
2231
|
-
// src/snippets/js.ts
|
|
2232
|
-
var js_exports = {};
|
|
2233
|
-
__export(js_exports, {
|
|
2234
|
-
getJsInferenceSnippet: () => getJsInferenceSnippet,
|
|
2235
|
-
jsSnippets: () => jsSnippets,
|
|
2236
|
-
snippetAutomaticSpeechRecognition: () => snippetAutomaticSpeechRecognition2,
|
|
2237
|
-
snippetBasic: () => snippetBasic3,
|
|
2238
|
-
snippetFile: () => snippetFile3,
|
|
2239
|
-
snippetTextGeneration: () => snippetTextGeneration2,
|
|
2240
|
-
snippetTextToAudio: () => snippetTextToAudio2,
|
|
2241
|
-
snippetTextToImage: () => snippetTextToImage2,
|
|
2242
|
-
snippetTextToVideo: () => snippetTextToVideo2,
|
|
2243
|
-
snippetZeroShotClassification: () => snippetZeroShotClassification3
|
|
2244
|
-
});
|
|
2245
|
-
var import_tasks5 = require("@huggingface/tasks");
|
|
2246
|
-
var import_tasks6 = require("@huggingface/tasks");
|
|
2247
|
-
var HFJS_METHODS = {
|
|
2248
|
-
"text-classification": "textClassification",
|
|
2249
|
-
"token-classification": "tokenClassification",
|
|
2250
|
-
"table-question-answering": "tableQuestionAnswering",
|
|
1823
|
+
var HF_JS_METHODS = {
|
|
1824
|
+
"automatic-speech-recognition": "automaticSpeechRecognition",
|
|
1825
|
+
"feature-extraction": "featureExtraction",
|
|
1826
|
+
"fill-mask": "fillMask",
|
|
1827
|
+
"image-classification": "imageClassification",
|
|
2251
1828
|
"question-answering": "questionAnswering",
|
|
2252
|
-
|
|
1829
|
+
"sentence-similarity": "sentenceSimilarity",
|
|
2253
1830
|
summarization: "summarization",
|
|
2254
|
-
"
|
|
1831
|
+
"table-question-answering": "tableQuestionAnswering",
|
|
1832
|
+
"text-classification": "textClassification",
|
|
2255
1833
|
"text-generation": "textGeneration",
|
|
2256
1834
|
"text2text-generation": "textGeneration",
|
|
2257
|
-
"
|
|
2258
|
-
|
|
1835
|
+
"token-classification": "tokenClassification",
|
|
1836
|
+
translation: "translation"
|
|
2259
1837
|
};
|
|
2260
|
-
var
|
|
2261
|
-
return
|
|
2262
|
-
|
|
2263
|
-
|
|
2264
|
-
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
const
|
|
2268
|
-
|
|
2269
|
-
|
|
2270
|
-
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
1838
|
+
var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
1839
|
+
return (model, accessToken, provider, providerModelId, opts) => {
|
|
1840
|
+
if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
|
|
1841
|
+
templateName = opts?.streaming ? "conversationalStream" : "conversational";
|
|
1842
|
+
inputPreparationFn = prepareConversationalInput;
|
|
1843
|
+
}
|
|
1844
|
+
const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: (0, import_tasks.getModelInputSnippet)(model) };
|
|
1845
|
+
const request2 = makeRequestOptionsFromResolvedModel(
|
|
1846
|
+
providerModelId ?? model.id,
|
|
1847
|
+
{ accessToken, provider, ...inputs },
|
|
1848
|
+
{ chatCompletion: templateName.includes("conversational"), task: model.pipeline_tag }
|
|
1849
|
+
);
|
|
1850
|
+
let providerInputs = inputs;
|
|
1851
|
+
const bodyAsObj = request2.info.body;
|
|
1852
|
+
if (typeof bodyAsObj === "string") {
|
|
1853
|
+
try {
|
|
1854
|
+
providerInputs = JSON.parse(bodyAsObj);
|
|
1855
|
+
} catch (e) {
|
|
1856
|
+
console.error("Failed to parse body as JSON", e);
|
|
2277
1857
|
}
|
|
2278
|
-
] : [],
|
|
2279
|
-
{
|
|
2280
|
-
client: "fetch",
|
|
2281
|
-
content: `async function query(data) {
|
|
2282
|
-
const response = await fetch(
|
|
2283
|
-
"https://router.huggingface.co/hf-inference/models/${model.id}",
|
|
2284
|
-
{
|
|
2285
|
-
headers: {
|
|
2286
|
-
Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
|
|
2287
|
-
"Content-Type": "application/json",
|
|
2288
|
-
},
|
|
2289
|
-
method: "POST",
|
|
2290
|
-
body: JSON.stringify(data),
|
|
2291
|
-
}
|
|
2292
|
-
);
|
|
2293
|
-
const result = await response.json();
|
|
2294
|
-
return result;
|
|
2295
|
-
}
|
|
2296
|
-
|
|
2297
|
-
query({"inputs": ${(0, import_tasks6.getModelInputSnippet)(model)}}).then((response) => {
|
|
2298
|
-
console.log(JSON.stringify(response));
|
|
2299
|
-
});`
|
|
2300
1858
|
}
|
|
2301
|
-
|
|
2302
|
-
|
|
2303
|
-
|
|
2304
|
-
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
1859
|
+
const params = {
|
|
1860
|
+
accessToken,
|
|
1861
|
+
authorizationHeader: request2.info.headers?.Authorization,
|
|
1862
|
+
baseUrl: removeSuffix(request2.url, "/chat/completions"),
|
|
1863
|
+
fullUrl: request2.url,
|
|
1864
|
+
inputs: {
|
|
1865
|
+
asObj: inputs,
|
|
1866
|
+
asCurlString: formatBody(inputs, "curl"),
|
|
1867
|
+
asJsonString: formatBody(inputs, "json"),
|
|
1868
|
+
asPythonString: formatBody(inputs, "python"),
|
|
1869
|
+
asTsString: formatBody(inputs, "ts")
|
|
1870
|
+
},
|
|
1871
|
+
providerInputs: {
|
|
1872
|
+
asObj: providerInputs,
|
|
1873
|
+
asCurlString: formatBody(providerInputs, "curl"),
|
|
1874
|
+
asJsonString: formatBody(providerInputs, "json"),
|
|
1875
|
+
asPythonString: formatBody(providerInputs, "python"),
|
|
1876
|
+
asTsString: formatBody(providerInputs, "ts")
|
|
1877
|
+
},
|
|
1878
|
+
model,
|
|
1879
|
+
provider,
|
|
1880
|
+
providerModelId: providerModelId ?? model.id
|
|
2313
1881
|
};
-
-
-
-
- if (streaming) {
- return [
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- let out = "";
-
- const stream = client.chatCompletionStream({
- model: "${model.id}",
- messages: ${messagesStr},
- provider: "${provider}",
- ${configStr}
- });
-
- for await (const chunk of stream) {
- if (chunk.choices && chunk.choices.length > 0) {
- const newContent = chunk.choices[0].delta.content;
- out += newContent;
- console.log(newContent);
- }
- }`
- },
- {
- client: "openai",
- content: `import { OpenAI } from "openai";
-
- const client = new OpenAI({
- baseURL: "${(0, import_tasks5.openAIbaseUrl)(provider)}",
- apiKey: "${accessToken || `{API_TOKEN}`}"
- });
-
- let out = "";
-
- const stream = await client.chat.completions.create({
- model: "${providerModelId ?? model.id}",
- messages: ${messagesStr},
- ${configStr}
- stream: true,
- });
-
- for await (const chunk of stream) {
- if (chunk.choices && chunk.choices.length > 0) {
- const newContent = chunk.choices[0].delta.content;
- out += newContent;
- console.log(newContent);
- }
- }`
+ return import_tasks.inferenceSnippetLanguages.map((language) => {
+ return CLIENTS[language].map((client) => {
+ if (!hasTemplate(language, client, templateName)) {
+ return;
  }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- client: "openai",
- content: `import { OpenAI } from "openai";
-
- const client = new OpenAI({
- baseURL: "${(0, import_tasks5.openAIbaseUrl)(provider)}",
- apiKey: "${accessToken || `{API_TOKEN}`}"
- });
+ const template = loadTemplate(language, client, templateName);
+ if (client === "huggingface_hub" && templateName.includes("basic")) {
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_PYTHON_METHODS)) {
+ return;
+ }
+ params["methodName"] = HF_PYTHON_METHODS[model.pipeline_tag];
+ }
+ if (client === "huggingface.js" && templateName.includes("basic")) {
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_JS_METHODS)) {
+ return;
+ }
+ params["methodName"] = HF_JS_METHODS[model.pipeline_tag];
+ }
+ let snippet = template(params).trim();
+ if (!snippet) {
+ return;
+ }
+ if (client === "huggingface_hub") {
+ const importSection = snippetImportPythonInferenceClient({ ...params });
+ snippet = `${importSection}

-
-
-
-
-
+ ${snippet}`;
+ } else if (client === "requests") {
+ const importSection = snippetImportRequests({
+ ...params,
+ importBase64: snippet.includes("base64"),
+ importJson: snippet.includes("json.")
+ });
+ snippet = `${importSection}

-
- `
+ ${snippet}`;
  }
-
-
-
-
- }
- };
- var snippetZeroShotClassification3 = (model, accessToken) => {
- return [
- {
- client: "fetch",
- content: `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );
- const result = await response.json();
- return result;
- }
-
- query({"inputs": ${(0, import_tasks6.getModelInputSnippet)(
- model
- )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
- console.log(JSON.stringify(response));
- });`
- }
- ];
+ return { language, client, content: snippet };
+ }).filter((snippet) => snippet !== void 0);
+ }).flat();
+ };
  };
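
The generator above fans out over every snippet language and its clients, skips clients that have no template for the requested task, and flattens the result into { language, client, content } records. A self-contained sketch of that shape (the CLIENTS table below is an assumption for illustration; the real mapping is defined elsewhere in this file):

    type InferenceSnippet = { language: string; client: string; content: string };

    // Assumed language -> clients mapping, for illustration only.
    const CLIENTS: Record<string, string[]> = {
        js: ["fetch", "huggingface.js"],
        python: ["requests", "huggingface_hub"],
        sh: ["curl"],
    };

    const generated: InferenceSnippet[] = Object.keys(CLIENTS)
        .map((language) =>
            CLIENTS[language]
                .map((client) => ({ language, client, content: `// ${client} example for ${language}` }))
                .filter((snippet) => snippet !== undefined)
        )
        .flat();

    console.log(generated.length); // 5
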
- var
- return
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const image = await client.textToImage({
- model: "${model.id}",
- inputs: ${(0, import_tasks6.getModelInputSnippet)(model)},
- parameters: { num_inference_steps: 5 },
- provider: "${provider}",
- });
- /// Use the generated image (it's a Blob)
- `
- },
- ...provider === "hf-inference" ? [
- {
- client: "fetch",
- content: `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );
- const result = await response.blob();
- return result;
- }
- query({"inputs": ${(0, import_tasks6.getModelInputSnippet)(model)}}).then((response) => {
- // Use image
- });`
- }
- ] : []
- ];
+ var prepareDocumentQuestionAnsweringInput = (model) => {
+ return JSON.parse((0, import_tasks.getModelInputSnippet)(model));
  };
- var
-
-
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const video = await client.textToVideo({
- model: "${model.id}",
- provider: "${provider}",
- inputs: ${(0, import_tasks6.getModelInputSnippet)(model)},
- parameters: { num_inference_steps: 5 },
- });
- // Use the generated video (it's a Blob)
- `
- }
- ] : [];
- };
- var snippetTextToAudio2 = (model, accessToken, provider) => {
- if (provider !== "hf-inference") {
- return [];
- }
- const commonSnippet = `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );`;
- if (model.library_name === "transformers") {
- return [
- {
- client: "fetch",
- content: commonSnippet + `
- const result = await response.blob();
- return result;
- }
- query({"inputs": ${(0, import_tasks6.getModelInputSnippet)(model)}}).then((response) => {
- // Returns a byte object of the Audio wavform. Use it directly!
- });`
- }
- ];
- } else {
- return [
- {
- client: "fetch",
- content: commonSnippet + `
- const result = await response.json();
- return result;
- }
-
- query({"inputs": ${(0, import_tasks6.getModelInputSnippet)(model)}}).then((response) => {
- console.log(JSON.stringify(response));
- });`
- }
- ];
- }
+ var prepareImageToImageInput = (model) => {
+ const data = JSON.parse((0, import_tasks.getModelInputSnippet)(model));
+ return { inputs: data.image, parameters: { prompt: data.prompt } };
  };
- var
- return
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- );
- const result = await response.json();
- return result;
+ var prepareConversationalInput = (model, opts) => {
+ return {
+ messages: opts?.messages ?? (0, import_tasks.getModelInputSnippet)(model),
+ ...opts?.temperature ? { temperature: opts?.temperature } : void 0,
+ max_tokens: opts?.max_tokens ?? 500,
+ ...opts?.top_p ? { top_p: opts?.top_p } : void 0
+ };
+ };
+ var snippets = {
+ "audio-classification": snippetGenerator("basicAudio"),
+ "audio-to-audio": snippetGenerator("basicAudio"),
+ "automatic-speech-recognition": snippetGenerator("basicAudio"),
+ "document-question-answering": snippetGenerator("documentQuestionAnswering", prepareDocumentQuestionAnsweringInput),
+ "feature-extraction": snippetGenerator("basic"),
+ "fill-mask": snippetGenerator("basic"),
+ "image-classification": snippetGenerator("basicImage"),
+ "image-segmentation": snippetGenerator("basicImage"),
+ "image-text-to-text": snippetGenerator("conversational"),
+ "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
+ "image-to-text": snippetGenerator("basicImage"),
+ "object-detection": snippetGenerator("basicImage"),
+ "question-answering": snippetGenerator("basic"),
+ "sentence-similarity": snippetGenerator("basic"),
+ summarization: snippetGenerator("basic"),
+ "tabular-classification": snippetGenerator("tabular"),
+ "tabular-regression": snippetGenerator("tabular"),
+ "table-question-answering": snippetGenerator("basic"),
+ "text-classification": snippetGenerator("basic"),
+ "text-generation": snippetGenerator("basic"),
+ "text-to-audio": snippetGenerator("textToAudio"),
+ "text-to-image": snippetGenerator("textToImage"),
+ "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-video": snippetGenerator("textToVideo"),
+ "text2text-generation": snippetGenerator("basic"),
+ "token-classification": snippetGenerator("basic"),
+ translation: snippetGenerator("basic"),
+ "zero-shot-classification": snippetGenerator("zeroShotClassification"),
+ "zero-shot-image-classification": snippetGenerator("zeroShotImageClassification")
+ };
+ function getInferenceSnippets(model, accessToken, provider, providerModelId, opts) {
+ return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? [] : [];
  }
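
Taken together, a hypothetical call to the new entry point could look like the following; getInferenceSnippets refers to the function added above, the model object is trimmed to the two fields the code actually reads (id and pipeline_tag), and the token value is a placeholder:

    const model = {
        id: "example-org/example-chat-model",
        pipeline_tag: "text-generation",
    };

    const results = getInferenceSnippets(model, "hf_xxx", "hf-inference");
    // e.g. [{ language: "python", client: "requests", content: "..." }, ...]
    for (const { language, client, content } of results) {
        console.log(`# ${language} / ${client}\n${content}`);
    }
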
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ function formatBody(obj, format) {
+ switch (format) {
+ case "curl":
+ return indentString(formatBody(obj, "json"));
+ case "json":
+ return JSON.stringify(obj, null, 4).split("\n").slice(1, -1).join("\n");
+ case "python":
+ return indentString(
+ Object.entries(obj).map(([key, value]) => {
+ const formattedValue = JSON.stringify(value, null, 4).replace(/"/g, '"');
+ return `${key}=${formattedValue},`;
+ }).join("\n")
+ );
+ case "ts":
+ return formatTsObject(obj).split("\n").slice(1, -1).join("\n");
+ default:
+ throw new Error(`Unsupported format: ${format}`);
+ }
+ }
+ function formatTsObject(obj, depth) {
+ depth = depth ?? 0;
+ if (typeof obj !== "object" || obj === null) {
+ return JSON.stringify(obj);
+ }
+ if (Array.isArray(obj)) {
+ const items = obj.map((item) => {
+ const formatted = formatTsObject(item, depth + 1);
+ return `${" ".repeat(4 * (depth + 1))}${formatted},`;
+ }).join("\n");
+ return `[
+ ${items}
+ ${" ".repeat(4 * depth)}]`;
+ }
+ const entries = Object.entries(obj);
+ const lines = entries.map(([key, value]) => {
+ const formattedValue = formatTsObject(value, depth + 1);
+ const keyStr = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/.test(key) ? key : `"${key}"`;
+ return `${" ".repeat(4 * (depth + 1))}${keyStr}: ${formattedValue},`;
+ }).join("\n");
+ return `{
+ ${lines}
+ ${" ".repeat(4 * depth)}}`;
+ }
+ function indentString(str) {
+ return str.split("\n").map((line) => " ".repeat(4) + line).join("\n");
+ }
+ function removeSuffix(str, suffix) {
+ return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
  }
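
For reference, the "json" branch of formatBody above is equivalent to the following self-contained transformation (pretty-print, then drop the outer braces); the other branches reuse it with extra indentation for curl, emit key=value pairs for Python, and unquoted keys for TypeScript. The example object is made up:

    const body = { inputs: "hello", parameters: { top_k: 3 } };
    // Same logic as the "json" case above.
    const asJsonString = JSON.stringify(body, null, 4).split("\n").slice(1, -1).join("\n");
    console.log(asJsonString);
    //     "inputs": "hello",
    //     "parameters": {
    //         "top_k": 3
    //     }
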
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {