@huggingface/inference 3.5.2 → 3.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/index.cjs +364 -970
  2. package/dist/index.js +366 -981
  3. package/dist/src/index.d.ts.map +1 -1
  4. package/dist/src/lib/makeRequestOptions.d.ts +16 -1
  5. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  6. package/dist/src/providers/novita.d.ts.map +1 -1
  7. package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
  8. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
  9. package/dist/src/snippets/index.d.ts +1 -4
  10. package/dist/src/snippets/index.d.ts.map +1 -1
  11. package/dist/src/snippets/templates.exported.d.ts +2 -0
  12. package/dist/src/snippets/templates.exported.d.ts.map +1 -0
  13. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  14. package/package.json +9 -5
  15. package/src/index.ts +1 -1
  16. package/src/lib/makeRequestOptions.ts +37 -10
  17. package/src/providers/fireworks-ai.ts +1 -1
  18. package/src/providers/hf-inference.ts +1 -1
  19. package/src/providers/nebius.ts +3 -3
  20. package/src/providers/novita.ts +7 -6
  21. package/src/providers/sambanova.ts +1 -1
  22. package/src/providers/together.ts +3 -3
  23. package/src/snippets/getInferenceSnippets.ts +380 -0
  24. package/src/snippets/index.ts +1 -5
  25. package/src/snippets/templates.exported.ts +72 -0
  26. package/src/tasks/cv/textToVideo.ts +25 -5
  27. package/src/vendor/fetch-event-source/LICENSE +21 -0
  28. package/dist/src/snippets/curl.d.ts +0 -17
  29. package/dist/src/snippets/curl.d.ts.map +0 -1
  30. package/dist/src/snippets/js.d.ts +0 -21
  31. package/dist/src/snippets/js.d.ts.map +0 -1
  32. package/dist/src/snippets/python.d.ts +0 -4
  33. package/dist/src/snippets/python.d.ts.map +0 -1
  34. package/src/snippets/curl.ts +0 -177
  35. package/src/snippets/js.ts +0 -475
  36. package/src/snippets/python.ts +0 -563
package/dist/index.js CHANGED
@@ -141,7 +141,7 @@ var makeHeaders5 = (params) => {
  return { Authorization: `Bearer ${params.accessToken}` };
  };
  var makeUrl5 = (params) => {
- if (params.task === "text-generation" && params.chatCompletion) {
+ if (params.chatCompletion) {
  return `${params.baseUrl}/inference/v1/chat/completions`;
  }
  return `${params.baseUrl}/inference`;
@@ -167,7 +167,7 @@ var makeUrl6 = (params) => {
  if (params.task && ["feature-extraction", "sentence-similarity"].includes(params.task)) {
  return `${params.baseUrl}/pipeline/${params.task}/${params.model}`;
  }
- if (params.task === "text-generation" && params.chatCompletion) {
+ if (params.chatCompletion) {
  return `${params.baseUrl}/models/${params.model}/v1/chat/completions`;
  }
  return `${params.baseUrl}/models/${params.model}`;
@@ -218,10 +218,10 @@ var makeUrl8 = (params) => {
  if (params.task === "text-to-image") {
  return `${params.baseUrl}/v1/images/generations`;
  }
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v1/chat/completions`;
+ }
  if (params.task === "text-generation") {
- if (params.chatCompletion) {
- return `${params.baseUrl}/v1/chat/completions`;
- }
  return `${params.baseUrl}/v1/completions`;
  }
  return params.baseUrl;
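
The hunks above (and the matching makeUrl changes further down) make URL routing key off chatCompletion alone instead of also requiring task === "text-generation", so conversational requests for other pipelines, notably image-text-to-text, are also sent to the provider's OpenAI-compatible /v1/chat/completions route. A minimal usage sketch of that behavior, with an illustrative model ID that is not taken from this diff:

import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient(process.env.HF_TOKEN);

// An image-text-to-text (VLM) model: in 3.6.x the chat-completions route is
// chosen because chatCompletion is set, regardless of the pipeline task.
const out = await client.chatCompletion({
  provider: "fireworks-ai",
  model: "Qwen/Qwen2-VL-7B-Instruct", // illustrative model ID
  messages: [{ role: "user", content: "Describe the weather in one sentence." }],
});
console.log(out.choices[0].message);
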
@@ -234,7 +234,7 @@ var NEBIUS_CONFIG = {
  };

  // src/providers/novita.ts
- var NOVITA_API_BASE_URL = "https://api.novita.ai/v3/openai";
+ var NOVITA_API_BASE_URL = "https://api.novita.ai";
  var makeBody9 = (params) => {
  return {
  ...params.args,
@@ -245,11 +245,12 @@ var makeHeaders9 = (params) => {
  return { Authorization: `Bearer ${params.accessToken}` };
  };
  var makeUrl9 = (params) => {
- if (params.task === "text-generation") {
- if (params.chatCompletion) {
- return `${params.baseUrl}/chat/completions`;
- }
- return `${params.baseUrl}/completions`;
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v3/openai/chat/completions`;
+ } else if (params.task === "text-generation") {
+ return `${params.baseUrl}/v3/openai/completions`;
+ } else if (params.task === "text-to-video") {
+ return `${params.baseUrl}/v3/hf/${params.model}`;
  }
  return params.baseUrl;
  };
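
With the Novita base URL trimmed to https://api.novita.ai, the rewritten makeUrl9 above appends the full route itself. When calling Novita directly with a provider key, the resulting URLs have roughly these shapes (when routed through the Hugging Face proxy, the baseUrl is the router URL instead, as the authMethod check further down shows):

// chatCompletion           -> https://api.novita.ai/v3/openai/chat/completions
// task "text-generation"   -> https://api.novita.ai/v3/openai/completions
// task "text-to-video"     -> https://api.novita.ai/v3/hf/{model}
// any other task           -> https://api.novita.ai
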
@@ -296,7 +297,7 @@ var makeHeaders11 = (params) => {
  return { Authorization: `Bearer ${params.accessToken}` };
  };
  var makeUrl11 = (params) => {
- if (params.task === "text-generation" && params.chatCompletion) {
+ if (params.chatCompletion) {
  return `${params.baseUrl}/v1/chat/completions`;
  }
  return params.baseUrl;
@@ -323,10 +324,10 @@ var makeUrl12 = (params) => {
  if (params.task === "text-to-image") {
  return `${params.baseUrl}/v1/images/generations`;
  }
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v1/chat/completions`;
+ }
  if (params.task === "text-generation") {
- if (params.chatCompletion) {
- return `${params.baseUrl}/v1/chat/completions`;
- }
  return `${params.baseUrl}/v1/completions`;
  }
  return params.baseUrl;
@@ -373,7 +374,7 @@ function isUrl(modelOrUrl) {

  // package.json
  var name = "@huggingface/inference";
- var version = "3.5.2";
+ var version = "3.6.1";

  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -461,11 +462,11 @@ var providerConfigs = {
  together: TOGETHER_CONFIG
  };
  async function makeRequestOptions(args, options) {
- const { accessToken, endpointUrl, provider: maybeProvider, model: maybeModel, ...remainingArgs } = args;
+ const { provider: maybeProvider, model: maybeModel } = args;
  const provider = maybeProvider ?? "hf-inference";
  const providerConfig = providerConfigs[provider];
- const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
- if (endpointUrl && provider !== "hf-inference") {
+ const { task, chatCompletion: chatCompletion2 } = options ?? {};
+ if (args.endpointUrl && provider !== "hf-inference") {
  throw new Error(`Cannot use endpointUrl with a third-party provider.`);
  }
  if (maybeModel && isUrl(maybeModel)) {
@@ -481,17 +482,21 @@ async function makeRequestOptions(args, options) {
  throw new Error(`Provider ${provider} requires a model ID to be passed directly.`);
  }
  const hfModel = maybeModel ?? await loadDefaultModel(task);
- const model = providerConfig.clientSideRoutingOnly ? (
+ const resolvedModel = providerConfig.clientSideRoutingOnly ? (
  // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
  removeProviderPrefix(maybeModel, provider)
- ) : (
- // For closed-models API providers, one needs to pass the model ID directly (e.g. "gpt-3.5-turbo")
- await getProviderModelId({ model: hfModel, provider }, args, {
- task,
- chatCompletion: chatCompletion2,
- fetch: options?.fetch
- })
- );
+ ) : await getProviderModelId({ model: hfModel, provider }, args, {
+ task,
+ chatCompletion: chatCompletion2,
+ fetch: options?.fetch
+ });
+ return makeRequestOptionsFromResolvedModel(resolvedModel, args, options);
+ }
+ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
+ const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
+ const provider = maybeProvider ?? "hf-inference";
+ const providerConfig = providerConfigs[provider];
+ const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
  const authMethod = (() => {
  if (providerConfig.clientSideRoutingOnly) {
  if (accessToken && accessToken.startsWith("hf_")) {
@@ -509,7 +514,7 @@ async function makeRequestOptions(args, options) {
  })();
  const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : providerConfig.makeUrl({
  baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.baseUrl,
- model,
+ model: resolvedModel,
  chatCompletion: chatCompletion2,
  task
  });
@@ -527,7 +532,7 @@ async function makeRequestOptions(args, options) {
  const body = binary ? args.data : JSON.stringify(
  providerConfig.makeBody({
  args: remainingArgs,
- model,
+ model: resolvedModel,
  task,
  chatCompletion: chatCompletion2
  })
@@ -1161,14 +1166,14 @@ async function zeroShotImageClassification(args, options) {
  }

  // src/tasks/cv/textToVideo.ts
- var SUPPORTED_PROVIDERS = ["fal-ai", "replicate"];
+ var SUPPORTED_PROVIDERS = ["fal-ai", "novita", "replicate"];
  async function textToVideo(args, options) {
  if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
  throw new Error(
  `textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
  );
  }
- const payload = args.provider === "fal-ai" || args.provider === "replicate" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
+ const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
  const res = await request(payload, {
  ...options,
  task: "text-to-video"
@@ -1180,6 +1185,13 @@ async function textToVideo(args, options) {
  }
  const urlResponse = await fetch(res.video.url);
  return await urlResponse.blob();
+ } else if (args.provider === "novita") {
+ const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected { video: { video_url: string } }");
+ }
+ const urlResponse = await fetch(res.video.video_url);
+ return await urlResponse.blob();
  } else {
  const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
  if (!isValidOutput) {
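
Given the new Novita branch above, text-to-video can now be requested through the client as in the existing templates; the result is a Blob fetched from the { video: { video_url } } payload validated in that branch. A minimal usage sketch, with an illustrative model ID that must be replaced by a video model actually served by Novita:

import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient(process.env.HF_TOKEN);

// Returns a Blob containing the generated video.
const video = await client.textToVideo({
  provider: "novita",
  model: "Wan-AI/Wan2.1-T2V-14B", // illustrative model ID
  inputs: "A young man walking on the street",
});
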
@@ -1588,997 +1600,370 @@ var INFERENCE_PROVIDERS = [
1588
1600
  // src/snippets/index.ts
1589
1601
  var snippets_exports = {};
1590
1602
  __export(snippets_exports, {
1591
- curl: () => curl_exports,
1592
- js: () => js_exports,
1593
- python: () => python_exports
1603
+ getInferenceSnippets: () => getInferenceSnippets
1594
1604
  });
1595
1605
 
1596
- // src/snippets/curl.ts
1597
- var curl_exports = {};
1598
- __export(curl_exports, {
1599
- curlSnippets: () => curlSnippets,
1600
- getCurlInferenceSnippet: () => getCurlInferenceSnippet,
1601
- snippetBasic: () => snippetBasic,
1602
- snippetFile: () => snippetFile,
1603
- snippetTextGeneration: () => snippetTextGeneration,
1604
- snippetZeroShotClassification: () => snippetZeroShotClassification
1605
- });
1606
- import { HF_HUB_INFERENCE_PROXY_TEMPLATE as HF_HUB_INFERENCE_PROXY_TEMPLATE2 } from "@huggingface/tasks";
1606
+ // src/snippets/getInferenceSnippets.ts
1607
1607
  import {
1608
- getModelInputSnippet,
1609
- stringifyGenerationConfig,
1610
- stringifyMessages
1608
+ inferenceSnippetLanguages,
1609
+ getModelInputSnippet
1611
1610
  } from "@huggingface/tasks";
1612
- var snippetBasic = (model, accessToken, provider) => {
1613
- if (provider !== "hf-inference") {
1614
- return [];
1615
- }
1616
- return [
1617
- {
1618
- client: "curl",
1619
- content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
1620
- -X POST \\
1621
- -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
1622
- -H 'Content-Type: application/json' \\
1623
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`
1611
+ import { Template } from "@huggingface/jinja";
1612
+
1613
+ // src/snippets/templates.exported.ts
1614
+ var templates = {
1615
+ "js": {
1616
+ "fetch": {
1617
+ "basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1618
+ "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1619
+ "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1620
+ "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
1621
+ "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
1622
+ "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
1623
+ },
1624
+ "huggingface.js": {
1625
+ "basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1626
+ "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1627
+ "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
1628
+ "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1629
+ "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = await client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}',
1630
+ "textToImage": `import { InferenceClient } from "@huggingface/inference";
1631
+
1632
+ const client = new InferenceClient("{{ accessToken }}");
1633
+
1634
+ const image = await client.textToImage({
1635
+ provider: "{{ provider }}",
1636
+ model: "{{ model.id }}",
1637
+ inputs: {{ inputs.asObj.inputs }},
1638
+ parameters: { num_inference_steps: 5 },
1639
+ });
1640
+ /// Use the generated image (it's a Blob)`,
1641
+ "textToVideo": `import { InferenceClient } from "@huggingface/inference";
1642
+
1643
+ const client = new InferenceClient("{{ accessToken }}");
1644
+
1645
+ const image = await client.textToVideo({
1646
+ provider: "{{ provider }}",
1647
+ model: "{{ model.id }}",
1648
+ inputs: {{ inputs.asObj.inputs }},
1649
+ });
1650
+ // Use the generated video (it's a Blob)`
1651
+ },
1652
+ "openai": {
1653
+ "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
1654
+ "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nlet out = "";\n\nconst stream = await client.chat.completions.create({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}'
1624
1655
  }
1625
- ];
1626
- };
1627
- var snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
1628
- if (model.tags.includes("conversational")) {
1629
- const baseUrl = provider === "hf-inference" ? `https://router.huggingface.co/hf-inference/models/${model.id}/v1/chat/completions` : HF_HUB_INFERENCE_PROXY_TEMPLATE2.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
1630
- const modelId = providerModelId ?? model.id;
1631
- const streaming = opts?.streaming ?? true;
1632
- const exampleMessages = getModelInputSnippet(model);
1633
- const messages = opts?.messages ?? exampleMessages;
1634
- const config = {
1635
- ...opts?.temperature ? { temperature: opts.temperature } : void 0,
1636
- max_tokens: opts?.max_tokens ?? 500,
1637
- ...opts?.top_p ? { top_p: opts.top_p } : void 0
1638
- };
1639
- return [
1640
- {
1641
- client: "curl",
1642
- content: `curl '${baseUrl}' \\
1643
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
1644
- -H 'Content-Type: application/json' \\
1645
- --data '{
1646
- "model": "${modelId}",
1647
- "messages": ${stringifyMessages(messages, {
1648
- indent: " ",
1649
- attributeKeyQuotes: true,
1650
- customContentEscaper: (str) => str.replace(/'/g, "'\\''")
1651
- })},
1652
- ${stringifyGenerationConfig(config, {
1653
- indent: "\n ",
1654
- attributeKeyQuotes: true,
1655
- attributeValueConnector: ": "
1656
- })}
1657
- "stream": ${!!streaming}
1658
- }'`
1659
- }
1660
- ];
1661
- } else {
1662
- return snippetBasic(model, accessToken, provider);
1663
- }
1664
- };
1665
- var snippetZeroShotClassification = (model, accessToken, provider) => {
1666
- if (provider !== "hf-inference") {
1667
- return [];
1668
- }
1669
- return [
1670
- {
1671
- client: "curl",
1672
- content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
1673
- -X POST \\
1674
- -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
1675
- -H 'Content-Type: application/json' \\
1676
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`
1656
+ },
1657
+ "python": {
1658
+ "fal_client": {
1659
+ "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
1660
+ },
1661
+ "huggingface_hub": {
1662
+ "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
1663
+ "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1664
+ "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
1665
+ "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1666
+ "conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
1667
+ "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
1668
+ "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
1669
+ "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n)',
1670
+ "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
1671
+ "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
1672
+ },
1673
+ "openai": {
1674
+ "conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
1675
+ "conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
1676
+ },
1677
+ "requests": {
1678
+ "basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
1679
+ "basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1680
+ "basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
1681
+ "conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
1682
+ "conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
1683
+ "documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
1684
+ "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
1685
+ "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {"Authorization": "{{ authorizationHeader }}"}',
1686
+ "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
1687
+ "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
1688
+ "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
1689
+ "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
1690
+ "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
1677
1691
  }
1678
- ];
1679
- };
1680
- var snippetFile = (model, accessToken, provider) => {
1681
- if (provider !== "hf-inference") {
1682
- return [];
1683
- }
1684
- return [
1685
- {
1686
- client: "curl",
1687
- content: `curl https://router.huggingface.co/hf-inference/models/${model.id} \\
1688
- -X POST \\
1689
- --data-binary '@${getModelInputSnippet(model, true, true)}' \\
1690
- -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`
1692
+ },
1693
+ "sh": {
1694
+ "curl": {
1695
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
1696
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1697
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
1698
+ "conversational": `curl {{ fullUrl }} \\
1699
+ -H 'Authorization: {{ authorizationHeader }}' \\
1700
+ -H 'Content-Type: application/json' \\
1701
+ -d '{
1702
+ {{ providerInputs.asCurlString }},
1703
+ "stream": false
1704
+ }'`,
1705
+ "conversationalStream": `curl {{ fullUrl }} \\
1706
+ -H 'Authorization: {{ authorizationHeader }}' \\
1707
+ -H 'Content-Type: application/json' \\
1708
+ -d '{
1709
+ {{ providerInputs.asCurlString }},
1710
+ "stream": true
1711
+ }'`,
1712
+ "zeroShotClassification": `curl {{ fullUrl }} \\
1713
+ -X POST \\
1714
+ -d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
1715
+ -H 'Content-Type: application/json' \\
1716
+ -H 'Authorization: {{ authorizationHeader }}'`
1691
1717
  }
1692
- ];
1693
- };
1694
- var curlSnippets = {
1695
- // Same order as in tasks/src/pipelines.ts
1696
- "text-classification": snippetBasic,
1697
- "token-classification": snippetBasic,
1698
- "table-question-answering": snippetBasic,
1699
- "question-answering": snippetBasic,
1700
- "zero-shot-classification": snippetZeroShotClassification,
1701
- translation: snippetBasic,
1702
- summarization: snippetBasic,
1703
- "feature-extraction": snippetBasic,
1704
- "text-generation": snippetTextGeneration,
1705
- "image-text-to-text": snippetTextGeneration,
1706
- "text2text-generation": snippetBasic,
1707
- "fill-mask": snippetBasic,
1708
- "sentence-similarity": snippetBasic,
1709
- "automatic-speech-recognition": snippetFile,
1710
- "text-to-image": snippetBasic,
1711
- "text-to-speech": snippetBasic,
1712
- "text-to-audio": snippetBasic,
1713
- "audio-to-audio": snippetFile,
1714
- "audio-classification": snippetFile,
1715
- "image-classification": snippetFile,
1716
- "image-to-text": snippetFile,
1717
- "object-detection": snippetFile,
1718
- "image-segmentation": snippetFile
1719
- };
1720
- function getCurlInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
1721
- return model.pipeline_tag && model.pipeline_tag in curlSnippets ? curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? [] : [];
1722
- }
1718
+ }
1719
+ };
1723
1720
 
1724
- // src/snippets/python.ts
1725
- var python_exports = {};
1726
- __export(python_exports, {
1727
- getPythonInferenceSnippet: () => getPythonInferenceSnippet
1728
- });
1729
- import { openAIbaseUrl } from "@huggingface/tasks";
1730
- import {
1731
- getModelInputSnippet as getModelInputSnippet2,
1732
- stringifyGenerationConfig as stringifyGenerationConfig2,
1733
- stringifyMessages as stringifyMessages2
1734
- } from "@huggingface/tasks";
1735
- var HFH_INFERENCE_CLIENT_METHODS = {
1721
+ // src/snippets/getInferenceSnippets.ts
1722
+ var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
1723
+ var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
1724
+ var SH_CLIENTS = ["curl"];
1725
+ var CLIENTS = {
1726
+ js: [...JS_CLIENTS],
1727
+ python: [...PYTHON_CLIENTS],
1728
+ sh: [...SH_CLIENTS]
1729
+ };
1730
+ var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
1731
+ var loadTemplate = (language, client, templateName) => {
1732
+ const template = templates[language]?.[client]?.[templateName];
1733
+ if (!template) {
1734
+ throw new Error(`Template not found: ${language}/${client}/${templateName}`);
1735
+ }
1736
+ return (data) => new Template(template).render({ ...data });
1737
+ };
1738
+ var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
1739
+ var snippetImportRequests = loadTemplate("python", "requests", "importRequests");
1740
+ var HF_PYTHON_METHODS = {
1736
1741
  "audio-classification": "audio_classification",
1737
1742
  "audio-to-audio": "audio_to_audio",
1738
1743
  "automatic-speech-recognition": "automatic_speech_recognition",
1739
- "text-to-speech": "text_to_speech",
1744
+ "document-question-answering": "document_question_answering",
1745
+ "feature-extraction": "feature_extraction",
1746
+ "fill-mask": "fill_mask",
1740
1747
  "image-classification": "image_classification",
1741
1748
  "image-segmentation": "image_segmentation",
1742
1749
  "image-to-image": "image_to_image",
1743
1750
  "image-to-text": "image_to_text",
1744
1751
  "object-detection": "object_detection",
1745
- "text-to-image": "text_to_image",
1746
- "text-to-video": "text_to_video",
1747
- "zero-shot-image-classification": "zero_shot_image_classification",
1748
- "document-question-answering": "document_question_answering",
1749
- "visual-question-answering": "visual_question_answering",
1750
- "feature-extraction": "feature_extraction",
1751
- "fill-mask": "fill_mask",
1752
1752
  "question-answering": "question_answering",
1753
1753
  "sentence-similarity": "sentence_similarity",
1754
1754
  summarization: "summarization",
1755
1755
  "table-question-answering": "table_question_answering",
1756
+ "tabular-classification": "tabular_classification",
1757
+ "tabular-regression": "tabular_regression",
1756
1758
  "text-classification": "text_classification",
1757
1759
  "text-generation": "text_generation",
1760
+ "text-to-image": "text_to_image",
1761
+ "text-to-speech": "text_to_speech",
1762
+ "text-to-video": "text_to_video",
1758
1763
  "token-classification": "token_classification",
1759
1764
  translation: "translation",
1765
+ "visual-question-answering": "visual_question_answering",
1760
1766
  "zero-shot-classification": "zero_shot_classification",
1761
- "tabular-classification": "tabular_classification",
1762
- "tabular-regression": "tabular_regression"
1763
- };
1764
- var snippetImportInferenceClient = (accessToken, provider) => `from huggingface_hub import InferenceClient
1765
-
1766
- client = InferenceClient(
1767
- provider="${provider}",
1768
- api_key="${accessToken || "{API_TOKEN}"}",
1769
- )`;
1770
- var snippetConversational = (model, accessToken, provider, providerModelId, opts) => {
1771
- const streaming = opts?.streaming ?? true;
1772
- const exampleMessages = getModelInputSnippet2(model);
1773
- const messages = opts?.messages ?? exampleMessages;
1774
- const messagesStr = stringifyMessages2(messages, { attributeKeyQuotes: true });
1775
- const config = {
1776
- ...opts?.temperature ? { temperature: opts.temperature } : void 0,
1777
- max_tokens: opts?.max_tokens ?? 500,
1778
- ...opts?.top_p ? { top_p: opts.top_p } : void 0
1779
- };
1780
- const configStr = stringifyGenerationConfig2(config, {
1781
- indent: "\n ",
1782
- attributeValueConnector: "="
1783
- });
1784
- if (streaming) {
1785
- return [
1786
- {
1787
- client: "huggingface_hub",
1788
- content: `${snippetImportInferenceClient(accessToken, provider)}
1789
-
1790
- messages = ${messagesStr}
1791
-
1792
- stream = client.chat.completions.create(
1793
- model="${model.id}",
1794
- messages=messages,
1795
- ${configStr}
1796
- stream=True,
1797
- )
1798
-
1799
- for chunk in stream:
1800
- print(chunk.choices[0].delta.content, end="")`
1801
- },
1802
- {
1803
- client: "openai",
1804
- content: `from openai import OpenAI
1805
-
1806
- client = OpenAI(
1807
- base_url="${openAIbaseUrl(provider)}",
1808
- api_key="${accessToken || "{API_TOKEN}"}"
1809
- )
1810
-
1811
- messages = ${messagesStr}
1812
-
1813
- stream = client.chat.completions.create(
1814
- model="${providerModelId ?? model.id}",
1815
- messages=messages,
1816
- ${configStr}
1817
- stream=True
1818
- )
1819
-
1820
- for chunk in stream:
1821
- print(chunk.choices[0].delta.content, end="")`
1822
- }
1823
- ];
1824
- } else {
1825
- return [
1826
- {
1827
- client: "huggingface_hub",
1828
- content: `${snippetImportInferenceClient(accessToken, provider)}
1829
-
1830
- messages = ${messagesStr}
1831
-
1832
- completion = client.chat.completions.create(
1833
- model="${model.id}",
1834
- messages=messages,
1835
- ${configStr}
1836
- )
1837
-
1838
- print(completion.choices[0].message)`
1839
- },
1840
- {
1841
- client: "openai",
1842
- content: `from openai import OpenAI
1843
-
1844
- client = OpenAI(
1845
- base_url="${openAIbaseUrl(provider)}",
1846
- api_key="${accessToken || "{API_TOKEN}"}"
1847
- )
1848
-
1849
- messages = ${messagesStr}
1850
-
1851
- completion = client.chat.completions.create(
1852
- model="${providerModelId ?? model.id}",
1853
- messages=messages,
1854
- ${configStr}
1855
- )
1856
-
1857
- print(completion.choices[0].message)`
1858
- }
1859
- ];
1860
- }
1861
- };
1862
- var snippetZeroShotClassification2 = (model) => {
1863
- return [
1864
- {
1865
- client: "requests",
1866
- content: `def query(payload):
1867
- response = requests.post(API_URL, headers=headers, json=payload)
1868
- return response.json()
1869
-
1870
- output = query({
1871
- "inputs": ${getModelInputSnippet2(model)},
1872
- "parameters": {"candidate_labels": ["refund", "legal", "faq"]},
1873
- })`
1874
- }
1875
- ];
1876
- };
1877
- var snippetZeroShotImageClassification = (model) => {
1878
- return [
1879
- {
1880
- client: "requests",
1881
- content: `def query(data):
1882
- with open(data["image_path"], "rb") as f:
1883
- img = f.read()
1884
- payload={
1885
- "parameters": data["parameters"],
1886
- "inputs": base64.b64encode(img).decode("utf-8")
1887
- }
1888
- response = requests.post(API_URL, headers=headers, json=payload)
1889
- return response.json()
1890
-
1891
- output = query({
1892
- "image_path": ${getModelInputSnippet2(model)},
1893
- "parameters": {"candidate_labels": ["cat", "dog", "llama"]},
1894
- })`
1895
- }
1896
- ];
1897
- };
1898
- var snippetBasic2 = (model, accessToken, provider) => {
1899
- return [
1900
- ...model.pipeline_tag && model.pipeline_tag in HFH_INFERENCE_CLIENT_METHODS ? [
1901
- {
1902
- client: "huggingface_hub",
1903
- content: `${snippetImportInferenceClient(accessToken, provider)}
1904
-
1905
- result = client.${HFH_INFERENCE_CLIENT_METHODS[model.pipeline_tag]}(
1906
- inputs=${getModelInputSnippet2(model)},
1907
- model="${model.id}",
1908
- )
1909
-
1910
- print(result)
1911
- `
1912
- }
1913
- ] : [],
1914
- {
1915
- client: "requests",
1916
- content: `def query(payload):
1917
- response = requests.post(API_URL, headers=headers, json=payload)
1918
- return response.json()
1919
-
1920
- output = query({
1921
- "inputs": ${getModelInputSnippet2(model)},
1922
- })`
1923
- }
1924
- ];
1925
- };
1926
- var snippetFile2 = (model) => {
1927
- return [
1928
- {
1929
- client: "requests",
1930
- content: `def query(filename):
1931
- with open(filename, "rb") as f:
1932
- data = f.read()
1933
- response = requests.post(API_URL, headers=headers, data=data)
1934
- return response.json()
1935
-
1936
- output = query(${getModelInputSnippet2(model)})`
1937
- }
1938
- ];
1939
- };
1940
- var snippetTextToImage = (model, accessToken, provider, providerModelId) => {
1941
- return [
1942
- {
1943
- client: "huggingface_hub",
1944
- content: `${snippetImportInferenceClient(accessToken, provider)}
1945
-
1946
- # output is a PIL.Image object
1947
- image = client.text_to_image(
1948
- ${getModelInputSnippet2(model)},
1949
- model="${model.id}",
1950
- )`
1951
- },
1952
- ...provider === "fal-ai" ? [
1953
- {
1954
- client: "fal-client",
1955
- content: `import fal_client
1956
-
1957
- result = fal_client.subscribe(
1958
- "${providerModelId ?? model.id}",
1959
- arguments={
1960
- "prompt": ${getModelInputSnippet2(model)},
1961
- },
1962
- )
1963
- print(result)
1964
- `
1965
- }
1966
- ] : [],
1967
- ...provider === "hf-inference" ? [
1968
- {
1969
- client: "requests",
1970
- content: `def query(payload):
1971
- response = requests.post(API_URL, headers=headers, json=payload)
1972
- return response.content
1973
-
1974
- image_bytes = query({
1975
- "inputs": ${getModelInputSnippet2(model)},
1976
- })
1977
-
1978
- # You can access the image with PIL.Image for example
1979
- import io
1980
- from PIL import Image
1981
- image = Image.open(io.BytesIO(image_bytes))`
1982
- }
1983
- ] : []
1984
- ];
1985
- };
1986
- var snippetTextToVideo = (model, accessToken, provider) => {
1987
- return ["fal-ai", "replicate"].includes(provider) ? [
1988
- {
1989
- client: "huggingface_hub",
1990
- content: `${snippetImportInferenceClient(accessToken, provider)}
1991
-
1992
- video = client.text_to_video(
1993
- ${getModelInputSnippet2(model)},
1994
- model="${model.id}",
1995
- )`
1996
- }
1997
- ] : [];
1998
- };
1999
- var snippetTabular = (model) => {
2000
- return [
2001
- {
2002
- client: "requests",
2003
- content: `def query(payload):
2004
- response = requests.post(API_URL, headers=headers, json=payload)
2005
- return response.content
2006
-
2007
- response = query({
2008
- "inputs": {"data": ${getModelInputSnippet2(model)}},
2009
- })`
2010
- }
2011
- ];
2012
- };
2013
- var snippetTextToAudio = (model) => {
2014
- if (model.library_name === "transformers") {
2015
- return [
2016
- {
2017
- client: "requests",
2018
- content: `def query(payload):
2019
- response = requests.post(API_URL, headers=headers, json=payload)
2020
- return response.content
2021
-
2022
- audio_bytes = query({
2023
- "inputs": ${getModelInputSnippet2(model)},
2024
- })
2025
- # You can access the audio with IPython.display for example
2026
- from IPython.display import Audio
2027
- Audio(audio_bytes)`
2028
- }
2029
- ];
2030
- } else {
2031
- return [
2032
- {
2033
- client: "requests",
2034
- content: `def query(payload):
2035
- response = requests.post(API_URL, headers=headers, json=payload)
2036
- return response.json()
2037
-
2038
- audio, sampling_rate = query({
2039
- "inputs": ${getModelInputSnippet2(model)},
2040
- })
2041
- # You can access the audio with IPython.display for example
2042
- from IPython.display import Audio
2043
- Audio(audio, rate=sampling_rate)`
2044
- }
2045
- ];
2046
- }
1767
+ "zero-shot-image-classification": "zero_shot_image_classification"
2047
1768
  };
2048
- var snippetAutomaticSpeechRecognition = (model, accessToken, provider) => {
2049
- return [
2050
- {
2051
- client: "huggingface_hub",
2052
- content: `${snippetImportInferenceClient(accessToken, provider)}
2053
- output = client.automatic_speech_recognition(${getModelInputSnippet2(model)}, model="${model.id}")`
2054
- },
2055
- snippetFile2(model)[0]
2056
- ];
2057
- };
2058
- var snippetDocumentQuestionAnswering = (model, accessToken, provider) => {
2059
- const inputsAsStr = getModelInputSnippet2(model);
2060
- const inputsAsObj = JSON.parse(inputsAsStr);
2061
- return [
2062
- {
2063
- client: "huggingface_hub",
2064
- content: `${snippetImportInferenceClient(accessToken, provider)}
2065
- output = client.document_question_answering(
2066
- "${inputsAsObj.image}",
2067
- question="${inputsAsObj.question}",
2068
- model="${model.id}",
2069
- )`
2070
- },
2071
- {
2072
- client: "requests",
2073
- content: `def query(payload):
2074
- with open(payload["image"], "rb") as f:
2075
- img = f.read()
2076
- payload["image"] = base64.b64encode(img).decode("utf-8")
2077
- response = requests.post(API_URL, headers=headers, json=payload)
2078
- return response.json()
2079
-
2080
- output = query({
2081
- "inputs": ${inputsAsStr},
2082
- })`
2083
- }
2084
- ];
2085
- };
2086
- var snippetImageToImage = (model, accessToken, provider) => {
2087
- const inputsAsStr = getModelInputSnippet2(model);
2088
- const inputsAsObj = JSON.parse(inputsAsStr);
2089
- return [
2090
- {
2091
- client: "huggingface_hub",
2092
- content: `${snippetImportInferenceClient(accessToken, provider)}
2093
- # output is a PIL.Image object
2094
- image = client.image_to_image(
2095
- "${inputsAsObj.image}",
2096
- prompt="${inputsAsObj.prompt}",
2097
- model="${model.id}",
2098
- )`
2099
- },
2100
- {
2101
- client: "requests",
2102
- content: `def query(payload):
2103
- with open(payload["inputs"], "rb") as f:
2104
- img = f.read()
2105
- payload["inputs"] = base64.b64encode(img).decode("utf-8")
2106
- response = requests.post(API_URL, headers=headers, json=payload)
2107
- return response.content
2108
-
2109
- image_bytes = query({
2110
- "inputs": "${inputsAsObj.image}",
2111
- "parameters": {"prompt": "${inputsAsObj.prompt}"},
2112
- })
2113
-
2114
- # You can access the image with PIL.Image for example
2115
- import io
2116
- from PIL import Image
2117
- image = Image.open(io.BytesIO(image_bytes))`
2118
- }
2119
- ];
2120
- };
2121
- var pythonSnippets = {
2122
- // Same order as in tasks/src/pipelines.ts
2123
- "text-classification": snippetBasic2,
2124
- "token-classification": snippetBasic2,
2125
- "table-question-answering": snippetBasic2,
2126
- "question-answering": snippetBasic2,
2127
- "zero-shot-classification": snippetZeroShotClassification2,
2128
- translation: snippetBasic2,
2129
- summarization: snippetBasic2,
2130
- "feature-extraction": snippetBasic2,
2131
- "text-generation": snippetBasic2,
2132
- "text2text-generation": snippetBasic2,
2133
- "image-text-to-text": snippetConversational,
2134
- "fill-mask": snippetBasic2,
2135
- "sentence-similarity": snippetBasic2,
2136
- "automatic-speech-recognition": snippetAutomaticSpeechRecognition,
2137
- "text-to-image": snippetTextToImage,
2138
- "text-to-video": snippetTextToVideo,
2139
- "text-to-speech": snippetTextToAudio,
2140
- "text-to-audio": snippetTextToAudio,
2141
- "audio-to-audio": snippetFile2,
2142
- "audio-classification": snippetFile2,
2143
- "image-classification": snippetFile2,
2144
- "tabular-regression": snippetTabular,
2145
- "tabular-classification": snippetTabular,
2146
- "object-detection": snippetFile2,
2147
- "image-segmentation": snippetFile2,
2148
- "document-question-answering": snippetDocumentQuestionAnswering,
2149
- "image-to-text": snippetFile2,
2150
- "image-to-image": snippetImageToImage,
2151
- "zero-shot-image-classification": snippetZeroShotImageClassification
2152
- };
2153
- function getPythonInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
2154
- if (model.tags.includes("conversational")) {
2155
- return snippetConversational(model, accessToken, provider, providerModelId, opts);
2156
- } else {
2157
- const snippets = model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId) ?? [] : [];
2158
- return snippets.map((snippet) => {
2159
- return {
2160
- ...snippet,
2161
- content: addImportsToSnippet(snippet.content, model, accessToken)
2162
- };
2163
- });
2164
- }
2165
- }
2166
- var addImportsToSnippet = (snippet, model, accessToken) => {
2167
- if (snippet.includes("requests")) {
2168
- snippet = `import requests
2169
-
2170
- API_URL = "https://router.huggingface.co/hf-inference/models/${model.id}"
2171
- headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
2172
-
2173
- ${snippet}`;
2174
- }
2175
- if (snippet.includes("base64")) {
2176
- snippet = `import base64
2177
- ${snippet}`;
2178
- }
2179
- return snippet;
2180
- };
2181
-
2182
- // src/snippets/js.ts
2183
- var js_exports = {};
2184
- __export(js_exports, {
2185
- getJsInferenceSnippet: () => getJsInferenceSnippet,
2186
- jsSnippets: () => jsSnippets,
2187
- snippetAutomaticSpeechRecognition: () => snippetAutomaticSpeechRecognition2,
2188
- snippetBasic: () => snippetBasic3,
2189
- snippetFile: () => snippetFile3,
2190
- snippetTextGeneration: () => snippetTextGeneration2,
2191
- snippetTextToAudio: () => snippetTextToAudio2,
2192
- snippetTextToImage: () => snippetTextToImage2,
2193
- snippetTextToVideo: () => snippetTextToVideo2,
2194
- snippetZeroShotClassification: () => snippetZeroShotClassification3
2195
- });
2196
- import { openAIbaseUrl as openAIbaseUrl2 } from "@huggingface/tasks";
2197
- import {
2198
- getModelInputSnippet as getModelInputSnippet3,
2199
- stringifyGenerationConfig as stringifyGenerationConfig3,
2200
- stringifyMessages as stringifyMessages3
2201
- } from "@huggingface/tasks";
2202
- var HFJS_METHODS = {
2203
- "text-classification": "textClassification",
2204
- "token-classification": "tokenClassification",
2205
- "table-question-answering": "tableQuestionAnswering",
1769
+ var HF_JS_METHODS = {
1770
+ "automatic-speech-recognition": "automaticSpeechRecognition",
1771
+ "feature-extraction": "featureExtraction",
1772
+ "fill-mask": "fillMask",
1773
+ "image-classification": "imageClassification",
2206
1774
  "question-answering": "questionAnswering",
2207
- translation: "translation",
1775
+ "sentence-similarity": "sentenceSimilarity",
2208
1776
  summarization: "summarization",
2209
- "feature-extraction": "featureExtraction",
1777
+ "table-question-answering": "tableQuestionAnswering",
1778
+ "text-classification": "textClassification",
2210
1779
  "text-generation": "textGeneration",
2211
1780
  "text2text-generation": "textGeneration",
2212
- "fill-mask": "fillMask",
2213
- "sentence-similarity": "sentenceSimilarity"
1781
+ "token-classification": "tokenClassification",
1782
+ translation: "translation"
2214
1783
  };
2215
- var snippetBasic3 = (model, accessToken, provider) => {
2216
- return [
2217
- ...model.pipeline_tag && model.pipeline_tag in HFJS_METHODS ? [
2218
- {
2219
- client: "huggingface.js",
2220
- content: `import { InferenceClient } from "@huggingface/inference";
2221
-
2222
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
2223
-
2224
- const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
2225
- model: "${model.id}",
2226
- inputs: ${getModelInputSnippet3(model)},
2227
- provider: "${provider}",
2228
- });
2229
-
2230
- console.log(output);
2231
- `
1784
+ var snippetGenerator = (templateName, inputPreparationFn) => {
1785
+ return (model, accessToken, provider, providerModelId, opts) => {
1786
+ if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
1787
+ templateName = opts?.streaming ? "conversationalStream" : "conversational";
1788
+ inputPreparationFn = prepareConversationalInput;
1789
+ }
1790
+ const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: getModelInputSnippet(model) };
1791
+ const request2 = makeRequestOptionsFromResolvedModel(
1792
+ providerModelId ?? model.id,
1793
+ { accessToken, provider, ...inputs },
1794
+ { chatCompletion: templateName.includes("conversational"), task: model.pipeline_tag }
1795
+ );
1796
+ let providerInputs = inputs;
1797
+ const bodyAsObj = request2.info.body;
1798
+ if (typeof bodyAsObj === "string") {
1799
+ try {
1800
+ providerInputs = JSON.parse(bodyAsObj);
1801
+ } catch (e) {
1802
+ console.error("Failed to parse body as JSON", e);
2232
1803
  }
2233
- ] : [],
2234
- {
2235
- client: "fetch",
2236
- content: `async function query(data) {
2237
- const response = await fetch(
2238
- "https://router.huggingface.co/hf-inference/models/${model.id}",
2239
- {
2240
- headers: {
2241
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
2242
- "Content-Type": "application/json",
2243
- },
2244
- method: "POST",
2245
- body: JSON.stringify(data),
2246
- }
2247
- );
2248
- const result = await response.json();
2249
- return result;
2250
- }
2251
-
2252
- query({"inputs": ${getModelInputSnippet3(model)}}).then((response) => {
2253
- console.log(JSON.stringify(response));
2254
- });`
2255
1804
  }
2256
- ];
2257
- };
2258
- var snippetTextGeneration2 = (model, accessToken, provider, providerModelId, opts) => {
2259
- if (model.tags.includes("conversational")) {
2260
- const streaming = opts?.streaming ?? true;
2261
- const exampleMessages = getModelInputSnippet3(model);
2262
- const messages = opts?.messages ?? exampleMessages;
2263
- const messagesStr = stringifyMessages3(messages, { indent: " " });
2264
- const config = {
2265
- ...opts?.temperature ? { temperature: opts.temperature } : void 0,
2266
- max_tokens: opts?.max_tokens ?? 500,
2267
- ...opts?.top_p ? { top_p: opts.top_p } : void 0
1805
+ const params = {
1806
+ accessToken,
1807
+ authorizationHeader: request2.info.headers?.Authorization,
1808
+ baseUrl: removeSuffix(request2.url, "/chat/completions"),
1809
+ fullUrl: request2.url,
1810
+ inputs: {
1811
+ asObj: inputs,
1812
+ asCurlString: formatBody(inputs, "curl"),
1813
+ asJsonString: formatBody(inputs, "json"),
1814
+ asPythonString: formatBody(inputs, "python"),
1815
+ asTsString: formatBody(inputs, "ts")
1816
+ },
1817
+ providerInputs: {
1818
+ asObj: providerInputs,
1819
+ asCurlString: formatBody(providerInputs, "curl"),
1820
+ asJsonString: formatBody(providerInputs, "json"),
1821
+ asPythonString: formatBody(providerInputs, "python"),
1822
+ asTsString: formatBody(providerInputs, "ts")
1823
+ },
1824
+ model,
1825
+ provider,
1826
+ providerModelId: providerModelId ?? model.id
2268
1827
  };
2269
- const configStr = stringifyGenerationConfig3(config, {
2270
- indent: "\n ",
2271
- attributeValueConnector: ": "
2272
- });
2273
- if (streaming) {
2274
- return [
2275
- {
2276
- client: "huggingface.js",
2277
- content: `import { InferenceClient } from "@huggingface/inference";
2278
-
2279
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
2280
-
2281
- let out = "";
2282
-
2283
- const stream = client.chatCompletionStream({
2284
- model: "${model.id}",
2285
- messages: ${messagesStr},
2286
- provider: "${provider}",
2287
- ${configStr}
2288
- });
2289
-
2290
- for await (const chunk of stream) {
2291
- if (chunk.choices && chunk.choices.length > 0) {
2292
- const newContent = chunk.choices[0].delta.content;
2293
- out += newContent;
2294
- console.log(newContent);
2295
- }
2296
- }`
2297
- },
2298
- {
2299
- client: "openai",
2300
- content: `import { OpenAI } from "openai";
2301
-
2302
- const client = new OpenAI({
2303
- baseURL: "${openAIbaseUrl2(provider)}",
2304
- apiKey: "${accessToken || `{API_TOKEN}`}"
2305
- });
2306
-
2307
- let out = "";
2308
-
2309
- const stream = await client.chat.completions.create({
2310
- model: "${providerModelId ?? model.id}",
2311
- messages: ${messagesStr},
2312
- ${configStr}
2313
- stream: true,
2314
- });
2315
-
2316
- for await (const chunk of stream) {
2317
- if (chunk.choices && chunk.choices.length > 0) {
2318
- const newContent = chunk.choices[0].delta.content;
2319
- out += newContent;
2320
- console.log(newContent);
2321
- }
2322
- }`
1828
+ return inferenceSnippetLanguages.map((language) => {
1829
+ return CLIENTS[language].map((client) => {
1830
+ if (!hasTemplate(language, client, templateName)) {
1831
+ return;
2323
1832
  }
- ];
- } else {
- return [
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const chatCompletion = await client.chatCompletion({
- model: "${model.id}",
- messages: ${messagesStr},
- provider: "${provider}",
- ${configStr}
- });
-
- console.log(chatCompletion.choices[0].message);
- `
- },
- {
- client: "openai",
- content: `import { OpenAI } from "openai";
-
- const client = new OpenAI({
- baseURL: "${openAIbaseUrl2(provider)}",
- apiKey: "${accessToken || `{API_TOKEN}`}"
- });
+ const template = loadTemplate(language, client, templateName);
+ if (client === "huggingface_hub" && templateName.includes("basic")) {
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_PYTHON_METHODS)) {
+ return;
+ }
+ params["methodName"] = HF_PYTHON_METHODS[model.pipeline_tag];
+ }
+ if (client === "huggingface.js" && templateName.includes("basic")) {
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_JS_METHODS)) {
+ return;
+ }
+ params["methodName"] = HF_JS_METHODS[model.pipeline_tag];
+ }
+ let snippet = template(params).trim();
+ if (!snippet) {
+ return;
+ }
+ if (client === "huggingface_hub") {
+ const importSection = snippetImportPythonInferenceClient({ ...params });
+ snippet = `${importSection}

- const chatCompletion = await client.chat.completions.create({
- model: "${providerModelId ?? model.id}",
- messages: ${messagesStr},
- ${configStr}
- });
+ ${snippet}`;
+ } else if (client === "requests") {
+ const importSection = snippetImportRequests({
+ ...params,
+ importBase64: snippet.includes("base64"),
+ importJson: snippet.includes("json.")
+ });
+ snippet = `${importSection}

- console.log(chatCompletion.choices[0].message);
- `
+ ${snippet}`;
  }
- ];
- }
- } else {
- return snippetBasic3(model, accessToken, provider);
- }
- };
- var snippetZeroShotClassification3 = (model, accessToken) => {
- return [
- {
- client: "fetch",
- content: `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );
- const result = await response.json();
- return result;
- }
-
- query({"inputs": ${getModelInputSnippet3(
- model
- )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
- console.log(JSON.stringify(response));
- });`
- }
- ];
+ return { language, client, content: snippet };
+ }).filter((snippet) => snippet !== void 0);
+ }).flat();
+ };
  };
- var snippetTextToImage2 = (model, accessToken, provider) => {
- return [
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const image = await client.textToImage({
- model: "${model.id}",
- inputs: ${getModelInputSnippet3(model)},
- parameters: { num_inference_steps: 5 },
- provider: "${provider}",
- });
- /// Use the generated image (it's a Blob)
- `
- },
- ...provider === "hf-inference" ? [
- {
- client: "fetch",
- content: `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );
- const result = await response.blob();
- return result;
- }
- query({"inputs": ${getModelInputSnippet3(model)}}).then((response) => {
- // Use image
- });`
- }
- ] : []
- ];
+ var prepareDocumentQuestionAnsweringInput = (model) => {
+ return JSON.parse(getModelInputSnippet(model));
  };
- var snippetTextToVideo2 = (model, accessToken, provider) => {
- return ["fal-ai", "replicate"].includes(provider) ? [
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const video = await client.textToVideo({
- model: "${model.id}",
- provider: "${provider}",
- inputs: ${getModelInputSnippet3(model)},
- parameters: { num_inference_steps: 5 },
- });
- // Use the generated video (it's a Blob)
- `
- }
- ] : [];
- };
- var snippetTextToAudio2 = (model, accessToken, provider) => {
- if (provider !== "hf-inference") {
- return [];
- }
- const commonSnippet = `async function query(data) {
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: JSON.stringify(data),
- }
- );`;
- if (model.library_name === "transformers") {
- return [
- {
- client: "fetch",
- content: commonSnippet + `
- const result = await response.blob();
- return result;
- }
- query({"inputs": ${getModelInputSnippet3(model)}}).then((response) => {
- // Returns a byte object of the Audio wavform. Use it directly!
- });`
- }
- ];
- } else {
- return [
- {
- client: "fetch",
- content: commonSnippet + `
- const result = await response.json();
- return result;
- }
-
- query({"inputs": ${getModelInputSnippet3(model)}}).then((response) => {
- console.log(JSON.stringify(response));
- });`
- }
- ];
- }
+ var prepareImageToImageInput = (model) => {
+ const data = JSON.parse(getModelInputSnippet(model));
+ return { inputs: data.image, parameters: { prompt: data.prompt } };
  };
- var snippetAutomaticSpeechRecognition2 = (model, accessToken, provider) => {
- return [
- {
- client: "huggingface.js",
- content: `import { InferenceClient } from "@huggingface/inference";
-
- const client = new InferenceClient("${accessToken || `{API_TOKEN}`}");
-
- const data = fs.readFileSync(${getModelInputSnippet3(model)});
-
- const output = await client.automaticSpeechRecognition({
- data,
- model: "${model.id}",
- provider: "${provider}",
- });
-
- console.log(output);
- `
- },
- ...provider === "hf-inference" ? snippetFile3(model, accessToken, provider) : []
- ];
- };
- var snippetFile3 = (model, accessToken, provider) => {
- if (provider !== "hf-inference") {
- return [];
- }
- return [
- {
- client: "fetch",
- content: `async function query(filename) {
- const data = fs.readFileSync(filename);
- const response = await fetch(
- "https://router.huggingface.co/hf-inference/models/${model.id}",
- {
- headers: {
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
- "Content-Type": "application/json",
- },
- method: "POST",
- body: data,
- }
- );
- const result = await response.json();
- return result;
+ var prepareConversationalInput = (model, opts) => {
+ return {
+ messages: opts?.messages ?? getModelInputSnippet(model),
+ ...opts?.temperature ? { temperature: opts?.temperature } : void 0,
+ max_tokens: opts?.max_tokens ?? 500,
+ ...opts?.top_p ? { top_p: opts?.top_p } : void 0
+ };
+ };
+ var snippets = {
+ "audio-classification": snippetGenerator("basicAudio"),
+ "audio-to-audio": snippetGenerator("basicAudio"),
+ "automatic-speech-recognition": snippetGenerator("basicAudio"),
+ "document-question-answering": snippetGenerator("documentQuestionAnswering", prepareDocumentQuestionAnsweringInput),
+ "feature-extraction": snippetGenerator("basic"),
+ "fill-mask": snippetGenerator("basic"),
+ "image-classification": snippetGenerator("basicImage"),
+ "image-segmentation": snippetGenerator("basicImage"),
+ "image-text-to-text": snippetGenerator("conversational"),
+ "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
+ "image-to-text": snippetGenerator("basicImage"),
+ "object-detection": snippetGenerator("basicImage"),
+ "question-answering": snippetGenerator("basic"),
+ "sentence-similarity": snippetGenerator("basic"),
+ summarization: snippetGenerator("basic"),
+ "tabular-classification": snippetGenerator("tabular"),
+ "tabular-regression": snippetGenerator("tabular"),
+ "table-question-answering": snippetGenerator("basic"),
+ "text-classification": snippetGenerator("basic"),
+ "text-generation": snippetGenerator("basic"),
+ "text-to-audio": snippetGenerator("textToAudio"),
+ "text-to-image": snippetGenerator("textToImage"),
+ "text-to-speech": snippetGenerator("textToAudio"),
+ "text-to-video": snippetGenerator("textToVideo"),
+ "text2text-generation": snippetGenerator("basic"),
+ "token-classification": snippetGenerator("basic"),
+ translation: snippetGenerator("basic"),
+ "zero-shot-classification": snippetGenerator("zeroShotClassification"),
+ "zero-shot-image-classification": snippetGenerator("zeroShotImageClassification")
+ };
+ function getInferenceSnippets(model, accessToken, provider, providerModelId, opts) {
+ return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? [] : [];
  }
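Taken together, `prepareConversationalInput`, the `snippets` task map, and `getInferenceSnippets` replace the hand-written per-task snippet functions removed below. A usage sketch of the new entry point; the model literal and token are placeholders, and only fields actually referenced in this diff (`id`, `pipeline_tag`, `tags`, and the `opts` keys `messages`, `temperature`, `max_tokens`, `top_p`) are assumed:

// Illustrative only: calling the new template-driven entry point.
const generated = getInferenceSnippets(
    { id: "meta-llama/Llama-3.1-8B-Instruct", pipeline_tag: "text-generation", tags: [] },
    "hf_xxx",           // access token placeholder
    "hf-inference",     // provider
    undefined,          // providerModelId; falls back to model.id
    { max_tokens: 500 }
);
for (const { language, client, content } of generated) {
    console.log(`--- ${language} / ${client} ---\n${content}`);
}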
-
- query(${getModelInputSnippet3(model)}).then((response) => {
- console.log(JSON.stringify(response));
- });`
- }
- ];
- };
- var jsSnippets = {
- // Same order as in tasks/src/pipelines.ts
- "text-classification": snippetBasic3,
- "token-classification": snippetBasic3,
- "table-question-answering": snippetBasic3,
- "question-answering": snippetBasic3,
- "zero-shot-classification": snippetZeroShotClassification3,
- translation: snippetBasic3,
- summarization: snippetBasic3,
- "feature-extraction": snippetBasic3,
- "text-generation": snippetTextGeneration2,
- "image-text-to-text": snippetTextGeneration2,
- "text2text-generation": snippetBasic3,
- "fill-mask": snippetBasic3,
- "sentence-similarity": snippetBasic3,
- "automatic-speech-recognition": snippetAutomaticSpeechRecognition2,
- "text-to-image": snippetTextToImage2,
- "text-to-video": snippetTextToVideo2,
- "text-to-speech": snippetTextToAudio2,
- "text-to-audio": snippetTextToAudio2,
- "audio-to-audio": snippetFile3,
- "audio-classification": snippetFile3,
- "image-classification": snippetFile3,
- "image-to-text": snippetFile3,
- "object-detection": snippetFile3,
- "image-segmentation": snippetFile3
- };
- function getJsInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
- return model.pipeline_tag && model.pipeline_tag in jsSnippets ? jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? [] : [];
+ function formatBody(obj, format) {
+ switch (format) {
+ case "curl":
+ return indentString(formatBody(obj, "json"));
+ case "json":
+ return JSON.stringify(obj, null, 4).split("\n").slice(1, -1).join("\n");
+ case "python":
+ return indentString(
+ Object.entries(obj).map(([key, value]) => {
+ const formattedValue = JSON.stringify(value, null, 4).replace(/"/g, '"');
+ return `${key}=${formattedValue},`;
+ }).join("\n")
+ );
+ case "ts":
+ return formatTsObject(obj).split("\n").slice(1, -1).join("\n");
+ default:
+ throw new Error(`Unsupported format: ${format}`);
+ }
+ }
+ function formatTsObject(obj, depth) {
+ depth = depth ?? 0;
+ if (typeof obj !== "object" || obj === null) {
+ return JSON.stringify(obj);
+ }
+ if (Array.isArray(obj)) {
+ const items = obj.map((item) => {
+ const formatted = formatTsObject(item, depth + 1);
+ return `${" ".repeat(4 * (depth + 1))}${formatted},`;
+ }).join("\n");
+ return `[
+ ${items}
+ ${" ".repeat(4 * depth)}]`;
+ }
+ const entries = Object.entries(obj);
+ const lines = entries.map(([key, value]) => {
+ const formattedValue = formatTsObject(value, depth + 1);
+ const keyStr = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/.test(key) ? key : `"${key}"`;
+ return `${" ".repeat(4 * (depth + 1))}${keyStr}: ${formattedValue},`;
+ }).join("\n");
+ return `{
+ ${lines}
+ ${" ".repeat(4 * depth)}}`;
+ }
+ function indentString(str) {
+ return str.split("\n").map((line) => " ".repeat(4) + line).join("\n");
+ }
+ function removeSuffix(str, suffix) {
+ return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
  }
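For reference, what the `formatBody` helper above produces for a small payload; the payload is made up, and the outputs follow directly from the `JSON.stringify`/indent logic shown:

// Illustrative only: formatBody applied to a toy request body.
const body = { inputs: "Hello!", parameters: { top_k: 3 } };

// formatBody(body, "json") drops the outer braces and keeps the 4-space-indented body:
//     "inputs": "Hello!",
//     "parameters": {
//         "top_k": 3
//     }

// formatBody(body, "python") emits keyword-argument lines, indented one extra level:
//     inputs="Hello!",
//     parameters={
//         "top_k": 3
//     },

// formatBody(body, "curl") is the "json" output indented one extra level, and
// formatBody(body, "ts") goes through formatTsObject, which unquotes plain identifier keys.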
  export {
  HfInference,