@huggingface/inference 3.6.0 → 3.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +240 -71
- package/dist/index.js +240 -60
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/src/providers/black-forest-labs.d.ts.map +1 -1
- package/dist/src/providers/cerebras.d.ts.map +1 -1
- package/dist/src/providers/cohere.d.ts.map +1 -1
- package/dist/src/providers/fal-ai.d.ts +6 -16
- package/dist/src/providers/fal-ai.d.ts.map +1 -1
- package/dist/src/providers/fireworks-ai.d.ts.map +1 -1
- package/dist/src/providers/hf-inference.d.ts.map +1 -1
- package/dist/src/providers/hyperbolic.d.ts.map +1 -1
- package/dist/src/providers/nebius.d.ts.map +1 -1
- package/dist/src/providers/novita.d.ts.map +1 -1
- package/dist/src/providers/openai.d.ts.map +1 -1
- package/dist/src/providers/replicate.d.ts.map +1 -1
- package/dist/src/providers/sambanova.d.ts.map +1 -1
- package/dist/src/providers/together.d.ts.map +1 -1
- package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
- package/dist/src/snippets/templates.exported.d.ts +2 -0
- package/dist/src/snippets/templates.exported.d.ts.map +1 -0
- package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
- package/dist/src/types.d.ts +4 -2
- package/dist/src/types.d.ts.map +1 -1
- package/dist/test/InferenceClient.spec.d.ts.map +1 -1
- package/package.json +10 -15
- package/src/lib/makeRequestOptions.ts +3 -1
- package/src/providers/black-forest-labs.ts +6 -2
- package/src/providers/cerebras.ts +6 -2
- package/src/providers/cohere.ts +6 -2
- package/src/providers/fal-ai.ts +85 -3
- package/src/providers/fireworks-ai.ts +6 -2
- package/src/providers/hf-inference.ts +6 -2
- package/src/providers/hyperbolic.ts +6 -2
- package/src/providers/nebius.ts +6 -2
- package/src/providers/novita.ts +5 -2
- package/src/providers/openai.ts +6 -2
- package/src/providers/replicate.ts +6 -2
- package/src/providers/sambanova.ts +6 -2
- package/src/providers/together.ts +6 -2
- package/src/snippets/getInferenceSnippets.ts +6 -24
- package/src/snippets/templates.exported.ts +72 -0
- package/src/tasks/cv/textToVideo.ts +5 -21
- package/src/types.ts +5 -2
- package/dist/browser/index.cjs +0 -1652
- package/dist/browser/index.js +0 -1652
- package/src/snippets/templates/js/fetch/basic.jinja +0 -19
- package/src/snippets/templates/js/fetch/basicAudio.jinja +0 -19
- package/src/snippets/templates/js/fetch/basicImage.jinja +0 -19
- package/src/snippets/templates/js/fetch/textToAudio.jinja +0 -41
- package/src/snippets/templates/js/fetch/textToImage.jinja +0 -19
- package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +0 -22
- package/src/snippets/templates/js/huggingface.js/basic.jinja +0 -11
- package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +0 -13
- package/src/snippets/templates/js/huggingface.js/basicImage.jinja +0 -13
- package/src/snippets/templates/js/huggingface.js/conversational.jinja +0 -11
- package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +0 -19
- package/src/snippets/templates/js/huggingface.js/textToImage.jinja +0 -11
- package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +0 -10
- package/src/snippets/templates/js/openai/conversational.jinja +0 -13
- package/src/snippets/templates/js/openai/conversationalStream.jinja +0 -22
- package/src/snippets/templates/python/fal_client/textToImage.jinja +0 -11
- package/src/snippets/templates/python/huggingface_hub/basic.jinja +0 -4
- package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +0 -1
- package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +0 -1
- package/src/snippets/templates/python/huggingface_hub/conversational.jinja +0 -6
- package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +0 -8
- package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +0 -5
- package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +0 -6
- package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +0 -6
- package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +0 -5
- package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +0 -4
- package/src/snippets/templates/python/openai/conversational.jinja +0 -13
- package/src/snippets/templates/python/openai/conversationalStream.jinja +0 -15
- package/src/snippets/templates/python/requests/basic.jinja +0 -7
- package/src/snippets/templates/python/requests/basicAudio.jinja +0 -7
- package/src/snippets/templates/python/requests/basicImage.jinja +0 -7
- package/src/snippets/templates/python/requests/conversational.jinja +0 -9
- package/src/snippets/templates/python/requests/conversationalStream.jinja +0 -16
- package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +0 -13
- package/src/snippets/templates/python/requests/imageToImage.jinja +0 -15
- package/src/snippets/templates/python/requests/importRequests.jinja +0 -10
- package/src/snippets/templates/python/requests/tabular.jinja +0 -9
- package/src/snippets/templates/python/requests/textToAudio.jinja +0 -23
- package/src/snippets/templates/python/requests/textToImage.jinja +0 -14
- package/src/snippets/templates/python/requests/zeroShotClassification.jinja +0 -8
- package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +0 -14
- package/src/snippets/templates/sh/curl/basic.jinja +0 -7
- package/src/snippets/templates/sh/curl/basicAudio.jinja +0 -5
- package/src/snippets/templates/sh/curl/basicImage.jinja +0 -5
- package/src/snippets/templates/sh/curl/conversational.jinja +0 -7
- package/src/snippets/templates/sh/curl/conversationalStream.jinja +0 -7
- package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +0 -5
package/dist/index.cjs
CHANGED
@@ -1,9 +1,7 @@
 "use strict";
-var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
-var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name2 in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
-var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
-  // If the importer is in node compatibility mode or this is not an ESM
-  // file that has been converted to a CommonJS file using a Babel-
-  // compatible transform (i.e. "__esModule" has not been set), then set
-  // "default" to the CommonJS "module.exports" for node compatibility.
-  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
-  mod
-));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
 // src/index.ts
@@ -114,6 +104,9 @@ var HF_ROUTER_URL = "https://router.huggingface.co";
 
 // src/providers/black-forest-labs.ts
 var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
+var makeBaseUrl = () => {
+  return BLACK_FOREST_LABS_AI_API_BASE_URL;
+};
 var makeBody = (params) => {
   return params.args;
 };
@@ -128,7 +121,7 @@ var makeUrl = (params) => {
   return `${params.baseUrl}/v1/${params.model}`;
 };
 var BLACK_FOREST_LABS_CONFIG = {
-
+  makeBaseUrl,
  makeBody,
  makeHeaders,
  makeUrl
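The same refactor repeats for every provider below: each provider config gains a `makeBaseUrl()` function in place of a static base URL, so the base URL can be computed per task (fal-ai, for example, picks a different host for text-to-video). A minimal sketch of the shape this implies; `ProviderConfig`, `InferenceTask`, the parameter objects, and the header contents here are illustrative assumptions, not the package's actual definitions in src/types.ts:

```ts
// Sketch only: the real definitions live in src/types.ts and src/providers/*.ts.
type InferenceTask = string; // e.g. "text-to-video"

interface ProviderConfig {
  makeBaseUrl: (task?: InferenceTask) => string;
  makeBody: (params: { args: Record<string, unknown> }) => unknown;
  makeHeaders: (params: { accessToken?: string }) => Record<string, string>;
  makeUrl: (params: { authMethod?: string; baseUrl: string; model: string; task?: InferenceTask }) => string;
  clientSideRoutingOnly?: boolean;
}

// Black Forest Labs expressed in that shape (the Authorization header is illustrative).
const BLACK_FOREST_LABS_CONFIG: ProviderConfig = {
  makeBaseUrl: () => "https://api.us1.bfl.ai",
  makeBody: ({ args }) => args,
  makeHeaders: ({ accessToken }) => ({ Authorization: `Bearer ${accessToken}` }),
  makeUrl: ({ baseUrl, model }) => `${baseUrl}/v1/${model}`,
};
```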
@@ -136,6 +129,9 @@ var BLACK_FOREST_LABS_CONFIG = {
 
 // src/providers/cerebras.ts
 var CEREBRAS_API_BASE_URL = "https://api.cerebras.ai";
+var makeBaseUrl2 = () => {
+  return CEREBRAS_API_BASE_URL;
+};
 var makeBody2 = (params) => {
   return {
     ...params.args,
@@ -149,7 +145,7 @@ var makeUrl2 = (params) => {
   return `${params.baseUrl}/v1/chat/completions`;
 };
 var CEREBRAS_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl2,
  makeBody: makeBody2,
  makeHeaders: makeHeaders2,
  makeUrl: makeUrl2
@@ -157,6 +153,9 @@ var CEREBRAS_CONFIG = {
 
 // src/providers/cohere.ts
 var COHERE_API_BASE_URL = "https://api.cohere.com";
+var makeBaseUrl3 = () => {
+  return COHERE_API_BASE_URL;
+};
 var makeBody3 = (params) => {
   return {
     ...params.args,
@@ -170,14 +169,40 @@ var makeUrl3 = (params) => {
   return `${params.baseUrl}/compatibility/v1/chat/completions`;
 };
 var COHERE_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl3,
  makeBody: makeBody3,
  makeHeaders: makeHeaders3,
  makeUrl: makeUrl3
 };
 
+// src/lib/InferenceOutputError.ts
+var InferenceOutputError = class extends TypeError {
+  constructor(message) {
+    super(
+      `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
+    );
+    this.name = "InferenceOutputError";
+  }
+};
+
+// src/lib/isUrl.ts
+function isUrl(modelOrUrl) {
+  return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
+}
+
+// src/utils/delay.ts
+function delay(ms) {
+  return new Promise((resolve) => {
+    setTimeout(() => resolve(), ms);
+  });
+}
+
 // src/providers/fal-ai.ts
 var FAL_AI_API_BASE_URL = "https://fal.run";
+var FAL_AI_API_BASE_URL_QUEUE = "https://queue.fal.run";
+var makeBaseUrl4 = (task) => {
+  return task === "text-to-video" ? FAL_AI_API_BASE_URL_QUEUE : FAL_AI_API_BASE_URL;
+};
 var makeBody4 = (params) => {
   return params.args;
 };
@@ -187,17 +212,64 @@ var makeHeaders4 = (params) => {
   };
 };
 var makeUrl4 = (params) => {
-
+  const baseUrl = `${params.baseUrl}/${params.model}`;
+  if (params.authMethod !== "provider-key" && params.task === "text-to-video") {
+    return `${baseUrl}?_subdomain=queue`;
+  }
+  return baseUrl;
 };
 var FAL_AI_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl4,
  makeBody: makeBody4,
  makeHeaders: makeHeaders4,
  makeUrl: makeUrl4
 };
+async function pollFalResponse(res, url, headers) {
+  const requestId = res.request_id;
+  if (!requestId) {
+    throw new InferenceOutputError("No request ID found in the response");
+  }
+  let status = res.status;
+  const parsedUrl = new URL(url);
+  const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/fal-ai" : ""}`;
+  const modelId = new URL(res.response_url).pathname;
+  const queryParams = parsedUrl.search;
+  const statusUrl = `${baseUrl}${modelId}/status${queryParams}`;
+  const resultUrl = `${baseUrl}${modelId}${queryParams}`;
+  while (status !== "COMPLETED") {
+    await delay(500);
+    const statusResponse = await fetch(statusUrl, { headers });
+    if (!statusResponse.ok) {
+      throw new InferenceOutputError("Failed to fetch response status from fal-ai API");
+    }
+    try {
+      status = (await statusResponse.json()).status;
+    } catch (error) {
+      throw new InferenceOutputError("Failed to parse status response from fal-ai API");
+    }
+  }
+  const resultResponse = await fetch(resultUrl, { headers });
+  let result;
+  try {
+    result = await resultResponse.json();
+  } catch (error) {
+    throw new InferenceOutputError("Failed to parse result response from fal-ai API");
+  }
+  if (typeof result === "object" && !!result && "video" in result && typeof result.video === "object" && !!result.video && "url" in result.video && typeof result.video.url === "string" && isUrl(result.video.url)) {
+    const urlResponse = await fetch(result.video.url);
+    return await urlResponse.blob();
+  } else {
+    throw new InferenceOutputError(
+      "Expected { video: { url: string } } result format, got instead: " + JSON.stringify(result)
+    );
+  }
+}
 
 // src/providers/fireworks-ai.ts
 var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";
+var makeBaseUrl5 = () => {
+  return FIREWORKS_AI_API_BASE_URL;
+};
 var makeBody5 = (params) => {
   return {
     ...params.args,
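Two pieces make the fal.ai queue work: `makeBaseUrl4`/`makeUrl4` route text-to-video requests to the queue (either `queue.fal.run` when calling fal directly, or the HF router with a `?_subdomain=queue` flag), and `pollFalResponse` polls that queue until the job completes. A hedged sketch of the URL selection only, assuming `HF_HUB_INFERENCE_PROXY_TEMPLATE` expands to `https://router.huggingface.co/{{PROVIDER}}` (consistent with `HF_ROUTER_URL` earlier in this file); the function name and signature are illustrative:

```ts
// Sketch of how the fal-ai text-to-video request URL is chosen, based on
// makeBaseUrl4/makeUrl4 above. The router URL expansion is an assumption.
function falTextToVideoUrl(providerModelId: string, authMethod: "provider-key" | "hf-token"): string {
  const baseUrl =
    authMethod === "provider-key"
      ? "https://queue.fal.run" // direct call with the provider's own key: use fal's queue host
      : "https://router.huggingface.co/fal-ai"; // routed call with an HF token: go through the HF proxy
  const url = `${baseUrl}/${providerModelId}`;
  // Routed calls select the queue via a query flag instead of a different host.
  return authMethod === "provider-key" ? url : `${url}?_subdomain=queue`;
}

// falTextToVideoUrl("<provider-model-id>", "hf-token")
//   -> "https://router.huggingface.co/fal-ai/<provider-model-id>?_subdomain=queue"
```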
@@ -214,13 +286,16 @@ var makeUrl5 = (params) => {
   return `${params.baseUrl}/inference`;
 };
 var FIREWORKS_AI_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl5,
  makeBody: makeBody5,
  makeHeaders: makeHeaders5,
  makeUrl: makeUrl5
 };
 
 // src/providers/hf-inference.ts
+var makeBaseUrl6 = () => {
+  return `${HF_ROUTER_URL}/hf-inference`;
+};
 var makeBody6 = (params) => {
   return {
     ...params.args,
@@ -240,7 +315,7 @@ var makeUrl6 = (params) => {
   return `${params.baseUrl}/models/${params.model}`;
 };
 var HF_INFERENCE_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl6,
  makeBody: makeBody6,
  makeHeaders: makeHeaders6,
  makeUrl: makeUrl6
@@ -248,6 +323,9 @@ var HF_INFERENCE_CONFIG = {
 
 // src/providers/hyperbolic.ts
 var HYPERBOLIC_API_BASE_URL = "https://api.hyperbolic.xyz";
+var makeBaseUrl7 = () => {
+  return HYPERBOLIC_API_BASE_URL;
+};
 var makeBody7 = (params) => {
   return {
     ...params.args,
@@ -264,7 +342,7 @@ var makeUrl7 = (params) => {
   return `${params.baseUrl}/v1/chat/completions`;
 };
 var HYPERBOLIC_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl7,
  makeBody: makeBody7,
  makeHeaders: makeHeaders7,
  makeUrl: makeUrl7
@@ -272,6 +350,9 @@ var HYPERBOLIC_CONFIG = {
 
 // src/providers/nebius.ts
 var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+var makeBaseUrl8 = () => {
+  return NEBIUS_API_BASE_URL;
+};
 var makeBody8 = (params) => {
   return {
     ...params.args,
@@ -294,7 +375,7 @@ var makeUrl8 = (params) => {
   return params.baseUrl;
 };
 var NEBIUS_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl8,
  makeBody: makeBody8,
  makeHeaders: makeHeaders8,
  makeUrl: makeUrl8
@@ -302,6 +383,9 @@ var NEBIUS_CONFIG = {
 
 // src/providers/novita.ts
 var NOVITA_API_BASE_URL = "https://api.novita.ai";
+var makeBaseUrl9 = () => {
+  return NOVITA_API_BASE_URL;
+};
 var makeBody9 = (params) => {
   return {
     ...params.args,
@@ -322,7 +406,7 @@ var makeUrl9 = (params) => {
   return params.baseUrl;
 };
 var NOVITA_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl9,
  makeBody: makeBody9,
  makeHeaders: makeHeaders9,
  makeUrl: makeUrl9
@@ -330,6 +414,9 @@ var NOVITA_CONFIG = {
 
 // src/providers/replicate.ts
 var REPLICATE_API_BASE_URL = "https://api.replicate.com";
+var makeBaseUrl10 = () => {
+  return REPLICATE_API_BASE_URL;
+};
 var makeBody10 = (params) => {
   return {
     input: params.args,
@@ -346,7 +433,7 @@ var makeUrl10 = (params) => {
   return `${params.baseUrl}/v1/models/${params.model}/predictions`;
 };
 var REPLICATE_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl10,
  makeBody: makeBody10,
  makeHeaders: makeHeaders10,
  makeUrl: makeUrl10
@@ -354,6 +441,9 @@ var REPLICATE_CONFIG = {
 
 // src/providers/sambanova.ts
 var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
+var makeBaseUrl11 = () => {
+  return SAMBANOVA_API_BASE_URL;
+};
 var makeBody11 = (params) => {
   return {
     ...params.args,
@@ -370,7 +460,7 @@ var makeUrl11 = (params) => {
   return params.baseUrl;
 };
 var SAMBANOVA_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl11,
  makeBody: makeBody11,
  makeHeaders: makeHeaders11,
  makeUrl: makeUrl11
@@ -378,6 +468,9 @@ var SAMBANOVA_CONFIG = {
 
 // src/providers/together.ts
 var TOGETHER_API_BASE_URL = "https://api.together.xyz";
+var makeBaseUrl12 = () => {
+  return TOGETHER_API_BASE_URL;
+};
 var makeBody12 = (params) => {
   return {
     ...params.args,
@@ -400,7 +493,7 @@ var makeUrl12 = (params) => {
   return params.baseUrl;
 };
 var TOGETHER_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl12,
  makeBody: makeBody12,
  makeHeaders: makeHeaders12,
  makeUrl: makeUrl12
@@ -408,6 +501,9 @@ var TOGETHER_CONFIG = {
 
 // src/providers/openai.ts
 var OPENAI_API_BASE_URL = "https://api.openai.com";
+var makeBaseUrl13 = () => {
+  return OPENAI_API_BASE_URL;
+};
 var makeBody13 = (params) => {
   if (!params.chatCompletion) {
     throw new Error("OpenAI only supports chat completions.");
@@ -427,21 +523,16 @@ var makeUrl13 = (params) => {
   return `${params.baseUrl}/v1/chat/completions`;
 };
 var OPENAI_CONFIG = {
-
+  makeBaseUrl: makeBaseUrl13,
  makeBody: makeBody13,
  makeHeaders: makeHeaders13,
  makeUrl: makeUrl13,
  clientSideRoutingOnly: true
 };
 
-// src/lib/isUrl.ts
-function isUrl(modelOrUrl) {
-  return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
-}
-
 // package.json
 var name = "@huggingface/inference";
-var version = "3.6.0";
+var version = "3.6.2";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
@@ -580,7 +671,8 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
     return "none";
   })();
   const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : providerConfig.makeUrl({
-
+    authMethod,
+    baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.makeBaseUrl(task),
    model: resolvedModel,
    chatCompletion: chatCompletion2,
    task
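`makeRequestOptionsFromResolvedModel` now passes `authMethod` through to `makeUrl` and resolves the base URL from either the HF router proxy or the provider's `makeBaseUrl(task)`. A minimal sketch of that resolution step, again assuming the proxy template expands to the router URL (the constant value below is an assumption about `HF_HUB_INFERENCE_PROXY_TEMPLATE`):

```ts
// Sketch of the base-URL resolution shown in the hunk above.
const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{{PROVIDER}}"; // assumed value

function resolveBaseUrl(
  authMethod: string,
  provider: string,
  makeBaseUrl: (task?: string) => string,
  task?: string
): string {
  return authMethod !== "provider-key"
    ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) // proxied through the HF router
    : makeBaseUrl(task); // direct provider call; the base URL may depend on the task
}
```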
@@ -844,16 +936,6 @@ async function* streamingRequest(args, options) {
   }
 }
 
-// src/lib/InferenceOutputError.ts
-var InferenceOutputError = class extends TypeError {
-  constructor(message) {
-    super(
-      `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
-    );
-    this.name = "InferenceOutputError";
-  }
-};
-
 // src/utils/pick.ts
 function pick(o, props) {
   return Object.assign(
@@ -1068,13 +1150,6 @@ async function objectDetection(args, options) {
   return res;
 }
 
-// src/utils/delay.ts
-function delay(ms) {
-  return new Promise((resolve) => {
-    setTimeout(() => resolve(), ms);
-  });
-}
-
 // src/tasks/cv/textToImage.ts
 function getResponseFormatArg(provider) {
   switch (provider) {
@@ -1246,12 +1321,8 @@ async function textToVideo(args, options) {
     task: "text-to-video"
   });
   if (args.provider === "fal-ai") {
-    const
-
-      throw new InferenceOutputError("Expected { video: { url: string } }");
-    }
-    const urlResponse = await fetch(res.video.url);
-    return await urlResponse.blob();
+    const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
+    return await pollFalResponse(res, url, info.headers);
   } else if (args.provider === "novita") {
     const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
     if (!isValidOutput) {
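With this change, `textToVideo` no longer expects a synchronous `{ video: { url } }` payload from fal-ai; it re-derives the request URL and headers via `makeRequestOptions` and hands the submission response to `pollFalResponse`, which waits until the queued job completes. From the caller's side nothing changes; a usage sketch matching the `huggingface.js` snippet template added later in this diff (token, model id, and prompt are placeholders):

```ts
import { InferenceClient } from "@huggingface/inference";

// Placeholders: substitute a real HF token and a fal-ai text-to-video model id.
const client = new InferenceClient("hf_xxx");

const video = await client.textToVideo({
  provider: "fal-ai",
  model: "<model-id>",
  inputs: "A young man walking on the street",
});
// `video` is a Blob; the queue submission and polling happen inside the client.
```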
@@ -1673,10 +1744,116 @@ __export(snippets_exports, {
 // src/snippets/getInferenceSnippets.ts
 var import_tasks = require("@huggingface/tasks");
 var import_jinja = require("@huggingface/jinja");
-
-
-var
-
+
+// src/snippets/templates.exported.ts
+var templates = {
+  "js": {
+    "fetch": {
+      "basic": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
+      "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
+      "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
+      "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
+      "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
+      "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
+    },
+    "huggingface.js": {
+      "basic": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst output = await client.{{ methodName }}({\n model: "{{ model.id }}",\n inputs: {{ inputs.asObj.inputs }},\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
+      "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
+      "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n});\n\nconsole.log(output);',
+      "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
+      "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = await client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}',
+      "textToImage": `import { InferenceClient } from "@huggingface/inference";
+
+const client = new InferenceClient("{{ accessToken }}");
+
+const image = await client.textToImage({
+    provider: "{{ provider }}",
+    model: "{{ model.id }}",
+    inputs: {{ inputs.asObj.inputs }},
+    parameters: { num_inference_steps: 5 },
+});
+/// Use the generated image (it's a Blob)`,
+      "textToVideo": `import { InferenceClient } from "@huggingface/inference";
+
+const client = new InferenceClient("{{ accessToken }}");
+
+const image = await client.textToVideo({
+    provider: "{{ provider }}",
+    model: "{{ model.id }}",
+    inputs: {{ inputs.asObj.inputs }},
+});
+// Use the generated video (it's a Blob)`
+    },
+    "openai": {
+      "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
+      "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nlet out = "";\n\nconst stream = await client.chat.completions.create({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n } \n}'
+    }
+  },
+  "python": {
+    "fal_client": {
+      "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} '
+    },
+    "huggingface_hub": {
+      "basic": 'result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
+      "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
+      "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
+      "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
+      "conversationalStream": 'stream = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="") ',
+      "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
+      "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
+      "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n)',
+      "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
+      "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
+    },
+    "openai": {
+      "conversational": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\ncompletion = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
+      "conversationalStream": 'from openai import OpenAI\n\nclient = OpenAI(\n base_url="{{ baseUrl }}",\n api_key="{{ accessToken }}"\n)\n\nstream = client.chat.completions.create(\n model="{{ providerModelId }}",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end="")'
+    },
+    "requests": {
+      "basic": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n}) ',
+      "basicAudio": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "audio/flac", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
+      "basicImage": 'def query(filename):\n with open(filename, "rb") as f:\n data = f.read()\n response = requests.post(API_URL, headers={"Content-Type": "image/jpeg", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})',
+      "conversational": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response["choices"][0]["message"])',
+      "conversationalStream": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b"data:"):\n continue\n if line.strip() == b"data: [DONE]":\n return\n yield json.loads(line.decode("utf-8").lstrip("data:").rstrip("/n"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n "stream": True,\n})\n\nfor chunk in chunks:\n print(chunk["choices"][0]["delta"]["content"], end="")',
+      "documentQuestionAnswering": 'def query(payload):\n with open(payload["image"], "rb") as f:\n img = f.read()\n payload["image"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {\n "image": "{{ inputs.asObj.image }}",\n "question": "{{ inputs.asObj.question }}",\n },\n}) ',
+      "imageToImage": 'def query(payload):\n with open(payload["inputs"], "rb") as f:\n img = f.read()\n payload["inputs"] = base64.b64encode(img).decode("utf-8")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ',
+      "importRequests": '{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = "{{ fullUrl }}"\nheaders = {"Authorization": "{{ authorizationHeader }}"}',
+      "tabular": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n "inputs": {\n "data": {{ providerInputs.asObj.inputs }}\n },\n}) ',
+      "textToAudio": '{% if model.library_name == "transformers" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ',
+      "textToImage": '{% if provider == "hf-inference" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}',
+      "zeroShotClassification": 'def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "inputs": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["refund", "legal", "faq"]},\n}) ',
+      "zeroShotImageClassification": 'def query(data):\n with open(data["image_path"], "rb") as f:\n img = f.read()\n payload={\n "parameters": data["parameters"],\n "inputs": base64.b64encode(img).decode("utf-8")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n "image_path": {{ providerInputs.asObj.inputs }},\n "parameters": {"candidate_labels": ["cat", "dog", "llama"]},\n}) '
    }
  },
+  "sh": {
+    "curl": {
+      "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
+      "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
+      "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
+      "conversational": `curl {{ fullUrl }} \\
+    -H 'Authorization: {{ authorizationHeader }}' \\
+    -H 'Content-Type: application/json' \\
+    -d '{
+{{ providerInputs.asCurlString }},
+    "stream": false
+}'`,
+      "conversationalStream": `curl {{ fullUrl }} \\
+    -H 'Authorization: {{ authorizationHeader }}' \\
+    -H 'Content-Type: application/json' \\
+    -d '{
+{{ providerInputs.asCurlString }},
+    "stream": true
+}'`,
+      "zeroShotClassification": `curl {{ fullUrl }} \\
+    -X POST \\
+    -d '{"inputs": {{ providerInputs.asObj.inputs }}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
+    -H 'Content-Type: application/json' \\
+    -H 'Authorization: {{ authorizationHeader }}'`
    }
  }
+};
+
+// src/snippets/getInferenceSnippets.ts
 var PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
 var JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
 var SH_CLIENTS = ["curl"];
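All snippet templates are now inlined in templates.exported.ts (apparently generated from the removed .jinja files), so the next hunk can drop the filesystem lookup (`rootDirFinder`, `templatePath`, `existsSync`) that only worked under Node. A sketch of the resulting lookup-and-render flow, assuming the `templates` map above and the `Template` class exported by `@huggingface/jinja`; the standalone function signatures here are illustrative, not the package's actual ones:

```ts
import { Template } from "@huggingface/jinja";

type TemplateMap = Record<string, Record<string, Record<string, string>>>;

const hasTemplate = (templates: TemplateMap, language: string, client: string, name: string): boolean =>
  templates[language]?.[client]?.[name] !== undefined;

const loadTemplate = (templates: TemplateMap, language: string, client: string, name: string) => {
  const source = templates[language]?.[client]?.[name];
  if (!source) {
    throw new Error(`Template not found: ${language}/${client}/${name}`);
  }
  // Render from the in-memory string: no fs/path access is needed anymore.
  return (data: Record<string, unknown>): string => new Template(source).render(data);
};
```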
@@ -1685,20 +1862,12 @@ var CLIENTS = {
   python: [...PYTHON_CLIENTS],
   sh: [...SH_CLIENTS]
 };
-var
-  let currentPath = typeof import_meta !== "undefined" && import_meta.url ? import_path.default.normalize(new URL(import_meta.url).pathname) : __dirname;
-  while (currentPath !== "/") {
-    if ((0, import_node_fs.existsSync)(import_path.default.join(currentPath, "package.json"))) {
-      return currentPath;
-    }
-    currentPath = import_path.default.normalize(import_path.default.join(currentPath, ".."));
-  }
-  return "/";
-};
-var templatePath = (language, client, templateName) => import_path.default.join(rootDirFinder(), "src", "snippets", "templates", language, client, `${templateName}.jinja`);
-var hasTemplate = (language, client, templateName) => (0, import_node_fs.existsSync)(templatePath(language, client, templateName));
+var hasTemplate = (language, client, templateName) => templates[language]?.[client]?.[templateName] !== void 0;
 var loadTemplate = (language, client, templateName) => {
-  const template =
+  const template = templates[language]?.[client]?.[templateName];
+  if (!template) {
+    throw new Error(`Template not found: ${language}/${client}/${templateName}`);
+  }
   return (data) => new import_jinja.Template(template).render({ ...data });
 };
 var snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");