@huggingface/inference 3.8.0 → 3.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +6 -6
- package/dist/index.js +6 -6
- package/dist/src/snippets/getInferenceSnippets.d.ts +5 -1
- package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
- package/dist/src/snippets/index.d.ts +1 -1
- package/dist/src/snippets/index.d.ts.map +1 -1
- package/package.json +2 -2
- package/src/snippets/getInferenceSnippets.ts +7 -8
- package/src/snippets/index.ts +1 -1
package/dist/index.cjs
CHANGED
|
@@ -1203,7 +1203,7 @@ function getProviderHelper(provider, task) {
|
|
|
1203
1203
|
|
|
1204
1204
|
// package.json
|
|
1205
1205
|
var name = "@huggingface/inference";
|
|
1206
|
-
var version = "3.8.0";
|
|
1206
|
+
var version = "3.8.1";
|
|
1207
1207
|
|
|
1208
1208
|
// src/providers/consts.ts
|
|
1209
1209
|
var HARDCODED_MODEL_INFERENCE_MAPPING = {
|
|
@@ -2346,7 +2346,7 @@ var HF_JS_METHODS = {
|
|
|
2346
2346
|
translation: "translation"
|
|
2347
2347
|
};
|
|
2348
2348
|
var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
2349
|
-
return (model, accessToken, provider, inferenceProviderMapping, billTo, opts) => {
|
|
2349
|
+
return (model, accessToken, provider, inferenceProviderMapping, opts) => {
|
|
2350
2350
|
const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
|
|
2351
2351
|
let task = model.pipeline_tag;
|
|
2352
2352
|
if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
|
|
@@ -2373,7 +2373,7 @@ var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
|
2373
2373
|
inferenceProviderMapping,
|
|
2374
2374
|
{
|
|
2375
2375
|
task,
|
|
2376
|
-
billTo
|
|
2376
|
+
billTo: opts?.billTo
|
|
2377
2377
|
}
|
|
2378
2378
|
);
|
|
2379
2379
|
let providerInputs = inputs;
|
|
@@ -2407,7 +2407,7 @@ var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
|
2407
2407
|
model,
|
|
2408
2408
|
provider,
|
|
2409
2409
|
providerModelId: providerModelId ?? model.id,
|
|
2410
|
-
billTo
|
|
2410
|
+
billTo: opts?.billTo
|
|
2411
2411
|
};
|
|
2412
2412
|
return import_tasks.inferenceSnippetLanguages.map((language) => {
|
|
2413
2413
|
return CLIENTS[language].map((client) => {
|
|
@@ -2497,8 +2497,8 @@ var snippets = {
|
|
|
2497
2497
|
"zero-shot-classification": snippetGenerator("zeroShotClassification"),
|
|
2498
2498
|
"zero-shot-image-classification": snippetGenerator("zeroShotImageClassification")
|
|
2499
2499
|
};
|
|
2500
|
-
function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, billTo, opts) {
|
|
2501
|
-
return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, billTo, opts) ?? [] : [];
|
|
2500
|
+
function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, opts) {
|
|
2501
|
+
return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? [] : [];
|
|
2502
2502
|
}
|
|
2503
2503
|
function formatBody(obj, format) {
|
|
2504
2504
|
switch (format) {
|
package/dist/index.js
CHANGED
|
@@ -1146,7 +1146,7 @@ function getProviderHelper(provider, task) {
|
|
|
1146
1146
|
|
|
1147
1147
|
// package.json
|
|
1148
1148
|
var name = "@huggingface/inference";
|
|
1149
|
-
var version = "3.8.0";
|
|
1149
|
+
var version = "3.8.1";
|
|
1150
1150
|
|
|
1151
1151
|
// src/providers/consts.ts
|
|
1152
1152
|
var HARDCODED_MODEL_INFERENCE_MAPPING = {
|
|
@@ -2292,7 +2292,7 @@ var HF_JS_METHODS = {
|
|
|
2292
2292
|
translation: "translation"
|
|
2293
2293
|
};
|
|
2294
2294
|
var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
2295
|
-
return (model, accessToken, provider, inferenceProviderMapping, billTo, opts) => {
|
|
2295
|
+
return (model, accessToken, provider, inferenceProviderMapping, opts) => {
|
|
2296
2296
|
const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
|
|
2297
2297
|
let task = model.pipeline_tag;
|
|
2298
2298
|
if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
|
|
@@ -2319,7 +2319,7 @@ var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
|
2319
2319
|
inferenceProviderMapping,
|
|
2320
2320
|
{
|
|
2321
2321
|
task,
|
|
2322
|
-
billTo
|
|
2322
|
+
billTo: opts?.billTo
|
|
2323
2323
|
}
|
|
2324
2324
|
);
|
|
2325
2325
|
let providerInputs = inputs;
|
|
@@ -2353,7 +2353,7 @@ var snippetGenerator = (templateName, inputPreparationFn) => {
|
|
|
2353
2353
|
model,
|
|
2354
2354
|
provider,
|
|
2355
2355
|
providerModelId: providerModelId ?? model.id,
|
|
2356
|
-
billTo
|
|
2356
|
+
billTo: opts?.billTo
|
|
2357
2357
|
};
|
|
2358
2358
|
return inferenceSnippetLanguages.map((language) => {
|
|
2359
2359
|
return CLIENTS[language].map((client) => {
|
|
@@ -2443,8 +2443,8 @@ var snippets = {
|
|
|
2443
2443
|
"zero-shot-classification": snippetGenerator("zeroShotClassification"),
|
|
2444
2444
|
"zero-shot-image-classification": snippetGenerator("zeroShotImageClassification")
|
|
2445
2445
|
};
|
|
2446
|
-
function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, billTo, opts) {
|
|
2447
|
-
return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, billTo, opts) ?? [] : [];
|
|
2446
|
+
function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, opts) {
|
|
2447
|
+
return model.pipeline_tag && model.pipeline_tag in snippets ? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? [] : [];
|
|
2448
2448
|
}
|
|
2449
2449
|
function formatBody(obj, format) {
|
|
2450
2450
|
switch (format) {
|
|
@@ -1,5 +1,9 @@
|
|
|
1
1
|
import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
|
|
2
2
|
import type { InferenceProvider } from "../types";
|
|
3
3
|
import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
|
|
4
|
-
export declare function getInferenceSnippets(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, inferenceProviderMapping?: InferenceProviderModelMapping, billTo?: string, opts?: Record<string, unknown>): InferenceSnippet[];
|
|
4
|
+
export type InferenceSnippetOptions = {
|
|
5
|
+
streaming?: boolean;
|
|
6
|
+
billTo?: string;
|
|
7
|
+
} & Record<string, unknown>;
|
|
8
|
+
export declare function getInferenceSnippets(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, inferenceProviderMapping?: InferenceProviderModelMapping, opts?: Record<string, unknown>): InferenceSnippet[];
|
|
5
9
|
//# sourceMappingURL=getInferenceSnippets.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAI5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAE9E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;
|
|
1
|
+
{"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAI5B,OAAO,KAAK,EAAE,iBAAiB,EAA8B,MAAM,UAAU,CAAC;AAE9E,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAiTzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
|
|
@@ -1,2 +1,2 @@
|
|
|
1
|
-
export { getInferenceSnippets } from "./getInferenceSnippets.js";
|
|
1
|
+
export { getInferenceSnippets, type InferenceSnippetOptions } from "./getInferenceSnippets.js";
|
|
2
2
|
//# sourceMappingURL=index.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/snippets/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,2BAA2B,CAAC"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/snippets/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,KAAK,uBAAuB,EAAE,MAAM,2BAA2B,CAAC"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@huggingface/inference",
|
|
3
|
-
"version": "3.8.0",
|
|
3
|
+
"version": "3.8.1",
|
|
4
4
|
"packageManager": "pnpm@8.10.5",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
|
|
@@ -40,7 +40,7 @@
|
|
|
40
40
|
},
|
|
41
41
|
"type": "module",
|
|
42
42
|
"dependencies": {
|
|
43
|
-
"@huggingface/tasks": "^0.18.7",
|
|
43
|
+
"@huggingface/tasks": "^0.18.8",
|
|
44
44
|
"@huggingface/jinja": "^0.3.4"
|
|
45
45
|
},
|
|
46
46
|
"devDependencies": {
|
|
@@ -14,6 +14,8 @@ import { templates } from "./templates.exported";
|
|
|
14
14
|
import type { InferenceProviderModelMapping } from "../lib/getInferenceProviderMapping";
|
|
15
15
|
import { getProviderHelper } from "../lib/getProviderHelper";
|
|
16
16
|
|
|
17
|
+
export type InferenceSnippetOptions = { streaming?: boolean; billTo?: string } & Record<string, unknown>;
|
|
18
|
+
|
|
17
19
|
const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"] as const;
|
|
18
20
|
const JS_CLIENTS = ["fetch", "huggingface.js", "openai"] as const;
|
|
19
21
|
const SH_CLIENTS = ["curl"] as const;
|
|
@@ -120,8 +122,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
|
|
|
120
122
|
accessToken: string,
|
|
121
123
|
provider: InferenceProvider,
|
|
122
124
|
inferenceProviderMapping?: InferenceProviderModelMapping,
|
|
123
|
-
billTo?: string,
|
124
|
-
opts?: Record<string, unknown>
|
|
125
|
+
opts?: InferenceSnippetOptions
|
|
125
126
|
): InferenceSnippet[] => {
|
|
126
127
|
const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
|
|
127
128
|
/// Hacky: hard-code conversational templates here
|
|
@@ -155,7 +156,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
|
|
|
155
156
|
inferenceProviderMapping,
|
|
156
157
|
{
|
|
157
158
|
task,
|
|
158
|
-
billTo,
|
|
159
|
+
billTo: opts?.billTo,
|
|
159
160
|
}
|
|
160
161
|
);
|
|
161
162
|
|
|
@@ -194,7 +195,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
|
|
|
194
195
|
model,
|
|
195
196
|
provider,
|
|
196
197
|
providerModelId: providerModelId ?? model.id,
|
|
197
|
-
billTo,
|
|
198
|
+
billTo: opts?.billTo,
|
|
198
199
|
};
|
|
199
200
|
|
|
200
201
|
/// Iterate over clients => check if a snippet exists => generate
|
|
@@ -283,8 +284,7 @@ const snippets: Partial<
|
|
|
283
284
|
accessToken: string,
|
|
284
285
|
provider: InferenceProvider,
|
|
285
286
|
inferenceProviderMapping?: InferenceProviderModelMapping,
|
|
286
|
-
billTo?: string,
|
287
|
-
opts?: Record<string, unknown>
|
|
287
|
+
opts?: InferenceSnippetOptions
|
|
288
288
|
) => InferenceSnippet[]
|
|
289
289
|
>
|
|
290
290
|
> = {
|
|
@@ -324,11 +324,10 @@ export function getInferenceSnippets(
|
|
|
324
324
|
accessToken: string,
|
|
325
325
|
provider: InferenceProvider,
|
|
326
326
|
inferenceProviderMapping?: InferenceProviderModelMapping,
|
|
327
|
-
billTo?: string,
|
|
328
327
|
opts?: Record<string, unknown>
|
|
329
328
|
): InferenceSnippet[] {
|
|
330
329
|
return model.pipeline_tag && model.pipeline_tag in snippets
|
|
331
|
-
? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, billTo, opts) ?? []
|
|
330
|
+
? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? []
|
|
332
331
|
: [];
|
|
333
332
|
}
|
|
334
333
|
|
package/src/snippets/index.ts
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export { getInferenceSnippets } from "./getInferenceSnippets.js";
|
|
1
|
+
export { getInferenceSnippets, type InferenceSnippetOptions } from "./getInferenceSnippets.js";
|