@huggingface/tasks 0.14.0 → 0.15.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commonjs/index.d.ts +1 -0
- package/dist/commonjs/index.d.ts.map +1 -1
- package/dist/commonjs/index.js +1 -0
- package/dist/commonjs/inference-providers.d.ts +10 -0
- package/dist/commonjs/inference-providers.d.ts.map +1 -0
- package/dist/commonjs/inference-providers.js +16 -0
- package/dist/commonjs/model-libraries-snippets.js +1 -1
- package/dist/commonjs/snippets/curl.d.ts +8 -8
- package/dist/commonjs/snippets/curl.d.ts.map +1 -1
- package/dist/commonjs/snippets/curl.js +58 -30
- package/dist/commonjs/snippets/js.d.ts +11 -10
- package/dist/commonjs/snippets/js.d.ts.map +1 -1
- package/dist/commonjs/snippets/js.js +162 -53
- package/dist/commonjs/snippets/python.d.ts +12 -12
- package/dist/commonjs/snippets/python.d.ts.map +1 -1
- package/dist/commonjs/snippets/python.js +141 -71
- package/dist/commonjs/snippets/types.d.ts +1 -1
- package/dist/commonjs/snippets/types.d.ts.map +1 -1
- package/dist/esm/index.d.ts +1 -0
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +1 -0
- package/dist/esm/inference-providers.d.ts +10 -0
- package/dist/esm/inference-providers.d.ts.map +1 -0
- package/dist/esm/inference-providers.js +12 -0
- package/dist/esm/model-libraries-snippets.js +1 -1
- package/dist/esm/snippets/curl.d.ts +8 -8
- package/dist/esm/snippets/curl.d.ts.map +1 -1
- package/dist/esm/snippets/curl.js +58 -29
- package/dist/esm/snippets/js.d.ts +11 -10
- package/dist/esm/snippets/js.d.ts.map +1 -1
- package/dist/esm/snippets/js.js +159 -50
- package/dist/esm/snippets/python.d.ts +12 -12
- package/dist/esm/snippets/python.d.ts.map +1 -1
- package/dist/esm/snippets/python.js +140 -69
- package/dist/esm/snippets/types.d.ts +1 -1
- package/dist/esm/snippets/types.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/index.ts +2 -0
- package/src/inference-providers.ts +16 -0
- package/src/model-libraries-snippets.ts +1 -1
- package/src/snippets/curl.ts +72 -23
- package/src/snippets/js.ts +189 -56
- package/src/snippets/python.ts +154 -75
- package/src/snippets/types.ts +1 -1
package/dist/commonjs/index.d.ts
CHANGED
@@ -19,4 +19,5 @@ export { LOCAL_APPS } from "./local-apps.js";
 export type { LocalApp, LocalAppKey, LocalAppSnippet } from "./local-apps.js";
 export { DATASET_LIBRARIES_UI_ELEMENTS } from "./dataset-libraries.js";
 export type { DatasetLibraryUiElement, DatasetLibraryKey } from "./dataset-libraries.js";
+export * from "./inference-providers.js";
 //# sourceMappingURL=index.d.ts.map
package/dist/commonjs/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,uBAAuB,CAAC;AAC7D,OAAO,EAAE,sBAAsB,EAAE,MAAM,4BAA4B,CAAC;AACpE,YAAY,EAAE,QAAQ,EAAE,QAAQ,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AACvF,cAAc,kBAAkB,CAAC;AACjC,OAAO,EACN,aAAa,EACb,cAAc,EACd,KAAK,UAAU,EACf,KAAK,YAAY,EACjB,KAAK,YAAY,EACjB,KAAK,QAAQ,EACb,UAAU,EACV,eAAe,EACf,aAAa,EACb,kBAAkB,GAClB,MAAM,gBAAgB,CAAC;AACxB,OAAO,EACN,8BAA8B,EAC9B,sBAAsB,EACtB,2BAA2B,GAC3B,MAAM,sBAAsB,CAAC;AAC9B,YAAY,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAC9E,YAAY,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnE,YAAY,EAAE,UAAU,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AACzF,YAAY,EACX,aAAa,EACb,sBAAsB,EACtB,gCAAgC,EAChC,8BAA8B,EAC9B,kCAAkC,EAClC,uBAAuB,EACvB,sBAAsB,EACtB,oCAAoC,EACpC,gCAAgC,EAChC,2BAA2B,EAC3B,gCAAgC,EAChC,8BAA8B,EAC9B,sBAAsB,EACtB,8BAA8B,EAC9B,mBAAmB,EACnB,sBAAsB,EACtB,yBAAyB,EACzB,8BAA8B,EAC9B,uBAAuB,GACvB,MAAM,qBAAqB,CAAC;AAC7B,OAAO,EAAE,yBAAyB,EAAE,MAAM,qBAAqB,CAAC;AAEhE,OAAO,KAAK,QAAQ,MAAM,qBAAqB,CAAC;AAChD,cAAc,WAAW,CAAC;AAE1B,OAAO,EAAE,QAAQ,EAAE,CAAC;AACpB,YAAY,EAAE,gBAAgB,EAAE,MAAM,qBAAqB,CAAC;AAE5D,OAAO,EAAE,IAAI,EAAE,sBAAsB,EAAE,MAAM,eAAe,CAAC;AAC7D,YAAY,EAAE,YAAY,EAAE,OAAO,EAAE,MAAM,eAAe,CAAC;AAC3D,OAAO,EAAE,UAAU,EAAE,MAAM,iBAAiB,CAAC;AAC7C,YAAY,EAAE,QAAQ,EAAE,WAAW,EAAE,eAAe,EAAE,MAAM,iBAAiB,CAAC;AAE9E,OAAO,EAAE,6BAA6B,EAAE,MAAM,wBAAwB,CAAC;AACvE,YAAY,EAAE,uBAAuB,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC"}
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,uBAAuB,CAAC;AAC7D,OAAO,EAAE,sBAAsB,EAAE,MAAM,4BAA4B,CAAC;AACpE,YAAY,EAAE,QAAQ,EAAE,QAAQ,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AACvF,cAAc,kBAAkB,CAAC;AACjC,OAAO,EACN,aAAa,EACb,cAAc,EACd,KAAK,UAAU,EACf,KAAK,YAAY,EACjB,KAAK,YAAY,EACjB,KAAK,QAAQ,EACb,UAAU,EACV,eAAe,EACf,aAAa,EACb,kBAAkB,GAClB,MAAM,gBAAgB,CAAC;AACxB,OAAO,EACN,8BAA8B,EAC9B,sBAAsB,EACtB,2BAA2B,GAC3B,MAAM,sBAAsB,CAAC;AAC9B,YAAY,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAC9E,YAAY,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnE,YAAY,EAAE,UAAU,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AACzF,YAAY,EACX,aAAa,EACb,sBAAsB,EACtB,gCAAgC,EAChC,8BAA8B,EAC9B,kCAAkC,EAClC,uBAAuB,EACvB,sBAAsB,EACtB,oCAAoC,EACpC,gCAAgC,EAChC,2BAA2B,EAC3B,gCAAgC,EAChC,8BAA8B,EAC9B,sBAAsB,EACtB,8BAA8B,EAC9B,mBAAmB,EACnB,sBAAsB,EACtB,yBAAyB,EACzB,8BAA8B,EAC9B,uBAAuB,GACvB,MAAM,qBAAqB,CAAC;AAC7B,OAAO,EAAE,yBAAyB,EAAE,MAAM,qBAAqB,CAAC;AAEhE,OAAO,KAAK,QAAQ,MAAM,qBAAqB,CAAC;AAChD,cAAc,WAAW,CAAC;AAE1B,OAAO,EAAE,QAAQ,EAAE,CAAC;AACpB,YAAY,EAAE,gBAAgB,EAAE,MAAM,qBAAqB,CAAC;AAE5D,OAAO,EAAE,IAAI,EAAE,sBAAsB,EAAE,MAAM,eAAe,CAAC;AAC7D,YAAY,EAAE,YAAY,EAAE,OAAO,EAAE,MAAM,eAAe,CAAC;AAC3D,OAAO,EAAE,UAAU,EAAE,MAAM,iBAAiB,CAAC;AAC7C,YAAY,EAAE,QAAQ,EAAE,WAAW,EAAE,eAAe,EAAE,MAAM,iBAAiB,CAAC;AAE9E,OAAO,EAAE,6BAA6B,EAAE,MAAM,wBAAwB,CAAC;AACvE,YAAY,EAAE,uBAAuB,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAEzF,cAAc,0BAA0B,CAAC"}
package/dist/commonjs/index.js
CHANGED
@@ -55,3 +55,4 @@ var local_apps_js_1 = require("./local-apps.js");
 Object.defineProperty(exports, "LOCAL_APPS", { enumerable: true, get: function () { return local_apps_js_1.LOCAL_APPS; } });
 var dataset_libraries_js_1 = require("./dataset-libraries.js");
 Object.defineProperty(exports, "DATASET_LIBRARIES_UI_ELEMENTS", { enumerable: true, get: function () { return dataset_libraries_js_1.DATASET_LIBRARIES_UI_ELEMENTS; } });
+__exportStar(require("./inference-providers.js"), exports);
package/dist/commonjs/inference-providers.d.ts
ADDED
@@ -0,0 +1,10 @@
+export declare const INFERENCE_PROVIDERS: readonly ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
+export type InferenceProvider = (typeof INFERENCE_PROVIDERS)[number];
+export declare const HF_HUB_INFERENCE_PROXY_TEMPLATE = "https://huggingface.co/api/inference-proxy/{{PROVIDER}}";
+/**
+ * URL to set as baseUrl in the OpenAI SDK.
+ *
+ * TODO(Expose this from HfInference in the future?)
+ */
+export declare function openAIbaseUrl(provider: InferenceProvider): string;
+//# sourceMappingURL=inference-providers.d.ts.map
package/dist/commonjs/inference-providers.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"inference-providers.d.ts","sourceRoot":"","sources":["../../src/inference-providers.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,mBAAmB,2EAA4E,CAAC;AAE7G,MAAM,MAAM,iBAAiB,GAAG,CAAC,OAAO,mBAAmB,CAAC,CAAC,MAAM,CAAC,CAAC;AAErE,eAAO,MAAM,+BAA+B,4DAA4D,CAAC;AAEzG;;;;GAIG;AACH,wBAAgB,aAAa,CAAC,QAAQ,EAAE,iBAAiB,GAAG,MAAM,CAIjE"}
package/dist/commonjs/inference-providers.js
ADDED
@@ -0,0 +1,16 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = exports.INFERENCE_PROVIDERS = void 0;
+exports.openAIbaseUrl = openAIbaseUrl;
+exports.INFERENCE_PROVIDERS = ["hf-inference", "fal-ai", "replicate", "sambanova", "together"];
+exports.HF_HUB_INFERENCE_PROXY_TEMPLATE = `https://huggingface.co/api/inference-proxy/{{PROVIDER}}`;
+/**
+ * URL to set as baseUrl in the OpenAI SDK.
+ *
+ * TODO(Expose this from HfInference in the future?)
+ */
+function openAIbaseUrl(provider) {
+    return provider === "hf-inference"
+        ? "https://api-inference.huggingface.co/v1/"
+        : exports.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider);
+}
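Note: the new openAIbaseUrl helper picks the OpenAI-compatible base URL for a given provider. A minimal TypeScript sketch of its behavior, assuming the root re-export added by this release (both names come straight from this diff; the printed URLs follow from the logic above):

import { INFERENCE_PROVIDERS, openAIbaseUrl } from "@huggingface/tasks";

// "hf-inference" keeps the classic serverless endpoint; every other
// provider is routed through the Hub's inference proxy.
for (const provider of INFERENCE_PROVIDERS) {
	console.log(`${provider} -> ${openAIbaseUrl(provider)}`);
}
// hf-inference -> https://api-inference.huggingface.co/v1/
// fal-ai       -> https://huggingface.co/api/inference-proxy/fal-ai
// replicate    -> https://huggingface.co/api/inference-proxy/replicate
// sambanova    -> https://huggingface.co/api/inference-proxy/sambanova
// together     -> https://huggingface.co/api/inference-proxy/together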
package/dist/commonjs/model-libraries-snippets.js
CHANGED
@@ -499,7 +499,7 @@ const keras_hub = (model) => {
 	}
 }
 // Then, add remaining tasks
-for (const task
+for (const task of tasks) {
 	if (!Object.keys(_keras_hub_tasks_with_example).includes(task)) {
 		snippets.push(_keras_hub_task_without_example(task, modelId));
 	}
package/dist/commonjs/snippets/curl.d.ts
CHANGED
@@ -1,17 +1,17 @@
+import { type InferenceProvider } from "../inference-providers.js";
 import type { PipelineType } from "../pipelines.js";
 import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
 import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
-export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, opts?: {
+export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
     streaming?: boolean;
     messages?: ChatCompletionInputMessage[];
     temperature?: GenerationParameters["temperature"];
     max_tokens?: GenerationParameters["max_tokens"];
     top_p?: GenerationParameters["top_p"];
-}) => InferenceSnippet;
-export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
-export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet;
-export declare function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean;
+}) => InferenceSnippet[];
+export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
 //# sourceMappingURL=curl.d.ts.map
package/dist/commonjs/snippets/curl.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,
+{"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,iBAAiB,EAAE,MAAM,2BAA2B,CAAC;AACpG,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAErE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,iBAAiB,SACpB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA0ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
package/dist/commonjs/snippets/curl.js
CHANGED
@@ -2,19 +2,31 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.curlSnippets = exports.snippetFile = exports.snippetZeroShotClassification = exports.snippetTextGeneration = exports.snippetBasic = void 0;
 exports.getCurlInferenceSnippet = getCurlInferenceSnippet;
-
+const inference_providers_js_1 = require("../inference-providers.js");
 const common_js_1 = require("./common.js");
 const inputs_js_1 = require("./inputs.js");
-const snippetBasic = (model, accessToken) =>
-
+const snippetBasic = (model, accessToken, provider) => {
+    if (provider !== "hf-inference") {
+        return [];
+    }
+    return [
+        {
+            client: "curl",
+            content: `\
+curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	-d '{"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model, true)}}' \\
 	-H 'Content-Type: application/json' \\
 	-H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
-}
+        },
+    ];
+};
 exports.snippetBasic = snippetBasic;
-const snippetTextGeneration = (model, accessToken, opts) => {
+const snippetTextGeneration = (model, accessToken, provider, opts) => {
     if (model.tags.includes("conversational")) {
+        const baseUrl = provider === "hf-inference"
+            ? `https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions`
+            : inference_providers_js_1.HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) + "/v1/chat/completions";
         // Conversational model detected, so we display a code snippet that features the Messages API
         const streaming = opts?.streaming ?? true;
         const exampleMessages = (0, inputs_js_1.getModelInputSnippet)(model);
@@ -24,48 +36,67 @@ const snippetTextGeneration = (model, accessToken, opts) => {
         max_tokens: opts?.max_tokens ?? 500,
         ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
     };
-    return
-
+        return [
+            {
+                client: "curl",
+                content: `curl '${baseUrl}' \\
 -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
 -H 'Content-Type: application/json' \\
 --data '{
     "model": "${model.id}",
     "messages": ${(0, common_js_1.stringifyMessages)(messages, {
-
-
-
-
+                    indent: "\t",
+                    attributeKeyQuotes: true,
+                    customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
+                })},
     ${(0, common_js_1.stringifyGenerationConfig)(config, {
-
-
-
-
+                    indent: "\n ",
+                    attributeKeyQuotes: true,
+                    attributeValueConnector: ": ",
+                })},
     "stream": ${!!streaming}
 }'`,
-
+            },
+        ];
     }
     else {
-        return (0, exports.snippetBasic)(model, accessToken);
+        return (0, exports.snippetBasic)(model, accessToken, provider);
     }
 };
 exports.snippetTextGeneration = snippetTextGeneration;
-const snippetZeroShotClassification = (model, accessToken) =>
-
+const snippetZeroShotClassification = (model, accessToken, provider) => {
+    if (provider !== "hf-inference") {
+        return [];
+    }
+    return [
+        {
+            client: "curl",
+            content: `curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	-d '{"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
 	-H 'Content-Type: application/json' \\
 	-H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
-}
+        },
+    ];
+};
 exports.snippetZeroShotClassification = snippetZeroShotClassification;
-const snippetFile = (model, accessToken) =>
-
+const snippetFile = (model, accessToken, provider) => {
+    if (provider !== "hf-inference") {
+        return [];
+    }
+    return [
+        {
+            client: "curl",
+            content: `curl https://api-inference.huggingface.co/models/${model.id} \\
 	-X POST \\
 	--data-binary '@${(0, inputs_js_1.getModelInputSnippet)(model, true, true)}' \\
 	-H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
-}
+        },
+    ];
+};
 exports.snippetFile = snippetFile;
 exports.curlSnippets = {
-    // Same order as in
+    // Same order as in tasks/src/pipelines.ts
     "text-classification": exports.snippetBasic,
     "token-classification": exports.snippetBasic,
     "table-question-answering": exports.snippetBasic,
@@ -90,11 +121,8 @@ exports.curlSnippets = {
     "object-detection": exports.snippetFile,
     "image-segmentation": exports.snippetFile,
 };
-function getCurlInferenceSnippet(model, accessToken, opts) {
+function getCurlInferenceSnippet(model, accessToken, provider, opts) {
     return model.pipeline_tag && model.pipeline_tag in exports.curlSnippets
-        ? exports.curlSnippets[model.pipeline_tag]?.(model, accessToken, opts) ??
-        :
-}
-function hasCurlInferenceSnippet(model) {
-    return !!model.pipeline_tag && model.pipeline_tag in exports.curlSnippets;
+        ? exports.curlSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+        : [];
 }
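Note: the curl generator now takes a provider argument and returns InferenceSnippet[] (empty when a task is not served by that provider); only chat-completion requests are routed through the proxy, e.g. https://huggingface.co/api/inference-proxy/together/v1/chat/completions. A minimal sketch of the new call shape, assuming the package's usual `snippets` namespace export and an illustrative model descriptor (the model values and token below are placeholders, not from this diff):

import { snippets } from "@huggingface/tasks";

// Hypothetical minimal model data, shaped like ModelDataMinimal.
const model = {
	id: "meta-llama/Llama-3.1-8B-Instruct",
	pipeline_tag: "text-generation" as const,
	tags: ["conversational"],
	inference: "",
};

// New third argument: the inference provider. The result is an array,
// so callers iterate instead of expecting a single snippet.
const result = snippets.curl.getCurlInferenceSnippet(model, "hf_xxx", "together");
for (const { client, content } of result) {
	console.log(`# ${client}\n${content}`);
}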
package/dist/commonjs/snippets/js.d.ts
CHANGED
@@ -1,19 +1,20 @@
+import { type InferenceProvider } from "../inference-providers.js";
 import type { PipelineType } from "../pipelines.js";
 import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
 import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
-export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, opts?: {
+export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: {
     streaming?: boolean;
     messages?: ChatCompletionInputMessage[];
     temperature?: GenerationParameters["temperature"];
     max_tokens?: GenerationParameters["max_tokens"];
     top_p?: GenerationParameters["top_p"];
-}) => InferenceSnippet
-export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
-export declare const
-export declare const
-export declare
-export declare function
+}) => InferenceSnippet[];
+export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet[];
+export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider) => InferenceSnippet[];
+export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
+export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: InferenceProvider, opts?: Record<string, unknown>): InferenceSnippet[];
 //# sourceMappingURL=js.d.ts.map
package/dist/commonjs/snippets/js.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;
+{"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,iBAAiB,EAAE,MAAM,2BAA2B,CAAC;AAClF,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AACpD,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAgBrE,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,iBAAiB,SACpB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA6GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,iBAAiB,KACzB,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,iBAAiB,EAC3B,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
package/dist/commonjs/snippets/js.js
CHANGED
@@ -1,12 +1,49 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.jsSnippets = exports.snippetFile = exports.snippetTextToAudio = exports.snippetTextToImage = exports.snippetZeroShotClassification = exports.snippetTextGeneration = exports.snippetBasic = void 0;
+exports.jsSnippets = exports.snippetFile = exports.snippetAutomaticSpeechRecognition = exports.snippetTextToAudio = exports.snippetTextToImage = exports.snippetZeroShotClassification = exports.snippetTextGeneration = exports.snippetBasic = void 0;
 exports.getJsInferenceSnippet = getJsInferenceSnippet;
-
+const inference_providers_js_1 = require("../inference-providers.js");
 const common_js_1 = require("./common.js");
 const inputs_js_1 = require("./inputs.js");
-const
-
+const HFJS_METHODS = {
+    "text-classification": "textClassification",
+    "token-classification": "tokenClassification",
+    "table-question-answering": "tableQuestionAnswering",
+    "question-answering": "questionAnswering",
+    translation: "translation",
+    summarization: "summarization",
+    "feature-extraction": "featureExtraction",
+    "text-generation": "textGeneration",
+    "text2text-generation": "textGeneration",
+    "fill-mask": "fillMask",
+    "sentence-similarity": "sentenceSimilarity",
+};
+const snippetBasic = (model, accessToken, provider) => {
+    return [
+        ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
+            ? [
+                {
+                    client: "huggingface.js",
+                    content: `\
+import { HfInference } from "@huggingface/inference";
+
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+
+const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
+	model: "${model.id}",
+	inputs: ${(0, inputs_js_1.getModelInputSnippet)(model)},
+	provider: "${provider}",
+});
+
+console.log(output)
+`,
+                },
+            ]
+            : []),
+        {
+            client: "fetch",
+            content: `\
+async function query(data) {
 	const response = await fetch(
 		"https://api-inference.huggingface.co/models/${model.id}",
 		{
@@ -25,9 +62,11 @@ const snippetBasic = (model, accessToken) => ({
 query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
 	console.log(JSON.stringify(response));
 });`,
-}
+        },
+    ];
+};
 exports.snippetBasic = snippetBasic;
-const snippetTextGeneration = (model, accessToken, opts) => {
+const snippetTextGeneration = (model, accessToken, provider, opts) => {
     if (model.tags.includes("conversational")) {
         // Conversational model detected, so we display a code snippet that features the Messages API
         const streaming = opts?.streaming ?? true;
@@ -56,6 +95,7 @@ let out = "";
 const stream = client.chatCompletionStream({
 	model: "${model.id}",
 	messages: ${messagesStr},
+	provider: "${provider}",
 ${configStr}
 });
 
@@ -72,8 +112,8 @@ for await (const chunk of stream) {
 content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
-	baseURL: "
-
+	baseURL: "${(0, inference_providers_js_1.openAIbaseUrl)(provider)}",
+	apiKey: "${accessToken || `{API_TOKEN}`}"
 });
 
 let out = "";
@@ -106,6 +146,7 @@ const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 const chatCompletion = await client.chatCompletion({
 	model: "${model.id}",
 	messages: ${messagesStr},
+	provider: "${provider}",
 ${configStr}
 });
 
@@ -116,8 +157,8 @@ console.log(chatCompletion.choices[0].message);
 content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
-
-
+	baseURL: "${(0, inference_providers_js_1.openAIbaseUrl)(provider)}",
+	apiKey: "${accessToken || `{API_TOKEN}`}"
 });
 
 const chatCompletion = await client.chat.completions.create({
@@ -132,34 +173,60 @@ console.log(chatCompletion.choices[0].message);`,
         }
     }
     else {
-        return (0, exports.snippetBasic)(model, accessToken);
+        return (0, exports.snippetBasic)(model, accessToken, provider);
     }
 };
 exports.snippetTextGeneration = snippetTextGeneration;
-const snippetZeroShotClassification = (model, accessToken) =>
-
-
-
-
-
-
-
-
-
+const snippetZeroShotClassification = (model, accessToken) => {
+    return [
+        {
+            client: "fetch",
+            content: `async function query(data) {
+	const response = await fetch(
+		"https://api-inference.huggingface.co/models/${model.id}",
+		{
+			headers: {
+				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
+				"Content-Type": "application/json",
+			},
+			method: "POST",
+			body: JSON.stringify(data),
+		}
+	);
+	const result = await response.json();
+	return result;
 }
-
-
-
-}
+
+query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
+	console.log(JSON.stringify(response));
+});`,
+        },
+    ];
+};
+exports.snippetZeroShotClassification = snippetZeroShotClassification;
+const snippetTextToImage = (model, accessToken, provider) => {
+    return [
+        {
+            client: "huggingface.js",
+            content: `\
+import { HfInference } from "@huggingface/inference";
 
-
-
-
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+
+const image = await client.textToImage({
+	model: "${model.id}",
+	inputs: ${(0, inputs_js_1.getModelInputSnippet)(model)},
+	parameters: { num_inference_steps: 5 },
+	provider: "${provider}",
 });
-
-
-
+/// Use the generated image (it's a Blob)
+`,
+        },
+        ...(provider === "hf-inference"
+            ? [
+                {
+                    client: "fetch",
+                    content: `async function query(data) {
 	const response = await fetch(
 		"https://api-inference.huggingface.co/models/${model.id}",
 		{
@@ -177,9 +244,16 @@ const snippetTextToImage = (model, accessToken) => ({
 query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
 	// Use image
 });`,
-}
+                },
+            ]
+            : []),
+    ];
+};
 exports.snippetTextToImage = snippetTextToImage;
-const snippetTextToAudio = (model, accessToken) => {
+const snippetTextToAudio = (model, accessToken, provider) => {
+    if (provider !== "hf-inference") {
+        return [];
+    }
     const commonSnippet = `async function query(data) {
 	const response = await fetch(
 		"https://api-inference.huggingface.co/models/${model.id}",
@@ -193,21 +267,26 @@ const snippetTextToAudio = (model, accessToken) => {
 	}
 );`;
     if (model.library_name === "transformers") {
-        return
-
-
+        return [
+            {
+                client: "fetch",
+                content: commonSnippet +
+                    `
 const result = await response.blob();
 	return result;
 }
 query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
 	// Returns a byte object of the Audio wavform. Use it directly!
 });`,
-
+            },
+        ];
     }
     else {
-        return
-
-
+        return [
+            {
+                client: "fetch",
+                content: commonSnippet +
+                    `
 const result = await response.json();
 	return result;
 }
@@ -215,12 +294,43 @@ const snippetTextToAudio = (model, accessToken) => {
 query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
 	console.log(JSON.stringify(response));
 });`,
-
+            },
+        ];
     }
 };
 exports.snippetTextToAudio = snippetTextToAudio;
-const
-
+const snippetAutomaticSpeechRecognition = (model, accessToken, provider) => {
+    return [
+        {
+            client: "huggingface.js",
+            content: `\
+import { HfInference } from "@huggingface/inference";
+
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
+
+const data = fs.readFileSync(${(0, inputs_js_1.getModelInputSnippet)(model)});
+
+const output = await client.automaticSpeechRecognition({
+	data,
+	model: "${model.id}",
+	provider: "${provider}",
+});
+
+console.log(output);
+`,
+        },
+        ...(provider === "hf-inference" ? (0, exports.snippetFile)(model, accessToken, provider) : []),
+    ];
+};
+exports.snippetAutomaticSpeechRecognition = snippetAutomaticSpeechRecognition;
+const snippetFile = (model, accessToken, provider) => {
+    if (provider !== "hf-inference") {
+        return [];
+    }
+    return [
+        {
+            client: "fetch",
+            content: `async function query(filename) {
 	const data = fs.readFileSync(filename);
 	const response = await fetch(
 		"https://api-inference.huggingface.co/models/${model.id}",
@@ -240,10 +350,12 @@ const snippetFile = (model, accessToken) => ({
 query(${(0, inputs_js_1.getModelInputSnippet)(model)}).then((response) => {
 	console.log(JSON.stringify(response));
 });`,
-}
+        },
+    ];
+};
 exports.snippetFile = snippetFile;
 exports.jsSnippets = {
-    // Same order as in
+    // Same order as in tasks/src/pipelines.ts
     "text-classification": exports.snippetBasic,
     "token-classification": exports.snippetBasic,
     "table-question-answering": exports.snippetBasic,
@@ -257,7 +369,7 @@ exports.jsSnippets = {
     "text2text-generation": exports.snippetBasic,
     "fill-mask": exports.snippetBasic,
     "sentence-similarity": exports.snippetBasic,
-    "automatic-speech-recognition": exports.
+    "automatic-speech-recognition": exports.snippetAutomaticSpeechRecognition,
     "text-to-image": exports.snippetTextToImage,
     "text-to-speech": exports.snippetTextToAudio,
    "text-to-audio": exports.snippetTextToAudio,
@@ -268,11 +380,8 @@ exports.jsSnippets = {
     "object-detection": exports.snippetFile,
     "image-segmentation": exports.snippetFile,
 };
-function getJsInferenceSnippet(model, accessToken, opts) {
+function getJsInferenceSnippet(model, accessToken, provider, opts) {
     return model.pipeline_tag && model.pipeline_tag in exports.jsSnippets
-        ? exports.jsSnippets[model.pipeline_tag]?.(model, accessToken, opts) ??
-        :
-}
-function hasJsInferenceSnippet(model) {
-    return !!model.pipeline_tag && model.pipeline_tag in exports.jsSnippets;
+        ? exports.jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, opts) ?? []
+        : [];
 }
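Note: the JS generator follows the same pattern, with each pipeline handler returning one entry per client ("huggingface.js", "fetch", or "openai" for conversational models) so callers can render one tab per client. A short sketch under the same assumptions as the curl example above (namespace export assumed; model values are placeholders):

import { snippets } from "@huggingface/tasks";

// Hypothetical minimal model data, shaped like ModelDataMinimal.
const model = {
	id: "openai/whisper-large-v3",
	pipeline_tag: "automatic-speech-recognition" as const,
	tags: [],
	inference: "",
};

// For a provider other than "hf-inference", the raw "fetch" snippet is
// omitted and only the "huggingface.js" client remains for this task.
const jsSnippets = snippets.js.getJsInferenceSnippet(model, "hf_xxx", "fal-ai");
console.log(jsSnippets.map((s) => s.client)); // ["huggingface.js"]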