@huggingface/inference 3.9.0 → 3.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +3 -27
- package/dist/index.js +3 -27
- package/dist/src/lib/getInferenceProviderMapping.d.ts +2 -2
- package/dist/src/lib/getInferenceProviderMapping.d.ts.map +1 -1
- package/dist/src/providers/fal-ai.d.ts.map +1 -1
- package/package.json +3 -3
- package/src/lib/getInferenceProviderMapping.ts +1 -17
- package/src/providers/fal-ai.ts +0 -10
- package/src/snippets/templates.exported.ts +1 -1
package/dist/index.cjs
CHANGED

@@ -342,15 +342,7 @@ var FalAITextToImageTask = class extends FalAITask {
       ...omit(params.args, ["inputs", "parameters"]),
       ...params.args.parameters,
       sync_mode: true,
-      prompt: params.args.inputs,
-      ...params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath ? {
-        loras: [
-          {
-            path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
-            scale: 1
-          }
-        ]
-      } : void 0
+      prompt: params.args.inputs
     };
     if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
       payload.loras = [
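
In 3.9.0 the LoRA options were spliced into the fal-ai text-to-image payload through a conditional spread; 3.9.2 builds the base payload first and assigns loras afterwards (the imperative block is visible in the trailing context lines above). A minimal TypeScript sketch of the resulting pattern, with hypothetical type names and an illustrative stand-in for the package-internal buildLoraPath helper:

interface LoraWeight {
  path: string;
  scale: number;
}

interface FalTextToImagePayload {
  prompt: string;
  sync_mode: boolean;
  loras?: LoraWeight[];
}

// Illustrative stand-in; the real buildLoraPath lives inside the package.
function buildLoraPath(hfModelId: string, weightsPath: string): string {
  return `${hfModelId}/${weightsPath}`;
}

// Example mapping, shaped like InferenceProviderModelMapping.
const mapping = {
  adapter: "lora",
  adapterWeightsPath: "pytorch_lora_weights.safetensors",
  hfModelId: "user/my-flux-lora",
};

// Build the base payload first, then attach LoRA weights imperatively.
const payload: FalTextToImagePayload = {
  prompt: "an astronaut riding a horse",
  sync_mode: true,
};
if (mapping.adapter === "lora" && mapping.adapterWeightsPath) {
  payload.loras = [{ path: buildLoraPath(mapping.hfModelId, mapping.adapterWeightsPath), scale: 1 }];
}
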

@@ -1323,7 +1315,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.9.0";
+var version = "3.9.2";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_INFERENCE_MAPPING = {

@@ -1388,22 +1380,6 @@ async function getInferenceProviderMapping(params, options) {
         `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
       );
     }
-    if (providerMapping.adapter === "lora") {
-      const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
-      if (!treeResp.ok) {
-        throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
-      }
-      const tree = await treeResp.json();
-      const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
-      if (!adapterWeightsPath) {
-        throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
-      }
-      return {
-        ...providerMapping,
-        hfModelId: params.modelId,
-        adapterWeightsPath
-      };
-    }
     return { ...providerMapping, hfModelId: params.modelId };
   }
   return null;

@@ -2303,7 +2279,7 @@ var templates = {
   "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
   "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
   "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);',
-  "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream =
+  "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n }\n}',
   "textToImage": `import { InferenceClient } from "@huggingface/inference";
 
 const client = new InferenceClient("{{ accessToken }}");
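
In 3.9.0 the conversationalStream template string was cut off mid-expression, so the generated streaming snippets were unusable; 3.9.2 restores the full template. Rendered with a placeholder token and illustrative provider, model, and messages (the parts the template leaves to {{ ... }} substitutions), the output looks like:

import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // placeholder access token

let out = "";

const stream = client.chatCompletionStream({
  provider: "novita", // illustrative provider
  model: "meta-llama/Llama-3.1-8B-Instruct", // illustrative model
  messages: [{ role: "user", content: "What is the capital of France?" }],
});

for await (const chunk of stream) {
  if (chunk.choices && chunk.choices.length > 0) {
    const newContent = chunk.choices[0].delta.content;
    out += newContent;
    console.log(newContent);
  }
}
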
package/dist/index.js
CHANGED

@@ -285,15 +285,7 @@ var FalAITextToImageTask = class extends FalAITask {
       ...omit(params.args, ["inputs", "parameters"]),
       ...params.args.parameters,
       sync_mode: true,
-      prompt: params.args.inputs,
-      ...params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath ? {
-        loras: [
-          {
-            path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
-            scale: 1
-          }
-        ]
-      } : void 0
+      prompt: params.args.inputs
     };
     if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
       payload.loras = [

@@ -1266,7 +1258,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.9.0";
+var version = "3.9.2";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_INFERENCE_MAPPING = {

@@ -1331,22 +1323,6 @@ async function getInferenceProviderMapping(params, options) {
         `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
       );
     }
-    if (providerMapping.adapter === "lora") {
-      const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
-      if (!treeResp.ok) {
-        throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
-      }
-      const tree = await treeResp.json();
-      const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
-      if (!adapterWeightsPath) {
-        throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
-      }
-      return {
-        ...providerMapping,
-        hfModelId: params.modelId,
-        adapterWeightsPath
-      };
-    }
     return { ...providerMapping, hfModelId: params.modelId };
   }
   return null;

@@ -2249,7 +2225,7 @@ var templates = {
   "basicAudio": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
   "basicImage": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n data,\n model: "{{ model.id }}",\n provider: "{{ provider }}",\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(output);',
   "conversational": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nconst chatCompletion = await client.chatCompletion({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);',
-  "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream =
+  "conversationalStream": 'import { InferenceClient } from "@huggingface/inference";\n\nconst client = new InferenceClient("{{ accessToken }}");\n\nlet out = "";\n\nconst stream = client.chatCompletionStream({\n provider: "{{ provider }}",\n model: "{{ model.id }}",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: "{{ billTo }}",\n}{% endif %});\n\nfor await (const chunk of stream) {\n if (chunk.choices && chunk.choices.length > 0) {\n const newContent = chunk.choices[0].delta.content;\n out += newContent;\n console.log(newContent);\n }\n}',
   "textToImage": `import { InferenceClient } from "@huggingface/inference";
 
 const client = new InferenceClient("{{ accessToken }}");

package/dist/src/lib/getInferenceProviderMapping.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import type { WidgetType } from "@huggingface/tasks";
 import type { InferenceProvider, ModelId } from "../types";
-export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId"
-export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId"
+export declare const inferenceProviderMappingCache: Map<string, Partial<Record<"black-forest-labs" | "cerebras" | "cohere" | "fal-ai" | "featherless-ai" | "fireworks-ai" | "groq" | "hf-inference" | "hyperbolic" | "nebius" | "novita" | "nscale" | "openai" | "replicate" | "sambanova" | "together", Omit<InferenceProviderModelMapping, "hfModelId">>>>;
+export type InferenceProviderMapping = Partial<Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>>;
 export interface InferenceProviderModelMapping {
   adapter?: string;
   adapterWeightsPath?: string;

package/dist/src/lib/getInferenceProviderMapping.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,
+{"version":3,"file":"getInferenceProviderMapping.d.ts","sourceRoot":"","sources":["../../../src/lib/getInferenceProviderMapping.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,oBAAoB,CAAC;AACrD,OAAO,KAAK,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,UAAU,CAAC;AAM3D,eAAO,MAAM,6BAA6B,0SAA+C,CAAC;AAE1F,MAAM,MAAM,wBAAwB,GAAG,OAAO,CAC7C,MAAM,CAAC,iBAAiB,EAAE,IAAI,CAAC,6BAA6B,EAAE,WAAW,CAAC,CAAC,CAC3E,CAAC;AAEF,MAAM,WAAW,6BAA6B;IAC7C,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAC5B,SAAS,EAAE,OAAO,CAAC;IACnB,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,GAAG,SAAS,CAAC;IAC3B,IAAI,EAAE,UAAU,CAAC;CACjB;AAED,wBAAsB,2BAA2B,CAChD,MAAM,EAAE;IACP,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,iBAAiB,CAAC;IAC5B,IAAI,EAAE,UAAU,CAAC;CACjB,EACD,OAAO,EAAE;IACR,KAAK,CAAC,EAAE,CAAC,KAAK,EAAE,WAAW,EAAE,IAAI,CAAC,EAAE,WAAW,KAAK,OAAO,CAAC,QAAQ,CAAC,CAAC;CACtE,GACC,OAAO,CAAC,6BAA6B,GAAG,IAAI,CAAC,CA+C/C"}

package/dist/src/providers/fal-ai.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AACH,OAAO,KAAK,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAG3E,OAAO,KAAK,EAAE,UAAU,EAAE,YAAY,EAAW,SAAS,EAAE,MAAM,UAAU,CAAC;AAG7E,OAAO,EACN,KAAK,oCAAoC,EACzC,kBAAkB,EAClB,KAAK,qBAAqB,EAC1B,KAAK,qBAAqB,EAC1B,MAAM,kBAAkB,CAAC;AAG1B,MAAM,WAAW,gBAAgB;IAChC,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,EAAE,MAAM,CAAC;CACrB;AAED,UAAU,sBAAsB;IAC/B,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;KACZ,CAAC,CAAC;CACH;AAYD,eAAO,MAAM,2BAA2B,UAA0D,CAAC;AAEnG,uBAAe,SAAU,SAAQ,kBAAkB;gBACtC,GAAG,CAAC,EAAE,MAAM;IAIxB,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAG3D,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAG3B,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;CAUtF;AAMD,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;IAC1E,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;
+{"version":3,"file":"fal-ai.d.ts","sourceRoot":"","sources":["../../../src/providers/fal-ai.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG;AACH,OAAO,KAAK,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAG3E,OAAO,KAAK,EAAE,UAAU,EAAE,YAAY,EAAW,SAAS,EAAE,MAAM,UAAU,CAAC;AAG7E,OAAO,EACN,KAAK,oCAAoC,EACzC,kBAAkB,EAClB,KAAK,qBAAqB,EAC1B,KAAK,qBAAqB,EAC1B,MAAM,kBAAkB,CAAC;AAG1B,MAAM,WAAW,gBAAgB;IAChC,UAAU,EAAE,MAAM,CAAC;IACnB,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,EAAE,MAAM,CAAC;CACrB;AAED,UAAU,sBAAsB;IAC/B,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;KACZ,CAAC,CAAC;CACH;AAYD,eAAO,MAAM,2BAA2B,UAA0D,CAAC;AAEnG,uBAAe,SAAU,SAAQ,kBAAkB;gBACtC,GAAG,CAAC,EAAE,MAAM;IAIxB,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAG3D,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAG3B,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;CAUtF;AAMD,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;IAC1E,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAuBrD,WAAW,CAAC,QAAQ,EAAE,sBAAsB,EAAE,UAAU,CAAC,EAAE,KAAK,GAAG,MAAM,GAAG,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;CAkBjH;AAED,qBAAa,oBAAqB,SAAQ,SAAU,YAAW,qBAAqB;;IAI1E,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAMpC,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CACzB,QAAQ,EAAE,gBAAgB,EAC1B,GAAG,CAAC,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAC9B,OAAO,CAAC,IAAI,CAAC;CA8DhB;AAED,qBAAa,mCAAoC,SAAQ,SAAU,YAAW,oCAAoC;IACxG,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,MAAM,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;IAKvE,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,gCAAgC,CAAC;CASxF;AAED,qBAAa,qBAAsB,SAAQ,SAAS;IAC1C,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAQrD,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;CAqB5D"}
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@huggingface/inference",
-  "version": "3.9.0",
+  "version": "3.9.2",
   "packageManager": "pnpm@8.10.5",
   "license": "MIT",
   "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",

@@ -40,8 +40,8 @@
   },
   "type": "module",
   "dependencies": {
-    "@huggingface/
-    "@huggingface/
+    "@huggingface/tasks": "^0.18.11",
+    "@huggingface/jinja": "^0.3.4"
   },
   "devDependencies": {
     "@types/node": "18.13.0"

package/src/lib/getInferenceProviderMapping.ts
CHANGED

@@ -8,7 +8,7 @@ import { typedInclude } from "../utils/typedInclude";
 export const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMapping>();
 
 export type InferenceProviderMapping = Partial<
-  Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId"
+  Record<InferenceProvider, Omit<InferenceProviderModelMapping, "hfModelId">>
 >;
 
 export interface InferenceProviderModelMapping {

@@ -74,22 +74,6 @@ export async function getInferenceProviderMapping(
       `Model ${params.modelId} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
     );
   }
-  if (providerMapping.adapter === "lora") {
-    const treeResp = await (options?.fetch ?? fetch)(`${HF_HUB_URL}/api/models/${params.modelId}/tree/main`);
-    if (!treeResp.ok) {
-      throw new Error(`Unable to fetch the model tree for ${params.modelId}.`);
-    }
-    const tree: Array<{ type: "file" | "directory"; path: string }> = await treeResp.json();
-    const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
-    if (!adapterWeightsPath) {
-      throw new Error(`No .safetensors file found in the model tree for ${params.modelId}.`);
-    }
-    return {
-      ...providerMapping,
-      hfModelId: params.modelId,
-      adapterWeightsPath,
-    };
-  }
   return { ...providerMapping, hfModelId: params.modelId };
 }
 return null;
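
Note the behavior change in this hunk: getInferenceProviderMapping no longer lists the model repo to discover adapter weights and instead relies on the adapterWeightsPath delivered with the provider mapping. Callers that still need to locate a weights file themselves can reproduce the removed lookup; a sketch based on the deleted code, using the public Hub tree endpoint (the package's HF_HUB_URL resolves to https://huggingface.co):

const HF_HUB_URL = "https://huggingface.co";

// Finds the first .safetensors file at the root of a model repo, mirroring
// the logic removed in 3.9.2.
async function findAdapterWeightsPath(modelId: string): Promise<string> {
  const treeResp = await fetch(`${HF_HUB_URL}/api/models/${modelId}/tree/main`);
  if (!treeResp.ok) {
    throw new Error(`Unable to fetch the model tree for ${modelId}.`);
  }
  const tree: Array<{ type: "file" | "directory"; path: string }> = await treeResp.json();
  const adapterWeightsPath = tree.find(({ type, path }) => type === "file" && path.endsWith(".safetensors"))?.path;
  if (!adapterWeightsPath) {
    throw new Error(`No .safetensors file found in the model tree for ${modelId}.`);
  }
  return adapterWeightsPath;
}
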
package/src/providers/fal-ai.ts
CHANGED

@@ -86,16 +86,6 @@ export class FalAITextToImageTask extends FalAITask implements TextToImageTaskHelper {
       ...(params.args.parameters as Record<string, unknown>),
       sync_mode: true,
       prompt: params.args.inputs,
-      ...(params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath
-        ? {
-            loras: [
-              {
-                path: buildLoraPath(params.mapping.hfModelId, params.mapping.adapterWeightsPath),
-                scale: 1,
-              },
-            ],
-          }
-        : undefined),
     };
 
     if (params.mapping?.adapter === "lora" && params.mapping.adapterWeightsPath) {
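
End-user behavior for LoRA adapters is otherwise unchanged: when the mapping carries adapter: "lora" and an adapterWeightsPath, the task still attaches the weights through the imperative block kept in the context lines above. A hedged usage sketch (the model id is illustrative; textToImage returning a Blob comes from the package's own snippet templates):

import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_xxx"); // placeholder access token

// Illustrative model id: any Hub repo whose fal-ai mapping declares a LoRA
// adapter resolves the same way.
const image = await client.textToImage({
  provider: "fal-ai",
  model: "some-user/some-flux-lora",
  inputs: "an astronaut riding a horse",
});
// Use the generated image (it's a Blob).
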
package/src/snippets/templates.exported.ts
CHANGED

@@ -14,7 +14,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
   "basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
   "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
   "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
-  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream =
+  "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
   "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
   "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
 },