@huggingface/inference 3.13.0 → 3.13.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +97 -83
- package/dist/index.js +97 -83
- package/dist/src/providers/fal-ai.d.ts +3 -17
- package/dist/src/providers/fal-ai.d.ts.map +1 -1
- package/dist/src/providers/hf-inference.d.ts +5 -1
- package/dist/src/providers/hf-inference.d.ts.map +1 -1
- package/dist/src/providers/providerHelper.d.ts +5 -1
- package/dist/src/providers/providerHelper.d.ts.map +1 -1
- package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
- package/dist/src/snippets/templates.exported.d.ts.map +1 -1
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageToImage.d.ts.map +1 -1
- package/package.json +3 -3
- package/src/providers/fal-ai.ts +26 -1
- package/src/providers/hf-inference.ts +31 -2
- package/src/providers/providerHelper.ts +5 -1
- package/src/snippets/getInferenceSnippets.ts +12 -2
- package/src/snippets/templates.exported.ts +3 -1
- package/src/tasks/audio/automaticSpeechRecognition.ts +2 -32
- package/src/tasks/cv/imageToImage.ts +3 -18
package/dist/index.cjs
CHANGED

@@ -235,6 +235,43 @@ var BaseTextGenerationTask = class extends TaskProviderHelper {
   }
 };
 
+// src/utils/base64FromBytes.ts
+function base64FromBytes(arr) {
+  if (globalThis.Buffer) {
+    return globalThis.Buffer.from(arr).toString("base64");
+  } else {
+    const bin = [];
+    arr.forEach((byte) => {
+      bin.push(String.fromCharCode(byte));
+    });
+    return globalThis.btoa(bin.join(""));
+  }
+}
+
+// src/utils/pick.ts
+function pick(o, props) {
+  return Object.assign(
+    {},
+    ...props.map((prop) => {
+      if (o[prop] !== void 0) {
+        return { [prop]: o[prop] };
+      }
+    })
+  );
+}
+
+// src/utils/typedInclude.ts
+function typedInclude(arr, v) {
+  return arr.includes(v);
+}
+
+// src/utils/omit.ts
+function omit(o, props) {
+  const propsArr = Array.isArray(props) ? props : [props];
+  const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
+  return pick(o, letsKeep);
+}
+
 // src/providers/hf-inference.ts
 var EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS = ["feature-extraction", "sentence-similarity"];
 var HFInferenceTask = class extends TaskProviderHelper {
@@ -342,6 +379,12 @@ var HFInferenceAutomaticSpeechRecognitionTask = class extends HFInferenceTask {
   async getResponse(response) {
     return response;
   }
+  async preparePayloadAsync(args) {
+    return "data" in args ? args : {
+      ...omit(args, "inputs"),
+      data: args.inputs
+    };
+  }
 };
 var HFInferenceAudioToAudioTask = class extends HFInferenceTask {
   async getResponse(response) {
@@ -410,6 +453,22 @@ var HFInferenceImageToTextTask = class extends HFInferenceTask {
   }
 };
 var HFInferenceImageToImageTask = class extends HFInferenceTask {
+  async preparePayloadAsync(args) {
+    if (!args.parameters) {
+      return {
+        ...args,
+        model: args.model,
+        data: args.inputs
+      };
+    } else {
+      return {
+        ...args,
+        inputs: base64FromBytes(
+          new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
+        )
+      };
+    }
+  }
   async getResponse(response) {
     if (response instanceof Blob) {
       return response;
@@ -566,11 +625,6 @@ var HFInferenceTextToAudioTask = class extends HFInferenceTask {
   }
 };
 
-// src/utils/typedInclude.ts
-function typedInclude(arr, v) {
-  return arr.includes(v);
-}
-
 // src/lib/getInferenceProviderMapping.ts
 var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
 async function fetchInferenceProviderMappingForModel(modelId, accessToken, options) {
@@ -656,25 +710,6 @@ function delay(ms) {
   });
 }
 
-// src/utils/pick.ts
-function pick(o, props) {
-  return Object.assign(
-    {},
-    ...props.map((prop) => {
-      if (o[prop] !== void 0) {
-        return { [prop]: o[prop] };
-      }
-    })
-  );
-}
-
-// src/utils/omit.ts
-function omit(o, props) {
-  const propsArr = Array.isArray(props) ? props : [props];
-  const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
-  return pick(o, letsKeep);
-}
-
 // src/providers/black-forest-labs.ts
 var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
 var BlackForestLabsTextToImageTask = class extends TaskProviderHelper {
@@ -881,6 +916,27 @@ var FalAIAutomaticSpeechRecognitionTask = class extends FalAITask {
     }
     return { text: res.text };
   }
+  async preparePayloadAsync(args) {
+    const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
+    const contentType = blob?.type;
+    if (!contentType) {
+      throw new Error(
+        `Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
+      );
+    }
+    if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
+      throw new Error(
+        `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
+          ", "
+        )}`
+      );
+    }
+    const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
+    return {
+      ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
+      audio_url: `data:${contentType};base64,${base64audio}`
+    };
+  }
 };
 var FalAITextToSpeechTask = class extends FalAITask {
   preparePayload(params) {
@@ -1489,7 +1545,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.13.0";
+var version = "3.13.1";
 
 // src/lib/makeRequestOptions.ts
 var tasks = null;
@@ -1890,24 +1946,11 @@ async function audioToAudio(args, options) {
   return providerHelper.getResponse(res);
 }
 
-// src/utils/base64FromBytes.ts
-function base64FromBytes(arr) {
-  if (globalThis.Buffer) {
-    return globalThis.Buffer.from(arr).toString("base64");
-  } else {
-    const bin = [];
-    arr.forEach((byte) => {
-      bin.push(String.fromCharCode(byte));
-    });
-    return globalThis.btoa(bin.join(""));
-  }
-}
-
 // src/tasks/audio/automaticSpeechRecognition.ts
 async function automaticSpeechRecognition(args, options) {
   const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
   const providerHelper = getProviderHelper(provider, "automatic-speech-recognition");
-  const payload = await buildPayload(args);
+  const payload = await providerHelper.preparePayloadAsync(args);
   const { data: res } = await innerRequest(payload, providerHelper, {
     ...options,
     task: "automatic-speech-recognition"
@@ -1918,31 +1961,6 @@ async function automaticSpeechRecognition(args, options) {
   }
   return providerHelper.getResponse(res);
 }
-async function buildPayload(args) {
-  if (args.provider === "fal-ai") {
-    const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
-    const contentType = blob?.type;
-    if (!contentType) {
-      throw new Error(
-        `Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
-      );
-    }
-    if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
-      throw new Error(
-        `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
-          ", "
-        )}`
-      );
-    }
-    const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
-    return {
-      ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
-      audio_url: `data:${contentType};base64,${base64audio}`
-    };
-  } else {
-    return preparePayload(args);
-  }
-}
 
 // src/tasks/audio/textToSpeech.ts
 async function textToSpeech(args, options) {
@@ -1988,22 +2006,8 @@ async function imageSegmentation(args, options) {
 async function imageToImage(args, options) {
   const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
   const providerHelper = getProviderHelper(provider, "image-to-image");
-  let reqArgs;
-  if (!args.parameters) {
-    reqArgs = {
-      accessToken: args.accessToken,
-      model: args.model,
-      data: args.inputs
-    };
-  } else {
-    reqArgs = {
-      ...args,
-      inputs: base64FromBytes(
-        new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
-      )
-    };
-  }
-  const { data: res } = await innerRequest(reqArgs, providerHelper, {
+  const payload = await providerHelper.preparePayloadAsync(args);
+  const { data: res } = await innerRequest(payload, providerHelper, {
     ...options,
     task: "image-to-image"
   });
@@ -2468,7 +2472,7 @@ const video = await client.textToVideo({
   "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
 },
 "huggingface_hub": {
-  "basic": 'result = client.{{ methodName }}(\n
+  "basic": 'result = client.{{ methodName }}(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
   "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
   "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
   "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
@@ -2476,6 +2480,8 @@ const video = await client.textToVideo({
   "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
   "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
   "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
+  "questionAnswering": 'answer = client.question_answering(\n question="{{ inputs.asObj.question }}",\n context="{{ inputs.asObj.context }}",\n model="{{ model.id }}",\n) ',
+  "tableQuestionAnswering": 'answer = client.question_answering(\n query="{{ inputs.asObj.query }}",\n table={{ inputs.asObj.table }},\n model="{{ model.id }}",\n) ',
   "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
   "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
   "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
@@ -2729,6 +2735,14 @@ var prepareConversationalInput = (model, opts) => {
     ...opts?.top_p ? { top_p: opts?.top_p } : void 0
   };
 };
+var prepareQuestionAnsweringInput = (model) => {
+  const data = JSON.parse((0, import_tasks.getModelInputSnippet)(model));
+  return { question: data.question, context: data.context };
+};
+var prepareTableQuestionAnsweringInput = (model) => {
+  const data = JSON.parse((0, import_tasks.getModelInputSnippet)(model));
+  return { query: data.query, table: JSON.stringify(data.table) };
+};
 var snippets = {
   "audio-classification": snippetGenerator("basicAudio"),
   "audio-to-audio": snippetGenerator("basicAudio"),
@@ -2742,12 +2756,12 @@ var snippets = {
   "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
   "image-to-text": snippetGenerator("basicImage"),
   "object-detection": snippetGenerator("basicImage"),
-  "question-answering": snippetGenerator("
+  "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
   "sentence-similarity": snippetGenerator("basic"),
   summarization: snippetGenerator("basic"),
   "tabular-classification": snippetGenerator("tabular"),
   "tabular-regression": snippetGenerator("tabular"),
-  "table-question-answering": snippetGenerator("
+  "table-question-answering": snippetGenerator("tableQuestionAnswering", prepareTableQuestionAnsweringInput),
   "text-classification": snippetGenerator("basic"),
   "text-generation": snippetGenerator("basic"),
   "text-to-audio": snippetGenerator("textToAudio"),
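The substantive change in this bundle: the base64FromBytes, pick, typedInclude, and omit helpers are hoisted ahead of the provider classes, the ad hoc buildPayload function is deleted, and each provider helper now owns payload construction through an async preparePayloadAsync method. For fal-ai speech recognition the payload inlines the audio as a base64 data: URL. A minimal standalone TypeScript sketch of that conversion, restating the bundled code above (illustrative only, assuming a Blob input; not an exported API):

// Sketch: the Blob -> data URL conversion the new FalAI preparePayloadAsync performs.
// The BufferCtor lookup mirrors the globalThis.Buffer check in base64FromBytes above.
async function blobToAudioUrl(blob: Blob): Promise<string> {
  const bytes = new Uint8Array(await blob.arrayBuffer());
  const BufferCtor = (globalThis as Record<string, any>).Buffer;
  let base64: string;
  if (BufferCtor) {
    base64 = BufferCtor.from(bytes).toString("base64"); // Node path
  } else {
    const bin: string[] = [];
    bytes.forEach((byte) => bin.push(String.fromCharCode(byte))); // chunk-free loop, as in the bundle
    base64 = btoa(bin.join("")); // browser path
  }
  return `data:${blob.type};base64,${base64}`;
}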
package/dist/index.js
CHANGED

@@ -177,6 +177,43 @@ var BaseTextGenerationTask = class extends TaskProviderHelper {
   }
 };
 
+// src/utils/base64FromBytes.ts
+function base64FromBytes(arr) {
+  if (globalThis.Buffer) {
+    return globalThis.Buffer.from(arr).toString("base64");
+  } else {
+    const bin = [];
+    arr.forEach((byte) => {
+      bin.push(String.fromCharCode(byte));
+    });
+    return globalThis.btoa(bin.join(""));
+  }
+}
+
+// src/utils/pick.ts
+function pick(o, props) {
+  return Object.assign(
+    {},
+    ...props.map((prop) => {
+      if (o[prop] !== void 0) {
+        return { [prop]: o[prop] };
+      }
+    })
+  );
+}
+
+// src/utils/typedInclude.ts
+function typedInclude(arr, v) {
+  return arr.includes(v);
+}
+
+// src/utils/omit.ts
+function omit(o, props) {
+  const propsArr = Array.isArray(props) ? props : [props];
+  const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
+  return pick(o, letsKeep);
+}
+
 // src/providers/hf-inference.ts
 var EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS = ["feature-extraction", "sentence-similarity"];
 var HFInferenceTask = class extends TaskProviderHelper {
@@ -284,6 +321,12 @@ var HFInferenceAutomaticSpeechRecognitionTask = class extends HFInferenceTask {
   async getResponse(response) {
     return response;
   }
+  async preparePayloadAsync(args) {
+    return "data" in args ? args : {
+      ...omit(args, "inputs"),
+      data: args.inputs
+    };
+  }
 };
 var HFInferenceAudioToAudioTask = class extends HFInferenceTask {
   async getResponse(response) {
@@ -352,6 +395,22 @@ var HFInferenceImageToTextTask = class extends HFInferenceTask {
   }
 };
 var HFInferenceImageToImageTask = class extends HFInferenceTask {
+  async preparePayloadAsync(args) {
+    if (!args.parameters) {
+      return {
+        ...args,
+        model: args.model,
+        data: args.inputs
+      };
+    } else {
+      return {
+        ...args,
+        inputs: base64FromBytes(
+          new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
+        )
+      };
+    }
+  }
   async getResponse(response) {
     if (response instanceof Blob) {
       return response;
@@ -508,11 +567,6 @@ var HFInferenceTextToAudioTask = class extends HFInferenceTask {
   }
 };
 
-// src/utils/typedInclude.ts
-function typedInclude(arr, v) {
-  return arr.includes(v);
-}
-
 // src/lib/getInferenceProviderMapping.ts
 var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
 async function fetchInferenceProviderMappingForModel(modelId, accessToken, options) {
@@ -598,25 +652,6 @@ function delay(ms) {
   });
 }
 
-// src/utils/pick.ts
-function pick(o, props) {
-  return Object.assign(
-    {},
-    ...props.map((prop) => {
-      if (o[prop] !== void 0) {
-        return { [prop]: o[prop] };
-      }
-    })
-  );
-}
-
-// src/utils/omit.ts
-function omit(o, props) {
-  const propsArr = Array.isArray(props) ? props : [props];
-  const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
-  return pick(o, letsKeep);
-}
-
 // src/providers/black-forest-labs.ts
 var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
 var BlackForestLabsTextToImageTask = class extends TaskProviderHelper {
@@ -823,6 +858,27 @@ var FalAIAutomaticSpeechRecognitionTask = class extends FalAITask {
   }
   return { text: res.text };
   }
+  async preparePayloadAsync(args) {
+    const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
+    const contentType = blob?.type;
+    if (!contentType) {
+      throw new Error(
+        `Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
+      );
+    }
+    if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
+      throw new Error(
+        `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
+          ", "
+        )}`
+      );
+    }
+    const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
+    return {
+      ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
+      audio_url: `data:${contentType};base64,${base64audio}`
+    };
+  }
 };
 var FalAITextToSpeechTask = class extends FalAITask {
   preparePayload(params) {
@@ -1431,7 +1487,7 @@ function getProviderHelper(provider, task) {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.13.0";
+var version = "3.13.1";
 
 // src/lib/makeRequestOptions.ts
 var tasks = null;
@@ -1832,24 +1888,11 @@ async function audioToAudio(args, options) {
   return providerHelper.getResponse(res);
 }
 
-// src/utils/base64FromBytes.ts
-function base64FromBytes(arr) {
-  if (globalThis.Buffer) {
-    return globalThis.Buffer.from(arr).toString("base64");
-  } else {
-    const bin = [];
-    arr.forEach((byte) => {
-      bin.push(String.fromCharCode(byte));
-    });
-    return globalThis.btoa(bin.join(""));
-  }
-}
-
 // src/tasks/audio/automaticSpeechRecognition.ts
 async function automaticSpeechRecognition(args, options) {
   const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
   const providerHelper = getProviderHelper(provider, "automatic-speech-recognition");
-  const payload = await buildPayload(args);
+  const payload = await providerHelper.preparePayloadAsync(args);
   const { data: res } = await innerRequest(payload, providerHelper, {
     ...options,
     task: "automatic-speech-recognition"
@@ -1860,31 +1903,6 @@ async function automaticSpeechRecognition(args, options) {
   }
   return providerHelper.getResponse(res);
 }
-async function buildPayload(args) {
-  if (args.provider === "fal-ai") {
-    const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
-    const contentType = blob?.type;
-    if (!contentType) {
-      throw new Error(
-        `Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
-      );
-    }
-    if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
-      throw new Error(
-        `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
-          ", "
-        )}`
-      );
-    }
-    const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
-    return {
-      ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
-      audio_url: `data:${contentType};base64,${base64audio}`
-    };
-  } else {
-    return preparePayload(args);
-  }
-}
 
 // src/tasks/audio/textToSpeech.ts
 async function textToSpeech(args, options) {
@@ -1930,22 +1948,8 @@ async function imageSegmentation(args, options) {
 async function imageToImage(args, options) {
   const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
   const providerHelper = getProviderHelper(provider, "image-to-image");
-  let reqArgs;
-  if (!args.parameters) {
-    reqArgs = {
-      accessToken: args.accessToken,
-      model: args.model,
-      data: args.inputs
-    };
-  } else {
-    reqArgs = {
-      ...args,
-      inputs: base64FromBytes(
-        new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
-      )
-    };
-  }
-  const { data: res } = await innerRequest(reqArgs, providerHelper, {
+  const payload = await providerHelper.preparePayloadAsync(args);
+  const { data: res } = await innerRequest(payload, providerHelper, {
     ...options,
     task: "image-to-image"
   });
@@ -2413,7 +2417,7 @@ const video = await client.textToVideo({
   "textToImage": '{% if provider == "fal-ai" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n "loras":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n "{{ providerModelId }}",\n arguments={\n "prompt": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} '
 },
 "huggingface_hub": {
-  "basic": 'result = client.{{ methodName }}(\n
+  "basic": 'result = client.{{ methodName }}(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n)',
   "basicAudio": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
   "basicImage": 'output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model="{{ model.id }}")',
   "conversational": 'completion = client.chat.completions.create(\n model="{{ model.id }}",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ',
@@ -2421,6 +2425,8 @@ const video = await client.textToVideo({
   "documentQuestionAnswering": 'output = client.document_question_answering(\n "{{ inputs.asObj.image }}",\n question="{{ inputs.asObj.question }}",\n model="{{ model.id }}",\n) ',
   "imageToImage": '# output is a PIL.Image object\nimage = client.image_to_image(\n "{{ inputs.asObj.inputs }}",\n prompt="{{ inputs.asObj.parameters.prompt }}",\n model="{{ model.id }}",\n) ',
   "importInferenceClient": 'from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider="{{ provider }}",\n api_key="{{ accessToken }}",\n{% if billTo %}\n bill_to="{{ billTo }}",\n{% endif %}\n)',
+  "questionAnswering": 'answer = client.question_answering(\n question="{{ inputs.asObj.question }}",\n context="{{ inputs.asObj.context }}",\n model="{{ model.id }}",\n) ',
+  "tableQuestionAnswering": 'answer = client.question_answering(\n query="{{ inputs.asObj.query }}",\n table={{ inputs.asObj.table }},\n model="{{ model.id }}",\n) ',
   "textToImage": '# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) ',
   "textToSpeech": '# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) \n',
   "textToVideo": 'video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model="{{ model.id }}",\n) '
@@ -2674,6 +2680,14 @@ var prepareConversationalInput = (model, opts) => {
     ...opts?.top_p ? { top_p: opts?.top_p } : void 0
   };
 };
+var prepareQuestionAnsweringInput = (model) => {
+  const data = JSON.parse(getModelInputSnippet(model));
+  return { question: data.question, context: data.context };
+};
+var prepareTableQuestionAnsweringInput = (model) => {
+  const data = JSON.parse(getModelInputSnippet(model));
+  return { query: data.query, table: JSON.stringify(data.table) };
+};
 var snippets = {
   "audio-classification": snippetGenerator("basicAudio"),
   "audio-to-audio": snippetGenerator("basicAudio"),
@@ -2687,12 +2701,12 @@ var snippets = {
   "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
   "image-to-text": snippetGenerator("basicImage"),
   "object-detection": snippetGenerator("basicImage"),
-  "question-answering": snippetGenerator("
+  "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
   "sentence-similarity": snippetGenerator("basic"),
   summarization: snippetGenerator("basic"),
   "tabular-classification": snippetGenerator("tabular"),
   "tabular-regression": snippetGenerator("tabular"),
-  "table-question-answering": snippetGenerator("
+  "table-question-answering": snippetGenerator("tableQuestionAnswering", prepareTableQuestionAnsweringInput),
   "text-classification": snippetGenerator("basic"),
   "text-generation": snippetGenerator("basic"),
   "text-to-audio": snippetGenerator("textToAudio"),
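The index.js bundle mirrors index.cjs, calling getModelInputSnippet directly instead of through the CJS import_tasks binding. Both refactored task functions now follow one shape: resolve the provider helper, then await providerHelper.preparePayloadAsync(args). A toy TypeScript reduction of why the hook is async and per-provider (simplified stand-in types, not the library's real interfaces):

// hf-inference can forward the Blob; fal-ai must read and inline it first,
// which forces an await and therefore an async preparation step.
interface PayloadPreparer {
  preparePayloadAsync(args: { inputs: Blob; model: string }): Promise<Record<string, unknown>>;
}

const hfStyle: PayloadPreparer = {
  // rename `inputs` to `data`, keep the Blob as the request body
  async preparePayloadAsync({ inputs, ...rest }) {
    return { ...rest, data: inputs };
  },
};

const falStyle: PayloadPreparer = {
  // must await blob.arrayBuffer() before the request can be built
  async preparePayloadAsync({ inputs, ...rest }) {
    const bytes = new Uint8Array(await inputs.arrayBuffer());
    const bin: string[] = [];
    bytes.forEach((b) => bin.push(String.fromCharCode(b)));
    return { ...rest, audio_url: `data:${inputs.type};base64,${btoa(bin.join(""))}` };
  },
};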
package/dist/src/providers/fal-ai.d.ts
CHANGED

@@ -1,22 +1,7 @@
-/**
- * See the registered mapping of HF model ID => Fal model ID here:
- *
- * https://huggingface.co/api/partners/fal-ai/models
- *
- * This is a publicly available mapping.
- *
- * If you want to try to run inference for a new model locally before it's registered on huggingface.co,
- * you can add it to the dictionary "HARDCODED_MODEL_ID_MAPPING" in consts.ts, for dev purposes.
- *
- * - If you work at Fal and want to update this mapping, please use the model mapping API we provide on huggingface.co
- * - If you're a community member and want to add a new supported HF model to Fal, please open an issue on the present repo
- * and we will tag Fal team members.
- *
- * Thanks!
- */
 import type { AutomaticSpeechRecognitionOutput } from "@huggingface/tasks";
-import type { BodyParams, HeaderParams, UrlParams } from "../types";
+import type { BodyParams, HeaderParams, RequestArgs, UrlParams } from "../types";
 import { type AutomaticSpeechRecognitionTaskHelper, TaskProviderHelper, type TextToImageTaskHelper, type TextToVideoTaskHelper } from "./providerHelper";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
 export interface FalAiQueueOutput {
     request_id: string;
     status: string;
@@ -47,6 +32,7 @@ export declare class FalAITextToVideoTask extends FalAITask implements TextToVid
 export declare class FalAIAutomaticSpeechRecognitionTask extends FalAITask implements AutomaticSpeechRecognitionTaskHelper {
     prepareHeaders(params: HeaderParams, binary: boolean): Record<string, string>;
     getResponse(response: unknown): Promise<AutomaticSpeechRecognitionOutput>;
+    preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs>;
 }
 export declare class FalAITextToSpeechTask extends FalAITask {
     preparePayload(params: BodyParams): Record<string, unknown>;
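A hedged usage sketch of the public task this declaration backs; the token and model id are placeholders and the Blob contents are elided:

import { automaticSpeechRecognition } from "@huggingface/inference";

// Placeholders: substitute a real token and a fal-ai-mapped ASR model.
const audio = new Blob([/* audio file bytes */], { type: "audio/mpeg" });
const output = await automaticSpeechRecognition({
  accessToken: "hf_xxx",
  provider: "fal-ai",
  model: "openai/whisper-large-v3",
  inputs: audio,
});
console.log(output.text); // getResponse returns { text } for fal-ai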
package/dist/src/providers/fal-ai.d.ts.map
CHANGED

@@ -1 +1 @@
(regenerated sourcemap for fal-ai.d.ts; the machine-generated "mappings" VLQ string is not reproduced here)
package/dist/src/providers/hf-inference.d.ts
CHANGED

@@ -12,9 +12,11 @@
  */
 import type { AudioClassificationOutput, AutomaticSpeechRecognitionOutput, ChatCompletionOutput, DocumentQuestionAnsweringOutput, FeatureExtractionOutput, FillMaskOutput, ImageClassificationOutput, ImageSegmentationOutput, ImageToTextOutput, ObjectDetectionOutput, QuestionAnsweringOutput, SentenceSimilarityOutput, SummarizationOutput, TableQuestionAnsweringOutput, TextClassificationOutput, TextGenerationOutput, TokenClassificationOutput, TranslationOutput, VisualQuestionAnsweringOutput, ZeroShotClassificationOutput, ZeroShotImageClassificationOutput } from "@huggingface/tasks";
 import type { TabularClassificationOutput } from "../tasks/tabular/tabularClassification";
-import type { BodyParams, UrlParams } from "../types";
+import type { BodyParams, RequestArgs, UrlParams } from "../types";
 import type { AudioClassificationTaskHelper, AudioToAudioTaskHelper, AutomaticSpeechRecognitionTaskHelper, ConversationalTaskHelper, DocumentQuestionAnsweringTaskHelper, FeatureExtractionTaskHelper, FillMaskTaskHelper, ImageClassificationTaskHelper, ImageSegmentationTaskHelper, ImageToImageTaskHelper, ImageToTextTaskHelper, ObjectDetectionTaskHelper, QuestionAnsweringTaskHelper, SentenceSimilarityTaskHelper, SummarizationTaskHelper, TableQuestionAnsweringTaskHelper, TabularClassificationTaskHelper, TabularRegressionTaskHelper, TextClassificationTaskHelper, TextGenerationTaskHelper, TextToAudioTaskHelper, TextToImageTaskHelper, TextToSpeechTaskHelper, TokenClassificationTaskHelper, TranslationTaskHelper, VisualQuestionAnsweringTaskHelper, ZeroShotClassificationTaskHelper, ZeroShotImageClassificationTaskHelper } from "./providerHelper";
 import { TaskProviderHelper } from "./providerHelper";
+import type { ImageToImageArgs } from "../tasks/cv/imageToImage";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
 interface Base64ImageGeneration {
     data: Array<{
         b64_json: string;
@@ -52,6 +54,7 @@ export declare class HFInferenceAudioClassificationTask extends HFInferenceTask
 }
 export declare class HFInferenceAutomaticSpeechRecognitionTask extends HFInferenceTask implements AutomaticSpeechRecognitionTaskHelper {
     getResponse(response: AutomaticSpeechRecognitionOutput): Promise<AutomaticSpeechRecognitionOutput>;
+    preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs>;
 }
 export declare class HFInferenceAudioToAudioTask extends HFInferenceTask implements AudioToAudioTaskHelper {
     getResponse(response: AudioToAudioOutput[]): Promise<AudioToAudioOutput[]>;
@@ -72,6 +75,7 @@ export declare class HFInferenceImageToTextTask extends HFInferenceTask implemen
     getResponse(response: ImageToTextOutput): Promise<ImageToTextOutput>;
 }
 export declare class HFInferenceImageToImageTask extends HFInferenceTask implements ImageToImageTaskHelper {
+    preparePayloadAsync(args: ImageToImageArgs): Promise<RequestArgs>;
     getResponse(response: Blob): Promise<Blob>;
 }
 export declare class HFInferenceObjectDetectionTask extends HFInferenceTask implements ObjectDetectionTaskHelper {
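A companion sketch for the image-to-image path whose payload handling moved into HFInferenceImageToImageTask.preparePayloadAsync; token, model id, and prompt are placeholders. Per the bundle diffs above, the helper base64-encodes inputs when parameters is present and otherwise sends the raw Blob as data:

import { imageToImage } from "@huggingface/inference";

const image = new Blob([/* image bytes */], { type: "image/png" });
// With `parameters` set, inputs is base64-encoded before the request is built.
const edited: Blob = await imageToImage({
  accessToken: "hf_xxx",
  model: "user/some-image-to-image-model", // placeholder id
  inputs: image,
  parameters: { prompt: "replace the background with a beach" },
});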
package/dist/src/providers/hf-inference.d.ts.map
CHANGED

@@ -1 +1 @@
(regenerated sourcemap for hf-inference.d.ts; the machine-generated "mappings" VLQ string is not reproduced here)
package/dist/src/providers/providerHelper.d.ts
CHANGED

@@ -1,6 +1,8 @@
 import type { AudioClassificationInput, AudioClassificationOutput, AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput, ChatCompletionInput, ChatCompletionOutput, DocumentQuestionAnsweringInput, DocumentQuestionAnsweringOutput, FeatureExtractionInput, FeatureExtractionOutput, FillMaskInput, FillMaskOutput, ImageClassificationInput, ImageClassificationOutput, ImageSegmentationInput, ImageSegmentationOutput, ImageToImageInput, ImageToTextInput, ImageToTextOutput, ObjectDetectionInput, ObjectDetectionOutput, QuestionAnsweringInput, QuestionAnsweringOutput, SentenceSimilarityInput, SentenceSimilarityOutput, SummarizationInput, SummarizationOutput, TableQuestionAnsweringInput, TableQuestionAnsweringOutput, TextClassificationOutput, TextGenerationInput, TextGenerationOutput, TextToImageInput, TextToSpeechInput, TextToVideoInput, TokenClassificationInput, TokenClassificationOutput, TranslationInput, TranslationOutput, VisualQuestionAnsweringInput, VisualQuestionAnsweringOutput, ZeroShotClassificationInput, ZeroShotClassificationOutput, ZeroShotImageClassificationInput, ZeroShotImageClassificationOutput } from "@huggingface/tasks";
 import type { AudioToAudioOutput } from "../tasks/audio/audioToAudio";
-import type { BaseArgs, BodyParams, HeaderParams, InferenceProvider, UrlParams } from "../types";
+import type { BaseArgs, BodyParams, HeaderParams, InferenceProvider, RequestArgs, UrlParams } from "../types";
+import type { ImageToImageArgs } from "../tasks/cv/imageToImage";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
 /**
  * Base class for task-specific provider helpers
  */
@@ -52,6 +54,7 @@ export interface TextToVideoTaskHelper {
 export interface ImageToImageTaskHelper {
     getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<Blob>;
     preparePayload(params: BodyParams<ImageToImageInput & BaseArgs>): Record<string, unknown>;
+    preparePayloadAsync(args: ImageToImageArgs): Promise<RequestArgs>;
 }
 export interface ImageSegmentationTaskHelper {
     getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<ImageSegmentationOutput>;
@@ -134,6 +137,7 @@ export interface AudioToAudioTaskHelper {
 export interface AutomaticSpeechRecognitionTaskHelper {
     getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<AutomaticSpeechRecognitionOutput>;
     preparePayload(params: BodyParams<AutomaticSpeechRecognitionInput & BaseArgs>): Record<string, unknown> | BodyInit;
+    preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs>;
 }
 export interface AudioClassificationTaskHelper {
     getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<AudioClassificationOutput>;
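Any helper implementing these widened interfaces now supplies the async hook alongside the sync preparePayload. A minimal conforming sketch with simplified stand-in types (the real RequestArgs and task-arg types live in ../types and the task modules):

// Stand-ins for the library types named in the declarations above.
type RequestArgsLike = Record<string, unknown>;
type AsrArgsLike = { inputs: Blob } & Record<string, unknown>;

class MyAsrHelper {
  preparePayload(params: Record<string, unknown>): Record<string, unknown> {
    return params; // sync path: the body is already serializable
  }
  async preparePayloadAsync(args: AsrArgsLike): Promise<RequestArgsLike> {
    // async path: free to read the Blob before the request is assembled
    return { ...args, byteLength: (await args.inputs.arrayBuffer()).byteLength };
  }
  async getResponse(response: unknown): Promise<{ text: string }> {
    return response as { text: string };
  }
}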
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"providerHelper.d.ts","sourceRoot":"","sources":["../../../src/providers/providerHelper.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACX,wBAAwB,EACxB,yBAAyB,EACzB,+BAA+B,EAC/B,gCAAgC,EAChC,mBAAmB,EACnB,oBAAoB,EACpB,8BAA8B,EAC9B,+BAA+B,EAC/B,sBAAsB,EACtB,uBAAuB,EACvB,aAAa,EACb,cAAc,EACd,wBAAwB,EACxB,yBAAyB,EACzB,sBAAsB,EACtB,uBAAuB,EACvB,iBAAiB,EACjB,gBAAgB,EAChB,iBAAiB,EACjB,oBAAoB,EACpB,qBAAqB,EACrB,sBAAsB,EACtB,uBAAuB,EACvB,uBAAuB,EACvB,wBAAwB,EACxB,kBAAkB,EAClB,mBAAmB,EACnB,2BAA2B,EAC3B,4BAA4B,EAC5B,wBAAwB,EACxB,mBAAmB,EACnB,oBAAoB,EACpB,gBAAgB,EAChB,iBAAiB,EACjB,gBAAgB,EAChB,wBAAwB,EACxB,yBAAyB,EACzB,gBAAgB,EAChB,iBAAiB,EACjB,4BAA4B,EAC5B,6BAA6B,EAC7B,2BAA2B,EAC3B,4BAA4B,EAC5B,gCAAgC,EAChC,iCAAiC,EACjC,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AACtE,OAAO,KAAK,EAAE,QAAQ,EAAE,UAAU,EAAE,YAAY,EAAE,iBAAiB,EAAE,SAAS,EAAE,MAAM,UAAU,CAAC;
|
|
1
|
+
{"version":3,"file":"providerHelper.d.ts","sourceRoot":"","sources":["../../../src/providers/providerHelper.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACX,wBAAwB,EACxB,yBAAyB,EACzB,+BAA+B,EAC/B,gCAAgC,EAChC,mBAAmB,EACnB,oBAAoB,EACpB,8BAA8B,EAC9B,+BAA+B,EAC/B,sBAAsB,EACtB,uBAAuB,EACvB,aAAa,EACb,cAAc,EACd,wBAAwB,EACxB,yBAAyB,EACzB,sBAAsB,EACtB,uBAAuB,EACvB,iBAAiB,EACjB,gBAAgB,EAChB,iBAAiB,EACjB,oBAAoB,EACpB,qBAAqB,EACrB,sBAAsB,EACtB,uBAAuB,EACvB,uBAAuB,EACvB,wBAAwB,EACxB,kBAAkB,EAClB,mBAAmB,EACnB,2BAA2B,EAC3B,4BAA4B,EAC5B,wBAAwB,EACxB,mBAAmB,EACnB,oBAAoB,EACpB,gBAAgB,EAChB,iBAAiB,EACjB,gBAAgB,EAChB,wBAAwB,EACxB,yBAAyB,EACzB,gBAAgB,EAChB,iBAAiB,EACjB,4BAA4B,EAC5B,6BAA6B,EAC7B,2BAA2B,EAC3B,4BAA4B,EAC5B,gCAAgC,EAChC,iCAAiC,EACjC,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AACtE,OAAO,KAAK,EAAE,QAAQ,EAAE,UAAU,EAAE,YAAY,EAAE,iBAAiB,EAAE,WAAW,EAAE,SAAS,EAAE,MAAM,UAAU,CAAC;AAE9G,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AACjE,OAAO,KAAK,EAAE,8BAA8B,EAAE,MAAM,2CAA2C,CAAC;AAEhG;;GAEG;AACH,8BAAsB,kBAAkB;IAEtC,QAAQ,CAAC,QAAQ,EAAE,iBAAiB;IACpC,OAAO,CAAC,OAAO;IACf,QAAQ,CAAC,qBAAqB,EAAE,OAAO;gBAF9B,QAAQ,EAAE,iBAAiB,EAC5B,OAAO,EAAE,MAAM,EACd,qBAAqB,GAAE,OAAe;IAGhD;;;OAGG;IACH,QAAQ,CAAC,WAAW,CACnB,QAAQ,EAAE,OAAO,EACjB,GAAG,CAAC,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,WAAW,EACrB,UAAU,CAAC,EAAE,KAAK,GAAG,MAAM,GACzB,OAAO,CAAC,OAAO,CAAC;IAEnB;;;OAGG;IACH,QAAQ,CAAC,SAAS,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAC7C;;;OAGG;IACH,QAAQ,CAAC,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,OAAO;IAEpD;;OAEG;IACH,WAAW,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAItC;;OAEG;IACH,QAAQ,CAAC,MAAM,EAAE,UAAU,GAAG,QAAQ;IAOtC;;OAEG;IACH,OAAO,CAAC,MAAM,EAAE,SAAS,GAAG,MAAM;IAMlC;;OAEG;IACH,cAAc,CAAC,MAAM,EAAE,YAAY,EAAE,QAAQ,EAAE,OAAO,GAAG,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC;CAO/E;AAKD,MAAM,WAAW,qBAAqB;IACrC,WAAW,CACV,QAAQ,EAAE,OAAO,EACjB,GAAG,CAAC,EAAE,MAAM,EACZ,OAAO,CAAC,EAAE,WAAW,EACrB,UAAU,CAAC,EAAE,KAAK,GAAG,MAAM,GACzB,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC;IAC1B,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,gBAAgB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACzF;AAED,MAAM,WAAW,qBAAqB;IACrC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IAC9F,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,gBAAgB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACzF;AAED,MAAM,WAAW,sBAAsB;IACtC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IACnF,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,iBAAiB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAC1F,mBAAmB,CAAC,IAAI,EAAE,gBAAgB,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC;CAClE;AAED,MAAM,WAAW,2BAA2B;IAC3C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,uBAAuB,CAAC,CAAC;IACtG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,sBAAsB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CAC1G;AAED,MAAM,WAAW,6BAA6B;IAC7C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,yBAAyB,CAAC,CAAC;IACxG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,wBAAwB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CAC5G;AAED,MAAM,WAAW,yBAAyB;IACzC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,qBAAqB,CAAC,CAAC;IACpG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,oBAAoB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CACxG;AAED,MAAM,WAAW,qBAAqB;IACrC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAAC;IAChG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,gBAAgB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,
CAAC,GAAG,QAAQ,CAAC;CACpG;AAED,MAAM,WAAW,qCAAqC;IACrD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,iCAAiC,CAAC,CAAC;IAChH,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,gCAAgC,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CACpH;AAGD,MAAM,WAAW,wBAAwB;IACxC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,oBAAoB,CAAC,CAAC;IACnG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,mBAAmB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC5F;AAED,MAAM,WAAW,wBAAwB;IACxC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,oBAAoB,CAAC,CAAC;IACnG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,mBAAmB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC5F;AAED,MAAM,WAAW,4BAA4B;IAC5C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,wBAAwB,CAAC,CAAC;IACvG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,2BAA2B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACpG;AAED,MAAM,WAAW,2BAA2B;IAC3C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,uBAAuB,CAAC,MAAM,CAAC,CAAC,CAAC;IAC9G,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,sBAAsB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC/F;AAED,MAAM,WAAW,kBAAkB;IAClC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,cAAc,CAAC,CAAC;IAC7F,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,aAAa,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACtF;AAED,MAAM,WAAW,gCAAgC;IAChD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,4BAA4B,CAAC,CAAC;IAC3G,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,2BAA2B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACpG;AAED,MAAM,WAAW,4BAA4B;IAC5C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,wBAAwB,CAAC,CAAC;IACvG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,uBAAuB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAChG;AAED,MAAM,WAAW,gCAAgC;IAChD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,4BAA4B,CAAC,MAAM,CAAC,CAAC,CAAC;IACnH,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,2BAA2B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACpG;AAED,MAAM,WAAW,6BAA6B;IAC7C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,yBAAyB,CAAC,CAAC;IACxG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,wBAAwB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACjG;AAED,MAAM,WAAW,qBAAqB;IACrC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAAC;IAChG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,gBAAgB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACzF;AAED,MAAM,WAAW,uBAAuB;IACvC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAAC;IAClG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,kBAAkB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC3F;AAGD,MAAM,WAAW,sBAAsB;IACtC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IACnF,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,iBAAiB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC1F;AAED,MAAM,WAAW,qBAAqB;IACrC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;IACnF,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAChG;AAED,MAAM,WAAW,sBAAsB;IACtC,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,kBAAkB,EAA
E,CAAC,CAAC;IACnG,cAAc,CACb,MAAM,EAAE,UAAU,CAAC,QAAQ,GAAG;QAAE,MAAM,EAAE,IAAI,CAAA;KAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,GACvE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CACtC;AACD,MAAM,WAAW,oCAAoC;IACpD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,gCAAgC,CAAC,CAAC;IAC/G,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,+BAA+B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;IACnH,mBAAmB,CAAC,IAAI,EAAE,8BAA8B,GAAG,OAAO,CAAC,WAAW,CAAC,CAAC;CAChF;AAED,MAAM,WAAW,6BAA6B;IAC7C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,yBAAyB,CAAC,CAAC;IACxG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,wBAAwB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CAC5G;AAGD,MAAM,WAAW,mCAAmC;IACnD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,+BAA+B,CAAC,MAAM,CAAC,CAAC,CAAC;IACtH,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,8BAA8B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CAClH;AAED,MAAM,WAAW,2BAA2B;IAC3C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,uBAAuB,CAAC,CAAC;IACtG,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,sBAAsB,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CAC/F;AAED,MAAM,WAAW,iCAAiC;IACjD,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,6BAA6B,CAAC,MAAM,CAAC,CAAC,CAAC;IACpH,cAAc,CAAC,MAAM,EAAE,UAAU,CAAC,4BAA4B,GAAG,QAAQ,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CAChH;AAED,MAAM,WAAW,+BAA+B;IAC/C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;IACvF,cAAc,CACb,MAAM,EAAE,UAAU,CAAC,QAAQ,GAAG;QAAE,MAAM,EAAE;YAAE,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAA;SAAE,CAAA;KAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,GACrG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CACtC;AAED,MAAM,WAAW,2BAA2B;IAC3C,WAAW,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,MAAM,EAAE,CAAC,CAAC;IACvF,cAAc,CACb,MAAM,EAAE,UAAU,CAAC,QAAQ,GAAG;QAAE,MAAM,EAAE;YAAE,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAA;SAAE,CAAA;KAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,GACrG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,QAAQ,CAAC;CACtC;AAID,qBAAa,sBAAuB,SAAQ,kBAAmB,YAAW,wBAAwB;gBACrF,QAAQ,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,EAAE,qBAAqB,GAAE,OAAe;IAIhG,SAAS,IAAI,MAAM;IAInB,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAOrD,WAAW,CAAC,QAAQ,EAAE,oBAAoB,GAAG,OAAO,CAAC,oBAAoB,CAAC;CAkBhF;AAED,qBAAa,sBAAuB,SAAQ,kBAAmB,YAAW,wBAAwB;gBACrF,QAAQ,EAAE,iBAAiB,EAAE,OAAO,EAAE,MAAM,EAAE,qBAAqB,GAAE,OAAe;IAIhG,cAAc,CAAC,MAAM,EAAE,UAAU,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC;IAO3D,SAAS,IAAI,MAAM;IAIb,WAAW,CAAC,QAAQ,EAAE,OAAO,GAAG,OAAO,CAAC,oBAAoB,CAAC;CAenE"}

package/dist/src/snippets/getInferenceSnippets.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,OAAO,KAAK,EAAE,yBAAyB,EAA8B,MAAM,UAAU,CAAC;AAGtF,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;
+{"version":3,"file":"getInferenceSnippets.d.ts","sourceRoot":"","sources":["../../../src/snippets/getInferenceSnippets.ts"],"names":[],"mappings":"AACA,OAAO,EACN,KAAK,gBAAgB,EAErB,KAAK,gBAAgB,EAGrB,MAAM,oBAAoB,CAAC;AAG5B,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,oCAAoC,CAAC;AAGxF,OAAO,KAAK,EAAE,yBAAyB,EAA8B,MAAM,UAAU,CAAC;AAGtF,MAAM,MAAM,uBAAuB,GAAG;IAAE,SAAS,CAAC,EAAE,OAAO,CAAC;IAAC,MAAM,CAAC,EAAE,MAAM,CAAA;CAAE,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;AAoUzG,wBAAgB,oBAAoB,CACnC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,yBAAyB,EACnC,wBAAwB,CAAC,EAAE,6BAA6B,EACxD,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}

package/dist/src/snippets/templates.exported.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,
+{"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CA4EnE,CAAC"}

package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,+BAA+B,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;
+{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,+BAA+B,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAI5G,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAEhD,MAAM,MAAM,8BAA8B,GAAG,QAAQ,GAAG,CAAC,+BAA+B,GAAG,gBAAgB,CAAC,CAAC;AAC7G;;;GAGG;AACH,wBAAsB,0BAA0B,CAC/C,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,gCAAgC,CAAC,CAa3C"}

package/dist/src/tasks/cv/imageToImage.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"imageToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAG5D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,
+{"version":3,"file":"imageToImage.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToImage.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAG5D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,gBAAgB,GAAG,QAAQ,GAAG,iBAAiB,CAAC;AAE5D;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAS3F"}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@huggingface/inference",
-  "version": "3.13.0",
+  "version": "3.13.1",
   "license": "MIT",
   "author": "Hugging Face and Tim Mikeladze <tim.mikeladze@gmail.com>",
   "description": "Typescript client for the Hugging Face Inference Providers and Inference Endpoints",
@@ -39,8 +39,8 @@
   },
   "type": "module",
   "dependencies": {
-    "@huggingface/
-    "@huggingface/
+    "@huggingface/jinja": "^0.5.0",
+    "@huggingface/tasks": "^0.19.5"
   },
   "devDependencies": {
     "@types/node": "18.13.0"
package/src/providers/fal-ai.ts
CHANGED
@@ -14,10 +14,12 @@
  *
  * Thanks!
  */
+import { base64FromBytes } from "../utils/base64FromBytes";
+
 import type { AutomaticSpeechRecognitionOutput } from "@huggingface/tasks";
 import { InferenceOutputError } from "../lib/InferenceOutputError";
 import { isUrl } from "../lib/isUrl";
-import type { BodyParams, HeaderParams, ModelId, UrlParams } from "../types";
+import type { BodyParams, HeaderParams, ModelId, RequestArgs, UrlParams } from "../types";
 import { delay } from "../utils/delay";
 import { omit } from "../utils/omit";
 import {
@@ -27,6 +29,7 @@ import {
 	type TextToVideoTaskHelper,
 } from "./providerHelper";
 import { HF_HUB_URL } from "../config";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
 
 export interface FalAiQueueOutput {
 	request_id: string;
@@ -224,6 +227,28 @@ export class FalAIAutomaticSpeechRecognitionTask extends FalAITask implements Au
 		}
 		return { text: res.text };
 	}
+
+	async preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs> {
+		const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : undefined;
+		const contentType = blob?.type;
+		if (!contentType) {
+			throw new Error(
+				`Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
+			);
+		}
+		if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
+			throw new Error(
+				`Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
+					", "
+				)}`
+			);
+		}
+		const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
+		return {
+			...("data" in args ? omit(args, "data") : omit(args, "inputs")),
+			audio_url: `data:${contentType};base64,${base64audio}`,
+		};
+	}
 }
 
 export class FalAITextToSpeechTask extends FalAITask {
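
For context, a minimal usage sketch of the new fal-ai ASR path through the package's public API; the token and model id below are placeholders, not values from this diff.

// Sketch only: preparePayloadAsync turns the Blob into the base64 audio_url seen above.
import { automaticSpeechRecognition } from "@huggingface/inference";

const audio = new Blob([/* raw audio bytes */], { type: "audio/mpeg" });
const output = await automaticSpeechRecognition({
	accessToken: "hf_xxx", // placeholder token
	provider: "fal-ai",
	model: "openai/whisper-large-v3", // placeholder model id
	inputs: audio,
});
// Internally, audio.type is validated against FAL_AI_SUPPORTED_BLOB_TYPES and the
// request body carries { audio_url: "data:audio/mpeg;base64,..." }.
console.log(output.text);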

package/src/providers/hf-inference.ts
CHANGED

@@ -36,7 +36,7 @@ import type {
 import { HF_ROUTER_URL } from "../config";
 import { InferenceOutputError } from "../lib/InferenceOutputError";
 import type { TabularClassificationOutput } from "../tasks/tabular/tabularClassification";
-import type { BodyParams, UrlParams } from "../types";
+import type { BodyParams, RequestArgs, UrlParams } from "../types";
 import { toArray } from "../utils/toArray";
 import type {
 	AudioClassificationTaskHelper,
@@ -70,7 +70,10 @@ import type {
 } from "./providerHelper";
 
 import { TaskProviderHelper } from "./providerHelper";
-
+import { base64FromBytes } from "../utils/base64FromBytes";
+import type { ImageToImageArgs } from "../tasks/cv/imageToImage";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
+import { omit } from "../utils/omit";
 interface Base64ImageGeneration {
 	data: Array<{
 		b64_json: string;
@@ -221,6 +224,15 @@ export class HFInferenceAutomaticSpeechRecognitionTask
 	override async getResponse(response: AutomaticSpeechRecognitionOutput): Promise<AutomaticSpeechRecognitionOutput> {
 		return response;
 	}
+
+	async preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs> {
+		return "data" in args
+			? args
+			: {
+					...omit(args, "inputs"),
+					data: args.inputs,
+			  };
+	}
 }
 
 export class HFInferenceAudioToAudioTask extends HFInferenceTask implements AudioToAudioTaskHelper {
@@ -326,6 +338,23 @@ export class HFInferenceImageToTextTask extends HFInferenceTask implements Image
 }
 
 export class HFInferenceImageToImageTask extends HFInferenceTask implements ImageToImageTaskHelper {
+	async preparePayloadAsync(args: ImageToImageArgs): Promise<RequestArgs> {
+		if (!args.parameters) {
+			return {
+				...args,
+				model: args.model,
+				data: args.inputs,
+			};
+		} else {
+			return {
+				...args,
+				inputs: base64FromBytes(
+					new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await (args.inputs as Blob).arrayBuffer())
+				),
+			};
+		}
+	}
+
	override async getResponse(response: Blob): Promise<Blob> {
 		if (response instanceof Blob) {
 			return response;
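
To make the two branches of the new image-to-image payload hook concrete, here is a standalone sketch; it mirrors the logic above but is not the package's exported API, and the names are illustrative.

// Sketch only: the two payload shapes produced by preparePayloadAsync.
function sketchImageToImagePayload(args: { inputs: Blob; parameters?: { prompt?: string } }) {
	if (!args.parameters) {
		// No parameters: the Blob rides along under `data` and is sent as a raw binary body.
		return { ...args, data: args.inputs };
	}
	// With parameters: the Blob's bytes are base64-encoded into `inputs` so the
	// whole payload can be serialized as JSON alongside `parameters`.
	return { ...args, inputs: "<base64 of the blob bytes>" };
}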

package/src/providers/providerHelper.ts
CHANGED

@@ -48,8 +48,10 @@ import type {
 import { HF_ROUTER_URL } from "../config";
 import { InferenceOutputError } from "../lib/InferenceOutputError";
 import type { AudioToAudioOutput } from "../tasks/audio/audioToAudio";
-import type { BaseArgs, BodyParams, HeaderParams, InferenceProvider, UrlParams } from "../types";
+import type { BaseArgs, BodyParams, HeaderParams, InferenceProvider, RequestArgs, UrlParams } from "../types";
 import { toArray } from "../utils/toArray";
+import type { ImageToImageArgs } from "../tasks/cv/imageToImage";
+import type { AutomaticSpeechRecognitionArgs } from "../tasks/audio/automaticSpeechRecognition";
 
 /**
  * Base class for task-specific provider helpers
@@ -142,6 +144,7 @@ export interface TextToVideoTaskHelper {
 export interface ImageToImageTaskHelper {
 	getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<Blob>;
 	preparePayload(params: BodyParams<ImageToImageInput & BaseArgs>): Record<string, unknown>;
+	preparePayloadAsync(args: ImageToImageArgs): Promise<RequestArgs>;
 }
 
 export interface ImageSegmentationTaskHelper {
@@ -245,6 +248,7 @@ export interface AudioToAudioTaskHelper {
 export interface AutomaticSpeechRecognitionTaskHelper {
 	getResponse(response: unknown, url?: string, headers?: HeadersInit): Promise<AutomaticSpeechRecognitionOutput>;
 	preparePayload(params: BodyParams<AutomaticSpeechRecognitionInput & BaseArgs>): Record<string, unknown> | BodyInit;
+	preparePayloadAsync(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs>;
 }
 
 export interface AudioClassificationTaskHelper {
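
Both interfaces now require the async hook, so each provider helper owns its payload shape. A sketch of the minimal conforming member, using local stand-in types since AutomaticSpeechRecognitionArgs and RequestArgs are package-internal:

// Sketch only: a passthrough implementation of the new hook (stand-in types).
type Args = { model?: string; inputs?: Blob; data?: Blob };
type Payload = Record<string, unknown>;

class PassthroughASRHelper {
	// hf-inference remaps inputs -> data here; fal-ai builds a base64 audio_url instead.
	async preparePayloadAsync(args: Args): Promise<Payload> {
		return { ...args };
	}
}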

package/src/snippets/getInferenceSnippets.ts
CHANGED

@@ -285,6 +285,16 @@ const prepareConversationalInput = (
 	};
 };
 
+const prepareQuestionAnsweringInput = (model: ModelDataMinimal): object => {
+	const data = JSON.parse(getModelInputSnippet(model) as string);
+	return { question: data.question, context: data.context };
+};
+
+const prepareTableQuestionAnsweringInput = (model: ModelDataMinimal): object => {
+	const data = JSON.parse(getModelInputSnippet(model) as string);
+	return { query: data.query, table: JSON.stringify(data.table) };
+};
+
 const snippets: Partial<
 	Record<
 		PipelineType,
@@ -309,12 +319,12 @@ const snippets: Partial<
 	"image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
 	"image-to-text": snippetGenerator("basicImage"),
 	"object-detection": snippetGenerator("basicImage"),
-	"question-answering": snippetGenerator("
+	"question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
 	"sentence-similarity": snippetGenerator("basic"),
 	summarization: snippetGenerator("basic"),
 	"tabular-classification": snippetGenerator("tabular"),
 	"tabular-regression": snippetGenerator("tabular"),
-	"table-question-answering": snippetGenerator("
+	"table-question-answering": snippetGenerator("tableQuestionAnswering", prepareTableQuestionAnsweringInput),
 	"text-classification": snippetGenerator("basic"),
 	"text-generation": snippetGenerator("basic"),
 	"text-to-audio": snippetGenerator("textToAudio"),

package/src/snippets/templates.exported.ts
CHANGED

@@ -30,7 +30,7 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
 		"textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
 	},
 	"huggingface_hub": {
-		"basic": "result = client.{{ methodName }}(\n
+		"basic": "result = client.{{ methodName }}(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
 		"basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
 		"basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
 		"conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
@@ -38,6 +38,8 @@ export const templates: Record<string, Record<string, Record<string, string>>> =
 		"documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
 		"imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
 		"importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
+		"questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
+		"tableQuestionAnswering": "answer = client.question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
 		"textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
 		"textToSpeech": "# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) \n",
 		"textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "

package/src/tasks/audio/automaticSpeechRecognition.ts
CHANGED

@@ -2,13 +2,9 @@ import type { AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput
 import { resolveProvider } from "../../lib/getInferenceProviderMapping";
 import { getProviderHelper } from "../../lib/getProviderHelper";
 import { InferenceOutputError } from "../../lib/InferenceOutputError";
-import {
-import type { BaseArgs, Options, RequestArgs } from "../../types";
-import { base64FromBytes } from "../../utils/base64FromBytes";
-import { omit } from "../../utils/omit";
+import type { BaseArgs, Options } from "../../types";
 import { innerRequest } from "../../utils/request";
 import type { LegacyAudioInput } from "./utils";
-import { preparePayload } from "./utils";
 
 export type AutomaticSpeechRecognitionArgs = BaseArgs & (AutomaticSpeechRecognitionInput | LegacyAudioInput);
 /**
@@ -21,7 +17,7 @@ export async function automaticSpeechRecognition(
 ): Promise<AutomaticSpeechRecognitionOutput> {
 	const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
 	const providerHelper = getProviderHelper(provider, "automatic-speech-recognition");
-	const payload = await buildPayload(args);
+	const payload = await providerHelper.preparePayloadAsync(args);
 	const { data: res } = await innerRequest<AutomaticSpeechRecognitionOutput>(payload, providerHelper, {
 		...options,
 		task: "automatic-speech-recognition",
@@ -32,29 +28,3 @@ export async function automaticSpeechRecognition(
 	}
 	return providerHelper.getResponse(res);
 }
-
-async function buildPayload(args: AutomaticSpeechRecognitionArgs): Promise<RequestArgs> {
-	if (args.provider === "fal-ai") {
-		const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : undefined;
-		const contentType = blob?.type;
-		if (!contentType) {
-			throw new Error(
-				`Unable to determine the input's content-type. Make sure your are passing a Blob when using provider fal-ai.`
-			);
-		}
-		if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
-			throw new Error(
-				`Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
-					", "
-				)}`
-			);
-		}
-		const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
-		return {
-			...("data" in args ? omit(args, "data") : omit(args, "inputs")),
-			audio_url: `data:${contentType};base64,${base64audio}`,
-		};
-	} else {
-		return preparePayload(args);
-	}
-}
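
The public function's behavior for existing callers is unchanged; the provider-specific payload construction simply moved behind the helper. A sketch of a call using the legacy data key, with placeholder token and model id:

// Sketch only: legacy `data` input still passes through preparePayloadAsync untouched.
import { automaticSpeechRecognition } from "@huggingface/inference";

const result = await automaticSpeechRecognition({
	accessToken: "hf_xxx", // placeholder token
	model: "openai/whisper-large-v3", // placeholder model id
	data: new Blob([/* audio bytes */], { type: "audio/flac" }),
});
console.log(result.text);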

package/src/tasks/cv/imageToImage.ts
CHANGED

@@ -1,8 +1,7 @@
 import type { ImageToImageInput } from "@huggingface/tasks";
 import { resolveProvider } from "../../lib/getInferenceProviderMapping";
 import { getProviderHelper } from "../../lib/getProviderHelper";
-import type { BaseArgs, Options, RequestArgs } from "../../types";
-import { base64FromBytes } from "../../utils/base64FromBytes";
+import type { BaseArgs, Options } from "../../types";
 import { innerRequest } from "../../utils/request";
 
 export type ImageToImageArgs = BaseArgs & ImageToImageInput;
@@ -14,22 +13,8 @@ export type ImageToImageArgs = BaseArgs & ImageToImageInput;
 export async function imageToImage(args: ImageToImageArgs, options?: Options): Promise<Blob> {
 	const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
 	const providerHelper = getProviderHelper(provider, "image-to-image");
-	let reqArgs: RequestArgs;
-	if (!args.parameters) {
-		reqArgs = {
-			accessToken: args.accessToken,
-			model: args.model,
-			data: args.inputs,
-		};
-	} else {
-		reqArgs = {
-			...args,
-			inputs: base64FromBytes(
-				new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
-			),
-		};
-	}
-	const { data: res } = await innerRequest<Blob>(reqArgs, providerHelper, {
+	const payload = await providerHelper.preparePayloadAsync(args);
+	const { data: res } = await innerRequest<Blob>(payload, providerHelper, {
 		...options,
 		task: "image-to-image",
 	});
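
And the matching caller-side sketch for image-to-image, which now defers payload preparation to the helper in the same way; token, model id, and prompt are placeholders:

// Sketch only: because `parameters` is set, the hf-inference helper
// base64-encodes `inputs` before the request goes out.
import { imageToImage } from "@huggingface/inference";

const edited = await imageToImage({
	accessToken: "hf_xxx", // placeholder token
	model: "stabilityai/stable-diffusion-xl-base-1.0", // placeholder model id
	inputs: new Blob([/* image bytes */], { type: "image/png" }),
	parameters: { prompt: "add a festive hat" }, // invented prompt
});
// `edited` is a Blob containing the output image.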