@huggingface/inference 3.6.2 → 3.7.1
This diff compares the publicly released contents of the two package versions as published to their registry. It is provided for informational purposes only.
- package/README.md +0 -25
- package/dist/index.cjs +1232 -898
- package/dist/index.js +1234 -900
- package/dist/src/config.d.ts +1 -0
- package/dist/src/config.d.ts.map +1 -1
- package/dist/src/lib/getProviderHelper.d.ts +37 -0
- package/dist/src/lib/getProviderHelper.d.ts.map +1 -0
- package/dist/src/lib/makeRequestOptions.d.ts +0 -2
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/src/providers/black-forest-labs.d.ts +14 -18
- package/dist/src/providers/black-forest-labs.d.ts.map +1 -1
- package/dist/src/providers/cerebras.d.ts +4 -2
- package/dist/src/providers/cerebras.d.ts.map +1 -1
- package/dist/src/providers/cohere.d.ts +5 -2
- package/dist/src/providers/cohere.d.ts.map +1 -1
- package/dist/src/providers/fal-ai.d.ts +50 -3
- package/dist/src/providers/fal-ai.d.ts.map +1 -1
- package/dist/src/providers/fireworks-ai.d.ts +5 -2
- package/dist/src/providers/fireworks-ai.d.ts.map +1 -1
- package/dist/src/providers/hf-inference.d.ts +125 -2
- package/dist/src/providers/hf-inference.d.ts.map +1 -1
- package/dist/src/providers/hyperbolic.d.ts +31 -2
- package/dist/src/providers/hyperbolic.d.ts.map +1 -1
- package/dist/src/providers/nebius.d.ts +20 -18
- package/dist/src/providers/nebius.d.ts.map +1 -1
- package/dist/src/providers/novita.d.ts +21 -18
- package/dist/src/providers/novita.d.ts.map +1 -1
- package/dist/src/providers/openai.d.ts +4 -2
- package/dist/src/providers/openai.d.ts.map +1 -1
- package/dist/src/providers/providerHelper.d.ts +182 -0
- package/dist/src/providers/providerHelper.d.ts.map +1 -0
- package/dist/src/providers/replicate.d.ts +23 -19
- package/dist/src/providers/replicate.d.ts.map +1 -1
- package/dist/src/providers/sambanova.d.ts +4 -2
- package/dist/src/providers/sambanova.d.ts.map +1 -1
- package/dist/src/providers/together.d.ts +32 -2
- package/dist/src/providers/together.d.ts.map +1 -1
- package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -1
- package/dist/src/tasks/audio/audioClassification.d.ts.map +1 -1
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
- package/dist/src/tasks/audio/textToSpeech.d.ts.map +1 -1
- package/dist/src/tasks/audio/utils.d.ts +2 -1
- package/dist/src/tasks/audio/utils.d.ts.map +1 -1
- package/dist/src/tasks/custom/request.d.ts +1 -2
- package/dist/src/tasks/custom/request.d.ts.map +1 -1
- package/dist/src/tasks/custom/streamingRequest.d.ts +1 -2
- package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageClassification.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageSegmentation.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageToImage.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageToText.d.ts.map +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -1
- package/dist/src/tasks/cv/textToImage.d.ts.map +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -1
- package/dist/src/tasks/index.d.ts +6 -6
- package/dist/src/tasks/index.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
- package/dist/src/tasks/nlp/featureExtraction.d.ts.map +1 -1
- package/dist/src/tasks/nlp/fillMask.d.ts.map +1 -1
- package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -1
- package/dist/src/tasks/nlp/summarization.d.ts.map +1 -1
- package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/textGeneration.d.ts.map +1 -1
- package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/translation.d.ts.map +1 -1
- package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
- package/dist/src/tasks/tabular/tabularClassification.d.ts.map +1 -1
- package/dist/src/tasks/tabular/tabularRegression.d.ts.map +1 -1
- package/dist/src/types.d.ts +10 -13
- package/dist/src/types.d.ts.map +1 -1
- package/dist/src/utils/request.d.ts +27 -0
- package/dist/src/utils/request.d.ts.map +1 -0
- package/package.json +3 -3
- package/src/config.ts +1 -0
- package/src/lib/getProviderHelper.ts +270 -0
- package/src/lib/makeRequestOptions.ts +36 -90
- package/src/providers/black-forest-labs.ts +73 -22
- package/src/providers/cerebras.ts +6 -27
- package/src/providers/cohere.ts +9 -28
- package/src/providers/fal-ai.ts +195 -77
- package/src/providers/fireworks-ai.ts +8 -29
- package/src/providers/hf-inference.ts +555 -34
- package/src/providers/hyperbolic.ts +107 -29
- package/src/providers/nebius.ts +65 -29
- package/src/providers/novita.ts +68 -32
- package/src/providers/openai.ts +6 -32
- package/src/providers/providerHelper.ts +354 -0
- package/src/providers/replicate.ts +124 -34
- package/src/providers/sambanova.ts +5 -30
- package/src/providers/together.ts +92 -28
- package/src/snippets/getInferenceSnippets.ts +16 -9
- package/src/snippets/templates.exported.ts +2 -2
- package/src/tasks/audio/audioClassification.ts +6 -9
- package/src/tasks/audio/audioToAudio.ts +5 -28
- package/src/tasks/audio/automaticSpeechRecognition.ts +7 -6
- package/src/tasks/audio/textToSpeech.ts +6 -30
- package/src/tasks/audio/utils.ts +2 -1
- package/src/tasks/custom/request.ts +7 -34
- package/src/tasks/custom/streamingRequest.ts +5 -87
- package/src/tasks/cv/imageClassification.ts +5 -9
- package/src/tasks/cv/imageSegmentation.ts +5 -10
- package/src/tasks/cv/imageToImage.ts +5 -8
- package/src/tasks/cv/imageToText.ts +8 -13
- package/src/tasks/cv/objectDetection.ts +6 -21
- package/src/tasks/cv/textToImage.ts +10 -138
- package/src/tasks/cv/textToVideo.ts +11 -59
- package/src/tasks/cv/zeroShotImageClassification.ts +7 -12
- package/src/tasks/index.ts +6 -6
- package/src/tasks/multimodal/documentQuestionAnswering.ts +10 -26
- package/src/tasks/multimodal/visualQuestionAnswering.ts +6 -12
- package/src/tasks/nlp/chatCompletion.ts +7 -23
- package/src/tasks/nlp/chatCompletionStream.ts +4 -5
- package/src/tasks/nlp/featureExtraction.ts +5 -20
- package/src/tasks/nlp/fillMask.ts +5 -18
- package/src/tasks/nlp/questionAnswering.ts +5 -23
- package/src/tasks/nlp/sentenceSimilarity.ts +5 -18
- package/src/tasks/nlp/summarization.ts +5 -8
- package/src/tasks/nlp/tableQuestionAnswering.ts +5 -29
- package/src/tasks/nlp/textClassification.ts +8 -14
- package/src/tasks/nlp/textGeneration.ts +13 -80
- package/src/tasks/nlp/textGenerationStream.ts +2 -2
- package/src/tasks/nlp/tokenClassification.ts +8 -24
- package/src/tasks/nlp/translation.ts +5 -8
- package/src/tasks/nlp/zeroShotClassification.ts +8 -22
- package/src/tasks/tabular/tabularClassification.ts +5 -8
- package/src/tasks/tabular/tabularRegression.ts +5 -8
- package/src/types.ts +11 -14
- package/src/utils/request.ts +161 -0
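
Before the per-file diff, note that the headline change in 3.7.x is architectural: the per-provider `*_CONFIG` objects assembled from standalone `makeBaseUrl`/`makeBody`/`makeHeaders`/`makeUrl` functions are replaced by a `TaskProviderHelper` class hierarchy (new `src/providers/providerHelper.ts`), with provider- and task-specific subclasses registered in a `PROVIDERS` map in the new `src/lib/getProviderHelper.ts`. The TypeScript sketch below distills that pattern from the compiled output shown in the diff; the `UrlParams` shape and the collapsed `ConversationalTask` class are simplifications for illustration, not the package's exact API.

```typescript
// Sketch of the 3.7.x provider-helper pattern (simplified; `UrlParams` and
// `ConversationalTask` are illustrative stand-ins, not the package's real types).
const HF_ROUTER_URL = "https://router.huggingface.co";

type UrlParams = { authMethod: "provider-key" | "hf-token"; model: string };

abstract class TaskProviderHelper {
  constructor(
    readonly provider: string,
    readonly baseUrl: string,
    readonly clientSideRoutingOnly: boolean = false
  ) {}

  // Route through the HF router unless the caller supplied the provider's own key.
  makeBaseUrl(params: UrlParams): string {
    return params.authMethod !== "provider-key" ? `${HF_ROUTER_URL}/${this.provider}` : this.baseUrl;
  }

  abstract makeRoute(params: UrlParams): string;

  makeUrl(params: UrlParams): string {
    const route = this.makeRoute(params).replace(/^\/+/, "");
    return `${this.makeBaseUrl(params)}/${route}`;
  }
}

// Chat-completion providers now differ only by name, base URL, and route.
class ConversationalTask extends TaskProviderHelper {
  makeRoute(): string {
    return "v1/chat/completions";
  }
}

// getProviderHelper-style registry: (provider, task) -> helper instance.
const PROVIDERS: Record<string, Record<string, TaskProviderHelper>> = {
  cerebras: { conversational: new ConversationalTask("cerebras", "https://api.cerebras.ai") },
  sambanova: { conversational: new ConversationalTask("sambanova", "https://api.sambanova.ai") },
};

// Example: a routed (HF-token) call resolves to the router, not the provider.
const helper = PROVIDERS["cerebras"]["conversational"];
console.log(helper.makeUrl({ authMethod: "hf-token", model: "some-model" }));
// -> https://router.huggingface.co/cerebras/v1/chat/completions
```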
package/dist/index.cjs
CHANGED
|
@@ -98,90 +98,215 @@ __export(tasks_exports, {
|
|
|
98
98
|
zeroShotImageClassification: () => zeroShotImageClassification
|
|
99
99
|
});
|
|
100
100
|
|
|
101
|
+
// package.json
|
|
102
|
+
var name = "@huggingface/inference";
|
|
103
|
+
var version = "3.7.1";
|
|
104
|
+
|
|
101
105
|
// src/config.ts
|
|
102
106
|
var HF_HUB_URL = "https://huggingface.co";
|
|
103
107
|
var HF_ROUTER_URL = "https://router.huggingface.co";
|
|
108
|
+
var HF_HEADER_X_BILL_TO = "X-HF-Bill-To";
|
|
104
109
|
|
|
105
|
-
// src/
|
|
106
|
-
var
|
|
107
|
-
|
|
108
|
-
|
|
110
|
+
// src/lib/InferenceOutputError.ts
|
|
111
|
+
var InferenceOutputError = class extends TypeError {
|
|
112
|
+
constructor(message) {
|
|
113
|
+
super(
|
|
114
|
+
`Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
|
|
115
|
+
);
|
|
116
|
+
this.name = "InferenceOutputError";
|
|
117
|
+
}
|
|
109
118
|
};
|
|
110
|
-
|
|
111
|
-
|
|
119
|
+
|
|
120
|
+
// src/utils/delay.ts
|
|
121
|
+
function delay(ms) {
|
|
122
|
+
return new Promise((resolve) => {
|
|
123
|
+
setTimeout(() => resolve(), ms);
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// src/utils/pick.ts
|
|
128
|
+
function pick(o, props) {
|
|
129
|
+
return Object.assign(
|
|
130
|
+
{},
|
|
131
|
+
...props.map((prop) => {
|
|
132
|
+
if (o[prop] !== void 0) {
|
|
133
|
+
return { [prop]: o[prop] };
|
|
134
|
+
}
|
|
135
|
+
})
|
|
136
|
+
);
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
// src/utils/typedInclude.ts
|
|
140
|
+
function typedInclude(arr, v) {
|
|
141
|
+
return arr.includes(v);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
// src/utils/omit.ts
|
|
145
|
+
function omit(o, props) {
|
|
146
|
+
const propsArr = Array.isArray(props) ? props : [props];
|
|
147
|
+
const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
|
|
148
|
+
return pick(o, letsKeep);
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
// src/utils/toArray.ts
|
|
152
|
+
function toArray(obj) {
|
|
153
|
+
if (Array.isArray(obj)) {
|
|
154
|
+
return obj;
|
|
155
|
+
}
|
|
156
|
+
return [obj];
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
// src/providers/providerHelper.ts
|
|
160
|
+
var TaskProviderHelper = class {
|
|
161
|
+
constructor(provider, baseUrl, clientSideRoutingOnly = false) {
|
|
162
|
+
this.provider = provider;
|
|
163
|
+
this.baseUrl = baseUrl;
|
|
164
|
+
this.clientSideRoutingOnly = clientSideRoutingOnly;
|
|
165
|
+
}
|
|
166
|
+
/**
|
|
167
|
+
* Prepare the base URL for the request
|
|
168
|
+
*/
|
|
169
|
+
makeBaseUrl(params) {
|
|
170
|
+
return params.authMethod !== "provider-key" ? `${HF_ROUTER_URL}/${this.provider}` : this.baseUrl;
|
|
171
|
+
}
|
|
172
|
+
/**
|
|
173
|
+
* Prepare the body for the request
|
|
174
|
+
*/
|
|
175
|
+
makeBody(params) {
|
|
176
|
+
if ("data" in params.args && !!params.args.data) {
|
|
177
|
+
return params.args.data;
|
|
178
|
+
}
|
|
179
|
+
return JSON.stringify(this.preparePayload(params));
|
|
180
|
+
}
|
|
181
|
+
/**
|
|
182
|
+
* Prepare the URL for the request
|
|
183
|
+
*/
|
|
184
|
+
makeUrl(params) {
|
|
185
|
+
const baseUrl = this.makeBaseUrl(params);
|
|
186
|
+
const route = this.makeRoute(params).replace(/^\/+/, "");
|
|
187
|
+
return `${baseUrl}/${route}`;
|
|
188
|
+
}
|
|
189
|
+
/**
|
|
190
|
+
* Prepare the headers for the request
|
|
191
|
+
*/
|
|
192
|
+
prepareHeaders(params, isBinary) {
|
|
193
|
+
const headers = { Authorization: `Bearer ${params.accessToken}` };
|
|
194
|
+
if (!isBinary) {
|
|
195
|
+
headers["Content-Type"] = "application/json";
|
|
196
|
+
}
|
|
197
|
+
return headers;
|
|
198
|
+
}
|
|
112
199
|
};
|
|
113
|
-
var
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
}
|
|
117
|
-
|
|
200
|
+
var BaseConversationalTask = class extends TaskProviderHelper {
|
|
201
|
+
constructor(provider, baseUrl, clientSideRoutingOnly = false) {
|
|
202
|
+
super(provider, baseUrl, clientSideRoutingOnly);
|
|
203
|
+
}
|
|
204
|
+
makeRoute() {
|
|
205
|
+
return "v1/chat/completions";
|
|
206
|
+
}
|
|
207
|
+
preparePayload(params) {
|
|
208
|
+
return {
|
|
209
|
+
...params.args,
|
|
210
|
+
model: params.model
|
|
211
|
+
};
|
|
212
|
+
}
|
|
213
|
+
async getResponse(response) {
|
|
214
|
+
if (typeof response === "object" && Array.isArray(response?.choices) && typeof response?.created === "number" && typeof response?.id === "string" && typeof response?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
|
|
215
|
+
(response.system_fingerprint === void 0 || response.system_fingerprint === null || typeof response.system_fingerprint === "string") && typeof response?.usage === "object") {
|
|
216
|
+
return response;
|
|
217
|
+
}
|
|
218
|
+
throw new InferenceOutputError("Expected ChatCompletionOutput");
|
|
118
219
|
}
|
|
119
220
|
};
|
|
120
|
-
var
|
|
121
|
-
|
|
221
|
+
var BaseTextGenerationTask = class extends TaskProviderHelper {
|
|
222
|
+
constructor(provider, baseUrl, clientSideRoutingOnly = false) {
|
|
223
|
+
super(provider, baseUrl, clientSideRoutingOnly);
|
|
224
|
+
}
|
|
225
|
+
preparePayload(params) {
|
|
226
|
+
return {
|
|
227
|
+
...params.args,
|
|
228
|
+
model: params.model
|
|
229
|
+
};
|
|
230
|
+
}
|
|
231
|
+
makeRoute() {
|
|
232
|
+
return "v1/completions";
|
|
233
|
+
}
|
|
234
|
+
async getResponse(response) {
|
|
235
|
+
const res = toArray(response);
|
|
236
|
+
if (Array.isArray(res) && res.length > 0 && res.every(
|
|
237
|
+
(x) => typeof x === "object" && !!x && "generated_text" in x && typeof x.generated_text === "string"
|
|
238
|
+
)) {
|
|
239
|
+
return res[0];
|
|
240
|
+
}
|
|
241
|
+
throw new InferenceOutputError("Expected Array<{generated_text: string}>");
|
|
242
|
+
}
|
|
122
243
|
};
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
244
|
+
|
|
245
|
+
// src/providers/black-forest-labs.ts
|
|
246
|
+
var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
|
|
247
|
+
var BlackForestLabsTextToImageTask = class extends TaskProviderHelper {
|
|
248
|
+
constructor() {
|
|
249
|
+
super("black-forest-labs", BLACK_FOREST_LABS_AI_API_BASE_URL);
|
|
250
|
+
}
|
|
251
|
+
preparePayload(params) {
|
|
252
|
+
return {
|
|
253
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
254
|
+
...params.args.parameters,
|
|
255
|
+
prompt: params.args.inputs
|
|
256
|
+
};
|
|
257
|
+
}
|
|
258
|
+
prepareHeaders(params, binary) {
|
|
259
|
+
const headers = {
|
|
260
|
+
Authorization: params.authMethod !== "provider-key" ? `Bearer ${params.accessToken}` : `X-Key ${params.accessToken}`
|
|
261
|
+
};
|
|
262
|
+
if (!binary) {
|
|
263
|
+
headers["Content-Type"] = "application/json";
|
|
264
|
+
}
|
|
265
|
+
return headers;
|
|
266
|
+
}
|
|
267
|
+
makeRoute(params) {
|
|
268
|
+
if (!params) {
|
|
269
|
+
throw new Error("Params are required");
|
|
270
|
+
}
|
|
271
|
+
return `/v1/${params.model}`;
|
|
272
|
+
}
|
|
273
|
+
async getResponse(response, url, headers, outputType) {
|
|
274
|
+
const urlObj = new URL(response.polling_url);
|
|
275
|
+
for (let step = 0; step < 5; step++) {
|
|
276
|
+
await delay(1e3);
|
|
277
|
+
console.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
|
|
278
|
+
urlObj.searchParams.set("attempt", step.toString(10));
|
|
279
|
+
const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
|
|
280
|
+
if (!resp.ok) {
|
|
281
|
+
throw new InferenceOutputError("Failed to fetch result from black forest labs API");
|
|
282
|
+
}
|
|
283
|
+
const payload = await resp.json();
|
|
284
|
+
if (typeof payload === "object" && payload && "status" in payload && typeof payload.status === "string" && payload.status === "Ready" && "result" in payload && typeof payload.result === "object" && payload.result && "sample" in payload.result && typeof payload.result.sample === "string") {
|
|
285
|
+
if (outputType === "url") {
|
|
286
|
+
return payload.result.sample;
|
|
287
|
+
}
|
|
288
|
+
const image = await fetch(payload.result.sample);
|
|
289
|
+
return await image.blob();
|
|
290
|
+
}
|
|
291
|
+
}
|
|
292
|
+
throw new InferenceOutputError("Failed to fetch result from black forest labs API");
|
|
293
|
+
}
|
|
128
294
|
};
|
|
129
295
|
|
|
130
296
|
// src/providers/cerebras.ts
|
|
131
|
-
var
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
}
|
|
135
|
-
var makeBody2 = (params) => {
|
|
136
|
-
return {
|
|
137
|
-
...params.args,
|
|
138
|
-
model: params.model
|
|
139
|
-
};
|
|
140
|
-
};
|
|
141
|
-
var makeHeaders2 = (params) => {
|
|
142
|
-
return { Authorization: `Bearer ${params.accessToken}` };
|
|
143
|
-
};
|
|
144
|
-
var makeUrl2 = (params) => {
|
|
145
|
-
return `${params.baseUrl}/v1/chat/completions`;
|
|
146
|
-
};
|
|
147
|
-
var CEREBRAS_CONFIG = {
|
|
148
|
-
makeBaseUrl: makeBaseUrl2,
|
|
149
|
-
makeBody: makeBody2,
|
|
150
|
-
makeHeaders: makeHeaders2,
|
|
151
|
-
makeUrl: makeUrl2
|
|
297
|
+
var CerebrasConversationalTask = class extends BaseConversationalTask {
|
|
298
|
+
constructor() {
|
|
299
|
+
super("cerebras", "https://api.cerebras.ai");
|
|
300
|
+
}
|
|
152
301
|
};
|
|
153
302
|
|
|
154
303
|
// src/providers/cohere.ts
|
|
155
|
-
var
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
}
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
...params.args,
|
|
162
|
-
model: params.model
|
|
163
|
-
};
|
|
164
|
-
};
|
|
165
|
-
var makeHeaders3 = (params) => {
|
|
166
|
-
return { Authorization: `Bearer ${params.accessToken}` };
|
|
167
|
-
};
|
|
168
|
-
var makeUrl3 = (params) => {
|
|
169
|
-
return `${params.baseUrl}/compatibility/v1/chat/completions`;
|
|
170
|
-
};
|
|
171
|
-
var COHERE_CONFIG = {
|
|
172
|
-
makeBaseUrl: makeBaseUrl3,
|
|
173
|
-
makeBody: makeBody3,
|
|
174
|
-
makeHeaders: makeHeaders3,
|
|
175
|
-
makeUrl: makeUrl3
|
|
176
|
-
};
|
|
177
|
-
|
|
178
|
-
// src/lib/InferenceOutputError.ts
|
|
179
|
-
var InferenceOutputError = class extends TypeError {
|
|
180
|
-
constructor(message) {
|
|
181
|
-
super(
|
|
182
|
-
`Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
|
|
183
|
-
);
|
|
184
|
-
this.name = "InferenceOutputError";
|
|
304
|
+
var CohereConversationalTask = class extends BaseConversationalTask {
|
|
305
|
+
constructor() {
|
|
306
|
+
super("cohere", "https://api.cohere.com");
|
|
307
|
+
}
|
|
308
|
+
makeRoute() {
|
|
309
|
+
return "/compatibility/v1/chat/completions";
|
|
185
310
|
}
|
|
186
311
|
};
|
|
187
312
|
|
|
@@ -190,349 +315,871 @@ function isUrl(modelOrUrl) {
|
|
|
190
315
|
return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
|
|
191
316
|
}
|
|
192
317
|
|
|
193
|
-
// src/utils/delay.ts
|
|
194
|
-
function delay(ms) {
|
|
195
|
-
return new Promise((resolve) => {
|
|
196
|
-
setTimeout(() => resolve(), ms);
|
|
197
|
-
});
|
|
198
|
-
}
|
|
199
|
-
|
|
200
318
|
// src/providers/fal-ai.ts
|
|
201
|
-
var
|
|
202
|
-
var
|
|
203
|
-
|
|
204
|
-
|
|
319
|
+
var FAL_AI_SUPPORTED_BLOB_TYPES = ["audio/mpeg", "audio/mp4", "audio/wav", "audio/x-wav"];
|
|
320
|
+
var FalAITask = class extends TaskProviderHelper {
|
|
321
|
+
constructor(url) {
|
|
322
|
+
super("fal-ai", url || "https://fal.run");
|
|
323
|
+
}
|
|
324
|
+
preparePayload(params) {
|
|
325
|
+
return params.args;
|
|
326
|
+
}
|
|
327
|
+
makeRoute(params) {
|
|
328
|
+
return `/${params.model}`;
|
|
329
|
+
}
|
|
330
|
+
prepareHeaders(params, binary) {
|
|
331
|
+
const headers = {
|
|
332
|
+
Authorization: params.authMethod !== "provider-key" ? `Bearer ${params.accessToken}` : `Key ${params.accessToken}`
|
|
333
|
+
};
|
|
334
|
+
if (!binary) {
|
|
335
|
+
headers["Content-Type"] = "application/json";
|
|
336
|
+
}
|
|
337
|
+
return headers;
|
|
338
|
+
}
|
|
205
339
|
};
|
|
206
|
-
var
|
|
207
|
-
|
|
340
|
+
var FalAITextToImageTask = class extends FalAITask {
|
|
341
|
+
preparePayload(params) {
|
|
342
|
+
return {
|
|
343
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
344
|
+
...params.args.parameters,
|
|
345
|
+
sync_mode: true,
|
|
346
|
+
prompt: params.args.inputs
|
|
347
|
+
};
|
|
348
|
+
}
|
|
349
|
+
async getResponse(response, outputType) {
|
|
350
|
+
if (typeof response === "object" && "images" in response && Array.isArray(response.images) && response.images.length > 0 && "url" in response.images[0] && typeof response.images[0].url === "string") {
|
|
351
|
+
if (outputType === "url") {
|
|
352
|
+
return response.images[0].url;
|
|
353
|
+
}
|
|
354
|
+
const urlResponse = await fetch(response.images[0].url);
|
|
355
|
+
return await urlResponse.blob();
|
|
356
|
+
}
|
|
357
|
+
throw new InferenceOutputError("Expected Fal.ai text-to-image response format");
|
|
358
|
+
}
|
|
208
359
|
};
|
|
209
|
-
var
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
}
|
|
360
|
+
var FalAITextToVideoTask = class extends FalAITask {
|
|
361
|
+
constructor() {
|
|
362
|
+
super("https://queue.fal.run");
|
|
363
|
+
}
|
|
364
|
+
makeRoute(params) {
|
|
365
|
+
if (params.authMethod !== "provider-key") {
|
|
366
|
+
return `/${params.model}?_subdomain=queue`;
|
|
367
|
+
}
|
|
368
|
+
return `/${params.model}`;
|
|
369
|
+
}
|
|
370
|
+
preparePayload(params) {
|
|
371
|
+
return {
|
|
372
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
373
|
+
...params.args.parameters,
|
|
374
|
+
prompt: params.args.inputs
|
|
375
|
+
};
|
|
376
|
+
}
|
|
377
|
+
async getResponse(response, url, headers) {
|
|
378
|
+
if (!url || !headers) {
|
|
379
|
+
throw new InferenceOutputError("URL and headers are required for text-to-video task");
|
|
380
|
+
}
|
|
381
|
+
const requestId = response.request_id;
|
|
382
|
+
if (!requestId) {
|
|
383
|
+
throw new InferenceOutputError("No request ID found in the response");
|
|
384
|
+
}
|
|
385
|
+
let status = response.status;
|
|
386
|
+
const parsedUrl = new URL(url);
|
|
387
|
+
const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/fal-ai" : ""}`;
|
|
388
|
+
const modelId = new URL(response.response_url).pathname;
|
|
389
|
+
const queryParams = parsedUrl.search;
|
|
390
|
+
const statusUrl = `${baseUrl}${modelId}/status${queryParams}`;
|
|
391
|
+
const resultUrl = `${baseUrl}${modelId}${queryParams}`;
|
|
392
|
+
while (status !== "COMPLETED") {
|
|
393
|
+
await delay(500);
|
|
394
|
+
const statusResponse = await fetch(statusUrl, { headers });
|
|
395
|
+
if (!statusResponse.ok) {
|
|
396
|
+
throw new InferenceOutputError("Failed to fetch response status from fal-ai API");
|
|
397
|
+
}
|
|
398
|
+
try {
|
|
399
|
+
status = (await statusResponse.json()).status;
|
|
400
|
+
} catch (error) {
|
|
401
|
+
throw new InferenceOutputError("Failed to parse status response from fal-ai API");
|
|
402
|
+
}
|
|
403
|
+
}
|
|
404
|
+
const resultResponse = await fetch(resultUrl, { headers });
|
|
405
|
+
let result;
|
|
406
|
+
try {
|
|
407
|
+
result = await resultResponse.json();
|
|
408
|
+
} catch (error) {
|
|
409
|
+
throw new InferenceOutputError("Failed to parse result response from fal-ai API");
|
|
410
|
+
}
|
|
411
|
+
if (typeof result === "object" && !!result && "video" in result && typeof result.video === "object" && !!result.video && "url" in result.video && typeof result.video.url === "string" && isUrl(result.video.url)) {
|
|
412
|
+
const urlResponse = await fetch(result.video.url);
|
|
413
|
+
return await urlResponse.blob();
|
|
414
|
+
} else {
|
|
415
|
+
throw new InferenceOutputError(
|
|
416
|
+
"Expected { video: { url: string } } result format, got instead: " + JSON.stringify(result)
|
|
417
|
+
);
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
};
|
|
421
|
+
var FalAIAutomaticSpeechRecognitionTask = class extends FalAITask {
|
|
422
|
+
prepareHeaders(params, binary) {
|
|
423
|
+
const headers = super.prepareHeaders(params, binary);
|
|
424
|
+
headers["Content-Type"] = "application/json";
|
|
425
|
+
return headers;
|
|
426
|
+
}
|
|
427
|
+
async getResponse(response) {
|
|
428
|
+
const res = response;
|
|
429
|
+
if (typeof res?.text !== "string") {
|
|
430
|
+
throw new InferenceOutputError(
|
|
431
|
+
`Expected { text: string } format from Fal.ai Automatic Speech Recognition, got: ${JSON.stringify(response)}`
|
|
432
|
+
);
|
|
433
|
+
}
|
|
434
|
+
return { text: res.text };
|
|
435
|
+
}
|
|
213
436
|
};
|
|
214
|
-
var
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
};
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
}
|
|
227
|
-
|
|
228
|
-
const requestId = res.request_id;
|
|
229
|
-
if (!requestId) {
|
|
230
|
-
throw new InferenceOutputError("No request ID found in the response");
|
|
231
|
-
}
|
|
232
|
-
let status = res.status;
|
|
233
|
-
const parsedUrl = new URL(url);
|
|
234
|
-
const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/fal-ai" : ""}`;
|
|
235
|
-
const modelId = new URL(res.response_url).pathname;
|
|
236
|
-
const queryParams = parsedUrl.search;
|
|
237
|
-
const statusUrl = `${baseUrl}${modelId}/status${queryParams}`;
|
|
238
|
-
const resultUrl = `${baseUrl}${modelId}${queryParams}`;
|
|
239
|
-
while (status !== "COMPLETED") {
|
|
240
|
-
await delay(500);
|
|
241
|
-
const statusResponse = await fetch(statusUrl, { headers });
|
|
242
|
-
if (!statusResponse.ok) {
|
|
243
|
-
throw new InferenceOutputError("Failed to fetch response status from fal-ai API");
|
|
437
|
+
var FalAITextToSpeechTask = class extends FalAITask {
|
|
438
|
+
preparePayload(params) {
|
|
439
|
+
return {
|
|
440
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
441
|
+
...params.args.parameters,
|
|
442
|
+
lyrics: params.args.inputs
|
|
443
|
+
};
|
|
444
|
+
}
|
|
445
|
+
async getResponse(response) {
|
|
446
|
+
const res = response;
|
|
447
|
+
if (typeof res?.audio?.url !== "string") {
|
|
448
|
+
throw new InferenceOutputError(
|
|
449
|
+
`Expected { audio: { url: string } } format from Fal.ai Text-to-Speech, got: ${JSON.stringify(response)}`
|
|
450
|
+
);
|
|
244
451
|
}
|
|
245
452
|
try {
|
|
246
|
-
|
|
453
|
+
const urlResponse = await fetch(res.audio.url);
|
|
454
|
+
if (!urlResponse.ok) {
|
|
455
|
+
throw new Error(`Failed to fetch audio from ${res.audio.url}: ${urlResponse.statusText}`);
|
|
456
|
+
}
|
|
457
|
+
return await urlResponse.blob();
|
|
247
458
|
} catch (error) {
|
|
248
|
-
throw new InferenceOutputError(
|
|
459
|
+
throw new InferenceOutputError(
|
|
460
|
+
`Error fetching or processing audio from Fal.ai Text-to-Speech URL: ${res.audio.url}. ${error instanceof Error ? error.message : String(error)}`
|
|
461
|
+
);
|
|
249
462
|
}
|
|
250
463
|
}
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
464
|
+
};
|
|
465
|
+
|
|
466
|
+
// src/providers/fireworks-ai.ts
|
|
467
|
+
var FireworksConversationalTask = class extends BaseConversationalTask {
|
|
468
|
+
constructor() {
|
|
469
|
+
super("fireworks-ai", "https://api.fireworks.ai");
|
|
257
470
|
}
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
471
|
+
makeRoute() {
|
|
472
|
+
return "/inference/v1/chat/completions";
|
|
473
|
+
}
|
|
474
|
+
};
|
|
475
|
+
|
|
476
|
+
// src/providers/hf-inference.ts
|
|
477
|
+
var HFInferenceTask = class extends TaskProviderHelper {
|
|
478
|
+
constructor() {
|
|
479
|
+
super("hf-inference", `${HF_ROUTER_URL}/hf-inference`);
|
|
480
|
+
}
|
|
481
|
+
preparePayload(params) {
|
|
482
|
+
return params.args;
|
|
483
|
+
}
|
|
484
|
+
makeUrl(params) {
|
|
485
|
+
if (params.model.startsWith("http://") || params.model.startsWith("https://")) {
|
|
486
|
+
return params.model;
|
|
487
|
+
}
|
|
488
|
+
return super.makeUrl(params);
|
|
489
|
+
}
|
|
490
|
+
makeRoute(params) {
|
|
491
|
+
if (params.task && ["feature-extraction", "sentence-similarity"].includes(params.task)) {
|
|
492
|
+
return `pipeline/${params.task}/${params.model}`;
|
|
493
|
+
}
|
|
494
|
+
return `models/${params.model}`;
|
|
495
|
+
}
|
|
496
|
+
async getResponse(response) {
|
|
497
|
+
return response;
|
|
498
|
+
}
|
|
499
|
+
};
|
|
500
|
+
var HFInferenceTextToImageTask = class extends HFInferenceTask {
|
|
501
|
+
async getResponse(response, url, headers, outputType) {
|
|
502
|
+
if (!response) {
|
|
503
|
+
throw new InferenceOutputError("response is undefined");
|
|
504
|
+
}
|
|
505
|
+
if (typeof response == "object") {
|
|
506
|
+
if ("data" in response && Array.isArray(response.data) && response.data[0].b64_json) {
|
|
507
|
+
const base64Data = response.data[0].b64_json;
|
|
508
|
+
if (outputType === "url") {
|
|
509
|
+
return `data:image/jpeg;base64,${base64Data}`;
|
|
510
|
+
}
|
|
511
|
+
const base64Response = await fetch(`data:image/jpeg;base64,${base64Data}`);
|
|
512
|
+
return await base64Response.blob();
|
|
513
|
+
}
|
|
514
|
+
if ("output" in response && Array.isArray(response.output)) {
|
|
515
|
+
if (outputType === "url") {
|
|
516
|
+
return response.output[0];
|
|
517
|
+
}
|
|
518
|
+
const urlResponse = await fetch(response.output[0]);
|
|
519
|
+
const blob = await urlResponse.blob();
|
|
520
|
+
return blob;
|
|
521
|
+
}
|
|
522
|
+
}
|
|
523
|
+
if (response instanceof Blob) {
|
|
524
|
+
if (outputType === "url") {
|
|
525
|
+
const b64 = await response.arrayBuffer().then((buf) => Buffer.from(buf).toString("base64"));
|
|
526
|
+
return `data:image/jpeg;base64,${b64}`;
|
|
527
|
+
}
|
|
528
|
+
return response;
|
|
529
|
+
}
|
|
530
|
+
throw new InferenceOutputError("Expected a Blob ");
|
|
531
|
+
}
|
|
532
|
+
};
|
|
533
|
+
var HFInferenceConversationalTask = class extends HFInferenceTask {
|
|
534
|
+
makeUrl(params) {
|
|
535
|
+
let url;
|
|
536
|
+
if (params.model.startsWith("http://") || params.model.startsWith("https://")) {
|
|
537
|
+
url = params.model.trim();
|
|
538
|
+
} else {
|
|
539
|
+
url = `${this.makeBaseUrl(params)}/models/${params.model}`;
|
|
540
|
+
}
|
|
541
|
+
url = url.replace(/\/+$/, "");
|
|
542
|
+
if (url.endsWith("/v1")) {
|
|
543
|
+
url += "/chat/completions";
|
|
544
|
+
} else if (!url.endsWith("/chat/completions")) {
|
|
545
|
+
url += "/v1/chat/completions";
|
|
546
|
+
}
|
|
547
|
+
return url;
|
|
548
|
+
}
|
|
549
|
+
preparePayload(params) {
|
|
550
|
+
return {
|
|
551
|
+
...params.args,
|
|
552
|
+
model: params.model
|
|
553
|
+
};
|
|
554
|
+
}
|
|
555
|
+
async getResponse(response) {
|
|
556
|
+
return response;
|
|
557
|
+
}
|
|
558
|
+
};
|
|
559
|
+
var HFInferenceTextGenerationTask = class extends HFInferenceTask {
|
|
560
|
+
async getResponse(response) {
|
|
561
|
+
const res = toArray(response);
|
|
562
|
+
if (Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string")) {
|
|
563
|
+
return res?.[0];
|
|
564
|
+
}
|
|
565
|
+
throw new InferenceOutputError("Expected Array<{generated_text: string}>");
|
|
566
|
+
}
|
|
567
|
+
};
|
|
568
|
+
var HFInferenceAudioClassificationTask = class extends HFInferenceTask {
|
|
569
|
+
async getResponse(response) {
|
|
570
|
+
if (Array.isArray(response) && response.every(
|
|
571
|
+
(x) => typeof x === "object" && x !== null && typeof x.label === "string" && typeof x.score === "number"
|
|
572
|
+
)) {
|
|
573
|
+
return response;
|
|
574
|
+
}
|
|
575
|
+
throw new InferenceOutputError("Expected Array<{label: string, score: number}> but received different format");
|
|
576
|
+
}
|
|
577
|
+
};
|
|
578
|
+
var HFInferenceAutomaticSpeechRecognitionTask = class extends HFInferenceTask {
|
|
579
|
+
async getResponse(response) {
|
|
580
|
+
return response;
|
|
581
|
+
}
|
|
582
|
+
};
|
|
583
|
+
var HFInferenceAudioToAudioTask = class extends HFInferenceTask {
|
|
584
|
+
async getResponse(response) {
|
|
585
|
+
if (!Array.isArray(response)) {
|
|
586
|
+
throw new InferenceOutputError("Expected Array");
|
|
587
|
+
}
|
|
588
|
+
if (!response.every((elem) => {
|
|
589
|
+
return typeof elem === "object" && elem && "label" in elem && typeof elem.label === "string" && "content-type" in elem && typeof elem["content-type"] === "string" && "blob" in elem && typeof elem.blob === "string";
|
|
590
|
+
})) {
|
|
591
|
+
throw new InferenceOutputError("Expected Array<{label: string, audio: Blob}>");
|
|
592
|
+
}
|
|
593
|
+
return response;
|
|
594
|
+
}
|
|
595
|
+
};
|
|
596
|
+
var HFInferenceDocumentQuestionAnsweringTask = class extends HFInferenceTask {
|
|
597
|
+
async getResponse(response) {
|
|
598
|
+
if (Array.isArray(response) && response.every(
|
|
599
|
+
(elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
|
|
600
|
+
)) {
|
|
601
|
+
return response[0];
|
|
602
|
+
}
|
|
603
|
+
throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
|
|
604
|
+
}
|
|
605
|
+
};
|
|
606
|
+
var HFInferenceFeatureExtractionTask = class extends HFInferenceTask {
|
|
607
|
+
async getResponse(response) {
|
|
608
|
+
const isNumArrayRec = (arr, maxDepth, curDepth = 0) => {
|
|
609
|
+
if (curDepth > maxDepth)
|
|
610
|
+
return false;
|
|
611
|
+
if (arr.every((x) => Array.isArray(x))) {
|
|
612
|
+
return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1));
|
|
613
|
+
} else {
|
|
614
|
+
return arr.every((x) => typeof x === "number");
|
|
615
|
+
}
|
|
616
|
+
};
|
|
617
|
+
if (Array.isArray(response) && isNumArrayRec(response, 3, 0)) {
|
|
618
|
+
return response;
|
|
619
|
+
}
|
|
620
|
+
throw new InferenceOutputError("Expected Array<number[][][] | number[][] | number[] | number>");
|
|
621
|
+
}
|
|
622
|
+
};
|
|
623
|
+
var HFInferenceImageClassificationTask = class extends HFInferenceTask {
|
|
624
|
+
async getResponse(response) {
|
|
625
|
+
if (Array.isArray(response) && response.every((x) => typeof x.label === "string" && typeof x.score === "number")) {
|
|
626
|
+
return response;
|
|
627
|
+
}
|
|
628
|
+
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
629
|
+
}
|
|
630
|
+
};
|
|
631
|
+
var HFInferenceImageSegmentationTask = class extends HFInferenceTask {
|
|
632
|
+
async getResponse(response) {
|
|
633
|
+
if (Array.isArray(response) && response.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number")) {
|
|
634
|
+
return response;
|
|
635
|
+
}
|
|
636
|
+
throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>");
|
|
637
|
+
}
|
|
638
|
+
};
|
|
639
|
+
var HFInferenceImageToTextTask = class extends HFInferenceTask {
|
|
640
|
+
async getResponse(response) {
|
|
641
|
+
if (typeof response?.generated_text !== "string") {
|
|
642
|
+
throw new InferenceOutputError("Expected {generated_text: string}");
|
|
643
|
+
}
|
|
644
|
+
return response;
|
|
645
|
+
}
|
|
646
|
+
};
|
|
647
|
+
var HFInferenceImageToImageTask = class extends HFInferenceTask {
|
|
648
|
+
async getResponse(response) {
|
|
649
|
+
if (response instanceof Blob) {
|
|
650
|
+
return response;
|
|
651
|
+
}
|
|
652
|
+
throw new InferenceOutputError("Expected Blob");
|
|
653
|
+
}
|
|
654
|
+
};
|
|
655
|
+
var HFInferenceObjectDetectionTask = class extends HFInferenceTask {
|
|
656
|
+
async getResponse(response) {
|
|
657
|
+
if (Array.isArray(response) && response.every(
|
|
658
|
+
(x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number"
|
|
659
|
+
)) {
|
|
660
|
+
return response;
|
|
661
|
+
}
|
|
262
662
|
throw new InferenceOutputError(
|
|
263
|
-
"Expected {
|
|
663
|
+
"Expected Array<{label: string, score: number, box: {xmin: number, ymin: number, xmax: number, ymax: number}}>"
|
|
264
664
|
);
|
|
265
665
|
}
|
|
266
|
-
}
|
|
267
|
-
|
|
268
|
-
// src/providers/fireworks-ai.ts
|
|
269
|
-
var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";
|
|
270
|
-
var makeBaseUrl5 = () => {
|
|
271
|
-
return FIREWORKS_AI_API_BASE_URL;
|
|
272
666
|
};
|
|
273
|
-
var
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
667
|
+
var HFInferenceZeroShotImageClassificationTask = class extends HFInferenceTask {
|
|
668
|
+
async getResponse(response) {
|
|
669
|
+
if (Array.isArray(response) && response.every((x) => typeof x.label === "string" && typeof x.score === "number")) {
|
|
670
|
+
return response;
|
|
671
|
+
}
|
|
672
|
+
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
673
|
+
}
|
|
674
|
+
};
|
|
675
|
+
var HFInferenceTextClassificationTask = class extends HFInferenceTask {
|
|
676
|
+
async getResponse(response) {
|
|
677
|
+
const output = response?.[0];
|
|
678
|
+
if (Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number")) {
|
|
679
|
+
return output;
|
|
680
|
+
}
|
|
681
|
+
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
682
|
+
}
|
|
683
|
+
};
|
|
684
|
+
var HFInferenceQuestionAnsweringTask = class extends HFInferenceTask {
|
|
685
|
+
async getResponse(response) {
|
|
686
|
+
if (Array.isArray(response) ? response.every(
|
|
687
|
+
(elem) => typeof elem === "object" && !!elem && typeof elem.answer === "string" && typeof elem.end === "number" && typeof elem.score === "number" && typeof elem.start === "number"
|
|
688
|
+
) : typeof response === "object" && !!response && typeof response.answer === "string" && typeof response.end === "number" && typeof response.score === "number" && typeof response.start === "number") {
|
|
689
|
+
return Array.isArray(response) ? response[0] : response;
|
|
690
|
+
}
|
|
691
|
+
throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
|
|
692
|
+
}
|
|
693
|
+
};
|
|
694
|
+
var HFInferenceFillMaskTask = class extends HFInferenceTask {
|
|
695
|
+
async getResponse(response) {
|
|
696
|
+
if (Array.isArray(response) && response.every(
|
|
697
|
+
(x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string"
|
|
698
|
+
)) {
|
|
699
|
+
return response;
|
|
700
|
+
}
|
|
701
|
+
throw new InferenceOutputError(
|
|
702
|
+
"Expected Array<{score: number, sequence: string, token: number, token_str: string}>"
|
|
703
|
+
);
|
|
704
|
+
}
|
|
278
705
|
};
|
|
279
|
-
var
|
|
280
|
-
|
|
706
|
+
var HFInferenceZeroShotClassificationTask = class extends HFInferenceTask {
|
|
707
|
+
async getResponse(response) {
|
|
708
|
+
if (Array.isArray(response) && response.every(
|
|
709
|
+
(x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
|
|
710
|
+
)) {
|
|
711
|
+
return response;
|
|
712
|
+
}
|
|
713
|
+
throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
|
|
714
|
+
}
|
|
281
715
|
};
|
|
282
|
-
var
|
|
283
|
-
|
|
284
|
-
|
|
716
|
+
var HFInferenceSentenceSimilarityTask = class extends HFInferenceTask {
|
|
717
|
+
async getResponse(response) {
|
|
718
|
+
if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
|
|
719
|
+
return response;
|
|
720
|
+
}
|
|
721
|
+
throw new InferenceOutputError("Expected Array<number>");
|
|
285
722
|
}
|
|
286
|
-
return `${params.baseUrl}/inference`;
|
|
287
723
|
};
|
|
288
|
-
var
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
724
|
+
var HFInferenceTableQuestionAnsweringTask = class extends HFInferenceTask {
|
|
725
|
+
static validate(elem) {
|
|
726
|
+
return typeof elem === "object" && !!elem && "aggregator" in elem && typeof elem.aggregator === "string" && "answer" in elem && typeof elem.answer === "string" && "cells" in elem && Array.isArray(elem.cells) && elem.cells.every((x) => typeof x === "string") && "coordinates" in elem && Array.isArray(elem.coordinates) && elem.coordinates.every(
|
|
727
|
+
(coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")
|
|
728
|
+
);
|
|
729
|
+
}
|
|
730
|
+
async getResponse(response) {
|
|
731
|
+
if (Array.isArray(response) && Array.isArray(response) ? response.every((elem) => HFInferenceTableQuestionAnsweringTask.validate(elem)) : HFInferenceTableQuestionAnsweringTask.validate(response)) {
|
|
732
|
+
return Array.isArray(response) ? response[0] : response;
|
|
733
|
+
}
|
|
734
|
+
throw new InferenceOutputError(
|
|
735
|
+
"Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}"
|
|
736
|
+
);
|
|
737
|
+
}
|
|
293
738
|
};
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
739
|
+
var HFInferenceTokenClassificationTask = class extends HFInferenceTask {
|
|
740
|
+
async getResponse(response) {
|
|
741
|
+
if (Array.isArray(response) && response.every(
|
|
742
|
+
(x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
|
|
743
|
+
)) {
|
|
744
|
+
return response;
|
|
745
|
+
}
|
|
746
|
+
throw new InferenceOutputError(
|
|
747
|
+
"Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
|
|
748
|
+
);
|
|
749
|
+
}
|
|
298
750
|
};
|
|
299
|
-
var
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
751
|
+
var HFInferenceTranslationTask = class extends HFInferenceTask {
|
|
752
|
+
async getResponse(response) {
|
|
753
|
+
if (Array.isArray(response) && response.every((x) => typeof x?.translation_text === "string")) {
|
|
754
|
+
return response?.length === 1 ? response?.[0] : response;
|
|
755
|
+
}
|
|
756
|
+
throw new InferenceOutputError("Expected Array<{translation_text: string}>");
|
|
757
|
+
}
|
|
304
758
|
};
|
|
305
|
-
var
|
|
306
|
-
|
|
759
|
+
var HFInferenceSummarizationTask = class extends HFInferenceTask {
|
|
760
|
+
async getResponse(response) {
|
|
761
|
+
if (Array.isArray(response) && response.every((x) => typeof x?.summary_text === "string")) {
|
|
762
|
+
return response?.[0];
|
|
763
|
+
}
|
|
764
|
+
throw new InferenceOutputError("Expected Array<{summary_text: string}>");
|
|
765
|
+
}
|
|
307
766
|
};
|
|
308
|
-
var
|
|
309
|
-
|
|
310
|
-
return
|
|
767
|
+
var HFInferenceTextToSpeechTask = class extends HFInferenceTask {
|
|
768
|
+
async getResponse(response) {
|
|
769
|
+
return response;
|
|
311
770
|
}
|
|
312
|
-
|
|
313
|
-
|
|
771
|
+
};
|
|
772
|
+
var HFInferenceTabularClassificationTask = class extends HFInferenceTask {
|
|
773
|
+
async getResponse(response) {
|
|
774
|
+
if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
|
|
775
|
+
return response;
|
|
776
|
+
}
|
|
777
|
+
throw new InferenceOutputError("Expected Array<number>");
|
|
778
|
+
}
|
|
779
|
+
};
|
|
780
|
+
var HFInferenceVisualQuestionAnsweringTask = class extends HFInferenceTask {
|
|
781
|
+
async getResponse(response) {
|
|
782
|
+
if (Array.isArray(response) && response.every(
|
|
783
|
+
(elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && typeof elem.score === "number"
|
|
784
|
+
)) {
|
|
785
|
+
return response[0];
|
|
786
|
+
}
|
|
787
|
+
throw new InferenceOutputError("Expected Array<{answer: string, score: number}>");
|
|
314
788
|
}
|
|
315
|
-
return `${params.baseUrl}/models/${params.model}`;
|
|
316
789
|
};
|
|
317
|
-
var
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
790
|
+
var HFInferenceTabularRegressionTask = class extends HFInferenceTask {
|
|
791
|
+
async getResponse(response) {
|
|
792
|
+
if (Array.isArray(response) && response.every((x) => typeof x === "number")) {
|
|
793
|
+
return response;
|
|
794
|
+
}
|
|
795
|
+
throw new InferenceOutputError("Expected Array<number>");
|
|
796
|
+
}
|
|
797
|
+
};
|
|
798
|
+
var HFInferenceTextToAudioTask = class extends HFInferenceTask {
|
|
799
|
+
async getResponse(response) {
|
|
800
|
+
return response;
|
|
801
|
+
}
|
|
322
802
|
};
|
|
323
803
|
|
|
324
804
|
// src/providers/hyperbolic.ts
|
|
325
805
|
var HYPERBOLIC_API_BASE_URL = "https://api.hyperbolic.xyz";
|
|
326
|
-
var
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
return {
|
|
331
|
-
...params.args,
|
|
332
|
-
...params.task === "text-to-image" ? { model_name: params.model } : { model: params.model }
|
|
333
|
-
};
|
|
334
|
-
};
|
|
335
|
-
var makeHeaders7 = (params) => {
|
|
336
|
-
return { Authorization: `Bearer ${params.accessToken}` };
|
|
806
|
+
var HyperbolicConversationalTask = class extends BaseConversationalTask {
|
|
807
|
+
constructor() {
|
|
808
|
+
super("hyperbolic", HYPERBOLIC_API_BASE_URL);
|
|
809
|
+
}
|
|
337
810
|
};
|
|
338
|
-
var
|
|
339
|
-
|
|
340
|
-
|
|
811
|
+
var HyperbolicTextGenerationTask = class extends BaseTextGenerationTask {
|
|
812
|
+
constructor() {
|
|
813
|
+
super("hyperbolic", HYPERBOLIC_API_BASE_URL);
|
|
814
|
+
}
|
|
815
|
+
makeRoute() {
|
|
816
|
+
return "v1/chat/completions";
|
|
817
|
+
}
|
|
818
|
+
preparePayload(params) {
|
|
819
|
+
return {
|
|
820
|
+
messages: [{ content: params.args.inputs, role: "user" }],
|
|
821
|
+
...params.args.parameters ? {
|
|
822
|
+
max_tokens: params.args.parameters.max_new_tokens,
|
|
823
|
+
...omit(params.args.parameters, "max_new_tokens")
|
|
824
|
+
} : void 0,
|
|
825
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
826
|
+
model: params.model
|
|
827
|
+
};
|
|
828
|
+
}
|
|
829
|
+
async getResponse(response) {
|
|
830
|
+
if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
|
|
831
|
+
const completion = response.choices[0];
|
|
832
|
+
return {
|
|
833
|
+
generated_text: completion.message.content
|
|
834
|
+
};
|
|
835
|
+
}
|
|
836
|
+
throw new InferenceOutputError("Expected Hyperbolic text generation response format");
|
|
341
837
|
}
|
|
342
|
-
return `${params.baseUrl}/v1/chat/completions`;
|
|
343
838
|
};
|
|
344
|
-
var
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
839
|
+
var HyperbolicTextToImageTask = class extends TaskProviderHelper {
|
|
840
|
+
constructor() {
|
|
841
|
+
super("hyperbolic", HYPERBOLIC_API_BASE_URL);
|
|
842
|
+
}
|
|
843
|
+
makeRoute(params) {
|
|
844
|
+
return `/v1/images/generations`;
|
|
845
|
+
}
|
|
846
|
+
preparePayload(params) {
|
|
847
|
+
return {
|
|
848
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
849
|
+
...params.args.parameters,
|
|
850
|
+
prompt: params.args.inputs,
|
|
851
|
+
model_name: params.model
|
|
852
|
+
};
|
|
853
|
+
}
|
|
854
|
+
async getResponse(response, url, headers, outputType) {
|
|
855
|
+
if (typeof response === "object" && "images" in response && Array.isArray(response.images) && response.images[0] && typeof response.images[0].image === "string") {
|
|
856
|
+
if (outputType === "url") {
|
|
857
|
+
return `data:image/jpeg;base64,${response.images[0].image}`;
|
|
858
|
+
}
|
|
859
|
+
return fetch(`data:image/jpeg;base64,${response.images[0].image}`).then((res) => res.blob());
|
|
860
|
+
}
|
|
861
|
+
throw new InferenceOutputError("Expected Hyperbolic text-to-image response format");
|
|
862
|
+
}
|
|
349
863
|
};
|
|
350
864
|
|
|
351
865
|
// src/providers/nebius.ts
|
|
352
866
|
var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
|
|
353
|
-
var
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
return {
|
|
358
|
-
...params.args,
|
|
359
|
-
model: params.model
|
|
360
|
-
};
|
|
867
|
+
var NebiusConversationalTask = class extends BaseConversationalTask {
|
|
868
|
+
constructor() {
|
|
869
|
+
super("nebius", NEBIUS_API_BASE_URL);
|
|
870
|
+
}
|
|
361
871
|
};
|
|
362
|
-
var
|
|
363
|
-
|
|
872
|
+
var NebiusTextGenerationTask = class extends BaseTextGenerationTask {
|
|
873
|
+
constructor() {
|
|
874
|
+
super("nebius", NEBIUS_API_BASE_URL);
|
|
875
|
+
}
|
|
364
876
|
};
|
|
365
|
-
var
|
|
366
|
-
|
|
367
|
-
|
|
877
|
+
var NebiusTextToImageTask = class extends TaskProviderHelper {
|
|
878
|
+
constructor() {
|
|
879
|
+
super("nebius", NEBIUS_API_BASE_URL);
|
|
368
880
|
}
|
|
369
|
-
|
|
370
|
-
return
|
|
881
|
+
preparePayload(params) {
|
|
882
|
+
return {
|
|
883
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
884
|
+
...params.args.parameters,
|
|
885
|
+
response_format: "b64_json",
|
|
886
|
+
prompt: params.args.inputs,
|
|
887
|
+
model: params.model
|
|
888
|
+
};
|
|
371
889
|
}
|
|
372
|
-
|
|
373
|
-
return
|
|
890
|
+
makeRoute(params) {
|
|
891
|
+
return "v1/images/generations";
|
|
892
|
+
}
|
|
893
|
+
async getResponse(response, url, headers, outputType) {
|
|
894
|
+
if (typeof response === "object" && "data" in response && Array.isArray(response.data) && response.data.length > 0 && "b64_json" in response.data[0] && typeof response.data[0].b64_json === "string") {
|
|
895
|
+
const base64Data = response.data[0].b64_json;
|
|
896
|
+
if (outputType === "url") {
|
|
897
|
+
return `data:image/jpeg;base64,${base64Data}`;
|
|
898
|
+
}
|
|
899
|
+
return fetch(`data:image/jpeg;base64,${base64Data}`).then((res) => res.blob());
|
|
900
|
+
}
|
|
901
|
+
throw new InferenceOutputError("Expected Nebius text-to-image response format");
|
|
374
902
|
}
|
|
375
|
-
return params.baseUrl;
|
|
376
|
-
};
|
|
377
|
-
var NEBIUS_CONFIG = {
|
|
378
|
-
makeBaseUrl: makeBaseUrl8,
|
|
379
|
-
makeBody: makeBody8,
|
|
380
|
-
makeHeaders: makeHeaders8,
|
|
381
|
-
makeUrl: makeUrl8
|
|
382
903
|
};
|
|
383
904
|
|
|
384
905
|
// src/providers/novita.ts
|
|
385
906
|
var NOVITA_API_BASE_URL = "https://api.novita.ai";
|
|
386
|
-
var
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
};
|
|
394
|
-
};
|
|
395
|
-
var makeHeaders9 = (params) => {
|
|
396
|
-
return { Authorization: `Bearer ${params.accessToken}` };
|
|
907
|
+
var NovitaTextGenerationTask = class extends BaseTextGenerationTask {
|
|
908
|
+
constructor() {
|
|
909
|
+
super("novita", NOVITA_API_BASE_URL);
|
|
910
|
+
}
|
|
911
|
+
makeRoute() {
|
|
912
|
+
return "/v3/openai/chat/completions";
|
|
913
|
+
}
|
|
397
914
|
};
|
|
398
|
-
var
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
}
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
return `${params.baseUrl}/v3/hf/${params.model}`;
|
|
915
|
+
var NovitaConversationalTask = class extends BaseConversationalTask {
|
|
916
|
+
constructor() {
|
|
917
|
+
super("novita", NOVITA_API_BASE_URL);
|
|
918
|
+
}
|
|
919
|
+
makeRoute() {
|
|
920
|
+
return "/v3/openai/chat/completions";
|
|
405
921
|
}
|
|
406
|
-
return params.baseUrl;
|
|
407
922
|
};
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
923
|
+
|
|
924
|
+
// src/providers/openai.ts
|
|
925
|
+
var OPENAI_API_BASE_URL = "https://api.openai.com";
|
|
926
|
+
var OpenAIConversationalTask = class extends BaseConversationalTask {
|
|
927
|
+
constructor() {
|
|
928
|
+
super("openai", OPENAI_API_BASE_URL, true);
|
|
929
|
+
}
|
|
413
930
|
};
|
|
414
931
|
|
|
415
932
|
// src/providers/replicate.ts
|
|
416
|
-
var
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
933
|
+
var ReplicateTask = class extends TaskProviderHelper {
|
|
934
|
+
constructor(url) {
|
|
935
|
+
super("replicate", url || "https://api.replicate.com");
|
|
936
|
+
}
|
|
937
|
+
makeRoute(params) {
|
|
938
|
+
if (params.model.includes(":")) {
|
|
939
|
+
return "v1/predictions";
|
|
940
|
+
}
|
|
941
|
+
return `v1/models/${params.model}/predictions`;
|
|
942
|
+
}
|
|
943
|
+
preparePayload(params) {
|
|
944
|
+
return {
|
|
945
|
+
input: {
|
|
946
|
+
...omit(params.args, ["inputs", "parameters"]),
|
|
947
|
+
...params.args.parameters,
|
|
948
|
+
prompt: params.args.inputs
|
|
949
|
+
},
|
|
950
|
+
version: params.model.includes(":") ? params.model.split(":")[1] : void 0
|
|
951
|
+
};
|
|
952
|
+
}
|
|
953
|
+
prepareHeaders(params, binary) {
|
|
954
|
+
const headers = { Authorization: `Bearer ${params.accessToken}`, Prefer: "wait" };
|
|
955
|
+
if (!binary) {
|
|
956
|
+
headers["Content-Type"] = "application/json";
|
|
957
|
+
}
|
|
958
|
+
return headers;
|
|
959
|
+
}
|
|
960
|
+
makeUrl(params) {
|
|
961
|
+
const baseUrl = this.makeBaseUrl(params);
|
|
962
|
+
if (params.model.includes(":")) {
|
|
963
|
+
return `${baseUrl}/v1/predictions`;
|
|
964
|
+
}
|
|
965
|
+
return `${baseUrl}/v1/models/${params.model}/predictions`;
|
|
966
|
+
}
|
|
425
967
|
};
|
|
426
|
-
var
|
|
427
|
-
|
|
968
|
+
var ReplicateTextToImageTask = class extends ReplicateTask {
|
|
969
|
+
async getResponse(res, url, headers, outputType) {
|
|
970
|
+
if (typeof res === "object" && "output" in res && Array.isArray(res.output) && res.output.length > 0 && typeof res.output[0] === "string") {
|
|
971
|
+
if (outputType === "url") {
|
|
972
|
+
return res.output[0];
|
|
973
|
+
}
|
|
974
|
+
const urlResponse = await fetch(res.output[0]);
|
|
975
|
+
return await urlResponse.blob();
|
|
976
|
+
}
|
|
977
|
+
throw new InferenceOutputError("Expected Replicate text-to-image response format");
|
|
978
|
+
}
|
|
428
979
|
};
|
|
429
|
-
var
|
|
430
|
-
|
|
431
|
-
|
|
980
|
+
var ReplicateTextToSpeechTask = class extends ReplicateTask {
|
|
981
|
+
preparePayload(params) {
|
|
982
|
+
const payload = super.preparePayload(params);
|
|
983
|
+
const input = payload["input"];
|
|
984
|
+
if (typeof input === "object" && input !== null && "prompt" in input) {
|
|
985
|
+
const inputObj = input;
|
|
986
|
+
inputObj["text"] = inputObj["prompt"];
|
|
987
|
+
delete inputObj["prompt"];
|
|
988
|
+
}
|
|
989
|
+
return payload;
|
|
990
|
+
}
|
|
991
|
+
async getResponse(response) {
|
|
992
|
+
if (response instanceof Blob) {
|
|
993
|
+
return response;
|
|
994
|
+
}
|
|
995
|
+
if (response && typeof response === "object") {
|
|
996
|
+
if ("output" in response) {
|
|
997
|
+
if (typeof response.output === "string") {
|
|
998
|
+
const urlResponse = await fetch(response.output);
|
|
999
|
+
return await urlResponse.blob();
|
|
1000
|
+
} else if (Array.isArray(response.output)) {
|
|
1001
|
+
const urlResponse = await fetch(response.output[0]);
|
|
1002
|
+
return await urlResponse.blob();
|
|
1003
|
+
}
|
|
1004
|
+
}
|
|
1005
|
+
}
|
|
1006
|
+
throw new InferenceOutputError("Expected Blob or object with output");
|
|
432
1007
|
}
|
|
433
|
-
return `${params.baseUrl}/v1/models/${params.model}/predictions`;
|
|
434
1008
|
};
|
|
435
|
-
var
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
1009
|
+
var ReplicateTextToVideoTask = class extends ReplicateTask {
|
|
1010
|
+
async getResponse(response) {
|
|
1011
|
+
if (typeof response === "object" && !!response && "output" in response && typeof response.output === "string" && isUrl(response.output)) {
|
|
1012
|
+
const urlResponse = await fetch(response.output);
|
|
1013
|
+
return await urlResponse.blob();
|
|
1014
|
+
}
|
|
1015
|
+
throw new InferenceOutputError("Expected { output: string }");
|
|
1016
|
+
}
|
|
440
1017
|
};
|
|
441
1018
|
|
|
442
1019
|
// src/providers/sambanova.ts
|
|
443
|
-
var
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
};
|
|
447
|
-
var makeBody11 = (params) => {
|
|
448
|
-
return {
|
|
449
|
-
...params.args,
|
|
450
|
-
...params.chatCompletion ? { model: params.model } : void 0
|
|
451
|
-
};
|
|
452
|
-
};
|
|
453
|
-
var makeHeaders11 = (params) => {
|
|
454
|
-
return { Authorization: `Bearer ${params.accessToken}` };
|
|
455
|
-
};
|
|
456
|
-
var makeUrl11 = (params) => {
|
|
457
|
-
if (params.chatCompletion) {
|
|
458
|
-
return `${params.baseUrl}/v1/chat/completions`;
|
|
1020
|
+
var SambanovaConversationalTask = class extends BaseConversationalTask {
|
|
1021
|
+
constructor() {
|
|
1022
|
+
super("sambanova", "https://api.sambanova.ai");
|
|
459
1023
|
}
|
|
460
|
-
return params.baseUrl;
|
|
461
|
-
};
|
|
462
|
-
var SAMBANOVA_CONFIG = {
|
|
463
|
-
makeBaseUrl: makeBaseUrl11,
|
|
464
|
-
makeBody: makeBody11,
|
|
465
|
-
makeHeaders: makeHeaders11,
|
|
466
|
-
makeUrl: makeUrl11
|
|
467
1024
|
};
|
|
468
1025
|
|
|
469
1026
|
  // src/providers/together.ts
  var TOGETHER_API_BASE_URL = "https://api.together.xyz";
- var
-
-
-
-   return {
-     ...params.args,
-     model: params.model
-   };
- };
- var makeHeaders12 = (params) => {
-   return { Authorization: `Bearer ${params.accessToken}` };
+ var TogetherConversationalTask = class extends BaseConversationalTask {
+   constructor() {
+     super("together", TOGETHER_API_BASE_URL);
+   }
  };
- var
-
-
+ var TogetherTextGenerationTask = class extends BaseTextGenerationTask {
+   constructor() {
+     super("together", TOGETHER_API_BASE_URL);
    }
-
-   return
+   preparePayload(params) {
+     return {
+       model: params.model,
+       ...params.args,
+       prompt: params.args.inputs
+     };
    }
-
-
+   async getResponse(response) {
+     if (typeof response === "object" && "choices" in response && Array.isArray(response?.choices) && typeof response?.model === "string") {
+       const completion = response.choices[0];
+       return {
+         generated_text: completion.text
+       };
+     }
+     throw new InferenceOutputError("Expected Together text generation response format");
    }
-   return params.baseUrl;
  };
- var
-
-
-
-
+ var TogetherTextToImageTask = class extends TaskProviderHelper {
+   constructor() {
+     super("together", TOGETHER_API_BASE_URL);
+   }
+   makeRoute() {
+     return "v1/images/generations";
+   }
+   preparePayload(params) {
+     return {
+       ...omit(params.args, ["inputs", "parameters"]),
+       ...params.args.parameters,
+       prompt: params.args.inputs,
+       response_format: "base64",
+       model: params.model
+     };
+   }
+   async getResponse(response, outputType) {
+     if (typeof response === "object" && "data" in response && Array.isArray(response.data) && response.data.length > 0 && "b64_json" in response.data[0] && typeof response.data[0].b64_json === "string") {
+       const base64Data = response.data[0].b64_json;
+       if (outputType === "url") {
+         return `data:image/jpeg;base64,${base64Data}`;
+       }
+       return fetch(`data:image/jpeg;base64,${base64Data}`).then((res) => res.blob());
+     }
+     throw new InferenceOutputError("Expected Together text-to-image response format");
+   }
  };
 
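[Editor's note] TogetherTextToImageTask.getResponse above decodes the provider's base64 payload by fetching a data: URL, which works in both browsers and Node >= 18. Standalone sketch of the same trick (the helper name is ours):

// Decode a base64 string into a Blob via a data: URL round-trip.
async function base64ToBlob(b64, mime = "image/jpeg") {
  const response = await fetch(`data:${mime};base64,${b64}`);
  return await response.blob();
}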
- // src/
- var
-
-
- }
-
-
-
+ // src/lib/getProviderHelper.ts
+ var PROVIDERS = {
+   "black-forest-labs": {
+     "text-to-image": new BlackForestLabsTextToImageTask()
+   },
+   cerebras: {
+     conversational: new CerebrasConversationalTask()
+   },
+   cohere: {
+     conversational: new CohereConversationalTask()
+   },
+   "fal-ai": {
+     "text-to-image": new FalAITextToImageTask(),
+     "text-to-speech": new FalAITextToSpeechTask(),
+     "text-to-video": new FalAITextToVideoTask(),
+     "automatic-speech-recognition": new FalAIAutomaticSpeechRecognitionTask()
+   },
+   "hf-inference": {
+     "text-to-image": new HFInferenceTextToImageTask(),
+     conversational: new HFInferenceConversationalTask(),
+     "text-generation": new HFInferenceTextGenerationTask(),
+     "text-classification": new HFInferenceTextClassificationTask(),
+     "question-answering": new HFInferenceQuestionAnsweringTask(),
+     "audio-classification": new HFInferenceAudioClassificationTask(),
+     "automatic-speech-recognition": new HFInferenceAutomaticSpeechRecognitionTask(),
+     "fill-mask": new HFInferenceFillMaskTask(),
+     "feature-extraction": new HFInferenceFeatureExtractionTask(),
+     "image-classification": new HFInferenceImageClassificationTask(),
+     "image-segmentation": new HFInferenceImageSegmentationTask(),
+     "document-question-answering": new HFInferenceDocumentQuestionAnsweringTask(),
+     "image-to-text": new HFInferenceImageToTextTask(),
+     "object-detection": new HFInferenceObjectDetectionTask(),
+     "audio-to-audio": new HFInferenceAudioToAudioTask(),
+     "zero-shot-image-classification": new HFInferenceZeroShotImageClassificationTask(),
+     "zero-shot-classification": new HFInferenceZeroShotClassificationTask(),
+     "image-to-image": new HFInferenceImageToImageTask(),
+     "sentence-similarity": new HFInferenceSentenceSimilarityTask(),
+     "table-question-answering": new HFInferenceTableQuestionAnsweringTask(),
+     "tabular-classification": new HFInferenceTabularClassificationTask(),
+     "text-to-speech": new HFInferenceTextToSpeechTask(),
+     "token-classification": new HFInferenceTokenClassificationTask(),
+     translation: new HFInferenceTranslationTask(),
+     summarization: new HFInferenceSummarizationTask(),
+     "visual-question-answering": new HFInferenceVisualQuestionAnsweringTask(),
+     "tabular-regression": new HFInferenceTabularRegressionTask(),
+     "text-to-audio": new HFInferenceTextToAudioTask()
+   },
+   "fireworks-ai": {
+     conversational: new FireworksConversationalTask()
+   },
+   hyperbolic: {
+     "text-to-image": new HyperbolicTextToImageTask(),
+     conversational: new HyperbolicConversationalTask(),
+     "text-generation": new HyperbolicTextGenerationTask()
+   },
+   nebius: {
+     "text-to-image": new NebiusTextToImageTask(),
+     conversational: new NebiusConversationalTask(),
+     "text-generation": new NebiusTextGenerationTask()
+   },
+   novita: {
+     conversational: new NovitaConversationalTask(),
+     "text-generation": new NovitaTextGenerationTask()
+   },
+   openai: {
+     conversational: new OpenAIConversationalTask()
+   },
+   replicate: {
+     "text-to-image": new ReplicateTextToImageTask(),
+     "text-to-speech": new ReplicateTextToSpeechTask(),
+     "text-to-video": new ReplicateTextToVideoTask()
+   },
+   sambanova: {
+     conversational: new SambanovaConversationalTask()
+   },
+   together: {
+     "text-to-image": new TogetherTextToImageTask(),
+     conversational: new TogetherConversationalTask(),
+     "text-generation": new TogetherTextGenerationTask()
    }
-   return {
-     ...params.args,
-     model: params.model
-   };
  };
-
-
-
-
-
-   throw new Error("OpenAI only supports chat completions.");
+ function getProviderHelper(provider, task) {
+   if (provider === "hf-inference") {
+     if (!task) {
+       return new HFInferenceTask();
+     }
  }
-
-
-
-
-
-
-
-
-
-
-
-
-
+   if (!task) {
+     throw new Error("you need to provide a task name when using an external provider, e.g. 'text-to-image'");
+   }
+   if (!(provider in PROVIDERS)) {
+     throw new Error(`Provider '${provider}' not supported. Available providers: ${Object.keys(PROVIDERS)}`);
+   }
+   const providerTasks = PROVIDERS[provider];
+   if (!providerTasks || !(task in providerTasks)) {
+     throw new Error(
+       `Task '${task}' not supported for provider '${provider}'. Available tasks: ${Object.keys(providerTasks ?? {})}`
+     );
+   }
+   return providerTasks[task];
+ }
 
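[Editor's note] Usage sketch for the new registry above. getProviderHelper is an internal helper (not necessarily on the public export surface), so treat this as illustration only:

// Resolve the helper for a provider/task pair; throws for unknown combinations.
const helper = getProviderHelper("together", "text-to-image");
// "hf-inference" with no task falls back to the generic HFInferenceTask.
const fallback = getProviderHelper("hf-inference", undefined);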
  // src/providers/consts.ts
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -602,28 +1249,11 @@ async function getProviderModelId(params, args, options = {}) {
  }
 
  // src/lib/makeRequestOptions.ts
- var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
  var tasks = null;
- var providerConfigs = {
-   "black-forest-labs": BLACK_FOREST_LABS_CONFIG,
-   cerebras: CEREBRAS_CONFIG,
-   cohere: COHERE_CONFIG,
-   "fal-ai": FAL_AI_CONFIG,
-   "fireworks-ai": FIREWORKS_AI_CONFIG,
-   "hf-inference": HF_INFERENCE_CONFIG,
-   hyperbolic: HYPERBOLIC_CONFIG,
-   openai: OPENAI_CONFIG,
-   nebius: NEBIUS_CONFIG,
-   novita: NOVITA_CONFIG,
-   replicate: REPLICATE_CONFIG,
-   sambanova: SAMBANOVA_CONFIG,
-   together: TOGETHER_CONFIG
- };
  async function makeRequestOptions(args, options) {
    const { provider: maybeProvider, model: maybeModel } = args;
    const provider = maybeProvider ?? "hf-inference";
- const
- const { task, chatCompletion: chatCompletion2 } = options ?? {};
+   const { task } = options ?? {};
    if (args.endpointUrl && provider !== "hf-inference") {
      throw new Error(`Cannot use endpointUrl with a third-party provider.`);
    }
@@ -633,19 +1263,16 @@ async function makeRequestOptions(args, options) {
    if (!maybeModel && !task) {
      throw new Error("No model provided, and no task has been specified.");
    }
-
-
-
-   if (providerConfig.clientSideRoutingOnly && !maybeModel) {
+   const hfModel = maybeModel ?? await loadDefaultModel(task);
+   const providerHelper = getProviderHelper(provider, task);
+   if (providerHelper.clientSideRoutingOnly && !maybeModel) {
      throw new Error(`Provider ${provider} requires a model ID to be passed directly.`);
    }
- const
-   const resolvedModel = providerConfig.clientSideRoutingOnly ? (
+   const resolvedModel = providerHelper.clientSideRoutingOnly ? (
      // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
      removeProviderPrefix(maybeModel, provider)
    ) : await getProviderModelId({ model: hfModel, provider }, args, {
      task,
-     chatCompletion: chatCompletion2,
      fetch: options?.fetch
    });
    return makeRequestOptionsFromResolvedModel(resolvedModel, args, options);
@@ -653,10 +1280,10 @@ async function makeRequestOptions(args, options) {
  function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
    const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
    const provider = maybeProvider ?? "hf-inference";
- const
- const
+   const { includeCredentials, task, signal, billTo } = options ?? {};
+   const providerHelper = getProviderHelper(provider, task);
    const authMethod = (() => {
- if
+     if (providerHelper.clientSideRoutingOnly) {
        if (accessToken && accessToken.startsWith("hf_")) {
          throw new Error(`Provider ${provider} is closed-source and does not support HF tokens.`);
        }
@@ -670,32 +1297,30 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
    }
    return "none";
  })();
- const
+   const modelId = endpointUrl ?? resolvedModel;
+   const url = providerHelper.makeUrl({
      authMethod,
-
-     model: resolvedModel,
-     chatCompletion: chatCompletion2,
+     model: modelId,
      task
    });
- const
-
-
-
-
-
-
+   const headers = providerHelper.prepareHeaders(
+     {
+       accessToken,
+       authMethod
+     },
+     "data" in args && !!args.data
+   );
+   if (billTo) {
+     headers[HF_HEADER_X_BILL_TO] = billTo;
    }
    const ownUserAgent = `${name}/${version}`;
    const userAgent = [ownUserAgent, typeof navigator !== "undefined" ? navigator.userAgent : void 0].filter((x) => x !== void 0).join(" ");
    headers["User-Agent"] = userAgent;
- const body =
-
-
-
-
-   chatCompletion: chatCompletion2
- })
- );
+   const body = providerHelper.makeBody({
+     args: remainingArgs,
+     model: resolvedModel,
+     task
+   });
    let credentials;
    if (typeof includeCredentials === "string") {
      credentials = includeCredentials;
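[Editor's note] New in the hunk above: a billTo option is read from the request options and copied into the headers (the header name lives in the HF_HEADER_X_BILL_TO constant, defined outside this hunk). A hedged sketch of the call shape, assuming the internal makeRequestOptions signature shown above:

// Hypothetical call; billTo ends up as an extra billing header on the request.
const { url, info } = await makeRequestOptions(
  { model: "some-model", provider: "together", inputs: "..." },
  { task: "conversational", billTo: "my-org" }
);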
@@ -735,37 +1360,6 @@ function removeProviderPrefix(model, provider) {
    return model.slice(provider.length + 1);
  }
 
- // src/tasks/custom/request.ts
- async function request(args, options) {
-   const { url, info } = await makeRequestOptions(args, options);
-   const response = await (options?.fetch ?? fetch)(url, info);
-   if (options?.retry_on_error !== false && response.status === 503) {
-     return request(args, options);
-   }
-   if (!response.ok) {
-     const contentType = response.headers.get("Content-Type");
-     if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
-       const output = await response.json();
-       if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
-         throw new Error(
-           `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
-         );
-       }
-       if (output.error || output.detail) {
-         throw new Error(JSON.stringify(output.error ?? output.detail));
-       } else {
-         throw new Error(output);
-       }
-     }
-     const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
-     throw new Error(message ?? "An error occurred while fetching the blob");
-   }
-   if (response.headers.get("Content-Type")?.startsWith("application/json")) {
-     return await response.json();
-   }
-   return await response.blob();
- }
-
  // src/vendor/fetch-event-source/parse.ts
  function getLines(onLine) {
    let buffer;
@@ -865,12 +1459,44 @@ function newMessage() {
    };
  }
 
- // src/
- async function
+ // src/utils/request.ts
+ async function innerRequest(args, options) {
+   const { url, info } = await makeRequestOptions(args, options);
+   const response = await (options?.fetch ?? fetch)(url, info);
+   const requestContext = { url, info };
+   if (options?.retry_on_error !== false && response.status === 503) {
+     return innerRequest(args, options);
+   }
+   if (!response.ok) {
+     const contentType = response.headers.get("Content-Type");
+     if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
+       const output = await response.json();
+       if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
+         throw new Error(
+           `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
+         );
+       }
+       if (output.error || output.detail) {
+         throw new Error(JSON.stringify(output.error ?? output.detail));
+       } else {
+         throw new Error(output);
+       }
+     }
+     const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
+     throw new Error(message ?? "An error occurred while fetching the blob");
+   }
+   if (response.headers.get("Content-Type")?.startsWith("application/json")) {
+     const data = await response.json();
+     return { data, requestContext };
+   }
+   const blob = await response.blob();
+   return { data: blob, requestContext };
+ }
+ async function* innerStreamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
    if (options?.retry_on_error !== false && response.status === 503) {
-     return yield*
+     return yield* innerStreamingRequest(args, options);
    }
    if (!response.ok) {
      if (response.headers.get("Content-Type")?.startsWith("application/json")) {
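[Editor's note] innerRequest above now returns the parsed payload together with the request context instead of the bare payload, so callers can re-derive the URL and headers (the text-to-image and text-to-video tasks below rely on this). Sketch of a call against this internal API:

// data is parsed JSON or a Blob; requestContext carries the { url, info } pair used for the call.
const { data, requestContext } = await innerRequest(args, { task: "text-classification" });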
@@ -884,6 +1510,9 @@ async function* streamingRequest(args, options) {
|
|
|
884
1510
|
if (output.error && "message" in output.error && typeof output.error.message === "string") {
|
|
885
1511
|
throw new Error(output.error.message);
|
|
886
1512
|
}
|
|
1513
|
+
if (typeof output.message === "string") {
|
|
1514
|
+
throw new Error(output.message);
|
|
1515
|
+
}
|
|
887
1516
|
}
|
|
888
1517
|
throw new Error(`Server response contains error: ${response.status}`);
|
|
889
1518
|
}
|
|
@@ -936,28 +1565,21 @@ async function* streamingRequest(args, options) {
|
|
|
936
1565
|
}
|
|
937
1566
|
}
|
|
938
1567
|
|
|
939
|
-
// src/
|
|
940
|
-
function
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
...props.map((prop) => {
|
|
944
|
-
if (o[prop] !== void 0) {
|
|
945
|
-
return { [prop]: o[prop] };
|
|
946
|
-
}
|
|
947
|
-
})
|
|
1568
|
+
// src/tasks/custom/request.ts
|
|
1569
|
+
async function request(args, options) {
|
|
1570
|
+
console.warn(
|
|
1571
|
+
"The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
|
|
948
1572
|
);
|
|
1573
|
+
const result = await innerRequest(args, options);
|
|
1574
|
+
return result.data;
|
|
949
1575
|
}
|
|
950
1576
|
|
|
951
|
-
// src/
|
|
952
|
-
function
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
function omit(o, props) {
|
|
958
|
-
const propsArr = Array.isArray(props) ? props : [props];
|
|
959
|
-
const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
|
|
960
|
-
return pick(o, letsKeep);
|
|
1577
|
+
// src/tasks/custom/streamingRequest.ts
|
|
1578
|
+
async function* streamingRequest(args, options) {
|
|
1579
|
+
console.warn(
|
|
1580
|
+
"The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
|
|
1581
|
+
);
|
|
1582
|
+
yield* innerStreamingRequest(args, options);
|
|
961
1583
|
}
|
|
962
1584
|
|
|
963
1585
|
// src/tasks/audio/utils.ts
|
|
@@ -970,16 +1592,24 @@ function preparePayload(args) {
|
|
|
970
1592
|
|
|
971
1593
|
// src/tasks/audio/audioClassification.ts
|
|
972
1594
|
async function audioClassification(args, options) {
|
|
1595
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "audio-classification");
|
|
973
1596
|
const payload = preparePayload(args);
|
|
974
|
-
const res = await
|
|
1597
|
+
const { data: res } = await innerRequest(payload, {
|
|
975
1598
|
...options,
|
|
976
1599
|
task: "audio-classification"
|
|
977
1600
|
});
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
1601
|
+
return providerHelper.getResponse(res);
|
|
1602
|
+
}
|
|
1603
|
+
|
|
1604
|
+
// src/tasks/audio/audioToAudio.ts
|
|
1605
|
+
async function audioToAudio(args, options) {
|
|
1606
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "audio-to-audio");
|
|
1607
|
+
const payload = preparePayload(args);
|
|
1608
|
+
const { data: res } = await innerRequest(payload, {
|
|
1609
|
+
...options,
|
|
1610
|
+
task: "audio-to-audio"
|
|
1611
|
+
});
|
|
1612
|
+
return providerHelper.getResponse(res);
|
|
983
1613
|
}
|
|
984
1614
|
|
|
985
1615
|
// src/utils/base64FromBytes.ts
|
|
@@ -997,8 +1627,9 @@ function base64FromBytes(arr) {
|
|
|
997
1627
|
|
|
998
1628
|
// src/tasks/audio/automaticSpeechRecognition.ts
|
|
999
1629
|
async function automaticSpeechRecognition(args, options) {
|
|
1630
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "automatic-speech-recognition");
|
|
1000
1631
|
const payload = await buildPayload(args);
|
|
1001
|
-
const res = await
|
|
1632
|
+
const { data: res } = await innerRequest(payload, {
|
|
1002
1633
|
...options,
|
|
1003
1634
|
task: "automatic-speech-recognition"
|
|
1004
1635
|
});
|
|
@@ -1006,9 +1637,8 @@ async function automaticSpeechRecognition(args, options) {
|
|
|
1006
1637
|
if (!isValidOutput) {
|
|
1007
1638
|
throw new InferenceOutputError("Expected {text: string}");
|
|
1008
1639
|
}
|
|
1009
|
-
return res;
|
|
1640
|
+
return providerHelper.getResponse(res);
|
|
1010
1641
|
}
|
|
1011
|
-
var FAL_AI_SUPPORTED_BLOB_TYPES = ["audio/mpeg", "audio/mp4", "audio/wav", "audio/x-wav"];
|
|
1012
1642
|
async function buildPayload(args) {
|
|
1013
1643
|
if (args.provider === "fal-ai") {
|
|
1014
1644
|
const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
|
|
@@ -1033,57 +1663,17 @@ async function buildPayload(args) {
|
|
|
1033
1663
|
} else {
|
|
1034
1664
|
return preparePayload(args);
|
|
1035
1665
|
}
|
|
1036
|
-
}
|
|
1037
|
-
|
|
1038
|
-
// src/tasks/audio/textToSpeech.ts
|
|
1039
|
-
async function textToSpeech(args, options) {
|
|
1040
|
-
const
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
text: args.inputs
|
|
1044
|
-
} : args;
|
|
1045
|
-
const res = await request(payload, {
|
|
1046
|
-
...options,
|
|
1047
|
-
task: "text-to-speech"
|
|
1048
|
-
});
|
|
1049
|
-
if (res instanceof Blob) {
|
|
1050
|
-
return res;
|
|
1051
|
-
}
|
|
1052
|
-
if (res && typeof res === "object") {
|
|
1053
|
-
if ("output" in res) {
|
|
1054
|
-
if (typeof res.output === "string") {
|
|
1055
|
-
const urlResponse = await fetch(res.output);
|
|
1056
|
-
const blob = await urlResponse.blob();
|
|
1057
|
-
return blob;
|
|
1058
|
-
} else if (Array.isArray(res.output)) {
|
|
1059
|
-
const urlResponse = await fetch(res.output[0]);
|
|
1060
|
-
const blob = await urlResponse.blob();
|
|
1061
|
-
return blob;
|
|
1062
|
-
}
|
|
1063
|
-
}
|
|
1064
|
-
}
|
|
1065
|
-
throw new InferenceOutputError("Expected Blob or object with output");
|
|
1066
|
-
}
|
|
1067
|
-
|
|
1068
|
-
// src/tasks/audio/audioToAudio.ts
|
|
1069
|
-
async function audioToAudio(args, options) {
|
|
1070
|
-
const payload = preparePayload(args);
|
|
1071
|
-
const res = await request(payload, {
|
|
1666
|
+
}
|
|
1667
|
+
|
|
1668
|
+
// src/tasks/audio/textToSpeech.ts
|
|
1669
|
+
async function textToSpeech(args, options) {
|
|
1670
|
+
const provider = args.provider ?? "hf-inference";
|
|
1671
|
+
const providerHelper = getProviderHelper(provider, "text-to-speech");
|
|
1672
|
+
const { data: res } = await innerRequest(args, {
|
|
1072
1673
|
...options,
|
|
1073
|
-
task: "
|
|
1674
|
+
task: "text-to-speech"
|
|
1074
1675
|
});
|
|
1075
|
-
return
|
|
1076
|
-
}
|
|
1077
|
-
function validateOutput(output) {
|
|
1078
|
-
if (!Array.isArray(output)) {
|
|
1079
|
-
throw new InferenceOutputError("Expected Array");
|
|
1080
|
-
}
|
|
1081
|
-
if (!output.every((elem) => {
|
|
1082
|
-
return typeof elem === "object" && elem && "label" in elem && typeof elem.label === "string" && "content-type" in elem && typeof elem["content-type"] === "string" && "blob" in elem && typeof elem.blob === "string";
|
|
1083
|
-
})) {
|
|
1084
|
-
throw new InferenceOutputError("Expected Array<{label: string, audio: Blob}>");
|
|
1085
|
-
}
|
|
1086
|
-
return output;
|
|
1676
|
+
return providerHelper.getResponse(res);
|
|
1087
1677
|
}
|
|
1088
1678
|
|
|
1089
1679
|
// src/tasks/cv/utils.ts
|
|
@@ -1093,183 +1683,95 @@ function preparePayload2(args) {
|
|
|
1093
1683
|
|
|
1094
1684
|
// src/tasks/cv/imageClassification.ts
|
|
1095
1685
|
async function imageClassification(args, options) {
|
|
1686
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-classification");
|
|
1096
1687
|
const payload = preparePayload2(args);
|
|
1097
|
-
const res = await
|
|
1688
|
+
const { data: res } = await innerRequest(payload, {
|
|
1098
1689
|
...options,
|
|
1099
1690
|
task: "image-classification"
|
|
1100
1691
|
});
|
|
1101
|
-
|
|
1102
|
-
if (!isValidOutput) {
|
|
1103
|
-
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
1104
|
-
}
|
|
1105
|
-
return res;
|
|
1692
|
+
return providerHelper.getResponse(res);
|
|
1106
1693
|
}
|
|
1107
1694
|
|
|
1108
1695
|
// src/tasks/cv/imageSegmentation.ts
|
|
1109
1696
|
async function imageSegmentation(args, options) {
|
|
1697
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-segmentation");
|
|
1110
1698
|
const payload = preparePayload2(args);
|
|
1111
|
-
const res = await
|
|
1699
|
+
const { data: res } = await innerRequest(payload, {
|
|
1112
1700
|
...options,
|
|
1113
1701
|
task: "image-segmentation"
|
|
1114
1702
|
});
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
|
|
1703
|
+
return providerHelper.getResponse(res);
|
|
1704
|
+
}
|
|
1705
|
+
|
|
1706
|
+
// src/tasks/cv/imageToImage.ts
|
|
1707
|
+
async function imageToImage(args, options) {
|
|
1708
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-to-image");
|
|
1709
|
+
let reqArgs;
|
|
1710
|
+
if (!args.parameters) {
|
|
1711
|
+
reqArgs = {
|
|
1712
|
+
accessToken: args.accessToken,
|
|
1713
|
+
model: args.model,
|
|
1714
|
+
data: args.inputs
|
|
1715
|
+
};
|
|
1716
|
+
} else {
|
|
1717
|
+
reqArgs = {
|
|
1718
|
+
...args,
|
|
1719
|
+
inputs: base64FromBytes(
|
|
1720
|
+
new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
|
|
1721
|
+
)
|
|
1722
|
+
};
|
|
1118
1723
|
}
|
|
1119
|
-
|
|
1724
|
+
const { data: res } = await innerRequest(reqArgs, {
|
|
1725
|
+
...options,
|
|
1726
|
+
task: "image-to-image"
|
|
1727
|
+
});
|
|
1728
|
+
return providerHelper.getResponse(res);
|
|
1120
1729
|
}
|
|
1121
1730
|
|
|
1122
1731
|
// src/tasks/cv/imageToText.ts
|
|
1123
1732
|
async function imageToText(args, options) {
|
|
1733
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "image-to-text");
|
|
1124
1734
|
const payload = preparePayload2(args);
|
|
1125
|
-
const res =
|
|
1735
|
+
const { data: res } = await innerRequest(payload, {
|
|
1126
1736
|
...options,
|
|
1127
1737
|
task: "image-to-text"
|
|
1128
|
-
})
|
|
1129
|
-
|
|
1130
|
-
throw new InferenceOutputError("Expected {generated_text: string}");
|
|
1131
|
-
}
|
|
1132
|
-
return res;
|
|
1738
|
+
});
|
|
1739
|
+
return providerHelper.getResponse(res[0]);
|
|
1133
1740
|
}
|
|
1134
1741
|
|
|
1135
1742
|
// src/tasks/cv/objectDetection.ts
|
|
1136
1743
|
async function objectDetection(args, options) {
|
|
1744
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "object-detection");
|
|
1137
1745
|
const payload = preparePayload2(args);
|
|
1138
|
-
const res = await
|
|
1746
|
+
const { data: res } = await innerRequest(payload, {
|
|
1139
1747
|
...options,
|
|
1140
1748
|
task: "object-detection"
|
|
1141
1749
|
});
|
|
1142
|
-
|
|
1143
|
-
(x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number"
|
|
1144
|
-
);
|
|
1145
|
-
if (!isValidOutput) {
|
|
1146
|
-
throw new InferenceOutputError(
|
|
1147
|
-
"Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>"
|
|
1148
|
-
);
|
|
1149
|
-
}
|
|
1150
|
-
return res;
|
|
1750
|
+
return providerHelper.getResponse(res);
|
|
1151
1751
|
}
|
|
1152
1752
|
|
|
1153
1753
|
// src/tasks/cv/textToImage.ts
|
|
1154
|
-
function getResponseFormatArg(provider) {
|
|
1155
|
-
switch (provider) {
|
|
1156
|
-
case "fal-ai":
|
|
1157
|
-
return { sync_mode: true };
|
|
1158
|
-
case "nebius":
|
|
1159
|
-
return { response_format: "b64_json" };
|
|
1160
|
-
case "replicate":
|
|
1161
|
-
return void 0;
|
|
1162
|
-
case "together":
|
|
1163
|
-
return { response_format: "base64" };
|
|
1164
|
-
default:
|
|
1165
|
-
return void 0;
|
|
1166
|
-
}
|
|
1167
|
-
}
|
|
1168
1754
|
async function textToImage(args, options) {
|
|
1169
|
-
const
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
...getResponseFormatArg(args.provider),
|
|
1173
|
-
prompt: args.inputs
|
|
1174
|
-
};
|
|
1175
|
-
const res = await request(payload, {
|
|
1755
|
+
const provider = args.provider ?? "hf-inference";
|
|
1756
|
+
const providerHelper = getProviderHelper(provider, "text-to-image");
|
|
1757
|
+
const { data: res } = await innerRequest(args, {
|
|
1176
1758
|
...options,
|
|
1177
1759
|
task: "text-to-image"
|
|
1178
1760
|
});
|
|
1179
|
-
|
|
1180
|
-
|
|
1181
|
-
return await pollBflResponse(res.polling_url, options?.outputType);
|
|
1182
|
-
}
|
|
1183
|
-
if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
|
|
1184
|
-
if (options?.outputType === "url") {
|
|
1185
|
-
return res.images[0].url;
|
|
1186
|
-
} else {
|
|
1187
|
-
const image = await fetch(res.images[0].url);
|
|
1188
|
-
return await image.blob();
|
|
1189
|
-
}
|
|
1190
|
-
}
|
|
1191
|
-
if (args.provider === "hyperbolic" && "images" in res && Array.isArray(res.images) && res.images[0] && typeof res.images[0].image === "string") {
|
|
1192
|
-
if (options?.outputType === "url") {
|
|
1193
|
-
return `data:image/jpeg;base64,${res.images[0].image}`;
|
|
1194
|
-
}
|
|
1195
|
-
const base64Response = await fetch(`data:image/jpeg;base64,${res.images[0].image}`);
|
|
1196
|
-
return await base64Response.blob();
|
|
1197
|
-
}
|
|
1198
|
-
if ("data" in res && Array.isArray(res.data) && res.data[0].b64_json) {
|
|
1199
|
-
const base64Data = res.data[0].b64_json;
|
|
1200
|
-
if (options?.outputType === "url") {
|
|
1201
|
-
return `data:image/jpeg;base64,${base64Data}`;
|
|
1202
|
-
}
|
|
1203
|
-
const base64Response = await fetch(`data:image/jpeg;base64,${base64Data}`);
|
|
1204
|
-
return await base64Response.blob();
|
|
1205
|
-
}
|
|
1206
|
-
if ("output" in res && Array.isArray(res.output)) {
|
|
1207
|
-
if (options?.outputType === "url") {
|
|
1208
|
-
return res.output[0];
|
|
1209
|
-
}
|
|
1210
|
-
const urlResponse = await fetch(res.output[0]);
|
|
1211
|
-
const blob = await urlResponse.blob();
|
|
1212
|
-
return blob;
|
|
1213
|
-
}
|
|
1214
|
-
}
|
|
1215
|
-
const isValidOutput = res && res instanceof Blob;
|
|
1216
|
-
if (!isValidOutput) {
|
|
1217
|
-
throw new InferenceOutputError("Expected Blob");
|
|
1218
|
-
}
|
|
1219
|
-
if (options?.outputType === "url") {
|
|
1220
|
-
const b64 = await res.arrayBuffer().then((buf) => Buffer.from(buf).toString("base64"));
|
|
1221
|
-
return `data:image/jpeg;base64,${b64}`;
|
|
1222
|
-
}
|
|
1223
|
-
return res;
|
|
1224
|
-
}
|
|
1225
|
-
async function pollBflResponse(url, outputType) {
|
|
1226
|
-
const urlObj = new URL(url);
|
|
1227
|
-
for (let step = 0; step < 5; step++) {
|
|
1228
|
-
await delay(1e3);
|
|
1229
|
-
console.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
|
|
1230
|
-
urlObj.searchParams.set("attempt", step.toString(10));
|
|
1231
|
-
const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
|
|
1232
|
-
if (!resp.ok) {
|
|
1233
|
-
throw new InferenceOutputError("Failed to fetch result from black forest labs API");
|
|
1234
|
-
}
|
|
1235
|
-
const payload = await resp.json();
|
|
1236
|
-
if (typeof payload === "object" && payload && "status" in payload && typeof payload.status === "string" && payload.status === "Ready" && "result" in payload && typeof payload.result === "object" && payload.result && "sample" in payload.result && typeof payload.result.sample === "string") {
|
|
1237
|
-
if (outputType === "url") {
|
|
1238
|
-
return payload.result.sample;
|
|
1239
|
-
}
|
|
1240
|
-
const image = await fetch(payload.result.sample);
|
|
1241
|
-
return await image.blob();
|
|
1242
|
-
}
|
|
1243
|
-
}
|
|
1244
|
-
throw new InferenceOutputError("Failed to fetch result from black forest labs API");
|
|
1761
|
+
const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-image" });
|
|
1762
|
+
return providerHelper.getResponse(res, url, info.headers, options?.outputType);
|
|
1245
1763
|
}
|
|
1246
1764
|
|
|
1247
|
-
// src/tasks/cv/
|
|
1248
|
-
async function
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
accessToken: args.accessToken,
|
|
1253
|
-
model: args.model,
|
|
1254
|
-
data: args.inputs
|
|
1255
|
-
};
|
|
1256
|
-
} else {
|
|
1257
|
-
reqArgs = {
|
|
1258
|
-
...args,
|
|
1259
|
-
inputs: base64FromBytes(
|
|
1260
|
-
new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
|
|
1261
|
-
)
|
|
1262
|
-
};
|
|
1263
|
-
}
|
|
1264
|
-
const res = await request(reqArgs, {
|
|
1765
|
+
// src/tasks/cv/textToVideo.ts
|
|
1766
|
+
async function textToVideo(args, options) {
|
|
1767
|
+
const provider = args.provider ?? "hf-inference";
|
|
1768
|
+
const providerHelper = getProviderHelper(provider, "text-to-video");
|
|
1769
|
+
const { data: response } = await innerRequest(args, {
|
|
1265
1770
|
...options,
|
|
1266
|
-
task: "
|
|
1771
|
+
task: "text-to-video"
|
|
1267
1772
|
});
|
|
1268
|
-
const
|
|
1269
|
-
|
|
1270
|
-
throw new InferenceOutputError("Expected Blob");
|
|
1271
|
-
}
|
|
1272
|
-
return res;
|
|
1773
|
+
const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
|
|
1774
|
+
return providerHelper.getResponse(response, url, info.headers);
|
|
1273
1775
|
}
|
|
1274
1776
|
|
|
1275
1777
|
// src/tasks/cv/zeroShotImageClassification.ts
|
|
@@ -1295,235 +1797,117 @@ async function preparePayload3(args) {
|
|
|
1295
1797
|
}
|
|
1296
1798
|
}
|
|
1297
1799
|
async function zeroShotImageClassification(args, options) {
|
|
1800
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "zero-shot-image-classification");
|
|
1298
1801
|
const payload = await preparePayload3(args);
|
|
1299
|
-
const res = await
|
|
1802
|
+
const { data: res } = await innerRequest(payload, {
|
|
1300
1803
|
...options,
|
|
1301
1804
|
task: "zero-shot-image-classification"
|
|
1302
1805
|
});
|
|
1303
|
-
|
|
1304
|
-
if (!isValidOutput) {
|
|
1305
|
-
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
1306
|
-
}
|
|
1307
|
-
return res;
|
|
1806
|
+
return providerHelper.getResponse(res);
|
|
1308
1807
|
}
|
|
1309
1808
|
|
|
1310
|
-
// src/tasks/
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
throw new Error(
|
|
1315
|
-
`textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
|
|
1316
|
-
);
|
|
1317
|
-
}
|
|
1318
|
-
const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
|
|
1319
|
-
const res = await request(payload, {
|
|
1809
|
+
// src/tasks/nlp/chatCompletion.ts
|
|
1810
|
+
async function chatCompletion(args, options) {
|
|
1811
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "conversational");
|
|
1812
|
+
const { data: response } = await innerRequest(args, {
|
|
1320
1813
|
...options,
|
|
1321
|
-
task: "
|
|
1814
|
+
task: "conversational"
|
|
1815
|
+
});
|
|
1816
|
+
return providerHelper.getResponse(response);
|
|
1817
|
+
}
|
|
1818
|
+
|
|
1819
|
+
// src/tasks/nlp/chatCompletionStream.ts
|
|
1820
|
+
async function* chatCompletionStream(args, options) {
|
|
1821
|
+
yield* innerStreamingRequest(args, {
|
|
1822
|
+
...options,
|
|
1823
|
+
task: "conversational"
|
|
1322
1824
|
});
|
|
1323
|
-
if (args.provider === "fal-ai") {
|
|
1324
|
-
const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
|
|
1325
|
-
return await pollFalResponse(res, url, info.headers);
|
|
1326
|
-
} else if (args.provider === "novita") {
|
|
1327
|
-
const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
|
|
1328
|
-
if (!isValidOutput) {
|
|
1329
|
-
throw new InferenceOutputError("Expected { video: { video_url: string } }");
|
|
1330
|
-
}
|
|
1331
|
-
const urlResponse = await fetch(res.video.video_url);
|
|
1332
|
-
return await urlResponse.blob();
|
|
1333
|
-
} else {
|
|
1334
|
-
const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
|
|
1335
|
-
if (!isValidOutput) {
|
|
1336
|
-
throw new InferenceOutputError("Expected { output: string }");
|
|
1337
|
-
}
|
|
1338
|
-
const urlResponse = await fetch(res.output);
|
|
1339
|
-
return await urlResponse.blob();
|
|
1340
|
-
}
|
|
1341
1825
|
}
|
|
1342
1826
|
|
|
1343
1827
|
// src/tasks/nlp/featureExtraction.ts
|
|
1344
1828
|
async function featureExtraction(args, options) {
|
|
1345
|
-
const
|
|
1829
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "feature-extraction");
|
|
1830
|
+
const { data: res } = await innerRequest(args, {
|
|
1346
1831
|
...options,
|
|
1347
1832
|
task: "feature-extraction"
|
|
1348
1833
|
});
|
|
1349
|
-
|
|
1350
|
-
const isNumArrayRec = (arr, maxDepth, curDepth = 0) => {
|
|
1351
|
-
if (curDepth > maxDepth)
|
|
1352
|
-
return false;
|
|
1353
|
-
if (arr.every((x) => Array.isArray(x))) {
|
|
1354
|
-
return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1));
|
|
1355
|
-
} else {
|
|
1356
|
-
return arr.every((x) => typeof x === "number");
|
|
1357
|
-
}
|
|
1358
|
-
};
|
|
1359
|
-
isValidOutput = Array.isArray(res) && isNumArrayRec(res, 3, 0);
|
|
1360
|
-
if (!isValidOutput) {
|
|
1361
|
-
throw new InferenceOutputError("Expected Array<number[][][] | number[][] | number[] | number>");
|
|
1362
|
-
}
|
|
1363
|
-
return res;
|
|
1834
|
+
return providerHelper.getResponse(res);
|
|
1364
1835
|
}
|
|
1365
1836
|
|
|
1366
1837
|
// src/tasks/nlp/fillMask.ts
|
|
1367
1838
|
async function fillMask(args, options) {
|
|
1368
|
-
const
|
|
1839
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "fill-mask");
|
|
1840
|
+
const { data: res } = await innerRequest(args, {
|
|
1369
1841
|
...options,
|
|
1370
1842
|
task: "fill-mask"
|
|
1371
1843
|
});
|
|
1372
|
-
|
|
1373
|
-
(x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string"
|
|
1374
|
-
);
|
|
1375
|
-
if (!isValidOutput) {
|
|
1376
|
-
throw new InferenceOutputError(
|
|
1377
|
-
"Expected Array<{score: number, sequence: string, token: number, token_str: string}>"
|
|
1378
|
-
);
|
|
1379
|
-
}
|
|
1380
|
-
return res;
|
|
1844
|
+
return providerHelper.getResponse(res);
|
|
1381
1845
|
}
|
|
1382
1846
|
|
|
1383
1847
|
// src/tasks/nlp/questionAnswering.ts
|
|
1384
1848
|
async function questionAnswering(args, options) {
|
|
1385
|
-
const
|
|
1849
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "question-answering");
|
|
1850
|
+
const { data: res } = await innerRequest(args, {
|
|
1386
1851
|
...options,
|
|
1387
1852
|
task: "question-answering"
|
|
1388
1853
|
});
|
|
1389
|
-
|
|
1390
|
-
(elem) => typeof elem === "object" && !!elem && typeof elem.answer === "string" && typeof elem.end === "number" && typeof elem.score === "number" && typeof elem.start === "number"
|
|
1391
|
-
) : typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
|
|
1392
|
-
if (!isValidOutput) {
|
|
1393
|
-
throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
|
|
1394
|
-
}
|
|
1395
|
-
return Array.isArray(res) ? res[0] : res;
|
|
1854
|
+
return providerHelper.getResponse(res);
|
|
1396
1855
|
}
|
|
1397
1856
|
|
|
1398
1857
|
// src/tasks/nlp/sentenceSimilarity.ts
|
|
1399
1858
|
async function sentenceSimilarity(args, options) {
|
|
1400
|
-
const
|
|
1859
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "sentence-similarity");
|
|
1860
|
+
const { data: res } = await innerRequest(args, {
|
|
1401
1861
|
...options,
|
|
1402
1862
|
task: "sentence-similarity"
|
|
1403
1863
|
});
|
|
1404
|
-
|
|
1405
|
-
if (!isValidOutput) {
|
|
1406
|
-
throw new InferenceOutputError("Expected number[]");
|
|
1407
|
-
}
|
|
1408
|
-
return res;
|
|
1409
|
-
}
|
|
1410
|
-
function prepareInput(args) {
|
|
1411
|
-
return {
|
|
1412
|
-
...omit(args, ["inputs", "parameters"]),
|
|
1413
|
-
inputs: { ...omit(args.inputs, "sourceSentence") },
|
|
1414
|
-
parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters }
|
|
1415
|
-
};
|
|
1864
|
+
return providerHelper.getResponse(res);
|
|
1416
1865
|
}
|
|
1417
1866
|
|
|
1418
1867
|
// src/tasks/nlp/summarization.ts
|
|
1419
1868
|
async function summarization(args, options) {
|
|
1420
|
-
const
|
|
1869
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "summarization");
|
|
1870
|
+
const { data: res } = await innerRequest(args, {
|
|
1421
1871
|
...options,
|
|
1422
1872
|
task: "summarization"
|
|
1423
1873
|
});
|
|
1424
|
-
|
|
1425
|
-
if (!isValidOutput) {
|
|
1426
|
-
throw new InferenceOutputError("Expected Array<{summary_text: string}>");
|
|
1427
|
-
}
|
|
1428
|
-
return res?.[0];
|
|
1874
|
+
return providerHelper.getResponse(res);
|
|
1429
1875
|
}
|
|
1430
1876
|
|
|
1431
1877
|
// src/tasks/nlp/tableQuestionAnswering.ts
|
|
1432
1878
|
async function tableQuestionAnswering(args, options) {
|
|
1433
|
-
const
|
|
1879
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "table-question-answering");
|
|
1880
|
+
const { data: res } = await innerRequest(args, {
|
|
1434
1881
|
...options,
|
|
1435
1882
|
task: "table-question-answering"
|
|
1436
1883
|
});
|
|
1437
|
-
|
|
1438
|
-
if (!isValidOutput) {
|
|
1439
|
-
throw new InferenceOutputError(
|
|
1440
|
-
"Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}"
|
|
1441
|
-
);
|
|
1442
|
-
}
|
|
1443
|
-
return Array.isArray(res) ? res[0] : res;
|
|
1444
|
-
}
|
|
1445
|
-
function validate(elem) {
|
|
1446
|
-
return typeof elem === "object" && !!elem && "aggregator" in elem && typeof elem.aggregator === "string" && "answer" in elem && typeof elem.answer === "string" && "cells" in elem && Array.isArray(elem.cells) && elem.cells.every((x) => typeof x === "string") && "coordinates" in elem && Array.isArray(elem.coordinates) && elem.coordinates.every(
|
|
1447
|
-
(coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")
|
|
1448
|
-
);
|
|
1884
|
+
return providerHelper.getResponse(res);
|
|
1449
1885
|
}
|
|
1450
1886
|
|
|
1451
1887
|
// src/tasks/nlp/textClassification.ts
|
|
1452
1888
|
async function textClassification(args, options) {
|
|
1453
|
-
const
|
|
1889
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "text-classification");
|
|
1890
|
+
const { data: res } = await innerRequest(args, {
|
|
1454
1891
|
...options,
|
|
1455
1892
|
task: "text-classification"
|
|
1456
|
-
})
|
|
1457
|
-
|
|
1458
|
-
if (!isValidOutput) {
|
|
1459
|
-
throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
|
|
1460
|
-
}
|
|
1461
|
-
return res;
|
|
1462
|
-
}
|
|
1463
|
-
|
|
1464
|
-
// src/utils/toArray.ts
|
|
1465
|
-
function toArray(obj) {
|
|
1466
|
-
if (Array.isArray(obj)) {
|
|
1467
|
-
return obj;
|
|
1468
|
-
}
|
|
1469
|
-
return [obj];
|
|
1893
|
+
});
|
|
1894
|
+
return providerHelper.getResponse(res);
|
|
1470
1895
|
}
|
|
1471
1896
|
|
|
1472
1897
|
// src/tasks/nlp/textGeneration.ts
|
|
1473
1898
|
async function textGeneration(args, options) {
|
|
1474
|
-
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
if (!isValidOutput) {
|
|
1482
|
-
throw new InferenceOutputError("Expected ChatCompletionOutput");
|
|
1483
|
-
}
|
|
1484
|
-
const completion = raw.choices[0];
|
|
1485
|
-
return {
|
|
1486
|
-
generated_text: completion.text
|
|
1487
|
-
};
|
|
1488
|
-
} else if (args.provider === "hyperbolic") {
|
|
1489
|
-
const payload = {
|
|
1490
|
-
messages: [{ content: args.inputs, role: "user" }],
|
|
1491
|
-
...args.parameters ? {
|
|
1492
|
-
max_tokens: args.parameters.max_new_tokens,
|
|
1493
|
-
...omit(args.parameters, "max_new_tokens")
|
|
1494
|
-
} : void 0,
|
|
1495
|
-
...omit(args, ["inputs", "parameters"])
|
|
1496
|
-
};
|
|
1497
|
-
const raw = await request(payload, {
|
|
1498
|
-
...options,
|
|
1499
|
-
task: "text-generation"
|
|
1500
|
-
});
|
|
1501
|
-
const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw?.choices) && typeof raw?.model === "string";
|
|
1502
|
-
if (!isValidOutput) {
|
|
1503
|
-
throw new InferenceOutputError("Expected ChatCompletionOutput");
|
|
1504
|
-
}
|
|
1505
|
-
const completion = raw.choices[0];
|
|
1506
|
-
return {
|
|
1507
|
-
generated_text: completion.message.content
|
|
1508
|
-
};
|
|
1509
|
-
} else {
|
|
1510
|
-
const res = toArray(
|
|
1511
|
-
await request(args, {
|
|
1512
|
-
...options,
|
|
1513
|
-
task: "text-generation"
|
|
1514
|
-
})
|
|
1515
|
-
);
|
|
1516
|
-
const isValidOutput = Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
|
|
1517
|
-
if (!isValidOutput) {
|
|
1518
|
-
throw new InferenceOutputError("Expected Array<{generated_text: string}>");
|
|
1519
|
-
}
|
|
1520
|
-
return res?.[0];
|
|
1521
|
-
}
|
|
1899
|
+
const provider = args.provider ?? "hf-inference";
|
|
1900
|
+
const providerHelper = getProviderHelper(provider, "text-generation");
|
|
1901
|
+
const { data: response } = await innerRequest(args, {
|
|
1902
|
+
...options,
|
|
1903
|
+
task: "text-generation"
|
|
1904
|
+
});
|
|
1905
|
+
return providerHelper.getResponse(response);
|
|
1522
1906
|
}
|
|
1523
1907
|
|
|
1524
1908
|
// src/tasks/nlp/textGenerationStream.ts
|
|
1525
1909
|
async function* textGenerationStream(args, options) {
|
|
1526
|
-
yield*
|
|
1910
|
+
yield* innerStreamingRequest(args, {
|
|
1527
1911
|
...options,
|
|
1528
1912
|
task: "text-generation"
|
|
1529
1913
|
});
|
|
@@ -1531,79 +1915,37 @@ async function* textGenerationStream(args, options) {
|
|
|
1531
1915
|
|
|
1532
1916
|
// src/tasks/nlp/tokenClassification.ts
|
|
1533
1917
|
async function tokenClassification(args, options) {
|
|
1534
|
-
const
|
|
1535
|
-
|
|
1536
|
-
|
|
1537
|
-
|
|
1538
|
-
|
|
1539
|
-
);
|
|
1540
|
-
const isValidOutput = Array.isArray(res) && res.every(
|
|
1541
|
-
(x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
|
|
1542
|
-
);
|
|
1543
|
-
if (!isValidOutput) {
|
|
1544
|
-
throw new InferenceOutputError(
|
|
1545
|
-
"Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
|
|
1546
|
-
);
|
|
1547
|
-
}
|
|
1548
|
-
return res;
|
|
1918
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "token-classification");
|
|
1919
|
+
const { data: res } = await innerRequest(args, {
|
|
1920
|
+
...options,
|
|
1921
|
+
task: "token-classification"
|
|
1922
|
+
});
|
|
1923
|
+
return providerHelper.getResponse(res);
|
|
1549
1924
|
}
|
|
1550
1925
|
|
|
1551
1926
|
// src/tasks/nlp/translation.ts
|
|
1552
1927
|
async function translation(args, options) {
|
|
1553
|
-
const
|
|
1928
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "translation");
|
|
1929
|
+
const { data: res } = await innerRequest(args, {
|
|
1554
1930
|
...options,
|
|
1555
1931
|
task: "translation"
|
|
1556
1932
|
});
|
|
1557
|
-
|
|
1558
|
-
if (!isValidOutput) {
|
|
1559
|
-
throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
|
|
1560
|
-
}
|
|
1561
|
-
return res?.length === 1 ? res?.[0] : res;
|
|
1933
|
+
return providerHelper.getResponse(res);
|
|
1562
1934
|
}
|
|
1563
1935
|
|
|
1564
1936
|
// src/tasks/nlp/zeroShotClassification.ts
|
|
1565
1937
|
async function zeroShotClassification(args, options) {
|
|
1566
|
-
const
|
|
1567
|
-
|
|
1568
|
-
...options,
|
|
1569
|
-
task: "zero-shot-classification"
|
|
1570
|
-
})
|
|
1571
|
-
);
|
|
1572
|
-
const isValidOutput = Array.isArray(res) && res.every(
|
|
1573
|
-
(x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
|
|
1574
|
-
);
|
|
1575
|
-
if (!isValidOutput) {
|
|
1576
|
-
throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
|
|
1577
|
-
}
|
|
1578
|
-
return res;
|
|
1579
|
-
}
|
|
1580
|
-
|
|
1581
|
-
// src/tasks/nlp/chatCompletion.ts
|
|
1582
|
-
async function chatCompletion(args, options) {
|
|
1583
|
-
const res = await request(args, {
|
|
1584
|
-
...options,
|
|
1585
|
-
task: "text-generation",
|
|
1586
|
-
chatCompletion: true
|
|
1587
|
-
});
|
|
1588
|
-
const isValidOutput = typeof res === "object" && Array.isArray(res?.choices) && typeof res?.created === "number" && typeof res?.id === "string" && typeof res?.model === "string" && /// Together.ai and Nebius do not output a system_fingerprint
|
|
1589
|
-
(res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof res?.usage === "object";
|
|
1590
|
-
if (!isValidOutput) {
|
|
1591
|
-
throw new InferenceOutputError("Expected ChatCompletionOutput");
|
|
1592
|
-
}
|
|
1593
|
-
return res;
|
|
1594
|
-
}
|
|
1595
|
-
|
|
1596
|
-
// src/tasks/nlp/chatCompletionStream.ts
|
|
1597
|
-
async function* chatCompletionStream(args, options) {
|
|
1598
|
-
yield* streamingRequest(args, {
|
|
1938
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "zero-shot-classification");
|
|
1939
|
+
const { data: res } = await innerRequest(args, {
|
|
1599
1940
|
...options,
|
|
1600
|
-
task: "
|
|
1601
|
-
chatCompletion: true
|
|
1941
|
+
task: "zero-shot-classification"
|
|
1602
1942
|
});
|
|
1943
|
+
return providerHelper.getResponse(res);
|
|
1603
1944
|
}
|
|
1604
1945
|
|
|
1605
1946
|
// src/tasks/multimodal/documentQuestionAnswering.ts
|
|
1606
1947
|
async function documentQuestionAnswering(args, options) {
|
|
1948
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "document-question-answering");
|
|
1607
1949
|
const reqArgs = {
|
|
1608
1950
|
...args,
|
|
1609
1951
|
inputs: {
|
|
@@ -1612,23 +1954,19 @@ async function documentQuestionAnswering(args, options) {
|
|
|
1612
1954
|
image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
|
|
1613
1955
|
}
|
|
1614
1956
|
};
|
|
1615
|
-
const res =
|
|
1616
|
-
|
|
1957
|
+
const { data: res } = await innerRequest(
|
|
1958
|
+
reqArgs,
|
|
1959
|
+
{
|
|
1617
1960
|
...options,
|
|
1618
1961
|
task: "document-question-answering"
|
|
1619
|
-
}
|
|
1620
|
-
);
|
|
1621
|
-
const isValidOutput = Array.isArray(res) && res.every(
|
|
1622
|
-
(elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
|
|
1962
|
+
}
|
|
1623
1963
|
);
|
|
1624
|
-
|
|
1625
|
-
throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
|
|
1626
|
-
}
|
|
1627
|
-
return res[0];
|
|
1964
|
+
return providerHelper.getResponse(res);
|
|
1628
1965
|
}
|
|
1629
1966
|
|
|
1630
1967
|
// src/tasks/multimodal/visualQuestionAnswering.ts
|
|
1631
1968
|
async function visualQuestionAnswering(args, options) {
|
|
1969
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "visual-question-answering");
|
|
1632
1970
|
const reqArgs = {
|
|
1633
1971
|
...args,
|
|
1634
1972
|
inputs: {
|
|
@@ -1637,43 +1975,31 @@ async function visualQuestionAnswering(args, options) {
|
|
|
1637
1975
|
image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
|
|
1638
1976
|
}
|
|
1639
1977
|
};
|
|
1640
|
-
const res = await
|
|
1978
|
+
const { data: res } = await innerRequest(reqArgs, {
|
|
1641
1979
|
...options,
|
|
1642
1980
|
task: "visual-question-answering"
|
|
1643
1981
|
});
|
|
1644
|
-
|
|
1645
|
-
(elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && typeof elem.score === "number"
|
|
1646
|
-
);
|
|
1647
|
-
if (!isValidOutput) {
|
|
1648
|
-
throw new InferenceOutputError("Expected Array<{answer: string, score: number}>");
|
|
1649
|
-
}
|
|
1650
|
-
return res[0];
|
|
1982
|
+
return providerHelper.getResponse(res);
|
|
1651
1983
|
}
|
|
1652
1984
|
|
|
1653
|
-
// src/tasks/tabular/
|
|
1654
|
-
async function
|
|
1655
|
-
const
|
|
1985
|
+
// src/tasks/tabular/tabularClassification.ts
|
|
1986
|
+
async function tabularClassification(args, options) {
|
|
1987
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "tabular-classification");
|
|
1988
|
+
const { data: res } = await innerRequest(args, {
|
|
1656
1989
|
...options,
|
|
1657
|
-
task: "tabular-
|
|
1990
|
+
task: "tabular-classification"
|
|
1658
1991
|
});
|
|
1659
|
-
|
|
1660
|
-
if (!isValidOutput) {
|
|
1661
|
-
throw new InferenceOutputError("Expected number[]");
|
|
1662
|
-
}
|
|
1663
|
-
return res;
|
|
1992
|
+
return providerHelper.getResponse(res);
|
|
1664
1993
|
}
|
|
1665
1994
|
|
|
1666
|
-
// src/tasks/tabular/
|
|
1667
|
-
async function
|
|
1668
|
-
const
|
|
1995
|
+
// src/tasks/tabular/tabularRegression.ts
|
|
1996
|
+
async function tabularRegression(args, options) {
|
|
1997
|
+
const providerHelper = getProviderHelper(args.provider ?? "hf-inference", "tabular-regression");
|
|
1998
|
+
const { data: res } = await innerRequest(args, {
|
|
1669
1999
|
...options,
|
|
1670
|
-
task: "tabular-
|
|
2000
|
+
task: "tabular-regression"
|
|
1671
2001
|
});
|
|
1672
|
-
|
|
1673
|
-
if (!isValidOutput) {
|
|
1674
|
-
throw new InferenceOutputError("Expected number[]");
|
|
1675
|
-
}
|
|
1676
|
-
return res;
|
|
2002
|
+
return providerHelper.getResponse(res);
|
|
1677
2003
|
}
|
|
1678
2004
|
|
|
1679
2005
|
// src/InferenceClient.ts
|
|
@@ -1742,8 +2068,8 @@ __export(snippets_exports, {
|
|
|
1742
2068
|
});
|
|
1743
2069
|
|
|
1744
2070
|
// src/snippets/getInferenceSnippets.ts
|
|
1745
|
-
var import_tasks = require("@huggingface/tasks");
|
|
1746
2071
|
var import_jinja = require("@huggingface/jinja");
|
|
2072
|
+
var import_tasks = require("@huggingface/tasks");
|
|
1747
2073
|
|
|
1748
2074
|
// src/snippets/templates.exported.ts
|
|
1749
2075
|
var templates = {
|
|
@@ -1753,7 +2079,7 @@ var templates = {
    "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
    "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
    "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
-   "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({
+   "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
    "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
  },
  "huggingface.js": {
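These snippet templates are plain Jinja strings rendered with `@huggingface/jinja`. A short sketch of how the now-complete `textToImage` entry gets filled in, assuming made-up placeholder values (the template body is abbreviated here):

```ts
import { Template } from "@huggingface/jinja";

// Abbreviated copy of the "textToImage" template above; only the
// interpolation points matter for this sketch.
const textToImageTemplate =
  'const response = await fetch("{{ fullUrl }}", { /* ... */ });\n' +
  "query({ {{ providerInputs.asTsString }} }).then((response) => {\n  // Use image\n});";

const snippet = new Template(textToImageTemplate).render({
  fullUrl: "https://example.test/models/some-model", // made-up URL
  providerInputs: { asTsString: 'inputs: "Astronaut riding a horse"' },
});

console.log(snippet);
```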
@@ -1786,7 +2112,7 @@ const image = await client.textToVideo({
  },
  "openai": {
    "conversational": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);',
-   "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\
+   "conversationalStream": 'import { OpenAI } from "openai";\n\nconst client = new OpenAI({\n baseURL: "{{ baseUrl }}",\n apiKey: "{{ accessToken }}",\n});\n\nconst stream = await client.chat.completions.create({\n model: "{{ providerModelId }}",\n{{ inputs.asTsString }}\n stream: true,\n});\n\nfor await (const chunk of stream) {\n process.stdout.write(chunk.choices[0]?.delta?.content || "");\n}'
  }
 },
 "python": {
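The previously truncated `conversationalStream` template is now complete. Rendered with made-up values, it produces a standard OpenAI-SDK streaming loop along these lines:

```ts
import { OpenAI } from "openai";

// What the completed template renders to once {{ baseUrl }},
// {{ accessToken }} and {{ providerModelId }} are substituted
// (all values below are placeholders).
const client = new OpenAI({
  baseURL: "https://example.test/v1",
  apiKey: "hf_***",
});

const stream = await client.chat.completions.create({
  model: "some-org/some-chat-model",
  messages: [{ role: "user", content: "Hello!" }],
  stream: true,
});

for await (const chunk of stream) {
  // Each chunk carries an incremental delta; print it as it arrives.
  process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
```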
@@ -1918,15 +2244,23 @@ var HF_JS_METHODS = {
 };
 var snippetGenerator = (templateName, inputPreparationFn) => {
   return (model, accessToken, provider, providerModelId, opts) => {
+    let task = model.pipeline_tag;
     if (model.pipeline_tag && ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) && model.tags.includes("conversational")) {
       templateName = opts?.streaming ? "conversationalStream" : "conversational";
       inputPreparationFn = prepareConversationalInput;
+      task = "conversational";
     }
     const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: (0, import_tasks.getModelInputSnippet)(model) };
     const request2 = makeRequestOptionsFromResolvedModel(
       providerModelId ?? model.id,
-      {
-
+      {
+        accessToken,
+        provider,
+        ...inputs
+      },
+      {
+        task
+      }
     );
     let providerInputs = inputs;
     const bodyAsObj = request2.info.body;
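`snippetGenerator` now derives an explicit `task` (defaulting to the model's `pipeline_tag`, overridden to `"conversational"` for chat-capable models) and passes it to `makeRequestOptionsFromResolvedModel` instead of leaving the task implicit. A self-contained sketch of just that resolution logic, with simplified types:

```ts
// Simplified model shape for illustration; the real code works on the
// Hub's model entry objects.
interface ModelEntry {
  pipeline_tag?: string;
  tags: string[];
}

function resolveSnippetTask(model: ModelEntry, streaming?: boolean) {
  let task = model.pipeline_tag;
  let templateName: string | undefined;
  if (
    model.pipeline_tag &&
    ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) &&
    model.tags.includes("conversational")
  ) {
    // Chat-capable text models are snippeted as "conversational",
    // streaming or not, mirroring the diff above.
    templateName = streaming ? "conversationalStream" : "conversational";
    task = "conversational";
  }
  return { task, templateName };
}

// resolveSnippetTask({ pipeline_tag: "text-generation", tags: ["conversational"] }, true)
// -> { task: "conversational", templateName: "conversationalStream" }
```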
@@ -2013,7 +2347,7 @@ var prepareConversationalInput = (model, opts) => {
   return {
     messages: opts?.messages ?? (0, import_tasks.getModelInputSnippet)(model),
     ...opts?.temperature ? { temperature: opts?.temperature } : void 0,
-    max_tokens: opts?.max_tokens ??
+    max_tokens: opts?.max_tokens ?? 512,
     ...opts?.top_p ? { top_p: opts?.top_p } : void 0
   };
 };
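With the default now spelled out, `prepareConversationalInput` falls back to 512 tokens when the caller does not pass `max_tokens`. A tiny sketch of the resulting request body, with a simplified `opts` shape:

```ts
// Simplified options shape for illustration only.
const opts: { temperature?: number; max_tokens?: number; top_p?: number } = {};

const body = {
  messages: [{ role: "user", content: "Hello!" }],
  // temperature/top_p are only included when explicitly set;
  // max_tokens always gets a value, defaulting to 512.
  ...(opts.temperature ? { temperature: opts.temperature } : undefined),
  max_tokens: opts.max_tokens ?? 512,
  ...(opts.top_p ? { top_p: opts.top_p } : undefined),
};

console.log(body.max_tokens); // 512
```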