@huggingface/tasks 0.16.7 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commonjs/index.d.ts +1 -3
- package/dist/commonjs/index.d.ts.map +1 -1
- package/dist/commonjs/index.js +5 -15
- package/dist/commonjs/snippets/index.d.ts +2 -5
- package/dist/commonjs/snippets/index.d.ts.map +1 -1
- package/dist/commonjs/snippets/index.js +2 -21
- package/dist/esm/index.d.ts +1 -3
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +1 -2
- package/dist/esm/snippets/index.d.ts +2 -5
- package/dist/esm/snippets/index.d.ts.map +1 -1
- package/dist/esm/snippets/index.js +2 -5
- package/package.json +1 -1
- package/src/index.ts +7 -3
- package/src/snippets/index.ts +2 -6
- package/dist/commonjs/snippets/curl.d.ts +0 -17
- package/dist/commonjs/snippets/curl.d.ts.map +0 -1
- package/dist/commonjs/snippets/curl.js +0 -129
- package/dist/commonjs/snippets/js.d.ts +0 -21
- package/dist/commonjs/snippets/js.d.ts.map +0 -1
- package/dist/commonjs/snippets/js.js +0 -413
- package/dist/commonjs/snippets/python.d.ts +0 -23
- package/dist/commonjs/snippets/python.d.ts.map +0 -1
- package/dist/commonjs/snippets/python.js +0 -435
- package/dist/esm/snippets/curl.d.ts +0 -17
- package/dist/esm/snippets/curl.d.ts.map +0 -1
- package/dist/esm/snippets/curl.js +0 -121
- package/dist/esm/snippets/js.d.ts +0 -21
- package/dist/esm/snippets/js.d.ts.map +0 -1
- package/dist/esm/snippets/js.js +0 -401
- package/dist/esm/snippets/python.d.ts +0 -23
- package/dist/esm/snippets/python.d.ts.map +0 -1
- package/dist/esm/snippets/python.js +0 -421
- package/src/snippets/curl.ts +0 -173
- package/src/snippets/js.ts +0 -471
- package/src/snippets/python.ts +0 -483
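Read together, the file list above tells a single story: 0.17.0 removes the per-language inference-snippet generators from `@huggingface/tasks`. The sources `src/snippets/curl.ts`, `src/snippets/js.ts`, and `src/snippets/python.ts` are deleted along with their compiled CommonJS and ESM outputs, and `src/snippets/index.ts` and the package entry points shrink accordingly. The hunks reproduced below show the removed CommonJS build of `snippets/js.js`, the removed `snippets/python.d.ts` declarations, and the matching source map.

For orientation, here is a rough sketch of the call surface that 0.16.7 still exposed and 0.17.0 drops. The `snippets.curl` / `snippets.js` / `snippets.python` namespace layout is my reading of the 0.16.x snippets index, and the model descriptor is invented for illustration:

```ts
// Compiles against @huggingface/tasks 0.16.7; none of these generators
// survive into 0.17.0.
import { snippets } from "@huggingface/tasks";

// Derive the minimal model shape from the generator's own signature rather
// than guessing at exported type names.
type Model = Parameters<typeof snippets.js.getJsInferenceSnippet>[0];

const model = {
	id: "google-bert/bert-base-uncased", // hypothetical example model
	pipeline_tag: "fill-mask",
	mask_token: "[MASK]",
	tags: [],
} as unknown as Model; // loose cast: the exact ModelDataMinimal shape lives in src/snippets/types.ts

// One entry point per language, each returning InferenceSnippet[]
// ({ client, content } pairs, as the removed js.js below makes explicit).
const js = snippets.js.getJsInferenceSnippet(model, "{API_TOKEN}", "hf-inference");
const py = snippets.python.getPythonInferenceSnippet(model, "{API_TOKEN}", "hf-inference");
const sh = snippets.curl.getCurlInferenceSnippet(model, "{API_TOKEN}", "hf-inference");
```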
package/dist/commonjs/snippets/js.js
@@ -1,413 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.jsSnippets = exports.snippetFile = exports.snippetAutomaticSpeechRecognition = exports.snippetTextToAudio = exports.snippetTextToVideo = exports.snippetTextToImage = exports.snippetZeroShotClassification = exports.snippetTextGeneration = exports.snippetBasic = void 0;
-exports.getJsInferenceSnippet = getJsInferenceSnippet;
-const inference_providers_js_1 = require("../inference-providers.js");
-const common_js_1 = require("./common.js");
-const inputs_js_1 = require("./inputs.js");
-const HFJS_METHODS = {
-    "text-classification": "textClassification",
-    "token-classification": "tokenClassification",
-    "table-question-answering": "tableQuestionAnswering",
-    "question-answering": "questionAnswering",
-    translation: "translation",
-    summarization: "summarization",
-    "feature-extraction": "featureExtraction",
-    "text-generation": "textGeneration",
-    "text2text-generation": "textGeneration",
-    "fill-mask": "fillMask",
-    "sentence-similarity": "sentenceSimilarity",
-};
-const snippetBasic = (model, accessToken, provider) => {
-    return [
-        ...(model.pipeline_tag && model.pipeline_tag in HFJS_METHODS
-            ? [
-                {
-                    client: "huggingface.js",
-                    content: `\
-import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-const output = await client.${HFJS_METHODS[model.pipeline_tag]}({
-	model: "${model.id}",
-	inputs: ${(0, inputs_js_1.getModelInputSnippet)(model)},
-	provider: "${provider}",
-});
-
-console.log(output);
-`,
-                },
-            ]
-            : []),
-        {
-            client: "fetch",
-            content: `\
-async function query(data) {
-	const response = await fetch(
-		"https://router.huggingface.co/hf-inference/models/${model.id}",
-		{
-			headers: {
-				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
-				"Content-Type": "application/json",
-			},
-			method: "POST",
-			body: JSON.stringify(data),
-		}
-	);
-	const result = await response.json();
-	return result;
-}
-
-query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
-	console.log(JSON.stringify(response));
-});`,
-        },
-    ];
-};
-exports.snippetBasic = snippetBasic;
-const snippetTextGeneration = (model, accessToken, provider, providerModelId, opts) => {
-    if (model.tags.includes("conversational")) {
-        // Conversational model detected, so we display a code snippet that features the Messages API
-        const streaming = opts?.streaming ?? true;
-        const exampleMessages = (0, inputs_js_1.getModelInputSnippet)(model);
-        const messages = opts?.messages ?? exampleMessages;
-        const messagesStr = (0, common_js_1.stringifyMessages)(messages, { indent: "\t" });
-        const config = {
-            ...(opts?.temperature ? { temperature: opts.temperature } : undefined),
-            max_tokens: opts?.max_tokens ?? 500,
-            ...(opts?.top_p ? { top_p: opts.top_p } : undefined),
-        };
-        const configStr = (0, common_js_1.stringifyGenerationConfig)(config, {
-            indent: "\n\t",
-            attributeValueConnector: ": ",
-        });
-        if (streaming) {
-            return [
-                {
-                    client: "huggingface.js",
-                    content: `import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-let out = "";
-
-const stream = client.chatCompletionStream({
-	model: "${model.id}",
-	messages: ${messagesStr},
-	provider: "${provider}",
-	${configStr}
-});
-
-for await (const chunk of stream) {
-	if (chunk.choices && chunk.choices.length > 0) {
-		const newContent = chunk.choices[0].delta.content;
-		out += newContent;
-		console.log(newContent);
-	}
-}`,
-                },
-                {
-                    client: "openai",
-                    content: `import { OpenAI } from "openai";
-
-const client = new OpenAI({
-	baseURL: "${(0, inference_providers_js_1.openAIbaseUrl)(provider)}",
-	apiKey: "${accessToken || `{API_TOKEN}`}"
-});
-
-let out = "";
-
-const stream = await client.chat.completions.create({
-	model: "${providerModelId ?? model.id}",
-	messages: ${messagesStr},
-	${configStr}
-	stream: true,
-});
-
-for await (const chunk of stream) {
-	if (chunk.choices && chunk.choices.length > 0) {
-		const newContent = chunk.choices[0].delta.content;
-		out += newContent;
-		console.log(newContent);
-	}
-}`,
-                },
-            ];
-        }
-        else {
-            return [
-                {
-                    client: "huggingface.js",
-                    content: `import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-const chatCompletion = await client.chatCompletion({
-	model: "${model.id}",
-	messages: ${messagesStr},
-	provider: "${provider}",
-	${configStr}
-});
-
-console.log(chatCompletion.choices[0].message);
-`,
-                },
-                {
-                    client: "openai",
-                    content: `import { OpenAI } from "openai";
-
-const client = new OpenAI({
-	baseURL: "${(0, inference_providers_js_1.openAIbaseUrl)(provider)}",
-	apiKey: "${accessToken || `{API_TOKEN}`}"
-});
-
-const chatCompletion = await client.chat.completions.create({
-	model: "${providerModelId ?? model.id}",
-	messages: ${messagesStr},
-	${configStr}
-});
-
-console.log(chatCompletion.choices[0].message);
-`,
-                },
-            ];
-        }
-    }
-    else {
-        return (0, exports.snippetBasic)(model, accessToken, provider);
-    }
-};
-exports.snippetTextGeneration = snippetTextGeneration;
-const snippetZeroShotClassification = (model, accessToken) => {
-    return [
-        {
-            client: "fetch",
-            content: `async function query(data) {
-	const response = await fetch(
-		"https://router.huggingface.co/hf-inference/models/${model.id}",
-		{
-			headers: {
-				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
-				"Content-Type": "application/json",
-			},
-			method: "POST",
-			body: JSON.stringify(data),
-		}
-	);
-	const result = await response.json();
-	return result;
-}
-
-query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
-	console.log(JSON.stringify(response));
-});`,
-        },
-    ];
-};
-exports.snippetZeroShotClassification = snippetZeroShotClassification;
-const snippetTextToImage = (model, accessToken, provider) => {
-    return [
-        {
-            client: "huggingface.js",
-            content: `\
-import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-const image = await client.textToImage({
-	model: "${model.id}",
-	inputs: ${(0, inputs_js_1.getModelInputSnippet)(model)},
-	parameters: { num_inference_steps: 5 },
-	provider: "${provider}",
-});
-/// Use the generated image (it's a Blob)
-`,
-        },
-        ...(provider === "hf-inference"
-            ? [
-                {
-                    client: "fetch",
-                    content: `async function query(data) {
-	const response = await fetch(
-		"https://router.huggingface.co/hf-inference/models/${model.id}",
-		{
-			headers: {
-				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
-				"Content-Type": "application/json",
-			},
-			method: "POST",
-			body: JSON.stringify(data),
-		}
-	);
-	const result = await response.blob();
-	return result;
-}
-query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
-	// Use image
-});`,
-                },
-            ]
-            : []),
-    ];
-};
-exports.snippetTextToImage = snippetTextToImage;
-const snippetTextToVideo = (model, accessToken, provider) => {
-    return ["fal-ai", "replicate"].includes(provider)
-        ? [
-            {
-                client: "huggingface.js",
-                content: `\
-import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-const video = await client.textToVideo({
-	model: "${model.id}",
-	provider: "${provider}",
-	inputs: ${(0, inputs_js_1.getModelInputSnippet)(model)},
-	parameters: { num_inference_steps: 5 },
-});
-// Use the generated video (it's a Blob)
-`,
-            },
-        ]
-        : [];
-};
-exports.snippetTextToVideo = snippetTextToVideo;
-const snippetTextToAudio = (model, accessToken, provider) => {
-    if (provider !== "hf-inference") {
-        return [];
-    }
-    const commonSnippet = `async function query(data) {
-	const response = await fetch(
-		"https://router.huggingface.co/hf-inference/models/${model.id}",
-		{
-			headers: {
-				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
-				"Content-Type": "application/json",
-			},
-			method: "POST",
-			body: JSON.stringify(data),
-		}
-	);`;
-    if (model.library_name === "transformers") {
-        return [
-            {
-                client: "fetch",
-                content: commonSnippet +
-                    `
-	const result = await response.blob();
-	return result;
-}
-query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
-	// Returns a byte object of the Audio wavform. Use it directly!
-});`,
-            },
-        ];
-    }
-    else {
-        return [
-            {
-                client: "fetch",
-                content: commonSnippet +
-                    `
-	const result = await response.json();
-	return result;
-}
-
-query({"inputs": ${(0, inputs_js_1.getModelInputSnippet)(model)}}).then((response) => {
-	console.log(JSON.stringify(response));
-});`,
-            },
-        ];
-    }
-};
-exports.snippetTextToAudio = snippetTextToAudio;
-const snippetAutomaticSpeechRecognition = (model, accessToken, provider) => {
-    return [
-        {
-            client: "huggingface.js",
-            content: `\
-import { HfInference } from "@huggingface/inference";
-
-const client = new HfInference("${accessToken || `{API_TOKEN}`}");
-
-const data = fs.readFileSync(${(0, inputs_js_1.getModelInputSnippet)(model)});
-
-const output = await client.automaticSpeechRecognition({
-	data,
-	model: "${model.id}",
-	provider: "${provider}",
-});
-
-console.log(output);
-`,
-        },
-        ...(provider === "hf-inference" ? (0, exports.snippetFile)(model, accessToken, provider) : []),
-    ];
-};
-exports.snippetAutomaticSpeechRecognition = snippetAutomaticSpeechRecognition;
-const snippetFile = (model, accessToken, provider) => {
-    if (provider !== "hf-inference") {
-        return [];
-    }
-    return [
-        {
-            client: "fetch",
-            content: `async function query(filename) {
-	const data = fs.readFileSync(filename);
-	const response = await fetch(
-		"https://router.huggingface.co/hf-inference/models/${model.id}",
-		{
-			headers: {
-				Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
-				"Content-Type": "application/json",
-			},
-			method: "POST",
-			body: data,
-		}
-	);
-	const result = await response.json();
-	return result;
-}
-
-query(${(0, inputs_js_1.getModelInputSnippet)(model)}).then((response) => {
-	console.log(JSON.stringify(response));
-});`,
-        },
-    ];
-};
-exports.snippetFile = snippetFile;
-exports.jsSnippets = {
-    // Same order as in tasks/src/pipelines.ts
-    "text-classification": exports.snippetBasic,
-    "token-classification": exports.snippetBasic,
-    "table-question-answering": exports.snippetBasic,
-    "question-answering": exports.snippetBasic,
-    "zero-shot-classification": exports.snippetZeroShotClassification,
-    translation: exports.snippetBasic,
-    summarization: exports.snippetBasic,
-    "feature-extraction": exports.snippetBasic,
-    "text-generation": exports.snippetTextGeneration,
-    "image-text-to-text": exports.snippetTextGeneration,
-    "text2text-generation": exports.snippetBasic,
-    "fill-mask": exports.snippetBasic,
-    "sentence-similarity": exports.snippetBasic,
-    "automatic-speech-recognition": exports.snippetAutomaticSpeechRecognition,
-    "text-to-image": exports.snippetTextToImage,
-    "text-to-video": exports.snippetTextToVideo,
-    "text-to-speech": exports.snippetTextToAudio,
-    "text-to-audio": exports.snippetTextToAudio,
-    "audio-to-audio": exports.snippetFile,
-    "audio-classification": exports.snippetFile,
-    "image-classification": exports.snippetFile,
-    "image-to-text": exports.snippetFile,
-    "object-detection": exports.snippetFile,
-    "image-segmentation": exports.snippetFile,
-};
-function getJsInferenceSnippet(model, accessToken, provider, providerModelId, opts) {
-    return model.pipeline_tag && model.pipeline_tag in exports.jsSnippets
-        ? exports.jsSnippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
-        : [];
-}
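To make the branching in the removed `js.js` concrete: with no `opts`, `streaming` defaults to `true`, so a model tagged `conversational` gets the two streaming snippets, a non-conversational `text-generation` model falls back to `snippetBasic`, and a `pipeline_tag` absent from `jsSnippets` yields an empty array. A hedged sketch against 0.16.7 (model id invented; `snippets.js` namespace assumed as above):

```ts
import { snippets } from "@huggingface/tasks"; // 0.16.7 only

type Model = Parameters<typeof snippets.js.getJsInferenceSnippet>[0];
const model = {
	id: "meta-llama/Llama-3.1-8B-Instruct", // hypothetical example model
	pipeline_tag: "text-generation",
	tags: ["conversational"],
} as unknown as Model;

const out = snippets.js.getJsInferenceSnippet(model, "{API_TOKEN}", "hf-inference");
// Streaming branch above: one huggingface.js snippet plus one openai snippet.
console.log(out.map((s) => s.client)); // ["huggingface.js", "openai"]

// Disabling streaming via opts selects the non-streaming chatCompletion pair instead.
const blocking = snippets.js.getJsInferenceSnippet(model, "{API_TOKEN}", "hf-inference", undefined, {
	streaming: false,
});
console.log(blocking.map((s) => s.client)); // ["huggingface.js", "openai"]
```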
package/dist/commonjs/snippets/python.d.ts
@@ -1,23 +0,0 @@
-import { type SnippetInferenceProvider } from "../inference-providers.js";
-import type { PipelineType } from "../pipelines.js";
-import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
-import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
-export declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
-    streaming?: boolean;
-    messages?: ChatCompletionInputMessage[];
-    temperature?: GenerationParameters["temperature"];
-    max_tokens?: GenerationParameters["max_tokens"];
-    top_p?: GenerationParameters["top_p"];
-}) => InferenceSnippet[];
-export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
-export declare const snippetFile: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string) => InferenceSnippet[];
-export declare const snippetTextToVideo: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
-export declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const snippetTextToAudio: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet[];
-export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
-export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
-//# sourceMappingURL=python.d.ts.map
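The declarations above fix the Python entry point's shape: `getPythonInferenceSnippet(model, accessToken, provider, providerModelId?, opts?)` returning `InferenceSnippet[]`. A final sketch against 0.16.7 (model id invented; the client labels in the comment are my recollection of what the Python generator emitted, not something shown in this hunk):

```ts
import { snippets } from "@huggingface/tasks"; // removed from the public surface in 0.17.0

type Model = Parameters<typeof snippets.python.getPythonInferenceSnippet>[0];
const model = {
	id: "openai/whisper-large-v3", // hypothetical example model
	pipeline_tag: "automatic-speech-recognition",
	tags: [],
} as unknown as Model;

// Each entry pairs a client label (e.g. "huggingface_hub" or "requests",
// if memory serves) with a ready-to-paste Python source string.
const py = snippets.python.getPythonInferenceSnippet(model, "{API_TOKEN}", "hf-inference");
for (const s of py) {
	console.log(`# client: ${s.client}`);
	console.log(s.content);
}
```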
package/dist/commonjs/snippets/python.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACzF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,iBAAiB,CAAC;AAChE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,mBAAmB,CAAC;AAG1F,OAAO,KAAK,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAyCrE,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EAiGlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,gBAAgB,EAevF,CAAC;AAEF,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,gBAAgB,EAqB5F,CAAC;AAEF,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgClB,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,gBAAgB,EAcrE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,KACtB,gBAAgB,EAoDlB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,gBAAgB,EAcxE,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,gBAAgB,EAuC5E,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,gBAAgB,EAiB1F,CAAC;AAEF,eAAO,MAAM,cAAc,EAAE,OAAO,CACnC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA+BD,CAAC;AAEF,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CA0BpB"}