@huggingface/inference 3.5.2 → 3.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/index.cjs +364 -970
  2. package/dist/index.js +366 -981
  3. package/dist/src/index.d.ts.map +1 -1
  4. package/dist/src/lib/makeRequestOptions.d.ts +16 -1
  5. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  6. package/dist/src/providers/novita.d.ts.map +1 -1
  7. package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
  8. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
  9. package/dist/src/snippets/index.d.ts +1 -4
  10. package/dist/src/snippets/index.d.ts.map +1 -1
  11. package/dist/src/snippets/templates.exported.d.ts +2 -0
  12. package/dist/src/snippets/templates.exported.d.ts.map +1 -0
  13. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  14. package/package.json +9 -5
  15. package/src/index.ts +1 -1
  16. package/src/lib/makeRequestOptions.ts +37 -10
  17. package/src/providers/fireworks-ai.ts +1 -1
  18. package/src/providers/hf-inference.ts +1 -1
  19. package/src/providers/nebius.ts +3 -3
  20. package/src/providers/novita.ts +7 -6
  21. package/src/providers/sambanova.ts +1 -1
  22. package/src/providers/together.ts +3 -3
  23. package/src/snippets/getInferenceSnippets.ts +380 -0
  24. package/src/snippets/index.ts +1 -5
  25. package/src/snippets/templates.exported.ts +72 -0
  26. package/src/tasks/cv/textToVideo.ts +25 -5
  27. package/src/vendor/fetch-event-source/LICENSE +21 -0
  28. package/dist/src/snippets/curl.d.ts +0 -17
  29. package/dist/src/snippets/curl.d.ts.map +0 -1
  30. package/dist/src/snippets/js.d.ts +0 -21
  31. package/dist/src/snippets/js.d.ts.map +0 -1
  32. package/dist/src/snippets/python.d.ts +0 -4
  33. package/dist/src/snippets/python.d.ts.map +0 -1
  34. package/src/snippets/curl.ts +0 -177
  35. package/src/snippets/js.ts +0 -475
  36. package/src/snippets/python.ts +0 -563
@@ -0,0 +1,380 @@
1
+ import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
2
+ import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
3
+ import {
4
+ type InferenceSnippet,
5
+ type InferenceSnippetLanguage,
6
+ type ModelDataMinimal,
7
+ inferenceSnippetLanguages,
8
+ getModelInputSnippet,
9
+ } from "@huggingface/tasks";
10
+ import type { InferenceProvider, InferenceTask, RequestArgs } from "../types";
11
+ import { Template } from "@huggingface/jinja";
12
+ import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions";
13
+ import { templates } from "./templates.exported";
14
+
15
+ const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"] as const;
16
+ const JS_CLIENTS = ["fetch", "huggingface.js", "openai"] as const;
17
+ const SH_CLIENTS = ["curl"] as const;
18
+
19
+ type Client = (typeof SH_CLIENTS)[number] | (typeof PYTHON_CLIENTS)[number] | (typeof JS_CLIENTS)[number];
20
+
21
+ const CLIENTS: Record<InferenceSnippetLanguage, Client[]> = {
22
+ js: [...JS_CLIENTS],
23
+ python: [...PYTHON_CLIENTS],
24
+ sh: [...SH_CLIENTS],
25
+ };
26
+
27
+ type InputPreparationFn = (model: ModelDataMinimal, opts?: Record<string, unknown>) => object;
28
+ interface TemplateParams {
29
+ accessToken?: string;
30
+ authorizationHeader?: string;
31
+ baseUrl?: string;
32
+ fullUrl?: string;
33
+ inputs?: object;
34
+ providerInputs?: object;
35
+ model?: ModelDataMinimal;
36
+ provider?: InferenceProvider;
37
+ providerModelId?: string;
38
+ methodName?: string; // specific to snippetBasic
39
+ importBase64?: boolean; // specific to snippetImportRequests
40
+ importJson?: boolean; // specific to snippetImportRequests
41
+ }
42
+
43
+ // Helpers to find + load templates
44
+
45
+ const hasTemplate = (language: InferenceSnippetLanguage, client: Client, templateName: string): boolean =>
46
+ templates[language]?.[client]?.[templateName] !== undefined;
47
+
48
+ const loadTemplate = (
49
+ language: InferenceSnippetLanguage,
50
+ client: Client,
51
+ templateName: string
52
+ ): ((data: TemplateParams) => string) => {
53
+ const template = templates[language]?.[client]?.[templateName];
54
+ if (!template) {
55
+ throw new Error(`Template not found: ${language}/${client}/${templateName}`);
56
+ }
57
+ return (data: TemplateParams) => new Template(template).render({ ...data });
58
+ };
59
+
60
+ const snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
61
+ const snippetImportRequests = loadTemplate("python", "requests", "importRequests");
62
+
63
+ // Needed for huggingface_hub basic snippets
64
+
65
+ const HF_PYTHON_METHODS: Partial<Record<WidgetType, string>> = {
66
+ "audio-classification": "audio_classification",
67
+ "audio-to-audio": "audio_to_audio",
68
+ "automatic-speech-recognition": "automatic_speech_recognition",
69
+ "document-question-answering": "document_question_answering",
70
+ "feature-extraction": "feature_extraction",
71
+ "fill-mask": "fill_mask",
72
+ "image-classification": "image_classification",
73
+ "image-segmentation": "image_segmentation",
74
+ "image-to-image": "image_to_image",
75
+ "image-to-text": "image_to_text",
76
+ "object-detection": "object_detection",
77
+ "question-answering": "question_answering",
78
+ "sentence-similarity": "sentence_similarity",
79
+ summarization: "summarization",
80
+ "table-question-answering": "table_question_answering",
81
+ "tabular-classification": "tabular_classification",
82
+ "tabular-regression": "tabular_regression",
83
+ "text-classification": "text_classification",
84
+ "text-generation": "text_generation",
85
+ "text-to-image": "text_to_image",
86
+ "text-to-speech": "text_to_speech",
87
+ "text-to-video": "text_to_video",
88
+ "token-classification": "token_classification",
89
+ translation: "translation",
90
+ "visual-question-answering": "visual_question_answering",
91
+ "zero-shot-classification": "zero_shot_classification",
92
+ "zero-shot-image-classification": "zero_shot_image_classification",
93
+ };
94
+
95
+ // Needed for huggingface.js basic snippets
96
+
97
+ const HF_JS_METHODS: Partial<Record<WidgetType, string>> = {
98
+ "automatic-speech-recognition": "automaticSpeechRecognition",
99
+ "feature-extraction": "featureExtraction",
100
+ "fill-mask": "fillMask",
101
+ "image-classification": "imageClassification",
102
+ "question-answering": "questionAnswering",
103
+ "sentence-similarity": "sentenceSimilarity",
104
+ summarization: "summarization",
105
+ "table-question-answering": "tableQuestionAnswering",
106
+ "text-classification": "textClassification",
107
+ "text-generation": "textGeneration",
108
+ "text2text-generation": "textGeneration",
109
+ "token-classification": "tokenClassification",
110
+ translation: "translation",
111
+ };
112
+
113
+ // Snippet generators
114
+ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPreparationFn) => {
115
+ return (
116
+ model: ModelDataMinimal,
117
+ accessToken: string,
118
+ provider: InferenceProvider,
119
+ providerModelId?: string,
120
+ opts?: Record<string, unknown>
121
+ ): InferenceSnippet[] => {
122
+ /// Hacky: hard-code conversational templates here
123
+ if (
124
+ model.pipeline_tag &&
125
+ ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) &&
126
+ model.tags.includes("conversational")
127
+ ) {
128
+ templateName = opts?.streaming ? "conversationalStream" : "conversational";
129
+ inputPreparationFn = prepareConversationalInput;
130
+ }
131
+
132
+ /// Prepare inputs + make request
133
+ const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: getModelInputSnippet(model) };
134
+ const request = makeRequestOptionsFromResolvedModel(
135
+ providerModelId ?? model.id,
136
+ { accessToken: accessToken, provider: provider, ...inputs } as RequestArgs,
137
+ { chatCompletion: templateName.includes("conversational"), task: model.pipeline_tag as InferenceTask }
138
+ );
139
+
140
+ /// Parse request.info.body if not a binary.
141
+ /// This is the body sent to the provider. Important for snippets with raw payload (e.g curl, requests, etc.)
142
+ let providerInputs = inputs;
143
+ const bodyAsObj = request.info.body;
144
+ if (typeof bodyAsObj === "string") {
145
+ try {
146
+ providerInputs = JSON.parse(bodyAsObj);
147
+ } catch (e) {
148
+ console.error("Failed to parse body as JSON", e);
149
+ }
150
+ }
151
+
152
+ /// Prepare template injection data
153
+ const params: TemplateParams = {
154
+ accessToken,
155
+ authorizationHeader: (request.info.headers as Record<string, string>)?.Authorization,
156
+ baseUrl: removeSuffix(request.url, "/chat/completions"),
157
+ fullUrl: request.url,
158
+ inputs: {
159
+ asObj: inputs,
160
+ asCurlString: formatBody(inputs, "curl"),
161
+ asJsonString: formatBody(inputs, "json"),
162
+ asPythonString: formatBody(inputs, "python"),
163
+ asTsString: formatBody(inputs, "ts"),
164
+ },
165
+ providerInputs: {
166
+ asObj: providerInputs,
167
+ asCurlString: formatBody(providerInputs, "curl"),
168
+ asJsonString: formatBody(providerInputs, "json"),
169
+ asPythonString: formatBody(providerInputs, "python"),
170
+ asTsString: formatBody(providerInputs, "ts"),
171
+ },
172
+ model,
173
+ provider,
174
+ providerModelId: providerModelId ?? model.id,
175
+ };
176
+
177
+ /// Iterate over clients => check if a snippet exists => generate
178
+ return inferenceSnippetLanguages
179
+ .map((language) => {
180
+ return CLIENTS[language]
181
+ .map((client) => {
182
+ if (!hasTemplate(language, client, templateName)) {
183
+ return;
184
+ }
185
+ const template = loadTemplate(language, client, templateName);
186
+ if (client === "huggingface_hub" && templateName.includes("basic")) {
187
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_PYTHON_METHODS)) {
188
+ return;
189
+ }
190
+ params["methodName"] = HF_PYTHON_METHODS[model.pipeline_tag];
191
+ }
192
+
193
+ if (client === "huggingface.js" && templateName.includes("basic")) {
194
+ if (!(model.pipeline_tag && model.pipeline_tag in HF_JS_METHODS)) {
195
+ return;
196
+ }
197
+ params["methodName"] = HF_JS_METHODS[model.pipeline_tag];
198
+ }
199
+
200
+ /// Generate snippet
201
+ let snippet = template(params).trim();
202
+ if (!snippet) {
203
+ return;
204
+ }
205
+
206
+ /// Add import section separately
207
+ if (client === "huggingface_hub") {
208
+ const importSection = snippetImportPythonInferenceClient({ ...params });
209
+ snippet = `${importSection}\n\n${snippet}`;
210
+ } else if (client === "requests") {
211
+ const importSection = snippetImportRequests({
212
+ ...params,
213
+ importBase64: snippet.includes("base64"),
214
+ importJson: snippet.includes("json."),
215
+ });
216
+ snippet = `${importSection}\n\n${snippet}`;
217
+ }
218
+
219
+ /// Snippet is ready!
220
+ return { language, client: client as string, content: snippet };
221
+ })
222
+ .filter((snippet): snippet is InferenceSnippet => snippet !== undefined);
223
+ })
224
+ .flat();
225
+ };
226
+ };
227
+
228
+ const prepareDocumentQuestionAnsweringInput = (model: ModelDataMinimal): object => {
229
+ return JSON.parse(getModelInputSnippet(model) as string);
230
+ };
231
+
232
+ const prepareImageToImageInput = (model: ModelDataMinimal): object => {
233
+ const data = JSON.parse(getModelInputSnippet(model) as string);
234
+ return { inputs: data.image, parameters: { prompt: data.prompt } };
235
+ };
236
+
237
+ const prepareConversationalInput = (
238
+ model: ModelDataMinimal,
239
+ opts?: {
240
+ streaming?: boolean;
241
+ messages?: ChatCompletionInputMessage[];
242
+ temperature?: GenerationParameters["temperature"];
243
+ max_tokens?: GenerationParameters["max_new_tokens"];
244
+ top_p?: GenerationParameters["top_p"];
245
+ }
246
+ ): object => {
247
+ return {
248
+ messages: opts?.messages ?? getModelInputSnippet(model),
249
+ ...(opts?.temperature ? { temperature: opts?.temperature } : undefined),
250
+ max_tokens: opts?.max_tokens ?? 500,
251
+ ...(opts?.top_p ? { top_p: opts?.top_p } : undefined),
252
+ };
253
+ };
254
+
255
+ const snippets: Partial<
256
+ Record<
257
+ PipelineType,
258
+ (
259
+ model: ModelDataMinimal,
260
+ accessToken: string,
261
+ provider: InferenceProvider,
262
+ providerModelId?: string,
263
+ opts?: Record<string, unknown>
264
+ ) => InferenceSnippet[]
265
+ >
266
+ > = {
267
+ "audio-classification": snippetGenerator("basicAudio"),
268
+ "audio-to-audio": snippetGenerator("basicAudio"),
269
+ "automatic-speech-recognition": snippetGenerator("basicAudio"),
270
+ "document-question-answering": snippetGenerator("documentQuestionAnswering", prepareDocumentQuestionAnsweringInput),
271
+ "feature-extraction": snippetGenerator("basic"),
272
+ "fill-mask": snippetGenerator("basic"),
273
+ "image-classification": snippetGenerator("basicImage"),
274
+ "image-segmentation": snippetGenerator("basicImage"),
275
+ "image-text-to-text": snippetGenerator("conversational"),
276
+ "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
277
+ "image-to-text": snippetGenerator("basicImage"),
278
+ "object-detection": snippetGenerator("basicImage"),
279
+ "question-answering": snippetGenerator("basic"),
280
+ "sentence-similarity": snippetGenerator("basic"),
281
+ summarization: snippetGenerator("basic"),
282
+ "tabular-classification": snippetGenerator("tabular"),
283
+ "tabular-regression": snippetGenerator("tabular"),
284
+ "table-question-answering": snippetGenerator("basic"),
285
+ "text-classification": snippetGenerator("basic"),
286
+ "text-generation": snippetGenerator("basic"),
287
+ "text-to-audio": snippetGenerator("textToAudio"),
288
+ "text-to-image": snippetGenerator("textToImage"),
289
+ "text-to-speech": snippetGenerator("textToAudio"),
290
+ "text-to-video": snippetGenerator("textToVideo"),
291
+ "text2text-generation": snippetGenerator("basic"),
292
+ "token-classification": snippetGenerator("basic"),
293
+ translation: snippetGenerator("basic"),
294
+ "zero-shot-classification": snippetGenerator("zeroShotClassification"),
295
+ "zero-shot-image-classification": snippetGenerator("zeroShotImageClassification"),
296
+ };
297
+
298
+ export function getInferenceSnippets(
299
+ model: ModelDataMinimal,
300
+ accessToken: string,
301
+ provider: InferenceProvider,
302
+ providerModelId?: string,
303
+ opts?: Record<string, unknown>
304
+ ): InferenceSnippet[] {
305
+ return model.pipeline_tag && model.pipeline_tag in snippets
306
+ ? snippets[model.pipeline_tag]?.(model, accessToken, provider, providerModelId, opts) ?? []
307
+ : [];
308
+ }
309
+
310
+ // String manipulation helpers
311
+
312
+ function formatBody(obj: object, format: "curl" | "json" | "python" | "ts"): string {
313
+ switch (format) {
314
+ case "curl":
315
+ return indentString(formatBody(obj, "json"));
316
+
317
+ case "json":
318
+ /// Hacky: remove outer brackets to make it extendable in templates
319
+ return JSON.stringify(obj, null, 4).split("\n").slice(1, -1).join("\n");
320
+
321
+ case "python":
322
+ return indentString(
323
+ Object.entries(obj)
324
+ .map(([key, value]) => {
325
+ const formattedValue = JSON.stringify(value, null, 4).replace(/"/g, '"');
326
+ return `${key}=${formattedValue},`;
327
+ })
328
+ .join("\n")
329
+ );
330
+
331
+ case "ts":
332
+ /// Hacky: remove outer brackets to make it extendable in templates
333
+ return formatTsObject(obj).split("\n").slice(1, -1).join("\n");
334
+
335
+ default:
336
+ throw new Error(`Unsupported format: ${format}`);
337
+ }
338
+ }
339
+
340
+ function formatTsObject(obj: unknown, depth?: number): string {
341
+ depth = depth ?? 0;
342
+
343
+ /// Case int, boolean, string, etc.
344
+ if (typeof obj !== "object" || obj === null) {
345
+ return JSON.stringify(obj);
346
+ }
347
+
348
+ /// Case array
349
+ if (Array.isArray(obj)) {
350
+ const items = obj
351
+ .map((item) => {
352
+ const formatted = formatTsObject(item, depth + 1);
353
+ return `${" ".repeat(4 * (depth + 1))}${formatted},`;
354
+ })
355
+ .join("\n");
356
+ return `[\n${items}\n${" ".repeat(4 * depth)}]`;
357
+ }
358
+
359
+ /// Case mapping
360
+ const entries = Object.entries(obj);
361
+ const lines = entries
362
+ .map(([key, value]) => {
363
+ const formattedValue = formatTsObject(value, depth + 1);
364
+ const keyStr = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/.test(key) ? key : `"${key}"`;
365
+ return `${" ".repeat(4 * (depth + 1))}${keyStr}: ${formattedValue},`;
366
+ })
367
+ .join("\n");
368
+ return `{\n${lines}\n${" ".repeat(4 * depth)}}`;
369
+ }
370
+
371
+ function indentString(str: string): string {
372
+ return str
373
+ .split("\n")
374
+ .map((line) => " ".repeat(4) + line)
375
+ .join("\n");
376
+ }
377
+
378
+ function removeSuffix(str: string, suffix: string) {
379
+ return str.endsWith(suffix) ? str.slice(0, -suffix.length) : str;
380
+ }
@@ -1,5 +1 @@
1
- import * as curl from "./curl.js";
2
- import * as python from "./python.js";
3
- import * as js from "./js.js";
4
-
5
- export { curl, python, js };
1
+ export { getInferenceSnippets } from "./getInferenceSnippets.js";
@@ -0,0 +1,72 @@
1
+ // Generated file - do not edit directly
2
+ export const templates: Record<string, Record<string, Record<string, string>>> = {
3
+ "js": {
4
+ "fetch": {
5
+ "basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
6
+ "basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
7
+ "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
8
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
9
+ "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});",
10
+ "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
11
+ },
12
+ "huggingface.js": {
13
+ "basic": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst output = await client.{{ methodName }}({\n\tmodel: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
14
+ "basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
15
+ "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
16
+ "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
17
+ "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = await client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}",
18
+ "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n});\n/// Use the generated image (it's a Blob)",
19
+ "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n});\n// Use the generated video (it's a Blob)"
20
+ },
21
+ "openai": {
22
+ "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
23
+ "conversationalStream": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nlet out = \"\";\n\nconst stream = await client.chat.completions.create({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}"
24
+ }
25
+ },
26
+ "python": {
27
+ "fal_client": {
28
+ "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
29
+ },
30
+ "huggingface_hub": {
31
+ "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
32
+ "basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
33
+ "basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
34
+ "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
35
+ "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
36
+ "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
37
+ "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
38
+ "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n)",
39
+ "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
40
+ "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
41
+ },
42
+ "openai": {
43
+ "conversational": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\ncompletion = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
44
+ "conversationalStream": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\nstream = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\")"
45
+ },
46
+ "requests": {
47
+ "basic": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n}) ",
48
+ "basicAudio": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
49
+ "basicImage": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
50
+ "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
51
+ "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
52
+ "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
53
+ "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
54
+ "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\"Authorization\": \"{{ authorizationHeader }}\"}",
55
+ "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
56
+ "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
57
+ "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
58
+ "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
59
+ "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
60
+ }
61
+ },
62
+ "sh": {
63
+ "curl": {
64
+ "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
65
+ "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
66
+ "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
67
+ "conversational": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": false\n }'",
68
+ "conversationalStream": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": true\n }'",
69
+ "zeroShotClassification": "curl {{ fullUrl }} \\\n -X POST \\\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: {{ authorizationHeader }}'"
70
+ }
71
+ }
72
+ } as const;
@@ -20,7 +20,13 @@ interface ReplicateOutput {
20
20
  output: string;
21
21
  }
22
22
 
23
- const SUPPORTED_PROVIDERS = ["fal-ai", "replicate"] as const satisfies readonly InferenceProvider[];
23
+ interface NovitaOutput {
24
+ video: {
25
+ video_url: string;
26
+ };
27
+ }
28
+
29
+ const SUPPORTED_PROVIDERS = ["fal-ai", "novita", "replicate"] as const satisfies readonly InferenceProvider[];
24
30
 
25
31
  export async function textToVideo(args: TextToVideoArgs, options?: Options): Promise<TextToVideoOutput> {
26
32
  if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
@@ -30,14 +36,13 @@ export async function textToVideo(args: TextToVideoArgs, options?: Options): Pro
30
36
  }
31
37
 
32
38
  const payload =
33
- args.provider === "fal-ai" || args.provider === "replicate"
39
+ args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita"
34
40
  ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs }
35
41
  : args;
36
- const res = await request<FalAiOutput | ReplicateOutput>(payload, {
42
+ const res = await request<FalAiOutput | ReplicateOutput | NovitaOutput>(payload, {
37
43
  ...options,
38
44
  task: "text-to-video",
39
45
  });
40
-
41
46
  if (args.provider === "fal-ai") {
42
47
  const isValidOutput =
43
48
  typeof res === "object" &&
@@ -51,7 +56,22 @@ export async function textToVideo(args: TextToVideoArgs, options?: Options): Pro
51
56
  if (!isValidOutput) {
52
57
  throw new InferenceOutputError("Expected { video: { url: string } }");
53
58
  }
54
- const urlResponse = await fetch(res.video.url);
59
+ const urlResponse = await fetch((res as FalAiOutput).video.url);
60
+ return await urlResponse.blob();
61
+ } else if (args.provider === "novita") {
62
+ const isValidOutput =
63
+ typeof res === "object" &&
64
+ !!res &&
65
+ "video" in res &&
66
+ typeof res.video === "object" &&
67
+ !!res.video &&
68
+ "video_url" in res.video &&
69
+ typeof res.video.video_url === "string" &&
70
+ isUrl(res.video.video_url);
71
+ if (!isValidOutput) {
72
+ throw new InferenceOutputError("Expected { video: { video_url: string } }");
73
+ }
74
+ const urlResponse = await fetch((res as NovitaOutput).video.video_url);
55
75
  return await urlResponse.blob();
56
76
  } else {
57
77
  /// TODO: Replicate: handle the case where the generation request "times out" / is async (ie output is null)
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) Microsoft Corporation.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE
@@ -1,17 +0,0 @@
1
- import { type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import type { PipelineType } from "@huggingface/tasks/src/pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4
- import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
5
- export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
6
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
7
- streaming?: boolean;
8
- messages?: ChatCompletionInputMessage[];
9
- temperature?: GenerationParameters["temperature"];
10
- max_tokens?: GenerationParameters["max_tokens"];
11
- top_p?: GenerationParameters["top_p"];
12
- }) => InferenceSnippet[];
13
- export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
14
- export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
15
- export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
16
- export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
17
- //# sourceMappingURL=curl.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAmC,KAAK,wBAAwB,EAAE,MAAM,oBAAoB,CAAC;AACpG,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,qCAAqC,CAAC;AACxE,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,uCAAuC,CAAC;AAC9G,OAAO,EACN,KAAK,gBAAgB,EACrB,KAAK,gBAAgB,EAIrB,MAAM,oBAAoB,CAAC;AAE5B,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAelB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA2ClB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAClC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAclB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAalB,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CACjC,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA0BD,CAAC;AAEF,wBAAgB,uBAAuB,CACtC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -1,21 +0,0 @@
1
- import { type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import type { PipelineType } from "@huggingface/tasks/src/pipelines.js";
3
- import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4
- import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
5
- export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
6
- export declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: {
7
- streaming?: boolean;
8
- messages?: ChatCompletionInputMessage[];
9
- temperature?: GenerationParameters["temperature"];
10
- max_tokens?: GenerationParameters["max_tokens"];
11
- top_p?: GenerationParameters["top_p"];
12
- }) => InferenceSnippet[];
13
- export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet[];
14
- export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
15
- export declare const snippetTextToVideo: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
16
- export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
17
- export declare const snippetAutomaticSpeechRecognition: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
18
- export declare const snippetFile: (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider) => InferenceSnippet[];
19
- export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>;
20
- export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
21
- //# sourceMappingURL=js.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,oBAAoB,CAAC;AAClF,OAAO,KAAK,EAAE,YAAY,EAAc,MAAM,qCAAqC,CAAC;AACpF,OAAO,KAAK,EAAE,0BAA0B,EAAE,oBAAoB,EAAE,MAAM,uCAAuC,CAAC;AAC9G,OAAO,EACN,KAAK,gBAAgB,EACrB,KAAK,gBAAgB,EAIrB,MAAM,oBAAoB,CAAC;AAgB5B,eAAO,MAAM,YAAY,UACjB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA8ClB,CAAC;AAEF,eAAO,MAAM,qBAAqB,UAC1B,gBAAgB,eACV,MAAM,YACT,wBAAwB,oBAChB,MAAM,SACjB;IACN,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB,QAAQ,CAAC,EAAE,0BAA0B,EAAE,CAAC;IACxC,WAAW,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC,CAAC;IAClD,UAAU,CAAC,EAAE,oBAAoB,CAAC,YAAY,CAAC,CAAC;IAChD,KAAK,CAAC,EAAE,oBAAoB,CAAC,OAAO,CAAC,CAAC;CACtC,KACC,gBAAgB,EA+GlB,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,gBAAgB,EA2B5G,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA4ClB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAqBlB,CAAC;AAEF,eAAO,MAAM,kBAAkB,UACvB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAgDlB,CAAC;AAEF,eAAO,MAAM,iCAAiC,UACtC,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EAsBlB,CAAC;AAEF,eAAO,MAAM,WAAW,UAChB,gBAAgB,eACV,MAAM,YACT,wBAAwB,KAChC,gBAAgB,EA6BlB,CAAC;AAEF,eAAO,MAAM,UAAU,EAAE,OAAO,CAC/B,MAAM,CACL,YAAY,EACZ,CACC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAC1B,gBAAgB,EAAE,CACvB,CA2BD,CAAC;AAEF,wBAAgB,qBAAqB,CACpC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAIpB"}
@@ -1,4 +0,0 @@
1
- import { type SnippetInferenceProvider } from "@huggingface/tasks";
2
- import { type InferenceSnippet, type ModelDataMinimal } from "@huggingface/tasks";
3
- export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>): InferenceSnippet[];
4
- //# sourceMappingURL=python.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,EAAiB,KAAK,wBAAwB,EAAE,MAAM,oBAAoB,CAAC;AAGlF,OAAO,EACN,KAAK,gBAAgB,EACrB,KAAK,gBAAgB,EAIrB,MAAM,oBAAoB,CAAC;AAkgB5B,wBAAgB,yBAAyB,CACxC,KAAK,EAAE,gBAAgB,EACvB,WAAW,EAAE,MAAM,EACnB,QAAQ,EAAE,wBAAwB,EAClC,eAAe,CAAC,EAAE,MAAM,EACxB,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC5B,gBAAgB,EAAE,CAiBpB"}