@huggingface/tasks 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/README.md +16 -2
  2. package/dist/index.d.ts +364 -3
  3. package/dist/index.js +1942 -72
  4. package/dist/index.mjs +1934 -71
  5. package/package.json +1 -1
  6. package/src/default-widget-inputs.ts +718 -0
  7. package/src/index.ts +39 -4
  8. package/src/library-to-tasks.ts +47 -0
  9. package/src/library-ui-elements.ts +765 -0
  10. package/src/model-data.ts +239 -0
  11. package/src/{modelLibraries.ts → model-libraries.ts} +4 -0
  12. package/src/pipelines.ts +22 -0
  13. package/src/snippets/curl.ts +63 -0
  14. package/src/snippets/index.ts +6 -0
  15. package/src/snippets/inputs.ts +129 -0
  16. package/src/snippets/js.ts +150 -0
  17. package/src/snippets/python.ts +114 -0
  18. package/src/tags.ts +15 -0
  19. package/src/{audio-classification → tasks/audio-classification}/about.md +2 -1
  20. package/src/{audio-classification → tasks/audio-classification}/data.ts +3 -3
  21. package/src/{audio-to-audio → tasks/audio-to-audio}/data.ts +1 -1
  22. package/src/{automatic-speech-recognition → tasks/automatic-speech-recognition}/about.md +3 -2
  23. package/src/{automatic-speech-recognition → tasks/automatic-speech-recognition}/data.ts +6 -6
  24. package/src/{conversational → tasks/conversational}/data.ts +1 -1
  25. package/src/{depth-estimation → tasks/depth-estimation}/data.ts +1 -1
  26. package/src/{document-question-answering → tasks/document-question-answering}/data.ts +1 -1
  27. package/src/{feature-extraction → tasks/feature-extraction}/data.ts +2 -7
  28. package/src/{fill-mask → tasks/fill-mask}/data.ts +1 -1
  29. package/src/{image-classification → tasks/image-classification}/data.ts +1 -1
  30. package/src/{image-segmentation → tasks/image-segmentation}/data.ts +1 -1
  31. package/src/{image-to-image → tasks/image-to-image}/about.md +8 -7
  32. package/src/{image-to-image → tasks/image-to-image}/data.ts +1 -1
  33. package/src/{image-to-text → tasks/image-to-text}/data.ts +1 -1
  34. package/src/{tasksData.ts → tasks/index.ts} +140 -15
  35. package/src/{object-detection → tasks/object-detection}/data.ts +1 -1
  36. package/src/{placeholder → tasks/placeholder}/data.ts +1 -1
  37. package/src/{question-answering → tasks/question-answering}/data.ts +1 -1
  38. package/src/{reinforcement-learning → tasks/reinforcement-learning}/data.ts +1 -1
  39. package/src/{sentence-similarity → tasks/sentence-similarity}/data.ts +1 -1
  40. package/src/{summarization → tasks/summarization}/data.ts +1 -1
  41. package/src/{table-question-answering → tasks/table-question-answering}/data.ts +1 -1
  42. package/src/{tabular-classification → tasks/tabular-classification}/data.ts +1 -1
  43. package/src/{tabular-regression → tasks/tabular-regression}/data.ts +1 -1
  44. package/src/{text-classification → tasks/text-classification}/data.ts +1 -1
  45. package/src/{text-generation → tasks/text-generation}/about.md +3 -3
  46. package/src/{text-generation → tasks/text-generation}/data.ts +2 -2
  47. package/src/{text-to-image → tasks/text-to-image}/data.ts +1 -1
  48. package/src/{text-to-speech → tasks/text-to-speech}/about.md +2 -1
  49. package/src/{text-to-speech → tasks/text-to-speech}/data.ts +4 -4
  50. package/src/{text-to-video → tasks/text-to-video}/data.ts +1 -1
  51. package/src/{token-classification → tasks/token-classification}/data.ts +1 -1
  52. package/src/{translation → tasks/translation}/data.ts +1 -1
  53. package/src/{unconditional-image-generation → tasks/unconditional-image-generation}/data.ts +1 -1
  54. package/src/{video-classification → tasks/video-classification}/about.md +8 -28
  55. package/src/{video-classification → tasks/video-classification}/data.ts +1 -1
  56. package/src/{visual-question-answering → tasks/visual-question-answering}/data.ts +1 -1
  57. package/src/{zero-shot-classification → tasks/zero-shot-classification}/data.ts +1 -1
  58. package/src/{zero-shot-image-classification → tasks/zero-shot-image-classification}/data.ts +1 -1
  59. package/src/Types.ts +0 -64
  60. package/src/const.ts +0 -59
  61. /package/src/{audio-to-audio → tasks/audio-to-audio}/about.md +0 -0
  62. /package/src/{conversational → tasks/conversational}/about.md +0 -0
  63. /package/src/{depth-estimation → tasks/depth-estimation}/about.md +0 -0
  64. /package/src/{document-question-answering → tasks/document-question-answering}/about.md +0 -0
  65. /package/src/{feature-extraction → tasks/feature-extraction}/about.md +0 -0
  66. /package/src/{fill-mask → tasks/fill-mask}/about.md +0 -0
  67. /package/src/{image-classification → tasks/image-classification}/about.md +0 -0
  68. /package/src/{image-segmentation → tasks/image-segmentation}/about.md +0 -0
  69. /package/src/{image-to-text → tasks/image-to-text}/about.md +0 -0
  70. /package/src/{object-detection → tasks/object-detection}/about.md +0 -0
  71. /package/src/{placeholder → tasks/placeholder}/about.md +0 -0
  72. /package/src/{question-answering → tasks/question-answering}/about.md +0 -0
  73. /package/src/{reinforcement-learning → tasks/reinforcement-learning}/about.md +0 -0
  74. /package/src/{sentence-similarity → tasks/sentence-similarity}/about.md +0 -0
  75. /package/src/{summarization → tasks/summarization}/about.md +0 -0
  76. /package/src/{table-question-answering → tasks/table-question-answering}/about.md +0 -0
  77. /package/src/{tabular-classification → tasks/tabular-classification}/about.md +0 -0
  78. /package/src/{tabular-regression → tasks/tabular-regression}/about.md +0 -0
  79. /package/src/{text-classification → tasks/text-classification}/about.md +0 -0
  80. /package/src/{text-to-image → tasks/text-to-image}/about.md +0 -0
  81. /package/src/{text-to-video → tasks/text-to-video}/about.md +0 -0
  82. /package/src/{token-classification → tasks/token-classification}/about.md +0 -0
  83. /package/src/{translation → tasks/translation}/about.md +0 -0
  84. /package/src/{unconditional-image-generation → tasks/unconditional-image-generation}/about.md +0 -0
  85. /package/src/{visual-question-answering → tasks/visual-question-answering}/about.md +0 -0
  86. /package/src/{zero-shot-classification → tasks/zero-shot-classification}/about.md +0 -0
  87. /package/src/{zero-shot-image-classification → tasks/zero-shot-image-classification}/about.md +0 -0
@@ -0,0 +1,239 @@
1
+ import type { PipelineType } from "./pipelines";
2
+
3
+ type TableData = Record<string, (string | number)[]>;
4
+
5
+ //#region outputs
6
+ export type WidgetExampleOutputLabels = Array<{ label: string; score: number }>;
7
+ export interface WidgetExampleOutputAnswerScore {
8
+ answer: string;
9
+ score: number;
10
+ }
11
+ export interface WidgetExampleOutputText {
12
+ text: string;
13
+ }
14
+ export interface WidgetExampleOutputUrl {
15
+ url: string;
16
+ }
17
+
18
+ export type WidgetExampleOutput =
19
+ | WidgetExampleOutputLabels
20
+ | WidgetExampleOutputAnswerScore
21
+ | WidgetExampleOutputText
22
+ | WidgetExampleOutputUrl;
23
+ //#endregion
24
+
25
+ export interface WidgetExampleBase<TOutput> {
26
+ example_title?: string;
27
+ group?: string;
28
+ /**
29
+ * Potential overrides to API parameters for this specific example
30
+ * (takes precedences over the model card metadata's inference.parameters)
31
+ */
32
+ parameters?: {
33
+ /// token-classification
34
+ aggregation_strategy?: string;
35
+ /// text-generation
36
+ top_k?: number;
37
+ top_p?: number;
38
+ temperature?: number;
39
+ max_new_tokens?: number;
40
+ do_sample?: boolean;
41
+ /// text-to-image
42
+ negative_prompt?: string;
43
+ guidance_scale?: number;
44
+ num_inference_steps?: number;
45
+ };
46
+ /**
47
+ * Optional output
48
+ */
49
+ output?: TOutput;
50
+ }
51
+
52
+ export interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
53
+ text: string;
54
+ }
55
+
56
+ export interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput>
57
+ extends WidgetExampleTextInput<TOutput> {
58
+ context: string;
59
+ }
60
+
61
+ export interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
62
+ table: TableData;
63
+ }
64
+
65
+ export interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
66
+ src: string;
67
+ }
68
+ export interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput>
69
+ extends WidgetExampleAssetInput<TOutput> {
70
+ prompt: string;
71
+ }
72
+
73
+ export type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
74
+ WidgetExampleTextInput<TOutput>;
75
+
76
+ export type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
77
+ WidgetExampleZeroShotTextInput<TOutput>;
78
+
79
+ export interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
80
+ structured_data: TableData;
81
+ }
82
+
83
+ export interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
84
+ table: TableData;
85
+ }
86
+
87
+ export interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
88
+ text: string;
89
+ candidate_labels: string;
90
+ multi_class: boolean;
91
+ }
92
+
93
+ export interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput>
94
+ extends WidgetExampleBase<TOutput> {
95
+ source_sentence: string;
96
+ sentences: string[];
97
+ }
98
+
99
+ //#endregion
100
+
101
+ export type WidgetExample<TOutput = WidgetExampleOutput> =
102
+ | WidgetExampleTextInput<TOutput>
103
+ | WidgetExampleTextAndContextInput<TOutput>
104
+ | WidgetExampleTextAndTableInput<TOutput>
105
+ | WidgetExampleAssetInput<TOutput>
106
+ | WidgetExampleAssetAndPromptInput<TOutput>
107
+ | WidgetExampleAssetAndTextInput<TOutput>
108
+ | WidgetExampleAssetAndZeroShotInput<TOutput>
109
+ | WidgetExampleStructuredDataInput<TOutput>
110
+ | WidgetExampleTableDataInput<TOutput>
111
+ | WidgetExampleZeroShotTextInput<TOutput>
112
+ | WidgetExampleSentenceSimilarityInput<TOutput>;
113
+
114
+ type KeysOfUnion<T> = T extends unknown ? keyof T : never;
115
+
116
+ export type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
117
+
118
+ export enum InferenceDisplayability {
119
+ /**
120
+ * Yes
121
+ */
122
+ Yes = "Yes",
123
+ /**
124
+ * And then, all the possible reasons why it's no:
125
+ */
126
+ ExplicitOptOut = "ExplicitOptOut",
127
+ CustomCode = "CustomCode",
128
+ LibraryNotDetected = "LibraryNotDetected",
129
+ PipelineNotDetected = "PipelineNotDetected",
130
+ PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported",
131
+ }
132
+
133
+ /**
134
+ * Public interface for model metadata
135
+ */
136
+ export interface ModelData {
137
+ /**
138
+ * id of model (e.g. 'user/repo_name')
139
+ */
140
+ id: string;
141
+ /**
142
+ * Kept for backward compatibility
143
+ */
144
+ modelId?: string;
145
+ /**
146
+ * Whether or not to enable inference widget for this model
147
+ */
148
+ inference: InferenceDisplayability;
149
+ /**
150
+ * is this model private?
151
+ */
152
+ private?: boolean;
153
+ /**
154
+ * this dictionary has useful information about the model configuration
155
+ */
156
+ config?: Record<string, unknown> & {
157
+ adapter_transformers?: { model_class?: string; model_name?: string };
158
+ architectures?: string[];
159
+ sklearn?: {
160
+ filename?: string;
161
+ model_format?: string;
162
+ };
163
+ speechbrain?: {
164
+ interface?: string;
165
+ };
166
+ peft?: {
167
+ base_model_name?: string;
168
+ task_type?: string;
169
+ };
170
+ };
171
+ /**
172
+ * all the model tags
173
+ */
174
+ tags?: string[];
175
+ /**
176
+ * transformers-specific info to display in the code sample.
177
+ */
178
+ transformersInfo?: TransformersInfo;
179
+ /**
180
+ * Pipeline type
181
+ */
182
+ pipeline_tag?: PipelineType | undefined;
183
+ /**
184
+ * for relevant models, get mask token
185
+ */
186
+ mask_token?: string | undefined;
187
+ /**
188
+ * Example data that will be fed into the widget.
189
+ *
190
+ * can be set in the model card metadata (under `widget`),
191
+ * or by default in `DefaultWidget.ts`
192
+ */
193
+ widgetData?: WidgetExample[] | undefined;
194
+ /**
195
+ * Parameters that will be used by the widget when calling Inference API
196
+ * https://huggingface.co/docs/api-inference/detailed_parameters
197
+ *
198
+ * can be set in the model card metadata (under `inference/parameters`)
199
+ * Example:
200
+ * inference:
201
+ * parameters:
202
+ * key: val
203
+ */
204
+ cardData?: {
205
+ inference?:
206
+ | boolean
207
+ | {
208
+ parameters?: Record<string, unknown>;
209
+ };
210
+ base_model?: string;
211
+ };
212
+ /**
213
+ * Library name
214
+ * Example: transformers, SpeechBrain, Stanza, etc.
215
+ */
216
+ library_name?: string;
217
+ }
218
+
219
+ /**
220
+ * transformers-specific info to display in the code sample.
221
+ */
222
+ export interface TransformersInfo {
223
+ /**
224
+ * e.g. AutoModelForSequenceClassification
225
+ */
226
+ auto_model: string;
227
+ /**
228
+ * if set in config.json's auto_map
229
+ */
230
+ custom_class?: string;
231
+ /**
232
+ * e.g. text-classification
233
+ */
234
+ pipeline_tag?: PipelineType;
235
+ /**
236
+ * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor"
237
+ */
238
+ processor?: string;
239
+ }
@@ -41,3 +41,7 @@ export enum ModelLibrary {
41
41
  }
42
42
 
43
43
  export type ModelLibraryKey = keyof typeof ModelLibrary;
44
+
45
+ export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = Object.keys(ModelLibrary).filter(
46
+ (k) => !["doctr", "k2", "mindspore", "tensorflowtts"].includes(k)
47
+ );
package/src/pipelines.ts CHANGED
@@ -438,6 +438,11 @@ export const PIPELINE_DATA = {
438
438
  modality: "cv",
439
439
  color: "indigo",
440
440
  },
441
+ "image-to-video": {
442
+ name: "Image-to-Video",
443
+ modality: "multimodal",
444
+ color: "indigo",
445
+ },
441
446
  "unconditional-image-generation": {
442
447
  name: "Unconditional Image Generation",
443
448
  modality: "cv",
@@ -606,6 +611,16 @@ export const PIPELINE_DATA = {
606
611
  modality: "multimodal",
607
612
  color: "green",
608
613
  },
614
+ "mask-generation": {
615
+ name: "Mask Generation",
616
+ modality: "cv",
617
+ color: "indigo",
618
+ },
619
+ "zero-shot-object-detection": {
620
+ name: "Zero-Shot Object Detection",
621
+ modality: "cv",
622
+ color: "yellow",
623
+ },
609
624
  other: {
610
625
  name: "Other",
611
626
  modality: "other",
@@ -616,4 +631,11 @@ export const PIPELINE_DATA = {
616
631
  } satisfies Record<string, PipelineData>;
617
632
 
618
633
  export type PipelineType = keyof typeof PIPELINE_DATA;
634
+
619
635
  export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];
636
+
637
+ export const SUBTASK_TYPES = Object.values(PIPELINE_DATA)
638
+ .flatMap((data) => ("subtasks" in data ? data.subtasks : []))
639
+ .map((s) => s.type);
640
+
641
+ export const PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES);
@@ -0,0 +1,63 @@
1
+ import type { ModelData } from "../model-data.js";
2
+ import type { PipelineType } from "../pipelines.js";
3
+ import { getModelInputSnippet } from "./inputs.js";
4
+
5
+ export const snippetBasic = (model: ModelData, accessToken: string): string =>
6
+ `curl https://api-inference.huggingface.co/models/${model.id} \\
7
+ -X POST \\
8
+ -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
9
+ -H 'Content-Type: application/json' \\
10
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
11
+ `;
12
+
13
+ export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
14
+ `curl https://api-inference.huggingface.co/models/${model.id} \\
15
+ -X POST \\
16
+ -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
17
+ -H 'Content-Type: application/json' \\
18
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
19
+ `;
20
+
21
+ export const snippetFile = (model: ModelData, accessToken: string): string =>
22
+ `curl https://api-inference.huggingface.co/models/${model.id} \\
23
+ -X POST \\
24
+ --data-binary '@${getModelInputSnippet(model, true, true)}' \\
25
+ -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
26
+ `;
27
+
28
+ export const curlSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
29
+ // Same order as in js/src/lib/interfaces/Types.ts
30
+ "text-classification": snippetBasic,
31
+ "token-classification": snippetBasic,
32
+ "table-question-answering": snippetBasic,
33
+ "question-answering": snippetBasic,
34
+ "zero-shot-classification": snippetZeroShotClassification,
35
+ translation: snippetBasic,
36
+ summarization: snippetBasic,
37
+ conversational: snippetBasic,
38
+ "feature-extraction": snippetBasic,
39
+ "text-generation": snippetBasic,
40
+ "text2text-generation": snippetBasic,
41
+ "fill-mask": snippetBasic,
42
+ "sentence-similarity": snippetBasic,
43
+ "automatic-speech-recognition": snippetFile,
44
+ "text-to-image": snippetBasic,
45
+ "text-to-speech": snippetBasic,
46
+ "text-to-audio": snippetBasic,
47
+ "audio-to-audio": snippetFile,
48
+ "audio-classification": snippetFile,
49
+ "image-classification": snippetFile,
50
+ "image-to-text": snippetFile,
51
+ "object-detection": snippetFile,
52
+ "image-segmentation": snippetFile,
53
+ };
54
+
55
+ export function getCurlInferenceSnippet(model: ModelData, accessToken: string): string {
56
+ return model.pipeline_tag && model.pipeline_tag in curlSnippets
57
+ ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
58
+ : "";
59
+ }
60
+
61
+ export function hasCurlInferenceSnippet(model: ModelData): boolean {
62
+ return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
63
+ }
@@ -0,0 +1,6 @@
1
+ import * as inputs from "./inputs";
2
+ import * as curl from "./curl";
3
+ import * as python from "./python";
4
+ import * as js from "./js";
5
+
6
+ export { inputs, curl, python, js };
@@ -0,0 +1,129 @@
1
+ import type { ModelData } from "../model-data";
2
+ import type { PipelineType } from "../pipelines";
3
+
4
+ const inputsZeroShotClassification = () =>
5
+ `"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`;
6
+
7
+ const inputsTranslation = () => `"Меня зовут Вольфганг и я живу в Берлине"`;
8
+
9
+ const inputsSummarization = () =>
10
+ `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;
11
+
12
+ const inputsConversational = () =>
13
+ `{
14
+ "past_user_inputs": ["Which movie is the best ?"],
15
+ "generated_responses": ["It is Die Hard for sure."],
16
+ "text": "Can you explain why ?"
17
+ }`;
18
+
19
+ const inputsTableQuestionAnswering = () =>
20
+ `{
21
+ "query": "How many stars does the transformers repository have?",
22
+ "table": {
23
+ "Repository": ["Transformers", "Datasets", "Tokenizers"],
24
+ "Stars": ["36542", "4512", "3934"],
25
+ "Contributors": ["651", "77", "34"],
26
+ "Programming language": [
27
+ "Python",
28
+ "Python",
29
+ "Rust, Python and NodeJS"
30
+ ]
31
+ }
32
+ }`;
33
+
34
+ const inputsQuestionAnswering = () =>
35
+ `{
36
+ "question": "What is my name?",
37
+ "context": "My name is Clara and I live in Berkeley."
38
+ }`;
39
+
40
+ const inputsTextClassification = () => `"I like you. I love you"`;
41
+
42
+ const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
43
+
44
+ const inputsTextGeneration = () => `"Can you please let us know more details about your "`;
45
+
46
+ const inputsText2TextGeneration = () => `"The answer to the universe is"`;
47
+
48
+ const inputsFillMask = (model: ModelData) => `"The answer to the universe is ${model.mask_token}."`;
49
+
50
+ const inputsSentenceSimilarity = () =>
51
+ `{
52
+ "source_sentence": "That is a happy person",
53
+ "sentences": [
54
+ "That is a happy dog",
55
+ "That is a very happy person",
56
+ "Today is a sunny day"
57
+ ]
58
+ }`;
59
+
60
+ const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
61
+
62
+ const inputsImageClassification = () => `"cats.jpg"`;
63
+
64
+ const inputsImageToText = () => `"cats.jpg"`;
65
+
66
+ const inputsImageSegmentation = () => `"cats.jpg"`;
67
+
68
+ const inputsObjectDetection = () => `"cats.jpg"`;
69
+
70
+ const inputsAudioToAudio = () => `"sample1.flac"`;
71
+
72
+ const inputsAudioClassification = () => `"sample1.flac"`;
73
+
74
+ const inputsTextToImage = () => `"Astronaut riding a horse"`;
75
+
76
+ const inputsTextToSpeech = () => `"The answer to the universe is 42"`;
77
+
78
+ const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`;
79
+
80
+ const inputsAutomaticSpeechRecognition = () => `"sample1.flac"`;
81
+
82
+ const modelInputSnippets: {
83
+ [key in PipelineType]?: (model: ModelData) => string;
84
+ } = {
85
+ "audio-to-audio": inputsAudioToAudio,
86
+ "audio-classification": inputsAudioClassification,
87
+ "automatic-speech-recognition": inputsAutomaticSpeechRecognition,
88
+ conversational: inputsConversational,
89
+ "feature-extraction": inputsFeatureExtraction,
90
+ "fill-mask": inputsFillMask,
91
+ "image-classification": inputsImageClassification,
92
+ "image-to-text": inputsImageToText,
93
+ "image-segmentation": inputsImageSegmentation,
94
+ "object-detection": inputsObjectDetection,
95
+ "question-answering": inputsQuestionAnswering,
96
+ "sentence-similarity": inputsSentenceSimilarity,
97
+ summarization: inputsSummarization,
98
+ "table-question-answering": inputsTableQuestionAnswering,
99
+ "text-classification": inputsTextClassification,
100
+ "text-generation": inputsTextGeneration,
101
+ "text-to-image": inputsTextToImage,
102
+ "text-to-speech": inputsTextToSpeech,
103
+ "text-to-audio": inputsTextToAudio,
104
+ "text2text-generation": inputsText2TextGeneration,
105
+ "token-classification": inputsTokenClassification,
106
+ translation: inputsTranslation,
107
+ "zero-shot-classification": inputsZeroShotClassification,
108
+ };
109
+
110
+ // Use noWrap to put the whole snippet on a single line (removing new lines and tabulations)
111
+ // Use noQuotes to strip quotes from start & end (example: "abc" -> abc)
112
+ export function getModelInputSnippet(model: ModelData, noWrap = false, noQuotes = false): string {
113
+ if (model.pipeline_tag) {
114
+ const inputs = modelInputSnippets[model.pipeline_tag];
115
+ if (inputs) {
116
+ let result = inputs(model);
117
+ if (noWrap) {
118
+ result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
119
+ }
120
+ if (noQuotes) {
121
+ const REGEX_QUOTES = /^"(.+)"$/s;
122
+ const match = result.match(REGEX_QUOTES);
123
+ result = match ? match[1] : result;
124
+ }
125
+ return result;
126
+ }
127
+ }
128
+ return "No input example has been defined for this model task.";
129
+ }
@@ -0,0 +1,150 @@
1
+ import type { ModelData } from "../model-data.js";
2
+ import type { PipelineType } from "../pipelines.js";
3
+ import { getModelInputSnippet } from "./inputs.js";
4
+
5
+ export const snippetBasic = (model: ModelData, accessToken: string): string =>
6
+ `async function query(data) {
7
+ const response = await fetch(
8
+ "https://api-inference.huggingface.co/models/${model.id}",
9
+ {
10
+ headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
11
+ method: "POST",
12
+ body: JSON.stringify(data),
13
+ }
14
+ );
15
+ const result = await response.json();
16
+ return result;
17
+ }
18
+
19
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
20
+ console.log(JSON.stringify(response));
21
+ });`;
22
+
23
+ export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
24
+ `async function query(data) {
25
+ const response = await fetch(
26
+ "https://api-inference.huggingface.co/models/${model.id}",
27
+ {
28
+ headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
29
+ method: "POST",
30
+ body: JSON.stringify(data),
31
+ }
32
+ );
33
+ const result = await response.json();
34
+ return result;
35
+ }
36
+
37
+ query({"inputs": ${getModelInputSnippet(
38
+ model
39
+ )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
40
+ console.log(JSON.stringify(response));
41
+ });`;
42
+
43
+ export const snippetTextToImage = (model: ModelData, accessToken: string): string =>
44
+ `async function query(data) {
45
+ const response = await fetch(
46
+ "https://api-inference.huggingface.co/models/${model.id}",
47
+ {
48
+ headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
49
+ method: "POST",
50
+ body: JSON.stringify(data),
51
+ }
52
+ );
53
+ const result = await response.blob();
54
+ return result;
55
+ }
56
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
57
+ // Use image
58
+ });`;
59
+
60
+ export const snippetTextToAudio = (model: ModelData, accessToken: string): string => {
61
+ const commonSnippet = `async function query(data) {
62
+ const response = await fetch(
63
+ "https://api-inference.huggingface.co/models/${model.id}",
64
+ {
65
+ headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
66
+ method: "POST",
67
+ body: JSON.stringify(data),
68
+ }
69
+ );`;
70
+ if (model.library_name === "transformers") {
71
+ return (
72
+ commonSnippet +
73
+ `
74
+ const result = await response.blob();
75
+ return result;
76
+ }
77
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
78
+ // Returns a byte object of the Audio wavform. Use it directly!
79
+ });`
80
+ );
81
+ } else {
82
+ return (
83
+ commonSnippet +
84
+ `
85
+ const result = await response.json();
86
+ return result;
87
+ }
88
+
89
+ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
90
+ console.log(JSON.stringify(response));
91
+ });`
92
+ );
93
+ }
94
+ };
95
+
96
+ export const snippetFile = (model: ModelData, accessToken: string): string =>
97
+ `async function query(filename) {
98
+ const data = fs.readFileSync(filename);
99
+ const response = await fetch(
100
+ "https://api-inference.huggingface.co/models/${model.id}",
101
+ {
102
+ headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
103
+ method: "POST",
104
+ body: data,
105
+ }
106
+ );
107
+ const result = await response.json();
108
+ return result;
109
+ }
110
+
111
+ query(${getModelInputSnippet(model)}).then((response) => {
112
+ console.log(JSON.stringify(response));
113
+ });`;
114
+
115
+ export const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
116
+ // Same order as in js/src/lib/interfaces/Types.ts
117
+ "text-classification": snippetBasic,
118
+ "token-classification": snippetBasic,
119
+ "table-question-answering": snippetBasic,
120
+ "question-answering": snippetBasic,
121
+ "zero-shot-classification": snippetZeroShotClassification,
122
+ translation: snippetBasic,
123
+ summarization: snippetBasic,
124
+ conversational: snippetBasic,
125
+ "feature-extraction": snippetBasic,
126
+ "text-generation": snippetBasic,
127
+ "text2text-generation": snippetBasic,
128
+ "fill-mask": snippetBasic,
129
+ "sentence-similarity": snippetBasic,
130
+ "automatic-speech-recognition": snippetFile,
131
+ "text-to-image": snippetTextToImage,
132
+ "text-to-speech": snippetTextToAudio,
133
+ "text-to-audio": snippetTextToAudio,
134
+ "audio-to-audio": snippetFile,
135
+ "audio-classification": snippetFile,
136
+ "image-classification": snippetFile,
137
+ "image-to-text": snippetFile,
138
+ "object-detection": snippetFile,
139
+ "image-segmentation": snippetFile,
140
+ };
141
+
142
+ export function getJsInferenceSnippet(model: ModelData, accessToken: string): string {
143
+ return model.pipeline_tag && model.pipeline_tag in jsSnippets
144
+ ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
145
+ : "";
146
+ }
147
+
148
+ export function hasJsInferenceSnippet(model: ModelData): boolean {
149
+ return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
150
+ }