@huggingface/tasks 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103) hide show
  1. package/assets/audio-classification/audio.wav +0 -0
  2. package/assets/audio-to-audio/input.wav +0 -0
  3. package/assets/audio-to-audio/label-0.wav +0 -0
  4. package/assets/audio-to-audio/label-1.wav +0 -0
  5. package/assets/automatic-speech-recognition/input.flac +0 -0
  6. package/assets/automatic-speech-recognition/wav2vec2.png +0 -0
  7. package/assets/contribution-guide/anatomy.png +0 -0
  8. package/assets/contribution-guide/libraries.png +0 -0
  9. package/assets/depth-estimation/depth-estimation-input.jpg +0 -0
  10. package/assets/depth-estimation/depth-estimation-output.png +0 -0
  11. package/assets/document-question-answering/document-question-answering-input.png +0 -0
  12. package/assets/image-classification/image-classification-input.jpeg +0 -0
  13. package/assets/image-segmentation/image-segmentation-input.jpeg +0 -0
  14. package/assets/image-segmentation/image-segmentation-output.png +0 -0
  15. package/assets/image-to-image/image-to-image-input.jpeg +0 -0
  16. package/assets/image-to-image/image-to-image-output.png +0 -0
  17. package/assets/image-to-image/pix2pix_examples.jpg +0 -0
  18. package/assets/image-to-text/savanna.jpg +0 -0
  19. package/assets/object-detection/object-detection-input.jpg +0 -0
  20. package/assets/object-detection/object-detection-output.jpg +0 -0
  21. package/assets/table-question-answering/tableQA.jpg +0 -0
  22. package/assets/text-to-image/image.jpeg +0 -0
  23. package/assets/text-to-speech/audio.wav +0 -0
  24. package/assets/text-to-video/text-to-video-output.gif +0 -0
  25. package/assets/unconditional-image-generation/unconditional-image-generation-output.jpeg +0 -0
  26. package/assets/video-classification/video-classification-input.gif +0 -0
  27. package/assets/visual-question-answering/elephant.jpeg +0 -0
  28. package/assets/zero-shot-image-classification/image-classification-input.jpeg +0 -0
  29. package/dist/index.cjs +3105 -0
  30. package/dist/index.d.cts +145 -0
  31. package/dist/index.d.ts +145 -0
  32. package/dist/index.js +3079 -0
  33. package/package.json +35 -0
  34. package/src/Types.ts +58 -0
  35. package/src/audio-classification/about.md +85 -0
  36. package/src/audio-classification/data.ts +77 -0
  37. package/src/audio-to-audio/about.md +55 -0
  38. package/src/audio-to-audio/data.ts +63 -0
  39. package/src/automatic-speech-recognition/about.md +86 -0
  40. package/src/automatic-speech-recognition/data.ts +77 -0
  41. package/src/const.ts +51 -0
  42. package/src/conversational/about.md +50 -0
  43. package/src/conversational/data.ts +62 -0
  44. package/src/depth-estimation/about.md +38 -0
  45. package/src/depth-estimation/data.ts +52 -0
  46. package/src/document-question-answering/about.md +54 -0
  47. package/src/document-question-answering/data.ts +67 -0
  48. package/src/feature-extraction/about.md +35 -0
  49. package/src/feature-extraction/data.ts +57 -0
  50. package/src/fill-mask/about.md +51 -0
  51. package/src/fill-mask/data.ts +77 -0
  52. package/src/image-classification/about.md +48 -0
  53. package/src/image-classification/data.ts +88 -0
  54. package/src/image-segmentation/about.md +63 -0
  55. package/src/image-segmentation/data.ts +96 -0
  56. package/src/image-to-image/about.md +81 -0
  57. package/src/image-to-image/data.ts +97 -0
  58. package/src/image-to-text/about.md +58 -0
  59. package/src/image-to-text/data.ts +87 -0
  60. package/src/index.ts +2 -0
  61. package/src/object-detection/about.md +36 -0
  62. package/src/object-detection/data.ts +73 -0
  63. package/src/placeholder/about.md +15 -0
  64. package/src/placeholder/data.ts +18 -0
  65. package/src/question-answering/about.md +56 -0
  66. package/src/question-answering/data.ts +69 -0
  67. package/src/reinforcement-learning/about.md +176 -0
  68. package/src/reinforcement-learning/data.ts +78 -0
  69. package/src/sentence-similarity/about.md +97 -0
  70. package/src/sentence-similarity/data.ts +100 -0
  71. package/src/summarization/about.md +57 -0
  72. package/src/summarization/data.ts +72 -0
  73. package/src/table-question-answering/about.md +43 -0
  74. package/src/table-question-answering/data.ts +63 -0
  75. package/src/tabular-classification/about.md +67 -0
  76. package/src/tabular-classification/data.ts +69 -0
  77. package/src/tabular-regression/about.md +91 -0
  78. package/src/tabular-regression/data.ts +58 -0
  79. package/src/tasksData.ts +104 -0
  80. package/src/text-classification/about.md +171 -0
  81. package/src/text-classification/data.ts +90 -0
  82. package/src/text-generation/about.md +128 -0
  83. package/src/text-generation/data.ts +124 -0
  84. package/src/text-to-image/about.md +65 -0
  85. package/src/text-to-image/data.ts +88 -0
  86. package/src/text-to-speech/about.md +63 -0
  87. package/src/text-to-speech/data.ts +70 -0
  88. package/src/text-to-video/about.md +36 -0
  89. package/src/text-to-video/data.ts +97 -0
  90. package/src/token-classification/about.md +78 -0
  91. package/src/token-classification/data.ts +83 -0
  92. package/src/translation/about.md +65 -0
  93. package/src/translation/data.ts +68 -0
  94. package/src/unconditional-image-generation/about.md +45 -0
  95. package/src/unconditional-image-generation/data.ts +66 -0
  96. package/src/video-classification/about.md +53 -0
  97. package/src/video-classification/data.ts +84 -0
  98. package/src/visual-question-answering/about.md +43 -0
  99. package/src/visual-question-answering/data.ts +90 -0
  100. package/src/zero-shot-classification/about.md +39 -0
  101. package/src/zero-shot-classification/data.ts +66 -0
  102. package/src/zero-shot-image-classification/about.md +68 -0
  103. package/src/zero-shot-image-classification/data.ts +79 -0
@@ -0,0 +1,72 @@
1
+ import type { TaskDataCustom } from "../Types";
2
+
3
+ const taskData: TaskDataCustom = {
4
+ datasets: [
5
+ {
6
+ description: "News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.",
7
+ id: "mlsum",
8
+ },
9
+ {
10
+ description: "English conversations and their summaries. Useful for benchmarking conversational agents.",
11
+ id: "samsum",
12
+ },
13
+ ],
14
+ demo: {
15
+ inputs: [
16
+ {
17
+ label: "Input",
18
+ content:
19
+ "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.",
20
+ type: "text",
21
+ },
22
+
23
+ ],
24
+ outputs: [
25
+ {
26
+ label: "Output",
27
+ content:
28
+ "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.",
29
+ type: "text",
30
+ },
31
+ ],
32
+ },
33
+ metrics: [
34
+ {
35
+ description: "The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.",
36
+ id: "rouge",
37
+ },
38
+ ],
39
+ models: [
40
+ {
41
+ description: "A strong summarization model trained on English news articles. Excels at generating factual summaries.",
42
+ id: "facebook/bart-large-cnn",
43
+ },
44
+ {
45
+ description: "A summarization model trained on medical articles.",
46
+ id: "google/bigbird-pegasus-large-pubmed",
47
+ },
48
+ ],
49
+ spaces: [
50
+ {
51
+ description: "An application that can summarize long paragraphs.",
52
+ id: "pszemraj/summarize-long-text",
53
+ },
54
+ {
55
+ description: "A much needed summarization application for terms and conditions.",
56
+ id: "ml6team/distilbart-tos-summarizer-tosdr",
57
+ },
58
+ {
59
+ description: "An application that summarizes long documents.",
60
+ id: "pszemraj/document-summarization",
61
+ },
62
+ {
63
+ description: "An application that can detect errors in abstractive summarization.",
64
+ id: "ml6team/post-processing-summarization",
65
+ },
66
+ ],
67
+ summary: "Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.",
68
+ widgetModels: ["sshleifer/distilbart-cnn-12-6"],
69
+ youtubeId: "yHnr5Dk2zCI",
70
+ };
71
+
72
+ export default taskData;
@@ -0,0 +1,43 @@
1
+ ## Use Cases
2
+
3
+ ### SQL execution
4
+
5
+ You can use the Table Question Answering models to simulate SQL execution by inputting a table.
6
+
7
+ ### Table Question Answering
8
+
9
+ Table Question Answering models are capable of answering questions based on a table.
10
+
11
+ ## Task Variants
12
+
13
+ This place can be filled with variants of this task if there's any.
14
+
15
+ ## Inference
16
+
17
+ You can infer with TableQA models using the 🤗 Transformers library.
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+ import pandas as pd
22
+
23
+ # prepare table + question
24
+ data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}
25
+ table = pd.DataFrame.from_dict(data)
26
+ question = "how many movies does Leonardo Di Caprio have?"
27
+
28
+ # pipeline model
29
+ # Note: you must install torch-scatter first.
30
+ tqa = pipeline(task="table-question-answering", model="google/tapas-large-finetuned-wtq")
31
+
32
+ # result
33
+
34
+ print(tqa(table=table, query=question)['cells'][0])
35
+ #53
36
+
37
+ ```
38
+
39
+ ## Useful Resources
40
+
41
+ In this area, you can insert useful resources about how to train or use a model for this task.
42
+
43
+ This task page is complete thanks to the efforts of [Hao Kim Tieu](https://huggingface.co/haotieu). 🦸
@@ -0,0 +1,63 @@
1
+ import type { TaskDataCustom } from "../Types";
2
+
3
+ const taskData: TaskDataCustom = {
4
+ datasets: [
5
+ {
6
+ description: "The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.",
7
+ id: "wikitablequestions",
8
+ },
9
+ {
10
+ description: "WikiSQL is a dataset of 80654 hand-annotated examples of questions and SQL queries distributed across 24241 tables from Wikipedia.",
11
+ id: "wikisql",
12
+ },
13
+ ],
14
+ demo: {
15
+ inputs: [
16
+ { table: [
17
+ ["Rank", "Name", "No.of reigns", "Combined days"],
18
+ ["1", "Lou Thesz", "3", "3749"],
19
+ ["2", "Ric Flair", "8", "3103"],
20
+ ["3", "Harley Race", "7", "1799"],
21
+ ],
22
+ type: "tabular" },
23
+
24
+ { label: "Question",
25
+ content: "What is the number of reigns for Harley Race?",
26
+ type: "text" },
27
+ ],
28
+ outputs: [
29
+ { label: "Result",
30
+ content: "7",
31
+ type: "text" },
32
+ ],
33
+ },
34
+ metrics:
35
+ [
36
+ { description: "Checks whether the predicted answer(s) is the same as the ground-truth answer(s).",
37
+ id: "Denotation Accuracy" },
38
+ ],
39
+ models:
40
+ [
41
+ {
42
+ description: "A table question answering model that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL query on a given table.",
43
+ id: "microsoft/tapex-base",
44
+ },
45
+ {
46
+ description: "A robust table question answering model.",
47
+ id: "google/tapas-base-finetuned-wtq",
48
+
49
+ },
50
+ ],
51
+ spaces: [
52
+ {
53
+ description: "An application that answers questions based on table CSV files.",
54
+ id: "katanaml/table-query",
55
+
56
+ },
57
+ ],
58
+ summary: "Table Question Answering (Table QA) is the task of answering a question about information in a given table.",
59
+ widgetModels: ["google/tapas-base-finetuned-wtq"],
60
+ };
61
+
62
+
63
+ export default taskData;
@@ -0,0 +1,67 @@
1
+ ## About the Task
2
+
3
+ Tabular classification is the task of assigning a label or class given a limited number of attributes. For example, the input can be data related to a customer (balance of the customer, the time being a customer, or more) and the output can be whether the customer will churn from the service or not.
4
+ There are three types of categorical variables:
5
+
6
+ - Binary variables: Variables that can take two values, like yes or no, open or closed. The task of predicting binary variables is called binary classification.
7
+ - Ordinal variables: Variables with a ranking relationship, e.g., good, insignificant, and bad product reviews. The task of predicting ordinal variables is called ordinal classification.
8
+ - Nominal variables: Variables with no ranking relationship among them, e.g., predicting an animal from their weight and height, where categories are cat, dog, or bird. The task of predicting nominal variables is called multinomial classification.
9
+
10
+ ## Use Cases
11
+
12
+ ### Fraud Detection
13
+ Tabular classification models can be used in detecting fraudulent credit card transactions, where the features could be the amount of the transaction and the account balance, and the target to predict could be whether the transaction is fraudulent or not. This is an example of binary classification.
14
+
15
+ ### Churn Prediction
16
+ Tabular classification models can be used in predicting customer churn in telecommunication. An example dataset for the task is hosted [here](https://huggingface.co/datasets/scikit-learn/churn-prediction).
17
+
18
+
19
+ # Model Hosting and Inference
20
+
21
+ You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:
22
+ - Easily use inference API,
23
+ - Build neat UIs with one line of code,
24
+ - Programmatically create model cards,
25
+ - Securely serialize your scikit-learn model. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)
26
+
27
+ You can push your model as follows:
28
+
29
+ ```python
30
+ from skops import hub_utils
31
+ # initialize a repository with a trained model
32
+ local_repo = "/path_to_new_repo"
33
+ hub_utils.init(model, dst=local_repo)
34
+ # push to Hub!
35
+ hub_utils.push("username/my-awesome-model", source=local_repo)
36
+ ```
37
+
38
+ Once the model is pushed, you can infer easily.
39
+
40
+ ```python
41
+ import skops.hub_utils as hub_utils
42
+ import pandas as pd
43
+ data = pd.DataFrame(your_data)
44
+ # Load the model from the Hub
45
+ res = hub_utils.get_model_output("username/my-awesome-model", data)
46
+ ```
47
+
48
+ You can launch a UI for your model with only one line of code!
49
+
50
+ ```python
51
+ import gradio as gr
52
+ gr.Interface.load("huggingface/username/my-awesome-model").launch()
53
+ ```
54
+
55
+
56
+ ## Useful Resources
57
+
58
+ - Check out the [scikit-learn organization](https://huggingface.co/scikit-learn) to learn more about different algorithms used for this task.
59
+ - [Skops documentation](https://skops.readthedocs.io/en/latest/)
60
+ - [Skops announcement blog](https://huggingface.co/blog/skops)
61
+ - [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
62
+ - Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.
63
+
64
+
65
+ ### Training your own model in just a few seconds
66
+
67
+ We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.
@@ -0,0 +1,69 @@
1
+ import type { TaskDataCustom } from "../Types";
2
+
3
+ const taskData: TaskDataCustom = {
4
+ datasets: [
5
+ {
6
+ description: "A comprehensive curation of datasets covering all benchmarks.",
7
+ id: "inria-soda/tabular-benchmark",
8
+ },
9
+ ],
10
+ demo: {
11
+ inputs: [
12
+ {
13
+ table: [
14
+ ["Glucose", "Blood Pressure ", "Skin Thickness", "Insulin", "BMI"],
15
+ ["148", "72", "35", "0", "33.6"],
16
+ ["150", "50", "30", "0", "35.1"],
17
+ ["141", "60", "29", "1", "39.2"],
18
+ ],
19
+ type: "tabular",
20
+ },
21
+ ],
22
+ outputs: [
23
+ {
24
+ table: [["Diabetes"], ["1"], ["1"], ["0"]],
25
+ type: "tabular",
26
+ },
27
+ ],
28
+ },
29
+ metrics: [
30
+ {
31
+ description: "",
32
+ id: "accuracy",
33
+ },
34
+ {
35
+ description: "",
36
+ id: "recall",
37
+ },
38
+ {
39
+ description: "",
40
+ id: "precision",
41
+ },
42
+ {
43
+ description: "",
44
+ id: "f1",
45
+ },
46
+ ],
47
+ models: [
48
+ {
49
+ description: "Breast cancer prediction model based on decision trees.",
50
+ id: "scikit-learn/cancer-prediction-trees",
51
+ },
52
+ ],
53
+ spaces: [
54
+ {
55
+ description: "An application that can predict defective products on a production line.",
56
+ id: "scikit-learn/tabular-playground",
57
+ },
58
+ {
59
+ description: "An application that compares various tabular classification techniques on different datasets.",
60
+ id: "scikit-learn/classification",
61
+ },
62
+ ],
63
+ summary:
64
+ "Tabular classification is the task of classifying a target category (a group) based on set of attributes.",
65
+ widgetModels: ["scikit-learn/tabular-playground"],
66
+ youtubeId: "",
67
+ };
68
+
69
+ export default taskData;
@@ -0,0 +1,91 @@
1
+ ## About the Task
2
+
3
+ Tabular regression is the task of predicting a numerical value given a set of attributes/features. *Tabular* meaning that data is stored in a table (like an excel sheet), and each sample is contained in its own row. The features used to predict our target can be both numerical and categorical. However, including categorical features often requires additional preprocessing/feature engineering (a few models do accept categorical features directly, like [CatBoost](https://catboost.ai/)). An example of tabular regression would be predicting the weight of a fish given its species and length.
4
+
5
+ ## Use Cases
6
+
7
+ ### Sales Prediction: a Use Case for Predicting a Continuous Target Variable
8
+ Here the objective is to predict a continuous variable based on a set of input variable(s). For example, predicting `sales` of an ice cream shop based on `temperature` of weather and `duration of hours` shop was open. Here we can build a regression model with `temperature` and `duration of hours` as input variable and `sales` as target variable.
9
+
10
+ ### Missing Value Imputation for Other Tabular Tasks
11
+ In real-world applications, due to human error or other reasons, some of the input values can be missing or there might not be any recorded data. Considering the example above, say the shopkeeper's watch was broken and they forgot to calculate the `hours` for which the shop was open. This will lead to a missing value in their dataset. In this case, missing values could be replaced with zero, or with the average hours for which the shop is kept open. Another approach we can try is to use `temperature` and `sales` variables to predict the `hours` variable here.
12
+
13
+
14
+
15
+ ## Model Training
16
+
17
+ A simple regression model can be created using `sklearn` as follows:
18
+
19
+ ```python
20
+ #set the input features
21
+ X = data[["Feature 1", "Feature 2", "Feature 3"]]
22
+ #set the target variable
23
+ y = data["Target Variable"]
24
+ #initialize the model
25
+ model = LinearRegression()
26
+ #Fit the model
27
+ model.fit(X, y)
28
+ ```
29
+
30
+
31
+ # Model Hosting and Inference
32
+
33
+ You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:
34
+ - Easily use inference API,
35
+ - Build neat UIs with one line of code,
36
+ - Programmatically create model cards,
37
+ - Securely serialize your models. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)
38
+
39
+ You can push your model as follows:
40
+
41
+ ```python
42
+ from skops import hub_utils
43
+ # initialize a repository with a trained model
44
+ local_repo = "/path_to_new_repo"
45
+ hub_utils.init(model, dst=local_repo)
46
+ # push to Hub!
47
+ hub_utils.push("username/my-awesome-model", source=local_repo)
48
+ ```
49
+
50
+ Once the model is pushed, you can infer easily.
51
+
52
+ ```python
53
+ import skops.hub_utils as hub_utils
54
+ import pandas as pd
55
+ data = pd.DataFrame(your_data)
56
+ # Load the model from the Hub
57
+ res = hub_utils.get_model_output("username/my-awesome-model", data)
58
+ ```
59
+
60
+ You can launch a UI for your model with only one line of code!
61
+
62
+ ```python
63
+ import gradio as gr
64
+ gr.Interface.load("huggingface/username/my-awesome-model").launch()
65
+ ```
66
+
67
+ ## Useful Resources
68
+
69
+ - [Skops documentation](https://skops.readthedocs.io/en/stable/index.html)
70
+
71
+ - Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.
72
+ - [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
73
+
74
+
75
+ - For starting with tabular regression:
76
+ - Doing [Exploratory Data Analysis](https://neptune.ai/blog/exploratory-data-analysis-for-tabular-data) for tabular data.
77
+ - The data considered here consists of details of Olympic athletes and medal results from Athens 1896 to Rio 2016.
78
+ - Here you can learn more about how to explore and analyse the data and visualize them in order to get a better understanding of dataset.
79
+ - Building your [first ML model](https://www.kaggle.com/code/dansbecker/your-first-machine-learning-model).
80
+
81
+ - Intermediate level tutorials on tabular regression:
82
+ - [A Short Chronology of Deep Learning for Tabular Data](https://sebastianraschka.com/blog/2022/deep-learning-for-tabular-data.html) by Sebastian Raschka.
83
+
84
+
85
+
86
+ ### Training your own model in just a few seconds
87
+
88
+ We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.
89
+
90
+ This page was made possible thanks to efforts of [Brenden Connors](https://huggingface.co/brendenc) and [Ayush Bihani](https://huggingface.co/hsuyab).
91
+
@@ -0,0 +1,58 @@
1
+ import type { TaskDataCustom } from "../Types";
2
+
3
+ const taskData: TaskDataCustom = {
4
+ datasets: [
5
+ {
6
+ description: "A comprehensive curation of datasets covering all benchmarks.",
7
+ id: "inria-soda/tabular-benchmark",
8
+ },
9
+ ],
10
+ demo: {
11
+ inputs: [
12
+ {
13
+ table: [
14
+ ["Car Name", "Horsepower", "Weight"],
15
+ ["ford torino", "140", "3,449"],
16
+ ["amc hornet", "97", "2,774"],
17
+ ["toyota corolla", "65", "1,773"],
18
+
19
+ ],
20
+ type: "tabular",
21
+ },
22
+ ],
23
+ outputs: [
24
+ {
25
+ table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]],
26
+ type: "tabular",
27
+ },
28
+ ],
29
+ },
30
+ metrics: [
31
+ {
32
+ description: "",
33
+ id: "mse",
34
+ },
35
+ {
36
+ description: "Coefficient of determination (or R-squared) is a measure of how well the model fits the data. Higher R-squared is considered a better fit.",
37
+ id: "r-squared",
38
+ },
39
+ ],
40
+ models: [
41
+ {
42
+ description: "Fish weight prediction based on length measurements and species.",
43
+ id: "scikit-learn/Fish-Weight",
44
+ },
45
+ ],
46
+ spaces: [
47
+ {
48
+ description: "An application that can predict weight of a fish based on set of attributes.",
49
+ id: "scikit-learn/fish-weight-prediction",
50
+ },
51
+ ],
52
+ summary:
53
+ "Tabular regression is the task of predicting a numerical value given a set of attributes.",
54
+ widgetModels: ["scikit-learn/Fish-Weight"],
55
+ youtubeId: "",
56
+ };
57
+
58
+ export default taskData;
@@ -0,0 +1,104 @@
1
+ import { type PipelineType, PIPELINE_DATA } from "../../js/src/lib/interfaces/Types";
2
+ import type { TaskDataCustom, TaskData } from "./Types";
3
+
4
+ import audioClassification from "./audio-classification/data";
5
+ import audioToAudio from "./audio-to-audio/data";
6
+ import automaticSpeechRecognition from "./automatic-speech-recognition/data";
7
+ import conversational from "./conversational/data";
8
+ import documentQuestionAnswering from "./document-question-answering/data";
9
+ import featureExtraction from "./feature-extraction/data";
10
+ import fillMask from "./fill-mask/data";
11
+ import imageClassification from "./image-classification/data";
12
+ import imageToImage from "./image-to-image/data";
13
+ import imageToText from "./image-to-text/data";
14
+ import imageSegmentation from "./image-segmentation/data";
15
+ import objectDetection from "./object-detection/data";
16
+ import depthEstimation from "./depth-estimation/data";
17
+ import placeholder from "./placeholder/data";
18
+ import reinforcementLearning from "./reinforcement-learning/data";
19
+ import questionAnswering from "./question-answering/data";
20
+ import sentenceSimilarity from "./sentence-similarity/data";
21
+ import summarization from "./summarization/data";
22
+ import tableQuestionAnswering from "./table-question-answering/data";
23
+ import tabularClassification from "./tabular-classification/data";
24
+ import tabularRegression from "./tabular-regression/data";
25
+ import textToImage from "./text-to-image/data";
26
+ import textToSpeech from "./text-to-speech/data";
27
+ import tokenClassification from "./token-classification/data";
28
+ import translation from "./translation/data";
29
+ import textClassification from "./text-classification/data";
30
+ import textGeneration from "./text-generation/data";
31
+ import textToVideo from "./text-to-video/data";
32
+ import unconditionalImageGeneration from "./unconditional-image-generation/data";
33
+ import videoClassification from "./video-classification/data";
34
+ import visualQuestionAnswering from "./visual-question-answering/data";
35
+ import zeroShotClassification from "./zero-shot-classification/data";
36
+ import zeroShotImageClassification from "./zero-shot-image-classification/data";
37
+ import { TASKS_MODEL_LIBRARIES } from "./const";
38
+
39
+ // To make comparisons easier, task order is the same as in const.ts
40
+ // Tasks set to undefined won't have an associated task page.
41
+ // Tasks that call getData() without the second argument will
42
+ // have a "placeholder" page.
43
+ export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
44
+ "audio-classification": getData("audio-classification", audioClassification),
45
+ "audio-to-audio": getData("audio-to-audio", audioToAudio),
46
+ "automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition),
47
+ "conversational": getData("conversational", conversational),
48
+ "depth-estimation": getData("depth-estimation", depthEstimation),
49
+ "document-question-answering": getData("document-question-answering", documentQuestionAnswering),
50
+ "feature-extraction": getData("feature-extraction", featureExtraction),
51
+ "fill-mask": getData("fill-mask", fillMask),
52
+ "graph-ml": undefined,
53
+ "image-classification": getData("image-classification", imageClassification),
54
+ "image-segmentation": getData("image-segmentation", imageSegmentation),
55
+ "image-to-image": getData("image-to-image", imageToImage),
56
+ "image-to-text": getData("image-to-text", imageToText),
57
+ "multiple-choice": undefined,
58
+ "object-detection": getData("object-detection", objectDetection),
59
+ "video-classification": getData("video-classification", videoClassification),
60
+ "other": undefined,
61
+ "question-answering": getData("question-answering", questionAnswering),
62
+ "reinforcement-learning": getData("reinforcement-learning", reinforcementLearning),
63
+ "robotics": undefined,
64
+ "sentence-similarity": getData("sentence-similarity", sentenceSimilarity),
65
+ "summarization": getData("summarization", summarization),
66
+ "table-question-answering": getData("table-question-answering", tableQuestionAnswering),
67
+ "table-to-text": undefined,
68
+ "tabular-classification": getData("tabular-classification", tabularClassification),
69
+ "tabular-regression": getData("tabular-regression", tabularRegression),
70
+ "tabular-to-text": undefined,
71
+ "text-classification": getData("text-classification", textClassification),
72
+ "text-generation": getData("text-generation", textGeneration),
73
+ "text-retrieval": undefined,
74
+ "text-to-image": getData("text-to-image", textToImage),
75
+ "text-to-speech": getData("text-to-speech", textToSpeech),
76
+ "text-to-audio": undefined,
77
+ "text-to-video": getData("text-to-video", textToVideo),
78
+ "text2text-generation": undefined,
79
+ "time-series-forecasting": undefined,
80
+ "token-classification": getData("token-classification", tokenClassification),
81
+ "translation": getData("translation", translation),
82
+ "unconditional-image-generation": getData("unconditional-image-generation", unconditionalImageGeneration),
83
+ "visual-question-answering": getData("visual-question-answering", visualQuestionAnswering),
84
+ "voice-activity-detection": undefined,
85
+ "zero-shot-classification": getData("zero-shot-classification", zeroShotClassification),
86
+ "zero-shot-image-classification": getData("zero-shot-image-classification", zeroShotImageClassification),
87
+ } as const;
88
+
89
+ /*
90
+ * Return the whole TaskData object for a certain task.
91
+ * If the partialTaskData argument is left undefined,
92
+ * the default placeholder data will be used.
93
+ */
94
+ function getData(
95
+ type: PipelineType,
96
+ partialTaskData: TaskDataCustom = placeholder
97
+ ): TaskData {
98
+ return {
99
+ ...partialTaskData,
100
+ id: type,
101
+ label: PIPELINE_DATA[type].name,
102
+ libraries: TASKS_MODEL_LIBRARIES[type],
103
+ };
104
+ }