dslighting 1.7.1__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dslighting/__init__.py +1 -1
- dslighting/core/agent.py +78 -62
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/METADATA +1 -1
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/RECORD +352 -7
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/top_level.txt +1 -0
- mlebench/README.md +39 -0
- mlebench/__init__.py +0 -0
- mlebench/cli.py +221 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py +161 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py +425 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare.py +483 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare_val.py +719 -0
- mlebench/competitions/AI4Code/grade.py +70 -0
- mlebench/competitions/AI4Code/prepare.py +84 -0
- mlebench/competitions/AI4Code/prepare_val.py +159 -0
- mlebench/competitions/__init__.py +0 -0
- mlebench/competitions/aerial-cactus-identification/grade.py +11 -0
- mlebench/competitions/aerial-cactus-identification/prepare.py +71 -0
- mlebench/competitions/aerial-cactus-identification/prepare_val.py +133 -0
- mlebench/competitions/alaska2-image-steganalysis/grade.py +136 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare.py +88 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare_val.py +148 -0
- mlebench/competitions/aptos2019-blindness-detection/grade.py +35 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare.py +75 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare_val.py +123 -0
- mlebench/competitions/bike-sharing-demand/__init__.py +0 -0
- mlebench/competitions/bike-sharing-demand/grade.py +55 -0
- mlebench/competitions/bike-sharing-demand/prepare.py +37 -0
- mlebench/competitions/billion-word-imputation/grade.py +37 -0
- mlebench/competitions/billion-word-imputation/prepare.py +107 -0
- mlebench/competitions/billion-word-imputation/prepare_val.py +179 -0
- mlebench/competitions/bms-molecular-translation/grade.py +40 -0
- mlebench/competitions/bms-molecular-translation/prepare.py +68 -0
- mlebench/competitions/bms-molecular-translation/prepare_val.py +131 -0
- mlebench/competitions/cassava-leaf-disease-classification/grade.py +12 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare.py +113 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare_val.py +186 -0
- mlebench/competitions/cdiscount-image-classification-challenge/grade.py +11 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare.py +144 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare_val.py +205 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/grade.py +67 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare.py +31 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare_val.py +94 -0
- mlebench/competitions/champs-scalar-coupling/grade.py +60 -0
- mlebench/competitions/champs-scalar-coupling/prepare.py +116 -0
- mlebench/competitions/champs-scalar-coupling/prepare_val.py +155 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/__init__.py +0 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/grade.py +40 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/prepare.py +41 -0
- mlebench/competitions/demand-forecasting-kernels-only/__init__.py +0 -0
- mlebench/competitions/demand-forecasting-kernels-only/grade.py +66 -0
- mlebench/competitions/demand-forecasting-kernels-only/prepare.py +27 -0
- mlebench/competitions/demand_forecasting_kernels_only/__init__.py +0 -0
- mlebench/competitions/demand_forecasting_kernels_only/grade.py +66 -0
- mlebench/competitions/demand_forecasting_kernels_only/prepare.py +27 -0
- mlebench/competitions/denoising-dirty-documents/grade.py +44 -0
- mlebench/competitions/denoising-dirty-documents/prepare.py +134 -0
- mlebench/competitions/denoising-dirty-documents/prepare_val.py +178 -0
- mlebench/competitions/detecting-insults-in-social-commentary/grade.py +11 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare.py +72 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare_val.py +128 -0
- mlebench/competitions/dog-breed-identification/dogs.py +124 -0
- mlebench/competitions/dog-breed-identification/grade.py +42 -0
- mlebench/competitions/dog-breed-identification/prepare.py +55 -0
- mlebench/competitions/dog-breed-identification/prepare_val.py +104 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/grade.py +43 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare.py +70 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare_val.py +143 -0
- mlebench/competitions/ethanol-concentration/grade.py +23 -0
- mlebench/competitions/ethanol-concentration/prepare.py +90 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py +60 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py +41 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py +92 -0
- mlebench/competitions/feedback-prize-english-language-learning/__init__.py +0 -0
- mlebench/competitions/feedback-prize-english-language-learning/grade.py +60 -0
- mlebench/competitions/feedback-prize-english-language-learning/prepare.py +39 -0
- mlebench/competitions/freesound-audio-tagging-2019/grade.py +64 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare.py +94 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py +175 -0
- mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py +83 -0
- mlebench/competitions/google-quest-challenge/classes.py +32 -0
- mlebench/competitions/google-quest-challenge/grade.py +45 -0
- mlebench/competitions/google-quest-challenge/prepare.py +58 -0
- mlebench/competitions/google-quest-challenge/prepare_val.py +120 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py +77 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare.py +155 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare_val.py +211 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/grade.py +42 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare.py +102 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare_val.py +132 -0
- mlebench/competitions/handwriting/grade.py +23 -0
- mlebench/competitions/handwriting/prepare.py +179 -0
- mlebench/competitions/herbarium-2020-fgvc7/grade.py +34 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare.py +251 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare_val.py +242 -0
- mlebench/competitions/herbarium-2021-fgvc8/grade.py +34 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare.py +251 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare_val.py +222 -0
- mlebench/competitions/herbarium-2022-fgvc9/grade.py +31 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare.py +233 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare_val.py +213 -0
- mlebench/competitions/histopathologic-cancer-detection/grade.py +12 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare.py +59 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare_val.py +131 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/constants.py +9 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/grade.py +43 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kaggle_metric_utilities.py +96 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kullback_leibler_divergence.py +118 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare.py +121 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare_val.py +190 -0
- mlebench/competitions/hotel-id-2021-fgvc8/grade.py +41 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare.py +63 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare_val.py +132 -0
- mlebench/competitions/hubmap-kidney-segmentation/grade.py +62 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare.py +108 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare_val.py +153 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/grade.py +111 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare.py +127 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare_val.py +183 -0
- mlebench/competitions/ili/grade.py +60 -0
- mlebench/competitions/ili/prepare.py +99 -0
- mlebench/competitions/imet-2020-fgvc7/grade.py +54 -0
- mlebench/competitions/imet-2020-fgvc7/prepare.py +77 -0
- mlebench/competitions/imet-2020-fgvc7/prepare_val.py +157 -0
- mlebench/competitions/inaturalist-2019-fgvc6/grade.py +35 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare.py +259 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare_val.py +304 -0
- mlebench/competitions/instant-gratification/__init__.py +0 -0
- mlebench/competitions/instant-gratification/grade.py +55 -0
- mlebench/competitions/instant-gratification/prepare.py +25 -0
- mlebench/competitions/instant_gratification/__init__.py +0 -0
- mlebench/competitions/instant_gratification/grade.py +55 -0
- mlebench/competitions/instant_gratification/prepare.py +25 -0
- mlebench/competitions/invasive-species-monitoring/grade.py +11 -0
- mlebench/competitions/invasive-species-monitoring/prepare.py +97 -0
- mlebench/competitions/invasive-species-monitoring/prepare_val.py +164 -0
- mlebench/competitions/iwildcam-2019-fgvc6/grade.py +44 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare.py +118 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare_val.py +194 -0
- mlebench/competitions/iwildcam-2020-fgvc7/grade.py +11 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare.py +164 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare_val.py +245 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/classes.py +1 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/grade.py +54 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare.py +42 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare_val.py +88 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/grade.py +153 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare.py +36 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare_val.py +117 -0
- mlebench/competitions/kuzushiji-recognition/grade.py +58 -0
- mlebench/competitions/kuzushiji-recognition/kuzushiji_metric.py +118 -0
- mlebench/competitions/kuzushiji-recognition/prepare.py +92 -0
- mlebench/competitions/kuzushiji-recognition/prepare_val.py +149 -0
- mlebench/competitions/leaf-classification/classes.py +101 -0
- mlebench/competitions/leaf-classification/grade.py +44 -0
- mlebench/competitions/leaf-classification/prepare.py +60 -0
- mlebench/competitions/leaf-classification/prepare_val.py +116 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/grade.py +44 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare.py +51 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare_val.py +96 -0
- mlebench/competitions/liverpool-ion-switching/__init__.py +0 -0
- mlebench/competitions/liverpool-ion-switching/grade.py +52 -0
- mlebench/competitions/liverpool-ion-switching/prepare.py +27 -0
- mlebench/competitions/liverpool_ion_switching/__init__.py +0 -0
- mlebench/competitions/liverpool_ion_switching/grade.py +52 -0
- mlebench/competitions/liverpool_ion_switching/prepare.py +27 -0
- mlebench/competitions/lmsys-chatbot-arena/grade.py +63 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare.py +52 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare_val.py +115 -0
- mlebench/competitions/mcm_2024_c_test/grade.py +107 -0
- mlebench/competitions/mcm_2024_c_test/prepare.py +2 -0
- mlebench/competitions/ml2021spring-hw2/grade.py +11 -0
- mlebench/competitions/ml2021spring-hw2/prepare.py +58 -0
- mlebench/competitions/ml2021spring-hw2/prepare_val.py +135 -0
- mlebench/competitions/mlsp-2013-birds/grade.py +11 -0
- mlebench/competitions/mlsp-2013-birds/prepare.py +182 -0
- mlebench/competitions/mlsp-2013-birds/prepare_val.py +241 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/grade.py +11 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare.py +58 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare_val.py +120 -0
- mlebench/competitions/multi-modal-gesture-recognition/grade.py +58 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare.py +85 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare_val.py +139 -0
- mlebench/competitions/my-custom-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-03/grade.py +107 -0
- mlebench/competitions/new-my-task-03/prepare.py +2 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/grade.py +28 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare.py +44 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare_val.py +89 -0
- mlebench/competitions/nfl-player-contact-detection/grade.py +36 -0
- mlebench/competitions/nfl-player-contact-detection/prepare.py +101 -0
- mlebench/competitions/nfl-player-contact-detection/prepare_val.py +186 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/grade.py +47 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare.py +77 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare_val.py +144 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/grade.py +74 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare.py +95 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare_val.py +167 -0
- mlebench/competitions/paddy-disease-classification/grade.py +35 -0
- mlebench/competitions/paddy-disease-classification/prepare.py +69 -0
- mlebench/competitions/paddy-disease-classification/prepare_val.py +122 -0
- mlebench/competitions/petfinder-pawpularity-score/grade.py +41 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare.py +76 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare_val.py +154 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/grade.py +41 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare.py +74 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare_val.py +160 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/grade.py +54 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare.py +65 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare_val.py +130 -0
- mlebench/competitions/plant-seedlings-classification/grade.py +39 -0
- mlebench/competitions/plant-seedlings-classification/prepare.py +91 -0
- mlebench/competitions/plant-seedlings-classification/prepare_val.py +158 -0
- mlebench/competitions/playground-series-s3e1/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e1/grade.py +52 -0
- mlebench/competitions/playground-series-s3e1/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e11/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e11/grade.py +55 -0
- mlebench/competitions/playground-series-s3e11/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e18/grade.py +39 -0
- mlebench/competitions/playground-series-s3e18/prepare.py +36 -0
- mlebench/competitions/playground-series-s3e18/prepare_val.py +89 -0
- mlebench/competitions/playground_series_s3e1/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e1/grade.py +52 -0
- mlebench/competitions/playground_series_s3e1/prepare.py +25 -0
- mlebench/competitions/playground_series_s3e11/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e11/grade.py +55 -0
- mlebench/competitions/playground_series_s3e11/prepare.py +25 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/grade.py +44 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare.py +68 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare_val.py +146 -0
- mlebench/competitions/random-acts-of-pizza/grade.py +14 -0
- mlebench/competitions/random-acts-of-pizza/prepare.py +80 -0
- mlebench/competitions/random-acts-of-pizza/prepare_val.py +144 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/classes.py +11 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/grade.py +31 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare.py +53 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare_val.py +113 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/grade.py +124 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare.py +219 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare_val.py +257 -0
- mlebench/competitions/rsna-breast-cancer-detection/grade.py +65 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare.py +141 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare_val.py +201 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/grade.py +13 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare.py +47 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare_val.py +97 -0
- mlebench/competitions/santander-customer-satisfaction/grade.py +10 -0
- mlebench/competitions/santander-customer-satisfaction/prepare.py +41 -0
- mlebench/competitions/sciencebench-001-clintox-nn/__init__.py +0 -0
- mlebench/competitions/sciencebench-001-clintox-nn/grade.py +56 -0
- mlebench/competitions/sciencebench-001-clintox-nn/prepare.py +75 -0
- mlebench/competitions/sciencebench-015-aai/grade.py +37 -0
- mlebench/competitions/sciencebench-015-aai/prepare.py +102 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/grade.py +58 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/prepare.py +69 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/grade.py +55 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/prepare.py +88 -0
- mlebench/competitions/see-click-predict-fix/__init__.py +0 -0
- mlebench/competitions/see-click-predict-fix/grade.py +66 -0
- mlebench/competitions/see-click-predict-fix/prepare.py +25 -0
- mlebench/competitions/see_click_predict_fix/__init__.py +0 -0
- mlebench/competitions/see_click_predict_fix/grade.py +66 -0
- mlebench/competitions/see_click_predict_fix/prepare.py +25 -0
- mlebench/competitions/seti-breakthrough-listen/grade.py +11 -0
- mlebench/competitions/seti-breakthrough-listen/prepare.py +71 -0
- mlebench/competitions/seti-breakthrough-listen/prepare_val.py +159 -0
- mlebench/competitions/siim-covid19-detection/grade.py +194 -0
- mlebench/competitions/siim-covid19-detection/prepare.py +123 -0
- mlebench/competitions/siim-covid19-detection/prepare_val.py +164 -0
- mlebench/competitions/siim-isic-melanoma-classification/grade.py +11 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare.py +127 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare_val.py +158 -0
- mlebench/competitions/smartphone-decimeter-2022/grade.py +55 -0
- mlebench/competitions/smartphone-decimeter-2022/notebook.py +86 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare.py +143 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare_val.py +199 -0
- mlebench/competitions/spaceship-titanic/grade.py +11 -0
- mlebench/competitions/spaceship-titanic/prepare.py +23 -0
- mlebench/competitions/spaceship-titanic/prepare_val.py +61 -0
- mlebench/competitions/spooky-author-identification/classes.py +1 -0
- mlebench/competitions/spooky-author-identification/grade.py +38 -0
- mlebench/competitions/spooky-author-identification/prepare.py +40 -0
- mlebench/competitions/spooky-author-identification/prepare_val.py +78 -0
- mlebench/competitions/stanford-covid-vaccine/grade.py +65 -0
- mlebench/competitions/stanford-covid-vaccine/prepare.py +129 -0
- mlebench/competitions/stanford-covid-vaccine/prepare_val.py +199 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/grade.py +41 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare.py +105 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare_val.py +157 -0
- mlebench/competitions/tabular-playground-series-dec-2021/grade.py +11 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare.py +39 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare_val.py +99 -0
- mlebench/competitions/tabular-playground-series-may-2022/grade.py +9 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare.py +56 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare_val.py +116 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/grade.py +11 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare.py +90 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare_val.py +148 -0
- mlebench/competitions/tensorflow2-question-answering/grade.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare_val.py +187 -0
- mlebench/competitions/text-normalization-challenge-english-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare.py +115 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare_val.py +213 -0
- mlebench/competitions/text-normalization-challenge-russian-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare.py +113 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare_val.py +165 -0
- mlebench/competitions/tgs-salt-identification-challenge/grade.py +144 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare.py +158 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py +166 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py +11 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py +95 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py +141 -0
- mlebench/competitions/tmdb-box-office-prediction/__init__.py +0 -0
- mlebench/competitions/tmdb-box-office-prediction/grade.py +55 -0
- mlebench/competitions/tmdb-box-office-prediction/prepare.py +35 -0
- mlebench/competitions/tweet-sentiment-extraction/grade.py +67 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare.py +36 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare_val.py +106 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/grade.py +31 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare.py +33 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare_val.py +71 -0
- mlebench/competitions/utils.py +266 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/grade.py +158 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare.py +139 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare_val.py +193 -0
- mlebench/competitions/ventilator-pressure-prediction/__init__.py +0 -0
- mlebench/competitions/ventilator-pressure-prediction/grade.py +52 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare.py +27 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare_val.py +142 -0
- mlebench/competitions/ventilator_pressure_prediction/__init__.py +0 -0
- mlebench/competitions/ventilator_pressure_prediction/grade.py +52 -0
- mlebench/competitions/ventilator_pressure_prediction/prepare.py +27 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/grade.py +97 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare.py +122 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare_val.py +170 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/grade.py +220 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare.py +129 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare_val.py +204 -0
- mlebench/competitions/whale-categorization-playground/grade.py +41 -0
- mlebench/competitions/whale-categorization-playground/prepare.py +103 -0
- mlebench/competitions/whale-categorization-playground/prepare_val.py +196 -0
- mlebench/data.py +420 -0
- mlebench/grade.py +209 -0
- mlebench/grade_helpers.py +235 -0
- mlebench/metrics.py +75 -0
- mlebench/registry.py +332 -0
- mlebench/utils.py +346 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/WHEEL +0 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/entry_points.txt +0 -0
mlebench/competitions/ethanol-concentration/prepare.py
@@ -0,0 +1,90 @@
+import pandas as pd
+import numpy as np
+import sys
+from pathlib import Path
+
+
+def prepare(raw: Path, public: Path, private: Path):
+    """
+    Prepare the ethanol-concentration dataset for the benchmark.
+
+    This function converts the .ts time series files to numpy arrays,
+    so that the public directory only contains data, not data-loading code.
+
+    Args:
+        raw: Path to raw data directory (contains EthanolConcentration_TRAIN.ts and EthanolConcentration_TEST.ts)
+        public: Path to public directory (visible to participants)
+        private: Path to private directory (hidden from participants, used for grading)
+    """
+    # Load test data to extract labels for grading
+    try:
+        # Add raw directory to path temporarily to import dataset module
+        sys.path.insert(0, str(raw))
+
+        # Use the local dataset module to load data
+        from dataset import get_dataset
+
+        # Change to raw directory to load data
+        import os
+        original_dir = os.getcwd()
+        os.chdir(str(raw))
+
+        try:
+            # Load train and test datasets
+            X_train, y_train = get_dataset('TRAIN')
+            X_test, y_test = get_dataset('TEST')
+
+            print(f"Loaded training data: X_train.shape = {X_train.shape}, y_train.shape = {y_train.shape}")
+            print(f"Loaded test data: X_test.shape = {X_test.shape}, y_test.shape = {y_test.shape}")
+            print(f"Number of unique labels: {len(np.unique(y_train))}")
+
+            # Save training data as numpy arrays in public directory
+            np.save(public / "train_data.npy", X_train)
+            np.save(public / "train_labels.npy", y_train.flatten())
+
+            # Save test data (without labels) in public directory
+            np.save(public / "test_data.npy", X_test)
+
+            # Create test labels dataframe for grading (private)
+            test_labels_df = pd.DataFrame(
+                {"id": range(len(y_test)), "label": y_test.flatten()}
+            )
+            test_labels_df.to_csv(private / "test_labels.csv", index=False)
+
+            # Create sample submission file
+            sample_submission = pd.DataFrame(
+                {"id": range(len(y_test)), "label": 0}  # Default to class 0
+            )
+            sample_submission.to_csv(public / "sample_submission.csv", index=False)
+
+            print("Data preparation completed:")
+            print(f"  - Training: {len(X_train)} samples")
+            print(f"  - Test: {len(X_test)} samples")
+            print(f"  - Sequence length: {X_train.shape[1]}")
+            print(f"  - Feature dimension: {X_train.shape[2]}")
+            print(f"  - Number of classes: {len(np.unique(y_train))}")
+
+        finally:
+            os.chdir(original_dir)
+            sys.path.remove(str(raw))
+
+    except Exception as e:
+        print(f"Error loading test labels: {e}")
+        import traceback
+        traceback.print_exc()
+        # Fallback: create dummy files if loading fails
+        print("Creating dummy submission files...")
+        dummy_df = pd.DataFrame({"id": [0], "label": [0]})
+        dummy_df.to_csv(private / "test_labels.csv", index=False)
+        dummy_df.to_csv(public / "sample_submission.csv", index=False)
+
+    # Validation checks
+    assert (public / "train_data.npy").exists(), "Training data should exist"
+    assert (public / "train_labels.npy").exists(), "Training labels should exist"
+    assert (public / "test_data.npy").exists(), "Test data should exist"
+    assert (private / "test_labels.csv").exists(), "Test labels should exist"
+    assert (public / "sample_submission.csv").exists(), "Sample submission should exist"
+
+    print("\nPrepared ethanol-concentration dataset:")
+    print(f"  - Public files: {list(public.glob('*'))}")
+    print(f"  - Private files: {list(private.glob('*'))}")
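Note: a minimal sketch of how a participant would consume the files this script publishes. The file names come from the script above; the directory paths and array shapes are illustrative assumptions.

import numpy as np
import pandas as pd

# Paths assume the public dir is ./public (illustrative).
X_train = np.load("public/train_data.npy")    # (n_train, seq_len, n_features)
y_train = np.load("public/train_labels.npy")  # (n_train,)
X_test = np.load("public/test_data.npy")      # (n_test, seq_len, n_features)

# A valid submission mirrors sample_submission.csv: one class label per test id.
submission = pd.DataFrame({"id": range(len(X_test)), "label": 0})
submission.to_csv("submission.csv", index=False)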
mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py
@@ -0,0 +1,60 @@
+import pandas as pd
+from pandas import DataFrame
+from scipy.sparse import csr_matrix
+from sklearn.metrics import f1_score
+from sklearn.preprocessing import MultiLabelBinarizer
+
+from mlebench.grade_helpers import InvalidSubmissionError
+from mlebench.utils import get_logger
+
+logger = get_logger(__name__)
+
+
+def grade(submission: DataFrame, answers: DataFrame) -> float:
+    """Grades the submission against the test set."""
+
+    y_true, y_pred = prepare_for_metric(submission, answers)
+    return f1_score(y_true=y_true, y_pred=y_pred, average="micro")
+
+
+def prepare_for_metric(
+    submission: pd.DataFrame, answers: pd.DataFrame
+) -> tuple[csr_matrix, csr_matrix]:
+
+    # Answer checks
+    assert "Id" in answers.columns, "Answers must have an 'Id' column."
+    assert "Tags" in answers.columns, "Answers must have a 'Tags' column."
+
+    # Submission checks
+    if "Id" not in submission.columns:
+        raise InvalidSubmissionError("Submission DataFrame must have an 'Id' column.")
+    if "Tags" not in submission.columns:
+        raise InvalidSubmissionError("Submission DataFrame must have a 'Tags' column.")
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError(
+            "Submission DataFrame must have the same number of rows as the answers DataFrame."
+        )
+
+    # Match order
+    submission = submission.sort_values(by="Id").reset_index(drop=True)
+    answers = answers.sort_values(by="Id").reset_index(drop=True)
+    if (submission["Id"].values != answers["Id"].values).any():
+        raise InvalidSubmissionError("Submission and answers must have matching 'Id's.")
+
+    # Get classes
+    classes = set()
+
+    for tags in answers["Tags"]:
+        if not isinstance(tags, str):
+            logger.warning(f"Tags from answers '{tags}' not of type str! Skipping.")
+            continue
+
+        tags_split = tags.split()
+        classes.update(tags_split)
+
+    # Convert to sparse matrix using MultiLabelBinarizer
+    mlb = MultiLabelBinarizer(classes=sorted(classes), sparse_output=True)
+    y_true = mlb.fit_transform(answers["Tags"].fillna("").str.split())
+    y_pred = mlb.transform(submission["Tags"].fillna("").str.split())
+
+    return y_true, y_pred
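Note: to make the grading path concrete, here is a toy run of the same micro-F1 computation on two hand-made rows (the data is invented; only the sklearn usage mirrors the grader above):

import pandas as pd
from sklearn.metrics import f1_score
from sklearn.preprocessing import MultiLabelBinarizer

answers = pd.DataFrame({"Id": ["1", "2"], "Tags": ["python pandas", "c#"]})
submission = pd.DataFrame({"Id": ["1", "2"], "Tags": ["python", "c# java"]})

# Classes come from the answers only, as in prepare_for_metric above.
classes = sorted({t for tags in answers["Tags"] for t in tags.split()})
mlb = MultiLabelBinarizer(classes=classes, sparse_output=True)
y_true = mlb.fit_transform(answers["Tags"].str.split())
y_pred = mlb.transform(submission["Tags"].str.split())  # "java" is ignored (sklearn warns): not in classes

print(f1_score(y_true=y_true, y_pred=y_pred, average="micro"))  # 2 TP, 1 FN, 0 FP -> 0.8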
mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py
@@ -0,0 +1,41 @@
+from pathlib import Path
+
+from sklearn.model_selection import train_test_split
+
+from mlebench.utils import extract, get_logger, read_csv
+
+logger = get_logger(__name__)
+
+
+def prepare(raw: Path, public: Path, private: Path) -> None:
+    extract(raw / "Train.zip", raw)
+
+    # Create new train and test from original train set
+    logger.info(f"Creating new train and test sets from {raw}.")
+
+    old_train = read_csv(raw / "Train.csv", dtype={"Id": str, "Tags": str})
+    new_train, new_test = train_test_split(old_train, test_size=0.1, random_state=0)
+    new_test_without_labels = new_test.drop(columns=["Tags"])
+    sample_submission = new_test_without_labels.copy()
+    sample_submission["Tags"] = "javascript c# python php java"
+
+    # Copy over files to private and public directories
+    logger.info(f"Copying files to {private} and {public}.")
+
+    new_train.to_csv(public / "train.csv", index=False)
+    new_test_without_labels.to_csv(public / "test.csv", index=False)
+    sample_submission.to_csv(public / "sample_submission.csv", index=False)
+    new_test.to_csv(private / "test.csv", index=False)
+
+    # Sanity checks
+    logger.info("Performing sanity checks.")
+
+    assert len(new_test_without_labels) == len(
+        new_test
+    ), f"Expected {len(new_test)} public test samples, got {len(new_test_without_labels)}."
+    assert len(old_train) == len(new_train) + len(
+        new_test
+    ), f"Mismatch in number of samples in new train and test split! Expected {len(old_train)} samples, got {len(new_train) + len(new_test)}."
+    assert len(sample_submission) == len(
+        new_test
+    ), f"Expected {len(new_test)} public test samples, got {len(sample_submission)}."
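Note: because the split is seeded with random_state=0, re-running this script reproduces the identical train/test partition. A quick way to convince yourself, on toy data (not part of the package):

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({"Id": [str(i) for i in range(100)], "Tags": "python"})
a_train, a_test = train_test_split(df, test_size=0.1, random_state=0)
b_train, b_test = train_test_split(df, test_size=0.1, random_state=0)

assert a_test.index.equals(b_test.index)  # same 10 held-out rows on every run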
mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py
@@ -0,0 +1,92 @@
+from pathlib import Path
+
+import pandas as pd
+from sklearn.model_selection import train_test_split
+
+from mlebench.utils import extract, get_logger, read_csv
+
+logger = get_logger(__name__)
+
+
+def _create_dataset_split(
+    source_df: pd.DataFrame, public_dir: Path, private_dir: Path
+) -> None:
+    """
+    Splits a source dataframe and saves the resulting files to public and private dirs.
+
+    This helper function encapsulates the logic for:
+    1. Splitting data into train/test sets.
+    2. Creating public (unlabeled) and private (labeled) test sets.
+    3. Saving all files (train.csv, test.csv, sample_submission.csv) to the
+       specified directories with a consistent naming scheme.
+    """
+    # Create directories if they don't exist
+    public_dir.mkdir(parents=True, exist_ok=True)
+    private_dir.mkdir(parents=True, exist_ok=True)
+
+    # Split the source dataframe
+    train_split, test_split = train_test_split(
+        source_df, test_size=0.1, random_state=0
+    )
+    test_split_without_labels = test_split.drop(columns=["Tags"])
+    sample_submission = test_split_without_labels.copy()
+    sample_submission["Tags"] = "javascript c# python php java"
+
+    # Copy over files to private and public directories
+    logger.info(f"Copying files to {private_dir} and {public_dir}.")
+
+    train_split.to_csv(public_dir / "train.csv", index=False)
+    test_split_without_labels.to_csv(public_dir / "test.csv", index=False)
+    sample_submission.to_csv(public_dir / "sample_submission.csv", index=False)
+    test_split.to_csv(private_dir / "test.csv", index=False)
+
+    # Sanity checks
+    logger.info(f"Performing sanity checks for {public_dir.name}.")
+
+    assert len(test_split_without_labels) == len(
+        test_split
+    ), f"Expected {len(test_split)} public test samples, got {len(test_split_without_labels)}."
+    assert len(source_df) == len(train_split) + len(
+        test_split
+    ), f"Mismatch in number of samples! Expected {len(source_df)} samples, got {len(train_split) + len(test_split)}."
+    assert len(sample_submission) == len(
+        test_split
+    ), f"Expected {len(test_split)} public test samples, got {len(sample_submission)}."
+
+
+def prepare(raw: Path, public: Path, private: Path) -> None:
+    extract(raw / "Train.zip", raw)
+
+    # Read the full original training dataset
+    old_train = read_csv(raw / "Train.csv", dtype={"Id": str, "Tags": str})
+
+    # --- Original Data Split (public/private) ---
+    # This block creates the main train/test split. Its outputs in the `public`
+    # and `private` directories will remain identical to the original script.
+    logger.info(
+        "Creating original train/test split for public and private directories."
+    )
+    _create_dataset_split(source_df=old_train, public_dir=public, private_dir=private)
+
+    # --- New Validation Data Split (public_val/private_val) ---
+    # This block creates a new, independent validation split. It takes the
+    # training set from the *first* split and splits it again, creating a
+    # smaller training set and a validation set.
+    logger.info(
+        "Creating new train/validation split for public_val and private_val directories."
+    )
+    # Define the new output directories, parallel to the original ones.
+    public_val = public.parent / "public_val"
+    private_val = private.parent / "private_val"
+
+    # We must use the training set from the first split as the source for the second.
+    train_from_first_split, _ = train_test_split(
+        old_train, test_size=0.1, random_state=0
+    )
+
+    # Create the second split and save to the new _val directories.
+    _create_dataset_split(
+        source_df=train_from_first_split,
+        public_dir=public_val,
+        private_dir=private_val,
+    )
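Note the effect: both calls use test_size=0.1 and random_state=0, so the validation split is carved out of exactly the rows that remain in the original public train.csv, and nothing from the original held-out test set leaks into public_val. A sketch of the containment check (paths are illustrative assumptions):

import pandas as pd

orig_train = pd.read_csv("public/train.csv", dtype=str)
val_train = pd.read_csv("public_val/train.csv", dtype=str)
val_test = pd.read_csv("private_val/test.csv", dtype=str)

orig_ids = set(orig_train["Id"])
assert set(val_train["Id"]) <= orig_ids  # smaller train comes from the original train split
assert set(val_test["Id"]) <= orig_ids   # validation rows also come from the original train split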
mlebench/competitions/feedback-prize-english-language-learning/__init__.py
File without changes
mlebench/competitions/feedback-prize-english-language-learning/grade.py
@@ -0,0 +1,60 @@
+import numpy as np
+import pandas as pd
+from sklearn.metrics import mean_squared_error
+
+from mlebench.grade_helpers import InvalidSubmissionError
+
+
+TARGET_COLUMNS = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions']
+
+
+def prepare_for_metric(submission: pd.DataFrame, answers: pd.DataFrame):
+    """Prepare submission and answers for MCRMSE calculation."""
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError(
+            f"Submission length ({len(submission)}) != answers length ({len(answers)})"
+        )
+
+    if "text_id" not in submission.columns:
+        raise InvalidSubmissionError("Submission must have a 'text_id' column")
+
+    for col in TARGET_COLUMNS:
+        if col not in submission.columns:
+            raise InvalidSubmissionError(f"Submission must have a '{col}' column")
+
+    # Sort by text_id
+    submission = submission.sort_values("text_id").reset_index(drop=True)
+    answers = answers.sort_values("text_id").reset_index(drop=True)
+
+    # Check text_id alignment
+    if not (submission["text_id"] == answers["text_id"]).all():
+        raise InvalidSubmissionError("Submission and answers text_id mismatch")
+
+    # Validate predictions
+    for col in TARGET_COLUMNS:
+        try:
+            submission[col] = submission[col].astype(float)
+        except (ValueError, TypeError):
+            raise InvalidSubmissionError(f"'{col}' predictions must be numeric")
+
+        if submission[col].isnull().any():
+            raise InvalidSubmissionError(f"'{col}' predictions cannot contain NaN values")
+
+    return submission, answers
+
+
+def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+    """
+    Calculate MCRMSE (Mean Columnwise Root Mean Squared Error).
+
+    MCRMSE = mean of RMSE across all target columns
+    """
+    submission, answers = prepare_for_metric(submission, answers)
+
+    rmse_scores = []
+    for col in TARGET_COLUMNS:
+        rmse = np.sqrt(mean_squared_error(answers[col], submission[col]))
+        rmse_scores.append(rmse)
+
+    mcrmse = np.mean(rmse_scores)
+    return mcrmse
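Note: MCRMSE is the per-column RMSE averaged across columns, i.e. (1/M) * sum over columns j of sqrt((1/N) * sum over rows i of (y_ij - yhat_ij)^2). Worked numerically with toy values (two target columns, three essays; not package data):

import numpy as np

y_true = np.array([[3.0, 2.5], [4.0, 3.0], [2.0, 3.5]])
y_pred = np.array([[3.5, 2.5], [4.0, 2.0], [2.0, 3.5]])

# Per-column RMSE, then the plain mean across columns.
rmse_per_col = np.sqrt(((y_true - y_pred) ** 2).mean(axis=0))
print(rmse_per_col)         # approximately [0.2887, 0.5774]
print(rmse_per_col.mean())  # MCRMSE of about 0.433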
mlebench/competitions/feedback-prize-english-language-learning/prepare.py
@@ -0,0 +1,39 @@
+from pathlib import Path
+import pandas as pd
+
+
+TARGET_COLUMNS = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions']
+
+
+def prepare(raw: Path, public: Path, private: Path):
+    """
+    Prepare feedback-prize-english-language-learning dataset.
+
+    Raw data contains:
+    - train.csv: training data with text and scores
+    - test.csv: test data with text only
+    - sample_submission.csv: sample submission format
+    - test_answer.csv: test labels (private)
+    """
+    # Read data
+    train = pd.read_csv(raw / "train.csv")
+    test = pd.read_csv(raw / "test.csv")
+    sample_submission = pd.read_csv(raw / "sample_submission.csv")
+    test_answer = pd.read_csv(raw / "test_answer.csv")
+
+    # Public files (visible to agents)
+    train.to_csv(public / "train.csv", index=False)
+    test.to_csv(public / "test.csv", index=False)
+    sample_submission.to_csv(public / "sample_submission.csv", index=False)
+
+    # Private files (for grading)
+    test_answer.to_csv(private / "test.csv", index=False)
+
+    # Validation checks
+    assert len(test_answer) == len(sample_submission), \
+        f"Test answer ({len(test_answer)}) and sample submission ({len(sample_submission)}) must have same length"
+    assert "text_id" in test_answer.columns, "Test answer must have 'text_id' column"
+    assert "text_id" in sample_submission.columns, "Sample submission must have 'text_id' column"
+    for col in TARGET_COLUMNS:
+        assert col in test_answer.columns, f"Test answer must have '{col}' column"
+        assert col in sample_submission.columns, f"Sample submission must have '{col}' column"
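Note: a minimal submission consistent with the checks above carries a text_id plus the six score columns. The id and values below are illustrative only (the competition scores essays on a 1.0 to 5.0 scale):

import pandas as pd

TARGET_COLUMNS = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions']

# One row per test essay; "0000AAA111" is a made-up text_id.
submission = pd.DataFrame([{"text_id": "0000AAA111", **{c: 3.0 for c in TARGET_COLUMNS}}])
submission.to_csv("submission.csv", index=False)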
mlebench/competitions/freesound-audio-tagging-2019/grade.py
@@ -0,0 +1,64 @@
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import sklearn.metrics
+
+from mlebench.grade_helpers import InvalidSubmissionError
+
+from .vocabulary import CLASS_NAMES
+
+
+# Using the metric implementation given to participants. Source:
+# https://colab.research.google.com/drive/1AgPdhSp7ttY18O3fEoHOQKlt_3HJDLi8
+def calculate_overall_lwlrap_sklearn(truth: np.ndarray, scores: np.ndarray) -> float:
+    """Calculate the overall lwlrap using sklearn.metrics.lrap."""
+    # sklearn doesn't correctly apply weighting to samples with no labels, so just skip them.
+    sample_weight = np.sum(truth > 0, axis=1)
+    nonzero_weight_sample_indices = np.flatnonzero(sample_weight > 0)
+    overall_lwlrap = sklearn.metrics.label_ranking_average_precision_score(
+        truth[nonzero_weight_sample_indices, :] > 0,
+        scores[nonzero_weight_sample_indices, :],
+        sample_weight=sample_weight[nonzero_weight_sample_indices],
+    )
+    return overall_lwlrap
+
+
+def prepare_for_metric(submission: pd.DataFrame, answers: pd.DataFrame) -> tuple:
+    """
+    `submission` and `answers` are both DataFrames with N rows and M + 1 columns.
+    The first column is "fname" and the remaining M columns are the predictions for each class.
+    The values are floats between 0 and 1. For `answers`, the values are binary.
+    """
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError("Submission and answers must have the same length")
+
+    if "fname" != submission.columns[0]:
+        raise InvalidSubmissionError("Submission must have an 'fname' column")
+
+    if not all(submission.columns[1:] == CLASS_NAMES):
+        raise InvalidSubmissionError("Submission classes do not match expected classes")
+
+    assert answers.columns[0] == "fname", "Answers must have an 'fname' column"
+    assert all(answers.columns[1:] == CLASS_NAMES), "Answers classes do not match expected classes"
+
+    submission = submission.sort_values("fname").reset_index(drop=True)
+    answers = answers.sort_values("fname").reset_index(drop=True)
+
+    if (submission["fname"].values != answers["fname"].values).any():
+        raise InvalidSubmissionError("Submission and answers must have the same ids")
+
+    # Convert to numbers
+    try:
+        submission.iloc[:, 1:] = submission.iloc[:, 1:].apply(pd.to_numeric)
+    except (ValueError, TypeError) as e:
+        raise InvalidSubmissionError(f"Error converting submission values to float: {str(e)}")
+    answers.iloc[:, 1:] = answers.iloc[:, 1:].apply(pd.to_numeric)
+
+    # Return as numpy arrays: (truth from answers, scores from submission)
+    return answers.iloc[:, 1:].values, submission.iloc[:, 1:].values
+
+
+def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+    y_true, y_scores = prepare_for_metric(submission, answers)
+    return calculate_overall_lwlrap_sklearn(y_true, y_scores)
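Note: a toy invocation of the same metric on hand-made arrays (the real grader feeds it the matrices returned by prepare_for_metric). This calls sklearn directly, mirroring calculate_overall_lwlrap_sklearn above:

import numpy as np
import sklearn.metrics

truth = np.array([[1, 0, 0],
                  [0, 1, 1]])
scores = np.array([[0.9, 0.3, 0.2],
                   [0.1, 0.8, 0.7]])

# Label-weighted label-ranking average precision; here every row has at
# least one positive label, so no rows need to be skipped.
w = truth.sum(axis=1)
print(sklearn.metrics.label_ranking_average_precision_score(
    truth > 0, scores, sample_weight=w))  # 1.0: every true label outranks every false one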
mlebench/competitions/freesound-audio-tagging-2019/prepare.py
@@ -0,0 +1,94 @@
+import shutil
+from pathlib import Path
+
+import pandas as pd
+import requests
+
+from mlebench.utils import get_logger
+
+from .vocabulary import CLASS_NAMES
+
+logger = get_logger(__name__)
+
+
+def download_file(url, local_filename):
+    with requests.get(url, stream=True) as r:
+        r.raise_for_status()
+        with open(local_filename, "wb") as f:
+            for chunk in r.iter_content(chunk_size=8192):
+                if chunk:  # filter out keep-alive new chunks
+                    f.write(chunk)
+    return local_filename
+
+
+def prepare(raw: Path, public: Path, private: Path):
+    """
+    Straightforward: we have access to the post-competition released test labels, so we don't need
+    to create our own split here. We just copy over the raw data provided by the competition and
+    download the test labels.
+
+    Otherwise, the only work here is to convert the test data into the right format for grading:
+    The original form of `test.csv` is a DataFrame with N rows and 2 columns. The first column is
+    "fname" and the second column is the labels as comma-separated strings (class names). We
+    convert the test labels into a binary many-hot matrix matching the shape of the submission,
+    [N rows, M + 1 columns]: The first column is "fname" and the remaining M columns are the
+    predictions for each class.
+    """
+
+    # Copy over everything in the raw directory
+    logger.info("Copying raw data to public directory")
+    # Don't copy the metadata file if it exists
+    items_to_copy = [item for item in raw.iterdir() if "FSDKaggle2019.meta" not in item.name]
+    for item in items_to_copy:
+        dest = public / item.name
+        if item.is_dir():
+            shutil.copytree(item, dest)
+        else:
+            shutil.copy(item, dest)
+    assert len(list(public.iterdir())) == len(
+        items_to_copy
+    ), "Expected all files in raw to be copied to public"
+
+    # Download the test labels and metadata that were released after the competition
+    test_url = "https://zenodo.org/records/3612637/files/FSDKaggle2019.meta.zip?download=1"
+    dest_path = raw / "FSDKaggle2019.meta.zip"
+    download_file(test_url, dest_path)
+    logger.info(f"Downloaded file saved as {dest_path}")
+    # Unzip
+    shutil.unpack_archive(dest_path, raw)
+    unzipped_path = raw / "FSDKaggle2019.meta"
+    logger.info(f"Unzipped file to {unzipped_path}")
+
+    # Read test labels
+    test_post_competition = pd.read_csv(unzipped_path / "test_post_competition.csv")
+    private_test = test_post_competition[test_post_competition["usage"] == "Private"]
+    # Create a binary many-hot matrix
+    new_test_rows = []
+    for idx, row in private_test.iterrows():
+        fname = row["fname"]
+        file_labels = row["labels"].split(",")
+        binary_labels = [1 if name in file_labels else 0 for name in CLASS_NAMES]
+        new_test_rows.append([fname] + binary_labels)
+    new_test = pd.DataFrame(new_test_rows, columns=["fname"] + CLASS_NAMES)
+    new_test.to_csv(private / "test.csv", index=False)
+
+    # Check that test and submission match
+    submission = pd.read_csv(public / "sample_submission.csv")
+    assert len(submission) == len(
+        new_test
+    ), f"Expected {len(new_test)} rows in test.csv, but got {len(submission)}"
+    assert (
+        submission.columns[1:].tolist() == CLASS_NAMES
+    ), "Expected class names to match between test.csv and sample_submission.csv"
+    assert all(
+        submission.columns == new_test.columns
+    ), "Expected columns to match between test.csv and sample_submission.csv"
+    new_test.sort_values("fname", inplace=True)
+    submission.sort_values("fname", inplace=True)
+    assert (
+        submission["fname"].tolist() == new_test["fname"].tolist()
+    ), "Expected 'fname' to match between test.csv and sample_submission.csv"
+
+    # Remove the downloaded metadata
+    dest_path.unlink()
+    shutil.rmtree(unzipped_path)
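Note: the row loop that builds the many-hot matrix can also be expressed with MultiLabelBinarizer, which the grading side already uses. A self-contained sketch with stand-in data (the real CLASS_NAMES list lives in vocabulary.py):

import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer

CLASS_NAMES = ["Bark", "Meow", "Purr"]  # stand-in subset for illustration
private_test = pd.DataFrame({"fname": ["a.wav", "b.wav"],
                             "labels": ["Bark,Meow", "Purr"]})

# Binarize the comma-separated label strings into one 0/1 column per class,
# preserving CLASS_NAMES order.
mlb = MultiLabelBinarizer(classes=CLASS_NAMES)
many_hot = mlb.fit_transform(private_test["labels"].str.split(","))
new_test = pd.DataFrame(many_hot, columns=CLASS_NAMES)
new_test.insert(0, "fname", private_test["fname"].to_numpy())
print(new_test)  # fname column followed by the many-hot class columns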