dslighting-1.7.1-py3-none-any.whl → dslighting-1.7.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dslighting/__init__.py +1 -1
- dslighting/core/agent.py +78 -62
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/METADATA +3 -1
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/RECORD +352 -7
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/top_level.txt +1 -0
- mlebench/README.md +39 -0
- mlebench/__init__.py +0 -0
- mlebench/cli.py +221 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py +161 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py +425 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare.py +483 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare_val.py +719 -0
- mlebench/competitions/AI4Code/grade.py +70 -0
- mlebench/competitions/AI4Code/prepare.py +84 -0
- mlebench/competitions/AI4Code/prepare_val.py +159 -0
- mlebench/competitions/__init__.py +0 -0
- mlebench/competitions/aerial-cactus-identification/grade.py +11 -0
- mlebench/competitions/aerial-cactus-identification/prepare.py +71 -0
- mlebench/competitions/aerial-cactus-identification/prepare_val.py +133 -0
- mlebench/competitions/alaska2-image-steganalysis/grade.py +136 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare.py +88 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare_val.py +148 -0
- mlebench/competitions/aptos2019-blindness-detection/grade.py +35 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare.py +75 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare_val.py +123 -0
- mlebench/competitions/bike-sharing-demand/__init__.py +0 -0
- mlebench/competitions/bike-sharing-demand/grade.py +55 -0
- mlebench/competitions/bike-sharing-demand/prepare.py +37 -0
- mlebench/competitions/billion-word-imputation/grade.py +37 -0
- mlebench/competitions/billion-word-imputation/prepare.py +107 -0
- mlebench/competitions/billion-word-imputation/prepare_val.py +179 -0
- mlebench/competitions/bms-molecular-translation/grade.py +40 -0
- mlebench/competitions/bms-molecular-translation/prepare.py +68 -0
- mlebench/competitions/bms-molecular-translation/prepare_val.py +131 -0
- mlebench/competitions/cassava-leaf-disease-classification/grade.py +12 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare.py +113 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare_val.py +186 -0
- mlebench/competitions/cdiscount-image-classification-challenge/grade.py +11 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare.py +144 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare_val.py +205 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/grade.py +67 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare.py +31 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare_val.py +94 -0
- mlebench/competitions/champs-scalar-coupling/grade.py +60 -0
- mlebench/competitions/champs-scalar-coupling/prepare.py +116 -0
- mlebench/competitions/champs-scalar-coupling/prepare_val.py +155 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/__init__.py +0 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/grade.py +40 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/prepare.py +41 -0
- mlebench/competitions/demand-forecasting-kernels-only/__init__.py +0 -0
- mlebench/competitions/demand-forecasting-kernels-only/grade.py +66 -0
- mlebench/competitions/demand-forecasting-kernels-only/prepare.py +27 -0
- mlebench/competitions/demand_forecasting_kernels_only/__init__.py +0 -0
- mlebench/competitions/demand_forecasting_kernels_only/grade.py +66 -0
- mlebench/competitions/demand_forecasting_kernels_only/prepare.py +27 -0
- mlebench/competitions/denoising-dirty-documents/grade.py +44 -0
- mlebench/competitions/denoising-dirty-documents/prepare.py +134 -0
- mlebench/competitions/denoising-dirty-documents/prepare_val.py +178 -0
- mlebench/competitions/detecting-insults-in-social-commentary/grade.py +11 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare.py +72 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare_val.py +128 -0
- mlebench/competitions/dog-breed-identification/dogs.py +124 -0
- mlebench/competitions/dog-breed-identification/grade.py +42 -0
- mlebench/competitions/dog-breed-identification/prepare.py +55 -0
- mlebench/competitions/dog-breed-identification/prepare_val.py +104 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/grade.py +43 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare.py +70 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare_val.py +143 -0
- mlebench/competitions/ethanol-concentration/grade.py +23 -0
- mlebench/competitions/ethanol-concentration/prepare.py +90 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py +60 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py +41 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py +92 -0
- mlebench/competitions/feedback-prize-english-language-learning/__init__.py +0 -0
- mlebench/competitions/feedback-prize-english-language-learning/grade.py +60 -0
- mlebench/competitions/feedback-prize-english-language-learning/prepare.py +39 -0
- mlebench/competitions/freesound-audio-tagging-2019/grade.py +64 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare.py +94 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py +175 -0
- mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py +83 -0
- mlebench/competitions/google-quest-challenge/classes.py +32 -0
- mlebench/competitions/google-quest-challenge/grade.py +45 -0
- mlebench/competitions/google-quest-challenge/prepare.py +58 -0
- mlebench/competitions/google-quest-challenge/prepare_val.py +120 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py +77 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare.py +155 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare_val.py +211 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/grade.py +42 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare.py +102 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare_val.py +132 -0
- mlebench/competitions/handwriting/grade.py +23 -0
- mlebench/competitions/handwriting/prepare.py +179 -0
- mlebench/competitions/herbarium-2020-fgvc7/grade.py +34 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare.py +251 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare_val.py +242 -0
- mlebench/competitions/herbarium-2021-fgvc8/grade.py +34 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare.py +251 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare_val.py +222 -0
- mlebench/competitions/herbarium-2022-fgvc9/grade.py +31 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare.py +233 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare_val.py +213 -0
- mlebench/competitions/histopathologic-cancer-detection/grade.py +12 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare.py +59 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare_val.py +131 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/constants.py +9 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/grade.py +43 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kaggle_metric_utilities.py +96 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kullback_leibler_divergence.py +118 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare.py +121 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare_val.py +190 -0
- mlebench/competitions/hotel-id-2021-fgvc8/grade.py +41 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare.py +63 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare_val.py +132 -0
- mlebench/competitions/hubmap-kidney-segmentation/grade.py +62 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare.py +108 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare_val.py +153 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/grade.py +111 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare.py +127 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare_val.py +183 -0
- mlebench/competitions/ili/grade.py +60 -0
- mlebench/competitions/ili/prepare.py +99 -0
- mlebench/competitions/imet-2020-fgvc7/grade.py +54 -0
- mlebench/competitions/imet-2020-fgvc7/prepare.py +77 -0
- mlebench/competitions/imet-2020-fgvc7/prepare_val.py +157 -0
- mlebench/competitions/inaturalist-2019-fgvc6/grade.py +35 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare.py +259 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare_val.py +304 -0
- mlebench/competitions/instant-gratification/__init__.py +0 -0
- mlebench/competitions/instant-gratification/grade.py +55 -0
- mlebench/competitions/instant-gratification/prepare.py +25 -0
- mlebench/competitions/instant_gratification/__init__.py +0 -0
- mlebench/competitions/instant_gratification/grade.py +55 -0
- mlebench/competitions/instant_gratification/prepare.py +25 -0
- mlebench/competitions/invasive-species-monitoring/grade.py +11 -0
- mlebench/competitions/invasive-species-monitoring/prepare.py +97 -0
- mlebench/competitions/invasive-species-monitoring/prepare_val.py +164 -0
- mlebench/competitions/iwildcam-2019-fgvc6/grade.py +44 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare.py +118 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare_val.py +194 -0
- mlebench/competitions/iwildcam-2020-fgvc7/grade.py +11 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare.py +164 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare_val.py +245 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/classes.py +1 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/grade.py +54 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare.py +42 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare_val.py +88 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/grade.py +153 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare.py +36 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare_val.py +117 -0
- mlebench/competitions/kuzushiji-recognition/grade.py +58 -0
- mlebench/competitions/kuzushiji-recognition/kuzushiji_metric.py +118 -0
- mlebench/competitions/kuzushiji-recognition/prepare.py +92 -0
- mlebench/competitions/kuzushiji-recognition/prepare_val.py +149 -0
- mlebench/competitions/leaf-classification/classes.py +101 -0
- mlebench/competitions/leaf-classification/grade.py +44 -0
- mlebench/competitions/leaf-classification/prepare.py +60 -0
- mlebench/competitions/leaf-classification/prepare_val.py +116 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/grade.py +44 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare.py +51 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare_val.py +96 -0
- mlebench/competitions/liverpool-ion-switching/__init__.py +0 -0
- mlebench/competitions/liverpool-ion-switching/grade.py +52 -0
- mlebench/competitions/liverpool-ion-switching/prepare.py +27 -0
- mlebench/competitions/liverpool_ion_switching/__init__.py +0 -0
- mlebench/competitions/liverpool_ion_switching/grade.py +52 -0
- mlebench/competitions/liverpool_ion_switching/prepare.py +27 -0
- mlebench/competitions/lmsys-chatbot-arena/grade.py +63 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare.py +52 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare_val.py +115 -0
- mlebench/competitions/mcm_2024_c_test/grade.py +107 -0
- mlebench/competitions/mcm_2024_c_test/prepare.py +2 -0
- mlebench/competitions/ml2021spring-hw2/grade.py +11 -0
- mlebench/competitions/ml2021spring-hw2/prepare.py +58 -0
- mlebench/competitions/ml2021spring-hw2/prepare_val.py +135 -0
- mlebench/competitions/mlsp-2013-birds/grade.py +11 -0
- mlebench/competitions/mlsp-2013-birds/prepare.py +182 -0
- mlebench/competitions/mlsp-2013-birds/prepare_val.py +241 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/grade.py +11 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare.py +58 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare_val.py +120 -0
- mlebench/competitions/multi-modal-gesture-recognition/grade.py +58 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare.py +85 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare_val.py +139 -0
- mlebench/competitions/my-custom-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-03/grade.py +107 -0
- mlebench/competitions/new-my-task-03/prepare.py +2 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/grade.py +28 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare.py +44 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare_val.py +89 -0
- mlebench/competitions/nfl-player-contact-detection/grade.py +36 -0
- mlebench/competitions/nfl-player-contact-detection/prepare.py +101 -0
- mlebench/competitions/nfl-player-contact-detection/prepare_val.py +186 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/grade.py +47 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare.py +77 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare_val.py +144 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/grade.py +74 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare.py +95 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare_val.py +167 -0
- mlebench/competitions/paddy-disease-classification/grade.py +35 -0
- mlebench/competitions/paddy-disease-classification/prepare.py +69 -0
- mlebench/competitions/paddy-disease-classification/prepare_val.py +122 -0
- mlebench/competitions/petfinder-pawpularity-score/grade.py +41 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare.py +76 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare_val.py +154 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/grade.py +41 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare.py +74 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare_val.py +160 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/grade.py +54 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare.py +65 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare_val.py +130 -0
- mlebench/competitions/plant-seedlings-classification/grade.py +39 -0
- mlebench/competitions/plant-seedlings-classification/prepare.py +91 -0
- mlebench/competitions/plant-seedlings-classification/prepare_val.py +158 -0
- mlebench/competitions/playground-series-s3e1/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e1/grade.py +52 -0
- mlebench/competitions/playground-series-s3e1/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e11/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e11/grade.py +55 -0
- mlebench/competitions/playground-series-s3e11/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e18/grade.py +39 -0
- mlebench/competitions/playground-series-s3e18/prepare.py +36 -0
- mlebench/competitions/playground-series-s3e18/prepare_val.py +89 -0
- mlebench/competitions/playground_series_s3e1/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e1/grade.py +52 -0
- mlebench/competitions/playground_series_s3e1/prepare.py +25 -0
- mlebench/competitions/playground_series_s3e11/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e11/grade.py +55 -0
- mlebench/competitions/playground_series_s3e11/prepare.py +25 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/grade.py +44 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare.py +68 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare_val.py +146 -0
- mlebench/competitions/random-acts-of-pizza/grade.py +14 -0
- mlebench/competitions/random-acts-of-pizza/prepare.py +80 -0
- mlebench/competitions/random-acts-of-pizza/prepare_val.py +144 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/classes.py +11 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/grade.py +31 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare.py +53 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare_val.py +113 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/grade.py +124 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare.py +219 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare_val.py +257 -0
- mlebench/competitions/rsna-breast-cancer-detection/grade.py +65 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare.py +141 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare_val.py +201 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/grade.py +13 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare.py +47 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare_val.py +97 -0
- mlebench/competitions/santander-customer-satisfaction/grade.py +10 -0
- mlebench/competitions/santander-customer-satisfaction/prepare.py +41 -0
- mlebench/competitions/sciencebench-001-clintox-nn/__init__.py +0 -0
- mlebench/competitions/sciencebench-001-clintox-nn/grade.py +56 -0
- mlebench/competitions/sciencebench-001-clintox-nn/prepare.py +75 -0
- mlebench/competitions/sciencebench-015-aai/grade.py +37 -0
- mlebench/competitions/sciencebench-015-aai/prepare.py +102 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/grade.py +58 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/prepare.py +69 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/grade.py +55 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/prepare.py +88 -0
- mlebench/competitions/see-click-predict-fix/__init__.py +0 -0
- mlebench/competitions/see-click-predict-fix/grade.py +66 -0
- mlebench/competitions/see-click-predict-fix/prepare.py +25 -0
- mlebench/competitions/see_click_predict_fix/__init__.py +0 -0
- mlebench/competitions/see_click_predict_fix/grade.py +66 -0
- mlebench/competitions/see_click_predict_fix/prepare.py +25 -0
- mlebench/competitions/seti-breakthrough-listen/grade.py +11 -0
- mlebench/competitions/seti-breakthrough-listen/prepare.py +71 -0
- mlebench/competitions/seti-breakthrough-listen/prepare_val.py +159 -0
- mlebench/competitions/siim-covid19-detection/grade.py +194 -0
- mlebench/competitions/siim-covid19-detection/prepare.py +123 -0
- mlebench/competitions/siim-covid19-detection/prepare_val.py +164 -0
- mlebench/competitions/siim-isic-melanoma-classification/grade.py +11 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare.py +127 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare_val.py +158 -0
- mlebench/competitions/smartphone-decimeter-2022/grade.py +55 -0
- mlebench/competitions/smartphone-decimeter-2022/notebook.py +86 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare.py +143 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare_val.py +199 -0
- mlebench/competitions/spaceship-titanic/grade.py +11 -0
- mlebench/competitions/spaceship-titanic/prepare.py +23 -0
- mlebench/competitions/spaceship-titanic/prepare_val.py +61 -0
- mlebench/competitions/spooky-author-identification/classes.py +1 -0
- mlebench/competitions/spooky-author-identification/grade.py +38 -0
- mlebench/competitions/spooky-author-identification/prepare.py +40 -0
- mlebench/competitions/spooky-author-identification/prepare_val.py +78 -0
- mlebench/competitions/stanford-covid-vaccine/grade.py +65 -0
- mlebench/competitions/stanford-covid-vaccine/prepare.py +129 -0
- mlebench/competitions/stanford-covid-vaccine/prepare_val.py +199 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/grade.py +41 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare.py +105 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare_val.py +157 -0
- mlebench/competitions/tabular-playground-series-dec-2021/grade.py +11 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare.py +39 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare_val.py +99 -0
- mlebench/competitions/tabular-playground-series-may-2022/grade.py +9 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare.py +56 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare_val.py +116 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/grade.py +11 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare.py +90 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare_val.py +148 -0
- mlebench/competitions/tensorflow2-question-answering/grade.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare_val.py +187 -0
- mlebench/competitions/text-normalization-challenge-english-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare.py +115 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare_val.py +213 -0
- mlebench/competitions/text-normalization-challenge-russian-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare.py +113 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare_val.py +165 -0
- mlebench/competitions/tgs-salt-identification-challenge/grade.py +144 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare.py +158 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py +166 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py +11 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py +95 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py +141 -0
- mlebench/competitions/tmdb-box-office-prediction/__init__.py +0 -0
- mlebench/competitions/tmdb-box-office-prediction/grade.py +55 -0
- mlebench/competitions/tmdb-box-office-prediction/prepare.py +35 -0
- mlebench/competitions/tweet-sentiment-extraction/grade.py +67 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare.py +36 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare_val.py +106 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/grade.py +31 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare.py +33 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare_val.py +71 -0
- mlebench/competitions/utils.py +266 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/grade.py +158 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare.py +139 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare_val.py +193 -0
- mlebench/competitions/ventilator-pressure-prediction/__init__.py +0 -0
- mlebench/competitions/ventilator-pressure-prediction/grade.py +52 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare.py +27 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare_val.py +142 -0
- mlebench/competitions/ventilator_pressure_prediction/__init__.py +0 -0
- mlebench/competitions/ventilator_pressure_prediction/grade.py +52 -0
- mlebench/competitions/ventilator_pressure_prediction/prepare.py +27 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/grade.py +97 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare.py +122 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare_val.py +170 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/grade.py +220 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare.py +129 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare_val.py +204 -0
- mlebench/competitions/whale-categorization-playground/grade.py +41 -0
- mlebench/competitions/whale-categorization-playground/prepare.py +103 -0
- mlebench/competitions/whale-categorization-playground/prepare_val.py +196 -0
- mlebench/data.py +420 -0
- mlebench/grade.py +209 -0
- mlebench/grade_helpers.py +235 -0
- mlebench/metrics.py +75 -0
- mlebench/registry.py +332 -0
- mlebench/utils.py +346 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/WHEEL +0 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/entry_points.txt +0 -0
mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py
@@ -0,0 +1,175 @@
+import shutil
+from pathlib import Path
+
+import pandas as pd
+import requests
+from sklearn.model_selection import train_test_split
+
+from mlebench.utils import get_logger
+
+from .vocabulary import CLASS_NAMES
+
+logger = get_logger(__name__)
+
+
+def download_file(url, local_filename):
+    with requests.get(url, stream=True) as r:
+        r.raise_for_status()
+        with open(local_filename, "wb") as f:
+            for chunk in r.iter_content(chunk_size=8192):
+                if chunk:  # filter out keep-alive new chunks
+                    f.write(chunk)
+    return local_filename
+
+
+def _create_many_hot_labels(df: pd.DataFrame) -> pd.DataFrame:
+    """Converts a DataFrame with comma-separated labels into a many-hot encoded DataFrame."""
+    new_rows = []
+    for _, row in df.iterrows():
+        fname = row["fname"]
+        labels = row["labels"].split(",")
+        labels_one_hot = [1 if class_name in labels else 0 for class_name in CLASS_NAMES]
+        new_rows.append([fname] + labels_one_hot)
+
+    new_df = pd.DataFrame(new_rows, columns=["fname"] + CLASS_NAMES)
+    return new_df
+
+
+def prepare(raw: Path, public: Path, private: Path):
+    """
+    Straightforward: we have access to the post-competition released test labels, so we don't need
+    to create our own split here. We just copy over the raw data provided by the competition and
+    download the test labels.
+
+    Otherwise, the only work here is to convert the test data into the right format for grading:
+    The original form of `test.csv` is a DataFrame with N rows and 2 columns. The first column is
+    "fname" and the second column is the labels as comma-separated strings (class names). We
+    convert the test labels into a binary many-hot matrix matching the shape of the submission,
+    [N rows, M + 1 columns]: The first column is "fname" and the remaining M columns are the
+    predictions for each class.
+
+    This script also creates a new validation set by splitting the original train_curated set,
+    saving the results in `public_val` and `private_val` directories.
+    """
+    # =================================================================
+    # Original Data Preparation (public/ and private/)
+    # =================================================================
+
+    # Copy over everything in the raw directory
+    logger.info("Copying raw data to public directory")
+    # Don't copy the metadata file if it exists
+    items_to_copy = [item for item in raw.iterdir() if "FSDKaggle2019.meta" not in item.name]
+    for item in items_to_copy:
+        dest = public / item.name
+        if dest.exists():
+            continue
+        if item.is_dir():
+            shutil.copytree(item, dest)
+        else:
+            shutil.copy(item, dest)
+    assert len(list(public.iterdir())) >= len(
+        items_to_copy
+    ), "Expected all files in raw to be copied to public"
+
+    # Download the test labels and metadata that were released after the competition
+    test_url = "https://zenodo.org/records/3612637/files/FSDKaggle2019.meta.zip?download=1"
+    dest_path = raw / "FSDKaggle2019.meta.zip"
+    if not dest_path.exists():
+        download_file(test_url, dest_path)
+        logger.info(f"Downloaded file saved as {dest_path}")
+    # Unzip
+    shutil.unpack_archive(dest_path, raw)
+    logger.info(f"Unzipped file to {raw / 'FSDKaggle2019.meta'}")
+
+    unzipped_path = raw / "FSDKaggle2019.meta"
+
+    # Read test labels
+    test_post_competition = pd.read_csv(unzipped_path / "test_post_competition.csv")
+    private_test_df = test_post_competition[test_post_competition["usage"] == "Private"]
+    # Create a binary many-hot matrix
+    new_test = _create_many_hot_labels(private_test_df)
+    new_test.to_csv(private / "test.csv", index=False)
+
+    # Check that test and submission match
+    submission = pd.read_csv(public / "sample_submission.csv")
+    assert len(submission) == len(
+        new_test
+    ), f"Expected {len(new_test)} rows in test.csv, but got {len(submission)}"
+    assert (
+        submission.columns[1:].tolist() == CLASS_NAMES
+    ), "Expected class names to match between test.csv and sample_submission.csv"
+    assert all(
+        submission.columns == new_test.columns
+    ), "Expected columns to match between test.csv and sample_submission.csv"
+    new_test.sort_values("fname", inplace=True)
+    submission.sort_values("fname", inplace=True)
+    assert (
+        submission["fname"].tolist() == new_test["fname"].tolist()
+    ), "Expected 'fname' to match between test.csv and sample_submission.csv"
+
+    # Remove the downloaded metadata
+    if dest_path.exists():
+        dest_path.unlink()
+    if unzipped_path.exists():
+        shutil.rmtree(unzipped_path)
+
+    # =================================================================
+    # New Validation Set Creation (public_val/ and private_val/)
+    # =================================================================
+    logger.info("Creating new validation set from train_curated.csv")
+
+    # Define paths and create parallel directories for the validation set
+    public_val = public.parent / "public_val"
+    private_val = private.parent / "private_val"
+    public_val.mkdir(exist_ok=True)
+    private_val.mkdir(exist_ok=True)
+
+    # Load original curated training data and final test set (to match size)
+    original_train_df = pd.read_csv(public / "train_curated.csv")
+    original_test_df = pd.read_csv(private / "test.csv")
+
+    # Split the curated training data to create a new train/validation set.
+    # The new validation set ('test_val') will have the same size as the original test set.
+    train_val_df, test_val_df = train_test_split(
+        original_train_df, test_size=len(original_test_df), random_state=42, shuffle=True
+    )
+
+    # --- Populate public_val directory ---
+    logger.info(f"Populating {public_val} with new training and validation data")
+
+    # Save the new, smaller curated training set manifest
+    train_val_df.to_csv(public_val / "train_curated.csv", index=False)
+
+    # To mirror the directory structure, copy the noisy data as-is
+    shutil.copy(public / "train_noisy.csv", public_val / "train_noisy.csv")
+    if (public / "train_noisy").exists():
+        if (public_val / "train_noisy").exists():
+            shutil.rmtree(public_val / "train_noisy")
+        shutil.copytree(public / "train_noisy", public_val / "train_noisy")
+
+    # Create directories for the new audio file splits
+    (public_val / "train_curated").mkdir(exist_ok=True)
+    (public_val / "test").mkdir(exist_ok=True)
+
+    # Copy audio files for the new, smaller training set
+    for fname in train_val_df["fname"]:
+        shutil.copy(public / "train_curated" / fname, public_val / "train_curated" / fname)
+
+    # Copy audio files for the new validation set into its 'test' directory
+    for fname in test_val_df["fname"]:
+        shutil.copy(public / "train_curated" / fname, public_val / "test" / fname)
+
+    # Create a new sample submission file corresponding to the validation set
+    val_submission = pd.DataFrame({"fname": test_val_df["fname"].sort_values()})
+    for col in CLASS_NAMES:
+        val_submission[col] = 0
+    val_submission.to_csv(public_val / "sample_submission.csv", index=False)
+
+    # --- Populate private_val directory ---
+    logger.info(f"Populating {private_val} with new validation labels")
+
+    # Create and save the ground truth labels for the new validation set
+    private_test_val_df = _create_many_hot_labels(test_val_df)
+    private_test_val_df.to_csv(private_val / "test.csv", index=False)
+
+    logger.info("Validation set creation complete.")
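A note on `_create_many_hot_labels` above: it turns each comma-separated label string into a 0/1 row over the class vocabulary. A minimal self-contained sketch of the same logic, using a made-up three-class vocabulary in place of the real 80-entry CLASS_NAMES:

    import pandas as pd

    # Hypothetical three-class vocabulary standing in for the real 80 CLASS_NAMES
    CLASS_NAMES = ["Bark", "Meow", "Purr"]

    df = pd.DataFrame({"fname": ["a.wav", "b.wav"], "labels": ["Bark,Purr", "Meow"]})

    rows = []
    for _, row in df.iterrows():
        labels = row["labels"].split(",")
        rows.append([row["fname"]] + [1 if c in labels else 0 for c in CLASS_NAMES])

    many_hot = pd.DataFrame(rows, columns=["fname"] + CLASS_NAMES)
    # many_hot:
    #    fname  Bark  Meow  Purr
    # 0  a.wav     1     0     1
    # 1  b.wav     0     1     0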
mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py
@@ -0,0 +1,83 @@
+CLASS_NAMES = [
+    "Accelerating_and_revving_and_vroom",
+    "Accordion",
+    "Acoustic_guitar",
+    "Applause",
+    "Bark",
+    "Bass_drum",
+    "Bass_guitar",
+    "Bathtub_(filling_or_washing)",
+    "Bicycle_bell",
+    "Burping_and_eructation",
+    "Bus",
+    "Buzz",
+    "Car_passing_by",
+    "Cheering",
+    "Chewing_and_mastication",
+    "Child_speech_and_kid_speaking",
+    "Chink_and_clink",
+    "Chirp_and_tweet",
+    "Church_bell",
+    "Clapping",
+    "Computer_keyboard",
+    "Crackle",
+    "Cricket",
+    "Crowd",
+    "Cupboard_open_or_close",
+    "Cutlery_and_silverware",
+    "Dishes_and_pots_and_pans",
+    "Drawer_open_or_close",
+    "Drip",
+    "Electric_guitar",
+    "Fart",
+    "Female_singing",
+    "Female_speech_and_woman_speaking",
+    "Fill_(with_liquid)",
+    "Finger_snapping",
+    "Frying_(food)",
+    "Gasp",
+    "Glockenspiel",
+    "Gong",
+    "Gurgling",
+    "Harmonica",
+    "Hi-hat",
+    "Hiss",
+    "Keys_jangling",
+    "Knock",
+    "Male_singing",
+    "Male_speech_and_man_speaking",
+    "Marimba_and_xylophone",
+    "Mechanical_fan",
+    "Meow",
+    "Microwave_oven",
+    "Motorcycle",
+    "Printer",
+    "Purr",
+    "Race_car_and_auto_racing",
+    "Raindrop",
+    "Run",
+    "Scissors",
+    "Screaming",
+    "Shatter",
+    "Sigh",
+    "Sink_(filling_or_washing)",
+    "Skateboard",
+    "Slam",
+    "Sneeze",
+    "Squeak",
+    "Stream",
+    "Strum",
+    "Tap",
+    "Tick-tock",
+    "Toilet_flush",
+    "Traffic_noise_and_roadway_noise",
+    "Trickle_and_dribble",
+    "Walk_and_footsteps",
+    "Water_tap_and_faucet",
+    "Waves_and_surf",
+    "Whispering",
+    "Writing",
+    "Yell",
+    "Zipper_(clothing)",
+]
+assert len(CLASS_NAMES) == 80, f"Expected 80 class names, but got {len(CLASS_NAMES)}"
mlebench/competitions/google-quest-challenge/classes.py
@@ -0,0 +1,32 @@
+CLASSES = [
+    "question_asker_intent_understanding",
+    "question_body_critical",
+    "question_conversational",
+    "question_expect_short_answer",
+    "question_fact_seeking",
+    "question_has_commonly_accepted_answer",
+    "question_interestingness_others",
+    "question_interestingness_self",
+    "question_multi_intent",
+    "question_not_really_a_question",
+    "question_opinion_seeking",
+    "question_type_choice",
+    "question_type_compare",
+    "question_type_consequence",
+    "question_type_definition",
+    "question_type_entity",
+    "question_type_instructions",
+    "question_type_procedure",
+    "question_type_reason_explanation",
+    "question_type_spelling",
+    "question_well_written",
+    "answer_helpful",
+    "answer_level_of_information",
+    "answer_plausible",
+    "answer_relevance",
+    "answer_satisfaction",
+    "answer_type_instructions",
+    "answer_type_procedure",
+    "answer_type_reason_explanation",
+    "answer_well_written",
+]
mlebench/competitions/google-quest-challenge/grade.py
@@ -0,0 +1,45 @@
+import numpy as np
+import pandas as pd
+from scipy.stats import spearmanr
+
+from mlebench.grade_helpers import InvalidSubmissionError
+
+from .classes import CLASSES
+
+
+def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+    id_col = "qa_id"
+    classes_set = set(CLASSES)
+
+    # submission checks
+    if id_col not in submission.columns:
+        raise InvalidSubmissionError(f"Submission is missing `{id_col}` column")
+    sub_cols_set = set(submission.columns)
+    if not classes_set.issubset(sub_cols_set):
+        raise InvalidSubmissionError(
+            f"Submission is missing the following columns: {classes_set - sub_cols_set}"
+        )
+    if len(submission.columns) != len(CLASSES) + 1:
+        raise InvalidSubmissionError(
+            f"Submission has {len(submission.columns) - 1} columns, expected {len(CLASSES)}"
+        )
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError("Submission and answers have different lengths")
+
+    # answers checks
+    assert id_col in answers.columns, f"Answers is missing `{id_col}` column"
+    ans_cols_set = set(answers.columns)
+    assert classes_set.issubset(
+        ans_cols_set
+    ), f"Answers is missing the following columns: {classes_set - ans_cols_set}"
+    assert (
+        len(answers.columns) == len(CLASSES) + 1
+    ), f"Answers has {len(answers.columns)} columns, expected {len(CLASSES) + 1}"
+
+    # sort by id to ensure correct order
+    submission = submission.sort_values(id_col)
+    answers = answers.sort_values(id_col)
+
+    spearmans = [spearmanr(submission[col], answers[col]).correlation for col in CLASSES]
+    score = np.mean(spearmans)
+    return score
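The score is just the mean of the per-column Spearman correlations. A toy example with two made-up target columns also shows why the prepare scripts that follow guard against constant columns: `spearmanr` returns NaN when either input has no variation, and one NaN sinks the mean:

    import numpy as np
    import pandas as pd
    from scipy.stats import spearmanr

    answers = pd.DataFrame({"qa_id": [1, 2, 3], "t1": [0.0, 0.5, 1.0], "t2": [1.0, 0.5, 0.0]})
    submission = pd.DataFrame({"qa_id": [1, 2, 3], "t1": [0.1, 0.4, 0.9], "t2": [0.2, 0.2, 0.2]})

    scores = [spearmanr(submission[c], answers[c]).correlation for c in ["t1", "t2"]]
    print(scores)           # [1.0, nan] -- t2 is constant, so its correlation is undefined
    print(np.mean(scores))  # nan: one constant column poisons the whole score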
mlebench/competitions/google-quest-challenge/prepare.py
@@ -0,0 +1,58 @@
+from pathlib import Path
+
+import numpy as np
+from sklearn.model_selection import train_test_split
+
+from mlebench.utils import read_csv
+
+from .classes import CLASSES
+
+
+def prepare(raw: Path, public: Path, private: Path):
+
+    # Create train and test splits from train set
+    old_train = read_csv(raw / "train.csv")
+    new_train, new_test = train_test_split(old_train, test_size=0.1, random_state=0)
+
+    # question_type_spelling is almost always 0; if entirely 0 in test set, swap one row
+    if new_test["question_type_spelling"].nunique() == 1:
+        # need to do this swapping because spearmanr needs variation in the data to work
+        suitable_train_row_index = new_train[new_train["question_type_spelling"] != 0].index[0]
+        suitable_test_row_index = 0
+        temp = new_test.iloc[suitable_test_row_index].copy()
+        new_test.iloc[suitable_test_row_index] = new_train.loc[suitable_train_row_index].copy()
+        new_train.loc[suitable_train_row_index] = temp
+
+    new_test_without_labels = new_test.drop(CLASSES, axis=1, inplace=False)
+
+    # Create sample submission; private test will match this format
+    cols_to_keep = ["qa_id"] + CLASSES
+    new_test = new_test[cols_to_keep]
+    sample_submission = new_test.copy()
+    # spearmanr needs variation in the data to work; make each column increasing from 0 to 1
+    n, M = len(sample_submission), len(CLASSES)
+    sample_submission[CLASSES] = np.tile(np.linspace(0, 1, n)[:, None], (1, M))
+
+    # Create private files
+    new_test.to_csv(private / "test.csv", index=False)
+
+    # Create public files visible to agents
+    new_train.to_csv(public / "train.csv", index=False)
+    new_test_without_labels.to_csv(public / "test.csv", index=False)
+    sample_submission.to_csv(public / "sample_submission.csv", index=False)
+
+    # Checks
+    assert new_test_without_labels.shape[1] == 11, "Public test set should have 11 columns"
+    assert new_train.shape[1] == 41, "Public train set should have 41 columns"
+    # each private test set target column should not be constant
+    for column in CLASSES:
+        assert (
+            new_test[column].nunique() > 1
+        ), f"Column {column} should not be constant in the private test set"
+    assert len(new_train) + len(new_test) == len(
+        old_train
+    ), "Length of new_train and new_test should equal length of old_train"
+    assert (
+        sample_submission.columns.to_list() == new_test.columns.to_list()
+    ), "Sample submission columns should match test set"
+    assert len(sample_submission) == len(new_test), "Sample submission length should match test set"
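The `np.tile(np.linspace(0, 1, n)[:, None], (1, M))` expression above builds the sample submission's (n, M) prediction block: one increasing 0-to-1 ramp repeated across all M target columns, so every column has the variation `spearmanr` needs. A quick shape check with toy sizes:

    import numpy as np

    n, M = 4, 3  # toy sizes; the real script uses len(sample_submission) and len(CLASSES)
    block = np.tile(np.linspace(0, 1, n)[:, None], (1, M))
    print(block.shape)  # (4, 3)
    print(block)
    # [[0.         0.         0.        ]
    #  [0.33333333 0.33333333 0.33333333]
    #  [0.66666667 0.66666667 0.66666667]
    #  [1.         1.         1.        ]]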
mlebench/competitions/google-quest-challenge/prepare_val.py
@@ -0,0 +1,120 @@
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+
+from mlebench.utils import read_csv
+
+from .classes import CLASSES
+
+
+def _create_split_and_write_files(
+    source_df: pd.DataFrame,
+    public_dir: Path,
+    private_dir: Path,
+    test_size: float,
+    random_state: int,
+):
+    """
+    Splits a dataframe into train and test sets, and writes them to public and private directories.
+
+    This function encapsulates the core data preparation logic:
+    1. Splits the data.
+    2. Handles a special case for 'question_type_spelling' to ensure variability.
+    3. Creates a public-facing test set (without labels) and a sample submission.
+    4. Creates a private-facing test set (with labels).
+    5. Writes all files to the specified directories.
+    6. Performs assertions to validate the output.
+    """
+    # Create output directories if they don't exist
+    public_dir.mkdir(parents=True, exist_ok=True)
+    private_dir.mkdir(parents=True, exist_ok=True)
+
+    # Create train and test splits from the source dataframe
+    train_df, test_df = train_test_split(
+        source_df, test_size=test_size, random_state=random_state
+    )
+
+    # question_type_spelling is almost always 0; if entirely 0 in test set, swap one row
+    if test_df["question_type_spelling"].nunique() == 1:
+        # need to do this swapping because spearmanr needs variation in the data to work
+        suitable_train_row_index = train_df[train_df["question_type_spelling"] != 0].index[0]
+        suitable_test_row_index = test_df.index[0]
+        temp = test_df.loc[suitable_test_row_index].copy()
+        test_df.loc[suitable_test_row_index] = train_df.loc[suitable_train_row_index].copy()
+        train_df.loc[suitable_train_row_index] = temp
+
+    test_df_without_labels = test_df.drop(CLASSES, axis=1, inplace=False)
+
+    # Create sample submission; private test will match this format
+    cols_to_keep = ["qa_id"] + CLASSES
+    test_labels = test_df[cols_to_keep]
+    sample_submission = test_labels.copy()
+    # spearmanr needs variation in the data to work; make each column increasing from 0 to 1
+    n, M = len(sample_submission), len(CLASSES)
+    sample_submission[CLASSES] = np.tile(np.linspace(0, 1, n)[:, None], (1, M))
+
+    # Create private files
+    test_labels.to_csv(private_dir / "test.csv", index=False)
+
+    # Create public files visible to agents
+    train_df.to_csv(public_dir / "train.csv", index=False)
+    test_df_without_labels.to_csv(public_dir / "test.csv", index=False)
+    sample_submission.to_csv(public_dir / "sample_submission.csv", index=False)
+
+    # Checks
+    assert test_df_without_labels.shape[1] == 11, "Public test set should have 11 columns"
+    assert train_df.shape[1] == 41, "Public train set should have 41 columns"
+    # each private test set target column should not be constant
+    for column in CLASSES:
+        assert (
+            test_labels[column].nunique() > 1
+        ), f"Column {column} should not be constant in the private test set"
+    assert len(train_df) + len(test_df) == len(
+        source_df
+    ), "Length of train_df and test_df should equal length of source_df"
+    assert (
+        sample_submission.columns.to_list() == test_labels.columns.to_list()
+    ), "Sample submission columns should match test set"
+    assert len(sample_submission) == len(test_labels), "Sample submission length should match test set"
+
+    return train_df, test_df
+
+
+def prepare(raw: Path, public: Path, private: Path):
+
+    # Load the raw data from the competition
+    source_data = read_csv(raw / "train.csv")
+
+    # ---- 1. Create the Original Main Split (train/test) ----
+    # This first call generates the primary train and test sets.
+    # The output files are saved to the `public` and `private` directories,
+    # remaining identical to the original script's output.
+    main_train, main_test = _create_split_and_write_files(
+        source_df=source_data,
+        public_dir=public,
+        private_dir=private,
+        test_size=0.1,
+        random_state=0,
+    )
+
+    # ---- 2. Create the New Validation Split (train_val/test_val) ----
+    # This second call takes the `main_train` set from the first split and
+    # splits it again to create a new, smaller training set and a validation set.
+    # The outputs are saved to new, parallel `public_val` and `private_val` dirs.
+    public_val_dir = public.parent / "public_val"
+    private_val_dir = private.parent / "private_val"
+
+    # To make the new validation set (`test_val`) have the same number of samples
+    # as the original test set (`main_test`), we calculate the required `test_size`
+    # relative to the size of the `main_train` dataframe.
+    validation_test_size = len(main_test) / len(main_train)
+
+    _create_split_and_write_files(
+        source_df=main_train,
+        public_dir=public_val_dir,
+        private_dir=private_val_dir,
+        test_size=validation_test_size,
+        random_state=0,  # Use the same random state for consistency
+    )
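The `validation_test_size` arithmetic just re-expresses the original test-set size as a fraction of the remaining training data. With made-up round numbers: 1,000 source rows split 900/100, so the second split uses test_size = 100/900 ≈ 0.111 and carves roughly another 100-row test_val out of the 900-row main_train. A quick sanity check:

    import pandas as pd
    from sklearn.model_selection import train_test_split

    source = pd.DataFrame({"x": range(1000)})  # hypothetical 1,000-row train.csv
    main_train, main_test = train_test_split(source, test_size=0.1, random_state=0)
    val_size = len(main_test) / len(main_train)  # 100 / 900 ≈ 0.111
    train_val, test_val = train_test_split(main_train, test_size=val_size, random_state=0)
    print(len(main_test), len(test_val))  # 100 100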
mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py
@@ -0,0 +1,77 @@
+import numpy as np
+from pandas import DataFrame
+
+from mlebench.competitions.utils import rle_decode
+from mlebench.grade_helpers import InvalidSubmissionError
+from mlebench.metrics import dice_coefficient
+
+
+def grade(submission: DataFrame, answers: DataFrame) -> float:
+    expected_answer_columns = ["record_id", "encoded_pixels", "height", "width"]
+    expected_submission_columns = ["record_id", "encoded_pixels"]
+
+    assert set(answers.columns) == set(
+        expected_answer_columns
+    ), f"Expected answers to have columns {expected_answer_columns}, but got {answers.columns} instead!"
+
+    if not set(expected_submission_columns).issubset(set(submission.columns)):
+        raise InvalidSubmissionError(
+            f"Expected submission to have columns {expected_submission_columns}, but got {submission.columns} instead!"
+        )
+
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError(
+            f"Expected submission to have {len(answers)} rows, but got {len(submission)} instead!"
+        )
+
+    submission = submission.sort_values(by="record_id")
+    answers = answers.sort_values(by="record_id")
+
+    if (submission["record_id"].values != answers["record_id"].values).any():
+        raise InvalidSubmissionError(
+            "Expected submission to have the same record_id's as answers, but got a different set of record_id's!"
+        )
+
+    # Replace "-" (the empty-mask marker) in "encoded_pixels" with ""
+    submission["encoded_pixels"] = submission["encoded_pixels"].replace("-", "")
+    answers["encoded_pixels"] = answers["encoded_pixels"].replace("-", "")
+
+    y_preds_rle = submission["encoded_pixels"].values
+    y_trues_rle, y_true_height, y_true_width = (
+        answers["encoded_pixels"].values,
+        answers["height"].values,
+        answers["width"].values,
+    )
+
+    # Decode the submitted masks first: a malformed RLE here is a submission error
+    try:
+        y_preds = np.array(
+            [
+                rle_decode(rle, height=int(height), width=int(width))
+                for (rle, height, width) in zip(y_preds_rle, y_true_height, y_true_width)
+            ]
+        )
+    except AssertionError as e:
+        raise InvalidSubmissionError(f"Error decoding RLE masks: {e}")
+
+    y_trues = np.array(
+        [
+            rle_decode(rle, height=int(height), width=int(width))
+            for (rle, height, width) in zip(y_trues_rle, y_true_height, y_true_width)
+        ]
+    )
+
+    # Flatten from [3D array (list of 2D masks)] into [1D array] then compute a global dice coefficient.
+    # This competition is evaluated on the **global** Dice coefficient. The Dice coefficient can be used
+    # to compare the pixel-wise agreement between a predicted segmentation and its corresponding ground
+    # truth. The formula is given by:
+    #     \frac{2 \cdot |X \cap Y|}{|X| + |Y|}
+    # where X is the entire set of predicted contrail pixels for **all** observations in the test data
+    # and Y is the ground truth set of **all** contrail pixels in the test data.
+    y_preds_flat = y_preds.ravel()
+    y_trues_flat = y_trues.ravel()
+    score = dice_coefficient(y_trues_flat, y_preds_flat)
+
+    return score
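`mlebench.metrics.dice_coefficient` itself is not shown in this diff. A minimal sketch consistent with the global-Dice formula quoted in the comment (2·|X ∩ Y| / (|X| + |Y|) over the flattened binary masks), assuming binary inputs and, by convention, a perfect score when both masks are empty:

    import numpy as np

    def dice_coefficient(y_true: np.ndarray, y_pred: np.ndarray) -> float:
        """Global Dice over flat binary masks: 2*|X & Y| / (|X| + |Y|)."""
        y_true = np.asarray(y_true, dtype=bool)
        y_pred = np.asarray(y_pred, dtype=bool)
        denom = y_true.sum() + y_pred.sum()
        if denom == 0:
            return 1.0  # both masks empty: treated as perfect agreement (assumption)
        return 2.0 * (y_true & y_pred).sum() / denom

    # e.g. dice_coefficient(np.array([1, 1, 0, 0]), np.array([1, 0, 0, 0])) == 2/3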