dslighting 1.7.1__py3-none-any.whl → 1.7.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dslighting/__init__.py +1 -1
- dslighting/core/agent.py +78 -62
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/METADATA +3 -1
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/RECORD +352 -7
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/top_level.txt +1 -0
- mlebench/README.md +39 -0
- mlebench/__init__.py +0 -0
- mlebench/cli.py +221 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py +161 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py +425 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare.py +483 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare_val.py +719 -0
- mlebench/competitions/AI4Code/grade.py +70 -0
- mlebench/competitions/AI4Code/prepare.py +84 -0
- mlebench/competitions/AI4Code/prepare_val.py +159 -0
- mlebench/competitions/__init__.py +0 -0
- mlebench/competitions/aerial-cactus-identification/grade.py +11 -0
- mlebench/competitions/aerial-cactus-identification/prepare.py +71 -0
- mlebench/competitions/aerial-cactus-identification/prepare_val.py +133 -0
- mlebench/competitions/alaska2-image-steganalysis/grade.py +136 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare.py +88 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare_val.py +148 -0
- mlebench/competitions/aptos2019-blindness-detection/grade.py +35 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare.py +75 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare_val.py +123 -0
- mlebench/competitions/bike-sharing-demand/__init__.py +0 -0
- mlebench/competitions/bike-sharing-demand/grade.py +55 -0
- mlebench/competitions/bike-sharing-demand/prepare.py +37 -0
- mlebench/competitions/billion-word-imputation/grade.py +37 -0
- mlebench/competitions/billion-word-imputation/prepare.py +107 -0
- mlebench/competitions/billion-word-imputation/prepare_val.py +179 -0
- mlebench/competitions/bms-molecular-translation/grade.py +40 -0
- mlebench/competitions/bms-molecular-translation/prepare.py +68 -0
- mlebench/competitions/bms-molecular-translation/prepare_val.py +131 -0
- mlebench/competitions/cassava-leaf-disease-classification/grade.py +12 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare.py +113 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare_val.py +186 -0
- mlebench/competitions/cdiscount-image-classification-challenge/grade.py +11 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare.py +144 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare_val.py +205 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/grade.py +67 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare.py +31 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare_val.py +94 -0
- mlebench/competitions/champs-scalar-coupling/grade.py +60 -0
- mlebench/competitions/champs-scalar-coupling/prepare.py +116 -0
- mlebench/competitions/champs-scalar-coupling/prepare_val.py +155 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/__init__.py +0 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/grade.py +40 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/prepare.py +41 -0
- mlebench/competitions/demand-forecasting-kernels-only/__init__.py +0 -0
- mlebench/competitions/demand-forecasting-kernels-only/grade.py +66 -0
- mlebench/competitions/demand-forecasting-kernels-only/prepare.py +27 -0
- mlebench/competitions/demand_forecasting_kernels_only/__init__.py +0 -0
- mlebench/competitions/demand_forecasting_kernels_only/grade.py +66 -0
- mlebench/competitions/demand_forecasting_kernels_only/prepare.py +27 -0
- mlebench/competitions/denoising-dirty-documents/grade.py +44 -0
- mlebench/competitions/denoising-dirty-documents/prepare.py +134 -0
- mlebench/competitions/denoising-dirty-documents/prepare_val.py +178 -0
- mlebench/competitions/detecting-insults-in-social-commentary/grade.py +11 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare.py +72 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare_val.py +128 -0
- mlebench/competitions/dog-breed-identification/dogs.py +124 -0
- mlebench/competitions/dog-breed-identification/grade.py +42 -0
- mlebench/competitions/dog-breed-identification/prepare.py +55 -0
- mlebench/competitions/dog-breed-identification/prepare_val.py +104 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/grade.py +43 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare.py +70 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare_val.py +143 -0
- mlebench/competitions/ethanol-concentration/grade.py +23 -0
- mlebench/competitions/ethanol-concentration/prepare.py +90 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py +60 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py +41 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py +92 -0
- mlebench/competitions/feedback-prize-english-language-learning/__init__.py +0 -0
- mlebench/competitions/feedback-prize-english-language-learning/grade.py +60 -0
- mlebench/competitions/feedback-prize-english-language-learning/prepare.py +39 -0
- mlebench/competitions/freesound-audio-tagging-2019/grade.py +64 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare.py +94 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py +175 -0
- mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py +83 -0
- mlebench/competitions/google-quest-challenge/classes.py +32 -0
- mlebench/competitions/google-quest-challenge/grade.py +45 -0
- mlebench/competitions/google-quest-challenge/prepare.py +58 -0
- mlebench/competitions/google-quest-challenge/prepare_val.py +120 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py +77 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare.py +155 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare_val.py +211 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/grade.py +42 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare.py +102 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare_val.py +132 -0
- mlebench/competitions/handwriting/grade.py +23 -0
- mlebench/competitions/handwriting/prepare.py +179 -0
- mlebench/competitions/herbarium-2020-fgvc7/grade.py +34 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare.py +251 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare_val.py +242 -0
- mlebench/competitions/herbarium-2021-fgvc8/grade.py +34 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare.py +251 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare_val.py +222 -0
- mlebench/competitions/herbarium-2022-fgvc9/grade.py +31 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare.py +233 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare_val.py +213 -0
- mlebench/competitions/histopathologic-cancer-detection/grade.py +12 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare.py +59 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare_val.py +131 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/constants.py +9 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/grade.py +43 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kaggle_metric_utilities.py +96 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kullback_leibler_divergence.py +118 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare.py +121 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare_val.py +190 -0
- mlebench/competitions/hotel-id-2021-fgvc8/grade.py +41 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare.py +63 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare_val.py +132 -0
- mlebench/competitions/hubmap-kidney-segmentation/grade.py +62 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare.py +108 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare_val.py +153 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/grade.py +111 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare.py +127 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare_val.py +183 -0
- mlebench/competitions/ili/grade.py +60 -0
- mlebench/competitions/ili/prepare.py +99 -0
- mlebench/competitions/imet-2020-fgvc7/grade.py +54 -0
- mlebench/competitions/imet-2020-fgvc7/prepare.py +77 -0
- mlebench/competitions/imet-2020-fgvc7/prepare_val.py +157 -0
- mlebench/competitions/inaturalist-2019-fgvc6/grade.py +35 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare.py +259 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare_val.py +304 -0
- mlebench/competitions/instant-gratification/__init__.py +0 -0
- mlebench/competitions/instant-gratification/grade.py +55 -0
- mlebench/competitions/instant-gratification/prepare.py +25 -0
- mlebench/competitions/instant_gratification/__init__.py +0 -0
- mlebench/competitions/instant_gratification/grade.py +55 -0
- mlebench/competitions/instant_gratification/prepare.py +25 -0
- mlebench/competitions/invasive-species-monitoring/grade.py +11 -0
- mlebench/competitions/invasive-species-monitoring/prepare.py +97 -0
- mlebench/competitions/invasive-species-monitoring/prepare_val.py +164 -0
- mlebench/competitions/iwildcam-2019-fgvc6/grade.py +44 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare.py +118 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare_val.py +194 -0
- mlebench/competitions/iwildcam-2020-fgvc7/grade.py +11 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare.py +164 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare_val.py +245 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/classes.py +1 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/grade.py +54 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare.py +42 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare_val.py +88 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/grade.py +153 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare.py +36 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare_val.py +117 -0
- mlebench/competitions/kuzushiji-recognition/grade.py +58 -0
- mlebench/competitions/kuzushiji-recognition/kuzushiji_metric.py +118 -0
- mlebench/competitions/kuzushiji-recognition/prepare.py +92 -0
- mlebench/competitions/kuzushiji-recognition/prepare_val.py +149 -0
- mlebench/competitions/leaf-classification/classes.py +101 -0
- mlebench/competitions/leaf-classification/grade.py +44 -0
- mlebench/competitions/leaf-classification/prepare.py +60 -0
- mlebench/competitions/leaf-classification/prepare_val.py +116 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/grade.py +44 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare.py +51 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare_val.py +96 -0
- mlebench/competitions/liverpool-ion-switching/__init__.py +0 -0
- mlebench/competitions/liverpool-ion-switching/grade.py +52 -0
- mlebench/competitions/liverpool-ion-switching/prepare.py +27 -0
- mlebench/competitions/liverpool_ion_switching/__init__.py +0 -0
- mlebench/competitions/liverpool_ion_switching/grade.py +52 -0
- mlebench/competitions/liverpool_ion_switching/prepare.py +27 -0
- mlebench/competitions/lmsys-chatbot-arena/grade.py +63 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare.py +52 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare_val.py +115 -0
- mlebench/competitions/mcm_2024_c_test/grade.py +107 -0
- mlebench/competitions/mcm_2024_c_test/prepare.py +2 -0
- mlebench/competitions/ml2021spring-hw2/grade.py +11 -0
- mlebench/competitions/ml2021spring-hw2/prepare.py +58 -0
- mlebench/competitions/ml2021spring-hw2/prepare_val.py +135 -0
- mlebench/competitions/mlsp-2013-birds/grade.py +11 -0
- mlebench/competitions/mlsp-2013-birds/prepare.py +182 -0
- mlebench/competitions/mlsp-2013-birds/prepare_val.py +241 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/grade.py +11 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare.py +58 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare_val.py +120 -0
- mlebench/competitions/multi-modal-gesture-recognition/grade.py +58 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare.py +85 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare_val.py +139 -0
- mlebench/competitions/my-custom-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-03/grade.py +107 -0
- mlebench/competitions/new-my-task-03/prepare.py +2 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/grade.py +28 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare.py +44 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare_val.py +89 -0
- mlebench/competitions/nfl-player-contact-detection/grade.py +36 -0
- mlebench/competitions/nfl-player-contact-detection/prepare.py +101 -0
- mlebench/competitions/nfl-player-contact-detection/prepare_val.py +186 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/grade.py +47 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare.py +77 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare_val.py +144 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/grade.py +74 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare.py +95 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare_val.py +167 -0
- mlebench/competitions/paddy-disease-classification/grade.py +35 -0
- mlebench/competitions/paddy-disease-classification/prepare.py +69 -0
- mlebench/competitions/paddy-disease-classification/prepare_val.py +122 -0
- mlebench/competitions/petfinder-pawpularity-score/grade.py +41 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare.py +76 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare_val.py +154 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/grade.py +41 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare.py +74 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare_val.py +160 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/grade.py +54 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare.py +65 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare_val.py +130 -0
- mlebench/competitions/plant-seedlings-classification/grade.py +39 -0
- mlebench/competitions/plant-seedlings-classification/prepare.py +91 -0
- mlebench/competitions/plant-seedlings-classification/prepare_val.py +158 -0
- mlebench/competitions/playground-series-s3e1/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e1/grade.py +52 -0
- mlebench/competitions/playground-series-s3e1/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e11/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e11/grade.py +55 -0
- mlebench/competitions/playground-series-s3e11/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e18/grade.py +39 -0
- mlebench/competitions/playground-series-s3e18/prepare.py +36 -0
- mlebench/competitions/playground-series-s3e18/prepare_val.py +89 -0
- mlebench/competitions/playground_series_s3e1/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e1/grade.py +52 -0
- mlebench/competitions/playground_series_s3e1/prepare.py +25 -0
- mlebench/competitions/playground_series_s3e11/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e11/grade.py +55 -0
- mlebench/competitions/playground_series_s3e11/prepare.py +25 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/grade.py +44 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare.py +68 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare_val.py +146 -0
- mlebench/competitions/random-acts-of-pizza/grade.py +14 -0
- mlebench/competitions/random-acts-of-pizza/prepare.py +80 -0
- mlebench/competitions/random-acts-of-pizza/prepare_val.py +144 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/classes.py +11 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/grade.py +31 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare.py +53 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare_val.py +113 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/grade.py +124 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare.py +219 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare_val.py +257 -0
- mlebench/competitions/rsna-breast-cancer-detection/grade.py +65 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare.py +141 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare_val.py +201 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/grade.py +13 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare.py +47 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare_val.py +97 -0
- mlebench/competitions/santander-customer-satisfaction/grade.py +10 -0
- mlebench/competitions/santander-customer-satisfaction/prepare.py +41 -0
- mlebench/competitions/sciencebench-001-clintox-nn/__init__.py +0 -0
- mlebench/competitions/sciencebench-001-clintox-nn/grade.py +56 -0
- mlebench/competitions/sciencebench-001-clintox-nn/prepare.py +75 -0
- mlebench/competitions/sciencebench-015-aai/grade.py +37 -0
- mlebench/competitions/sciencebench-015-aai/prepare.py +102 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/grade.py +58 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/prepare.py +69 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/grade.py +55 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/prepare.py +88 -0
- mlebench/competitions/see-click-predict-fix/__init__.py +0 -0
- mlebench/competitions/see-click-predict-fix/grade.py +66 -0
- mlebench/competitions/see-click-predict-fix/prepare.py +25 -0
- mlebench/competitions/see_click_predict_fix/__init__.py +0 -0
- mlebench/competitions/see_click_predict_fix/grade.py +66 -0
- mlebench/competitions/see_click_predict_fix/prepare.py +25 -0
- mlebench/competitions/seti-breakthrough-listen/grade.py +11 -0
- mlebench/competitions/seti-breakthrough-listen/prepare.py +71 -0
- mlebench/competitions/seti-breakthrough-listen/prepare_val.py +159 -0
- mlebench/competitions/siim-covid19-detection/grade.py +194 -0
- mlebench/competitions/siim-covid19-detection/prepare.py +123 -0
- mlebench/competitions/siim-covid19-detection/prepare_val.py +164 -0
- mlebench/competitions/siim-isic-melanoma-classification/grade.py +11 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare.py +127 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare_val.py +158 -0
- mlebench/competitions/smartphone-decimeter-2022/grade.py +55 -0
- mlebench/competitions/smartphone-decimeter-2022/notebook.py +86 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare.py +143 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare_val.py +199 -0
- mlebench/competitions/spaceship-titanic/grade.py +11 -0
- mlebench/competitions/spaceship-titanic/prepare.py +23 -0
- mlebench/competitions/spaceship-titanic/prepare_val.py +61 -0
- mlebench/competitions/spooky-author-identification/classes.py +1 -0
- mlebench/competitions/spooky-author-identification/grade.py +38 -0
- mlebench/competitions/spooky-author-identification/prepare.py +40 -0
- mlebench/competitions/spooky-author-identification/prepare_val.py +78 -0
- mlebench/competitions/stanford-covid-vaccine/grade.py +65 -0
- mlebench/competitions/stanford-covid-vaccine/prepare.py +129 -0
- mlebench/competitions/stanford-covid-vaccine/prepare_val.py +199 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/grade.py +41 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare.py +105 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare_val.py +157 -0
- mlebench/competitions/tabular-playground-series-dec-2021/grade.py +11 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare.py +39 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare_val.py +99 -0
- mlebench/competitions/tabular-playground-series-may-2022/grade.py +9 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare.py +56 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare_val.py +116 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/grade.py +11 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare.py +90 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare_val.py +148 -0
- mlebench/competitions/tensorflow2-question-answering/grade.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare_val.py +187 -0
- mlebench/competitions/text-normalization-challenge-english-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare.py +115 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare_val.py +213 -0
- mlebench/competitions/text-normalization-challenge-russian-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare.py +113 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare_val.py +165 -0
- mlebench/competitions/tgs-salt-identification-challenge/grade.py +144 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare.py +158 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py +166 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py +11 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py +95 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py +141 -0
- mlebench/competitions/tmdb-box-office-prediction/__init__.py +0 -0
- mlebench/competitions/tmdb-box-office-prediction/grade.py +55 -0
- mlebench/competitions/tmdb-box-office-prediction/prepare.py +35 -0
- mlebench/competitions/tweet-sentiment-extraction/grade.py +67 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare.py +36 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare_val.py +106 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/grade.py +31 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare.py +33 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare_val.py +71 -0
- mlebench/competitions/utils.py +266 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/grade.py +158 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare.py +139 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare_val.py +193 -0
- mlebench/competitions/ventilator-pressure-prediction/__init__.py +0 -0
- mlebench/competitions/ventilator-pressure-prediction/grade.py +52 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare.py +27 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare_val.py +142 -0
- mlebench/competitions/ventilator_pressure_prediction/__init__.py +0 -0
- mlebench/competitions/ventilator_pressure_prediction/grade.py +52 -0
- mlebench/competitions/ventilator_pressure_prediction/prepare.py +27 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/grade.py +97 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare.py +122 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare_val.py +170 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/grade.py +220 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare.py +129 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare_val.py +204 -0
- mlebench/competitions/whale-categorization-playground/grade.py +41 -0
- mlebench/competitions/whale-categorization-playground/prepare.py +103 -0
- mlebench/competitions/whale-categorization-playground/prepare_val.py +196 -0
- mlebench/data.py +420 -0
- mlebench/grade.py +209 -0
- mlebench/grade_helpers.py +235 -0
- mlebench/metrics.py +75 -0
- mlebench/registry.py +332 -0
- mlebench/utils.py +346 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/WHEEL +0 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.8.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
from sklearn.model_selection import train_test_split
|
|
5
|
+
|
|
6
|
+
from mlebench.utils import read_csv
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _split_and_save(
    source_df: pd.DataFrame,
    public_dir: Path,
    private_dir: Path,
    test_size: float,
    random_state: int,
) -> pd.DataFrame:
    """
    Split *source_df* into train/test portions and write them out in the
    competition's layout.

    The public directory receives the train set, the unlabeled test set, and a
    dummy sample submission; the private directory receives the test labels in
    submission format.

    Args:
        source_df: Dataframe to split.
        public_dir: Destination for public-facing files (train set, unlabeled test set).
        private_dir: Destination for private-facing files (test set labels).
        test_size: Fraction of rows assigned to the test split.
        random_state: Seed for the random number generator.

    Returns:
        The train-split dataframe.
    """
    train_df, test_df = train_test_split(
        source_df, test_size=test_size, random_state=random_state
    )

    # The public test set must not expose the answers.
    unlabeled_test = test_df.drop(columns=["answer_start", "answer_text"])

    # Private labels are stored in the submission format.
    labels = test_df[["id", "answer_text"]]
    labels.columns = ["id", "PredictionString"]

    # Ensure both output directories exist.
    for directory in (public_dir, private_dir):
        directory.mkdir(exist_ok=True)

    # Write the split files.
    train_df.to_csv(public_dir / "train.csv", index=False)
    unlabeled_test.to_csv(public_dir / "test.csv", index=False)
    labels.to_csv(private_dir / "test.csv", index=False)

    # The sample submission mirrors the label file with placeholder text.
    sample_submission = labels.copy()
    sample_submission["PredictionString"] = "dummy text"
    sample_submission.to_csv(public_dir / "sample_submission.csv", index=False)

    assert len(sample_submission) == len(
        labels
    ), "Sample submission length does not match test length."

    return train_df
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def prepare(raw: Path, public: Path, private: Path):
    """
    Build the competition splits plus a mirrored validation split.

    The first split reproduces the original public/private competition files;
    the second split carves a validation set of comparable size out of the
    resulting train set.
    """
    # The validation split lives in sibling directories next to the main ones.
    val_public_dir = public.parent / "public_val"
    val_private_dir = private.parent / "private_val"

    # Load the initial raw training data.
    raw_train_df = read_csv(raw / "train.csv")

    # First split: the canonical competition files in `public` / `private`.
    # The outputs of this step must remain identical to the original script.
    train_subset = _split_and_save(
        source_df=raw_train_df,
        public_dir=public,
        private_dir=private,
        test_size=0.1,
        random_state=0,
    )

    # Second split: derive a validation test set from the first split's
    # training data, sized to roughly match the original test set:
    #   test_size = (size of original test) / (size of new train)
    #             = (0.1 * total) / (0.9 * total) = 0.1 / 0.9
    second_test_fraction = 0.1 / (1.0 - 0.1)

    _split_and_save(
        source_df=train_subset,
        public_dir=val_public_dir,
        private_dir=val_private_dir,
        test_size=second_test_fraction,
        random_state=0,  # Same random state for consistency
    )
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from pandas import DataFrame
|
|
3
|
+
from sklearn.metrics import mean_absolute_error
|
|
4
|
+
|
|
5
|
+
from mlebench.grade_helpers import InvalidSubmissionError
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def prepare_for_grading(submission: DataFrame, answers: DataFrame, target_column: str) -> DataFrame:
    """
    Join the submission's predictions onto the answers, matching on 'id'.

    In the merged frame the answers' target column carries the suffix '_true'
    and the submission's carries '_pred'.
    """
    # The answers frame is produced by our own pipeline, so violations here
    # are programming errors (assert), not user errors.
    assert (
        target_column in answers.columns
    ), f"Target column {target_column} not found in answers DataFrame."
    assert "id" in answers.columns, "ID column not found in answers DataFrame."
    assert "type" in answers.columns, "Type column not found in answers DataFrame."

    # The submission comes from the user, so problems are surfaced as
    # InvalidSubmissionError instead.
    if target_column not in submission.columns:
        raise InvalidSubmissionError(
            f"Target column {target_column} not found in submission DataFrame."
        )
    if "id" not in submission.columns:
        raise InvalidSubmissionError("ID column not found in submission DataFrame.")

    submitted_ids = set(submission["id"])
    expected_ids = set(answers["id"])
    if submitted_ids != expected_ids:
        raise InvalidSubmissionError(
            f"Submission is missing the following ids: {set(answers['id']) - set(submission['id'])}"
        )

    return answers.merge(submission[["id", target_column]], on="id", suffixes=("_true", "_pred"))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def log_mae(y_true, y_pred):
    """Return the natural log of the mean absolute error of the predictions."""
    return np.log(mean_absolute_error(y_true, y_pred))
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def grade(submission: DataFrame, answers: DataFrame) -> float:
    """
    Submissions are evaluated on the Log of the Mean Absolute Error, calculated for each scalar coupling type, and then averaged across types, so that a 1% decrease in MAE for one type provides the same improvement in score as a 1% decrease for another type. https://www.kaggle.com/c/champs-scalar-coupling/overview
    """
    merged = prepare_for_grading(submission, answers, "scalar_coupling_constant")

    # One log-MAE per scalar coupling type, then a flat average across types.
    per_type_log_maes = [
        log_mae(
            group["scalar_coupling_constant_true"],
            group["scalar_coupling_constant_pred"],
        )
        for _, group in merged.groupby("type")
    ]
    return np.mean(per_type_log_maes)
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import shutil
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import pandas as pd
|
|
5
|
+
from sklearn.model_selection import train_test_split
|
|
6
|
+
from tqdm import tqdm
|
|
7
|
+
|
|
8
|
+
from mlebench.utils import read_csv
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def prepare(raw: Path, public: Path, private: Path):
    """
    Build the public/private splits for the CHAMPS scalar-coupling competition.

    Splits the raw train set 90/10 by molecule, writes the new train/test/answer
    CSVs, filters every supplementary CSV down to the training molecules, and
    copies the per-molecule .xyz structure files for the train split.
    """
    # Create train, test from the raw train split, grouping by molecule so a
    # molecule never straddles the train/test boundary.
    old_train = read_csv(raw / "train.csv")
    grouped_by_molecule = list(old_train.groupby("molecule_name"))
    train_groups, test_groups = train_test_split(grouped_by_molecule, test_size=0.1, random_state=0)
    new_train = pd.concat([group for _, group in train_groups])
    answers = pd.concat([group for _, group in test_groups])
    new_test = answers.drop(columns=["scalar_coupling_constant"])

    # Create sample submission: one zeroed prediction per test row.
    sample_submission = new_test[["id"]].copy()
    sample_submission["scalar_coupling_constant"] = 0

    # Supplementary CSVs are only provided for training molecules. Filter each
    # of them with one data-driven loop (replaces six hand-copied stanzas and
    # matches the style of _process_and_save_split in prepare_val).
    train_molecules = set(new_train["molecule_name"])
    supplementary_names = [
        "structures",
        "dipole_moments",
        "magnetic_shielding_tensors",
        "mulliken_charges",
        "potential_energy",
        "scalar_coupling_contributions",
    ]
    data_csvs = {}
    for name in supplementary_names:
        df = read_csv(raw / f"{name}.csv")
        data_csvs[name] = df[df["molecule_name"].isin(train_molecules)]

    # Checks before writing
    for name, dataset in data_csvs.items():
        assert (
            set(dataset["molecule_name"]) == train_molecules
        ), f"Filtered {name} should exactly match the molecule names present in the new_train set."

    assert set(new_train["molecule_name"]).isdisjoint(
        set(new_test["molecule_name"])
    ), "Train and test sets should not share any samples with the same molecule name."

    assert set(new_train["id"]).isdisjoint(
        set(new_test["id"])
    ), "Train and test sets should not share any samples with the same id."

    assert len(sample_submission) == len(
        new_test
    ), "Sample submission length does not match test length."

    assert (
        sample_submission.shape[1] == 2
    ), f"Sample submission should have 2 columns, but has {sample_submission.shape[1]}"

    assert new_test.shape[1] == 5, f"new_test should have 5 columns, but has {new_test.shape[1]}"

    assert answers.shape[1] == 6, f"answers should have 6 columns, but has {answers.shape[1]}"

    assert new_train.shape[1] == 6, f"new_train should have 6 columns, but has {new_train.shape[1]}"

    # Copy over molecule structure data individual files
    for molecule_name in tqdm(
        new_train["molecule_name"].unique(), desc="Copying molecule structure files"
    ):
        src_file = raw / "structures" / f"{molecule_name}.xyz"
        dst_file = public / "structures" / f"{molecule_name}.xyz"
        dst_file.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(src=src_file, dst=dst_file)

    # Write CSVs
    answers.to_csv(private / "answers.csv", index=False)

    new_train.to_csv(public / "train.csv", index=False)
    new_test.to_csv(public / "test.csv", index=False)
    sample_submission.to_csv(public / "sample_submission.csv", index=False)
    for name, dataset in data_csvs.items():
        dataset.to_csv(public / f"{name}.csv", index=False)

    # Checks after writing
    assert len(list((public / "structures").glob("*.xyz"))) == len(
        new_train["molecule_name"].unique()
    ), "The number of files in public/structures should match the number of unique molecule names in the train set."
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
import shutil
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict
|
|
4
|
+
|
|
5
|
+
import pandas as pd
|
|
6
|
+
from sklearn.model_selection import train_test_split
|
|
7
|
+
from tqdm import tqdm
|
|
8
|
+
|
|
9
|
+
from mlebench.utils import read_csv
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _process_and_save_split(
    train_df: pd.DataFrame,
    test_df: pd.DataFrame,
    answers_df: pd.DataFrame,
    public_path: Path,
    private_path: Path,
    raw_path: Path,
    raw_supplementary_data: Dict[str, pd.DataFrame],
) -> None:
    """
    Helper function to process and save a single data split (e.g., train/test or train_val/test_val).

    This function takes a pair of train/test sets, filters the supplementary data accordingly,
    performs checks, and writes all the necessary files to the specified public and private directories.

    Args:
        train_df: Rows of the original train CSV kept for training.
        test_df: Held-out rows with the target column already dropped.
        answers_df: Same rows as `test_df`, but still including the target column.
        public_path: Directory that participants may see.
        private_path: Directory holding grading-only files (answers.csv).
        raw_path: Root of the raw download; source of the per-molecule .xyz files.
        raw_supplementary_data: Unfiltered supplementary CSVs keyed by file stem.
    """
    # Output directories may not exist yet (especially the *_val variants).
    public_path.mkdir(exist_ok=True)
    private_path.mkdir(exist_ok=True)

    # Create sample submission for the current test set
    sample_submission = test_df[["id"]].copy()
    sample_submission["scalar_coupling_constant"] = 0

    # Filter supplementary data to only include molecules present in the training set
    train_molecules = set(train_df["molecule_name"])
    filtered_supplementary_data = {}
    for name, df in raw_supplementary_data.items():
        filtered_supplementary_data[name] = df[df["molecule_name"].isin(train_molecules)]

    # Checks before writing
    for name, dataset in filtered_supplementary_data.items():
        assert set(dataset["molecule_name"]) == train_molecules, (
            f"[{public_path.name}] Filtered {name} should exactly match the molecule names "
            "present in the train set."
        )

    # Splits were made by molecule group, so no molecule (and hence no row id)
    # may appear on both sides.
    assert set(train_df["molecule_name"]).isdisjoint(
        set(test_df["molecule_name"])
    ), f"[{public_path.name}] Train and test sets should not share any molecule names."

    assert set(train_df["id"]).isdisjoint(
        set(test_df["id"])
    ), f"[{public_path.name}] Train and test sets should not share any sample ids."

    assert len(sample_submission) == len(
        test_df
    ), f"[{public_path.name}] Sample submission length does not match test length."

    # Write CSVs to public and private directories
    answers_df.to_csv(private_path / "answers.csv", index=False)

    train_df.to_csv(public_path / "train.csv", index=False)
    test_df.to_csv(public_path / "test.csv", index=False)
    sample_submission.to_csv(public_path / "sample_submission.csv", index=False)

    for name, df in filtered_supplementary_data.items():
        df.to_csv(public_path / f"{name}.csv", index=False)

    # Copy over molecule structure .xyz files for the training set
    structures_xyz_path = public_path / "structures"
    structures_xyz_path.mkdir(parents=True, exist_ok=True)
    for molecule_name in tqdm(
        train_df["molecule_name"].unique(),
        desc=f"Copying .xyz files to {public_path.name}",
    ):
        src_file = raw_path / "structures" / f"{molecule_name}.xyz"
        dst_file = structures_xyz_path / f"{molecule_name}.xyz"
        shutil.copyfile(src=src_file, dst=dst_file)

    # Checks after writing
    assert len(list(structures_xyz_path.glob("*.xyz"))) == len(
        train_df["molecule_name"].unique()
    ), (
        f"[{public_path.name}] The number of files in {structures_xyz_path} should match the number "
        "of unique molecule names in the train set."
    )
88
|
+
|
|
89
|
+
def prepare(raw: Path, public: Path, private: Path):
    """
    Prepares the data by performing two splits:
    1. A main split of the raw data into a definitive train/test set.
       Outputs are saved to `public/` and `private/`.
    2. A validation split of the main training set into a smaller train/validation set.
       Outputs are saved to `public_val/` and `private_val/`, mirroring the main output structure.
    """
    # Load all data from raw directory first
    old_train = read_csv(raw / "train.csv")

    # Load all supplementary data into a dictionary for easy filtering later
    raw_supplementary_data = {
        "structures": read_csv(raw / "structures.csv"),
        "dipole_moments": read_csv(raw / "dipole_moments.csv"),
        "magnetic_shielding_tensors": read_csv(raw / "magnetic_shielding_tensors.csv"),
        "mulliken_charges": read_csv(raw / "mulliken_charges.csv"),
        "potential_energy": read_csv(raw / "potential_energy.csv"),
        "scalar_coupling_contributions": read_csv(raw / "scalar_coupling_contributions.csv"),
    }

    # --- Create main Train/Test Split (Original Logic) ---
    # The outputs of this split are final and must not be changed.
    # Grouping by molecule keeps every molecule entirely on one side of the split.
    grouped_by_molecule = list(old_train.groupby("molecule_name"))
    train_groups, test_groups = train_test_split(grouped_by_molecule, test_size=0.1, random_state=0)
    new_train = pd.concat([group for _, group in train_groups])
    answers = pd.concat([group for _, group in test_groups])
    new_test = answers.drop(columns=["scalar_coupling_constant"])

    # Process and save the main split to `public` and `private` directories
    _process_and_save_split(
        train_df=new_train,
        test_df=new_test,
        answers_df=answers,
        public_path=public,
        private_path=private,
        raw_path=raw,
        raw_supplementary_data=raw_supplementary_data,
    )

    # --- Create Validation Split (New Logic) ---
    # This second split uses the `new_train` set from above to create a smaller
    # training set and a new validation set.
    public_val = public.parent / "public_val"
    private_val = private.parent / "private_val"

    # Split the `new_train` set by molecule groups again.
    # The original test set was 10% of the total. The new train set is 90% of the total.
    # To get a validation set of a similar size (10% of total), the test_size for this
    # second split should be (10% / 90%) = 1/9.
    train_val_groups, test_val_groups = train_test_split(
        train_groups, test_size=1 / 9, random_state=0
    )
    train_val = pd.concat([group for _, group in train_val_groups])
    answers_val = pd.concat([group for _, group in test_val_groups])
    test_val = answers_val.drop(columns=["scalar_coupling_constant"])

    # Process and save the validation split to `public_val` and `private_val` directories
    _process_and_save_split(
        train_df=train_val,
        test_df=test_val,
        answers_df=answers_val,
        public_path=public_val,
        private_path=private_val,
        raw_path=raw,
        raw_supplementary_data=raw_supplementary_data,
    )
File without changes
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
from mlebench.grade_helpers import InvalidSubmissionError
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    """Return accuracy between predicted and true starting boards."""
    if "id" not in submission.columns:
        raise InvalidSubmissionError("Submission is missing required column 'id'.")

    start_cols = [col for col in answers.columns if col.startswith("start_")]
    if not start_cols:
        raise ValueError("Answers DataFrame does not contain any start_* columns.")

    missing_cols = [col for col in start_cols if col not in submission.columns]
    if missing_cols:
        raise InvalidSubmissionError(
            f"Submission is missing required start columns: {missing_cols[:5]}"
        )

    # Left-join predictions onto the answers so every answer id must be matched;
    # unmatched ids surface below as NaN predictions.
    joined = answers[["id", *start_cols]].merge(
        submission[["id", *start_cols]],
        on="id",
        how="left",
        suffixes=("_true", "_pred"),
    )

    pred_cols = [f"{c}_pred" for c in start_cols]
    true_cols = [f"{c}_true" for c in start_cols]
    if joined[pred_cols].isna().to_numpy().any():
        raise InvalidSubmissionError("Submission contains missing predictions for some ids.")

    preds = joined[pred_cols].to_numpy(dtype=float)
    truths = joined[true_cols].to_numpy(dtype=float)

    if not np.isin(preds, [0, 1]).all():
        raise InvalidSubmissionError("Predictions must be binary (0 or 1).")

    # Cells are 0/1, so mean absolute error equals the fraction of wrong cells;
    # accuracy is its complement.
    return float(1.0 - np.abs(preds - truths).mean())
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
import pandas as pd
|
|
4
|
+
|
|
5
|
+
from mlebench.utils import read_csv
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def prepare(raw: Path, public: Path, private: Path) -> None:
    """Prepare Reverse Game of Life tables for MLE-Bench.

    Reads train/test/answer CSVs from `raw`, publishes the participant-visible
    files to `public`, and writes the merged grading table to `private`.
    """
    public.mkdir(parents=True, exist_ok=True)
    private.mkdir(parents=True, exist_ok=True)

    train = read_csv(raw / "train.csv")
    test = read_csv(raw / "test.csv")
    answers = read_csv(raw / "test_answer.csv")

    # Reuse the official sample submission when present; otherwise synthesize
    # an all-zero one with the same id/start_* layout as the answers.
    sample_path = raw / "sample_submission.csv"
    if sample_path.exists():
        sample_submission = read_csv(sample_path)
    else:
        start_cols = [col for col in answers.columns if col.startswith("start_")]
        sample_submission = answers[["id", *start_cols]].copy()
        sample_submission[start_cols] = 0

    # Attach the true starting boards to each test row for grading; validate
    # the join is strictly one-to-one on id.
    merged_private = test.merge(answers, on="id", how="left", validate="one_to_one")
    start_cols = [col for col in answers.columns if col.startswith("start_")]
    if merged_private[start_cols].isna().any().any():
        missing_ids = merged_private.loc[merged_private[start_cols].isna().any(axis=1), "id"].tolist()
        raise ValueError(f"Missing start cells for ids: {missing_ids[:5]}")

    train.to_csv(public / "train.csv", index=False)
    test.to_csv(public / "test.csv", index=False)
    sample_submission.to_csv(public / "sample_submission.csv", index=False)

    merged_private.to_csv(private / "test.csv", index=False)
    answers.to_csv(private / "gold_submission.csv", index=False)

    # Sanity checks on the written tables.
    assert len(test) == len(answers) == len(merged_private), "Public test and answers row counts differ."
    assert sample_submission.columns.tolist()[0] == "id", "Sample submission must start with 'id'."
    for col in start_cols:
        assert col in sample_submission.columns, f"Sample submission missing column {col}."
File without changes
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
from mlebench.grade_helpers import InvalidSubmissionError
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def smape(y_true, y_pred):
    """Symmetric mean absolute percentage error, reported on a 0-100 scale.

    NOTE(review): the canonical Kaggle SMAPE carries a factor of 2 in the
    numerator (equivalently a 0-200 scale); this variant reports half that
    scale — confirm this is intentional before comparing to leaderboards.
    """
    abs_errors = np.abs(y_true - y_pred)
    scale = np.abs(y_true) + np.abs(y_pred)

    # Define the per-element term as 0 where both actual and predicted are 0
    # (avoids 0/0 propagating into the mean).
    per_element = np.where(scale == 0, 0, abs_errors / scale)

    return 100 * np.mean(per_element)
17
|
+
|
|
18
|
+
|
|
19
|
+
def prepare_for_metric(submission: pd.DataFrame, answers: pd.DataFrame):
    """Validate the submission and return aligned (y_true, y_pred) arrays."""
    if len(submission) != len(answers):
        raise InvalidSubmissionError(
            f"Submission length ({len(submission)}) != answers length ({len(answers)})"
        )

    if "id" not in submission.columns:
        raise InvalidSubmissionError("Submission must have an 'id' column")

    if "sales" not in submission.columns:
        raise InvalidSubmissionError("Submission must have a 'sales' column")

    # Align both frames on ascending id before comparing row-wise.
    submission = submission.sort_values("id", ignore_index=True)
    answers = answers.sort_values("id", ignore_index=True)

    # After sorting, the id columns must match element-wise.
    if submission["id"].ne(answers["id"]).any():
        raise InvalidSubmissionError("Submission and answers id mismatch")

    y_pred = submission["sales"].values
    y_true = answers["sales"].values

    # Predictions must be numeric, finite-NaN-free, and non-negative.
    try:
        y_pred = y_pred.astype(float)
    except (ValueError, TypeError):
        raise InvalidSubmissionError("Predictions must be numeric")

    if np.isnan(y_pred).any():
        raise InvalidSubmissionError("Predictions cannot contain NaN values")

    if (y_pred < 0).any():
        raise InvalidSubmissionError("Predictions cannot be negative")

    return y_true, y_pred
58
|
+
|
|
59
|
+
def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    """Score a submission with SMAPE (lower is better)."""
    # Validate and align predictions with the ground truth, then score.
    y_true, y_pred = prepare_for_metric(submission, answers)
    return smape(y_true, y_pred)
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def prepare(raw: Path, public: Path, private: Path):
    """
    Prepare demand-forecasting-kernels-only dataset.

    Copies train/test/sample_submission to `public` and the held-out answers
    to `private/test.csv` for grading.
    """
    # Ensure output directories exist — every other prepare step in this
    # package creates them, and to_csv fails if they are missing.
    public.mkdir(parents=True, exist_ok=True)
    private.mkdir(parents=True, exist_ok=True)

    # Read data
    train = pd.read_csv(raw / "train.csv")
    test = pd.read_csv(raw / "test.csv")
    sample_submission = pd.read_csv(raw / "sample_submission.csv")
    test_answer = pd.read_csv(raw / "test_answer.csv")

    # Public files (visible to agents)
    train.to_csv(public / "train.csv", index=False)
    test.to_csv(public / "test.csv", index=False)
    sample_submission.to_csv(public / "sample_submission.csv", index=False)

    # Private files (for grading)
    test_answer.to_csv(private / "test.csv", index=False)

    # Validation checks
    assert len(test_answer) == len(sample_submission), \
        f"Test answer ({len(test_answer)}) and sample submission ({len(sample_submission)}) must have same length"
    assert "id" in test_answer.columns, "Test answer must have 'id' column"
    assert "sales" in test_answer.columns, "Test answer must have 'sales' column"
File without changes
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pandas as pd
|
|
3
|
+
|
|
4
|
+
from mlebench.grade_helpers import InvalidSubmissionError
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def smape(y_true, y_pred):
    """Symmetric mean absolute percentage error, reported on a 0-100 scale.

    NOTE(review): the canonical Kaggle SMAPE carries a factor of 2 in the
    numerator (equivalently a 0-200 scale); this variant reports half that
    scale — confirm this is intentional before comparing to leaderboards.
    """
    abs_errors = np.abs(y_true - y_pred)
    scale = np.abs(y_true) + np.abs(y_pred)

    # Define the per-element term as 0 where both actual and predicted are 0
    # (avoids 0/0 propagating into the mean).
    per_element = np.where(scale == 0, 0, abs_errors / scale)

    return 100 * np.mean(per_element)
17
|
+
|
|
18
|
+
|
|
19
|
+
def prepare_for_metric(submission: pd.DataFrame, answers: pd.DataFrame):
    """Validate the submission and return aligned (y_true, y_pred) arrays."""
    if len(submission) != len(answers):
        raise InvalidSubmissionError(
            f"Submission length ({len(submission)}) != answers length ({len(answers)})"
        )

    if "id" not in submission.columns:
        raise InvalidSubmissionError("Submission must have an 'id' column")

    if "sales" not in submission.columns:
        raise InvalidSubmissionError("Submission must have a 'sales' column")

    # Align both frames on ascending id before comparing row-wise.
    submission = submission.sort_values("id", ignore_index=True)
    answers = answers.sort_values("id", ignore_index=True)

    # After sorting, the id columns must match element-wise.
    if submission["id"].ne(answers["id"]).any():
        raise InvalidSubmissionError("Submission and answers id mismatch")

    y_pred = submission["sales"].values
    y_true = answers["sales"].values

    # Predictions must be numeric, NaN-free, and non-negative.
    try:
        y_pred = y_pred.astype(float)
    except (ValueError, TypeError):
        raise InvalidSubmissionError("Predictions must be numeric")

    if np.isnan(y_pred).any():
        raise InvalidSubmissionError("Predictions cannot contain NaN values")

    if (y_pred < 0).any():
        raise InvalidSubmissionError("Predictions cannot be negative")

    return y_true, y_pred
58
|
+
|
|
59
|
+
def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    """Score a submission with SMAPE (lower is better)."""
    # Validate and align predictions with the ground truth, then score.
    y_true, y_pred = prepare_for_metric(submission, answers)
    return smape(y_true, y_pred)