dslighting 1.7.1__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dslighting/__init__.py +1 -1
- dslighting/core/agent.py +78 -62
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/METADATA +1 -1
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/RECORD +352 -7
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/top_level.txt +1 -0
- mlebench/README.md +39 -0
- mlebench/__init__.py +0 -0
- mlebench/cli.py +221 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py +161 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py +425 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare.py +483 -0
- mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare_val.py +719 -0
- mlebench/competitions/AI4Code/grade.py +70 -0
- mlebench/competitions/AI4Code/prepare.py +84 -0
- mlebench/competitions/AI4Code/prepare_val.py +159 -0
- mlebench/competitions/__init__.py +0 -0
- mlebench/competitions/aerial-cactus-identification/grade.py +11 -0
- mlebench/competitions/aerial-cactus-identification/prepare.py +71 -0
- mlebench/competitions/aerial-cactus-identification/prepare_val.py +133 -0
- mlebench/competitions/alaska2-image-steganalysis/grade.py +136 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare.py +88 -0
- mlebench/competitions/alaska2-image-steganalysis/prepare_val.py +148 -0
- mlebench/competitions/aptos2019-blindness-detection/grade.py +35 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare.py +75 -0
- mlebench/competitions/aptos2019-blindness-detection/prepare_val.py +123 -0
- mlebench/competitions/bike-sharing-demand/__init__.py +0 -0
- mlebench/competitions/bike-sharing-demand/grade.py +55 -0
- mlebench/competitions/bike-sharing-demand/prepare.py +37 -0
- mlebench/competitions/billion-word-imputation/grade.py +37 -0
- mlebench/competitions/billion-word-imputation/prepare.py +107 -0
- mlebench/competitions/billion-word-imputation/prepare_val.py +179 -0
- mlebench/competitions/bms-molecular-translation/grade.py +40 -0
- mlebench/competitions/bms-molecular-translation/prepare.py +68 -0
- mlebench/competitions/bms-molecular-translation/prepare_val.py +131 -0
- mlebench/competitions/cassava-leaf-disease-classification/grade.py +12 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare.py +113 -0
- mlebench/competitions/cassava-leaf-disease-classification/prepare_val.py +186 -0
- mlebench/competitions/cdiscount-image-classification-challenge/grade.py +11 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare.py +144 -0
- mlebench/competitions/cdiscount-image-classification-challenge/prepare_val.py +205 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/grade.py +67 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare.py +31 -0
- mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare_val.py +94 -0
- mlebench/competitions/champs-scalar-coupling/grade.py +60 -0
- mlebench/competitions/champs-scalar-coupling/prepare.py +116 -0
- mlebench/competitions/champs-scalar-coupling/prepare_val.py +155 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/__init__.py +0 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/grade.py +40 -0
- mlebench/competitions/conways-reverse-game-of-life-2020/prepare.py +41 -0
- mlebench/competitions/demand-forecasting-kernels-only/__init__.py +0 -0
- mlebench/competitions/demand-forecasting-kernels-only/grade.py +66 -0
- mlebench/competitions/demand-forecasting-kernels-only/prepare.py +27 -0
- mlebench/competitions/demand_forecasting_kernels_only/__init__.py +0 -0
- mlebench/competitions/demand_forecasting_kernels_only/grade.py +66 -0
- mlebench/competitions/demand_forecasting_kernels_only/prepare.py +27 -0
- mlebench/competitions/denoising-dirty-documents/grade.py +44 -0
- mlebench/competitions/denoising-dirty-documents/prepare.py +134 -0
- mlebench/competitions/denoising-dirty-documents/prepare_val.py +178 -0
- mlebench/competitions/detecting-insults-in-social-commentary/grade.py +11 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare.py +72 -0
- mlebench/competitions/detecting-insults-in-social-commentary/prepare_val.py +128 -0
- mlebench/competitions/dog-breed-identification/dogs.py +124 -0
- mlebench/competitions/dog-breed-identification/grade.py +42 -0
- mlebench/competitions/dog-breed-identification/prepare.py +55 -0
- mlebench/competitions/dog-breed-identification/prepare_val.py +104 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/grade.py +43 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare.py +70 -0
- mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare_val.py +143 -0
- mlebench/competitions/ethanol-concentration/grade.py +23 -0
- mlebench/competitions/ethanol-concentration/prepare.py +90 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py +60 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py +41 -0
- mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py +92 -0
- mlebench/competitions/feedback-prize-english-language-learning/__init__.py +0 -0
- mlebench/competitions/feedback-prize-english-language-learning/grade.py +60 -0
- mlebench/competitions/feedback-prize-english-language-learning/prepare.py +39 -0
- mlebench/competitions/freesound-audio-tagging-2019/grade.py +64 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare.py +94 -0
- mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py +175 -0
- mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py +83 -0
- mlebench/competitions/google-quest-challenge/classes.py +32 -0
- mlebench/competitions/google-quest-challenge/grade.py +45 -0
- mlebench/competitions/google-quest-challenge/prepare.py +58 -0
- mlebench/competitions/google-quest-challenge/prepare_val.py +120 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py +77 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare.py +155 -0
- mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare_val.py +211 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/grade.py +42 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare.py +102 -0
- mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare_val.py +132 -0
- mlebench/competitions/handwriting/grade.py +23 -0
- mlebench/competitions/handwriting/prepare.py +179 -0
- mlebench/competitions/herbarium-2020-fgvc7/grade.py +34 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare.py +251 -0
- mlebench/competitions/herbarium-2020-fgvc7/prepare_val.py +242 -0
- mlebench/competitions/herbarium-2021-fgvc8/grade.py +34 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare.py +251 -0
- mlebench/competitions/herbarium-2021-fgvc8/prepare_val.py +222 -0
- mlebench/competitions/herbarium-2022-fgvc9/grade.py +31 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare.py +233 -0
- mlebench/competitions/herbarium-2022-fgvc9/prepare_val.py +213 -0
- mlebench/competitions/histopathologic-cancer-detection/grade.py +12 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare.py +59 -0
- mlebench/competitions/histopathologic-cancer-detection/prepare_val.py +131 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/constants.py +9 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/grade.py +43 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kaggle_metric_utilities.py +96 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/kullback_leibler_divergence.py +118 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare.py +121 -0
- mlebench/competitions/hms-harmful-brain-activity-classification/prepare_val.py +190 -0
- mlebench/competitions/hotel-id-2021-fgvc8/grade.py +41 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare.py +63 -0
- mlebench/competitions/hotel-id-2021-fgvc8/prepare_val.py +132 -0
- mlebench/competitions/hubmap-kidney-segmentation/grade.py +62 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare.py +108 -0
- mlebench/competitions/hubmap-kidney-segmentation/prepare_val.py +153 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/grade.py +111 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare.py +127 -0
- mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare_val.py +183 -0
- mlebench/competitions/ili/grade.py +60 -0
- mlebench/competitions/ili/prepare.py +99 -0
- mlebench/competitions/imet-2020-fgvc7/grade.py +54 -0
- mlebench/competitions/imet-2020-fgvc7/prepare.py +77 -0
- mlebench/competitions/imet-2020-fgvc7/prepare_val.py +157 -0
- mlebench/competitions/inaturalist-2019-fgvc6/grade.py +35 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare.py +259 -0
- mlebench/competitions/inaturalist-2019-fgvc6/prepare_val.py +304 -0
- mlebench/competitions/instant-gratification/__init__.py +0 -0
- mlebench/competitions/instant-gratification/grade.py +55 -0
- mlebench/competitions/instant-gratification/prepare.py +25 -0
- mlebench/competitions/instant_gratification/__init__.py +0 -0
- mlebench/competitions/instant_gratification/grade.py +55 -0
- mlebench/competitions/instant_gratification/prepare.py +25 -0
- mlebench/competitions/invasive-species-monitoring/grade.py +11 -0
- mlebench/competitions/invasive-species-monitoring/prepare.py +97 -0
- mlebench/competitions/invasive-species-monitoring/prepare_val.py +164 -0
- mlebench/competitions/iwildcam-2019-fgvc6/grade.py +44 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare.py +118 -0
- mlebench/competitions/iwildcam-2019-fgvc6/prepare_val.py +194 -0
- mlebench/competitions/iwildcam-2020-fgvc7/grade.py +11 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare.py +164 -0
- mlebench/competitions/iwildcam-2020-fgvc7/prepare_val.py +245 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/classes.py +1 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/grade.py +54 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare.py +42 -0
- mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare_val.py +88 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/grade.py +153 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare.py +36 -0
- mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare_val.py +117 -0
- mlebench/competitions/kuzushiji-recognition/grade.py +58 -0
- mlebench/competitions/kuzushiji-recognition/kuzushiji_metric.py +118 -0
- mlebench/competitions/kuzushiji-recognition/prepare.py +92 -0
- mlebench/competitions/kuzushiji-recognition/prepare_val.py +149 -0
- mlebench/competitions/leaf-classification/classes.py +101 -0
- mlebench/competitions/leaf-classification/grade.py +44 -0
- mlebench/competitions/leaf-classification/prepare.py +60 -0
- mlebench/competitions/leaf-classification/prepare_val.py +116 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/grade.py +44 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare.py +51 -0
- mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare_val.py +96 -0
- mlebench/competitions/liverpool-ion-switching/__init__.py +0 -0
- mlebench/competitions/liverpool-ion-switching/grade.py +52 -0
- mlebench/competitions/liverpool-ion-switching/prepare.py +27 -0
- mlebench/competitions/liverpool_ion_switching/__init__.py +0 -0
- mlebench/competitions/liverpool_ion_switching/grade.py +52 -0
- mlebench/competitions/liverpool_ion_switching/prepare.py +27 -0
- mlebench/competitions/lmsys-chatbot-arena/grade.py +63 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare.py +52 -0
- mlebench/competitions/lmsys-chatbot-arena/prepare_val.py +115 -0
- mlebench/competitions/mcm_2024_c_test/grade.py +107 -0
- mlebench/competitions/mcm_2024_c_test/prepare.py +2 -0
- mlebench/competitions/ml2021spring-hw2/grade.py +11 -0
- mlebench/competitions/ml2021spring-hw2/prepare.py +58 -0
- mlebench/competitions/ml2021spring-hw2/prepare_val.py +135 -0
- mlebench/competitions/mlsp-2013-birds/grade.py +11 -0
- mlebench/competitions/mlsp-2013-birds/prepare.py +182 -0
- mlebench/competitions/mlsp-2013-birds/prepare_val.py +241 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/grade.py +11 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare.py +58 -0
- mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare_val.py +120 -0
- mlebench/competitions/multi-modal-gesture-recognition/grade.py +58 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare.py +85 -0
- mlebench/competitions/multi-modal-gesture-recognition/prepare_val.py +139 -0
- mlebench/competitions/my-custom-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-01/prepare.py +2 -0
- mlebench/competitions/new-my-task-03/grade.py +107 -0
- mlebench/competitions/new-my-task-03/prepare.py +2 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/grade.py +28 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare.py +44 -0
- mlebench/competitions/new-york-city-taxi-fare-prediction/prepare_val.py +89 -0
- mlebench/competitions/nfl-player-contact-detection/grade.py +36 -0
- mlebench/competitions/nfl-player-contact-detection/prepare.py +101 -0
- mlebench/competitions/nfl-player-contact-detection/prepare_val.py +186 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/grade.py +47 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare.py +77 -0
- mlebench/competitions/nomad2018-predict-transparent-conductors/prepare_val.py +144 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/grade.py +74 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare.py +95 -0
- mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare_val.py +167 -0
- mlebench/competitions/paddy-disease-classification/grade.py +35 -0
- mlebench/competitions/paddy-disease-classification/prepare.py +69 -0
- mlebench/competitions/paddy-disease-classification/prepare_val.py +122 -0
- mlebench/competitions/petfinder-pawpularity-score/grade.py +41 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare.py +76 -0
- mlebench/competitions/petfinder-pawpularity-score/prepare_val.py +154 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/grade.py +41 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare.py +74 -0
- mlebench/competitions/plant-pathology-2020-fgvc7/prepare_val.py +160 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/grade.py +54 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare.py +65 -0
- mlebench/competitions/plant-pathology-2021-fgvc8/prepare_val.py +130 -0
- mlebench/competitions/plant-seedlings-classification/grade.py +39 -0
- mlebench/competitions/plant-seedlings-classification/prepare.py +91 -0
- mlebench/competitions/plant-seedlings-classification/prepare_val.py +158 -0
- mlebench/competitions/playground-series-s3e1/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e1/grade.py +52 -0
- mlebench/competitions/playground-series-s3e1/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e11/__init__.py +0 -0
- mlebench/competitions/playground-series-s3e11/grade.py +55 -0
- mlebench/competitions/playground-series-s3e11/prepare.py +25 -0
- mlebench/competitions/playground-series-s3e18/grade.py +39 -0
- mlebench/competitions/playground-series-s3e18/prepare.py +36 -0
- mlebench/competitions/playground-series-s3e18/prepare_val.py +89 -0
- mlebench/competitions/playground_series_s3e1/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e1/grade.py +52 -0
- mlebench/competitions/playground_series_s3e1/prepare.py +25 -0
- mlebench/competitions/playground_series_s3e11/__init__.py +0 -0
- mlebench/competitions/playground_series_s3e11/grade.py +55 -0
- mlebench/competitions/playground_series_s3e11/prepare.py +25 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/grade.py +44 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare.py +68 -0
- mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare_val.py +146 -0
- mlebench/competitions/random-acts-of-pizza/grade.py +14 -0
- mlebench/competitions/random-acts-of-pizza/prepare.py +80 -0
- mlebench/competitions/random-acts-of-pizza/prepare_val.py +144 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/classes.py +11 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/grade.py +31 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare.py +53 -0
- mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare_val.py +113 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/grade.py +124 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare.py +219 -0
- mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare_val.py +257 -0
- mlebench/competitions/rsna-breast-cancer-detection/grade.py +65 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare.py +141 -0
- mlebench/competitions/rsna-breast-cancer-detection/prepare_val.py +201 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/grade.py +13 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare.py +47 -0
- mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare_val.py +97 -0
- mlebench/competitions/santander-customer-satisfaction/grade.py +10 -0
- mlebench/competitions/santander-customer-satisfaction/prepare.py +41 -0
- mlebench/competitions/sciencebench-001-clintox-nn/__init__.py +0 -0
- mlebench/competitions/sciencebench-001-clintox-nn/grade.py +56 -0
- mlebench/competitions/sciencebench-001-clintox-nn/prepare.py +75 -0
- mlebench/competitions/sciencebench-015-aai/grade.py +37 -0
- mlebench/competitions/sciencebench-015-aai/prepare.py +102 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/grade.py +58 -0
- mlebench/competitions/sciencebench-051-brain-blood-qsar/prepare.py +69 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/grade.py +55 -0
- mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/prepare.py +88 -0
- mlebench/competitions/see-click-predict-fix/__init__.py +0 -0
- mlebench/competitions/see-click-predict-fix/grade.py +66 -0
- mlebench/competitions/see-click-predict-fix/prepare.py +25 -0
- mlebench/competitions/see_click_predict_fix/__init__.py +0 -0
- mlebench/competitions/see_click_predict_fix/grade.py +66 -0
- mlebench/competitions/see_click_predict_fix/prepare.py +25 -0
- mlebench/competitions/seti-breakthrough-listen/grade.py +11 -0
- mlebench/competitions/seti-breakthrough-listen/prepare.py +71 -0
- mlebench/competitions/seti-breakthrough-listen/prepare_val.py +159 -0
- mlebench/competitions/siim-covid19-detection/grade.py +194 -0
- mlebench/competitions/siim-covid19-detection/prepare.py +123 -0
- mlebench/competitions/siim-covid19-detection/prepare_val.py +164 -0
- mlebench/competitions/siim-isic-melanoma-classification/grade.py +11 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare.py +127 -0
- mlebench/competitions/siim-isic-melanoma-classification/prepare_val.py +158 -0
- mlebench/competitions/smartphone-decimeter-2022/grade.py +55 -0
- mlebench/competitions/smartphone-decimeter-2022/notebook.py +86 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare.py +143 -0
- mlebench/competitions/smartphone-decimeter-2022/prepare_val.py +199 -0
- mlebench/competitions/spaceship-titanic/grade.py +11 -0
- mlebench/competitions/spaceship-titanic/prepare.py +23 -0
- mlebench/competitions/spaceship-titanic/prepare_val.py +61 -0
- mlebench/competitions/spooky-author-identification/classes.py +1 -0
- mlebench/competitions/spooky-author-identification/grade.py +38 -0
- mlebench/competitions/spooky-author-identification/prepare.py +40 -0
- mlebench/competitions/spooky-author-identification/prepare_val.py +78 -0
- mlebench/competitions/stanford-covid-vaccine/grade.py +65 -0
- mlebench/competitions/stanford-covid-vaccine/prepare.py +129 -0
- mlebench/competitions/stanford-covid-vaccine/prepare_val.py +199 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/grade.py +41 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare.py +105 -0
- mlebench/competitions/statoil-iceberg-classifier-challenge/prepare_val.py +157 -0
- mlebench/competitions/tabular-playground-series-dec-2021/grade.py +11 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare.py +39 -0
- mlebench/competitions/tabular-playground-series-dec-2021/prepare_val.py +99 -0
- mlebench/competitions/tabular-playground-series-may-2022/grade.py +9 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare.py +56 -0
- mlebench/competitions/tabular-playground-series-may-2022/prepare_val.py +116 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/grade.py +11 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare.py +90 -0
- mlebench/competitions/tensorflow-speech-recognition-challenge/prepare_val.py +148 -0
- mlebench/competitions/tensorflow2-question-answering/grade.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare.py +122 -0
- mlebench/competitions/tensorflow2-question-answering/prepare_val.py +187 -0
- mlebench/competitions/text-normalization-challenge-english-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare.py +115 -0
- mlebench/competitions/text-normalization-challenge-english-language/prepare_val.py +213 -0
- mlebench/competitions/text-normalization-challenge-russian-language/grade.py +49 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare.py +113 -0
- mlebench/competitions/text-normalization-challenge-russian-language/prepare_val.py +165 -0
- mlebench/competitions/tgs-salt-identification-challenge/grade.py +144 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare.py +158 -0
- mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py +166 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py +11 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py +95 -0
- mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py +141 -0
- mlebench/competitions/tmdb-box-office-prediction/__init__.py +0 -0
- mlebench/competitions/tmdb-box-office-prediction/grade.py +55 -0
- mlebench/competitions/tmdb-box-office-prediction/prepare.py +35 -0
- mlebench/competitions/tweet-sentiment-extraction/grade.py +67 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare.py +36 -0
- mlebench/competitions/tweet-sentiment-extraction/prepare_val.py +106 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/grade.py +31 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare.py +33 -0
- mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare_val.py +71 -0
- mlebench/competitions/utils.py +266 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/grade.py +158 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare.py +139 -0
- mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare_val.py +193 -0
- mlebench/competitions/ventilator-pressure-prediction/__init__.py +0 -0
- mlebench/competitions/ventilator-pressure-prediction/grade.py +52 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare.py +27 -0
- mlebench/competitions/ventilator-pressure-prediction/prepare_val.py +142 -0
- mlebench/competitions/ventilator_pressure_prediction/__init__.py +0 -0
- mlebench/competitions/ventilator_pressure_prediction/grade.py +52 -0
- mlebench/competitions/ventilator_pressure_prediction/prepare.py +27 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/grade.py +97 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare.py +122 -0
- mlebench/competitions/vesuvius-challenge-ink-detection/prepare_val.py +170 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/grade.py +220 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare.py +129 -0
- mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare_val.py +204 -0
- mlebench/competitions/whale-categorization-playground/grade.py +41 -0
- mlebench/competitions/whale-categorization-playground/prepare.py +103 -0
- mlebench/competitions/whale-categorization-playground/prepare_val.py +196 -0
- mlebench/data.py +420 -0
- mlebench/grade.py +209 -0
- mlebench/grade_helpers.py +235 -0
- mlebench/metrics.py +75 -0
- mlebench/registry.py +332 -0
- mlebench/utils.py +346 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/WHEEL +0 -0
- {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/entry_points.txt +0 -0

mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py
@@ -0,0 +1,166 @@
import shutil
from pathlib import Path

import pandas as pd
from sklearn.model_selection import train_test_split

from mlebench.utils import extract, read_csv


def _process_split(
    train_df: pd.DataFrame,
    test_df: pd.DataFrame,
    public_dir: Path,
    private_dir: Path,
    raw_images_dir: Path,
    raw_masks_dir: Path,
    all_depths_df: pd.DataFrame,
) -> None:
    """
    Processes a single data split (e.g., train/test or train_val/test_val),
    saving all required files and performing sanity checks.

    Args:
        train_df: DataFrame for the training set.
        test_df: DataFrame for the test set.
        public_dir: The public output directory for this split.
        private_dir: The private output directory for this split.
        raw_images_dir: Path to the directory containing all source images.
        raw_masks_dir: Path to the directory containing all source masks.
        all_depths_df: DataFrame containing depth information for all samples.
    """
    public_dir.mkdir(parents=True, exist_ok=True)
    private_dir.mkdir(parents=True, exist_ok=True)

    train_ids = set(train_df["id"])
    test_ids = set(test_df["id"])

    assert train_ids.isdisjoint(test_ids), "`id` is not disjoint between train and test sets"

    train_df.sort_values(by="id").to_csv(public_dir / "train.csv", index=False)
    test_df.sort_values(by="id").to_csv(private_dir / "test.csv", index=False)

    all_source_images = set(raw_images_dir.glob("*.png"))
    train_imgs = set(img for img in all_source_images if img.stem in train_ids)
    test_imgs = set(img for img in all_source_images if img.stem in test_ids)

    assert train_imgs.isdisjoint(test_imgs), "Images are not disjoint between train and test sets"

    (public_dir / "train" / "images").mkdir(parents=True, exist_ok=True)
    (public_dir / "train" / "masks").mkdir(parents=True, exist_ok=True)

    for fpath in train_imgs:
        shutil.copyfile(src=fpath, dst=public_dir / "train" / "images" / fpath.name)
        shutil.copyfile(src=raw_masks_dir / fpath.name, dst=public_dir / "train" / "masks" / fpath.name)

    (public_dir / "test" / "images").mkdir(parents=True, exist_ok=True)

    for fpath in test_imgs:
        shutil.copyfile(src=fpath, dst=public_dir / "test" / "images" / fpath.name)

    sample_submission = test_df.drop(columns=["rle_mask"]).copy()
    sample_submission["rle_mask"] = "1 1"
    sample_submission.sort_values(by="id").to_csv(public_dir / "sample_submission.csv", index=False)

    depths_mask = all_depths_df["id"].isin(train_ids)
    filtered_depths = all_depths_df[depths_mask]
    filtered_depths.sort_values(by="id").to_csv(public_dir / "depths.csv", index=False)

    # Sanity checks
    assert (public_dir / "train.csv").exists(), "`train.csv` doesn't exist!"
    assert (public_dir / "sample_submission.csv").exists(), "`sample_submission.csv` doesn't exist!"
    assert (public_dir / "depths.csv").exists(), "`depths.csv` doesn't exist!"
    assert (public_dir / "train").exists(), "`train` directory doesn't exist!"
    assert (public_dir / "test").exists(), "`test` directory doesn't exist!"
    assert (private_dir / "test.csv").exists(), "`test.csv` doesn't exist!"

    actual_train_imgs = set(img.stem for img in (public_dir / "train" / "images").glob("*.png"))
    actual_train_masks = set(img.stem for img in (public_dir / "train" / "masks").glob("*.png"))

    assert len(actual_train_imgs) == len(train_df), "The number of images in the train set doesn't match!"
    assert len(actual_train_masks) == len(train_df), "The number of masks in the train set doesn't match!"

    for train_id in train_df["id"]:
        assert (public_dir / "train" / "images" / f"{train_id}.png").exists()
        assert (public_dir / "train" / "masks" / f"{train_id}.png").exists()

    actual_test_imgs = set(img.stem for img in (public_dir / "test" / "images").glob("*.png"))

    assert not (public_dir / "test" / "masks").exists(), f"Expected `{public_dir}/test/masks` to not exist, but it does!"
    assert len(actual_test_imgs) == len(test_df), "The number of images in the test set doesn't match!"

    for test_id in test_df["id"]:
        assert (public_dir / "test" / "images" / f"{test_id}.png").exists()
        assert not (public_dir / "test" / "masks" / f"{test_id}.png").exists()

    assert actual_train_imgs.isdisjoint(actual_test_imgs), "Image sets overlap!"

    actual_sample_submission = read_csv(public_dir / "sample_submission.csv")
    actual_test = read_csv(private_dir / "test.csv")

    assert len(actual_sample_submission) == len(actual_test), "Sample submission and test set lengths differ!"
    assert set(actual_sample_submission["id"]) == set(actual_test["id"]), "Sample submission and test set IDs differ!"
    assert len(actual_test_imgs) == len(actual_test), "Test image count and test set length differ!"
    assert set(actual_test["id"]) == actual_test_imgs, "Test set IDs and test images differ!"


def prepare(raw: Path, public: Path, private: Path) -> None:
    extract(raw / "competition_data.zip", raw)

    old_train = read_csv(raw / "competition_data" / "train.csv")
    old_train = old_train.fillna("")
    old_depths = read_csv(raw / "depths.csv")

    # Original split: Train set ~4,000 samples; Test set ~18,000 samples (an 82% test ratio).
    # We use a 0.25 test ratio instead so the number of test samples stays in the thousands.
    new_train, new_test = train_test_split(old_train, test_size=0.25, random_state=0)

    assert len(new_train) + len(new_test) == len(
        old_train
    ), "Some samples were lost when creating the new train and test sets!"

    # Create the new validation split from the `new_train` set.
    # To make `test_val` have the same size as `new_test` (25% of original),
    # we need to take 1/3 of `new_train` (since 1/3 * 75% = 25%).
    train_val, test_val = train_test_split(new_train, test_size=(1/3), random_state=0)

    assert len(train_val) + len(test_val) == len(
        new_train
    ), "Some samples were lost when creating the validation train and test sets!"

    # Define paths for raw images and new validation output directories
    raw_images_dir = raw / "competition_data" / "train" / "images"
    raw_masks_dir = raw / "competition_data" / "train" / "masks"
    public_val = public.parent / "public_val"
    private_val = private.parent / "private_val"

    # Process the original split, saving to `public` and `private`
    # This block ensures the original outputs are not modified.
    _process_split(
        train_df=new_train,
        test_df=new_test,
        public_dir=public,
        private_dir=private,
        raw_images_dir=raw_images_dir,
        raw_masks_dir=raw_masks_dir,
        all_depths_df=old_depths,
    )

    # Process the new validation split, saving to `public_val` and `private_val`
    _process_split(
        train_df=train_val,
        test_df=test_val,
        public_dir=public_val,
        private_dir=private_val,
        raw_images_dir=raw_images_dir,
        raw_masks_dir=raw_masks_dir,
        all_depths_df=old_depths,
    )

    # Final checks on data types, which are consistent across all splits.
    assert new_train.applymap(
        lambda x: isinstance(x, str)
    ).values.all(), "Not all elements in the DataFrame are strings!"
    assert new_test.applymap(
        lambda x: isinstance(x, str)
    ).values.all(), "Not all elements in the DataFrame are strings!"
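
Note: the one-third ratio in the second split is chosen so that `test_val` ends up the same size as `new_test` (one third of the remaining 75% is 25% of the original). A standalone sketch with dummy ids, not the competition data, confirming the arithmetic:

import pandas as pd
from sklearn.model_selection import train_test_split

dummy = pd.DataFrame({"id": [f"img_{i}" for i in range(4000)]})
new_train, new_test = train_test_split(dummy, test_size=0.25, random_state=0)
train_val, test_val = train_test_split(new_train, test_size=1 / 3, random_state=0)

assert len(new_test) == 1000   # 25% of 4,000
assert len(test_val) == 1000   # one third of the remaining 3,000
assert len(train_val) == 2000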

mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py
@@ -0,0 +1,11 @@
import pandas as pd
from sklearn.metrics import roc_auc_score

from mlebench.competitions.utils import prepare_for_auroc_metric


def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    roc_auc_inputs = prepare_for_auroc_metric(
        submission, answers, id_col="clip", target_col="probability"
    )
    return roc_auc_score(y_true=roc_auc_inputs["y_true"], y_score=roc_auc_inputs["y_score"])
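
Note: a standalone sketch of the same scoring on toy data. The exact validation and alignment done by `prepare_for_auroc_metric` lives in `mlebench.competitions.utils` and is not reproduced here; this simply merges on `clip` and scores with scikit-learn:

import pandas as pd
from sklearn.metrics import roc_auc_score

answers = pd.DataFrame({"clip": ["a.aif", "b.aif", "c.aif", "d.aif"],
                        "probability": [1, 0, 1, 0]})
submission = pd.DataFrame({"clip": ["a.aif", "b.aif", "c.aif", "d.aif"],
                           "probability": [0.9, 0.2, 0.7, 0.4]})

merged = answers.merge(submission, on="clip", suffixes=("_true", "_pred"))
print(roc_auc_score(y_true=merged["probability_true"], y_score=merged["probability_pred"]))  # 1.0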

mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py
@@ -0,0 +1,95 @@
import re
import shutil
from pathlib import Path

import pandas as pd
from tqdm import tqdm


def prepare(raw: Path, public: Path, private: Path):
    """
    Splits the data in raw into public and private datasets with appropriate test/train splits.
    """
    # Data is in train2.zip - we need to unzip it
    shutil.unpack_archive(raw / "train2.zip", raw)

    # Files are named as
    # Train: "YYYYMMDD_HHMMSS_{seconds}_TRAIN{idx}_{label:0,1}.aif"
    # Test: "YYYYMMDD_HHMMSS_{seconds}_Test{idx}.aif"

    # There are 4 days in Train and 3 days in Test
    # In our new dataset, we'll just split Train_old into 2 days for Train and 2 days for Test

    samples_by_date = {}
    n_train_old = 0
    for sample in (raw / "train2").iterdir():
        date = sample.name.split("_")[0]
        if date not in samples_by_date:
            samples_by_date[date] = []
        samples_by_date[date].append(sample)
        n_train_old += 1

    assert len(samples_by_date) == 4, "Expected 4 days in Train_old"
    dates = sorted(list(samples_by_date.keys()))
    new_train = samples_by_date[dates[0]] + samples_by_date[dates[1]]
    new_test = samples_by_date[dates[2]] + samples_by_date[dates[3]]
    # Sort files - filenames have timestamps so we want new idxs to be time-ordered
    new_train = sorted(new_train)
    new_test = sorted(new_test)

    # Copy files to new directories
    (public / "train2").mkdir(exist_ok=True, parents=True)
    for idx, sample in enumerate(tqdm(new_train)):
        # Replace index part of filename with new index
        new_sample_name = re.sub(r"TRAIN\d+", f"TRAIN{idx}", sample.name)
        new_sample = public / "train2" / new_sample_name
        shutil.copy(sample, new_sample)

    answer_rows = []  # While we're at it, collect answers for the new test set
    (public / "test2").mkdir(exist_ok=True, parents=True)
    for idx, sample in enumerate(tqdm(new_test)):
        # Replace everything after the TRAIN{idx} part of the filename
        # (replaces index as well as label part of filename)
        new_sample_name = sample.name.split("TRAIN")[0] + f"Test{idx}.aif"
        new_sample = public / "test2" / new_sample_name
        shutil.copy(sample, new_sample)

        # Add to new test set answers
        answer_rows.append(
            {"clip": new_sample_name, "probability": 1 if sample.stem.endswith("_1") else 0}
        )

    assert len(new_train) == len(
        list((public / "train2").glob("*.aif"))
    ), f"Expected {len(new_train)} samples in new_train ({len(list((public / 'train2').glob('*.aif')))})"
    assert len(new_test) == len(
        list((public / "test2").glob("*.aif"))
    ), f"Expected {len(new_test)} samples in new_test ({len(list((public / 'test2').glob('*.aif')))})"
    assert (
        len(new_train) + len(new_test) == n_train_old
    ), f"Expected {n_train_old} total samples in new_train ({len(new_train)}) and new_test ({len(new_test)})"

    # Make zipped versions
    shutil.make_archive(public / "train2", "zip", public, "train2")
    shutil.make_archive(public / "test2", "zip", public, "test2")
    # Remove unzipped directories (original comp doesn't have these)
    shutil.rmtree(public / "train2")
    shutil.rmtree(public / "test2")
    # we also don't need the raw dirs anymore
    shutil.rmtree(raw / "train2")

    # Create answers
    answers_df = pd.DataFrame(answer_rows)
    answers_df.to_csv(private / "test.csv", index=False)

    # Create sample submission
    sample_submission = answers_df.copy()
    sample_submission["probability"] = 0
    sample_submission.to_csv(public / "sampleSubmission.csv", index=False)

    assert set(answers_df.columns) == set(
        ["clip", "probability"]
    ), "Answers must have 'clip' and 'probability' columns"
    assert set(sample_submission.columns) == set(
        ["clip", "probability"]
    ), "Sample submission must have 'clip' and 'probability' columns"
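
Note: the renaming rules above can be seen on a hypothetical filename that follows the documented pattern (the timestamp and seconds fields here are made up):

import re
from pathlib import Path

old_name = "20090328_000000_010s0ms_TRAIN27_1.aif"  # hypothetical train clip, label 1

# Train copies keep the label suffix but get a new, time-ordered index:
print(re.sub(r"TRAIN\d+", "TRAIN0", old_name))           # 20090328_000000_010s0ms_TRAIN0_1.aif

# Test copies drop both the index and the label; the label is recorded in private/test.csv:
print(old_name.split("TRAIN")[0] + "Test0.aif")          # 20090328_000000_010s0ms_Test0.aif
print(1 if Path(old_name).stem.endswith("_1") else 0)    # 1, the stored probability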

mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py
@@ -0,0 +1,141 @@
import re
import shutil
from pathlib import Path
from typing import List

import pandas as pd
from tqdm import tqdm


def _create_split(
    train_files: List[Path],
    test_files: List[Path],
    public_path: Path,
    private_path: Path,
):
    """
    Helper function to process and save a single train/test split.

    This function handles file copying, renaming, zipping, and the creation of
    answer and sample submission files for a given set of train/test files.
    """
    # Create output directories if they don't exist
    public_path.mkdir(exist_ok=True, parents=True)
    private_path.mkdir(exist_ok=True, parents=True)

    # Process and copy train files
    train_output_dir = public_path / "train2"
    train_output_dir.mkdir(exist_ok=True)
    # Sort files to ensure deterministic indexing
    sorted_train_files = sorted(train_files)
    for idx, sample in enumerate(
        tqdm(sorted_train_files, desc=f"Creating train set for {public_path.name}")
    ):
        new_sample_name = re.sub(r"TRAIN\d+", f"TRAIN{idx}", sample.name)
        new_sample = train_output_dir / new_sample_name
        shutil.copy(sample, new_sample)

    # Process and copy test files, collecting answers
    answer_rows = []
    test_output_dir = public_path / "test2"
    test_output_dir.mkdir(exist_ok=True)
    # Sort files to ensure deterministic indexing
    sorted_test_files = sorted(test_files)
    for idx, sample in enumerate(
        tqdm(sorted_test_files, desc=f"Creating test set for {public_path.name}")
    ):
        new_sample_name = sample.name.split("TRAIN")[0] + f"Test{idx}.aif"
        new_sample = test_output_dir / new_sample_name
        shutil.copy(sample, new_sample)
        answer_rows.append(
            {"clip": new_sample_name, "probability": 1 if sample.stem.endswith("_1") else 0}
        )

    # Assertions to verify file counts
    assert len(sorted_train_files) == len(list(train_output_dir.glob("*.aif")))
    assert len(sorted_test_files) == len(list(test_output_dir.glob("*.aif")))

    # Create zipped versions and remove temporary unzipped directories
    shutil.make_archive(public_path / "train2", "zip", public_path, "train2")
    shutil.make_archive(public_path / "test2", "zip", public_path, "test2")
    shutil.rmtree(train_output_dir)
    shutil.rmtree(test_output_dir)

    # Create answer file
    answers_df = pd.DataFrame(answer_rows)
    answers_df.to_csv(private_path / "test.csv", index=False)
    assert set(answers_df.columns) == set(["clip", "probability"])

    # Create sample submission file
    sample_submission = answers_df.copy()
    sample_submission["probability"] = 0
    sample_submission.to_csv(public_path / "sampleSubmission.csv", index=False)
    assert set(sample_submission.columns) == set(["clip", "probability"])


def prepare(raw: Path, public: Path, private: Path):
    """
    Splits the data in raw into public and private datasets with appropriate test/train splits.
    Also creates a secondary validation split in public_val/private_val directories.
    """
    # Data is in train2.zip - we need to unzip it
    shutil.unpack_archive(raw / "train2.zip", raw)

    # Files are named as
    # Train: "YYYYMMDD_HHMMSS_{seconds}_TRAIN{idx}_{label:0,1}.aif"
    # Test: "YYYYMMDD_HHMMSS_{seconds}_Test{idx}.aif"

    # There are 4 days in Train and 3 days in Test
    # In our new dataset, we'll just split Train_old into 2 days for Train and 2 days for Test

    samples_by_date = {}
    n_train_old = 0
    for sample in (raw / "train2").iterdir():
        date = sample.name.split("_")[0]
        if date not in samples_by_date:
            samples_by_date[date] = []
        samples_by_date[date].append(sample)
        n_train_old += 1

    assert len(samples_by_date) == 4, "Expected 4 days in Train_old"
    dates = sorted(list(samples_by_date.keys()))

    # --- 1. Create the Original Split (public/private) ---
    # This split uses the first two days for training and the last two days for testing.
    # The outputs of this step must remain identical to the original script.
    original_train_files = samples_by_date[dates[0]] + samples_by_date[dates[1]]
    original_test_files = samples_by_date[dates[2]] + samples_by_date[dates[3]]

    _create_split(
        train_files=original_train_files,
        test_files=original_test_files,
        public_path=public,
        private_path=private,
    )

    # --- 2. Create the New Validation Split (public_val/private_val) ---
    # This second split takes the original *training* data (the first two days) and
    # splits it again, using the same date-based logic. The first day becomes the
    # new training set, and the second day becomes the new validation (test) set.
    public_val = public.parent / "public_val"
    private_val = private.parent / "private_val"

    validation_train_files = samples_by_date[dates[0]]
    validation_test_files = samples_by_date[dates[1]]

    _create_split(
        train_files=validation_train_files,
        test_files=validation_test_files,
        public_path=public_val,
        private_path=private_val,
    )

    # Final cleanup of the raw unzipped directory after all processing is complete
    shutil.rmtree(raw / "train2")

    # Final top-level assertions from the original script
    assert (
        len(original_train_files) + len(original_test_files) == n_train_old
    ), f"Expected {n_train_old} total samples in new_train ({len(original_train_files)}) and new_test ({len(original_test_files)})"
    assert (public / "sampleSubmission.csv").exists()
    assert (private / "test.csv").exists()
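
Note: a minimal sketch of the date-prefix grouping both prepare scripts rely on, using hypothetical filenames in the documented pattern:

from collections import defaultdict

names = [
    "20090328_000000_010s0ms_TRAIN0_1.aif",
    "20090329_000000_011s0ms_TRAIN1_0.aif",
    "20090330_000000_012s0ms_TRAIN2_1.aif",
    "20090331_000000_013s0ms_TRAIN3_0.aif",
]
by_date = defaultdict(list)
for name in names:
    by_date[name.split("_")[0]].append(name)

dates = sorted(by_date)
print(len(dates))          # 4 distinct days
print(by_date[dates[0]])   # first day  -> train set of the validation split
print(by_date[dates[1]])   # second day -> test set of the validation split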

mlebench/competitions/tmdb-box-office-prediction/__init__.py
File without changes

mlebench/competitions/tmdb-box-office-prediction/grade.py
@@ -0,0 +1,55 @@
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_log_error

from mlebench.grade_helpers import InvalidSubmissionError


def prepare_for_metric(submission: pd.DataFrame, answers: pd.DataFrame):
    """Prepare submission and answers for RMSLE calculation."""
    if len(submission) != len(answers):
        raise InvalidSubmissionError(
            f"Submission length ({len(submission)}) != answers length ({len(answers)})"
        )

    if "id" not in submission.columns:
        raise InvalidSubmissionError("Submission must have an 'id' column")

    if "revenue" not in submission.columns:
        raise InvalidSubmissionError("Submission must have a 'revenue' column")

    # Sort by id
    submission = submission.sort_values("id").reset_index(drop=True)
    answers = answers.sort_values("id").reset_index(drop=True)

    # Check id alignment
    if not (submission["id"] == answers["id"]).all():
        raise InvalidSubmissionError("Submission and answers id mismatch")

    # Extract predictions and true values
    y_pred = submission["revenue"].values
    y_true = answers["revenue"].values

    # Validate predictions
    try:
        y_pred = y_pred.astype(float)
    except (ValueError, TypeError):
        raise InvalidSubmissionError("Predictions must be numeric")

    if np.any(np.isnan(y_pred)):
        raise InvalidSubmissionError("Predictions cannot contain NaN values")

    if np.any(y_pred < 0):
        raise InvalidSubmissionError("Predictions cannot be negative")

    return y_true, y_pred


def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    """Calculate RMSLE score."""
    y_true, y_pred = prepare_for_metric(submission, answers)

    # RMSLE = sqrt(MSLE)
    rmsle = np.sqrt(mean_squared_log_error(y_true, y_pred))

    return rmsle
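
Note: a quick standalone check of the RMSLE computed by grade(), on arbitrary toy revenue values unrelated to the competition data:

import numpy as np
from sklearn.metrics import mean_squared_log_error

y_true = np.array([1_000_000.0, 5_000_000.0])
y_pred = np.array([2_000_000.0, 5_000_000.0])
# One prediction is off by a factor of two, the other is exact,
# so the score is roughly sqrt(ln(2)^2 / 2) ~= 0.49.
print(np.sqrt(mean_squared_log_error(y_true, y_pred)))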

mlebench/competitions/tmdb-box-office-prediction/prepare.py
@@ -0,0 +1,35 @@
from pathlib import Path
import pandas as pd


def prepare(raw: Path, public: Path, private: Path):
    """
    Prepare tmdb-box-office-prediction dataset.

    Raw data contains:
    - train.csv: training data with movie features and revenue
    - test.csv: test data with movie features only
    - sample_submission.csv: sample submission format
    - test_answer.csv: test revenue (private)
    """
    # Read data
    train = pd.read_csv(raw / "train.csv")
    test = pd.read_csv(raw / "test.csv")
    sample_submission = pd.read_csv(raw / "sample_submission.csv")
    test_answer = pd.read_csv(raw / "test_answer.csv")

    # Public files (visible to agents)
    train.to_csv(public / "train.csv", index=False)
    test.to_csv(public / "test.csv", index=False)
    sample_submission.to_csv(public / "sample_submission.csv", index=False)

    # Private files (for grading)
    test_answer.to_csv(private / "test.csv", index=False)

    # Validation checks
    assert len(test_answer) == len(sample_submission), \
        f"Test answer ({len(test_answer)}) and sample submission ({len(sample_submission)}) must have same length"
    assert "id" in test_answer.columns, "Test answer must have 'id' column"
    assert "revenue" in test_answer.columns, "Test answer must have 'revenue' column"
    assert "id" in sample_submission.columns, "Sample submission must have 'id' column"
    assert "revenue" in sample_submission.columns, "Sample submission must have 'revenue' column"

mlebench/competitions/tweet-sentiment-extraction/grade.py
@@ -0,0 +1,67 @@
import numpy as np
import pandas as pd

from mlebench.grade_helpers import InvalidSubmissionError


def jaccard(str1: str, str2: str) -> float:
    """
    Given by the competition page: https://www.kaggle.com/competitions/tweet-sentiment-extraction/overview
    """
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))


def mean_jaccard(preds: list[str], answers: list[str]) -> float:
    return float(np.mean([jaccard(a, b) for a, b in zip(preds, answers)]))


def prepare_for_metric(
    submission: pd.DataFrame, answers: pd.DataFrame
) -> tuple[list[str], list[str]]:
    """
    The submission and answers are DataFrames with the following columns:
    ```
    textID,selected_text
    2,"very good"
    5,"I don't care"
    6,"bad"
    8,"it was, yes"
    etc.
    ```
    """

    if "textID" not in submission.columns:
        raise InvalidSubmissionError("Submission DataFrame must have a 'textID' column.")
    if "selected_text" not in submission.columns:
        raise InvalidSubmissionError("Submission DataFrame must have a 'selected_text' column.")
    if len(submission) != len(answers):
        raise InvalidSubmissionError(
            "Submission and answers DataFrames must have the same number of rows."
        )
    assert "textID" in answers.columns, "Answers DataFrame must have a 'textID' column."
    assert (
        "selected_text" in answers.columns
    ), "Answers DataFrame must have a 'selected_text' column."

    # Sort by textID
    submission = submission.sort_values("textID")
    answers = answers.sort_values("textID")

    if (submission["textID"].values != answers["textID"].values).any():
        raise InvalidSubmissionError(
            "Submission and answers DataFrames must have the same textID values."
        )

    # Note: empty strings in the submission get read as NaNs, so we fill them with empty strings
    # before converting to str (otherwise pandas will convert NaNs to "nan" strings)
    submission_labels = submission["selected_text"].fillna("").astype(str).tolist()
    answer_labels = answers["selected_text"].fillna("").astype(str).tolist()
    return submission_labels, answer_labels


def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
    submission_labels, answer_labels = prepare_for_metric(submission, answers)
    return mean_jaccard(submission_labels, answer_labels)
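
Note: a worked example of the word-level Jaccard score defined above, reusing the "it was, yes" row from the docstring:

a = set("it was, yes".lower().split())       # {"it", "was,", "yes"}
b = set("it was yes".lower().split())        # {"it", "was", "yes"} - note "was," != "was"
c = a.intersection(b)                        # {"it", "yes"}
print(len(c) / (len(a) + len(b) - len(c)))   # 2 / 4 = 0.5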

mlebench/competitions/tweet-sentiment-extraction/prepare.py
@@ -0,0 +1,36 @@
from pathlib import Path

from sklearn.model_selection import train_test_split

from mlebench.utils import read_csv


def prepare(raw: Path, public: Path, private: Path):
    """
    Splits the data in raw into public and private datasets with appropriate test/train splits.
    """

    # Create train, test from train split
    old_train = read_csv(raw / "train.csv")
    new_train, new_test = train_test_split(old_train, test_size=0.1, random_state=0)
    new_test_without_labels = new_test.drop(columns=["selected_text"])

    new_train.to_csv(public / "train.csv", index=False)
    new_test.to_csv(private / "test.csv", index=False)
    new_test_without_labels.to_csv(public / "test.csv", index=False)

    assert len(new_test_without_labels) == len(
        new_test
    ), f"Expected new_test_without_labels ({len(new_test_without_labels)}) == new_test ({len(new_test)})"
    assert len(new_train) + len(new_test) == len(
        old_train
    ), f"Expected new_train ({len(new_train)}) + new_test ({len(new_test)}) == old_train ({len(old_train)})"

    # Create a sample submission file
    submission_df = new_test.copy()[["textID", "selected_text"]]
    submission_df["selected_text"] = ""

    submission_df.to_csv(public / "sample_submission.csv", index=False)
    assert len(submission_df) == len(
        new_test
    ), f"Expected submission_df ({len(submission_df)}) == new_test ({len(new_test)})"
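
Note: the grader's fillna("") exists because the empty selected_text values written to sample_submission.csv here come back as NaN after a CSV round trip. A small standalone illustration:

import io
import pandas as pd

csv_text = 'textID,selected_text\n2,"very good"\n5,\n'
df = pd.read_csv(io.StringIO(csv_text))
print(df["selected_text"].tolist())                          # ['very good', nan]
print(df["selected_text"].fillna("").astype(str).tolist())   # ['very good', '']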