dslighting-1.7.1-py3-none-any.whl → dslighting-1.7.6-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (352)
  1. dslighting/__init__.py +1 -1
  2. dslighting/core/agent.py +78 -62
  3. {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/METADATA +1 -1
  4. {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/RECORD +352 -7
  5. {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/top_level.txt +1 -0
  6. mlebench/README.md +39 -0
  7. mlebench/__init__.py +0 -0
  8. mlebench/cli.py +221 -0
  9. mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py +161 -0
  10. mlebench/competitions/3d-object-detection-for-autonomous-vehicles/mAP_evaluation.py +425 -0
  11. mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare.py +483 -0
  12. mlebench/competitions/3d-object-detection-for-autonomous-vehicles/prepare_val.py +719 -0
  13. mlebench/competitions/AI4Code/grade.py +70 -0
  14. mlebench/competitions/AI4Code/prepare.py +84 -0
  15. mlebench/competitions/AI4Code/prepare_val.py +159 -0
  16. mlebench/competitions/__init__.py +0 -0
  17. mlebench/competitions/aerial-cactus-identification/grade.py +11 -0
  18. mlebench/competitions/aerial-cactus-identification/prepare.py +71 -0
  19. mlebench/competitions/aerial-cactus-identification/prepare_val.py +133 -0
  20. mlebench/competitions/alaska2-image-steganalysis/grade.py +136 -0
  21. mlebench/competitions/alaska2-image-steganalysis/prepare.py +88 -0
  22. mlebench/competitions/alaska2-image-steganalysis/prepare_val.py +148 -0
  23. mlebench/competitions/aptos2019-blindness-detection/grade.py +35 -0
  24. mlebench/competitions/aptos2019-blindness-detection/prepare.py +75 -0
  25. mlebench/competitions/aptos2019-blindness-detection/prepare_val.py +123 -0
  26. mlebench/competitions/bike-sharing-demand/__init__.py +0 -0
  27. mlebench/competitions/bike-sharing-demand/grade.py +55 -0
  28. mlebench/competitions/bike-sharing-demand/prepare.py +37 -0
  29. mlebench/competitions/billion-word-imputation/grade.py +37 -0
  30. mlebench/competitions/billion-word-imputation/prepare.py +107 -0
  31. mlebench/competitions/billion-word-imputation/prepare_val.py +179 -0
  32. mlebench/competitions/bms-molecular-translation/grade.py +40 -0
  33. mlebench/competitions/bms-molecular-translation/prepare.py +68 -0
  34. mlebench/competitions/bms-molecular-translation/prepare_val.py +131 -0
  35. mlebench/competitions/cassava-leaf-disease-classification/grade.py +12 -0
  36. mlebench/competitions/cassava-leaf-disease-classification/prepare.py +113 -0
  37. mlebench/competitions/cassava-leaf-disease-classification/prepare_val.py +186 -0
  38. mlebench/competitions/cdiscount-image-classification-challenge/grade.py +11 -0
  39. mlebench/competitions/cdiscount-image-classification-challenge/prepare.py +144 -0
  40. mlebench/competitions/cdiscount-image-classification-challenge/prepare_val.py +205 -0
  41. mlebench/competitions/chaii-hindi-and-tamil-question-answering/grade.py +67 -0
  42. mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare.py +31 -0
  43. mlebench/competitions/chaii-hindi-and-tamil-question-answering/prepare_val.py +94 -0
  44. mlebench/competitions/champs-scalar-coupling/grade.py +60 -0
  45. mlebench/competitions/champs-scalar-coupling/prepare.py +116 -0
  46. mlebench/competitions/champs-scalar-coupling/prepare_val.py +155 -0
  47. mlebench/competitions/conways-reverse-game-of-life-2020/__init__.py +0 -0
  48. mlebench/competitions/conways-reverse-game-of-life-2020/grade.py +40 -0
  49. mlebench/competitions/conways-reverse-game-of-life-2020/prepare.py +41 -0
  50. mlebench/competitions/demand-forecasting-kernels-only/__init__.py +0 -0
  51. mlebench/competitions/demand-forecasting-kernels-only/grade.py +66 -0
  52. mlebench/competitions/demand-forecasting-kernels-only/prepare.py +27 -0
  53. mlebench/competitions/demand_forecasting_kernels_only/__init__.py +0 -0
  54. mlebench/competitions/demand_forecasting_kernels_only/grade.py +66 -0
  55. mlebench/competitions/demand_forecasting_kernels_only/prepare.py +27 -0
  56. mlebench/competitions/denoising-dirty-documents/grade.py +44 -0
  57. mlebench/competitions/denoising-dirty-documents/prepare.py +134 -0
  58. mlebench/competitions/denoising-dirty-documents/prepare_val.py +178 -0
  59. mlebench/competitions/detecting-insults-in-social-commentary/grade.py +11 -0
  60. mlebench/competitions/detecting-insults-in-social-commentary/prepare.py +72 -0
  61. mlebench/competitions/detecting-insults-in-social-commentary/prepare_val.py +128 -0
  62. mlebench/competitions/dog-breed-identification/dogs.py +124 -0
  63. mlebench/competitions/dog-breed-identification/grade.py +42 -0
  64. mlebench/competitions/dog-breed-identification/prepare.py +55 -0
  65. mlebench/competitions/dog-breed-identification/prepare_val.py +104 -0
  66. mlebench/competitions/dogs-vs-cats-redux-kernels-edition/grade.py +43 -0
  67. mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare.py +70 -0
  68. mlebench/competitions/dogs-vs-cats-redux-kernels-edition/prepare_val.py +143 -0
  69. mlebench/competitions/ethanol-concentration/grade.py +23 -0
  70. mlebench/competitions/ethanol-concentration/prepare.py +90 -0
  71. mlebench/competitions/facebook-recruiting-iii-keyword-extraction/grade.py +60 -0
  72. mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare.py +41 -0
  73. mlebench/competitions/facebook-recruiting-iii-keyword-extraction/prepare_val.py +92 -0
  74. mlebench/competitions/feedback-prize-english-language-learning/__init__.py +0 -0
  75. mlebench/competitions/feedback-prize-english-language-learning/grade.py +60 -0
  76. mlebench/competitions/feedback-prize-english-language-learning/prepare.py +39 -0
  77. mlebench/competitions/freesound-audio-tagging-2019/grade.py +64 -0
  78. mlebench/competitions/freesound-audio-tagging-2019/prepare.py +94 -0
  79. mlebench/competitions/freesound-audio-tagging-2019/prepare_val.py +175 -0
  80. mlebench/competitions/freesound-audio-tagging-2019/vocabulary.py +83 -0
  81. mlebench/competitions/google-quest-challenge/classes.py +32 -0
  82. mlebench/competitions/google-quest-challenge/grade.py +45 -0
  83. mlebench/competitions/google-quest-challenge/prepare.py +58 -0
  84. mlebench/competitions/google-quest-challenge/prepare_val.py +120 -0
  85. mlebench/competitions/google-research-identify-contrails-reduce-global-warming/grade.py +77 -0
  86. mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare.py +155 -0
  87. mlebench/competitions/google-research-identify-contrails-reduce-global-warming/prepare_val.py +211 -0
  88. mlebench/competitions/h-and-m-personalized-fashion-recommendations/grade.py +42 -0
  89. mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare.py +102 -0
  90. mlebench/competitions/h-and-m-personalized-fashion-recommendations/prepare_val.py +132 -0
  91. mlebench/competitions/handwriting/grade.py +23 -0
  92. mlebench/competitions/handwriting/prepare.py +179 -0
  93. mlebench/competitions/herbarium-2020-fgvc7/grade.py +34 -0
  94. mlebench/competitions/herbarium-2020-fgvc7/prepare.py +251 -0
  95. mlebench/competitions/herbarium-2020-fgvc7/prepare_val.py +242 -0
  96. mlebench/competitions/herbarium-2021-fgvc8/grade.py +34 -0
  97. mlebench/competitions/herbarium-2021-fgvc8/prepare.py +251 -0
  98. mlebench/competitions/herbarium-2021-fgvc8/prepare_val.py +222 -0
  99. mlebench/competitions/herbarium-2022-fgvc9/grade.py +31 -0
  100. mlebench/competitions/herbarium-2022-fgvc9/prepare.py +233 -0
  101. mlebench/competitions/herbarium-2022-fgvc9/prepare_val.py +213 -0
  102. mlebench/competitions/histopathologic-cancer-detection/grade.py +12 -0
  103. mlebench/competitions/histopathologic-cancer-detection/prepare.py +59 -0
  104. mlebench/competitions/histopathologic-cancer-detection/prepare_val.py +131 -0
  105. mlebench/competitions/hms-harmful-brain-activity-classification/constants.py +9 -0
  106. mlebench/competitions/hms-harmful-brain-activity-classification/grade.py +43 -0
  107. mlebench/competitions/hms-harmful-brain-activity-classification/kaggle_metric_utilities.py +96 -0
  108. mlebench/competitions/hms-harmful-brain-activity-classification/kullback_leibler_divergence.py +118 -0
  109. mlebench/competitions/hms-harmful-brain-activity-classification/prepare.py +121 -0
  110. mlebench/competitions/hms-harmful-brain-activity-classification/prepare_val.py +190 -0
  111. mlebench/competitions/hotel-id-2021-fgvc8/grade.py +41 -0
  112. mlebench/competitions/hotel-id-2021-fgvc8/prepare.py +63 -0
  113. mlebench/competitions/hotel-id-2021-fgvc8/prepare_val.py +132 -0
  114. mlebench/competitions/hubmap-kidney-segmentation/grade.py +62 -0
  115. mlebench/competitions/hubmap-kidney-segmentation/prepare.py +108 -0
  116. mlebench/competitions/hubmap-kidney-segmentation/prepare_val.py +153 -0
  117. mlebench/competitions/icecube-neutrinos-in-deep-ice/grade.py +111 -0
  118. mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare.py +127 -0
  119. mlebench/competitions/icecube-neutrinos-in-deep-ice/prepare_val.py +183 -0
  120. mlebench/competitions/ili/grade.py +60 -0
  121. mlebench/competitions/ili/prepare.py +99 -0
  122. mlebench/competitions/imet-2020-fgvc7/grade.py +54 -0
  123. mlebench/competitions/imet-2020-fgvc7/prepare.py +77 -0
  124. mlebench/competitions/imet-2020-fgvc7/prepare_val.py +157 -0
  125. mlebench/competitions/inaturalist-2019-fgvc6/grade.py +35 -0
  126. mlebench/competitions/inaturalist-2019-fgvc6/prepare.py +259 -0
  127. mlebench/competitions/inaturalist-2019-fgvc6/prepare_val.py +304 -0
  128. mlebench/competitions/instant-gratification/__init__.py +0 -0
  129. mlebench/competitions/instant-gratification/grade.py +55 -0
  130. mlebench/competitions/instant-gratification/prepare.py +25 -0
  131. mlebench/competitions/instant_gratification/__init__.py +0 -0
  132. mlebench/competitions/instant_gratification/grade.py +55 -0
  133. mlebench/competitions/instant_gratification/prepare.py +25 -0
  134. mlebench/competitions/invasive-species-monitoring/grade.py +11 -0
  135. mlebench/competitions/invasive-species-monitoring/prepare.py +97 -0
  136. mlebench/competitions/invasive-species-monitoring/prepare_val.py +164 -0
  137. mlebench/competitions/iwildcam-2019-fgvc6/grade.py +44 -0
  138. mlebench/competitions/iwildcam-2019-fgvc6/prepare.py +118 -0
  139. mlebench/competitions/iwildcam-2019-fgvc6/prepare_val.py +194 -0
  140. mlebench/competitions/iwildcam-2020-fgvc7/grade.py +11 -0
  141. mlebench/competitions/iwildcam-2020-fgvc7/prepare.py +164 -0
  142. mlebench/competitions/iwildcam-2020-fgvc7/prepare_val.py +245 -0
  143. mlebench/competitions/jigsaw-toxic-comment-classification-challenge/classes.py +1 -0
  144. mlebench/competitions/jigsaw-toxic-comment-classification-challenge/grade.py +54 -0
  145. mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare.py +42 -0
  146. mlebench/competitions/jigsaw-toxic-comment-classification-challenge/prepare_val.py +88 -0
  147. mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/grade.py +153 -0
  148. mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare.py +36 -0
  149. mlebench/competitions/jigsaw-unintended-bias-in-toxicity-classification/prepare_val.py +117 -0
  150. mlebench/competitions/kuzushiji-recognition/grade.py +58 -0
  151. mlebench/competitions/kuzushiji-recognition/kuzushiji_metric.py +118 -0
  152. mlebench/competitions/kuzushiji-recognition/prepare.py +92 -0
  153. mlebench/competitions/kuzushiji-recognition/prepare_val.py +149 -0
  154. mlebench/competitions/leaf-classification/classes.py +101 -0
  155. mlebench/competitions/leaf-classification/grade.py +44 -0
  156. mlebench/competitions/leaf-classification/prepare.py +60 -0
  157. mlebench/competitions/leaf-classification/prepare_val.py +116 -0
  158. mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/grade.py +44 -0
  159. mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare.py +51 -0
  160. mlebench/competitions/learning-agency-lab-automated-essay-scoring-2/prepare_val.py +96 -0
  161. mlebench/competitions/liverpool-ion-switching/__init__.py +0 -0
  162. mlebench/competitions/liverpool-ion-switching/grade.py +52 -0
  163. mlebench/competitions/liverpool-ion-switching/prepare.py +27 -0
  164. mlebench/competitions/liverpool_ion_switching/__init__.py +0 -0
  165. mlebench/competitions/liverpool_ion_switching/grade.py +52 -0
  166. mlebench/competitions/liverpool_ion_switching/prepare.py +27 -0
  167. mlebench/competitions/lmsys-chatbot-arena/grade.py +63 -0
  168. mlebench/competitions/lmsys-chatbot-arena/prepare.py +52 -0
  169. mlebench/competitions/lmsys-chatbot-arena/prepare_val.py +115 -0
  170. mlebench/competitions/mcm_2024_c_test/grade.py +107 -0
  171. mlebench/competitions/mcm_2024_c_test/prepare.py +2 -0
  172. mlebench/competitions/ml2021spring-hw2/grade.py +11 -0
  173. mlebench/competitions/ml2021spring-hw2/prepare.py +58 -0
  174. mlebench/competitions/ml2021spring-hw2/prepare_val.py +135 -0
  175. mlebench/competitions/mlsp-2013-birds/grade.py +11 -0
  176. mlebench/competitions/mlsp-2013-birds/prepare.py +182 -0
  177. mlebench/competitions/mlsp-2013-birds/prepare_val.py +241 -0
  178. mlebench/competitions/movie-review-sentiment-analysis-kernels-only/grade.py +11 -0
  179. mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare.py +58 -0
  180. mlebench/competitions/movie-review-sentiment-analysis-kernels-only/prepare_val.py +120 -0
  181. mlebench/competitions/multi-modal-gesture-recognition/grade.py +58 -0
  182. mlebench/competitions/multi-modal-gesture-recognition/prepare.py +85 -0
  183. mlebench/competitions/multi-modal-gesture-recognition/prepare_val.py +139 -0
  184. mlebench/competitions/my-custom-task-01/prepare.py +2 -0
  185. mlebench/competitions/new-my-task-01/prepare.py +2 -0
  186. mlebench/competitions/new-my-task-03/grade.py +107 -0
  187. mlebench/competitions/new-my-task-03/prepare.py +2 -0
  188. mlebench/competitions/new-york-city-taxi-fare-prediction/grade.py +28 -0
  189. mlebench/competitions/new-york-city-taxi-fare-prediction/prepare.py +44 -0
  190. mlebench/competitions/new-york-city-taxi-fare-prediction/prepare_val.py +89 -0
  191. mlebench/competitions/nfl-player-contact-detection/grade.py +36 -0
  192. mlebench/competitions/nfl-player-contact-detection/prepare.py +101 -0
  193. mlebench/competitions/nfl-player-contact-detection/prepare_val.py +186 -0
  194. mlebench/competitions/nomad2018-predict-transparent-conductors/grade.py +47 -0
  195. mlebench/competitions/nomad2018-predict-transparent-conductors/prepare.py +77 -0
  196. mlebench/competitions/nomad2018-predict-transparent-conductors/prepare_val.py +144 -0
  197. mlebench/competitions/osic-pulmonary-fibrosis-progression/grade.py +74 -0
  198. mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare.py +95 -0
  199. mlebench/competitions/osic-pulmonary-fibrosis-progression/prepare_val.py +167 -0
  200. mlebench/competitions/paddy-disease-classification/grade.py +35 -0
  201. mlebench/competitions/paddy-disease-classification/prepare.py +69 -0
  202. mlebench/competitions/paddy-disease-classification/prepare_val.py +122 -0
  203. mlebench/competitions/petfinder-pawpularity-score/grade.py +41 -0
  204. mlebench/competitions/petfinder-pawpularity-score/prepare.py +76 -0
  205. mlebench/competitions/petfinder-pawpularity-score/prepare_val.py +154 -0
  206. mlebench/competitions/plant-pathology-2020-fgvc7/grade.py +41 -0
  207. mlebench/competitions/plant-pathology-2020-fgvc7/prepare.py +74 -0
  208. mlebench/competitions/plant-pathology-2020-fgvc7/prepare_val.py +160 -0
  209. mlebench/competitions/plant-pathology-2021-fgvc8/grade.py +54 -0
  210. mlebench/competitions/plant-pathology-2021-fgvc8/prepare.py +65 -0
  211. mlebench/competitions/plant-pathology-2021-fgvc8/prepare_val.py +130 -0
  212. mlebench/competitions/plant-seedlings-classification/grade.py +39 -0
  213. mlebench/competitions/plant-seedlings-classification/prepare.py +91 -0
  214. mlebench/competitions/plant-seedlings-classification/prepare_val.py +158 -0
  215. mlebench/competitions/playground-series-s3e1/__init__.py +0 -0
  216. mlebench/competitions/playground-series-s3e1/grade.py +52 -0
  217. mlebench/competitions/playground-series-s3e1/prepare.py +25 -0
  218. mlebench/competitions/playground-series-s3e11/__init__.py +0 -0
  219. mlebench/competitions/playground-series-s3e11/grade.py +55 -0
  220. mlebench/competitions/playground-series-s3e11/prepare.py +25 -0
  221. mlebench/competitions/playground-series-s3e18/grade.py +39 -0
  222. mlebench/competitions/playground-series-s3e18/prepare.py +36 -0
  223. mlebench/competitions/playground-series-s3e18/prepare_val.py +89 -0
  224. mlebench/competitions/playground_series_s3e1/__init__.py +0 -0
  225. mlebench/competitions/playground_series_s3e1/grade.py +52 -0
  226. mlebench/competitions/playground_series_s3e1/prepare.py +25 -0
  227. mlebench/competitions/playground_series_s3e11/__init__.py +0 -0
  228. mlebench/competitions/playground_series_s3e11/grade.py +55 -0
  229. mlebench/competitions/playground_series_s3e11/prepare.py +25 -0
  230. mlebench/competitions/predict-volcanic-eruptions-ingv-oe/grade.py +44 -0
  231. mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare.py +68 -0
  232. mlebench/competitions/predict-volcanic-eruptions-ingv-oe/prepare_val.py +146 -0
  233. mlebench/competitions/random-acts-of-pizza/grade.py +14 -0
  234. mlebench/competitions/random-acts-of-pizza/prepare.py +80 -0
  235. mlebench/competitions/random-acts-of-pizza/prepare_val.py +144 -0
  236. mlebench/competitions/ranzcr-clip-catheter-line-classification/classes.py +11 -0
  237. mlebench/competitions/ranzcr-clip-catheter-line-classification/grade.py +31 -0
  238. mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare.py +53 -0
  239. mlebench/competitions/ranzcr-clip-catheter-line-classification/prepare_val.py +113 -0
  240. mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/grade.py +124 -0
  241. mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare.py +219 -0
  242. mlebench/competitions/rsna-2022-cervical-spine-fracture-detection/prepare_val.py +257 -0
  243. mlebench/competitions/rsna-breast-cancer-detection/grade.py +65 -0
  244. mlebench/competitions/rsna-breast-cancer-detection/prepare.py +141 -0
  245. mlebench/competitions/rsna-breast-cancer-detection/prepare_val.py +201 -0
  246. mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/grade.py +13 -0
  247. mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare.py +47 -0
  248. mlebench/competitions/rsna-miccai-brain-tumor-radiogenomic-classification/prepare_val.py +97 -0
  249. mlebench/competitions/santander-customer-satisfaction/grade.py +10 -0
  250. mlebench/competitions/santander-customer-satisfaction/prepare.py +41 -0
  251. mlebench/competitions/sciencebench-001-clintox-nn/__init__.py +0 -0
  252. mlebench/competitions/sciencebench-001-clintox-nn/grade.py +56 -0
  253. mlebench/competitions/sciencebench-001-clintox-nn/prepare.py +75 -0
  254. mlebench/competitions/sciencebench-015-aai/grade.py +37 -0
  255. mlebench/competitions/sciencebench-015-aai/prepare.py +102 -0
  256. mlebench/competitions/sciencebench-051-brain-blood-qsar/grade.py +58 -0
  257. mlebench/competitions/sciencebench-051-brain-blood-qsar/prepare.py +69 -0
  258. mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/grade.py +55 -0
  259. mlebench/competitions/sciencebench-101-experimental-band-gap-prediction/prepare.py +88 -0
  260. mlebench/competitions/see-click-predict-fix/__init__.py +0 -0
  261. mlebench/competitions/see-click-predict-fix/grade.py +66 -0
  262. mlebench/competitions/see-click-predict-fix/prepare.py +25 -0
  263. mlebench/competitions/see_click_predict_fix/__init__.py +0 -0
  264. mlebench/competitions/see_click_predict_fix/grade.py +66 -0
  265. mlebench/competitions/see_click_predict_fix/prepare.py +25 -0
  266. mlebench/competitions/seti-breakthrough-listen/grade.py +11 -0
  267. mlebench/competitions/seti-breakthrough-listen/prepare.py +71 -0
  268. mlebench/competitions/seti-breakthrough-listen/prepare_val.py +159 -0
  269. mlebench/competitions/siim-covid19-detection/grade.py +194 -0
  270. mlebench/competitions/siim-covid19-detection/prepare.py +123 -0
  271. mlebench/competitions/siim-covid19-detection/prepare_val.py +164 -0
  272. mlebench/competitions/siim-isic-melanoma-classification/grade.py +11 -0
  273. mlebench/competitions/siim-isic-melanoma-classification/prepare.py +127 -0
  274. mlebench/competitions/siim-isic-melanoma-classification/prepare_val.py +158 -0
  275. mlebench/competitions/smartphone-decimeter-2022/grade.py +55 -0
  276. mlebench/competitions/smartphone-decimeter-2022/notebook.py +86 -0
  277. mlebench/competitions/smartphone-decimeter-2022/prepare.py +143 -0
  278. mlebench/competitions/smartphone-decimeter-2022/prepare_val.py +199 -0
  279. mlebench/competitions/spaceship-titanic/grade.py +11 -0
  280. mlebench/competitions/spaceship-titanic/prepare.py +23 -0
  281. mlebench/competitions/spaceship-titanic/prepare_val.py +61 -0
  282. mlebench/competitions/spooky-author-identification/classes.py +1 -0
  283. mlebench/competitions/spooky-author-identification/grade.py +38 -0
  284. mlebench/competitions/spooky-author-identification/prepare.py +40 -0
  285. mlebench/competitions/spooky-author-identification/prepare_val.py +78 -0
  286. mlebench/competitions/stanford-covid-vaccine/grade.py +65 -0
  287. mlebench/competitions/stanford-covid-vaccine/prepare.py +129 -0
  288. mlebench/competitions/stanford-covid-vaccine/prepare_val.py +199 -0
  289. mlebench/competitions/statoil-iceberg-classifier-challenge/grade.py +41 -0
  290. mlebench/competitions/statoil-iceberg-classifier-challenge/prepare.py +105 -0
  291. mlebench/competitions/statoil-iceberg-classifier-challenge/prepare_val.py +157 -0
  292. mlebench/competitions/tabular-playground-series-dec-2021/grade.py +11 -0
  293. mlebench/competitions/tabular-playground-series-dec-2021/prepare.py +39 -0
  294. mlebench/competitions/tabular-playground-series-dec-2021/prepare_val.py +99 -0
  295. mlebench/competitions/tabular-playground-series-may-2022/grade.py +9 -0
  296. mlebench/competitions/tabular-playground-series-may-2022/prepare.py +56 -0
  297. mlebench/competitions/tabular-playground-series-may-2022/prepare_val.py +116 -0
  298. mlebench/competitions/tensorflow-speech-recognition-challenge/grade.py +11 -0
  299. mlebench/competitions/tensorflow-speech-recognition-challenge/prepare.py +90 -0
  300. mlebench/competitions/tensorflow-speech-recognition-challenge/prepare_val.py +148 -0
  301. mlebench/competitions/tensorflow2-question-answering/grade.py +122 -0
  302. mlebench/competitions/tensorflow2-question-answering/prepare.py +122 -0
  303. mlebench/competitions/tensorflow2-question-answering/prepare_val.py +187 -0
  304. mlebench/competitions/text-normalization-challenge-english-language/grade.py +49 -0
  305. mlebench/competitions/text-normalization-challenge-english-language/prepare.py +115 -0
  306. mlebench/competitions/text-normalization-challenge-english-language/prepare_val.py +213 -0
  307. mlebench/competitions/text-normalization-challenge-russian-language/grade.py +49 -0
  308. mlebench/competitions/text-normalization-challenge-russian-language/prepare.py +113 -0
  309. mlebench/competitions/text-normalization-challenge-russian-language/prepare_val.py +165 -0
  310. mlebench/competitions/tgs-salt-identification-challenge/grade.py +144 -0
  311. mlebench/competitions/tgs-salt-identification-challenge/prepare.py +158 -0
  312. mlebench/competitions/tgs-salt-identification-challenge/prepare_val.py +166 -0
  313. mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/grade.py +11 -0
  314. mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare.py +95 -0
  315. mlebench/competitions/the-icml-2013-whale-challenge-right-whale-redux/prepare_val.py +141 -0
  316. mlebench/competitions/tmdb-box-office-prediction/__init__.py +0 -0
  317. mlebench/competitions/tmdb-box-office-prediction/grade.py +55 -0
  318. mlebench/competitions/tmdb-box-office-prediction/prepare.py +35 -0
  319. mlebench/competitions/tweet-sentiment-extraction/grade.py +67 -0
  320. mlebench/competitions/tweet-sentiment-extraction/prepare.py +36 -0
  321. mlebench/competitions/tweet-sentiment-extraction/prepare_val.py +106 -0
  322. mlebench/competitions/us-patent-phrase-to-phrase-matching/grade.py +31 -0
  323. mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare.py +33 -0
  324. mlebench/competitions/us-patent-phrase-to-phrase-matching/prepare_val.py +71 -0
  325. mlebench/competitions/utils.py +266 -0
  326. mlebench/competitions/uw-madison-gi-tract-image-segmentation/grade.py +158 -0
  327. mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare.py +139 -0
  328. mlebench/competitions/uw-madison-gi-tract-image-segmentation/prepare_val.py +193 -0
  329. mlebench/competitions/ventilator-pressure-prediction/__init__.py +0 -0
  330. mlebench/competitions/ventilator-pressure-prediction/grade.py +52 -0
  331. mlebench/competitions/ventilator-pressure-prediction/prepare.py +27 -0
  332. mlebench/competitions/ventilator-pressure-prediction/prepare_val.py +142 -0
  333. mlebench/competitions/ventilator_pressure_prediction/__init__.py +0 -0
  334. mlebench/competitions/ventilator_pressure_prediction/grade.py +52 -0
  335. mlebench/competitions/ventilator_pressure_prediction/prepare.py +27 -0
  336. mlebench/competitions/vesuvius-challenge-ink-detection/grade.py +97 -0
  337. mlebench/competitions/vesuvius-challenge-ink-detection/prepare.py +122 -0
  338. mlebench/competitions/vesuvius-challenge-ink-detection/prepare_val.py +170 -0
  339. mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/grade.py +220 -0
  340. mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare.py +129 -0
  341. mlebench/competitions/vinbigdata-chest-xray-abnormalities-detection/prepare_val.py +204 -0
  342. mlebench/competitions/whale-categorization-playground/grade.py +41 -0
  343. mlebench/competitions/whale-categorization-playground/prepare.py +103 -0
  344. mlebench/competitions/whale-categorization-playground/prepare_val.py +196 -0
  345. mlebench/data.py +420 -0
  346. mlebench/grade.py +209 -0
  347. mlebench/grade_helpers.py +235 -0
  348. mlebench/metrics.py +75 -0
  349. mlebench/registry.py +332 -0
  350. mlebench/utils.py +346 -0
  351. {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/WHEEL +0 -0
  352. {dslighting-1.7.1.dist-info → dslighting-1.7.6.dist-info}/entry_points.txt +0 -0
mlebench/cli.py ADDED
@@ -0,0 +1,221 @@
+import argparse
+import json
+from pathlib import Path
+
+from mlebench.data import download_and_prepare_dataset, ensure_leaderboard_exists
+from mlebench.grade import grade_csv, grade_jsonl
+from mlebench.registry import registry
+from mlebench.utils import get_logger
+
+logger = get_logger(__name__)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Runs agents on Kaggle competitions.")
+    subparsers = parser.add_subparsers(dest="command", help="Sub-command to run.")
+
+    # Prepare sub-parser
+    parser_prepare = subparsers.add_parser(
+        name="prepare",
+        help="Download and prepare competitions for the MLE-bench dataset.",
+    )
+    parser_prepare.add_argument(
+        "-c",
+        "--competition-id",
+        help=f"ID of the competition to prepare. Valid options: {registry.list_competition_ids()}",
+        type=str,
+        required=False,
+    )
+    parser_prepare.add_argument(
+        "-a",
+        "--all",
+        help="Prepare all competitions.",
+        action="store_true",
+    )
+    parser_prepare.add_argument(
+        "--lite",
+        help="Prepare all the low complexity competitions (MLE-bench Lite).",
+        action="store_true",
+        required=False,
+    )
+    parser_prepare.add_argument(
+        "-l",
+        "--list",
+        help="Prepare a list of competitions specified line by line in a text file.",
+        type=str,
+        required=False,
+    )
+    parser_prepare.add_argument(
+        "--keep-raw",
+        help="Keep the raw competition files after the competition has been prepared.",
+        action="store_true",
+        required=False,
+        default=False,
+    )
+    parser_prepare.add_argument(
+        "--data-dir",
+        help="Path to the directory where the data will be stored.",
+        required=False,
+        default=registry.get_data_dir(),
+    )
+    parser_prepare.add_argument(
+        "--overwrite-checksums",
+        help="[For Developers] Overwrite the checksums file for the competition.",
+        action="store_true",
+        required=False,
+        default=False,
+    )
+    parser_prepare.add_argument(
+        "--overwrite-leaderboard",
+        help="[For Developers] Overwrite the leaderboard file for the competition.",
+        action="store_true",
+        required=False,
+        default=False,
+    )
+    parser_prepare.add_argument(
+        "--skip-verification",
+        help="[For Developers] Skip the verification of the checksums.",
+        action="store_true",
+        required=False,
+        default=True,
+    )
+
+    # Grade eval sub-parser
+    parser_grade_eval = subparsers.add_parser(
+        "grade",
+        help="Grade a submission to the eval, comprising of several competition submissions",
+    )
+    parser_grade_eval.add_argument(
+        "--submission",
+        help="Path to the JSONL file of submissions. Refer to README.md#submission-format for the required format.",
+        type=str,
+        required=True,
+    )
+    parser_grade_eval.add_argument(
+        "--output-dir",
+        help="Path to the directory where the evaluation metrics will be saved.",
+        type=str,
+        required=True,
+    )
+    parser_grade_eval.add_argument(
+        "--data-dir",
+        help="Path to the directory where the data used for grading is stored.",
+        required=False,
+        default=registry.get_data_dir(),
+    )
+
+    # Grade sample sub-parser
+    parser_grade_sample = subparsers.add_parser(
+        name="grade-sample",
+        help="Grade a single sample (competition) in the eval",
+    )
+    parser_grade_sample.add_argument(
+        "submission",
+        help="Path to the submission CSV file.",
+        type=str,
+    )
+    parser_grade_sample.add_argument(
+        "competition_id",
+        help=f"ID of the competition to grade. Valid options: {registry.list_competition_ids()}",
+        type=str,
+    )
+    parser_grade_sample.add_argument(
+        "--data-dir",
+        help="Path to the directory where the data will be stored.",
+        required=False,
+        default=registry.get_data_dir(),
+    )
+
+    # Dev tools sub-parser
+    parser_dev = subparsers.add_parser("dev", help="Developer tools for extending MLE-bench.")
+    dev_subparsers = parser_dev.add_subparsers(dest="dev_command", help="Developer command to run.")
+
+    # Set up 'download-leaderboard' under 'dev'
+    parser_download_leaderboard = dev_subparsers.add_parser(
+        "download-leaderboard",
+        help="Download the leaderboard for a competition.",
+    )
+    parser_download_leaderboard.add_argument(
+        "-c",
+        "--competition-id",
+        help=f"Name of the competition to download the leaderboard for. Valid options: {registry.list_competition_ids()}",
+        type=str,
+        required=False,
+    )
+    parser_download_leaderboard.add_argument(
+        "--all",
+        help="Download the leaderboard for all competitions.",
+        action="store_true",
+    )
+    parser_download_leaderboard.add_argument(
+        "--force",
+        help="Force download the leaderboard, even if it already exists.",
+        action="store_true",
+    )
+
+    args = parser.parse_args()
+
+    if args.command == "prepare":
+        new_registry = registry.set_data_dir(Path(args.data_dir))
+        new_registry.set_mode("prepare")
+
+        if args.lite:
+            competitions = [
+                new_registry.get_competition(competition_id)
+                for competition_id in new_registry.get_lite_competition_ids()
+            ]
+        elif args.all:
+            competitions = [
+                new_registry.get_competition(competition_id)
+                for competition_id in registry.list_competition_ids()
+            ]
+        elif args.list:
+            with open(args.list, "r") as f:
+                competition_ids = f.read().splitlines()
+            competitions = [
+                new_registry.get_competition(competition_id) for competition_id in competition_ids
+            ]
+        else:
+            if not args.competition_id:
+                parser_prepare.error(
+                    "One of --lite, --all, --list, or --competition-id must be specified."
+                )
+            competitions = [new_registry.get_competition(args.competition_id)]
+
+        for competition in competitions:
+            download_and_prepare_dataset(
+                competition=competition,
+                keep_raw=args.keep_raw,
+                overwrite_checksums=args.overwrite_checksums,
+                overwrite_leaderboard=args.overwrite_leaderboard,
+                skip_verification=args.skip_verification,
+            )
+    if args.command == "grade":
+        new_registry = registry.set_data_dir(Path(args.data_dir))
+        submission = Path(args.submission)
+        output_dir = Path(args.output_dir)
+        grade_jsonl(submission, output_dir, new_registry)
+    if args.command == "grade-sample":
+        new_registry = registry.set_data_dir(Path(args.data_dir))
+        competition = new_registry.get_competition(args.competition_id)
+        submission = Path(args.submission)
+        report = grade_csv(submission, competition)
+        logger.info("Competition report:")
+        logger.info(json.dumps(report.to_dict(), indent=4))
+    if args.command == "dev":
+        if args.dev_command == "download-leaderboard":
+            if args.all:
+                for competition_id in registry.list_competition_ids():
+                    competition = registry.get_competition(competition_id)
+                    ensure_leaderboard_exists(competition, force=args.force)
+            elif args.competition_id:
+                competition = registry.get_competition(args.competition_id)
+                ensure_leaderboard_exists(competition, force=args.force)
+            else:
+                parser_download_leaderboard.error(
+                    "Either --all or --competition-id must be specified."
+                )
+
+
+if __name__ == "__main__":
+    main()
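
To make the new entry point concrete, here is a minimal sketch of driving this CLI in-process; it assumes the wheel above is installed, and the submission path and competition ID are hypothetical placeholders:

```
# Minimal usage sketch (assumes the dslighting wheel, which ships mlebench, is installed).
# Equivalent to the console invocation: <prog> grade-sample my_submission.csv spaceship-titanic
import sys

from mlebench.cli import main

sys.argv = ["mlebench", "grade-sample", "my_submission.csv", "spaceship-titanic"]
main()  # logs the competition report as indented JSON via the module logger
```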
mlebench/competitions/3d-object-detection-for-autonomous-vehicles/grade.py ADDED
@@ -0,0 +1,161 @@
+import multiprocessing
+from functools import partial
+from typing import Any, Dict, List
+
+import numpy as np
+import pandas as pd
+from pyquaternion import Quaternion
+from tqdm import tqdm
+
+from mlebench.grade_helpers import InvalidSubmissionError
+
+from .mAP_evaluation import get_average_precisions, get_class_names
+
+
+def parse_row(sample_token: str, prediction_string: str) -> List[Dict[str, Any]]:
+    """
+    Parse a row comprising of a sample_token and a prediction_string
+
+    `prediction_string` has the form:
+    `{confidence} center_x center_y center_z width length height yaw class_name`
+    where `confidence` is optional
+
+    Example row:
+    ```
+    97ce3ab08ccbc0baae0267cbf8d4da947e1f11ae1dbcb80c3f4408784cd9170c,1.0 2742.152625996093 673.1631800662494 -18.6561112411676 1.834 4.609 1.648 2.619835541569646 car 0.5 2728.9634555684484 657.8296521874645 -18.54676216218047 1.799 4.348 1.728 -0.5425527100619654 bus
+    ```
+
+    Returns list of dicts with keys "sample_token", "translation", "size", "rotation", "name", "score", e.g.
+    ```
+    [{
+        'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
+        'translation': [971.8343488872263, 1713.6816097857359, -25.82534357061308],
+        'size': [2.519726579986132, 7.810161372666739, 3.483438286096803],
+        'rotation': [0.10913582721095375, 0.04099572636992043, 0.01927712319721745, 1.029328402625659],
+        'name': 'car',
+        'score': 0.3077029437237213
+    }]
+    ```
+    """
+    if prediction_string.strip() == "":
+        return []
+    prediction_string = prediction_string.split(" ")
+
+    single_prediction_length = 9
+    if len(prediction_string) % single_prediction_length != 0:
+        raise InvalidSubmissionError(
+            f"Prediction string must have {single_prediction_length} tokens per object, but got {len(prediction_string)} tokens"
+        )
+
+    n_predictions = len(prediction_string) / single_prediction_length
+    preds = []
+    for i in range(int(n_predictions)):
+        prediction_segment = prediction_string[
+            i * single_prediction_length : (i + 1) * single_prediction_length
+        ]
+        confidence = float(prediction_segment[0])
+        prediction_segment = prediction_segment[1:]
+
+        # Parse the prediction string
+        translation = [float(x) for x in prediction_segment[0:3]]
+        size = [float(x) for x in prediction_segment[3:6]]
+        yaw = float(prediction_segment[6])
+        name = prediction_segment[7]
+
+        quat = Quaternion(axis=[0, 0, 1], radians=yaw)
+        # (from https://www.kaggle.com/competitions/3d-object-detection-for-autonomous-vehicles/discussion/110884#638390)
+
+        preds.append(
+            {
+                "sample_token": sample_token,
+                "translation": translation,
+                "size": size,
+                "rotation": list(quat),
+                "name": name,
+                "score": confidence,
+            }
+        )
+    return preds
+
+
+def process_sample(gt_pred_pair, iou_thresholds):
+    gt, predictions = gt_pred_pair
+    class_names = get_class_names(gt)
+    sample_mAPs = []
+    for iou_threshold in iou_thresholds:
+        average_precisions = get_average_precisions(gt, predictions, class_names, iou_threshold)
+        mAP = np.mean(average_precisions)
+        sample_mAPs.append(mAP)
+    return sample_mAPs
+
+
+def grade(submission: pd.DataFrame, answers: pd.DataFrame) -> float:
+    """
+    submission and answers are dataframes with columns `Id` and `PredictionString`
+
+    We grade the submission using the 3D mAP metric described in the competition overview:
+    https://www.kaggle.com/competitions/3d-object-detection-for-autonomous-vehicles/overview/evaluation
+
+    The competition organizers provide a Python script (mAP_evaluation.py) to perform this metric calculation
+    (search text for "3d version of the COCO mAP") for a (gt, predictions) pair from a single sample.
+    https://www.kaggle.com/competitions/3d-object-detection-for-autonomous-vehicles/discussion/133895
+
+    For each sample, we first parse the PredictionString in the format expected by mAP_evaluation.py, then calculate
+    the mAP over the requested range of IoU thresholds, and get a mean mAP over all thresholds for that sample.
+
+    The final score is the mean over all samples (i.e. the mean of the mean mAPs).
+    """
+    if len(submission) != len(answers):
+        raise InvalidSubmissionError("Submission and answers must have the same length")
+
+    if "Id" not in submission.columns:
+        raise InvalidSubmissionError("Submission must have an 'Id' column")
+
+    if "PredictionString" not in submission.columns:
+        raise InvalidSubmissionError("Submission must have a 'PredictionString' column")
+
+    assert "Id" in answers.columns, "Answers must have an 'Id' column"
+    assert "PredictionString" in answers.columns, "Answers must have a 'PredictionString' column"
+
+    submission = submission.sort_values("Id")
+    answers = answers.sort_values("Id")
+
+    if (submission["Id"].values != answers["Id"].values).any():
+        raise InvalidSubmissionError("Submission and answers must have the same ids")
+
+    # Empty values in the PredictionString column are allowed, but must be filled with an empty string
+    # (pandas converts them to NaN, so we convert them back)
+    submission["PredictionString"] = submission["PredictionString"].fillna("")
+
+    # Parse each row into the format expected by mAP_evaluation.py
+    submission_samples = [
+        parse_row(row["Id"], row["PredictionString"]) for _, row in submission.iterrows()
+    ]
+    answer_samples = [
+        parse_row(row["Id"], row["PredictionString"]) for _, row in answers.iterrows()
+    ]
+
+    iou_thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
+    # (from https://www.kaggle.com/competitions/3d-object-detection-for-autonomous-vehicles/overview/evaluation)
+
+    # Prepare the data for parallel processing
+    sample_pairs = list(zip(answer_samples, submission_samples))
+
+    # Use multiprocessing to parallelize the computation
+    num_cpus = multiprocessing.cpu_count()
+    with multiprocessing.Pool(processes=num_cpus) as pool:
+        results = list(
+            tqdm(
+                pool.imap(partial(process_sample, iou_thresholds=iou_thresholds), sample_pairs),
+                total=len(sample_pairs),
+                desc="Processing samples",
+            )
+        )
+
+    # Flatten the results
+    mAPs = [mAP for sample_mAPs in results for mAP in sample_mAPs]
+
+    # Average over all samples and IoU thresholds
+    final_mAP = np.mean(mAPs)
+
+    return final_mAP
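
As a gloss on `parse_row` above: each 9-token group splits into a confidence score, a 3-vector box center, a 3-vector box size, a yaw angle, and a class name, and the yaw is converted to a quaternion about the vertical axis. A minimal sketch of that conversion, assuming pyquaternion is installed (the yaw value here is made up):

```
# Sketch of the yaw -> quaternion step used by parse_row.
# Assumes pyquaternion is installed; the yaw value is hypothetical.
from pyquaternion import Quaternion

yaw = 0.3  # heading in radians, a rotation about the z (up) axis
quat = Quaternion(axis=[0, 0, 1], radians=yaw)
print(list(quat))  # pyquaternion element order: [w, x, y, z]
```

Note also that because every sample is scored over the same ten IoU thresholds, the flat mean over the flattened per-sample lists equals the mean of the per-sample mean mAPs described in the `grade` docstring.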