julearn 0.3.2.dev21__tar.gz → 0.3.2.dev57__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (237)
  1. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/PKG-INFO +6 -1
  2. julearn-0.3.2.dev57/docs/changes/newsfragments/255.bugfix +1 -0
  3. julearn-0.3.2.dev57/docs/changes/newsfragments/260.enh +1 -0
  4. julearn-0.3.2.dev57/docs/changes/newsfragments/260.misc +1 -0
  5. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/conf.py +1 -0
  6. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/getting_started.rst +5 -1
  7. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/links.inc +1 -0
  8. julearn-0.3.2.dev57/examples/03_complex_models/run_hyperparameter_tuning_bayessearch.py +95 -0
  9. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_hyperparameters_docs.py +123 -13
  10. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/_version.py +2 -2
  11. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/api.py +32 -23
  12. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/conftest.py +134 -1
  13. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/inspector.py +8 -5
  14. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/tests/test_pipeline.py +7 -7
  15. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/__init__.py +4 -0
  16. julearn-0.3.2.dev57/julearn/model_selection/_skopt_searcher.py +32 -0
  17. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/available_searchers.py +66 -5
  18. julearn-0.3.2.dev57/julearn/model_selection/tests/test_available_searchers.py +83 -0
  19. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/pipeline/merger.py +44 -35
  20. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/pipeline/pipeline_creator.py +62 -4
  21. {julearn-0.3.2.dev21/julearn/pipeline/test → julearn-0.3.2.dev57/julearn/pipeline/tests}/test_merger.py +11 -2
  22. {julearn-0.3.2.dev21/julearn/pipeline/test → julearn-0.3.2.dev57/julearn/pipeline/tests}/test_pipeline_creator.py +231 -8
  23. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/viz/_scores.py +1 -2
  24. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn.egg-info/PKG-INFO +6 -1
  25. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn.egg-info/SOURCES.txt +9 -4
  26. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn.egg-info/requires.txt +7 -0
  27. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/pyproject.toml +11 -0
  28. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/tox.ini +15 -4
  29. julearn-0.3.2.dev21/julearn/model_selection/tests/test_available_searchers.py +0 -44
  30. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/ISSUE_TEMPLATE/bug_report.yaml +0 -0
  31. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  32. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/ISSUE_TEMPLATE/documentation_request.yaml +0 -0
  33. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/ISSUE_TEMPLATE/feature_request.yaml +0 -0
  34. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/check-stale.yml +0 -0
  35. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/ci-docs.yml +0 -0
  36. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/ci.yml +0 -0
  37. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/docs-preview.yml +0 -0
  38. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/docs.yml +0 -0
  39. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/lint.yml +0 -0
  40. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.github/workflows/pypi.yml +0 -0
  41. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.gitignore +0 -0
  42. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/.pre-commit-config.yaml +0 -0
  43. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/AUTHORS.rst +0 -0
  44. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/LICENSE.md +0 -0
  45. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/README.md +0 -0
  46. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/codecov.yml +0 -0
  47. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/Makefile +0 -0
  48. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_static/css/custom.css +0 -0
  49. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_static/js/custom.js +0 -0
  50. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_templates/class.rst +0 -0
  51. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_templates/function.rst +0 -0
  52. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_templates/function_warning.rst +0 -0
  53. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/_templates/versions.html +0 -0
  54. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/base.rst +0 -0
  55. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/index.rst +0 -0
  56. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/inspect.rst +0 -0
  57. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/main.rst +0 -0
  58. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/model_selection.rst +0 -0
  59. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/models.rst +0 -0
  60. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/pipeline.rst +0 -0
  61. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/prepare.rst +0 -0
  62. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/scoring.rst +0 -0
  63. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/stats.rst +0 -0
  64. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/transformers.rst +0 -0
  65. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/utils.rst +0 -0
  66. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/api/viz.rst +0 -0
  67. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/available_pipeline_steps.rst +0 -0
  68. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/contributors.inc +0 -0
  69. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/newsfragments/.gitignore +0 -0
  70. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/newsfragments/224.misc +0 -0
  71. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/newsfragments/244.misc +0 -0
  72. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/newsfragments/249.bugfix +0 -0
  73. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/changes/newsfragments/251.misc +0 -0
  74. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/configuration.rst +0 -0
  75. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/contributing.rst +0 -0
  76. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/examples.rst +0 -0
  77. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/faq.rst +0 -0
  78. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/corrected_ttest.png +0 -0
  79. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/final_estimator.png +0 -0
  80. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/iris_X.png +0 -0
  81. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/iris_df.png +0 -0
  82. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/iris_y.png +0 -0
  83. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo.png +0 -0
  84. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_calm.png +0 -0
  85. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_confbias.png +0 -0
  86. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_cv.png +0 -0
  87. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_generalization.png +0 -0
  88. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_it.png +0 -0
  89. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_ml.png +0 -0
  90. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/julearn_logo_mlit.png +0 -0
  91. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/multiple_scorers_run_cv.png +0 -0
  92. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/plot_scores.png +0 -0
  93. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/scores_run_cv.png +0 -0
  94. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/scores_run_cv_splitter.png +0 -0
  95. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/images/scores_run_cv_train.png +0 -0
  96. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/index.rst +0 -0
  97. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/maintaining.rst +0 -0
  98. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/redirect.html +0 -0
  99. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/CBPM.rst +0 -0
  100. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/confound_removal.rst +0 -0
  101. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/cross_validation_splitter.rst +0 -0
  102. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/hyperparameter_tuning.rst +0 -0
  103. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/index.rst +0 -0
  104. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/model_inspect.rst +0 -0
  105. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/stacked_models.rst +0 -0
  106. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/selected_deeper_topics/target_transformers.rst +0 -0
  107. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/sphinxext/gh_substitutions.py +0 -0
  108. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/cross_validation.rst +0 -0
  109. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/data.rst +0 -0
  110. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/index.rst +0 -0
  111. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/model_comparison.rst +0 -0
  112. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/model_evaluation.rst +0 -0
  113. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/what_really_need_know/pipeline.rst +0 -0
  114. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/docs/whats_new.rst +0 -0
  115. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/README.rst +0 -0
  116. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/plot_cm_acc_multiclass.py +0 -0
  117. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/plot_example_regression.py +0 -0
  118. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/plot_stratified_kfold_reg.py +0 -0
  119. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/run_combine_pandas.py +0 -0
  120. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/run_grouped_cv.py +0 -0
  121. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/00_starting/run_simple_binary_classification.py +0 -0
  122. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/01_model_comparison/README.rst +0 -0
  123. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/01_model_comparison/plot_simple_model_comparison.py +0 -0
  124. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/02_inspection/README.rst +0 -0
  125. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/02_inspection/plot_groupcv_inspect_svm.py +0 -0
  126. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/02_inspection/plot_inspect_random_forest.py +0 -0
  127. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/02_inspection/plot_preprocess.py +0 -0
  128. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/02_inspection/run_binary_inspect_folds.py +0 -0
  129. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/README.rst +0 -0
  130. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/run_apply_to_target.py +0 -0
  131. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/run_example_pca_featsets.py +0 -0
  132. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/run_hyperparameter_multiple_grids.py +0 -0
  133. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/run_hyperparameter_tuning.py +0 -0
  134. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/03_complex_models/run_stacked_models.py +0 -0
  135. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/04_confounds/README.rst +0 -0
  136. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/04_confounds/plot_confound_removal_classification.py +0 -0
  137. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/04_confounds/run_return_confounds.py +0 -0
  138. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/05_customization/README.rst +0 -0
  139. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/05_customization/run_custom_scorers_regression.py +0 -0
  140. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/README.rst +0 -0
  141. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_cbpm_docs.py +0 -0
  142. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_confound_removal_docs.py +0 -0
  143. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_cv_splitters_docs.py +0 -0
  144. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_data_docs.py +0 -0
  145. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_model_comparison_docs.py +0 -0
  146. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_model_evaluation_docs.py +0 -0
  147. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_model_inspection_docs.py +0 -0
  148. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_pipeline_docs.py +0 -0
  149. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_stacked_models_docs.py +0 -0
  150. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/99_docs/run_target_transformer_docs.py +0 -0
  151. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/README.rst +0 -0
  152. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/XX_disabled/dis_run_n_jobs.py +0 -0
  153. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/examples/XX_disabled/dis_run_target_confound_removal.py +0 -0
  154. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/ignore_words.txt +0 -0
  155. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/__init__.py +0 -0
  156. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/base/__init__.py +0 -0
  157. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/base/column_types.py +0 -0
  158. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/base/estimators.py +0 -0
  159. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/base/tests/test_base_estimators.py +0 -0
  160. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/base/tests/test_column_types.py +0 -0
  161. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/config.py +0 -0
  162. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/__init__.py +0 -0
  163. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/_cv.py +0 -0
  164. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/_pipeline.py +0 -0
  165. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/_preprocess.py +0 -0
  166. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/tests/test_cv.py +0 -0
  167. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/tests/test_inspector.py +0 -0
  168. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/inspect/tests/test_preprocess.py +0 -0
  169. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/continuous_stratified_kfold.py +0 -0
  170. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/stratified_bootstrap.py +0 -0
  171. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/tests/test_continous_stratified_kfold.py +0 -0
  172. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/model_selection/tests/test_stratified_bootstrap.py +0 -0
  173. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/__init__.py +0 -0
  174. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/available_models.py +0 -0
  175. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/dynamic.py +0 -0
  176. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/tests/test_available_models.py +0 -0
  177. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/tests/test_dynamic.py +0 -0
  178. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/models/tests/test_models.py +0 -0
  179. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/pipeline/__init__.py +0 -0
  180. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/pipeline/target_pipeline.py +0 -0
  181. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/pipeline/target_pipeline_creator.py +0 -0
  182. {julearn-0.3.2.dev21/julearn/pipeline/test → julearn-0.3.2.dev57/julearn/pipeline/tests}/test_target_pipeline.py +0 -0
  183. {julearn-0.3.2.dev21/julearn/pipeline/test → julearn-0.3.2.dev57/julearn/pipeline/tests}/test_target_pipeline_creator.py +0 -0
  184. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/prepare.py +0 -0
  185. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/scoring/__init__.py +0 -0
  186. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/scoring/available_scorers.py +0 -0
  187. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/scoring/metrics.py +0 -0
  188. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/scoring/tests/test_available_scorers.py +0 -0
  189. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/scoring/tests/test_metrics.py +0 -0
  190. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/stats/__init__.py +0 -0
  191. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/stats/corrected_ttest.py +0 -0
  192. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/stats/tests/test_corrected_ttest.py +0 -0
  193. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/tests/test_api.py +0 -0
  194. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/tests/test_config.py +0 -0
  195. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/tests/test_prepare.py +0 -0
  196. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/__init__.py +0 -0
  197. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/available_transformers.py +0 -0
  198. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/cbpm.py +0 -0
  199. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/confound_remover.py +0 -0
  200. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/__init__.py +0 -0
  201. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/change_column_types.py +0 -0
  202. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/drop_columns.py +0 -0
  203. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/filter_columns.py +0 -0
  204. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/set_column_types.py +0 -0
  205. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/tests/test_change_column_types.py +0 -0
  206. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/tests/test_drop_columns.py +0 -0
  207. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/tests/test_filter_columns.py +0 -0
  208. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/dataframe/tests/test_set_column_types.py +0 -0
  209. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/ju_column_transformer.py +0 -0
  210. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/__init__.py +0 -0
  211. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/available_target_transformers.py +0 -0
  212. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/ju_target_transformer.py +0 -0
  213. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/ju_transformed_target_model.py +0 -0
  214. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/target_confound_remover.py +0 -0
  215. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/tests/test_available_target_transformers.py +0 -0
  216. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/tests/test_ju_target_transformer.py +0 -0
  217. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/tests/test_ju_transformed_target_model.py +0 -0
  218. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/target/tests/test_target_confound_remover.py +0 -0
  219. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/tests/test_available_transformers.py +0 -0
  220. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/tests/test_cbpm.py +0 -0
  221. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/tests/test_confounds.py +0 -0
  222. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/transformers/tests/test_jucolumntransformers.py +0 -0
  223. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/__init__.py +0 -0
  224. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/_cv.py +0 -0
  225. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/checks.py +0 -0
  226. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/logging.py +0 -0
  227. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/testing.py +0 -0
  228. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/tests/test_logging.py +0 -0
  229. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/tests/test_version.py +0 -0
  230. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/typing.py +0 -0
  231. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/utils/versions.py +0 -0
  232. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/viz/__init__.py +0 -0
  233. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn/viz/res/julearn_logo_generalization.png +0 -0
  234. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn.egg-info/dependency_links.txt +0 -0
  235. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/julearn.egg-info/top_level.txt +0 -0
  236. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/setup.cfg +0 -0
  237. {julearn-0.3.2.dev21 → julearn-0.3.2.dev57}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: julearn
3
- Version: 0.3.2.dev21
3
+ Version: 0.3.2.dev57
4
4
  Summary: Juelich Machine Learning Library
5
5
  Author-email: Fede Raimondo <f.raimondo@fz-juelich.de>, Sami Hamdan <s.hamdan@fz-juelich.de>
6
6
  Maintainer-email: Sami Hamdan <s.hamdan@fz-juelich.de>
@@ -40,12 +40,17 @@ Requires-Dist: furo<2024.0.0,>=2022.9.29; extra == "docs"
40
40
  Requires-Dist: sphinx_copybutton<0.6,>=0.5.0; extra == "docs"
41
41
  Requires-Dist: numpydoc<1.6,>=1.5.0; extra == "docs"
42
42
  Requires-Dist: towncrier<24; extra == "docs"
43
+ Requires-Dist: scikit-optimize<0.11,>=0.10.0; extra == "docs"
43
44
  Provides-Extra: deslib
44
45
  Requires-Dist: deslib<0.4,>=0.3.5; extra == "deslib"
45
46
  Provides-Extra: viz
46
47
  Requires-Dist: panel>=1.3.0; extra == "viz"
47
48
  Requires-Dist: bokeh>=3.0.0; extra == "viz"
48
49
  Requires-Dist: param>=2.0.0; extra == "viz"
50
+ Provides-Extra: skopt
51
+ Requires-Dist: scikit-optimize<0.11,>=0.10.0; extra == "skopt"
52
+ Provides-Extra: all
53
+ Requires-Dist: julearn[skopt,viz]; extra == "all"
49
54
 
50
55
  # julearn
51
56
 
@@ -0,0 +1 @@
1
+ Update bokeh api calls to remove warnings by `Fede Raimondo`_
@@ -0,0 +1 @@
1
+ Add :class:`~skopt.BayesSearchCV` to the list of available searchers as 'bayes' by `Fede Raimondo`_
@@ -0,0 +1 @@
1
+ Add ``all`` as optional dependencies to install all functional dependencies by `Fede Raimondo`_
@@ -160,6 +160,7 @@ intersphinx_mapping = {
160
160
  # "sqlalchemy": ("https://docs.sqlalchemy.org/en/20/", None),
161
161
  "joblib": ("https://joblib.readthedocs.io/en/latest/", None),
162
162
  "scipy": ("https://docs.scipy.org/doc/scipy/", None),
163
+ "skopt": ("https://scikit-optimize.readthedocs.io/en/latest", None),
163
164
  }
164
165
 
165
166
 
@@ -86,4 +86,8 @@ The following optional dependencies are available:
86
86
 
87
87
  * ``viz``: Visualization tools for ``julearn``. This includes the
88
88
  :mod:`.viz` module.
89
- * ``deslib``: The :mod:`.dynamic` module requires the `deslib`_ package.
89
+ * ``deslib``: The :mod:`.dynamic` module requires the `deslib`_ package. This
90
+ module is not compatible with newer Python versions and it is unmaintained.
91
+ * ``skopt``: Using the ``"bayes"`` searcher (:class:`~skopt.BayesSearchCV`)
92
+ requires the `scikit-optimize`_ package.
93
+ * ``all``: Install all optional functional dependencies (except ``deslib``).
@@ -40,3 +40,4 @@
40
40
 
41
41
 
42
42
  .. _`DESlib`: https://github.com/scikit-learn-contrib/DESlib
43
+ .. _`scikit-optimize`: https://scikit-optimize.readthedocs.io/en/stable/
@@ -0,0 +1,95 @@
1
+ """
2
+ Tuning Hyperparameters using Bayesian Search
3
+ ============================================
4
+
5
+ This example uses the ``fmri`` dataset, performs simple binary classification
6
+ using a Support Vector Machine classifier and analyzes the model.
7
+
8
+ References
9
+ ----------
10
+
11
+ Waskom, M.L., Frank, M.C., Wagner, A.D. (2016). Adaptive engagement of
12
+ cognitive control in context-dependent decision-making. Cerebral Cortex.
13
+
14
+ .. include:: ../../links.inc
15
+ """
16
+
17
+ # Authors: Federico Raimondo <f.raimondo@fz-juelich.de>
18
+ # License: AGPL
19
+
20
+ import numpy as np
21
+ from seaborn import load_dataset
22
+
23
+ from julearn import run_cross_validation
24
+ from julearn.utils import configure_logging, logger
25
+ from julearn.pipeline import PipelineCreator
26
+
27
+
28
+ ###############################################################################
29
+ # Set the logging level to info to see extra information.
30
+ configure_logging(level="INFO")
31
+
32
+ ###############################################################################
33
+ # Set the random seed to always have the same example.
34
+ np.random.seed(42)
35
+
36
+ ###############################################################################
37
+ # Load the dataset.
38
+ df_fmri = load_dataset("fmri")
39
+ df_fmri.head()
40
+
41
+ ###############################################################################
42
+ # Set the dataframe in the right format.
43
+ df_fmri = df_fmri.pivot(
44
+ index=["subject", "timepoint", "event"], columns="region", values="signal"
45
+ )
46
+
47
+ df_fmri = df_fmri.reset_index()
48
+ df_fmri.head()
49
+
50
+ ###############################################################################
51
+ # Following the hyperparamter tuning example, we will now use a Bayesian
52
+ # search to find the best hyperparameters for the SVM model.
53
+ X = ["frontal", "parietal"]
54
+ y = "event"
55
+
56
+ creator1 = PipelineCreator(problem_type="classification")
57
+ creator1.add("zscore")
58
+ creator1.add(
59
+ "svm",
60
+ kernel=["linear"],
61
+ C=(1e-6, 1e3, "log-uniform"),
62
+ )
63
+
64
+ creator2 = PipelineCreator(problem_type="classification")
65
+ creator2.add("zscore")
66
+ creator2.add(
67
+ "svm",
68
+ kernel=["rbf"],
69
+ C=(1e-6, 1e3, "log-uniform"),
70
+ gamma=(1e-6, 1e1, "log-uniform"),
71
+ )
72
+
73
+ search_params = {
74
+ "kind": "bayes",
75
+ "cv": 2, # to speed up the example
76
+ "n_iter": 10, # 10 iterations of bayesian search to speed up example
77
+ }
78
+
79
+
80
+ scores, estimator = run_cross_validation(
81
+ X=X,
82
+ y=y,
83
+ data=df_fmri,
84
+ model=[creator1, creator2],
85
+ cv=2, # to speed up the example
86
+ search_params=search_params,
87
+ return_estimator="final",
88
+ )
89
+
90
+ print(scores["test_score"].mean())
91
+
92
+
93
+ ###############################################################################
94
+ # It seems that we might have found a better model, but which one is it?
95
+ print(estimator.best_params_)
@@ -243,22 +243,132 @@ pprint(model_tuned.best_params_)
243
243
  # tries to find the best combination of values for the hyperparameters using
244
244
  # cross-validation.
245
245
  #
246
- # By default, ``julearn`` uses a :class:`~sklearn.model_selection.GridSearchCV`.
247
- # This searcher is very simple. First, it construct the "grid" of
248
- # hyperparameters to try. As we see above, we have 3 hyperparameters to tune.
249
- # So it constructs a 3-dimentional grid with all the possible combinations of
250
- # the hyperparameters values. The second step is to perform cross-validation
251
- # on each of the possible combinations of hyperparameters values.
246
+ # By default, ``julearn`` uses a
247
+ # :class:`~sklearn.model_selection.GridSearchCV`.
248
+ # This searcher, specified as ``"grid"`` is very simple. First, it constructs
249
+ # the _grid_ of hyperparameters to try. As we see above, we have 3
250
+ # hyperparameters to tune. So it constructs a 3-dimentional grid with all the
251
+ # possible combinations of the hyperparameters values. The second step is to
252
+ # perform cross-validation on each of the possible combinations of
253
+ # hyperparameters values.
252
254
  #
253
- # Another searcher that ``julearn`` provides is the
254
- # :class:`~sklearn.model_selection.RandomizedSearchCV`. This searcher is
255
- # similar to the :class:`~sklearn.model_selection.GridSearchCV`, but instead
256
- # of trying all the possible combinations of hyperparameters values, it tries
255
+ # Other searchers that ``julearn`` provides are the
256
+ # :class:`~sklearn.model_selection.RandomizedSearchCV` and
257
+ # :class:`~skopt.BayesSearchCV`.
258
+ #
259
+ # The randomized searcher
260
+ # (:class:`~sklearn.model_selection.RandomizedSearchCV`) is similar to the
261
+ # :class:`~sklearn.model_selection.GridSearchCV`, but instead
262
+ # of trying all the possible combinations of hyperparameter values, it tries
257
263
  # a random subset of them. This is useful when we have a lot of hyperparameters
258
- # to tune, since it can be very time consuming to try all the possible, as well
259
- # as continuous parameters that can be sampled out of a distribution. For
260
- # more information, see the
264
+ # to tune, since it can be very time consuming to try all the possible
265
+ # combinations, as well as continuous parameters that can be sampled out of a
266
+ # distribution. For more information, see the
261
267
  # :class:`~sklearn.model_selection.RandomizedSearchCV` documentation.
268
+ #
269
+ # The Bayesian searcher (:class:`~skopt.BayesSearchCV`) is a bit more
270
+ # complex. It uses Bayesian optimization to find the best hyperparameter set.
271
+ # As with the randomized search, it is useful when we have many
272
+ # hyperparameters to tune, and we don't want to try all the possible
273
+ # combinations due to computational constraints. For more information, see the
274
+ # :class:`~skopt.BayesSearchCV` documentation, including how to specify
275
+ # the prior distributions of the hyperparameters.
276
+ #
277
+ # We can specify the kind of searcher and its parametrization, by setting the
278
+ # ``search_params`` parameter in the :func:`.run_cross_validation` function.
279
+ # For example, we can use the
280
+ # :class:`~sklearn.model_selection.RandomizedSearchCV` searcher with
281
+ # 10 iterations of random search.
282
+
283
+ search_params = {
284
+ "kind": "random",
285
+ "n_iter": 10,
286
+ }
287
+
288
+ scores_tuned, model_tuned = run_cross_validation(
289
+ X=X,
290
+ y=y,
291
+ data=df,
292
+ X_types=X_types,
293
+ model=creator,
294
+ return_estimator="all",
295
+ search_params=search_params,
296
+ )
297
+
298
+ print(
299
+ "Scores with best hyperparameter using 10 iterations of "
300
+ f"randomized search: {scores_tuned['test_score'].mean()}"
301
+ )
302
+ pprint(model_tuned.best_params_)
303
+
304
+ ###############################################################################
305
+ # We can now see that the best hyperparameter might be different from the grid
306
+ # search. This is because it tried only 10 combinations and not the whole grid.
307
+ # Furthermore, the :class:`~sklearn.model_selection.RandomizedSearchCV`
308
+ # searcher can sample hyperparameters from distributions, which can be useful
309
+ # when we have continuous hyperparameters.
310
+ # Let's set both ``C`` and ``gamma`` to be sampled from log-uniform
311
+ # distributions. We can do this by setting the hyperparameter values as a
312
+ # tuple with the following format: ``(low, high, distribution)``. The
313
+ # distribution can be either ``"log-uniform"`` or ``"uniform"``.
314
+
315
+ creator = PipelineCreator(problem_type="classification")
316
+ creator.add("zscore")
317
+ creator.add("select_k", k=[2, 3, 4])
318
+ creator.add(
319
+ "svm",
320
+ C=(0.01, 10, "log-uniform"),
321
+ gamma=(1e-3, 1e-1, "log-uniform"),
322
+ )
323
+
324
+ print(creator)
325
+
326
+ scores_tuned, model_tuned = run_cross_validation(
327
+ X=X,
328
+ y=y,
329
+ data=df,
330
+ X_types=X_types,
331
+ model=creator,
332
+ return_estimator="all",
333
+ search_params=search_params,
334
+ )
335
+
336
+ print(
337
+ "Scores with best hyperparameter using 10 iterations of "
338
+ f"randomized search: {scores_tuned['test_score'].mean()}"
339
+ )
340
+ pprint(model_tuned.best_params_)
341
+
342
+
343
+ ###############################################################################
344
+ # We can also control the number of cross-validation folds used by the searcher
345
+ # by setting the ``cv`` parameter in the ``search_params`` dictionary. For
346
+ # example, we can use a bayesian search with 3 folds. Fortunately, the
347
+ # :class:`~skopt.BayesSearchCV` searcher also accepts distributions for the
348
+ # hyperparameters.
349
+
350
+ search_params = {
351
+ "kind": "bayes",
352
+ "n_iter": 10,
353
+ "cv": 3,
354
+ }
355
+
356
+ scores_tuned, model_tuned = run_cross_validation(
357
+ X=X,
358
+ y=y,
359
+ data=df,
360
+ X_types=X_types,
361
+ model=creator,
362
+ return_estimator="all",
363
+ search_params=search_params,
364
+ )
365
+
366
+ print(
367
+ "Scores with best hyperparameter using 10 iterations of "
368
+ f"bayesian search and 3-fold CV: {scores_tuned['test_score'].mean()}"
369
+ )
370
+ pprint(model_tuned.best_params_)
371
+
262
372
 
263
373
  ###############################################################################
264
374
  #
@@ -12,5 +12,5 @@ __version__: str
12
12
  __version_tuple__: VERSION_TUPLE
13
13
  version_tuple: VERSION_TUPLE
14
14
 
15
- __version__ = version = '0.3.2.dev21'
16
- __version_tuple__ = version_tuple = (0, 3, 2, 'dev21')
15
+ __version__ = version = '0.3.2.dev57'
16
+ __version_tuple__ = version_tuple = (0, 3, 2, 'dev57')
@@ -4,12 +4,16 @@
4
4
  # Sami Hamdan <s.hamdan@fz-juelich.de>
5
5
  # License: AGPL
6
6
 
7
- from typing import Dict, List, Optional, Union
7
+ from typing import Dict, Iterable, List, Optional, Union
8
8
 
9
9
  import numpy as np
10
10
  import pandas as pd
11
11
  from sklearn.base import BaseEstimator
12
- from sklearn.model_selection import check_cv, cross_validate
12
+ from sklearn.model_selection import (
13
+ BaseCrossValidator,
14
+ check_cv,
15
+ cross_validate,
16
+ )
13
17
  from sklearn.model_selection._search import BaseSearchCV
14
18
  from sklearn.pipeline import Pipeline
15
19
 
@@ -25,14 +29,14 @@ def run_cross_validation( # noqa: C901
25
29
  X: List[str], # noqa: N803
26
30
  y: str,
27
31
  model: Union[str, PipelineCreator, BaseEstimator, List[PipelineCreator]],
32
+ data: pd.DataFrame,
28
33
  X_types: Optional[Dict] = None, # noqa: N803
29
- data: Optional[pd.DataFrame] = None,
30
34
  problem_type: Optional[str] = None,
31
35
  preprocess: Union[None, str, List[str]] = None,
32
36
  return_estimator: Optional[str] = None,
33
37
  return_inspector: bool = False,
34
38
  return_train_score: bool = False,
35
- cv: Optional[int] = None,
39
+ cv: Optional[Union[int, BaseCrossValidator, Iterable]] = None,
36
40
  groups: Optional[str] = None,
37
41
  scoring: Union[str, List[str], None] = None,
38
42
  pos_labels: Union[str, List[str], None] = None,
@@ -54,12 +58,11 @@ def run_cross_validation( # noqa: C901
54
58
  See :ref:`data_usage` for details.
55
59
  model : str or scikit-learn compatible model.
56
60
  If string, it will use one of the available models.
61
+ data : pandas.DataFrame
62
+ DataFrame with the data. See :ref:`data_usage` for details.
57
63
  X_types : dict[str, list of str]
58
64
  A dictionary containing keys with column type as a str and the
59
65
  columns of this column type as a list of str.
60
- data : pandas.DataFrame | None
61
- DataFrame with the data (optional).
62
- See :ref:`data_usage` for details.
63
66
  problem_type : str
64
67
  The kind of problem to model.
65
68
 
@@ -132,8 +135,8 @@ def run_cross_validation( # noqa: C901
132
135
  the following keys:
133
136
 
134
137
  * 'kind': The kind of search algorithm to use, e.g.:
135
- 'grid' or 'random'. Can be any valid julearn searcher name or
136
- scikit-learn compatible searcher.
138
+ 'grid', 'random' or 'bayes'. Can be any valid julearn searcher name
139
+ or scikit-learn compatible searcher.
137
140
  * 'cv': If a searcher is going to be used, the cross-validation
138
141
  splitting strategy to use. Defaults to same CV as for the model
139
142
  evaluation.
@@ -196,7 +199,7 @@ def run_cross_validation( # noqa: C901
196
199
  np.random.seed(seed)
197
200
 
198
201
  # Interpret the input data and prepare it to be used with the library
199
- df_X, y, df_groups, X_types = prepare_input_data(
202
+ df_X, df_y, df_groups, X_types = prepare_input_data(
200
203
  X=X,
201
204
  y=y,
202
205
  df=data,
@@ -267,7 +270,7 @@ def run_cross_validation( # noqa: C901
267
270
 
268
271
  if has_target_transformer:
269
272
  if isinstance(pipeline, BaseSearchCV):
270
- last_step = pipeline.estimator[-1]
273
+ last_step = pipeline.estimator[-1] # type: ignore
271
274
  else:
272
275
  last_step = pipeline[-1]
273
276
  if not last_step.can_inverse_transform():
@@ -313,7 +316,7 @@ def run_cross_validation( # noqa: C901
313
316
  "Cannot use model_params with a model object. Use either "
314
317
  "a string or a PipelineCreator"
315
318
  )
316
- pipeline_creator.add(step=model, **t_params)
319
+ pipeline_creator.add(step=model, **t_params) # type: ignore
317
320
 
318
321
  # Check for extra model_params that are not used
319
322
  unused_params = []
@@ -346,17 +349,19 @@ def run_cross_validation( # noqa: C901
346
349
  logger.info("")
347
350
 
348
351
  if problem_type == "classification":
349
- logger.info(f"\tNumber of classes: {len(np.unique(y))}")
350
- logger.info(f"\tTarget type: {y.dtype}")
351
- logger.info(f"\tClass distributions: {y.value_counts()}")
352
+ logger.info(f"\tNumber of classes: {len(np.unique(df_y))}")
353
+ logger.info(f"\tTarget type: {df_y.dtype}")
354
+ logger.info(f"\tClass distributions: {df_y.value_counts()}")
352
355
  elif problem_type == "regression":
353
- logger.info(f"\tTarget type: {y.dtype}")
356
+ logger.info(f"\tTarget type: {df_y.dtype}")
354
357
 
355
358
  # Prepare cross validation
356
- cv_outer = check_cv(cv, classifier=problem_type == "classification")
359
+ cv_outer = check_cv(
360
+ cv, classifier=problem_type == "classification" # type: ignore
361
+ )
357
362
  logger.info(f"Using outer CV scheme {cv_outer}")
358
363
 
359
- check_consistency(y, cv, groups, problem_type)
364
+ check_consistency(df_y, cv, groups, problem_type) # type: ignore
360
365
 
361
366
  cv_return_estimator = return_estimator in ["cv", "all"]
362
367
  scoring = check_scoring(pipeline, scoring, wrap_score=wrap_score)
@@ -369,14 +374,14 @@ def run_cross_validation( # noqa: C901
369
374
  scores = cross_validate(
370
375
  pipeline,
371
376
  df_X,
372
- y,
377
+ df_y,
373
378
  cv=cv_outer,
374
379
  scoring=scoring,
375
380
  groups=df_groups,
376
381
  return_estimator=cv_return_estimator,
377
382
  n_jobs=n_jobs,
378
383
  return_train_score=return_train_score,
379
- verbose=verbose,
384
+ verbose=verbose, # type: ignore
380
385
  fit_params=fit_params,
381
386
  )
382
387
 
@@ -387,7 +392,10 @@ def run_cross_validation( # noqa: C901
387
392
  folds = np.tile(np.arange(n_folds), n_repeats)
388
393
 
389
394
  fold_sizes = np.array(
390
- [list(map(len, x)) for x in cv_outer.split(df_X, y, groups=df_groups)]
395
+ [
396
+ list(map(len, x))
397
+ for x in cv_outer.split(df_X, df_y, groups=df_groups)
398
+ ]
391
399
  )
392
400
  scores["n_train"] = fold_sizes[:, 0]
393
401
  scores["n_test"] = fold_sizes[:, 1]
@@ -398,7 +406,8 @@ def run_cross_validation( # noqa: C901
398
406
  scores_df = pd.DataFrame(scores)
399
407
  out = scores_df
400
408
  if return_estimator in ["final", "all"]:
401
- pipeline.fit(df_X, y, **fit_params)
409
+ logger.info("Fitting final model")
410
+ pipeline.fit(df_X, df_y, **fit_params)
402
411
  out = scores_df, pipeline
403
412
 
404
413
  if return_inspector:
@@ -406,7 +415,7 @@ def run_cross_validation( # noqa: C901
406
415
  scores=scores_df,
407
416
  model=pipeline,
408
417
  X=df_X,
409
- y=y,
418
+ y=df_y,
410
419
  groups=df_groups,
411
420
  cv=cv_outer,
412
421
  )
@@ -8,10 +8,77 @@ from copy import copy
8
8
  from typing import Callable, Dict, List, Optional, Union
9
9
 
10
10
  import pandas as pd
11
- from pytest import FixtureRequest, fixture
11
+ import pytest
12
+ from pytest import FixtureRequest, fixture, mark
12
13
  from seaborn import load_dataset
13
14
 
14
15
 
16
+ _filter_keys = {
17
+ "nodeps": "Test that runs without conditional dependencies only",
18
+ }
19
+
20
+
21
+ def pytest_configure(config: pytest.Config) -> None:
22
+ """Add a new marker to pytest.
23
+
24
+ Parameters
25
+ ----------
26
+ config : pytest.Config
27
+ The pytest configuration object.
28
+
29
+ """
30
+ # register your new marker to avoid warnings
31
+ for k, v in _filter_keys.items():
32
+ config.addinivalue_line("markers", f"{k}: {v}")
33
+
34
+
35
+ def pytest_addoption(parser: pytest.Parser) -> None:
36
+ """Add a new filter option to pytest.
37
+
38
+ Parameters
39
+ ----------
40
+ parser : pytest.Parser
41
+ The pytest parser object.
42
+
43
+ """
44
+ # add your new filter option (you can name it whatever you want)
45
+ parser.addoption(
46
+ "--filter",
47
+ action="store",
48
+ help="Select tests based on markers.",
49
+ )
50
+
51
+
52
+ def pytest_collection_modifyitems(
53
+ config: pytest.Config, items: List[pytest.Item]
54
+ ) -> None:
55
+ """Filter tests based on the key marker.
56
+
57
+ Parameters
58
+ ----------
59
+ config : pytest.Config
60
+ The pytest configuration object.
61
+ items : list
62
+ The list of items.
63
+
64
+ """
65
+ filter = config.getoption("--filter", None) # type: ignore
66
+ if filter is None:
67
+ for k in _filter_keys.keys():
68
+ skip_keys = mark.skip(
69
+ reason=f"Filter not specified for this test: {k}"
70
+ )
71
+ for item in items:
72
+ if k in item.keywords:
73
+ item.add_marker(skip_keys) # skip the test
74
+ else:
75
+ new_items = []
76
+ for item in items:
77
+ if filter in item.keywords:
78
+ new_items.append(item)
79
+ items[:] = new_items
80
+
81
+
15
82
  @fixture(scope="function")
16
83
  def df_typed_iris() -> pd.DataFrame:
17
84
  """Return a typed iris dataset.
@@ -191,6 +258,32 @@ def search_params(request: FixtureRequest) -> Optional[Dict]:
191
258
  A dictionary with the search_params argument.
192
259
 
193
260
  """
261
+
262
+ return request.param
263
+
264
+
265
+ @fixture(
266
+ params=[
267
+ {"kind": "bayes", "n_iter": 2, "cv": 3},
268
+ {"kind": "bayes", "n_iter": 2},
269
+ ],
270
+ scope="function",
271
+ )
272
+ def bayes_search_params(request: FixtureRequest) -> Optional[Dict]:
273
+ """Return different search_params argument for BayesSearchCV.
274
+
275
+ Parameters
276
+ ----------
277
+ request : pytest.FixtureRequest
278
+ The request object.
279
+
280
+ Returns
281
+ -------
282
+ dict or None
283
+ A dictionary with the search_params argument.
284
+
285
+ """
286
+
194
287
  return request.param
195
288
 
196
289
 
@@ -234,6 +327,46 @@ def get_tuning_params() -> Callable:
234
327
  return get
235
328
 
236
329
 
330
+ _tuning_distributions = {
331
+ "zscore": {"with_mean": [True, False]},
332
+ "pca": {"n_components": (0.2, 0.7, "uniform")},
333
+ "select_univariate": {"mode": ["k_best", "percentile"]},
334
+ "rf": {"n_estimators": [2, 5]},
335
+ "svm": {"C": (1, 10, "log-uniform")},
336
+ "ridge": {"alpha": (1, 3, "uniform")},
337
+ }
338
+
339
+
340
+ @fixture(scope="function")
341
+ def get_tuning_distributions() -> Callable:
342
+ """Return a function that returns the distributions to tune.
343
+
344
+ Returns
345
+ -------
346
+ get : callable
347
+ A function that returns the distributions to tune for a given step.
348
+
349
+ """
350
+
351
+ def get(step: str) -> Dict:
352
+ """Return the distributions to tune for a given step.
353
+
354
+ Parameters
355
+ ----------
356
+ step : str
357
+ The name of the step.
358
+
359
+ Returns
360
+ -------
361
+ dict
362
+ The distributions to tune for the given step.
363
+
364
+ """
365
+ return copy(_tuning_distributions.get(step, {}))
366
+
367
+ return get
368
+
369
+
237
370
  @fixture(
238
371
  params=[
239
372
  "zscore",
@@ -6,13 +6,16 @@
6
6
 
7
7
  from typing import TYPE_CHECKING, List, Optional, Union
8
8
 
9
+ import pandas as pd
10
+ from sklearn.model_selection import BaseCrossValidator
11
+
9
12
  from ..utils.logging import raise_error
10
13
  from ._cv import FoldsInspector
11
14
  from ._pipeline import PipelineInspector
12
15
 
13
16
 
14
17
  if TYPE_CHECKING:
15
- import pandas as pd
18
+
16
19
  from sklearn.base import BaseEstimator
17
20
 
18
21
  from ..pipeline.pipeline_creator import PipelineCreator
@@ -48,10 +51,10 @@ class Inspector:
48
51
  "BaseEstimator",
49
52
  None,
50
53
  ] = None,
51
- X: Optional[List[str]] = None, # noqa: N803
52
- y: Optional[str] = None,
53
- groups: Optional[str] = None,
54
- cv: Optional[int] = None,
54
+ X: Optional[pd.DataFrame] = None, # noqa: N803
55
+ y: Optional[pd.Series] = None,
56
+ groups: Optional[pd.Series] = None,
57
+ cv: Optional[Union[int, BaseCrossValidator]] = None,
55
58
  ) -> None:
56
59
  self._scores = scores
57
60
  self._model = model