arviz 0.19.0__tar.gz → 0.20.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (200) hide show
  1. {arviz-0.19.0 → arviz-0.20.0}/CHANGELOG.md +17 -0
  2. {arviz-0.19.0 → arviz-0.20.0}/PKG-INFO +1 -1
  3. {arviz-0.19.0 → arviz-0.20.0}/arviz/__init__.py +1 -1
  4. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/base.py +18 -7
  5. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/converters.py +7 -3
  6. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/inference_data.py +8 -0
  7. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/compareplot.py +4 -4
  8. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/ecdfplot.py +16 -8
  9. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/forestplot.py +2 -2
  10. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/kdeplot.py +9 -2
  11. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/ecdf_utils.py +157 -2
  12. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/stats.py +13 -7
  13. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_data.py +10 -0
  14. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_plots_matplotlib.py +6 -3
  15. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_stats_ecdf_utils.py +15 -2
  16. {arviz-0.19.0 → arviz-0.20.0}/arviz.egg-info/PKG-INFO +1 -1
  17. {arviz-0.19.0 → arviz-0.20.0}/requirements-docs.txt +1 -0
  18. {arviz-0.19.0 → arviz-0.20.0}/requirements-optional.txt +1 -0
  19. {arviz-0.19.0 → arviz-0.20.0}/requirements.txt +0 -1
  20. {arviz-0.19.0 → arviz-0.20.0}/CODE_OF_CONDUCT.md +0 -0
  21. {arviz-0.19.0 → arviz-0.20.0}/CONTRIBUTING.md +0 -0
  22. {arviz-0.19.0 → arviz-0.20.0}/GOVERNANCE.md +0 -0
  23. {arviz-0.19.0 → arviz-0.20.0}/LICENSE +0 -0
  24. {arviz-0.19.0 → arviz-0.20.0}/MANIFEST.in +0 -0
  25. {arviz-0.19.0 → arviz-0.20.0}/README.md +0 -0
  26. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/__init__.py +0 -0
  27. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/datasets.py +0 -0
  28. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/example_data/code/radon/radon.json +0 -0
  29. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/example_data/data/centered_eight.nc +0 -0
  30. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/example_data/data/non_centered_eight.nc +0 -0
  31. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/example_data/data_local.json +0 -0
  32. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/example_data/data_remote.json +0 -0
  33. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_beanmachine.py +0 -0
  34. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_cmdstan.py +0 -0
  35. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_cmdstanpy.py +0 -0
  36. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_datatree.py +0 -0
  37. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_dict.py +0 -0
  38. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_emcee.py +0 -0
  39. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_json.py +0 -0
  40. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_netcdf.py +0 -0
  41. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_numpyro.py +0 -0
  42. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_pyjags.py +0 -0
  43. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_pyro.py +0 -0
  44. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_pystan.py +0 -0
  45. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/io_zarr.py +0 -0
  46. {arviz-0.19.0 → arviz-0.20.0}/arviz/data/utils.py +0 -0
  47. {arviz-0.19.0 → arviz-0.20.0}/arviz/labels.py +0 -0
  48. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/__init__.py +0 -0
  49. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/autocorrplot.py +0 -0
  50. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/__init__.py +0 -0
  51. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/__init__.py +0 -0
  52. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/autocorrplot.py +0 -0
  53. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/bfplot.py +0 -0
  54. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/bpvplot.py +0 -0
  55. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/compareplot.py +0 -0
  56. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/densityplot.py +0 -0
  57. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/distcomparisonplot.py +0 -0
  58. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/distplot.py +0 -0
  59. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/dotplot.py +0 -0
  60. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/ecdfplot.py +0 -0
  61. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/elpdplot.py +0 -0
  62. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/energyplot.py +0 -0
  63. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/essplot.py +0 -0
  64. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/forestplot.py +0 -0
  65. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/hdiplot.py +0 -0
  66. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/kdeplot.py +0 -0
  67. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/khatplot.py +0 -0
  68. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/lmplot.py +0 -0
  69. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/loopitplot.py +0 -0
  70. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/mcseplot.py +0 -0
  71. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/pairplot.py +0 -0
  72. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/parallelplot.py +0 -0
  73. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/posteriorplot.py +0 -0
  74. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/ppcplot.py +0 -0
  75. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/rankplot.py +0 -0
  76. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/separationplot.py +0 -0
  77. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/traceplot.py +0 -0
  78. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/bokeh/violinplot.py +0 -0
  79. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/__init__.py +0 -0
  80. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/autocorrplot.py +0 -0
  81. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/bfplot.py +0 -0
  82. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/bpvplot.py +0 -0
  83. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/compareplot.py +0 -0
  84. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/densityplot.py +0 -0
  85. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/distcomparisonplot.py +0 -0
  86. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/distplot.py +0 -0
  87. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/dotplot.py +0 -0
  88. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/ecdfplot.py +0 -0
  89. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/elpdplot.py +0 -0
  90. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/energyplot.py +0 -0
  91. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/essplot.py +0 -0
  92. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/forestplot.py +0 -0
  93. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/hdiplot.py +0 -0
  94. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/kdeplot.py +0 -0
  95. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/khatplot.py +0 -0
  96. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/lmplot.py +0 -0
  97. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/loopitplot.py +0 -0
  98. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/mcseplot.py +0 -0
  99. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/pairplot.py +0 -0
  100. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/parallelplot.py +0 -0
  101. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/posteriorplot.py +0 -0
  102. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/ppcplot.py +0 -0
  103. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/rankplot.py +0 -0
  104. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/separationplot.py +0 -0
  105. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/traceplot.py +0 -0
  106. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/tsplot.py +0 -0
  107. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/backends/matplotlib/violinplot.py +0 -0
  108. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/bfplot.py +0 -0
  109. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/bpvplot.py +0 -0
  110. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/densityplot.py +0 -0
  111. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/distcomparisonplot.py +0 -0
  112. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/distplot.py +0 -0
  113. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/dotplot.py +0 -0
  114. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/elpdplot.py +0 -0
  115. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/energyplot.py +0 -0
  116. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/essplot.py +0 -0
  117. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/hdiplot.py +0 -0
  118. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/khatplot.py +0 -0
  119. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/lmplot.py +0 -0
  120. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/loopitplot.py +0 -0
  121. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/mcseplot.py +0 -0
  122. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/pairplot.py +0 -0
  123. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/parallelplot.py +0 -0
  124. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/plot_utils.py +0 -0
  125. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/posteriorplot.py +0 -0
  126. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/ppcplot.py +0 -0
  127. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/rankplot.py +0 -0
  128. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/separationplot.py +0 -0
  129. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-bluish.mplstyle +0 -0
  130. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-brownish.mplstyle +0 -0
  131. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-colors.mplstyle +0 -0
  132. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-cyanish.mplstyle +0 -0
  133. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-darkgrid.mplstyle +0 -0
  134. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-doc.mplstyle +0 -0
  135. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-docgrid.mplstyle +0 -0
  136. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-grayscale.mplstyle +0 -0
  137. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-greenish.mplstyle +0 -0
  138. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-orangish.mplstyle +0 -0
  139. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-plasmish.mplstyle +0 -0
  140. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-purplish.mplstyle +0 -0
  141. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-redish.mplstyle +0 -0
  142. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-royish.mplstyle +0 -0
  143. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-viridish.mplstyle +0 -0
  144. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-white.mplstyle +0 -0
  145. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/styles/arviz-whitegrid.mplstyle +0 -0
  146. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/traceplot.py +0 -0
  147. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/tsplot.py +0 -0
  148. {arviz-0.19.0 → arviz-0.20.0}/arviz/plots/violinplot.py +0 -0
  149. {arviz-0.19.0 → arviz-0.20.0}/arviz/preview.py +0 -0
  150. {arviz-0.19.0 → arviz-0.20.0}/arviz/py.typed +0 -0
  151. {arviz-0.19.0 → arviz-0.20.0}/arviz/rcparams.py +0 -0
  152. {arviz-0.19.0 → arviz-0.20.0}/arviz/sel_utils.py +0 -0
  153. {arviz-0.19.0 → arviz-0.20.0}/arviz/static/css/style.css +0 -0
  154. {arviz-0.19.0 → arviz-0.20.0}/arviz/static/html/icons-svg-inline.html +0 -0
  155. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/__init__.py +0 -0
  156. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/density_utils.py +0 -0
  157. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/diagnostics.py +0 -0
  158. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/stats_refitting.py +0 -0
  159. {arviz-0.19.0 → arviz-0.20.0}/arviz/stats/stats_utils.py +0 -0
  160. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/__init__.py +0 -0
  161. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/__init__.py +0 -0
  162. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_data_zarr.py +0 -0
  163. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_diagnostics.py +0 -0
  164. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_diagnostics_numba.py +0 -0
  165. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_helpers.py +0 -0
  166. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_labels.py +0 -0
  167. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_plot_utils.py +0 -0
  168. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_plots_bokeh.py +0 -0
  169. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_rcparams.py +0 -0
  170. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_stats.py +0 -0
  171. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_stats_numba.py +0 -0
  172. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_stats_utils.py +0 -0
  173. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_utils.py +0 -0
  174. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/base_tests/test_utils_numba.py +0 -0
  175. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/conftest.py +0 -0
  176. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/__init__.py +0 -0
  177. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_beanmachine.py +0 -0
  178. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_cmdstan.py +0 -0
  179. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_cmdstanpy.py +0 -0
  180. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_emcee.py +0 -0
  181. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_numpyro.py +0 -0
  182. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_pyjags.py +0 -0
  183. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_pyro.py +0 -0
  184. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/external_tests/test_data_pystan.py +0 -0
  185. {arviz-0.19.0 → arviz-0.20.0}/arviz/tests/helpers.py +0 -0
  186. {arviz-0.19.0 → arviz-0.20.0}/arviz/utils.py +0 -0
  187. {arviz-0.19.0 → arviz-0.20.0}/arviz/wrappers/__init__.py +0 -0
  188. {arviz-0.19.0 → arviz-0.20.0}/arviz/wrappers/base.py +0 -0
  189. {arviz-0.19.0 → arviz-0.20.0}/arviz/wrappers/wrap_pymc.py +0 -0
  190. {arviz-0.19.0 → arviz-0.20.0}/arviz/wrappers/wrap_stan.py +0 -0
  191. {arviz-0.19.0 → arviz-0.20.0}/arviz.egg-info/SOURCES.txt +0 -0
  192. {arviz-0.19.0 → arviz-0.20.0}/arviz.egg-info/dependency_links.txt +0 -0
  193. {arviz-0.19.0 → arviz-0.20.0}/arviz.egg-info/requires.txt +1 -1
  194. {arviz-0.19.0 → arviz-0.20.0}/arviz.egg-info/top_level.txt +0 -0
  195. {arviz-0.19.0 → arviz-0.20.0}/pyproject.toml +0 -0
  196. {arviz-0.19.0 → arviz-0.20.0}/requirements-dev.txt +0 -0
  197. {arviz-0.19.0 → arviz-0.20.0}/requirements-external.txt +0 -0
  198. {arviz-0.19.0 → arviz-0.20.0}/requirements-test.txt +0 -0
  199. {arviz-0.19.0 → arviz-0.20.0}/setup.cfg +0 -0
  200. {arviz-0.19.0 → arviz-0.20.0}/setup.py +0 -0
@@ -1,5 +1,22 @@
1
1
  # Change Log
2
2
 
3
+ ## v0.20.0 (2024 Sep 28)
4
+
5
+ ### New features
6
+ - Add optimized simultaneous ECDF confidence bands ([2368](https://github.com/arviz-devs/arviz/pull/2368))
7
+ - Add support for setting groups with `idata[group]` ([2374](https://github.com/arviz-devs/arviz/pull/2374))
8
+
9
+ ### Maintenance and fixes
10
+ - Make `dm-tree` an optional dependency ([2379](https://github.com/arviz-devs/arviz/pull/2379))
11
+ - Fix bug in `psislw` modifying input inplace ([2377](https://github.com/arviz-devs/arviz/pull/2377))
12
+ - Fix behaviour of two dimensional KDE plot with recent matplotlib releases ([2383](https://github.com/arviz-devs/arviz/pull/2383))
13
+ - Make defaults in `plot_compare` more intuitive ([2388](https://github.com/arviz-devs/arviz/pull/2388))
14
+
15
+ ### Documentation
16
+ - Added extensions of virtual environments in [.gitignore](https://github.com/arviz-devs/arviz/blob/main/.gitignore) ([2371](https://github.com/arviz-devs/arviz/issues/2371))
17
+ - Fixed the issue in the [Contribution References Documentation](https://python.arviz.org/en/latest/contributing/index.html) ([2369](https://github.com/arviz-devs/arviz/issues/2369))
18
+ - Improve docstrings for `loo` and `waic` ([2366](https://github.com/arviz-devs/arviz/pull/2366))
19
+
3
20
  ## v0.19.0 (2024 Jul 19)
4
21
 
5
22
  ### New features
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arviz
3
- Version: 0.19.0
3
+ Version: 0.20.0
4
4
  Summary: Exploratory analysis of Bayesian models
5
5
  Home-page: http://github.com/arviz-devs/arviz
6
6
  Author: ArviZ Developers
@@ -1,6 +1,6 @@
1
1
  # pylint: disable=wildcard-import,invalid-name,wrong-import-position
2
2
  """ArviZ is a library for exploratory analysis of Bayesian models."""
3
- __version__ = "0.19.0"
3
+ __version__ = "0.20.0"
4
4
 
5
5
  import logging
6
6
  import os
@@ -9,9 +9,13 @@ from copy import deepcopy
9
9
  from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union
10
10
 
11
11
  import numpy as np
12
- import tree
13
12
  import xarray as xr
14
13
 
14
+ try:
15
+ import tree
16
+ except ImportError:
17
+ tree = None
18
+
15
19
  try:
16
20
  import ujson as json
17
21
  except ImportError:
@@ -89,6 +93,9 @@ def _yield_flat_up_to(shallow_tree, input_tree, path=()):
89
93
  input_tree.
90
94
  """
91
95
  # pylint: disable=protected-access
96
+ if tree is None:
97
+ raise ImportError("Missing optional dependency 'dm-tree'. Use pip or conda to install it")
98
+
92
99
  if isinstance(shallow_tree, tree._TEXT_OR_BYTES) or not (
93
100
  isinstance(shallow_tree, tree.collections_abc.Mapping)
94
101
  or tree._is_namedtuple(shallow_tree)
@@ -299,7 +306,7 @@ def numpy_to_data_array(
299
306
  return xr.DataArray(ary, coords=coords, dims=dims)
300
307
 
301
308
 
302
- def pytree_to_dataset(
309
+ def dict_to_dataset(
303
310
  data,
304
311
  *,
305
312
  attrs=None,
@@ -312,6 +319,8 @@ def pytree_to_dataset(
312
319
  ):
313
320
  """Convert a dictionary or pytree of numpy arrays to an xarray.Dataset.
314
321
 
322
+ ArviZ itself supports conversion of flat dictionaries.
323
+ Support for pytrees requires ``dm-tree`` which is an optional dependency.
315
324
  See https://jax.readthedocs.io/en/latest/pytrees.html for what a pytree is, but
316
325
  this includes at least dictionaries and tuple types.
317
326
 
@@ -386,10 +395,12 @@ def pytree_to_dataset(
386
395
  """
387
396
  if dims is None:
388
397
  dims = {}
389
- try:
390
- data = {k[0] if len(k) == 1 else k: v for k, v in _flatten_with_path(data)}
391
- except TypeError: # probably unsortable keys -- the function will still work if
392
- pass # it is an honest dictionary.
398
+
399
+ if tree is not None:
400
+ try:
401
+ data = {k[0] if len(k) == 1 else k: v for k, v in _flatten_with_path(data)}
402
+ except TypeError: # probably unsortable keys -- the function will still work if
403
+ pass # it is an honest dictionary.
393
404
 
394
405
  data_vars = {
395
406
  key: numpy_to_data_array(
@@ -406,7 +417,7 @@ def pytree_to_dataset(
406
417
  return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library))
407
418
 
408
419
 
409
- dict_to_dataset = pytree_to_dataset
420
+ pytree_to_dataset = dict_to_dataset
410
421
 
411
422
 
412
423
  def make_attrs(attrs=None, library=None):
@@ -1,9 +1,13 @@
1
1
  """High level conversion functions."""
2
2
 
3
3
  import numpy as np
4
- import tree
5
4
  import xarray as xr
6
5
 
6
+ try:
7
+ from tree import is_nested
8
+ except ImportError:
9
+ is_nested = lambda obj: False
10
+
7
11
  from .base import dict_to_dataset
8
12
  from .inference_data import InferenceData
9
13
  from .io_beanmachine import from_beanmachine
@@ -107,7 +111,7 @@ def convert_to_inference_data(obj, *, group="posterior", coords=None, dims=None,
107
111
  dataset = obj.to_dataset()
108
112
  elif isinstance(obj, dict):
109
113
  dataset = dict_to_dataset(obj, coords=coords, dims=dims)
110
- elif tree.is_nested(obj) and not isinstance(obj, (list, tuple)):
114
+ elif is_nested(obj) and not isinstance(obj, (list, tuple)):
111
115
  dataset = dict_to_dataset(obj, coords=coords, dims=dims)
112
116
  elif isinstance(obj, np.ndarray):
113
117
  dataset = dict_to_dataset({"x": obj}, coords=coords, dims=dims)
@@ -122,7 +126,7 @@ def convert_to_inference_data(obj, *, group="posterior", coords=None, dims=None,
122
126
  "xarray dataarray",
123
127
  "xarray dataset",
124
128
  "dict",
125
- "pytree",
129
+ "pytree (if 'dm-tree' is installed)",
126
130
  "netcdf filename",
127
131
  "numpy array",
128
132
  "pystan fit",
@@ -266,6 +266,14 @@ class InferenceData(Mapping[str, xr.Dataset]):
266
266
  raise KeyError(key)
267
267
  return getattr(self, key)
268
268
 
269
+ def __setitem__(self, key: str, value: xr.Dataset):
270
+ """Set item by key and update group list accordingly."""
271
+ if key.startswith(WARMUP_TAG):
272
+ self._groups_warmup.append(key)
273
+ else:
274
+ self._groups.append(key)
275
+ setattr(self, key, value)
276
+
269
277
  def groups(self) -> List[str]:
270
278
  """Return all groups present in InferenceData object."""
271
279
  return self._groups_all
@@ -11,9 +11,9 @@ def plot_compare(
11
11
  comp_df,
12
12
  insample_dev=False,
13
13
  plot_standard_error=True,
14
- plot_ic_diff=True,
14
+ plot_ic_diff=False,
15
15
  order_by_rank=True,
16
- legend=True,
16
+ legend=False,
17
17
  title=True,
18
18
  figsize=None,
19
19
  textsize=None,
@@ -45,12 +45,12 @@ def plot_compare(
45
45
  penalization given by the effective number of parameters (p_loo or p_waic).
46
46
  plot_standard_error : bool, default True
47
47
  Plot the standard error of the ELPD.
48
- plot_ic_diff : bool, default True
48
+ plot_ic_diff : bool, default False
49
49
  Plot standard error of the difference in ELPD between each model
50
50
  and the top-ranked model.
51
51
  order_by_rank : bool, default True
52
52
  If True ensure the best model is used as reference.
53
- legend : bool, default True
53
+ legend : bool, default False
54
54
  Add legend to figure.
55
55
  figsize : (float, float), optional
56
56
  If `None`, size is (6, num of models) inches.
@@ -73,6 +73,7 @@ def plot_ecdf(
73
73
  - False: No confidence bands are plotted (default).
74
74
  - True: Plot bands computed with the default algorithm (subject to change)
75
75
  - "pointwise": Compute the pointwise (i.e. marginal) confidence band.
76
+ - "optimized": Use optimization to estimate a simultaneous confidence band.
76
77
  - "simulated": Use Monte Carlo simulation to estimate a simultaneous confidence
77
78
  band.
78
79
 
@@ -216,8 +217,7 @@ def plot_ecdf(
216
217
  >>> pit_vals = distribution.cdf(sample)
217
218
  >>> uniform_dist = uniform(0, 1)
218
219
  >>> az.plot_ecdf(
219
- >>> pit_vals, cdf=uniform_dist.cdf,
220
- >>> rvs=uniform_dist.rvs, confidence_bands=True
220
+ >>> pit_vals, cdf=uniform_dist.cdf, confidence_bands=True,
221
221
  >>> )
222
222
 
223
223
  Plot an ECDF-difference plot of PIT values.
@@ -226,8 +226,8 @@ def plot_ecdf(
226
226
  :context: close-figs
227
227
 
228
228
  >>> az.plot_ecdf(
229
- >>> pit_vals, cdf = uniform_dist.cdf, rvs = uniform_dist.rvs,
230
- >>> confidence_bands = True, difference = True
229
+ >>> pit_vals, cdf = uniform_dist.cdf, confidence_bands = True,
230
+ >>> difference = True
231
231
  >>> )
232
232
  """
233
233
  if confidence_bands is True:
@@ -238,9 +238,12 @@ def plot_ecdf(
238
238
  )
239
239
  confidence_bands = "pointwise"
240
240
  else:
241
- confidence_bands = "simulated"
242
- elif confidence_bands == "simulated" and pointwise:
243
- raise ValueError("Cannot specify both `confidence_bands='simulated'` and `pointwise=True`")
241
+ confidence_bands = "auto"
242
+ # if pointwise specified, confidence_bands must be a bool or 'pointwise'
243
+ elif confidence_bands not in [False, "pointwise"] and pointwise:
244
+ raise ValueError(
245
+ f"Cannot specify both `confidence_bands='{confidence_bands}'` and `pointwise=True`"
246
+ )
244
247
 
245
248
  if fpr is not None:
246
249
  warnings.warn(
@@ -298,7 +301,7 @@ def plot_ecdf(
298
301
  "`eval_points` explicitly.",
299
302
  BehaviourChangeWarning,
300
303
  )
301
- if confidence_bands == "simulated":
304
+ if confidence_bands in ["optimized", "simulated"]:
302
305
  warnings.warn(
303
306
  "For simultaneous bands to be correctly calibrated, specify `eval_points` "
304
307
  "independent of the `values`"
@@ -319,6 +322,11 @@ def plot_ecdf(
319
322
 
320
323
  if confidence_bands:
321
324
  ndraws = len(values)
325
+ if confidence_bands == "auto":
326
+ if ndraws < 200 or num_trials >= 250 * np.sqrt(ndraws):
327
+ confidence_bands = "optimized"
328
+ else:
329
+ confidence_bands = "simulated"
322
330
  x_bands = eval_points
323
331
  lower, higher = ecdf_confidence_band(
324
332
  ndraws,
@@ -55,8 +55,8 @@ def plot_forest(
55
55
  Specify the kind of plot:
56
56
 
57
57
  * The ``kind="forestplot"`` generates credible intervals, where the central points are the
58
- estimated posterior means, the thick lines are the central quartiles, and the thin lines
59
- represent the :math:`100\times`(`hdi_prob`)% highest density intervals.
58
+ estimated posterior median, the thick lines are the central quartiles, and the thin lines
59
+ represent the :math:`100\times(hdi\_prob)\%` highest density intervals.
60
60
  * The ``kind="ridgeplot"`` option generates density plots (kernel density estimate or
61
61
  histograms) in the same graph. Ridge plots can be configured to have different overlap,
62
62
  truncation bounds and quantile markers.
@@ -72,7 +72,7 @@ def plot_kde(
72
72
  If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE.
73
73
  hdi_probs : list, optional
74
74
  Plots highest density credibility regions for the provided probabilities for a 2D KDE.
75
- Defaults to matplotlib chosen levels with no fixed probability associated.
75
+ Defaults to [0.5, 0.8, 0.94].
76
76
  fill_last : bool, default False
77
77
  If True fill the last contour of the 2D KDE plot.
78
78
  figsize : (float, float), optional
@@ -270,6 +270,9 @@ def plot_kde(
270
270
  gridsize = (128, 128) if contour else (256, 256)
271
271
  density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
272
272
 
273
+ if hdi_probs is None:
274
+ hdi_probs = [0.5, 0.8, 0.94]
275
+
273
276
  if hdi_probs is not None:
274
277
  # Check hdi probs are within bounds (0, 1)
275
278
  if min(hdi_probs) <= 0 or max(hdi_probs) >= 1:
@@ -289,7 +292,11 @@ def plot_kde(
289
292
  "Using 'hdi_probs' in favor of 'levels'.",
290
293
  UserWarning,
291
294
  )
292
- contour_kwargs["levels"] = contour_level_list
295
+
296
+ if backend == "bokeh":
297
+ contour_kwargs["levels"] = contour_level_list
298
+ elif backend == "matplotlib":
299
+ contour_kwargs["levels"] = contour_level_list[1:]
293
300
 
294
301
  contourf_kwargs = _init_kwargs_dict(contourf_kwargs)
295
302
  if "levels" in contourf_kwargs:
@@ -1,10 +1,25 @@
1
1
  """Functions for evaluating ECDFs and their confidence bands."""
2
2
 
3
+ import math
3
4
  from typing import Any, Callable, Optional, Tuple
4
5
  import warnings
5
6
 
6
7
  import numpy as np
7
8
  from scipy.stats import uniform, binom
9
+ from scipy.optimize import minimize_scalar
10
+
11
+ try:
12
+ from numba import jit, vectorize
13
+ except ImportError:
14
+
15
+ def jit(*args, **kwargs): # pylint: disable=unused-argument
16
+ return lambda f: f
17
+
18
+ def vectorize(*args, **kwargs): # pylint: disable=unused-argument
19
+ return lambda f: f
20
+
21
+
22
+ from ..utils import Numba
8
23
 
9
24
 
10
25
  def compute_ecdf(sample: np.ndarray, eval_points: np.ndarray) -> np.ndarray:
@@ -73,7 +88,7 @@ def ecdf_confidence_band(
73
88
  eval_points: np.ndarray,
74
89
  cdf_at_eval_points: np.ndarray,
75
90
  prob: float = 0.95,
76
- method="simulated",
91
+ method="optimized",
77
92
  **kwargs,
78
93
  ) -> Tuple[np.ndarray, np.ndarray]:
79
94
  """Compute the `prob`-level confidence band for the ECDF.
@@ -92,6 +107,7 @@ def ecdf_confidence_band(
92
107
  method : string, default "simulated"
93
108
  The method used to compute the confidence band. Valid options are:
94
109
  - "pointwise": Compute the pointwise (i.e. marginal) confidence band.
110
+ - "optimized": Use optimization to estimate a simultaneous confidence band.
95
111
  - "simulated": Use Monte Carlo simulation to estimate a simultaneous confidence band.
96
112
  `rvs` must be provided.
97
113
  rvs: callable, optional
@@ -115,12 +131,18 @@ def ecdf_confidence_band(
115
131
 
116
132
  if method == "pointwise":
117
133
  prob_pointwise = prob
134
+ elif method == "optimized":
135
+ prob_pointwise = _optimize_simultaneous_ecdf_band_probability(
136
+ ndraws, eval_points, cdf_at_eval_points, prob=prob, **kwargs
137
+ )
118
138
  elif method == "simulated":
119
139
  prob_pointwise = _simulate_simultaneous_ecdf_band_probability(
120
140
  ndraws, eval_points, cdf_at_eval_points, prob=prob, **kwargs
121
141
  )
122
142
  else:
123
- raise ValueError(f"Unknown method {method}. Valid options are 'pointwise' or 'simulated'.")
143
+ raise ValueError(
144
+ f"Unknown method {method}. Valid options are 'pointwise', 'optimized', or 'simulated'."
145
+ )
124
146
 
125
147
  prob_lower, prob_upper = _get_pointwise_confidence_band(
126
148
  prob_pointwise, ndraws, cdf_at_eval_points
@@ -129,6 +151,139 @@ def ecdf_confidence_band(
129
151
  return prob_lower, prob_upper
130
152
 
131
153
 
154
+ def _update_ecdf_band_interior_probabilities(
155
+ prob_left: np.ndarray,
156
+ interval_left: np.ndarray,
157
+ interval_right: np.ndarray,
158
+ p: float,
159
+ ndraws: int,
160
+ ) -> np.ndarray:
161
+ """Update the probability that an ECDF has been within the envelope including at the current
162
+ point.
163
+
164
+ Parameters
165
+ ----------
166
+ prob_left : np.ndarray
167
+ For each point in the interior at the previous point, the joint probability that it and all
168
+ points before are in the interior.
169
+ interval_left : np.ndarray
170
+ The set of points in the interior at the previous point.
171
+ interval_right : np.ndarray
172
+ The set of points in the interior at the current point.
173
+ p : float
174
+ The probability that any given draw falls between the previous point and the current one.
175
+ ndraws : int
176
+ Number of draws in the original dataset.
177
+
178
+ Returns
179
+ -------
180
+ prob_right : np.ndarray
181
+ For each point in the interior at the current point, the joint probability that it and all
182
+ previous points are in the interior.
183
+ """
184
+ interval_left = interval_left[:, np.newaxis]
185
+ prob_conditional = binom.pmf(interval_right, ndraws - interval_left, p, loc=interval_left)
186
+ prob_right = prob_left.dot(prob_conditional)
187
+ return prob_right
188
+
189
+
190
+ @vectorize(["float64(int64, int64, float64, int64)"])
191
+ def _binom_pmf(k, n, p, loc):
192
+ k -= loc
193
+ if k < 0 or k > n:
194
+ return 0.0
195
+ if p == 0:
196
+ return 1.0 if k == 0 else 0.0
197
+ if p == 1:
198
+ return 1.0 if k == n else 0.0
199
+ if k == 0:
200
+ return (1 - p) ** n
201
+ if k == n:
202
+ return p**n
203
+ lbinom = math.lgamma(n + 1) - math.lgamma(k + 1) - math.lgamma(n - k + 1)
204
+ return np.exp(lbinom + k * np.log(p) + (n - k) * np.log1p(-p))
205
+
206
+
207
+ @jit(nopython=True)
208
+ def _update_ecdf_band_interior_probabilities_numba(
209
+ prob_left: np.ndarray,
210
+ interval_left: np.ndarray,
211
+ interval_right: np.ndarray,
212
+ p: float,
213
+ ndraws: int,
214
+ ) -> np.ndarray:
215
+ interval_left = interval_left[:, np.newaxis]
216
+ prob_conditional = _binom_pmf(interval_right, ndraws - interval_left, p, interval_left)
217
+ prob_right = prob_left.dot(prob_conditional)
218
+ return prob_right
219
+
220
+
221
+ def _ecdf_band_interior_probability(prob_between_points, ndraws, lower_count, upper_count):
222
+ interval_left = np.arange(1)
223
+ prob_interior = np.ones(1)
224
+ for i in range(prob_between_points.shape[0]):
225
+ interval_right = np.arange(lower_count[i], upper_count[i])
226
+ prob_interior = _update_ecdf_band_interior_probabilities(
227
+ prob_interior, interval_left, interval_right, prob_between_points[i], ndraws
228
+ )
229
+ interval_left = interval_right
230
+ return prob_interior.sum()
231
+
232
+
233
+ @jit(nopython=True)
234
+ def _ecdf_band_interior_probability_numba(prob_between_points, ndraws, lower_count, upper_count):
235
+ interval_left = np.arange(1)
236
+ prob_interior = np.ones(1)
237
+ for i in range(prob_between_points.shape[0]):
238
+ interval_right = np.arange(lower_count[i], upper_count[i])
239
+ prob_interior = _update_ecdf_band_interior_probabilities_numba(
240
+ prob_interior, interval_left, interval_right, prob_between_points[i], ndraws
241
+ )
242
+ interval_left = interval_right
243
+ return prob_interior.sum()
244
+
245
+
246
+ def _ecdf_band_optimization_objective(
247
+ prob_pointwise: float,
248
+ cdf_at_eval_points: np.ndarray,
249
+ ndraws: int,
250
+ prob_target: float,
251
+ ) -> float:
252
+ """Objective function for optimizing the simultaneous confidence band probability."""
253
+ lower, upper = _get_pointwise_confidence_band(prob_pointwise, ndraws, cdf_at_eval_points)
254
+ lower_count = (lower * ndraws).astype(int)
255
+ upper_count = (upper * ndraws).astype(int) + 1
256
+ cdf_with_zero = np.insert(cdf_at_eval_points[:-1], 0, 0)
257
+ prob_between_points = (cdf_at_eval_points - cdf_with_zero) / (1 - cdf_with_zero)
258
+ if Numba.numba_flag:
259
+ prob_interior = _ecdf_band_interior_probability_numba(
260
+ prob_between_points, ndraws, lower_count, upper_count
261
+ )
262
+ else:
263
+ prob_interior = _ecdf_band_interior_probability(
264
+ prob_between_points, ndraws, lower_count, upper_count
265
+ )
266
+ return abs(prob_interior - prob_target)
267
+
268
+
269
+ def _optimize_simultaneous_ecdf_band_probability(
270
+ ndraws: int,
271
+ eval_points: np.ndarray, # pylint: disable=unused-argument
272
+ cdf_at_eval_points: np.ndarray,
273
+ prob: float = 0.95,
274
+ **kwargs, # pylint: disable=unused-argument
275
+ ):
276
+ """Estimate probability for simultaneous confidence band using optimization.
277
+
278
+ This function optimizes the pointwise probability needed to construct pointwise confidence bands
279
+ that form a `prob`-level confidence envelope for the ECDF of a sample.
280
+ """
281
+ cdf_at_eval_points = np.unique(cdf_at_eval_points)
282
+ objective = lambda p: _ecdf_band_optimization_objective(p, cdf_at_eval_points, ndraws, prob)
283
+ prob_pointwise = minimize_scalar(objective, bounds=(prob, 1), method="bounded").x
284
+ return prob_pointwise
285
+
286
+
132
287
  def _simulate_simultaneous_ecdf_band_probability(
133
288
  ndraws: int,
134
289
  eval_points: np.ndarray,
@@ -711,16 +711,19 @@ def loo(data, pointwise=None, var_name=None, reff=None, scale=None):
711
711
  Returns
712
712
  -------
713
713
  ELPDData object (inherits from :class:`pandas.Series`) with the following row/attributes:
714
- elpd: approximated expected log pointwise predictive density (elpd)
714
+ elpd_loo: approximated expected log pointwise predictive density (elpd)
715
715
  se: standard error of the elpd
716
716
  p_loo: effective number of parameters
717
- shape_warn: bool
718
- True if the estimated shape parameter of Pareto distribution is greater than a thresold
719
- value for one or more samples. For a sample size S, the thresold is compute as
720
- min(1 - 1/log10(S), 0.7)
721
- loo_i: array of pointwise predictive accuracy, only if pointwise True
717
+ n_samples: number of samples
718
+ n_data_points: number of data points
719
+ warning: bool
720
+ True if the estimated shape parameter of Pareto distribution is greater than
721
+ ``good_k``.
722
+ loo_i: :class:`~xarray.DataArray` with the pointwise predictive accuracy,
723
+ only if pointwise=True
722
724
  pareto_k: array of Pareto shape values, only if pointwise True
723
725
  scale: scale of the elpd
726
+ good_k: For a sample size S, the threshold is computed as min(1 - 1/log10(S), 0.7)
724
727
 
725
728
  The returned object has a custom print method that overrides pd.Series method.
726
729
 
@@ -914,6 +917,7 @@ def psislw(log_weights, reff=1.0):
914
917
  ...: az.psislw(-log_likelihood, reff=0.8)
915
918
 
916
919
  """
920
+ log_weights = deepcopy(log_weights)
917
921
  if hasattr(log_weights, "__sample__"):
918
922
  n_samples = len(log_weights.__sample__)
919
923
  shape = [
@@ -1580,7 +1584,9 @@ def waic(data, pointwise=None, var_name=None, scale=None, dask_kwargs=None):
1580
1584
  elpd_waic: approximated expected log pointwise predictive density (elpd)
1581
1585
  se: standard error of the elpd
1582
1586
  p_waic: effective number parameters
1583
- var_warn: bool
1587
+ n_samples: number of samples
1588
+ n_data_points: number of data points
1589
+ warning: bool
1584
1590
  True if posterior variance of the log predictive densities exceeds 0.4
1585
1591
  waic_i: :class:`~xarray.DataArray` with the pointwise predictive accuracy,
1586
1592
  only if pointwise=True
@@ -44,6 +44,10 @@ from ..helpers import ( # pylint: disable=unused-import
44
44
  models,
45
45
  )
46
46
 
47
+ # Check if dm-tree is installed
48
+ dm_tree_installed = importlib.util.find_spec("tree") is not None # pylint: disable=invalid-name
49
+ skip_tests = (not dm_tree_installed) and ("ARVIZ_REQUIRE_ALL_DEPS" not in os.environ)
50
+
47
51
 
48
52
  @pytest.fixture(autouse=True)
49
53
  def no_remote_data(monkeypatch, tmpdir):
@@ -895,6 +899,11 @@ class TestInferenceData: # pylint: disable=too-many-public-methods
895
899
  assert escape(repr(idata)) in html
896
900
  xr.set_options(display_style=display_style)
897
901
 
902
+ def test_setitem(self, data_random):
903
+ data_random["new_group"] = data_random.posterior
904
+ assert "new_group" in data_random.groups()
905
+ assert hasattr(data_random, "new_group")
906
+
898
907
  def test_add_groups(self, data_random):
899
908
  data = np.random.normal(size=(4, 500, 8))
900
909
  idata = data_random
@@ -1076,6 +1085,7 @@ def test_dict_to_dataset():
1076
1085
  assert set(dataset.b.coords) == {"chain", "draw", "c"}
1077
1086
 
1078
1087
 
1088
+ @pytest.mark.skipif(skip_tests, reason="test requires dm-tree which is not installed")
1079
1089
  def test_nested_dict_to_dataset():
1080
1090
  datadict = {
1081
1091
  "top": {"a": np.random.randn(100), "b": np.random.randn(1, 100, 10)},
@@ -1285,10 +1285,11 @@ def test_plot_ecdf_eval_points():
1285
1285
  assert axes is not None
1286
1286
 
1287
1287
 
1288
- @pytest.mark.parametrize("confidence_bands", [True, "pointwise", "simulated"])
1289
- def test_plot_ecdf_confidence_bands(confidence_bands):
1288
+ @pytest.mark.parametrize("confidence_bands", [True, "pointwise", "optimized", "simulated"])
1289
+ @pytest.mark.parametrize("ndraws", [100, 10_000])
1290
+ def test_plot_ecdf_confidence_bands(confidence_bands, ndraws):
1290
1291
  """Check that all confidence_bands values correctly accepted"""
1291
- data = np.random.randn(4, 1000)
1292
+ data = np.random.randn(4, ndraws // 4)
1292
1293
  axes = plot_ecdf(data, confidence_bands=confidence_bands, cdf=norm(0, 1).cdf)
1293
1294
  assert axes is not None
1294
1295
 
@@ -1326,6 +1327,8 @@ def test_plot_ecdf_error():
1326
1327
  # contradictory confidence band types
1327
1328
  with pytest.raises(ValueError):
1328
1329
  plot_ecdf(data, cdf=dist.cdf, confidence_bands="simulated", pointwise=True)
1330
+ with pytest.raises(ValueError):
1331
+ plot_ecdf(data, cdf=dist.cdf, confidence_bands="optimized", pointwise=True)
1329
1332
  plot_ecdf(data, cdf=dist.cdf, confidence_bands=True, pointwise=True)
1330
1333
  plot_ecdf(data, cdf=dist.cdf, confidence_bands="pointwise")
1331
1334
 
@@ -10,6 +10,13 @@ from ...stats.ecdf_utils import (
10
10
  _get_pointwise_confidence_band,
11
11
  )
12
12
 
13
+ try:
14
+ import numba # pylint: disable=unused-import
15
+
16
+ numba_options = [True, False]
17
+ except ImportError:
18
+ numba_options = [False]
19
+
13
20
 
14
21
  def test_compute_ecdf():
15
22
  """Test compute_ecdf function."""
@@ -109,9 +116,15 @@ def test_get_pointwise_confidence_band(dist, prob, ndraws, num_trials=1_000, see
109
116
  ids=["continuous", "continuous default rvs", "discrete"],
110
117
  )
111
118
  @pytest.mark.parametrize("ndraws", [10_000])
112
- @pytest.mark.parametrize("method", ["pointwise", "simulated"])
113
- def test_ecdf_confidence_band(dist, rvs, prob, ndraws, method, num_trials=1_000, seed=57):
119
+ @pytest.mark.parametrize("method", ["pointwise", "optimized", "simulated"])
120
+ @pytest.mark.parametrize("use_numba", numba_options)
121
+ def test_ecdf_confidence_band(
122
+ dist, rvs, prob, ndraws, method, use_numba, num_trials=1_000, seed=57
123
+ ):
114
124
  """Test test_ecdf_confidence_band."""
125
+ if use_numba and method != "optimized":
126
+ pytest.skip("Numba only used in optimized method")
127
+
115
128
  eval_points = np.linspace(*dist.interval(0.99), 10)
116
129
  cdf_at_eval_points = dist.cdf(eval_points)
117
130
  random_state = np.random.default_rng(seed)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: arviz
3
- Version: 0.19.0
3
+ Version: 0.20.0
4
4
  Summary: Exploratory analysis of Bayesian models
5
5
  Home-page: http://github.com/arviz-devs/arviz
6
6
  Author: ArviZ Developers
@@ -15,3 +15,4 @@ sphinx_design
15
15
  sphinx-codeautolink>=0.9.0
16
16
  jupyter-sphinx
17
17
  sphinxcontrib-youtube
18
+ dm-tree>=0.1.8
@@ -6,3 +6,4 @@ ujson
6
6
  dask[distributed]
7
7
  zarr>=2.5.0,<3
8
8
  xarray-datatree
9
+ dm-tree>=0.1.8
@@ -4,7 +4,6 @@ numpy>=1.23.0
4
4
  scipy>=1.9.0
5
5
  packaging
6
6
  pandas>=1.5.0
7
- dm-tree>=0.1.8
8
7
  xarray>=2022.6.0
9
8
  h5netcdf>=1.0.2
10
9
  typing_extensions>=4.1.0
File without changes
File without changes
File without changes