HydPy 6.2.dev1__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hydpy/__init__.py +275 -0
- hydpy/aliases.py +2554 -0
- hydpy/auxs/__init__.py +0 -0
- hydpy/auxs/anntools.py +1305 -0
- hydpy/auxs/armatools.py +883 -0
- hydpy/auxs/calibtools.py +3337 -0
- hydpy/auxs/interptools.py +1094 -0
- hydpy/auxs/iuhtools.py +543 -0
- hydpy/auxs/networktools.py +597 -0
- hydpy/auxs/ppolytools.py +809 -0
- hydpy/auxs/quadtools.py +61 -0
- hydpy/auxs/roottools.py +228 -0
- hydpy/auxs/smoothtools.py +273 -0
- hydpy/auxs/statstools.py +2125 -0
- hydpy/auxs/validtools.py +81 -0
- hydpy/conf/HydPyConfigBase.xsd +68637 -0
- hydpy/conf/HydPyConfigBase.xsdt +358 -0
- hydpy/conf/HydPyConfigMultipleRuns.xsd +25 -0
- hydpy/conf/HydPyConfigSingleRun.xsd +24 -0
- hydpy/conf/__init__.py +0 -0
- hydpy/conf/a_coefficients_explicit_lobatto_sequence.npy +0 -0
- hydpy/conf/support_points_for_smoothpar_logistic2.npy +0 -0
- hydpy/config.py +42 -0
- hydpy/core/__init__.py +0 -0
- hydpy/core/aliastools.py +214 -0
- hydpy/core/autodoctools.py +1947 -0
- hydpy/core/auxfiletools.py +1169 -0
- hydpy/core/devicetools.py +3810 -0
- hydpy/core/exceptiontools.py +269 -0
- hydpy/core/filetools.py +1985 -0
- hydpy/core/hydpytools.py +3089 -0
- hydpy/core/importtools.py +1398 -0
- hydpy/core/indextools.py +345 -0
- hydpy/core/itemtools.py +1849 -0
- hydpy/core/masktools.py +460 -0
- hydpy/core/modeltools.py +4868 -0
- hydpy/core/netcdftools.py +2683 -0
- hydpy/core/objecttools.py +2023 -0
- hydpy/core/optiontools.py +1045 -0
- hydpy/core/parametertools.py +4674 -0
- hydpy/core/printtools.py +222 -0
- hydpy/core/propertytools.py +643 -0
- hydpy/core/pubtools.py +254 -0
- hydpy/core/selectiontools.py +1571 -0
- hydpy/core/sequencetools.py +4476 -0
- hydpy/core/seriestools.py +339 -0
- hydpy/core/testtools.py +2483 -0
- hydpy/core/timetools.py +3567 -0
- hydpy/core/typingtools.py +333 -0
- hydpy/core/variabletools.py +2615 -0
- hydpy/cythons/__init__.py +24 -0
- hydpy/cythons/annutils.pxd +33 -0
- hydpy/cythons/annutils.pyi +25 -0
- hydpy/cythons/annutils.pyx +120 -0
- hydpy/cythons/autogen/__init__.py +0 -0
- hydpy/cythons/autogen/annutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/annutils.pxd +42 -0
- hydpy/cythons/autogen/annutils.pyx +129 -0
- hydpy/cythons/autogen/c_arma.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_arma.pxd +179 -0
- hydpy/cythons/autogen/c_arma.pyx +356 -0
- hydpy/cythons/autogen/c_arma_rimorido.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_arma_rimorido.pxd +179 -0
- hydpy/cythons/autogen/c_arma_rimorido.pyx +356 -0
- hydpy/cythons/autogen/c_conv.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_conv.pxd +198 -0
- hydpy/cythons/autogen/c_conv.pyx +491 -0
- hydpy/cythons/autogen/c_conv_idw.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_conv_idw.pxd +124 -0
- hydpy/cythons/autogen/c_conv_idw.pyx +264 -0
- hydpy/cythons/autogen/c_conv_idw_ed.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_conv_idw_ed.pxd +197 -0
- hydpy/cythons/autogen/c_conv_idw_ed.pyx +481 -0
- hydpy/cythons/autogen/c_conv_nn.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_conv_nn.pxd +120 -0
- hydpy/cythons/autogen/c_conv_nn.pyx +224 -0
- hydpy/cythons/autogen/c_dam.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam.pxd +805 -0
- hydpy/cythons/autogen/c_dam.pyx +1477 -0
- hydpy/cythons/autogen/c_dam_llake.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_llake.pxd +364 -0
- hydpy/cythons/autogen/c_dam_llake.pyx +705 -0
- hydpy/cythons/autogen/c_dam_lreservoir.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_lreservoir.pxd +365 -0
- hydpy/cythons/autogen/c_dam_lreservoir.pyx +708 -0
- hydpy/cythons/autogen/c_dam_lretention.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_lretention.pxd +340 -0
- hydpy/cythons/autogen/c_dam_lretention.pyx +625 -0
- hydpy/cythons/autogen/c_dam_pump.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_pump.pxd +402 -0
- hydpy/cythons/autogen/c_dam_pump.pyx +724 -0
- hydpy/cythons/autogen/c_dam_pump_sluice.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_pump_sluice.pxd +452 -0
- hydpy/cythons/autogen/c_dam_pump_sluice.pyx +829 -0
- hydpy/cythons/autogen/c_dam_sluice.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_sluice.pxd +404 -0
- hydpy/cythons/autogen/c_dam_sluice.pyx +726 -0
- hydpy/cythons/autogen/c_dam_v001.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_v001.pxd +452 -0
- hydpy/cythons/autogen/c_dam_v001.pyx +816 -0
- hydpy/cythons/autogen/c_dam_v002.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_v002.pxd +394 -0
- hydpy/cythons/autogen/c_dam_v002.pyx +703 -0
- hydpy/cythons/autogen/c_dam_v003.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_v003.pxd +417 -0
- hydpy/cythons/autogen/c_dam_v003.pyx +744 -0
- hydpy/cythons/autogen/c_dam_v004.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_v004.pxd +486 -0
- hydpy/cythons/autogen/c_dam_v004.pyx +891 -0
- hydpy/cythons/autogen/c_dam_v005.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dam_v005.pxd +524 -0
- hydpy/cythons/autogen/c_dam_v005.pyx +928 -0
- hydpy/cythons/autogen/c_dummy.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy.pxd +151 -0
- hydpy/cythons/autogen/c_dummy.pyx +263 -0
- hydpy/cythons/autogen/c_dummy_interceptedwater.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_interceptedwater.pxd +69 -0
- hydpy/cythons/autogen/c_dummy_interceptedwater.pyx +104 -0
- hydpy/cythons/autogen/c_dummy_node2node.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_node2node.pxd +89 -0
- hydpy/cythons/autogen/c_dummy_node2node.pyx +148 -0
- hydpy/cythons/autogen/c_dummy_snowalbedo.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_snowalbedo.pxd +69 -0
- hydpy/cythons/autogen/c_dummy_snowalbedo.pyx +104 -0
- hydpy/cythons/autogen/c_dummy_snowcover.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_snowcover.pxd +69 -0
- hydpy/cythons/autogen/c_dummy_snowcover.pyx +104 -0
- hydpy/cythons/autogen/c_dummy_snowycanopy.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_snowycanopy.pxd +69 -0
- hydpy/cythons/autogen/c_dummy_snowycanopy.pyx +104 -0
- hydpy/cythons/autogen/c_dummy_soilwater.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_dummy_soilwater.pxd +69 -0
- hydpy/cythons/autogen/c_dummy_soilwater.pyx +104 -0
- hydpy/cythons/autogen/c_evap.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap.pxd +1029 -0
- hydpy/cythons/autogen/c_evap.pyx +2601 -0
- hydpy/cythons/autogen/c_evap_aet_hbv96.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_aet_hbv96.pxd +227 -0
- hydpy/cythons/autogen/c_evap_aet_hbv96.pyx +584 -0
- hydpy/cythons/autogen/c_evap_aet_minhas.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_aet_minhas.pxd +193 -0
- hydpy/cythons/autogen/c_evap_aet_minhas.pyx +478 -0
- hydpy/cythons/autogen/c_evap_aet_morsim.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_aet_morsim.pxd +681 -0
- hydpy/cythons/autogen/c_evap_aet_morsim.pyx +1642 -0
- hydpy/cythons/autogen/c_evap_pet_ambav1.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_pet_ambav1.pxd +532 -0
- hydpy/cythons/autogen/c_evap_pet_ambav1.pyx +1296 -0
- hydpy/cythons/autogen/c_evap_pet_hbv96.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_pet_hbv96.pxd +179 -0
- hydpy/cythons/autogen/c_evap_pet_hbv96.pyx +328 -0
- hydpy/cythons/autogen/c_evap_pet_m.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_pet_m.pxd +124 -0
- hydpy/cythons/autogen/c_evap_pet_m.pyx +214 -0
- hydpy/cythons/autogen/c_evap_pet_mlc.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_pet_mlc.pxd +126 -0
- hydpy/cythons/autogen/c_evap_pet_mlc.pyx +214 -0
- hydpy/cythons/autogen/c_evap_ret_fao56.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_ret_fao56.pxd +305 -0
- hydpy/cythons/autogen/c_evap_ret_fao56.pyx +624 -0
- hydpy/cythons/autogen/c_evap_ret_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_ret_io.pxd +112 -0
- hydpy/cythons/autogen/c_evap_ret_io.pyx +176 -0
- hydpy/cythons/autogen/c_evap_ret_tw2002.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_evap_ret_tw2002.pxd +139 -0
- hydpy/cythons/autogen/c_evap_ret_tw2002.pyx +273 -0
- hydpy/cythons/autogen/c_exch.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_exch.pxd +230 -0
- hydpy/cythons/autogen/c_exch.pyx +462 -0
- hydpy/cythons/autogen/c_exch_branch_hbv96.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_exch_branch_hbv96.pxd +134 -0
- hydpy/cythons/autogen/c_exch_branch_hbv96.pyx +255 -0
- hydpy/cythons/autogen/c_exch_waterlevel.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_exch_waterlevel.pxd +54 -0
- hydpy/cythons/autogen/c_exch_waterlevel.pyx +78 -0
- hydpy/cythons/autogen/c_exch_weir_hbv96.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_exch_weir_hbv96.pxd +156 -0
- hydpy/cythons/autogen/c_exch_weir_hbv96.pyx +282 -0
- hydpy/cythons/autogen/c_ga.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_ga.pxd +353 -0
- hydpy/cythons/autogen/c_ga.pyx +1204 -0
- hydpy/cythons/autogen/c_ga_garto.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_ga_garto.pxd +330 -0
- hydpy/cythons/autogen/c_ga_garto.pyx +1105 -0
- hydpy/cythons/autogen/c_ga_garto_submodel1.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_ga_garto_submodel1.pxd +236 -0
- hydpy/cythons/autogen/c_ga_garto_submodel1.pyx +944 -0
- hydpy/cythons/autogen/c_gland.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_gland.pxd +437 -0
- hydpy/cythons/autogen/c_gland.pyx +726 -0
- hydpy/cythons/autogen/c_gland_gr4.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_gland_gr4.pxd +382 -0
- hydpy/cythons/autogen/c_gland_gr4.pyx +605 -0
- hydpy/cythons/autogen/c_gland_gr5.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_gland_gr5.pxd +368 -0
- hydpy/cythons/autogen/c_gland_gr5.pyx +568 -0
- hydpy/cythons/autogen/c_gland_gr6.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_gland_gr6.pxd +420 -0
- hydpy/cythons/autogen/c_gland_gr6.pyx +673 -0
- hydpy/cythons/autogen/c_hland.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_hland.pxd +855 -0
- hydpy/cythons/autogen/c_hland.pyx +2486 -0
- hydpy/cythons/autogen/c_hland_96.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_hland_96.pxd +631 -0
- hydpy/cythons/autogen/c_hland_96.pyx +1724 -0
- hydpy/cythons/autogen/c_hland_96c.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_hland_96c.pxd +621 -0
- hydpy/cythons/autogen/c_hland_96c.pyx +1822 -0
- hydpy/cythons/autogen/c_hland_96p.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_hland_96p.pxd +683 -0
- hydpy/cythons/autogen/c_hland_96p.pyx +1911 -0
- hydpy/cythons/autogen/c_kinw.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_kinw.pxd +509 -0
- hydpy/cythons/autogen/c_kinw.pyx +965 -0
- hydpy/cythons/autogen/c_kinw_williams.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_kinw_williams.pxd +409 -0
- hydpy/cythons/autogen/c_kinw_williams.pyx +763 -0
- hydpy/cythons/autogen/c_kinw_williams_ext.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_kinw_williams_ext.pxd +220 -0
- hydpy/cythons/autogen/c_kinw_williams_ext.pyx +440 -0
- hydpy/cythons/autogen/c_lland.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_lland.pxd +1386 -0
- hydpy/cythons/autogen/c_lland.pyx +3679 -0
- hydpy/cythons/autogen/c_lland_dd.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_lland_dd.pxd +679 -0
- hydpy/cythons/autogen/c_lland_dd.pyx +1719 -0
- hydpy/cythons/autogen/c_lland_knauf.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_lland_knauf.pxd +1096 -0
- hydpy/cythons/autogen/c_lland_knauf.pyx +2784 -0
- hydpy/cythons/autogen/c_lland_knauf_ic.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_lland_knauf_ic.pxd +1369 -0
- hydpy/cythons/autogen/c_lland_knauf_ic.pyx +3625 -0
- hydpy/cythons/autogen/c_meteo.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo.pxd +469 -0
- hydpy/cythons/autogen/c_meteo.pyx +879 -0
- hydpy/cythons/autogen/c_meteo_clear_glob_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_clear_glob_io.pxd +75 -0
- hydpy/cythons/autogen/c_meteo_clear_glob_io.pyx +107 -0
- hydpy/cythons/autogen/c_meteo_glob_fao56.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_glob_fao56.pxd +209 -0
- hydpy/cythons/autogen/c_meteo_glob_fao56.pyx +339 -0
- hydpy/cythons/autogen/c_meteo_glob_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_glob_io.pxd +63 -0
- hydpy/cythons/autogen/c_meteo_glob_io.pyx +91 -0
- hydpy/cythons/autogen/c_meteo_glob_morsim.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_glob_morsim.pxd +289 -0
- hydpy/cythons/autogen/c_meteo_glob_morsim.pyx +527 -0
- hydpy/cythons/autogen/c_meteo_precip_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_precip_io.pxd +112 -0
- hydpy/cythons/autogen/c_meteo_precip_io.pyx +176 -0
- hydpy/cythons/autogen/c_meteo_psun_sun_glob_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_psun_sun_glob_io.pxd +87 -0
- hydpy/cythons/autogen/c_meteo_psun_sun_glob_io.pyx +123 -0
- hydpy/cythons/autogen/c_meteo_sun_fao56.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_sun_fao56.pxd +209 -0
- hydpy/cythons/autogen/c_meteo_sun_fao56.pyx +343 -0
- hydpy/cythons/autogen/c_meteo_sun_morsim.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_sun_morsim.pxd +286 -0
- hydpy/cythons/autogen/c_meteo_sun_morsim.pyx +519 -0
- hydpy/cythons/autogen/c_meteo_temp_io.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_meteo_temp_io.pxd +112 -0
- hydpy/cythons/autogen/c_meteo_temp_io.pyx +176 -0
- hydpy/cythons/autogen/c_musk.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_musk.pxd +282 -0
- hydpy/cythons/autogen/c_musk.pyx +605 -0
- hydpy/cythons/autogen/c_musk_classic.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_musk_classic.pxd +138 -0
- hydpy/cythons/autogen/c_musk_classic.pyx +226 -0
- hydpy/cythons/autogen/c_musk_mct.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_musk_mct.pxd +282 -0
- hydpy/cythons/autogen/c_musk_mct.pyx +609 -0
- hydpy/cythons/autogen/c_rconc.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_rconc.pxd +119 -0
- hydpy/cythons/autogen/c_rconc.pyx +174 -0
- hydpy/cythons/autogen/c_rconc_nash.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_rconc_nash.pxd +111 -0
- hydpy/cythons/autogen/c_rconc_nash.pyx +185 -0
- hydpy/cythons/autogen/c_rconc_uh.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_rconc_uh.pxd +92 -0
- hydpy/cythons/autogen/c_rconc_uh.pyx +125 -0
- hydpy/cythons/autogen/c_sw1d.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d.pxd +511 -0
- hydpy/cythons/autogen/c_sw1d.pyx +1263 -0
- hydpy/cythons/autogen/c_sw1d_channel.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_channel.pxd +119 -0
- hydpy/cythons/autogen/c_sw1d_channel.pyx +300 -0
- hydpy/cythons/autogen/c_sw1d_gate_out.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_gate_out.pxd +240 -0
- hydpy/cythons/autogen/c_sw1d_gate_out.pyx +476 -0
- hydpy/cythons/autogen/c_sw1d_lias.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_lias.pxd +320 -0
- hydpy/cythons/autogen/c_sw1d_lias.pyx +619 -0
- hydpy/cythons/autogen/c_sw1d_lias_sluice.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_lias_sluice.pxd +325 -0
- hydpy/cythons/autogen/c_sw1d_lias_sluice.pyx +644 -0
- hydpy/cythons/autogen/c_sw1d_network.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_network.pxd +90 -0
- hydpy/cythons/autogen/c_sw1d_network.pyx +246 -0
- hydpy/cythons/autogen/c_sw1d_pump.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_pump.pxd +256 -0
- hydpy/cythons/autogen/c_sw1d_pump.pyx +502 -0
- hydpy/cythons/autogen/c_sw1d_q_in.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_q_in.pxd +224 -0
- hydpy/cythons/autogen/c_sw1d_q_in.pyx +383 -0
- hydpy/cythons/autogen/c_sw1d_q_out.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_q_out.pxd +224 -0
- hydpy/cythons/autogen/c_sw1d_q_out.pyx +383 -0
- hydpy/cythons/autogen/c_sw1d_storage.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_storage.pxd +193 -0
- hydpy/cythons/autogen/c_sw1d_storage.pyx +349 -0
- hydpy/cythons/autogen/c_sw1d_weir_out.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_sw1d_weir_out.pxd +212 -0
- hydpy/cythons/autogen/c_sw1d_weir_out.pyx +404 -0
- hydpy/cythons/autogen/c_test.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_test.pxd +175 -0
- hydpy/cythons/autogen/c_test.pyx +348 -0
- hydpy/cythons/autogen/c_test_discontinous.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_test_discontinous.pxd +146 -0
- hydpy/cythons/autogen/c_test_discontinous.pyx +256 -0
- hydpy/cythons/autogen/c_test_stiff0d.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_test_stiff0d.pxd +146 -0
- hydpy/cythons/autogen/c_test_stiff0d.pyx +250 -0
- hydpy/cythons/autogen/c_test_stiff1d.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_test_stiff1d.pxd +145 -0
- hydpy/cythons/autogen/c_test_stiff1d.pyx +294 -0
- hydpy/cythons/autogen/c_whmod.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_whmod.pxd +482 -0
- hydpy/cythons/autogen/c_whmod.pyx +1156 -0
- hydpy/cythons/autogen/c_whmod_rural.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_whmod_rural.pxd +411 -0
- hydpy/cythons/autogen/c_whmod_rural.pyx +982 -0
- hydpy/cythons/autogen/c_whmod_urban.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_whmod_urban.pxd +482 -0
- hydpy/cythons/autogen/c_whmod_urban.pyx +1155 -0
- hydpy/cythons/autogen/c_wland.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wland.pxd +842 -0
- hydpy/cythons/autogen/c_wland.pyx +1890 -0
- hydpy/cythons/autogen/c_wland_gd.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wland_gd.pxd +829 -0
- hydpy/cythons/autogen/c_wland_gd.pyx +1847 -0
- hydpy/cythons/autogen/c_wland_wag.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wland_wag.pxd +810 -0
- hydpy/cythons/autogen/c_wland_wag.pyx +1780 -0
- hydpy/cythons/autogen/c_wq.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wq.pxd +275 -0
- hydpy/cythons/autogen/c_wq.pyx +652 -0
- hydpy/cythons/autogen/c_wq_trapeze.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wq_trapeze.pxd +170 -0
- hydpy/cythons/autogen/c_wq_trapeze.pyx +400 -0
- hydpy/cythons/autogen/c_wq_trapeze_strickler.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wq_trapeze_strickler.pxd +243 -0
- hydpy/cythons/autogen/c_wq_trapeze_strickler.pyx +578 -0
- hydpy/cythons/autogen/c_wq_walrus.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/c_wq_walrus.pxd +61 -0
- hydpy/cythons/autogen/c_wq_walrus.pyx +82 -0
- hydpy/cythons/autogen/configutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/configutils.pxd +17 -0
- hydpy/cythons/autogen/configutils.pyx +119 -0
- hydpy/cythons/autogen/interfaceutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/interfaceutils.pxd +31 -0
- hydpy/cythons/autogen/interfaceutils.pyx +82 -0
- hydpy/cythons/autogen/interputils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/interputils.pxd +42 -0
- hydpy/cythons/autogen/interputils.pyx +118 -0
- hydpy/cythons/autogen/masterinterface.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/masterinterface.pxd +153 -0
- hydpy/cythons/autogen/masterinterface.pyx +222 -0
- hydpy/cythons/autogen/pointerutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/pointerutils.pxd +31 -0
- hydpy/cythons/autogen/pointerutils.pyx +650 -0
- hydpy/cythons/autogen/ppolyutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/ppolyutils.pxd +35 -0
- hydpy/cythons/autogen/ppolyutils.pyx +59 -0
- hydpy/cythons/autogen/quadutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/quadutils.pxd +26 -0
- hydpy/cythons/autogen/quadutils.pyx +973 -0
- hydpy/cythons/autogen/rootutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/rootutils.pxd +28 -0
- hydpy/cythons/autogen/rootutils.pyx +109 -0
- hydpy/cythons/autogen/sequenceutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/sequenceutils.pxd +45 -0
- hydpy/cythons/autogen/sequenceutils.pyx +101 -0
- hydpy/cythons/autogen/smoothutils.cp313-win_amd64.pyd +0 -0
- hydpy/cythons/autogen/smoothutils.pxd +29 -0
- hydpy/cythons/autogen/smoothutils.pyx +833 -0
- hydpy/cythons/configutils.pxd +8 -0
- hydpy/cythons/configutils.pyi +5 -0
- hydpy/cythons/configutils.pyx +110 -0
- hydpy/cythons/interfaceutils.pxd +22 -0
- hydpy/cythons/interfaceutils.pyi +15 -0
- hydpy/cythons/interfaceutils.pyx +73 -0
- hydpy/cythons/interputils.pxd +33 -0
- hydpy/cythons/interputils.pyi +32 -0
- hydpy/cythons/interputils.pyx +109 -0
- hydpy/cythons/modelutils.py +2990 -0
- hydpy/cythons/pointerutils.pxd +22 -0
- hydpy/cythons/pointerutils.pyi +89 -0
- hydpy/cythons/pointerutils.pyx +641 -0
- hydpy/cythons/ppolyutils.pxd +26 -0
- hydpy/cythons/ppolyutils.pyi +21 -0
- hydpy/cythons/ppolyutils.pyx +50 -0
- hydpy/cythons/quadutils.pxd +17 -0
- hydpy/cythons/quadutils.pyi +13 -0
- hydpy/cythons/quadutils.pyx +964 -0
- hydpy/cythons/rootutils.pxd +19 -0
- hydpy/cythons/rootutils.pyi +21 -0
- hydpy/cythons/rootutils.pyx +100 -0
- hydpy/cythons/sequenceutils.pxd +36 -0
- hydpy/cythons/sequenceutils.pyi +7 -0
- hydpy/cythons/sequenceutils.pyx +92 -0
- hydpy/cythons/smoothutils.pxd +20 -0
- hydpy/cythons/smoothutils.pyi +15 -0
- hydpy/cythons/smoothutils.pyx +824 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/land_dill_assl.py +13 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/land_lahn_kalk.py +13 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/land_lahn_leun.py +14 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/land_lahn_marb.py +13 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/stream_dill_assl_lahn_leun.py +5 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/stream_lahn_leun_lahn_kalk.py +5 -0
- hydpy/data/HydPy-H-Lahn/conditions/init_1996_01_01_00_00_00/stream_lahn_marb_lahn_leun.py +5 -0
- hydpy/data/HydPy-H-Lahn/control/default/land.py +9 -0
- hydpy/data/HydPy-H-Lahn/control/default/land_dill_assl.py +57 -0
- hydpy/data/HydPy-H-Lahn/control/default/land_lahn_kalk.py +57 -0
- hydpy/data/HydPy-H-Lahn/control/default/land_lahn_leun.py +56 -0
- hydpy/data/HydPy-H-Lahn/control/default/land_lahn_marb.py +57 -0
- hydpy/data/HydPy-H-Lahn/control/default/stream_dill_assl_lahn_leun.py +7 -0
- hydpy/data/HydPy-H-Lahn/control/default/stream_lahn_leun_lahn_kalk.py +7 -0
- hydpy/data/HydPy-H-Lahn/control/default/stream_lahn_marb_lahn_leun.py +7 -0
- hydpy/data/HydPy-H-Lahn/multiple_runs.xml +309 -0
- hydpy/data/HydPy-H-Lahn/multiple_runs_alpha.xml +71 -0
- hydpy/data/HydPy-H-Lahn/network/default/headwaters.py +11 -0
- hydpy/data/HydPy-H-Lahn/network/default/nonheadwaters.py +11 -0
- hydpy/data/HydPy-H-Lahn/network/default/streams.py +8 -0
- hydpy/data/HydPy-H-Lahn/series/default/dill_assl_obs_q.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/evap_pet_hbv96_input_normalairtemperature.nc +0 -0
- hydpy/data/HydPy-H-Lahn/series/default/evap_pet_hbv96_input_normalevapotranspiration.nc +0 -0
- hydpy/data/HydPy-H-Lahn/series/default/hland_96_input_p.nc +0 -0
- hydpy/data/HydPy-H-Lahn/series/default/hland_96_input_t.nc +0 -0
- hydpy/data/HydPy-H-Lahn/series/default/lahn_kalk_obs_q.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/lahn_leun_obs_q.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/lahn_marb_obs_q.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_dill_assl_evap_pet_hbv96_input_normalairtemperature.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_dill_assl_evap_pet_hbv96_input_normalevapotranspiration.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_dill_assl_hland_96_input_p.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_dill_assl_hland_96_input_t.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_kalk_evap_pet_hbv96_input_normalairtemperature.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_kalk_evap_pet_hbv96_input_normalevapotranspiration.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_kalk_hland_96_input_p.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_kalk_hland_96_input_t.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_leun_evap_pet_hbv96_input_normalairtemperature.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_leun_evap_pet_hbv96_input_normalevapotranspiration.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_leun_hland_96_input_p.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_leun_hland_96_input_t.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_marb_evap_pet_hbv96_input_normalairtemperature.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_marb_evap_pet_hbv96_input_normalevapotranspiration.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_marb_hland_96_input_p.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/land_lahn_marb_hland_96_input_t.asc +11387 -0
- hydpy/data/HydPy-H-Lahn/series/default/obs_q.nc +0 -0
- hydpy/data/HydPy-H-Lahn/single_run.xml +152 -0
- hydpy/data/HydPy-H-Lahn/single_run.xmlt +143 -0
- hydpy/data/__init__.py +17 -0
- hydpy/docs/__init__.py +0 -0
- hydpy/docs/autofigs/__init__.py +0 -0
- hydpy/docs/bib/__init__.py +0 -0
- hydpy/docs/bib/refs.bib +566 -0
- hydpy/docs/combine_docversions.py +133 -0
- hydpy/docs/draw_model_sketches.py +1301 -0
- hydpy/docs/enable_autodoc.py +7 -0
- hydpy/docs/figs/HydPy-G-GR4.png +0 -0
- hydpy/docs/figs/HydPy-G-GR5.png +0 -0
- hydpy/docs/figs/HydPy-G-GR6.png +0 -0
- hydpy/docs/figs/HydPy-H-HBV96-COSERO.png +0 -0
- hydpy/docs/figs/HydPy-H-HBV96-PREVAH.png +0 -0
- hydpy/docs/figs/HydPy-H-HBV96.png +0 -0
- hydpy/docs/figs/HydPy-H-Lahn.png +0 -0
- hydpy/docs/figs/HydPy-KinW-Williams.png +0 -0
- hydpy/docs/figs/HydPy-L-DD.png +0 -0
- hydpy/docs/figs/HydPy-W-Wag.png +0 -0
- hydpy/docs/figs/HydPy_Logo.png +0 -0
- hydpy/docs/figs/HydPy_Logo_Text.png +0 -0
- hydpy/docs/figs/IDLE-editor.png +0 -0
- hydpy/docs/figs/IDLE-shell.png +0 -0
- hydpy/docs/figs/LAWA_river-basin-bumbers.png +0 -0
- hydpy/docs/figs/__init__.py +0 -0
- hydpy/docs/html_/__init__.py +0 -0
- hydpy/docs/polish_html.py +57 -0
- hydpy/docs/prepare.py +224 -0
- hydpy/docs/publish_docs.py +53 -0
- hydpy/docs/rst/HydPy-ARMA.rst +27 -0
- hydpy/docs/rst/HydPy-Conv.rst +22 -0
- hydpy/docs/rst/HydPy-Dam.rst +79 -0
- hydpy/docs/rst/HydPy-Dummy.rst +21 -0
- hydpy/docs/rst/HydPy-Evap.rst +26 -0
- hydpy/docs/rst/HydPy-Exch.rst +36 -0
- hydpy/docs/rst/HydPy-G.rst +40 -0
- hydpy/docs/rst/HydPy-GA.rst +34 -0
- hydpy/docs/rst/HydPy-H.rst +24 -0
- hydpy/docs/rst/HydPy-KinW.rst +32 -0
- hydpy/docs/rst/HydPy-L.rst +42 -0
- hydpy/docs/rst/HydPy-Meteo.rst +28 -0
- hydpy/docs/rst/HydPy-Musk.rst +21 -0
- hydpy/docs/rst/HydPy-Rconc.rst +17 -0
- hydpy/docs/rst/HydPy-SW1D.rst +49 -0
- hydpy/docs/rst/HydPy-Test.rst +19 -0
- hydpy/docs/rst/HydPy-W.rst +20 -0
- hydpy/docs/rst/HydPy-WHMod.rst +19 -0
- hydpy/docs/rst/HydPy-WQ.rst +20 -0
- hydpy/docs/rst/__init__.py +0 -0
- hydpy/docs/rst/additional_repositories.rst +40 -0
- hydpy/docs/rst/auxiliaries.rst +31 -0
- hydpy/docs/rst/continuous_integration.rst +75 -0
- hydpy/docs/rst/core.rst +75 -0
- hydpy/docs/rst/cythons.rst +47 -0
- hydpy/docs/rst/definitions.rst +506 -0
- hydpy/docs/rst/developer_guide.rst +54 -0
- hydpy/docs/rst/example_projects.rst +40 -0
- hydpy/docs/rst/execution.rst +22 -0
- hydpy/docs/rst/framework_tools.rst +56 -0
- hydpy/docs/rst/how_to_read_the_reference_manual.rst +156 -0
- hydpy/docs/rst/hydpydependencies.rst +55 -0
- hydpy/docs/rst/index.rst +125 -0
- hydpy/docs/rst/installation.rst +155 -0
- hydpy/docs/rst/model_families.rst +79 -0
- hydpy/docs/rst/model_overview.rst +291 -0
- hydpy/docs/rst/modelimports.rst +10 -0
- hydpy/docs/rst/options.rst +119 -0
- hydpy/docs/rst/programming_style.rst +572 -0
- hydpy/docs/rst/project_structure.rst +520 -0
- hydpy/docs/rst/quickstart.rst +304 -0
- hydpy/docs/rst/reference_manual.rst +29 -0
- hydpy/docs/rst/required_tools.rst +50 -0
- hydpy/docs/rst/simulation.rst +514 -0
- hydpy/docs/rst/submodel_interfaces.rst +32 -0
- hydpy/docs/rst/tests_and_documentation.rst +85 -0
- hydpy/docs/rst/user_guide.rst +38 -0
- hydpy/docs/rst/version_control.rst +116 -0
- hydpy/docs/rst/zbibliography.rst +8 -0
- hydpy/docs/sphinx/__init__.py +0 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/changes/frameset.html +11 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/changes/rstsource.html +15 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/changes/versionchanges.html +33 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/defindex.html +35 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/domainindex.html +56 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/genindex-single.html +63 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/genindex-split.html +41 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/genindex.html +76 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/globaltoc.html +11 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/layout.html +221 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/localtoc.html +15 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/opensearch.xml +13 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/page.html +13 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/relations.html +23 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/search.html +65 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/searchbox.html +21 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/searchfield.html +23 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/sourcelink.html +18 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/basic.css_t +925 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/doctools.js +156 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/documentation_options.js_t +13 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/file.png +0 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/language_data.js_t +26 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/minus.png +0 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/plus.png +0 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/searchtools.js +574 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/static/sphinx_highlight.js +154 -0
- hydpy/docs/sphinx/_themes/basic_hydpy/theme.conf +16 -0
- hydpy/docs/sphinx/_themes/classic_hydpy/layout.html +23 -0
- hydpy/docs/sphinx/_themes/classic_hydpy/static/classic.css_t +358 -0
- hydpy/docs/sphinx/_themes/classic_hydpy/static/sidebar.js_t +72 -0
- hydpy/docs/sphinx/_themes/classic_hydpy/theme.conf +32 -0
- hydpy/docs/sphinx/conf.py +398 -0
- hydpy/docs/sphinx/defaultlinks_extension.py +36 -0
- hydpy/docs/sphinx/integrationtest_extension.py +104 -0
- hydpy/docs/sphinx/projectstructure_extension.py +58 -0
- hydpy/docs/sphinx/submodelgraph_extension.py +53 -0
- hydpy/exe/__init__.py +0 -0
- hydpy/exe/commandtools.py +651 -0
- hydpy/exe/hyd.py +277 -0
- hydpy/exe/modelimports.py +41 -0
- hydpy/exe/replacetools.py +216 -0
- hydpy/exe/servertools.py +2348 -0
- hydpy/exe/xmltools.py +3280 -0
- hydpy/interfaces/__init__.py +0 -0
- hydpy/interfaces/aetinterfaces.py +94 -0
- hydpy/interfaces/dischargeinterfaces.py +45 -0
- hydpy/interfaces/petinterfaces.py +117 -0
- hydpy/interfaces/precipinterfaces.py +42 -0
- hydpy/interfaces/radiationinterfaces.py +79 -0
- hydpy/interfaces/rconcinterfaces.py +30 -0
- hydpy/interfaces/routinginterfaces.py +324 -0
- hydpy/interfaces/soilinterfaces.py +96 -0
- hydpy/interfaces/stateinterfaces.py +98 -0
- hydpy/interfaces/tempinterfaces.py +46 -0
- hydpy/models/__init__.py +0 -0
- hydpy/models/arma/__init__.py +14 -0
- hydpy/models/arma/arma_control.py +383 -0
- hydpy/models/arma/arma_derived.py +204 -0
- hydpy/models/arma/arma_fluxes.py +41 -0
- hydpy/models/arma/arma_inlets.py +11 -0
- hydpy/models/arma/arma_logs.py +19 -0
- hydpy/models/arma/arma_model.py +461 -0
- hydpy/models/arma/arma_outlets.py +11 -0
- hydpy/models/arma_rimorido.py +381 -0
- hydpy/models/conv/__init__.py +12 -0
- hydpy/models/conv/conv_control.py +303 -0
- hydpy/models/conv/conv_derived.py +271 -0
- hydpy/models/conv/conv_fluxes.py +54 -0
- hydpy/models/conv/conv_inlets.py +11 -0
- hydpy/models/conv/conv_model.py +687 -0
- hydpy/models/conv/conv_outlets.py +11 -0
- hydpy/models/conv_idw.py +120 -0
- hydpy/models/conv_idw_ed.py +184 -0
- hydpy/models/conv_nn.py +112 -0
- hydpy/models/dam/__init__.py +16 -0
- hydpy/models/dam/dam_aides.py +17 -0
- hydpy/models/dam/dam_control.py +346 -0
- hydpy/models/dam/dam_derived.py +559 -0
- hydpy/models/dam/dam_factors.py +46 -0
- hydpy/models/dam/dam_fluxes.py +179 -0
- hydpy/models/dam/dam_inlets.py +29 -0
- hydpy/models/dam/dam_logs.py +52 -0
- hydpy/models/dam/dam_model.py +5011 -0
- hydpy/models/dam/dam_outlets.py +23 -0
- hydpy/models/dam/dam_receivers.py +41 -0
- hydpy/models/dam/dam_senders.py +23 -0
- hydpy/models/dam/dam_solver.py +75 -0
- hydpy/models/dam/dam_states.py +11 -0
- hydpy/models/dam_llake.py +499 -0
- hydpy/models/dam_lreservoir.py +548 -0
- hydpy/models/dam_lretention.py +343 -0
- hydpy/models/dam_pump.py +278 -0
- hydpy/models/dam_pump_sluice.py +339 -0
- hydpy/models/dam_sluice.py +319 -0
- hydpy/models/dam_v001.py +1127 -0
- hydpy/models/dam_v002.py +381 -0
- hydpy/models/dam_v003.py +422 -0
- hydpy/models/dam_v004.py +665 -0
- hydpy/models/dam_v005.py +479 -0
- hydpy/models/dummy/__init__.py +15 -0
- hydpy/models/dummy/dummy_control.py +22 -0
- hydpy/models/dummy/dummy_fluxes.py +11 -0
- hydpy/models/dummy/dummy_inlets.py +11 -0
- hydpy/models/dummy/dummy_inputs.py +41 -0
- hydpy/models/dummy/dummy_model.py +196 -0
- hydpy/models/dummy/dummy_outlets.py +11 -0
- hydpy/models/dummy_interceptedwater.py +85 -0
- hydpy/models/dummy_node2node.py +83 -0
- hydpy/models/dummy_snowalbedo.py +84 -0
- hydpy/models/dummy_snowcover.py +84 -0
- hydpy/models/dummy_snowycanopy.py +86 -0
- hydpy/models/dummy_soilwater.py +85 -0
- hydpy/models/evap/__init__.py +13 -0
- hydpy/models/evap/evap_control.py +354 -0
- hydpy/models/evap/evap_derived.py +236 -0
- hydpy/models/evap/evap_factors.py +188 -0
- hydpy/models/evap/evap_fixed.py +68 -0
- hydpy/models/evap/evap_fluxes.py +150 -0
- hydpy/models/evap/evap_inputs.py +54 -0
- hydpy/models/evap/evap_logs.py +91 -0
- hydpy/models/evap/evap_masks.py +48 -0
- hydpy/models/evap/evap_model.py +9170 -0
- hydpy/models/evap/evap_parameters.py +149 -0
- hydpy/models/evap/evap_sequences.py +32 -0
- hydpy/models/evap/evap_states.py +18 -0
- hydpy/models/evap_aet_hbv96.py +372 -0
- hydpy/models/evap_aet_minhas.py +331 -0
- hydpy/models/evap_aet_morsim.py +627 -0
- hydpy/models/evap_pet_ambav1.py +483 -0
- hydpy/models/evap_pet_hbv96.py +147 -0
- hydpy/models/evap_pet_m.py +94 -0
- hydpy/models/evap_pet_mlc.py +107 -0
- hydpy/models/evap_ret_fao56.py +265 -0
- hydpy/models/evap_ret_io.py +74 -0
- hydpy/models/evap_ret_tw2002.py +165 -0
- hydpy/models/exch/__init__.py +14 -0
- hydpy/models/exch/exch_control.py +262 -0
- hydpy/models/exch/exch_derived.py +36 -0
- hydpy/models/exch/exch_factors.py +26 -0
- hydpy/models/exch/exch_fluxes.py +48 -0
- hydpy/models/exch/exch_inlets.py +11 -0
- hydpy/models/exch/exch_logs.py +12 -0
- hydpy/models/exch/exch_model.py +451 -0
- hydpy/models/exch/exch_outlets.py +17 -0
- hydpy/models/exch/exch_receivers.py +17 -0
- hydpy/models/exch_branch_hbv96.py +186 -0
- hydpy/models/exch_waterlevel.py +73 -0
- hydpy/models/exch_weir_hbv96.py +609 -0
- hydpy/models/ga/__init__.py +14 -0
- hydpy/models/ga/ga_aides.py +17 -0
- hydpy/models/ga/ga_control.py +208 -0
- hydpy/models/ga/ga_derived.py +77 -0
- hydpy/models/ga/ga_fluxes.py +83 -0
- hydpy/models/ga/ga_inputs.py +26 -0
- hydpy/models/ga/ga_logs.py +17 -0
- hydpy/models/ga/ga_model.py +2952 -0
- hydpy/models/ga/ga_states.py +87 -0
- hydpy/models/ga_garto.py +1001 -0
- hydpy/models/ga_garto_submodel1.py +79 -0
- hydpy/models/gland/__init__.py +14 -0
- hydpy/models/gland/gland_control.py +90 -0
- hydpy/models/gland/gland_derived.py +113 -0
- hydpy/models/gland/gland_fluxes.py +137 -0
- hydpy/models/gland/gland_inputs.py +12 -0
- hydpy/models/gland/gland_model.py +1439 -0
- hydpy/models/gland/gland_outlets.py +11 -0
- hydpy/models/gland/gland_states.py +90 -0
- hydpy/models/gland_gr4.py +501 -0
- hydpy/models/gland_gr5.py +463 -0
- hydpy/models/gland_gr6.py +487 -0
- hydpy/models/hland/__init__.py +20 -0
- hydpy/models/hland/hland_aides.py +19 -0
- hydpy/models/hland/hland_constants.py +37 -0
- hydpy/models/hland/hland_control.py +1530 -0
- hydpy/models/hland/hland_derived.py +683 -0
- hydpy/models/hland/hland_factors.py +57 -0
- hydpy/models/hland/hland_fixed.py +42 -0
- hydpy/models/hland/hland_fluxes.py +279 -0
- hydpy/models/hland/hland_inputs.py +19 -0
- hydpy/models/hland/hland_masks.py +107 -0
- hydpy/models/hland/hland_model.py +4664 -0
- hydpy/models/hland/hland_outlets.py +11 -0
- hydpy/models/hland/hland_parameters.py +227 -0
- hydpy/models/hland/hland_sequences.py +382 -0
- hydpy/models/hland/hland_states.py +236 -0
- hydpy/models/hland_96.py +1812 -0
- hydpy/models/hland_96c.py +1196 -0
- hydpy/models/hland_96p.py +1204 -0
- hydpy/models/kinw/__init__.py +18 -0
- hydpy/models/kinw/kinw_aides.py +306 -0
- hydpy/models/kinw/kinw_control.py +270 -0
- hydpy/models/kinw/kinw_derived.py +197 -0
- hydpy/models/kinw/kinw_fixed.py +33 -0
- hydpy/models/kinw/kinw_fluxes.py +37 -0
- hydpy/models/kinw/kinw_inlets.py +11 -0
- hydpy/models/kinw/kinw_model.py +3026 -0
- hydpy/models/kinw/kinw_outlets.py +11 -0
- hydpy/models/kinw/kinw_solver.py +45 -0
- hydpy/models/kinw/kinw_states.py +17 -0
- hydpy/models/kinw_williams.py +1299 -0
- hydpy/models/kinw_williams_ext.py +768 -0
- hydpy/models/lland/__init__.py +42 -0
- hydpy/models/lland/lland_aides.py +38 -0
- hydpy/models/lland/lland_constants.py +88 -0
- hydpy/models/lland/lland_control.py +1329 -0
- hydpy/models/lland/lland_derived.py +380 -0
- hydpy/models/lland/lland_factors.py +18 -0
- hydpy/models/lland/lland_fixed.py +128 -0
- hydpy/models/lland/lland_fluxes.py +626 -0
- hydpy/models/lland/lland_inlets.py +12 -0
- hydpy/models/lland/lland_inputs.py +33 -0
- hydpy/models/lland/lland_logs.py +17 -0
- hydpy/models/lland/lland_masks.py +212 -0
- hydpy/models/lland/lland_model.py +7690 -0
- hydpy/models/lland/lland_outlets.py +12 -0
- hydpy/models/lland/lland_parameters.py +195 -0
- hydpy/models/lland/lland_sequences.py +67 -0
- hydpy/models/lland/lland_states.py +280 -0
- hydpy/models/lland_dd.py +2270 -0
- hydpy/models/lland_knauf.py +2156 -0
- hydpy/models/lland_knauf_ic.py +1920 -0
- hydpy/models/meteo/__init__.py +12 -0
- hydpy/models/meteo/meteo_control.py +154 -0
- hydpy/models/meteo/meteo_derived.py +159 -0
- hydpy/models/meteo/meteo_factors.py +88 -0
- hydpy/models/meteo/meteo_fixed.py +19 -0
- hydpy/models/meteo/meteo_fluxes.py +46 -0
- hydpy/models/meteo/meteo_inputs.py +47 -0
- hydpy/models/meteo/meteo_logs.py +30 -0
- hydpy/models/meteo/meteo_model.py +2904 -0
- hydpy/models/meteo/meteo_parameters.py +14 -0
- hydpy/models/meteo/meteo_sequences.py +22 -0
- hydpy/models/meteo_clear_glob_io.py +77 -0
- hydpy/models/meteo_glob_fao56.py +217 -0
- hydpy/models/meteo_glob_io.py +68 -0
- hydpy/models/meteo_glob_morsim.py +444 -0
- hydpy/models/meteo_precip_io.py +76 -0
- hydpy/models/meteo_psun_sun_glob_io.py +83 -0
- hydpy/models/meteo_sun_fao56.py +188 -0
- hydpy/models/meteo_sun_morsim.py +466 -0
- hydpy/models/meteo_temp_io.py +76 -0
- hydpy/models/musk/__init__.py +15 -0
- hydpy/models/musk/musk_control.py +328 -0
- hydpy/models/musk/musk_derived.py +32 -0
- hydpy/models/musk/musk_factors.py +53 -0
- hydpy/models/musk/musk_fluxes.py +24 -0
- hydpy/models/musk/musk_inlets.py +11 -0
- hydpy/models/musk/musk_masks.py +15 -0
- hydpy/models/musk/musk_model.py +838 -0
- hydpy/models/musk/musk_outlets.py +11 -0
- hydpy/models/musk/musk_sequences.py +88 -0
- hydpy/models/musk/musk_solver.py +68 -0
- hydpy/models/musk/musk_states.py +64 -0
- hydpy/models/musk_classic.py +228 -0
- hydpy/models/musk_mct.py +1247 -0
- hydpy/models/rconc/__init__.py +12 -0
- hydpy/models/rconc/rconc_control.py +473 -0
- hydpy/models/rconc/rconc_derived.py +76 -0
- hydpy/models/rconc/rconc_fluxes.py +19 -0
- hydpy/models/rconc/rconc_logs.py +74 -0
- hydpy/models/rconc/rconc_model.py +260 -0
- hydpy/models/rconc/rconc_states.py +11 -0
- hydpy/models/rconc_nash.py +48 -0
- hydpy/models/rconc_uh.py +53 -0
- hydpy/models/sw1d/__init__.py +17 -0
- hydpy/models/sw1d/sw1d_control.py +356 -0
- hydpy/models/sw1d/sw1d_derived.py +85 -0
- hydpy/models/sw1d/sw1d_factors.py +78 -0
- hydpy/models/sw1d/sw1d_fixed.py +12 -0
- hydpy/models/sw1d/sw1d_fluxes.py +55 -0
- hydpy/models/sw1d/sw1d_inlets.py +17 -0
- hydpy/models/sw1d/sw1d_model.py +3385 -0
- hydpy/models/sw1d/sw1d_outlets.py +11 -0
- hydpy/models/sw1d/sw1d_receivers.py +11 -0
- hydpy/models/sw1d/sw1d_senders.py +11 -0
- hydpy/models/sw1d/sw1d_states.py +23 -0
- hydpy/models/sw1d_channel.py +2051 -0
- hydpy/models/sw1d_gate_out.py +599 -0
- hydpy/models/sw1d_lias.py +105 -0
- hydpy/models/sw1d_lias_sluice.py +531 -0
- hydpy/models/sw1d_network.py +1219 -0
- hydpy/models/sw1d_pump.py +448 -0
- hydpy/models/sw1d_q_in.py +79 -0
- hydpy/models/sw1d_q_out.py +81 -0
- hydpy/models/sw1d_storage.py +78 -0
- hydpy/models/sw1d_weir_out.py +75 -0
- hydpy/models/test/__init__.py +14 -0
- hydpy/models/test/test_control.py +28 -0
- hydpy/models/test/test_fluxes.py +17 -0
- hydpy/models/test/test_model.py +201 -0
- hydpy/models/test/test_solver.py +48 -0
- hydpy/models/test/test_states.py +17 -0
- hydpy/models/test_discontinous.py +46 -0
- hydpy/models/test_stiff0d.py +47 -0
- hydpy/models/test_stiff1d.py +42 -0
- hydpy/models/whmod/__init__.py +21 -0
- hydpy/models/whmod/whmod_constants.py +77 -0
- hydpy/models/whmod/whmod_control.py +333 -0
- hydpy/models/whmod/whmod_derived.py +210 -0
- hydpy/models/whmod/whmod_factors.py +9 -0
- hydpy/models/whmod/whmod_fluxes.py +105 -0
- hydpy/models/whmod/whmod_inputs.py +15 -0
- hydpy/models/whmod/whmod_masks.py +178 -0
- hydpy/models/whmod/whmod_model.py +2091 -0
- hydpy/models/whmod/whmod_parameters.py +155 -0
- hydpy/models/whmod/whmod_sequences.py +193 -0
- hydpy/models/whmod/whmod_states.py +73 -0
- hydpy/models/whmod_rural.py +794 -0
- hydpy/models/whmod_urban.py +1011 -0
- hydpy/models/wland/__init__.py +43 -0
- hydpy/models/wland/wland_aides.py +55 -0
- hydpy/models/wland/wland_constants.py +103 -0
- hydpy/models/wland/wland_control.py +508 -0
- hydpy/models/wland/wland_derived.py +330 -0
- hydpy/models/wland/wland_factors.py +11 -0
- hydpy/models/wland/wland_fixed.py +12 -0
- hydpy/models/wland/wland_fluxes.py +166 -0
- hydpy/models/wland/wland_inputs.py +33 -0
- hydpy/models/wland/wland_masks.py +54 -0
- hydpy/models/wland/wland_model.py +3755 -0
- hydpy/models/wland/wland_outlets.py +11 -0
- hydpy/models/wland/wland_parameters.py +214 -0
- hydpy/models/wland/wland_sequences.py +108 -0
- hydpy/models/wland/wland_solver.py +45 -0
- hydpy/models/wland/wland_states.py +56 -0
- hydpy/models/wland_gd.py +888 -0
- hydpy/models/wland_wag.py +1244 -0
- hydpy/models/wq/__init__.py +14 -0
- hydpy/models/wq/wq_control.py +117 -0
- hydpy/models/wq/wq_derived.py +182 -0
- hydpy/models/wq/wq_factors.py +79 -0
- hydpy/models/wq/wq_fluxes.py +17 -0
- hydpy/models/wq/wq_model.py +1889 -0
- hydpy/models/wq_trapeze.py +168 -0
- hydpy/models/wq_trapeze_strickler.py +101 -0
- hydpy/models/wq_walrus.py +57 -0
- hydpy/py.typed +0 -0
- hydpy/tests/.coveragerc +22 -0
- hydpy/tests/__init__.py +0 -0
- hydpy/tests/check_consistency.py +32 -0
- hydpy/tests/hydpydoctestcustomize.pth +1 -0
- hydpy/tests/hydpydoctestcustomize.py +15 -0
- hydpy/tests/iotesting/__init__.py +0 -0
- hydpy/tests/run_doctests.py +233 -0
- hydpy-6.2.dev1.data/scripts/hyd.py +277 -0
- hydpy-6.2.dev1.dist-info/LICENSE +165 -0
- hydpy-6.2.dev1.dist-info/METADATA +163 -0
- hydpy-6.2.dev1.dist-info/RECORD +890 -0
- hydpy-6.2.dev1.dist-info/WHEEL +5 -0
- hydpy-6.2.dev1.dist-info/licenses_hydpy_installer.txt +42 -0
- hydpy-6.2.dev1.dist-info/top_level.txt +1 -0
hydpy/auxs/statstools.py
ADDED
|
@@ -0,0 +1,2125 @@
|
|
|
1
|
+
"""This module implements statistical functionalities frequently used in hydrological
|
|
2
|
+
modelling."""
|
|
3
|
+
|
|
4
|
+
# import...
|
|
5
|
+
# ...from standard library
|
|
6
|
+
import abc
|
|
7
|
+
import collections
|
|
8
|
+
import copy
|
|
9
|
+
import warnings
|
|
10
|
+
|
|
11
|
+
# ...from site-packages
|
|
12
|
+
import numpy
|
|
13
|
+
|
|
14
|
+
# ...from HydPy
|
|
15
|
+
import hydpy
|
|
16
|
+
from hydpy import config
|
|
17
|
+
from hydpy.core import exceptiontools
|
|
18
|
+
from hydpy.core import devicetools
|
|
19
|
+
from hydpy.core import objecttools
|
|
20
|
+
from hydpy.auxs import validtools
|
|
21
|
+
from hydpy.core import seriestools
|
|
22
|
+
from hydpy.core import timetools
|
|
23
|
+
from hydpy.core.typingtools import *
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
import pandas
|
|
27
|
+
from scipy import optimize
|
|
28
|
+
from scipy import special
|
|
29
|
+
else:
|
|
30
|
+
pandas = exceptiontools.OptionalImport("pandas", ["pandas"], locals())
|
|
31
|
+
optimize = exceptiontools.OptionalImport("optimize", ["scipy.optimize"], locals())
|
|
32
|
+
special = exceptiontools.OptionalImport("special", ["scipy.special"], locals())
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class SimObs(NamedTuple):
|
|
36
|
+
"""A named tuple containing one array of simulated and one array of observed
|
|
37
|
+
values."""
|
|
38
|
+
|
|
39
|
+
sim: VectorFloat
|
|
40
|
+
obs: VectorFloat
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@overload
|
|
44
|
+
def filter_series(
|
|
45
|
+
*,
|
|
46
|
+
sim: VectorInputFloat,
|
|
47
|
+
obs: VectorInputFloat,
|
|
48
|
+
date_ranges: Iterable[tuple[timetools.DateConstrArg, timetools.DateConstrArg]],
|
|
49
|
+
) -> SimObs:
|
|
50
|
+
"""sim and obs and date_ranges as arguments"""
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@overload
|
|
54
|
+
def filter_series(
|
|
55
|
+
*, sim: VectorInputFloat, obs: VectorInputFloat, months: Iterable[int]
|
|
56
|
+
) -> SimObs:
|
|
57
|
+
"""sim and obs and month as arguments"""
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@overload
|
|
61
|
+
def filter_series(
|
|
62
|
+
*,
|
|
63
|
+
node: devicetools.Node,
|
|
64
|
+
date_ranges: Iterable[tuple[timetools.DateConstrArg, timetools.DateConstrArg]],
|
|
65
|
+
) -> SimObs:
|
|
66
|
+
"""node and date_ranges as arguments"""
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@overload
|
|
70
|
+
def filter_series(*, node: devicetools.Node, months: Iterable[int]) -> SimObs:
|
|
71
|
+
"""node and month as arguments"""
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@objecttools.excmessage_decorator("filter the given series")
|
|
75
|
+
def filter_series(
|
|
76
|
+
*,
|
|
77
|
+
sim: VectorInputFloat | None = None,
|
|
78
|
+
obs: VectorInputFloat | None = None,
|
|
79
|
+
node: devicetools.Node | None = None,
|
|
80
|
+
date_ranges: None | (
|
|
81
|
+
Iterable[tuple[timetools.DateConstrArg, timetools.DateConstrArg]]
|
|
82
|
+
) = None,
|
|
83
|
+
months: Iterable[int] | None = None,
|
|
84
|
+
) -> SimObs:
|
|
85
|
+
"""Filter time series for the given date ranges or months.
|
|
86
|
+
|
|
87
|
+
Often, we want to apply objective functions like |nse| on a subset of the available
|
|
88
|
+
simulated and observed values. The function |filter_series| helps to extract the
|
|
89
|
+
relevant data either by data ranges or by months. Common examples are to pass a
|
|
90
|
+
single date range to ignore the first non-optimal values of a warm-up period, to
|
|
91
|
+
pass a set of date ranges to focus on certain events or to pass a set of months to
|
|
92
|
+
perform a seasonal analysis.
|
|
93
|
+
|
|
94
|
+
To show how |filter_series| works, we prepare a daily initialisation time grid
|
|
95
|
+
spanning two hydrological years:
|
|
96
|
+
|
|
97
|
+
>>> from hydpy import filter_series, pub, Node
|
|
98
|
+
>>> pub.timegrids = "2001-11-01", "2003-11-01", "1d"
|
|
99
|
+
|
|
100
|
+
Next, we prepare a |Node| object and assign some constantly increasing and
|
|
101
|
+
decreasing values to its `simulation` and the `observation` series, respectively:
|
|
102
|
+
|
|
103
|
+
>>> import numpy
|
|
104
|
+
>>> node = Node("test")
|
|
105
|
+
>>> node.prepare_allseries()
|
|
106
|
+
>>> node.sequences.sim.series = numpy.arange(1, 2*365+1)
|
|
107
|
+
>>> node.sequences.obs.series = numpy.arange(2*365, 0, -1)
|
|
108
|
+
|
|
109
|
+
First, we select data of arbitrary sub-periods via the `data_ranges` argument.
|
|
110
|
+
Each data range consists of the start-point and the end-point of a sub-period.
|
|
111
|
+
Here, we choose all values that belong to 31 October or 1 November (note that
|
|
112
|
+
unsorted data ranges are acceptable):
|
|
113
|
+
|
|
114
|
+
>>> date_ranges = [("2001-11-01", "2001-11-02"),
|
|
115
|
+
... ("2002-10-31", "2002-11-02"),
|
|
116
|
+
... ("2003-10-31", "2003-11-01")]
|
|
117
|
+
>>> results = filter_series(node=node, date_ranges=date_ranges)
|
|
118
|
+
|
|
119
|
+
|filter_series| returns the data within index-sorted |pandas.Series| objects (note
|
|
120
|
+
that the index addresses the left boundary of each time step):
|
|
121
|
+
|
|
122
|
+
>>> results.sim # doctest: +ELLIPSIS
|
|
123
|
+
2001-11-01 1.0
|
|
124
|
+
2002-10-31 365.0
|
|
125
|
+
2002-11-01 366.0
|
|
126
|
+
2003-10-31 730.0
|
|
127
|
+
Name: sim...
|
|
128
|
+
>>> results.obs # doctest: +ELLIPSIS
|
|
129
|
+
2001-11-01 730.0
|
|
130
|
+
2002-10-31 366.0
|
|
131
|
+
2002-11-01 365.0
|
|
132
|
+
2003-10-31 1.0
|
|
133
|
+
Name: obs...
|
|
134
|
+
|
|
135
|
+
To help avoiding possible hard-to-find errors, |filter_series| performs the
|
|
136
|
+
following checks:
|
|
137
|
+
|
|
138
|
+
>>> date_ranges = [("2001-10-31", "2003-11-01")]
|
|
139
|
+
>>> filter_series(node=node, date_ranges=date_ranges)
|
|
140
|
+
Traceback (most recent call last):
|
|
141
|
+
...
|
|
142
|
+
ValueError: While trying to filter the given series, the following error occurred: \
|
|
143
|
+
The given date (2001-10-31 00:00:00) is before the first date of the initialisation \
|
|
144
|
+
period (2001-11-01 00:00:00).
|
|
145
|
+
|
|
146
|
+
>>> date_ranges = [("2001-11-01", "2003-11-02")]
|
|
147
|
+
>>> filter_series(node=node, date_ranges=date_ranges)
|
|
148
|
+
Traceback (most recent call last):
|
|
149
|
+
...
|
|
150
|
+
ValueError: While trying to filter the given series, the following error occurred: \
|
|
151
|
+
The given date (2003-11-02 00:00:00) is behind the last date of the initialisation \
|
|
152
|
+
period (2003-11-01 00:00:00).
|
|
153
|
+
|
|
154
|
+
>>> date_ranges = [("2001-11-02", "2001-11-02")]
|
|
155
|
+
>>> filter_series(node=node, date_ranges=date_ranges)
|
|
156
|
+
Traceback (most recent call last):
|
|
157
|
+
...
|
|
158
|
+
ValueError: While trying to filter the given series, the following error occurred: \
|
|
159
|
+
The given first date `2001-11-02 00:00:00` is not before than the given last date \
|
|
160
|
+
`2001-11-02 00:00:00`.
|
|
161
|
+
|
|
162
|
+
Note that function |filter_series| does not remove any duplicates:
|
|
163
|
+
|
|
164
|
+
>>> date_ranges = [("2001-11-01", "2001-11-05"),
|
|
165
|
+
... ("2001-11-01", "2001-11-02"),
|
|
166
|
+
... ("2001-11-04", "2001-11-06")]
|
|
167
|
+
>>> sim = filter_series(node=node, date_ranges=date_ranges).sim
|
|
168
|
+
>>> sim # doctest: +ELLIPSIS
|
|
169
|
+
2001-11-01 1.0
|
|
170
|
+
2001-11-01 1.0
|
|
171
|
+
2001-11-02 2.0
|
|
172
|
+
2001-11-03 3.0
|
|
173
|
+
2001-11-04 4.0
|
|
174
|
+
2001-11-04 4.0
|
|
175
|
+
2001-11-05 5.0
|
|
176
|
+
Name: sim...
|
|
177
|
+
|
|
178
|
+
Instead of date ranges, one can specify months via integer numbers. We begin with
|
|
179
|
+
selecting October (10) and November (11) individually:
|
|
180
|
+
|
|
181
|
+
>>> sim = filter_series(node=node, months=[11]).sim
|
|
182
|
+
>>> len(sim)
|
|
183
|
+
60
|
|
184
|
+
>>> sim # doctest: +ELLIPSIS
|
|
185
|
+
2001-11-01 1.0
|
|
186
|
+
2001-11-02 2.0
|
|
187
|
+
...
|
|
188
|
+
2002-11-29 394.0
|
|
189
|
+
2002-11-30 395.0
|
|
190
|
+
Name: sim...
|
|
191
|
+
|
|
192
|
+
>>> sim = filter_series(node=node, months=[10]).sim
|
|
193
|
+
>>> len(sim)
|
|
194
|
+
62
|
|
195
|
+
>>> sim # doctest: +ELLIPSIS
|
|
196
|
+
2002-10-01 335.0
|
|
197
|
+
2002-10-02 336.0
|
|
198
|
+
...
|
|
199
|
+
2003-10-30 729.0
|
|
200
|
+
2003-10-31 730.0
|
|
201
|
+
Name: sim...
|
|
202
|
+
|
|
203
|
+
One can select multiple months, which neither need to be sorted nor consecutive:
|
|
204
|
+
|
|
205
|
+
>>> sim = filter_series(node=node, months=[4, 1]).sim
|
|
206
|
+
>>> len(sim)
|
|
207
|
+
122
|
|
208
|
+
>>> sim # doctest: +ELLIPSIS
|
|
209
|
+
2002-01-01 62.0
|
|
210
|
+
2002-01-02 63.0
|
|
211
|
+
...
|
|
212
|
+
2003-04-29 545.0
|
|
213
|
+
2003-04-30 546.0
|
|
214
|
+
Name: sim...
|
|
215
|
+
|
|
216
|
+
Note that you are also free to either pass the `sim` and `obs` series directly
|
|
217
|
+
instead of a `node` (see function |prepare_arrays| for further information):
|
|
218
|
+
|
|
219
|
+
>>> xs = node.sequences.sim.series
|
|
220
|
+
>>> ys = node.sequences.obs.series
|
|
221
|
+
>>> filter_series(sim=xs, obs=ys, months=[4, 1]).sim # doctest: +ELLIPSIS
|
|
222
|
+
2002-01-01 62.0
|
|
223
|
+
2002-01-02 63.0
|
|
224
|
+
...
|
|
225
|
+
2003-04-29 545.0
|
|
226
|
+
2003-04-30 546.0
|
|
227
|
+
Name: sim...
|
|
228
|
+
|
|
229
|
+
Missing or double information for arguments `date_ranges` and `months` results in
|
|
230
|
+
the following error messages:
|
|
231
|
+
|
|
232
|
+
>>> filter_series(node=node)
|
|
233
|
+
Traceback (most recent call last):
|
|
234
|
+
...
|
|
235
|
+
ValueError: While trying to filter the given series, the following error occurred: \
|
|
236
|
+
You need to define either the `date_ranges` or `months` argument, but none of them is \
|
|
237
|
+
given.
|
|
238
|
+
|
|
239
|
+
>>> filter_series(node=node, date_ranges=[], months=[])
|
|
240
|
+
Traceback (most recent call last):
|
|
241
|
+
...
|
|
242
|
+
ValueError: While trying to filter the given series, the following error occurred: \
|
|
243
|
+
You need to define either the `date_ranges` or `months` argument, but both of them are \
|
|
244
|
+
given.
|
|
245
|
+
"""
|
|
246
|
+
dataframe = pandas.DataFrame()
|
|
247
|
+
sim_, obs_ = prepare_arrays(
|
|
248
|
+
sim=sim, obs=obs, node=node, skip_nan=False, subperiod=False
|
|
249
|
+
)
|
|
250
|
+
del sim, obs
|
|
251
|
+
dataframe["sim"] = sim_
|
|
252
|
+
dataframe["obs"] = obs_
|
|
253
|
+
tg = hydpy.pub.timegrids.init
|
|
254
|
+
dataframe.index = pandas.date_range(
|
|
255
|
+
start=tg.firstdate.datetime,
|
|
256
|
+
end=tg.lastdate.datetime - tg.stepsize.timedelta,
|
|
257
|
+
freq=tg.stepsize.timedelta,
|
|
258
|
+
)
|
|
259
|
+
dataframe_selected = pandas.DataFrame()
|
|
260
|
+
if (date_ranges is not None) and (months is None):
|
|
261
|
+
for date_range in date_ranges:
|
|
262
|
+
date0 = tg[tg[date_range[0]]]
|
|
263
|
+
date1 = tg[tg[date_range[1]]]
|
|
264
|
+
if date0 < tg.firstdate:
|
|
265
|
+
raise ValueError(
|
|
266
|
+
f"The given date ({date0}) is before the first date of the "
|
|
267
|
+
f"initialisation period ({tg.firstdate})."
|
|
268
|
+
)
|
|
269
|
+
if date1 > tg.lastdate:
|
|
270
|
+
raise ValueError(
|
|
271
|
+
f"The given date ({date1}) is behind the last date of the "
|
|
272
|
+
f"initialisation period ({tg.lastdate})."
|
|
273
|
+
)
|
|
274
|
+
if date0 >= date1:
|
|
275
|
+
raise ValueError(
|
|
276
|
+
f"The given first date `{date0}` is not before than the given "
|
|
277
|
+
f"last date `{date1}`."
|
|
278
|
+
)
|
|
279
|
+
idx0 = date0.to_string(style="iso1")
|
|
280
|
+
idx1 = (date1 - tg.stepsize).to_string(style="iso1")
|
|
281
|
+
selected_dates = dataframe.loc[idx0:idx1] # type: ignore[misc]
|
|
282
|
+
dataframe_selected = pandas.concat([selected_dates, dataframe_selected])
|
|
283
|
+
elif (date_ranges is None) and (months is not None):
|
|
284
|
+
for month in months:
|
|
285
|
+
selected_dates = dataframe.loc[dataframe.index.month == int(month)]
|
|
286
|
+
dataframe_selected = pandas.concat([selected_dates, dataframe_selected])
|
|
287
|
+
elif (date_ranges is None) and (months is None):
|
|
288
|
+
raise ValueError(
|
|
289
|
+
"You need to define either the `date_ranges` or `months` "
|
|
290
|
+
"argument, but none of them is given."
|
|
291
|
+
)
|
|
292
|
+
else:
|
|
293
|
+
raise ValueError(
|
|
294
|
+
"You need to define either the `date_ranges` or `months` argument, but "
|
|
295
|
+
"both of them are given."
|
|
296
|
+
)
|
|
297
|
+
dataframe_selected = dataframe_selected.sort_index()
|
|
298
|
+
return SimObs(sim=dataframe_selected["sim"], obs=dataframe_selected["obs"])
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def prepare_arrays(
|
|
302
|
+
*,
|
|
303
|
+
sim: VectorInputFloat | None = None,
|
|
304
|
+
obs: VectorInputFloat | None = None,
|
|
305
|
+
node: devicetools.Node | None = None,
|
|
306
|
+
skip_nan: bool = False,
|
|
307
|
+
subperiod: bool | None = None,
|
|
308
|
+
) -> SimObs:
|
|
309
|
+
"""Prepare and return two |numpy| arrays based on the given arguments.
|
|
310
|
+
|
|
311
|
+
Note that many functions provided by module |statstools| apply function
|
|
312
|
+
|prepare_arrays| internally (e.g. |nse|). But you can also use it manually, as
|
|
313
|
+
shown in the following examples.
|
|
314
|
+
|
|
315
|
+
Function |prepare_arrays| can extract time series data from |Node| objects. To set
|
|
316
|
+
up an example for this, we define an initialisation period and prepare a |Node|
|
|
317
|
+
object:
|
|
318
|
+
|
|
319
|
+
>>> from hydpy import pub, Node, round_, nan
|
|
320
|
+
>>> pub.timegrids = "01.01.2000", "07.01.2000", "1d"
|
|
321
|
+
>>> node = Node("test")
|
|
322
|
+
|
|
323
|
+
Next, we assign some values to the `simulation` and the `observation` sequences of
|
|
324
|
+
the node:
|
|
325
|
+
|
|
326
|
+
>>> node.prepare_allseries()
|
|
327
|
+
>>> with pub.options.checkseries(False):
|
|
328
|
+
... node.sequences.sim.series = 1.0, nan, nan, nan, 2.0, 3.0
|
|
329
|
+
... node.sequences.obs.series = 4.0, 5.0, nan, nan, nan, 6.0
|
|
330
|
+
|
|
331
|
+
Now we can pass the node object to function |prepare_arrays| and get the
|
|
332
|
+
(unmodified) time series data:
|
|
333
|
+
|
|
334
|
+
>>> from hydpy import prepare_arrays
|
|
335
|
+
>>> arrays = prepare_arrays(node=node)
|
|
336
|
+
>>> round_(arrays.sim)
|
|
337
|
+
1.0, nan, nan, nan, 2.0, 3.0
|
|
338
|
+
>>> round_(arrays.obs)
|
|
339
|
+
4.0, 5.0, nan, nan, nan, 6.0
|
|
340
|
+
|
|
341
|
+
Alternatively, we can pass directly any iterable (e.g. |list| and |tuple| objects)
|
|
342
|
+
containing the `simulated` and `observed` data:
|
|
343
|
+
|
|
344
|
+
>>> arrays = prepare_arrays(sim=list(node.sequences.sim.series),
|
|
345
|
+
... obs=tuple(node.sequences.obs.series))
|
|
346
|
+
>>> round_(arrays.sim)
|
|
347
|
+
1.0, nan, nan, nan, 2.0, 3.0
|
|
348
|
+
>>> round_(arrays.obs)
|
|
349
|
+
4.0, 5.0, nan, nan, nan, 6.0
|
|
350
|
+
|
|
351
|
+
The optional `skip_nan` flag allows skipping all values, which are no numbers.
|
|
352
|
+
Note that |prepare_arrays| returns only those pairs of `simulated` and `observed`
|
|
353
|
+
values that do not contain any `nan` value:
|
|
354
|
+
|
|
355
|
+
>>> arrays = prepare_arrays(node=node, skip_nan=True)
|
|
356
|
+
>>> round_(arrays.sim)
|
|
357
|
+
1.0, 3.0
|
|
358
|
+
>>> round_(arrays.obs)
|
|
359
|
+
4.0, 6.0
|
|
360
|
+
|
|
361
|
+
If you are interested in analysing a sub-period, adapt the global |Timegrids.eval_|
|
|
362
|
+
|Timegrid| beforehand. When passing a |Node| object, function |prepare_arrays|
|
|
363
|
+
then returns the data of the current evaluation sub-period only:
|
|
364
|
+
|
|
365
|
+
>>> pub.timegrids.eval_.dates = "02.01.2000", "06.01.2000"
|
|
366
|
+
>>> arrays = prepare_arrays(node=node)
|
|
367
|
+
>>> round_(arrays.sim)
|
|
368
|
+
nan, nan, nan, 2.0
|
|
369
|
+
>>> round_(arrays.obs)
|
|
370
|
+
5.0, nan, nan, nan
|
|
371
|
+
|
|
372
|
+
Suppose one instead passes the simulation and observation time series directly
|
|
373
|
+
(which possibly fit the evaluation period already). In that case, function
|
|
374
|
+
|prepare_arrays| ignores the current |Timegrids.eval_| |Timegrid| by default:
|
|
375
|
+
|
|
376
|
+
>>> arrays = prepare_arrays(sim=arrays.sim, obs=arrays.obs)
|
|
377
|
+
>>> round_(arrays.sim)
|
|
378
|
+
nan, nan, nan, 2.0
|
|
379
|
+
>>> round_(arrays.obs)
|
|
380
|
+
5.0, nan, nan, nan
|
|
381
|
+
|
|
382
|
+
Use the `subperiod` argument to deviate from the default behaviour:
|
|
383
|
+
|
|
384
|
+
>>> arrays = prepare_arrays(node=node, subperiod=False)
|
|
385
|
+
>>> round_(arrays.sim)
|
|
386
|
+
1.0, nan, nan, nan, 2.0, 3.0
|
|
387
|
+
>>> round_(arrays.obs)
|
|
388
|
+
4.0, 5.0, nan, nan, nan, 6.0
|
|
389
|
+
|
|
390
|
+
>>> arrays = prepare_arrays(sim=arrays.sim, obs=arrays.obs, subperiod=True)
|
|
391
|
+
>>> round_(arrays.sim)
|
|
392
|
+
nan, nan, nan, 2.0
|
|
393
|
+
>>> round_(arrays.obs)
|
|
394
|
+
5.0, nan, nan, nan
|
|
395
|
+
|
|
396
|
+
The final examples show the error messages returned in case of invalid combinations
|
|
397
|
+
of input arguments:
|
|
398
|
+
|
|
399
|
+
>>> prepare_arrays()
|
|
400
|
+
Traceback (most recent call last):
|
|
401
|
+
...
|
|
402
|
+
ValueError: Neither a `Node` object is passed to argument `node` nor are arrays \
|
|
403
|
+
passed to arguments `sim` and `obs`.
|
|
404
|
+
|
|
405
|
+
>>> prepare_arrays(sim=node.sequences.sim.series, node=node)
|
|
406
|
+
Traceback (most recent call last):
|
|
407
|
+
...
|
|
408
|
+
ValueError: Values are passed to both arguments `sim` and `node`, which is not \
|
|
409
|
+
allowed.
|
|
410
|
+
|
|
411
|
+
>>> prepare_arrays(obs=node.sequences.obs.series, node=node)
|
|
412
|
+
Traceback (most recent call last):
|
|
413
|
+
...
|
|
414
|
+
ValueError: Values are passed to both arguments `obs` and `node`, which is not \
|
|
415
|
+
allowed.
|
|
416
|
+
|
|
417
|
+
>>> prepare_arrays(sim=node.sequences.sim.series)
|
|
418
|
+
Traceback (most recent call last):
|
|
419
|
+
...
|
|
420
|
+
ValueError: A value is passed to argument `sim` but no value is passed to argument \
|
|
421
|
+
`obs`.
|
|
422
|
+
|
|
423
|
+
>>> prepare_arrays(obs=node.sequences.obs.series)
|
|
424
|
+
Traceback (most recent call last):
|
|
425
|
+
...
|
|
426
|
+
ValueError: A value is passed to argument `obs` but no value is passed to argument \
|
|
427
|
+
`sim`.
|
|
428
|
+
"""
|
|
429
|
+
if node is not None:
|
|
430
|
+
if sim is not None:
|
|
431
|
+
raise ValueError(
|
|
432
|
+
"Values are passed to both arguments `sim` and `node`, which is not "
|
|
433
|
+
"allowed."
|
|
434
|
+
)
|
|
435
|
+
if obs is not None:
|
|
436
|
+
raise ValueError(
|
|
437
|
+
"Values are passed to both arguments `obs` and `node`, which is not "
|
|
438
|
+
"allowed."
|
|
439
|
+
)
|
|
440
|
+
sim = node.sequences.sim.series
|
|
441
|
+
obs = node.sequences.obs.series
|
|
442
|
+
elif (sim is not None) and (obs is None):
|
|
443
|
+
raise ValueError(
|
|
444
|
+
"A value is passed to argument `sim` but no value is passed to argument "
|
|
445
|
+
"`obs`."
|
|
446
|
+
)
|
|
447
|
+
elif (obs is not None) and (sim is None):
|
|
448
|
+
raise ValueError(
|
|
449
|
+
"A value is passed to argument `obs` but no value is passed to argument "
|
|
450
|
+
"`sim`."
|
|
451
|
+
)
|
|
452
|
+
elif (sim is None) and (obs is None):
|
|
453
|
+
raise ValueError(
|
|
454
|
+
"Neither a `Node` object is passed to argument `node` nor are arrays "
|
|
455
|
+
"passed to arguments `sim` and `obs`."
|
|
456
|
+
)
|
|
457
|
+
sim_ = numpy.asarray(sim)
|
|
458
|
+
obs_ = numpy.asarray(obs)
|
|
459
|
+
if subperiod or ((subperiod is None) and (node is not None)):
|
|
460
|
+
idx0, idx1 = hydpy.pub.timegrids.evalindices
|
|
461
|
+
sim_ = sim_[idx0:idx1]
|
|
462
|
+
obs_ = obs_[idx0:idx1]
|
|
463
|
+
if skip_nan:
|
|
464
|
+
idxs = ~numpy.isnan(sim_) * ~numpy.isnan(obs_)
|
|
465
|
+
sim_ = sim_[idxs]
|
|
466
|
+
obs_ = obs_[idxs]
|
|
467
|
+
return SimObs(sim=sim_, obs=obs_)
|
|
468
|
+
|
|
469
|
+
|
|
470
|
+
class Criterion(Protocol):
    """Callback protocol for efficiency criteria like |nse|.

    All conforming callables accept either the simulated and observed series
    directly (`sim`/`obs`) or a |Node| object (`node`) providing both series,
    and return a scalar efficiency value.
    """

    # Variant 1: pass the simulated and the observed series directly.
    @overload
    def __call__(
        self,
        *,
        sim: VectorInputFloat,
        obs: VectorInputFloat,
        skip_nan: bool = False,
        subperiod: bool = False,
    ) -> float: ...

    # Variant 2: pass a node object that provides both series; note that
    # `subperiod` then defaults to `True`.
    @overload
    def __call__(
        self, *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
    ) -> float: ...
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
@overload
def rmse(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def rmse(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the root-mean-square error")
def rmse(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the root-mean-square error.

    >>> from hydpy import rmse, round_
    >>> round_(rmse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(rmse(sim=[1.0, 2.0, 3.0], obs=[0.5, 2.0, 4.5]))
    0.912871

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |rmse|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(float, numpy.sqrt(numpy.mean((sim_ - obs_) ** 2)))
|
|
532
|
+
|
|
533
|
+
|
|
534
|
+
@overload
def nse(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def nse(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the Nash-Sutcliffe efficiency")
def nse(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the efficiency criteria after Nash & Sutcliffe.

    If the simulated values predict the observed values as well as the average
    observed value (regarding the mean square error), the NSE value is zero:

    >>> from hydpy import nse, round_
    >>> round_(nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0]))
    0.0

    For worse and better agreement, the NSE is negative or positive, respectively:

    >>> round_(nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -3.0
    >>> round_(nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.5

    The highest possible value is one:

    >>> round_(nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    1.0

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |nse|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(
        float,
        1.0 - numpy.sum((sim_ - obs_) ** 2) / numpy.sum((obs_ - numpy.mean(obs_)) ** 2),
    )
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
@overload
def nse_log(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def nse_log(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the log-Nash-Sutcliffe efficiency")
def nse_log(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the efficiency criteria after Nash & Sutcliffe for logarithmic values.

    The following calculations repeat the ones of the documentation on function |nse|
    but with exponentiated values. Hence, the results are similar or, as in the first
    and the last example, even identical:

    >>> from hydpy import nse_log, round_
    >>> from numpy import exp
    >>> round_(nse_log(sim=exp([2.0, 2.0, 2.0]), obs=exp([1.0, 2.0, 3.0])))
    0.0
    >>> round_(nse_log(sim=exp([0.0, 2.0, 4.0]), obs=exp([1.0, 2.0, 3.0])))
    0.0

    >>> round_(nse(sim=exp([3.0, 2.0, 1.0]), obs=exp([1.0, 2.0, 3.0])))
    -2.734185
    >>> round_(nse(sim=exp([1.0, 2.0, 2.0]), obs=exp([1.0, 2.0, 3.0])))
    0.002139

    >>> round_(nse(sim=exp([1.0, 2.0, 3.0]), obs=exp([1.0, 2.0, 3.0])))
    1.0

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |nse_log|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(
        float,
        1.0
        - numpy.sum((numpy.log(sim_) - numpy.log(obs_)) ** 2)
        / numpy.sum((numpy.log(obs_) - numpy.mean(numpy.log(obs_))) ** 2),
    )
|
|
658
|
+
|
|
659
|
+
|
|
660
|
+
@overload
def corr2(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def corr2(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the R²-Error")
def corr2(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the coefficient of determination via the square of the coefficient of
    correlation according to Bravais-Pearson.

    For perfect positive or negative correlation, |corr2| returns 1:

    >>> from hydpy import corr2, round_
    >>> round_(corr2(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(corr2(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    1.0

    If there is no correlation at all, |corr2| returns 0:

    >>> round_(corr2(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 1.0]))
    0.0

    An intermediate example:

    >>> round_(corr2(sim=[2.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    0.75

    Take care if there is no variation in one of the data series. Then the correlation
    coefficient is not defined, and |corr2| returns |numpy.nan|:

    >>> round_(corr2(sim=[2.0, 2.0, 2.0], obs=[2.0, 2.0, 3.0]))
    nan

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |corr2|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    # Zero variance makes the correlation coefficient undefined.
    if (numpy.std(sim_) == 0.0) or (numpy.std(obs_) == 0.0):
        return numpy.nan
    return cast(float, numpy.corrcoef(sim_, obs_)[0, 1] ** 2)
|
|
724
|
+
|
|
725
|
+
|
|
726
|
+
@overload
def kge(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def kge(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the Kling-Gupta-Efficiency")
def kge(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the Kling-Gupta efficiency according to :cite:t:`ref-Kling2012`.

    For a perfect fit, |kge| returns one:

    >>> from hydpy import kge, round_
    >>> round_(kge(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    1.0

    In each of the following three examples, only one of the KGE components deviates
    from one:

    >>> round_(kge(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))  # imperfect correlation
    -1.0
    >>> round_(kge(sim=[3.0, 2.0, 1.0], obs=[6.0, 4.0, 2.0]))  # imperfect average
    0.5
    >>> round_(kge(sim=[3.0, 2.0, 1.0], obs=[4.0, 2.0, 0.0]))  # imperfect variation
    0.5

    Finally, a mixed example, where all components deviate from one:

    >>> round_(kge(sim=[3.0, 2.0, 1.0], obs=[2.0, 2.0, 1.0]))
    0.495489

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |kge|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    # The three KGE components: correlation (r), bias ratio (b), and the ratio
    # of the variation coefficients (g).
    r = numpy.corrcoef(sim_, obs_)[0, 1]
    m_sim, m_obs = numpy.mean(sim_), numpy.mean(obs_)
    s_sim, s_obs = numpy.std(sim_), numpy.std(obs_)
    b = m_sim / m_obs
    g = (s_sim / m_sim) / (s_obs / m_obs)
    return cast(float, 1.0 - ((r - 1.0) ** 2 + (b - 1.0) ** 2 + (g - 1.0) ** 2) ** 0.5)
|
|
789
|
+
|
|
790
|
+
|
|
791
|
+
@overload
def bias_abs(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def bias_abs(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the absolute bias")
def bias_abs(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the absolute difference between the means of the simulated and the
    observed values.

    >>> from hydpy import bias_abs, round_
    >>> round_(bias_abs(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(bias_abs(sim=[5.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(bias_abs(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -1.0

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |bias_abs|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(float, numpy.mean(sim_ - obs_))
|
|
837
|
+
|
|
838
|
+
|
|
839
|
+
@overload
def bias_rel(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def bias_rel(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the relative bias")
def bias_rel(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the relative difference between the means of the simulated and the
    observed values.

    >>> from hydpy import bias_rel, round_
    >>> round_(bias_rel(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(bias_rel(sim=[5.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    0.5
    >>> round_(bias_rel(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -0.5

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |bias_rel|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(float, numpy.mean(sim_) / numpy.mean(obs_) - 1.0)
|
|
885
|
+
|
|
886
|
+
|
|
887
|
+
@overload
def std_ratio(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def std_ratio(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the standard deviation ratio")
def std_ratio(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the ratio between the standard deviation of the simulated and the
    observed values.

    >>> from hydpy import round_, std_ratio
    >>> round_(std_ratio(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(std_ratio(sim=[1.0, 1.0, 1.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(std_ratio(sim=[0.0, 3.0, 6.0], obs=[1.0, 2.0, 3.0]))
    2.0

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |std_ratio|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    return cast(float, numpy.std(sim_) / numpy.std(obs_) - 1.0)
|
|
933
|
+
|
|
934
|
+
|
|
935
|
+
@overload
def var_ratio(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def var_ratio(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the variation coefficient ratio")
def var_ratio(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the ratio between the variation coefficients of the simulated and the
    observed values.

    >>> from hydpy import round_, var_ratio
    >>> round_(var_ratio(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0]))
    0.0
    >>> round_(var_ratio(sim=[1.0, 2.0, 3.0], obs=[0.0, 1.0, 2.0]))
    -0.5
    >>> round_(var_ratio(sim=[1.0, 2.0, 3.0], obs=[0.0, 2.0, 4.0]))
    -0.5

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |var_ratio|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    # Variation coefficient: standard deviation scaled by the mean.
    var_sim = numpy.std(sim_) / numpy.mean(sim_)
    var_obs = numpy.std(obs_) / numpy.mean(obs_)
    return cast(float, var_sim / var_obs - 1.0)
|
|
983
|
+
|
|
984
|
+
|
|
985
|
+
@overload
def corr(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def corr(
    *, node: devicetools.Node, skip_nan: bool = False, subperiod: bool = True
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator("calculate the Pearson correlation coefficient")
def corr(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the product-moment correlation coefficient after Pearson.

    >>> from hydpy import corr, round_
    >>> round_(corr(sim=[0.5, 1.0, 1.5], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(corr(sim=[4.0, 2.0, 0.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(corr(sim=[1.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    0.0

    Take care if there is no variation in one of the data series. Then the correlation
    coefficient is not defined, and |corr| returns |numpy.nan|:

    >>> round_(corr(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0]))
    nan

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |corr|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    # Zero variance makes the correlation coefficient undefined.
    if (numpy.std(sim_) == 0.0) or (numpy.std(obs_) == 0.0):
        return numpy.nan
    return cast(float, numpy.corrcoef(sim_, obs_)[0, 1])
|
|
1038
|
+
|
|
1039
|
+
|
|
1040
|
+
def _pars_sepd(xi: float, beta: float) -> tuple[float, float, float, float]:
    """Derive the internal parameters (mu_xi, sigma_xi, w_beta, c_beta) of the
    skewed exponential power distribution from its skewness (`xi`) and
    kurtosis-related (`beta`) parameters."""
    one_beta = 1.0 + beta
    g_upper = special.gamma(3.0 * one_beta / 2.0)
    g_lower = special.gamma(one_beta / 2.0)
    weight = g_upper**0.5 / one_beta / g_lower**1.5
    scale = (g_upper / g_lower) ** (1.0 / one_beta)
    # First and second absolute moments of the (unskewed) distribution.
    moment1 = special.gamma(one_beta) / g_upper**0.5 / g_lower**0.5
    moment2 = 1.0
    # Location and spread correction induced by the skewness parameter.
    location = moment1 * (xi - 1.0 / xi)
    spread = numpy.sqrt(
        (moment2 - moment1**2) * (xi**2 + 1.0 / xi**2) + 2 * moment1**2 - moment2
    )
    return location, spread, weight, scale
|
|
1050
|
+
|
|
1051
|
+
|
|
1052
|
+
def _pars_h(sigma1: float, sigma2: float, sim: VectorFloat) -> VectorFloat:
    """Combine the homoscedastic (`sigma1`, scaled by the mean simulation) and the
    heteroscedastic (`sigma2`, scaled by each simulated value) error parameters
    into one standard deviation per time step."""
    homoscedastic_part = sigma1 * cast(float, numpy.mean(sim))
    return homoscedastic_part + sigma2 * sim
|
|
1054
|
+
|
|
1055
|
+
|
|
1056
|
+
@overload
def hsepd_pdf(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> VectorFloat:
    """sim and obs as arguments"""


@overload
def hsepd_pdf(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    node: devicetools.Node,
    skip_nan: bool = False,
    subperiod: bool = True,
) -> VectorFloat:
    """node as argument"""


@objecttools.excmessage_decorator(
    "calculate the probability densities with the heteroskedastic skewed exponential "
    "power distribution"
)
def hsepd_pdf(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> VectorFloat:
    """Calculate the probability densities based on the heteroskedastic skewed
    exponential power distribution.

    For convenience, we store the required parameters of the probability density
    function as well as the simulated and observed values in a dictionary:

    >>> import numpy
    >>> from hydpy import hsepd_pdf, round_
    >>> general = {"sigma1": 0.2,
    ...            "sigma2": 0.0,
    ...            "xi": 1.0,
    ...            "beta": 0.0,
    ...            "sim": numpy.arange(10.0, 41.0),
    ...            "obs": numpy.full(31, 25.0)}

    The following test function allows for varying one parameter and prints some and
    plots all the probability density values corresponding to different simulated
    values:

    >>> def test(**kwargs):
    ...     from matplotlib import pyplot
    ...     special = general.copy()
    ...     name, values = list(kwargs.items())[0]
    ...     results = numpy.zeros((len(general["sim"]), len(values)+1))
    ...     results[:, 0] = general["sim"]
    ...     for jdx, value in enumerate(values):
    ...         special[name] = value
    ...         results[:, jdx+1] = hsepd_pdf(**special)
    ...         pyplot.plot(results[:, 0], results[:, jdx+1],
    ...                     label="%s=%.1f" % (name, value))
    ...     pyplot.legend()
    ...     for idx, result in enumerate(results):
    ...         if not (idx % 5):
    ...             round_(result)

    When varying `beta`, the resulting probabilities correspond to the Laplace
    distribution (1.0), normal distribution (0.0), and the uniform distribution (-1.0),
    respectively. Note that we use -0.99 instead of -1.0 for approximating the uniform
    distribution to prevent from running into numerical problems, which are not solved
    yet:

    >>> test(beta=[1.0, 0.0, -0.99])
    10.0, 0.002032, 0.000886, 0.0
    15.0, 0.008359, 0.010798, 0.0
    20.0, 0.034382, 0.048394, 0.057739
    25.0, 0.141421, 0.079788, 0.057739
    30.0, 0.034382, 0.048394, 0.057739
    35.0, 0.008359, 0.010798, 0.0
    40.0, 0.002032, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    When varying `xi`, the resulting density is negatively skewed (0.2), symmetric
    (1.0), and positively skewed (5.0), respectively:

    >>> test(xi=[0.2, 1.0, 5.0])
    10.0, 0.0, 0.000886, 0.003175
    15.0, 0.0, 0.010798, 0.012957
    20.0, 0.092845, 0.048394, 0.036341
    25.0, 0.070063, 0.079788, 0.070063
    30.0, 0.036341, 0.048394, 0.092845
    35.0, 0.012957, 0.010798, 0.0
    40.0, 0.003175, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    In the above examples, the actual `sigma` (5.0) is calculated by multiplying
    `sigma1` (0.2) with the mean simulated value (25.0) internally. This can be done
    for modelling homoscedastic errors. Instead, `sigma2` is multiplied with the
    individual simulated values to account for heteroscedastic errors. With increasing
    values of `sigma2`, the resulting densities are modified as follows:

    >>> test(sigma2=[0.0, 0.1, 0.2])
    10.0, 0.000886, 0.002921, 0.005737
    15.0, 0.010798, 0.018795, 0.022831
    20.0, 0.048394, 0.044159, 0.037988
    25.0, 0.079788, 0.053192, 0.039894
    30.0, 0.048394, 0.04102, 0.032708
    35.0, 0.010798, 0.023493, 0.023493
    40.0, 0.000886, 0.011053, 0.015771

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |hsepd_pdf|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    sigmas = _pars_h(sigma1, sigma2, sim_)
    mu_xi, sigma_xi, w_beta, c_beta = _pars_sepd(xi, beta)
    x, mu = obs_, sim_
    # Standardised residuals.
    a = (x - mu) / sigmas
    a_xi = cast(VectorFloat, numpy.empty(a.shape))
    # Apply the skewness factor `xi` on the negative branch and 1/xi on the
    # non-negative branch of the (shifted and scaled) residuals.
    idxs = mu_xi + sigma_xi * a < 0.0
    a_xi[idxs] = numpy.absolute(xi * (mu_xi + sigma_xi * a[idxs]))
    a_xi[~idxs] = numpy.absolute(1.0 / xi * (mu_xi + sigma_xi * a[~idxs]))
    ps = (
        (2.0 * sigma_xi / (xi + 1.0 / xi) * w_beta)
        * cast(VectorFloat, numpy.exp(-c_beta * a_xi ** (2.0 / (1.0 + beta))))
    ) / sigmas
    return ps
|
|
1213
|
+
|
|
1214
|
+
|
|
1215
|
+
def _hsepd_manual(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """Average the logarithmic probability densities of the heteroskedastic skewed
    exponential power distribution for the given parameters and data."""
    densities = hsepd_pdf(
        sigma1=sigma1,
        sigma2=sigma2,
        xi=xi,
        beta=beta,
        sim=sim,
        obs=obs,
        skip_nan=skip_nan,
        subperiod=subperiod,
    )
    # Floor tiny densities at 1e-200 to keep the logarithm finite (this bounds
    # the result at roughly -460).
    numpy.clip(densities, 1e-200, None, out=densities)
    return cast(float, numpy.mean(numpy.log(densities)))
|
|
1238
|
+
|
|
1239
|
+
|
|
1240
|
+
@overload
def hsepd_manual(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
) -> float:
    """sim and obs as arguments"""


@overload
def hsepd_manual(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    node: devicetools.Node,
    skip_nan: bool = False,
    subperiod: bool = True,
) -> float:
    """node as argument"""


@objecttools.excmessage_decorator(
    "calculate an objective value based on method `hsepd_manual`"
)
def hsepd_manual(
    *,
    sigma1: float,
    sigma2: float,
    xi: float,
    beta: float,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
) -> float:
    """Calculate the mean of the logarithmic probability densities of the
    heteroskedastic skewed exponential power distribution.

    The following examples stem from the documentation of function |hsepd_pdf|, which
    is used by function |hsepd_manual|. The first one deals with a heteroscedastic
    normal distribution:

    >>> from hydpy import hsepd_manual, round_
    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.2,
    ...                     xi=1.0, beta=0.0,
    ...                     sim=numpy.arange(10.0, 41.0),
    ...                     obs=numpy.full(31, 25.0)))
    -3.682842

    Too small probability density values are set to 1e-200 before calculating their
    logarithm (which means that the lowest possible value returned by function
    |hsepd_manual| is approximately -460):

    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.0,
    ...                     xi=1.0, beta=-0.99,
    ...                     sim=numpy.arange(10.0, 41.0),
    ...                     obs=numpy.full(31, 25.0)))
    -209.539335

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |hsepd_manual|.
    """
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    # Work with the prepared arrays only; delete the raw inputs to prevent
    # accidentally using them below.
    del sim, obs
    # The arrays are already prepared, so disable further preparation steps.
    return _hsepd_manual(
        sigma1=sigma1,
        sigma2=sigma2,
        xi=xi,
        beta=beta,
        sim=sim_,
        obs=obs_,
        skip_nan=False,
        subperiod=False,
    )
|
|
1325
|
+
|
|
1326
|
+
|
|
1327
|
+
@overload
def hsepd(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
    inits: Iterable[float] | None = None,
    return_pars: Literal[False] = ...,
    silent: bool = True,
) -> float:
    """sim and obs as arguments, do not return parameters"""
|
|
1339
|
+
|
|
1340
|
+
|
|
1341
|
+
@overload
def hsepd(
    *,
    sim: VectorInputFloat,
    obs: VectorInputFloat,
    skip_nan: bool = False,
    subperiod: bool = False,
    inits: Iterable[float] | None = None,
    return_pars: Literal[True],
    silent: bool = True,
) -> tuple[float, tuple[float, float, float, float]]:
    """sim and obs as arguments, do return parameters"""
|
|
1353
|
+
|
|
1354
|
+
|
|
1355
|
+
@overload
def hsepd(
    *,
    node: devicetools.Node,
    skip_nan: bool = False,
    subperiod: bool = True,
    inits: Iterable[float] | None = None,
    return_pars: Literal[False] = ...,
    silent: bool = True,
) -> float:
    """node as an argument, do not return parameters"""
|
|
1366
|
+
|
|
1367
|
+
|
|
1368
|
+
@overload
def hsepd(
    *,
    node: devicetools.Node,
    skip_nan: bool = False,
    subperiod: bool = True,
    inits: Iterable[float] | None = None,
    return_pars: Literal[True],
    silent: bool = True,
) -> tuple[float, tuple[float, float, float, float]]:
    """node as an argument, do return parameters"""
|
|
1379
|
+
|
|
1380
|
+
|
|
1381
|
+
@objecttools.excmessage_decorator(
    "calculate an objective value based on method `hsepd`"
)
def hsepd(
    *,
    sim: VectorInputFloat | None = None,
    obs: VectorInputFloat | None = None,
    node: devicetools.Node | None = None,
    skip_nan: bool = False,
    subperiod: bool | None = None,
    inits: Iterable[float] | None = None,
    return_pars: bool = False,
    silent: bool = True,
) -> float | tuple[float, tuple[float, float, float, float]]:
    """Calculate the mean of the logarithmic probability densities of the
    heteroskedastic skewed exponential power distribution.

    Function |hsepd| serves the same purpose as function |hsepd_manual| but tries to
    estimate the parameters of the heteroscedastic skewed exponential distribution via
    an optimisation algorithm.  This is shown by generating a random sample.  One
    thousand simulated values are scattered around the observed (true) value of 10.0
    with a standard deviation of 2.0:

    >>> import numpy
    >>> numpy.random.seed(0)
    >>> sim = numpy.random.normal(10.0, 2.0, 1000)
    >>> obs = numpy.full(1000, 10.0)

    First, as a reference, we calculate the "true" value based on function
    |hsepd_manual| and the correct distribution parameters:

    >>> from hydpy import hsepd, hsepd_manual, round_
    >>> round_(hsepd_manual(sigma1=0.2, sigma2=0.0, xi=1.0, beta=0.0, sim=sim, obs=obs))
    -2.100093

    When using function |hsepd|, the returned value is even a little "better":

    >>> round_(hsepd(sim=sim, obs=obs))
    -2.09983

    This is due to the deviation of the random sample from its theoretical
    distribution.  This is reflected by small differences between the estimated values
    and the theoretical values of `sigma1` (0.2), `sigma2` (0.0), `xi` (1.0), and
    `beta` (0.0).  The estimated values are returned in the mentioned order by
    enabling the `return_pars` option:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True)
    >>> round_(pars, decimals=5)
    0.19966, 0.0, 0.96836, 0.0188

    There is no guarantee that the numerical optimisation algorithm underlying
    function |hsepd| will always find the parameters resulting in the largest value
    returned by function |hsepd_manual|.  You can increase its robustness (and
    decrease computation time) by supplying close initial parameter values:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True,
    ...                     inits=(0.2, 0.0, 1.0, 0.0))
    >>> round_(pars, decimals=5)
    0.19966, 0.0, 0.96836, 0.0188

    However, the following example shows a case when this strategy results in worse
    results:

    >>> value, pars = hsepd(sim=sim, obs=obs, return_pars=True,
    ...                     inits=(0.0, 0.2, 1.0, 0.0))
    >>> round_(value)
    -2.174492
    >>> round_(pars)
    0.0, 0.213179, 1.705485, 0.505112

    See the documentation on function |prepare_arrays| for some additional instructions
    for using |hsepd|.
    """

    def transform(pars: tuple[float, float, float, float]) -> float:
        """Transform the actual optimisation problem into a function to be minimised
        and apply parameter constraints."""
        # Negate because `fmin` minimises, while we want to maximise the mean
        # log-density.
        sigma1, sigma2, xi, beta = constrain(*pars)
        return -_hsepd_manual(
            sigma1=sigma1,
            sigma2=sigma2,
            xi=xi,
            beta=beta,
            sim=sim_,
            obs=obs_,
            skip_nan=False,
            subperiod=False,
        )

    def constrain(
        sigma1: float, sigma2: float, xi: float, beta: float
    ) -> tuple[float, float, float, float]:
        """Apply constraints on the given parameter values."""
        # Clip into the valid parameter ranges of the distribution (standard
        # deviations non-negative, `xi` in [0.1, 10.0], `beta` in [-0.99, 5.0]).
        return (
            max(sigma1, 0.0),
            max(sigma2, 0.0),
            min(max(xi, 0.1), 10.0),
            min(max(beta, -0.99), 5.0),
        )

    # `prepare_arrays` already handles `skip_nan` and `subperiod`, so the later
    # `_hsepd_manual` calls can pass `False` for both.
    sim_, obs_ = prepare_arrays(
        sim=sim, obs=obs, node=node, skip_nan=skip_nan, subperiod=subperiod
    )
    if inits is None:
        # Rough default starting point used when the caller supplies no estimate.
        inits = [0.1, 0.2, 3.0, 1.0]
    # Unconstrained simplex search; `transform` applies the parameter constraints.
    original_values = optimize.fmin(
        transform, inits, ftol=1e-12, xtol=1e-12, disp=not silent
    )
    # Map the raw optimisation result back into the valid parameter ranges before
    # calculating the final (non-negated) objective value.
    constrained_values = constrain(*original_values)
    result = _hsepd_manual(
        sigma1=constrained_values[0],
        sigma2=constrained_values[1],
        xi=constrained_values[2],
        beta=constrained_values[3],
        sim=sim_,
        obs=obs_,
        skip_nan=False,
        subperiod=False,
    )
    if return_pars:
        return result, constrained_values
    return result
|
|
1503
|
+
|
|
1504
|
+
|
|
1505
|
+
@objecttools.excmessage_decorator("calculate the weighted mean time")
def calc_mean_time(timepoints: VectorInputFloat, weights: VectorInputFloat) -> float:
    """Return the weighted mean of the given time points.

    With equal given weights, the result is simply the mean of the given time points:

    >>> from hydpy import calc_mean_time, round_
    >>> round_(calc_mean_time(timepoints=[3.0, 7.0], weights=[2.0, 2.0]))
    5.0

    With different weights, the resulting time is shifted to the larger ones:

    >>> round_(calc_mean_time(timepoints=[3.0, 7.0], weights=[1.0, 3.0]))
    6.0

    Or, in the most extreme case:

    >>> round_(calc_mean_time(timepoints=[3.0, 7.0], weights=[0.0, 4.0]))
    7.0

    There are some checks for input plausibility, e.g.:

    >>> calc_mean_time(timepoints=[3.0, 7.0], weights=[-2.0, 2.0])
    Traceback (most recent call last):
    ...
    ValueError: While trying to calculate the weighted mean time, the following error \
occurred: For the following objects, at least one value is negative: weights.
    """
    # Convert both inputs once so that vectorised operations work on any sequence.
    times = numpy.asarray(timepoints)
    wts = numpy.asarray(weights)
    # Validate before computing; the keyword names appear in the error messages.
    validtools.test_equal_shape(timepoints=times, weights=wts)
    validtools.test_non_negative(weights=wts)
    weighted_sum = numpy.dot(times, wts)
    return cast(float, weighted_sum / numpy.sum(wts))
|
|
1538
|
+
|
|
1539
|
+
|
|
1540
|
+
@objecttools.excmessage_decorator(
    "calculate the weighted time deviation from mean time"
)
def calc_mean_time_deviation(
    timepoints: VectorInputFloat,
    weights: VectorInputFloat,
    mean_time: float | None = None,
) -> float:
    """Return the weighted deviation of the given timepoints from their mean time.

    With equal given weights, the result is simply the standard deviation of the given
    time points:

    >>> from hydpy import calc_mean_time_deviation, round_
    >>> round_(calc_mean_time_deviation(timepoints=[3.0, 7.0], weights=[2.0, 2.0]))
    2.0

    One can pass a precalculated mean time:

    >>> from hydpy import round_
    >>> round_(calc_mean_time_deviation(
    ...     timepoints=[3.0, 7.0], weights=[2.0, 2.0], mean_time=4.0))
    2.236068

    >>> round_(calc_mean_time_deviation(timepoints=[3.0, 7.0], weights=[1.0, 3.0]))
    1.732051

    Or, in the most extreme case:

    >>> round_(calc_mean_time_deviation(timepoints=[3.0, 7.0], weights=[0.0, 4.0]))
    0.0

    There are some checks for input plausibility, e.g.:

    >>> calc_mean_time_deviation(timepoints=[3.0, 7.0], weights=[-2.0, 2.0])
    Traceback (most recent call last):
    ...
    ValueError: While trying to calculate the weighted time deviation from mean time, \
the following error occurred: For the following objects, at least one value is \
negative: weights.
    """
    timepoints_ = numpy.asarray(timepoints)
    weights_ = numpy.asarray(weights)
    # Delete the original references to prevent accidental use of the unconverted
    # arguments below.
    del timepoints, weights
    validtools.test_equal_shape(timepoints=timepoints_, weights=weights_)
    validtools.test_non_negative(weights=weights_)
    if mean_time is None:
        mean_time = calc_mean_time(timepoints_, weights_)
    # Weighted root-mean-square deviation around `mean_time`.
    return cast(
        float,
        numpy.sqrt(
            numpy.dot(weights_, (timepoints_ - mean_time) ** 2) / numpy.sum(weights_)
        ),
    )
|
|
1594
|
+
|
|
1595
|
+
|
|
1596
|
+
def calc_weights(nodes: Collection[devicetools.Node]) -> dict[devicetools.Node, float]:
    """Calculate "statistical" weights for all given nodes based on the number of
    observations within the evaluation period.

    >>> from hydpy import calc_weights, nan, Node, print_vector, pub
    >>> pub.timegrids = "01.01.2000", "04.01.2000", "1d"
    >>> test1, test2 = Node("test1"), Node("test2")
    >>> test1.prepare_obsseries()
    >>> test1.sequences.obs.series = 4.0, 5.0, 6.0
    >>> test2.prepare_obsseries()
    >>> with pub.options.checkseries(False):
    ...     test2.sequences.obs.series = 3.0, nan, 1.0

    >>> print_vector(calc_weights((test1, test2)).values())
    0.6, 0.4

    >>> pub.timegrids.eval_.lastdate = "03.01.2000"
    >>> print_vector(calc_weights((test1, test2)).values())
    0.666667, 0.333333

    >>> pub.timegrids.eval_.firstdate = "02.01.2000"
    >>> print_vector(calc_weights((test1, test2)).values())
    1.0, 0.0

    >>> print_vector(calc_weights((test1,)).values())
    1.0

    >>> print_vector(calc_weights((test2,)).values())
    Traceback (most recent call last):
    ...
    RuntimeError: None of the given nodes (test2) provides any observation values for \
the current evaluation period (Timegrid("02.01.2000 00:00:00", "03.01.2000 00:00:00", \
"1d")).

    >>> calc_weights(())
    {}
    """
    # Count the non-nan observation values of each node within the evaluation period.
    counts = [sum(~numpy.isnan(node.sequences.obs.evalseries)) for node in nodes]
    total = sum(counts)
    if (len(nodes) > 0) and (total == 0):
        names = objecttools.enumeration(n.name for n in nodes)
        raise RuntimeError(
            f"None of the given nodes ({names}) provides any observation values for "
            f"the current evaluation period ({hydpy.pub.timegrids.eval_})."
        )
    # For an empty `nodes` collection, the comprehension is empty, so the zero
    # division cannot occur.
    return {node: count / total for node, count in zip(nodes, counts)}
|
|
1644
|
+
|
|
1645
|
+
|
|
1646
|
+
class SummaryRow(abc.ABC):
    """Abstract base class for |SummaryRowSimple| and |SummaryRowWeighted|.

    The documentation on function |print_evaluationtable| explains the intended use of
    the available |SummaryRow| subclasses.  Here, we demonstrate their configuration in
    more detail based on the subclass |SummaryRowSimple|, which calculates simple
    (non-weighted) averages.  You only need to pass the name and the node objects
    relevant for the corresponding row for initialising:

    >>> from hydpy import Nodes, print_vector, SummaryRowSimple
    >>> n1, n2, n3 = Nodes("n1", "n2", "n3")
    >>> s = SummaryRowSimple("s", (n1, n2))

    |print_evaluationtable| calculates values for all node-criterion combinations and
    passes them to |SummaryRow.summarise_criteria|.  If the nodes passed to
    |print_evaluationtable| and the |SummaryRow| instance are identical,
    |SummaryRowSimple| just calculates the average for each criterion:

    >>> print_vector(s.summarise_criteria(2, {n1: [1.0, 2.0], n2: [3.0, 6.0]}))
    2.0, 4.0

    Nodes passed to |print_evaluationtable| but not to |SummaryRow| are considered
    irrelevant for the corresponding row and thus not taken into account for averaging:

    >>> print_vector(s.summarise_criteria(1, {n1: [1.0], n2: [3.0], n3: [5.0]}))
    2.0

    If the |SummaryRow| instance expects a node not passed to |print_evaluationtable|,
    it raises the following error:

    >>> print_vector(s.summarise_criteria(1, {n1: [1.0]}))
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to calculate the values of row `s` based on class \
`SummaryRowSimple`, the following error occurred: Missing information for node `n2`.

    |SummaryRow.summarise_criteria| generally returns |numpy.nan| values for all
    |SummaryRow| instances that select no nodes:

    >>> SummaryRowSimple("s", ()).summarise_criteria(2, {n1: [1.0, 2.0]})
    (nan, nan)
    """

    name: str  # description printed in the table's first column
    _nodes: tuple[devicetools.Node, ...]  # nodes relevant for this summary row

    def __init__(self, name: str, nodes: Collection[devicetools.Node]) -> None:
        self.name = name
        self._nodes = tuple(nodes)

    def summarise_criteria(
        self, nmb_criteria: int, node2values: Mapping[devicetools.Node, Sequence[float]]
    ) -> tuple[float, ...]:
        """Summarise the results of all criteria."""
        # Rows selecting no nodes yield nan for every criterion (see class docstring).
        if len(self._nodes) == 0:
            return tuple(nmb_criteria * [numpy.nan])
        try:
            summaries = []
            for idx in range(nmb_criteria):
                # Collect the idx-th criterion value for each relevant node.
                node2value = {}
                for node in self._nodes:
                    try:
                        node2value[node] = node2values[node][idx]
                    except KeyError:
                        # The caller did not supply values for a node this row
                        # requires; `from None` hides the raw KeyError.
                        raise RuntimeError(
                            f"Missing information for node `{node.name}`."
                        ) from None
                summaries.append(self.summarise_criterion(node2value))
            return tuple(summaries)
        except BaseException:
            # `augment_excmessage` re-raises the active exception with an extended
            # message (see the RuntimeError example in the class docstring).
            objecttools.augment_excmessage(
                f"While trying to calculate the values of row `{self.name}` based on "
                f"class `{type(self).__name__}`"
            )

    @abc.abstractmethod
    def summarise_criterion(
        self, node2value: Mapping[devicetools.Node, float]
    ) -> float:
        """Summarise the values of a specific criterion."""
|
|
1726
|
+
|
|
1727
|
+
|
|
1728
|
+
class SummaryRowSimple(SummaryRow):
    """Helper to define additional "summary rows" in evaluation tables based on simple
    (non-weighted) averages.

    See the documentation on class |SummaryRow| for further information.
    """

    def summarise_criterion(
        self, node2value: Mapping[devicetools.Node, float]
    ) -> float:
        """Calculate the simple (non-weighted) average of all selected nodes."""
        relevant = [node2value[node] for node in self._nodes]
        return sum(relevant) / len(relevant)
|
|
1740
|
+
|
|
1741
|
+
|
|
1742
|
+
class SummaryRowWeighted(SummaryRow):
    """Helper to define additional "summary rows" in evaluation tables based on
    weighted averages.

    The documentation on class |SummaryRow| provides general information on using
    |SummaryRow| subclasses, while the following examples focus on the unique features
    of class |SummaryRowWeighted|.

    First, we prepare two nodes.  `n1` provides a complete and `n2` provides an
    incomplete observation time series:

    >>> from hydpy import print_vector, pub, Node, nan
    >>> pub.timegrids = "2000-01-01", "2000-01-04", "1d"
    >>> n1, n2 = Node("n1"), Node("n2")
    >>> n1.prepare_obsseries()
    >>> n1.sequences.obs.series = 4.0, 5.0, 6.0
    >>> n2.prepare_obsseries()
    >>> with pub.options.checkseries(False):
    ...     n2.sequences.obs.series = 3.0, nan, 1.0

    We can pass predefined weighting coefficients to |SummaryRowWeighted|.  Then, the
    completeness of the observation series is irrelevant:

    >>> sumrow = SummaryRowWeighted("sumrow", (n1, n2), (0.1, 0.9))
    >>> print_vector(sumrow.summarise_criteria(2, {n1: [-1.0, 2.0], n2: [1.0, 6.0]}))
    0.8, 5.6

    If we do not pass any weights, |SummaryRowWeighted| determines them automatically
    based on the number of available observations per node by invoking function
    |calc_weights|:

    >>> sumrow = SummaryRowWeighted("sumrow", (n1, n2))
    >>> print_vector(sumrow.summarise_criteria(2, {n1: [-1.0, 2.0], n2: [1.0, 6.0]}))
    -0.2, 3.6

    |SummaryRowWeighted| reuses the internally calculated weights but updates them when
    the evaluation time grid changes in the meantime:

    >>> pub.timegrids.eval_.firstdate = "2000-01-02"
    >>> print_vector(sumrow.summarise_criteria(2, {n1: [-1.0, 2.0], n2: [1.0, 6.0]}))
    -0.333333, 3.333333

    |nan| values calculated for individual nodes due to completely missing observations
    within the evaluation period do not leak into the results of
    |SummaryRow.summarise_criteria| (if the corresponding weights are zero, as they
    should):

    >>> pub.timegrids.eval_.lastdate = "2000-01-03"
    >>> print_vector(sumrow.summarise_criteria(2, {n1: [-1.0, 2.0], n2: [nan, nan]}))
    -1.0, 2.0
    """

    _node2weight: dict[devicetools.Node, float]  # current weight of each node
    _predefined: bool  # True when the user supplied fixed weights
    _evaltimegrid: timetools.Timegrid  # grid the automatic weights refer to

    def __init__(
        self,
        name: str,
        nodes: Collection[devicetools.Node],
        weights: Collection[float] | None = None,
    ) -> None:
        # `SummaryRow.__init__` already stores `name` and `tuple(nodes)`, so no
        # further node handling is required here.
        super().__init__(name=name, nodes=nodes)
        # Remember the current evaluation grid to detect later changes.
        self._evaltimegrid = copy.deepcopy(hydpy.pub.timegrids.eval_)
        if weights is None:
            self._predefined = False
            self._node2weight = calc_weights(nodes)
        else:
            self._predefined = True
            self._node2weight = dict(zip(self._nodes, weights))

    def summarise_criterion(
        self, node2value: Mapping[devicetools.Node, float]
    ) -> float:
        """Calculate the weighted average of all selected nodes."""
        # Automatically determined weights become stale when the evaluation period
        # changes; recalculate them on demand (predefined weights never change).
        if not self._predefined and (self._evaltimegrid != hydpy.pub.timegrids.eval_):
            self._node2weight = calc_weights(self._nodes)
            self._evaltimegrid = copy.deepcopy(hydpy.pub.timegrids.eval_)
        # Skip zero-weighted nodes entirely so that their (possibly nan) values
        # cannot leak into the weighted sum.
        return sum(
            w * node2value[n] if w > 0.0 else 0.0 for n, w in self._node2weight.items()
        )
|
|
1824
|
+
|
|
1825
|
+
|
|
1826
|
+
# Overload: evaluate the original (non-aggregated) time series.
@overload
def print_evaluationtable(
    *,
    nodes: Collection[devicetools.Node],
    criteria: Collection[Criterion],
    nodenames: Collection[str] | None = None,
    critnames: Collection[str] | None = None,
    critfactors: Collection1[float] = 1.0,
    critdigits: Collection1[int] = 2,
    subperiod: bool = True,
    average: bool = True,
    averagename: str = "mean",
    summaryrows: Collection[SummaryRow] = (),
    filter_: float = 0.0,
    missingvalue: str = "-",
    decimalseperator: str = ".",
    file_: str | TextIO | None = None,
) -> None: ...
|
|
1844
|
+
|
|
1845
|
+
|
|
1846
|
+
# Overload: evaluate time series aggregated via `stepsize` and `aggregator`.
@overload
def print_evaluationtable(
    *,
    nodes: Collection[devicetools.Node],
    criteria: Collection[Criterion],
    nodenames: Collection[str] | None = None,
    critnames: Collection[str] | None = None,
    critfactors: Collection1[float] = 1.0,
    critdigits: Collection1[int] = 2,
    subperiod: bool = True,
    average: bool = True,
    averagename: str = "mean",
    summaryrows: Collection[SummaryRow] = (),
    filter_: float = 0.0,
    stepsize: Literal["daily", "d", "monthly", "m"] = "daily",
    aggregator: str | Callable[[VectorInputFloat], float] = "mean",
    missingvalue: str = "-",
    decimalseperator: str = ".",
    file_: str | TextIO | None = None,
) -> None: ...
|
|
1866
|
+
|
|
1867
|
+
|
|
1868
|
+
@objecttools.excmessage_decorator(
    "evaluate the simulation results of some node objects"
)
def print_evaluationtable(
    *,
    nodes: Collection[devicetools.Node],
    criteria: Collection[Criterion],
    nodenames: Collection[str] | None = None,
    critnames: Collection[str] | None = None,
    critfactors: Collection1[float] = 1.0,
    critdigits: Collection1[int] = 2,
    subperiod: bool = True,
    average: bool = True,
    averagename: str = "mean",
    summaryrows: Collection[SummaryRow] = (),
    filter_: float = 0.0,
    stepsize: Literal["daily", "d", "monthly", "m"] | None = None,
    aggregator: str | Callable[[VectorInputFloat], float] = "mean",
    missingvalue: str = "-",
    decimalseperator: str = ".",
    file_: str | TextIO | None = None,
) -> None:
    """Print a table containing the results of the given evaluation criteria for the
    given |Node| objects.

    First, we define two nodes with different simulation and observation data (see
    function |prepare_arrays| for some explanations):

    >>> from hydpy import pub, Node, nan
    >>> pub.timegrids = "01.01.2000", "04.01.2000", "1d"
    >>> nodes = Node("test1"), Node("test2")
    >>> for node in nodes:
    ...     node.prepare_allseries()
    >>> nodes[0].sequences.sim.series = 1.0, 2.0, 3.0
    >>> nodes[0].sequences.obs.series = 4.0, 5.0, 6.0
    >>> nodes[1].sequences.sim.series = 1.0, 2.0, 3.0
    >>> with pub.options.checkseries(False):
    ...     nodes[1].sequences.obs.series = 3.0, nan, 1.0

    Selecting functions |corr| and |bias_abs| as evaluation criteria, function
    |print_evaluationtable| prints the following table:

    >>> from hydpy import bias_abs, corr, print_evaluationtable
    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs))
           corr   bias_abs
    test1  1.00   -3.00
    test2  -1.00  0.00
    mean   0.00   -1.50

    One can pass alternative names for the node objects, the criteria functions, and
    the row containing the average values.  Also, one can use the `filter_` argument to
    suppress printing statistics in case of incomplete observation data.  In the
    following example, we set the minimum fraction of required data to 80 %:

    >>> print_evaluationtable(nodes=nodes,
    ...                       criteria=(corr, bias_abs),
    ...                       nodenames=("first node", "second node"),
    ...                       critnames=("corrcoef", "bias"),
    ...                       critdigits=1,
    ...                       averagename="average",
    ...                       filter_=0.8)  # doctest: +NORMALIZE_WHITESPACE
                 corrcoef  bias
    first node   1.0       -3.0
    second node  -         -
    average      1.0       -3.0

    The number of assigned node objects and criteria functions must match the number of
    given alternative names:

    >>> print_evaluationtable(nodes=nodes,
    ...                       criteria=(corr, bias_abs),
    ...                       nodenames=("first node",))
    Traceback (most recent call last):
    ...
    ValueError: While trying to evaluate the simulation results of some node objects, \
the following error occurred: 2 node objects are given which does not match with \
number of given alternative names being 1.

    >>> print_evaluationtable(nodes=nodes,
    ...                       criteria=(corr, bias_abs),
    ...                       critnames=("corrcoef",))
    Traceback (most recent call last):
    ...
    ValueError: While trying to evaluate the simulation results of some node objects, \
the following error occurred: 2 criteria functions are given which does not match with \
number of given alternative names being 1.

    Set the `average` argument to |False| to omit the row containing the average values:

    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       average=False)
           corr   bias_abs
    test1  1.00   -3.00
    test2  -1.00  0.00

    The `summaryrows` argument is a more flexible alternative to the standard averaging
    across nodes.  You can pass an arbitrary number of |SummaryRow| instances.  Their
    names define the descriptions in the first column.  Here, we include additional
    lines giving the complete averages for all nodes, averages for a subset of nodes
    (in fact, the "average" for the single node `test2`), automatically weighted
    averages (based on the number of available observations), and manually weighted
    averages (based on predefined weights):

    >>> from hydpy import SummaryRowSimple, SummaryRowWeighted
    >>> summaryrows = (SummaryRowSimple("complete", nodes),
    ...                SummaryRowSimple("selective", (nodes[1],)),
    ...                SummaryRowWeighted("automatically weighted", nodes),
    ...                SummaryRowWeighted("manually weighted", nodes, (0.1, 0.9)))
    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       average=False,
    ...                       summaryrows=summaryrows)
                            corr   bias_abs
    test1                   1.00   -3.00
    test2                   -1.00  0.00
    complete                0.00   -1.50
    selective               -1.00  0.00
    automatically weighted  0.20   -1.80
    manually weighted       -0.80  -0.30

    You can use the arguments `critfactors` and `critdigits` by passing either a single
    number or a sequence of criteria-specific numbers to modify the printed values:

    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       critfactors=(10.0, 0.1),
    ...                       critdigits=1)
           corr   bias_abs
    test1  10.0   -0.3
    test2  -10.0  0.0
    mean   0.0    -0.2

    By default, function |print_evaluationtable| prints the statistics relevant for the
    actual evaluation period only:

    >>> pub.timegrids.eval_.dates = "01.01.2000", "02.01.2000"
    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs))
           corr  bias_abs
    test1  -     -3.00
    test2  -     -2.00
    mean   -     -2.50

    You can deviate from this default behaviour by setting the `subperiod` argument to
    |False|:

    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       subperiod=False)
           corr   bias_abs
    test1  1.00   -3.00
    test2  -1.00  0.00
    mean   0.00   -1.50

    Use the `stepsize` argument (eventually in combination with argument `aggregator`)
    to print the statistics of previously aggregated time series.  See
    |aggregate_series| for further information.

    Here, the daily aggregation step size results in identical results as the original
    step size is also one day:

    >>> pub.timegrids.eval_ = pub.timegrids.init
    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       stepsize="daily",
    ...                       aggregator="mean")
           corr   bias_abs
    test1  1.00   -3.00
    test2  -1.00  0.00
    mean   0.00   -1.50

    For the monthly step size, the result table is empty due to the too short
    initialisation period covering less than a month:

    >>> pub.timegrids.eval_.dates = pub.timegrids.init.dates
    >>> print_evaluationtable(nodes=nodes,  # doctest: +NORMALIZE_WHITESPACE
    ...                       criteria=(corr, bias_abs),
    ...                       stepsize="monthly",
    ...                       aggregator="mean")
           corr  bias_abs
    test1  -     -
    test2  -     -
    mean   -     -
    """
    # Validate or derive the row labels (one per node).
    if nodenames:
        if len(nodes) != len(nodenames):
            raise ValueError(
                f"{len(nodes)} node objects are given which does not match with "
                f"number of given alternative names being {len(nodenames)}."
            )
    else:
        nodenames = [node.name for node in nodes]
    # Validate or derive the column labels (one per criterion).
    if critnames:
        if len(criteria) != len(critnames):
            raise ValueError(
                f"{len(criteria)} criteria functions are given which does not match "
                f"with number of given alternative names being {len(critnames)}."
            )
    else:
        critnames = [getattr(crit, "__name__", str(crit)) for crit in criteria]
    # Expand scalar factors/digits to one entry per criterion.
    if isinstance(critfactors, float):
        critfactors = len(criteria) * (critfactors,)
    if isinstance(critdigits, int):
        critdigits = len(criteria) * (critdigits,)
    formats = tuple(f"%.{d}f" for d in critdigits)
    # `node2values` feeds the summary rows; `data` feeds the per-node rows and the
    # average row (with possibly filtered entries).
    node2values: collections.defaultdict[devicetools.Node, list[float]]
    node2values = collections.defaultdict(list)
    data = numpy.empty((len(nodes), len(criteria)), dtype=config.NP_FLOAT)
    for idx, node in enumerate(nodes):
        if stepsize is not None:
            # Evaluate aggregated (e.g. daily or monthly) series.
            sim = seriestools.aggregate_series(
                series=node.sequences.sim.series,
                stepsize=stepsize,
                aggregator=aggregator,
                subperiod=subperiod,
            ).values
            obs = seriestools.aggregate_series(
                series=node.sequences.obs.series,
                stepsize=stepsize,
                aggregator=aggregator,
                subperiod=subperiod,
            ).values
        else:
            # Evaluate the original series.
            sim, obs = prepare_arrays(node=node, skip_nan=False, subperiod=subperiod)
        # Fraction of non-nan observation values; 0.0 for an empty series.
        availability = 0.0 if len(obs) == 0 else 1.0 - sum(numpy.isnan(obs)) / len(obs)
        if availability > 0.0:
            for criterion, critfactor in zip(criteria, critfactors):
                value = critfactor * criterion(sim=sim, obs=obs, skip_nan=True)
                node2values[node].append(value)
        else:
            node2values[node] = len(criteria) * [numpy.nan]
        # Mask rows that fall below the required data fraction (`filter_`).
        data[idx, :] = numpy.nan if availability < filter_ else node2values[node]

    def _write(x: str, ys: Iterable[str], printtarget_: TextIO) -> None:
        # Write one tab-separated table row, applying the decimal separator.
        printtarget_.write(f"{x}\t")
        printtarget_.write("\t".join(ys).replace(".", decimalseperator))
        printtarget_.write("\n")

    def _nmbs2strs(numbers: Iterable[float]) -> Generator[str, None, None]:
        # Format each number with its criterion-specific precision; nan values
        # become the configured `missingvalue` placeholder.
        return (
            (f % n).replace(".", decimalseperator).replace("nan", missingvalue)
            for n, f in zip(numbers, formats)
        )

    with objecttools.get_printtarget(file_) as printtarget:
        _write("", critnames, printtarget)
        for nodename, row in zip(nodenames, data):
            _write(nodename, _nmbs2strs(row), printtarget)
        if average:
            with warnings.catch_warnings():
                # All-nan columns would trigger "Mean of empty slice" warnings.
                warnings.filterwarnings("ignore", "Mean of empty slice")
                mean = _nmbs2strs(numpy.nanmean(data, axis=0))
                _write(averagename, mean, printtarget)
        for summaryrow in summaryrows:
            values = summaryrow.summarise_criteria(len(criteria), node2values)
            _write(summaryrow.name, _nmbs2strs(values), printtarget)
|