modverif-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modverif-0.1.0/.gitignore ADDED
@@ -0,0 +1,119 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ env/
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+
28
+ # PyInstaller
29
+ # Usually these files are written by a python script from a template
30
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
31
+ *.manifest
32
+ *.spec
33
+
34
+ # Installer logs
35
+ pip-log.txt
36
+ pip-delete-this-directory.txt
37
+
38
+ # Unit test / coverage reports
39
+ htmlcov/
40
+ .tox/
41
+ .coverage
42
+ .coverage.*
43
+ .cache
44
+ nosetests.xml
45
+ coverage.xml
46
+ *.cover
47
+ .hypothesis/
48
+ .pytest_cache/
49
+ junit/
50
+ junit.xml
51
+ test.db
52
+
53
+ # Translations
54
+ *.mo
55
+ *.pot
56
+
57
+ # Django stuff:
58
+ *.log
59
+ local_settings.py
60
+
61
+ # Flask stuff:
62
+ instance/
63
+ .webassets-cache
64
+
65
+ # Scrapy stuff:
66
+ .scrapy
67
+
68
+ # Sphinx documentation
69
+ docs/_build/
70
+
71
+ # PyBuilder
72
+ target/
73
+
74
+ # Jupyter Notebook
75
+ .ipynb_checkpoints
76
+
77
+ # pyenv
78
+ .python-version
79
+
80
+ # celery beat schedule file
81
+ celerybeat-schedule
82
+
83
+ # SageMath parsed files
84
+ *.sage.py
85
+
86
+ # dotenv
87
+ .env
88
+
89
+ # virtualenv
90
+ .venv
91
+ venv/
92
+ ENV/
93
+ .ruff*
94
+
95
+ # Spyder project settings
96
+ .spyderproject
97
+ .spyproject
98
+
99
+ # Rope project settings
100
+ .ropeproject
101
+
102
+ # mkdocs documentation
103
+ /site
104
+
105
+ # mypy
106
+ .mypy_cache/
107
+
108
+ # .vscode
109
+ .vscode/
110
+
111
+ # OS files
112
+ .DS_Store
113
+
114
+ # Temp data
115
+ data/*
116
+
117
+ # Test config files
118
+ /modverif/tests/*.toml
119
+ /modverif/tests/*.yml
modverif-0.1.0/LICENSE ADDED
@@ -0,0 +1,16 @@
1
+ Apache Software License 2.0
2
+
3
+ Copyright (c) 2026, Mike Kittridge
4
+
5
+ Licensed under the Apache License, Version 2.0 (the "License");
6
+ you may not use this file except in compliance with the License.
7
+ You may obtain a copy of the License at
8
+
9
+ http://www.apache.org/licenses/LICENSE-2.0
10
+
11
+ Unless required by applicable law or agreed to in writing, software
12
+ distributed under the License is distributed on an "AS IS" BASIS,
13
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ See the License for the specific language governing permissions and
15
+ limitations under the License.
16
+
modverif-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,148 @@
1
+ Metadata-Version: 2.4
2
+ Name: modverif
3
+ Version: 0.1.0
4
+ Project-URL: Documentation, https://mullenkamp.github.io/modverif/
5
+ Project-URL: Source, https://github.com/mullenkamp/modverif
6
+ Author-email: mullenkamp <mullenkamp1@gmail.com>
7
+ License-File: LICENSE
8
+ Classifier: Programming Language :: Python :: 3 :: Only
9
+ Requires-Python: >=3.10
10
+ Requires-Dist: cartopy
11
+ Requires-Dist: cfdb
12
+ Requires-Dist: h5py
13
+ Requires-Dist: matplotlib
14
+ Requires-Dist: numpy>2
15
+ Requires-Dist: pyproj
16
+ Requires-Dist: rechunkit>=0.2.1
17
+ Requires-Dist: scipy
18
+ Description-Content-Type: text/markdown
19
+
20
+ # modverif
21
+
22
+ [![build](https://github.com/mullenkamp/modverif/workflows/Build/badge.svg)](https://github.com/mullenkamp/modverif/actions)
23
+ [![codecov](https://codecov.io/gh/mullenkamp/modverif/branch/master/graph/badge.svg)](https://codecov.io/gh/mullenkamp/modverif)
24
+ [![PyPI version](https://badge.fury.io/py/modverif.svg)](https://badge.fury.io/py/modverif)
25
+
26
+ ---
27
+
28
+ **Documentation**: <a href="https://mullenkamp.github.io/modverif/" target="_blank">https://mullenkamp.github.io/modverif/</a>
29
+
30
+ **Source Code**: <a href="https://github.com/mullenkamp/modverif" target="_blank">https://github.com/mullenkamp/modverif</a>
31
+
32
+ ---
33
+
34
+ A Python package for evaluating multidimensional model output, following [MET/METplus](https://dtcenter.org/community-code/model-evaluation-tools-met) standards for meteorological verification. All data I/O uses the [cfdb](https://github.com/mullenkamp/cfdb) format.
35
+
36
+ ## Features
37
+
38
+ ### Grid-to-Grid Evaluation (`Evaluator`)
39
+
40
+ Compare two gridded model runs (e.g., WRF outputs):
41
+
42
+ - **Cell-level metrics**: NE, ANE, RSE, Bias, MAE, POD, FAR, CSI, GSS, Frequency Bias
43
+ - **Domain-aggregated metrics**: NE, ANE, RMSE, Bias, Pearson correlation, POD, FAR, CSI, GSS, Frequency Bias
44
+ - **Fractions Skill Score (FSS)**: Multi-scale spatial verification for precipitation and other threshold-based fields
45
+ - **Vector wind metrics**: Vector RMSE, wind speed bias, wind direction bias from U/V components
46
+ - **Diurnal cycle analysis**: Metrics grouped by hour-of-day
47
+ - **Spatial subsetting**: Bounding box or 2D boolean mask
48
+ - **Time filtering**: Start/end time bounds
49
+
50
+ ### Grid-to-Point Evaluation (`StationEvaluator`)
51
+
52
+ Compare gridded model output to weather station observations:
53
+
54
+ - Automatic grid-to-point interpolation via cfdb's `GridInterp.to_points()`
55
+ - Per-station, per-timestep metrics: Bias, MAE, NE, ANE
56
+ - Per-station aggregated metrics: RMSE, Pearson correlation
57
+ - Station-aggregated summary statistics
58
+ - Height level matching (single-level and multi-level observations)
59
+ - Vector wind evaluation at station locations
60
+ - Diurnal cycle analysis per station
61
+
62
+ ### Cyclone Evaluation
63
+
64
+ Track cyclones independently in two datasets and compare:
65
+
66
+ - Cyclone tracking via SLP pressure minimum
67
+ - Track position, pressure, and radius differences
68
+ - Per-variable metrics within the cyclone region
69
+
70
+ ### Verification Plots
71
+
72
+ Publication-quality plots following MET/METplus conventions:
73
+
74
+ - **Scatter plot**: Model vs observed with 1:1 line, statistics box, density option
75
+ - **Station map**: Geographic map of station metric values (cartopy optional)
76
+ - **Time series**: Model/observation comparison over time
77
+ - **Performance diagram**: POD vs Success Ratio with CSI contours and bias lines (Roebber 2009)
78
+ - **Taylor diagram**: Standard deviation, correlation, and centered RMSE (Taylor 2001)
79
+ - **Diurnal cycle**: Hour-of-day metric comparison
80
+ - **FSS scale plot**: Skill vs neighborhood size
81
+ - **Wind rose comparison**: Side-by-side model/observed wind roses
82
+
83
+ ## Quick Start
84
+
85
+ ```python
86
+ from modverif import Evaluator, StationEvaluator
87
+
88
+ # Grid-to-grid evaluation
89
+ evaluator = Evaluator('source.cfdb', 'test.cfdb')
90
+ evaluator.evaluate_domain('output.cfdb', variables=['air_temperature'], metrics=['bias', 'rmse', 'pearson'])
91
+
92
+ # Grid-to-point evaluation
93
+ station_eval = StationEvaluator(
94
+ 'model.cfdb', 'stations.cfdb',
95
+ variable_heights={'air_temperature': 2.0, 'wind_speed': 10.0},
96
+ )
97
+ station_eval.evaluate('station_output.cfdb', variables=['air_temperature'], metrics=['bias', 'rmse'])
98
+
99
+ # FSS evaluation
100
+ evaluator.evaluate_fss('fss_output.cfdb', variables=['precipitation'], threshold=1.0)
101
+
102
+ # Vector wind evaluation
103
+ evaluator.evaluate_wind('wind_output.cfdb', metrics=['vector_rmse', 'speed_bias'])
104
+ ```
105
+
106
+ Convenience functions are also available:
107
+
108
+ ```python
109
+ from modverif.evaluate import (
110
+ evaluate_models_cell,
111
+ evaluate_models_domain,
112
+ evaluate_stations,
113
+ evaluate_fss,
114
+ evaluate_wind,
115
+ )
116
+ ```
117
+
118
+ ### Plotting
119
+
120
+ ```python
121
+ from modverif.plots import plot_scatter, plot_station_map, plot_performance_diagram
122
+
123
+ plot_scatter(model_values, obs_values, save_path='scatter.png', variable_name='Temperature', units='K')
124
+ plot_station_map(lons, lats, bias_values, save_path='map.png', metric_name='Bias')
125
+ plot_performance_diagram([0.85, 0.72], [0.15, 0.28], labels=['WRF-A', 'WRF-B'])
126
+ ```
127
+
128
+ ## Installation
129
+
130
+ ```bash
131
+ pip install modverif
132
+ ```
133
+
134
+ Or with UV:
135
+
136
+ ```bash
137
+ uv add modverif
138
+ ```
139
+
140
+ ## Dependencies
141
+
142
+ - Python >= 3.10
143
+ - cfdb, numpy, scipy, matplotlib, pyproj
144
+ - cartopy (optional, for geographic map projections)
145
+
146
+ ## License
147
+
148
+ This project is licensed under the terms of the Apache Software License 2.0.
modverif-0.1.0/README.md ADDED
@@ -0,0 +1,129 @@
1
+ # modverif
2
+
3
+ [![build](https://github.com/mullenkamp/modverif/workflows/Build/badge.svg)](https://github.com/mullenkamp/modverif/actions)
4
+ [![codecov](https://codecov.io/gh/mullenkamp/modverif/branch/master/graph/badge.svg)](https://codecov.io/gh/mullenkamp/modverif)
5
+ [![PyPI version](https://badge.fury.io/py/modverif.svg)](https://badge.fury.io/py/modverif)
6
+
7
+ ---
8
+
9
+ **Documentation**: <a href="https://mullenkamp.github.io/modverif/" target="_blank">https://mullenkamp.github.io/modverif/</a>
10
+
11
+ **Source Code**: <a href="https://github.com/mullenkamp/modverif" target="_blank">https://github.com/mullenkamp/modverif</a>
12
+
13
+ ---
14
+
15
+ A Python package for evaluating multidimensional model output, following [MET/METplus](https://dtcenter.org/community-code/model-evaluation-tools-met) standards for meteorological verification. All data I/O uses the [cfdb](https://github.com/mullenkamp/cfdb) format.
16
+
17
+ ## Features
18
+
19
+ ### Grid-to-Grid Evaluation (`Evaluator`)
20
+
21
+ Compare two gridded model runs (e.g., WRF outputs):
22
+
23
+ - **Cell-level metrics**: NE, ANE, RSE, Bias, MAE, POD, FAR, CSI, GSS, Frequency Bias
24
+ - **Domain-aggregated metrics**: NE, ANE, RMSE, Bias, Pearson correlation, POD, FAR, CSI, GSS, Frequency Bias
25
+ - **Fractions Skill Score (FSS)**: Multi-scale spatial verification for precipitation and other threshold-based fields
26
+ - **Vector wind metrics**: Vector RMSE, wind speed bias, wind direction bias from U/V components
27
+ - **Diurnal cycle analysis**: Metrics grouped by hour-of-day
28
+ - **Spatial subsetting**: Bounding box or 2D boolean mask
29
+ - **Time filtering**: Start/end time bounds
30
+
31
+ ### Grid-to-Point Evaluation (`StationEvaluator`)
32
+
33
+ Compare gridded model output to weather station observations:
34
+
35
+ - Automatic grid-to-point interpolation via cfdb's `GridInterp.to_points()`
36
+ - Per-station, per-timestep metrics: Bias, MAE, NE, ANE
37
+ - Per-station aggregated metrics: RMSE, Pearson correlation
38
+ - Station-aggregated summary statistics
39
+ - Height level matching (single-level and multi-level observations)
40
+ - Vector wind evaluation at station locations
41
+ - Diurnal cycle analysis per station
42
+
43
+ ### Cyclone Evaluation
44
+
45
+ Track cyclones independently in two datasets and compare:
46
+
47
+ - Cyclone tracking via SLP pressure minimum
48
+ - Track position, pressure, and radius differences
49
+ - Per-variable metrics within the cyclone region
50
+
51
+ ### Verification Plots
52
+
53
+ Publication-quality plots following MET/METplus conventions:
54
+
55
+ - **Scatter plot**: Model vs observed with 1:1 line, statistics box, density option
56
+ - **Station map**: Geographic map of station metric values (cartopy optional)
57
+ - **Time series**: Model/observation comparison over time
58
+ - **Performance diagram**: POD vs Success Ratio with CSI contours and bias lines (Roebber 2009)
59
+ - **Taylor diagram**: Standard deviation, correlation, and centered RMSE (Taylor 2001)
60
+ - **Diurnal cycle**: Hour-of-day metric comparison
61
+ - **FSS scale plot**: Skill vs neighborhood size
62
+ - **Wind rose comparison**: Side-by-side model/observed wind roses
63
+
64
+ ## Quick Start
65
+
66
+ ```python
67
+ from modverif import Evaluator, StationEvaluator
68
+
69
+ # Grid-to-grid evaluation
70
+ evaluator = Evaluator('source.cfdb', 'test.cfdb')
71
+ evaluator.evaluate_domain('output.cfdb', variables=['air_temperature'], metrics=['bias', 'rmse', 'pearson'])
72
+
73
+ # Grid-to-point evaluation
74
+ station_eval = StationEvaluator(
75
+ 'model.cfdb', 'stations.cfdb',
76
+ variable_heights={'air_temperature': 2.0, 'wind_speed': 10.0},
77
+ )
78
+ station_eval.evaluate('station_output.cfdb', variables=['air_temperature'], metrics=['bias', 'rmse'])
79
+
80
+ # FSS evaluation
81
+ evaluator.evaluate_fss('fss_output.cfdb', variables=['precipitation'], threshold=1.0)
82
+
83
+ # Vector wind evaluation
84
+ evaluator.evaluate_wind('wind_output.cfdb', metrics=['vector_rmse', 'speed_bias'])
85
+ ```
86
+
87
+ Convenience functions are also available:
88
+
89
+ ```python
90
+ from modverif.evaluate import (
91
+ evaluate_models_cell,
92
+ evaluate_models_domain,
93
+ evaluate_stations,
94
+ evaluate_fss,
95
+ evaluate_wind,
96
+ )
97
+ ```
98
+
99
+ ### Plotting
100
+
101
+ ```python
102
+ from modverif.plots import plot_scatter, plot_station_map, plot_performance_diagram
103
+
104
+ plot_scatter(model_values, obs_values, save_path='scatter.png', variable_name='Temperature', units='K')
105
+ plot_station_map(lons, lats, bias_values, save_path='map.png', metric_name='Bias')
106
+ plot_performance_diagram([0.85, 0.72], [0.15, 0.28], labels=['WRF-A', 'WRF-B'])
107
+ ```
108
+
109
+ ## Installation
110
+
111
+ ```bash
112
+ pip install modverif
113
+ ```
114
+
115
+ Or with UV:
116
+
117
+ ```bash
118
+ uv add modverif
119
+ ```
120
+
121
+ ## Dependencies
122
+
123
+ - Python >= 3.10
124
+ - cfdb, numpy, scipy, matplotlib, pyproj
125
+ - cartopy (optional, for geographic map projections)
126
+
127
+ ## License
128
+
129
+ This project is licensed under the terms of the Apache Software License 2.0.
modverif-0.1.0/modverif/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from modverif.evaluator import Evaluator
2
+ from modverif.station import StationEvaluator
3
+
4
+ __version__ = '0.1.0'