webviz-subsurface 0.2.39__py3-none-any.whl → 0.2.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. webviz_subsurface/_figures/timeseries_figure.py +1 -1
  2. webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_lazy.py +3 -1
  3. webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_presampled.py +3 -1
  4. webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_impl_arrow.py +3 -1
  5. webviz_subsurface/_utils/dataframe_utils.py +1 -1
  6. webviz_subsurface/_version.py +34 -0
  7. webviz_subsurface/plugins/_bhp_qc/views/_view_functions.py +5 -5
  8. webviz_subsurface/plugins/_co2_leakage/_utilities/co2volume.py +1 -1
  9. webviz_subsurface/plugins/_disk_usage.py +19 -8
  10. webviz_subsurface/plugins/_line_plotter_fmu/controllers/build_figure.py +4 -4
  11. webviz_subsurface/plugins/_map_viewer_fmu/map_viewer_fmu.py +1 -1
  12. webviz_subsurface/plugins/_parameter_analysis/_utils/_parameters_model.py +5 -5
  13. webviz_subsurface/plugins/_property_statistics/property_statistics.py +1 -1
  14. webviz_subsurface/plugins/_relative_permeability.py +6 -6
  15. webviz_subsurface/plugins/_reservoir_simulation_timeseries_regional.py +12 -12
  16. webviz_subsurface/plugins/_running_time_analysis_fmu.py +6 -1
  17. webviz_subsurface/plugins/_seismic_misfit.py +2 -3
  18. webviz_subsurface/plugins/_simulation_time_series/_views/_subplot_view/_utils/vector_statistics.py +4 -4
  19. webviz_subsurface/plugins/_structural_uncertainty/views/intersection_and_map.py +1 -1
  20. webviz_subsurface/plugins/_swatinit_qc/_business_logic.py +1 -1
  21. webviz_subsurface-0.2.40.dist-info/METADATA +822 -0
  22. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info}/RECORD +27 -78
  23. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info}/WHEEL +1 -1
  24. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info}/top_level.txt +0 -1
  25. tests/integration_tests/__init__.py +0 -0
  26. tests/integration_tests/test_parameter_filter.py +0 -28
  27. tests/integration_tests/test_surface_selector.py +0 -53
  28. tests/unit_tests/__init__.py +0 -0
  29. tests/unit_tests/abbreviations_tests/__init__.py +0 -0
  30. tests/unit_tests/abbreviations_tests/test_reservoir_simulation.py +0 -94
  31. tests/unit_tests/data_input/__init__.py +0 -0
  32. tests/unit_tests/data_input/test_calc_from_cumulatives.py +0 -178
  33. tests/unit_tests/data_input/test_image_processing.py +0 -11
  34. tests/unit_tests/mocks/__init__.py +0 -0
  35. tests/unit_tests/mocks/ensemble_summary_provider_dummy.py +0 -67
  36. tests/unit_tests/model_tests/__init__.py +0 -0
  37. tests/unit_tests/model_tests/test_ensemble_model.py +0 -176
  38. tests/unit_tests/model_tests/test_ensemble_set_model.py +0 -105
  39. tests/unit_tests/model_tests/test_gruptree_model.py +0 -89
  40. tests/unit_tests/model_tests/test_property_statistics_model.py +0 -42
  41. tests/unit_tests/model_tests/test_surface_set_model.py +0 -48
  42. tests/unit_tests/model_tests/test_well_attributes_model.py +0 -110
  43. tests/unit_tests/model_tests/test_well_set_model.py +0 -70
  44. tests/unit_tests/plugin_tests/__init__.py +0 -0
  45. tests/unit_tests/plugin_tests/test_grouptree.py +0 -175
  46. tests/unit_tests/plugin_tests/test_simulation_time_series/__init__.py +0 -0
  47. tests/unit_tests/plugin_tests/test_simulation_time_series/mocks/__init__.py +0 -0
  48. tests/unit_tests/plugin_tests/test_simulation_time_series/mocks/derived_vectors_accessor_ensemble_summary_provider_mock.py +0 -60
  49. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/__init__.py +0 -0
  50. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_create_vector_traces_utils.py +0 -530
  51. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_dataframe_utils.py +0 -119
  52. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_datetime_utils.py +0 -51
  53. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_delta_ensemble_utils.py +0 -222
  54. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_delta_ensemble_vectors_accessor_impl.py +0 -319
  55. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_ensemble_vectors_accessor_impl.py +0 -271
  56. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_ensemble_vectors_accessor_utils.py +0 -78
  57. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_vector_accessor.py +0 -57
  58. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_ensemble_summary_provider_set_utils.py +0 -213
  59. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_from_timeseries_cumulatives.py +0 -322
  60. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_history_vectors.py +0 -201
  61. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_trace_line_shape.py +0 -56
  62. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_vector_statistics.py +0 -171
  63. tests/unit_tests/plugin_tests/test_tornado_data.py +0 -130
  64. tests/unit_tests/plugin_tests/test_well_completions.py +0 -158
  65. tests/unit_tests/provider_tests/__init__.py +0 -0
  66. tests/unit_tests/provider_tests/test_ensemble_summary_provider.py +0 -255
  67. tests/unit_tests/provider_tests/test_ensemble_summary_provider_impl_arrow_lazy.py +0 -388
  68. tests/unit_tests/provider_tests/test_ensemble_summary_provider_impl_arrow_presampled.py +0 -160
  69. tests/unit_tests/provider_tests/test_ensemble_summary_provider_resampling.py +0 -320
  70. tests/unit_tests/provider_tests/test_ensemble_table_provider.py +0 -190
  71. tests/unit_tests/utils_tests/__init__.py +0 -0
  72. tests/unit_tests/utils_tests/test_dataframe_utils.py +0 -281
  73. tests/unit_tests/utils_tests/test_ensemble_summary_provider_set/__init__.py +0 -0
  74. tests/unit_tests/utils_tests/test_ensemble_summary_provider_set/test_ensemble_summary_provider_set.py +0 -306
  75. tests/unit_tests/utils_tests/test_formatting.py +0 -10
  76. tests/unit_tests/utils_tests/test_simulation_timeseries.py +0 -51
  77. webviz_subsurface-0.2.39.dist-info/METADATA +0 -147
  78. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info}/entry_points.txt +0 -0
  79. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info/licenses}/LICENSE +0 -0
  80. {webviz_subsurface-0.2.39.dist-info → webviz_subsurface-0.2.40.dist-info/licenses}/LICENSE.chromedriver +0 -0
webviz_subsurface/_figures/timeseries_figure.py

```diff
@@ -211,7 +211,7 @@ class TimeSeriesFigure:
             .groupby(["DATE"])
             .agg(
                 [
-                    ("Mean", np.nanmean),
+                    ("Mean", "mean"),
                     ("P10", lambda x: np.nanpercentile(x, q=90)),
                     ("P90", lambda x: np.nanpercentile(x, q=10)),
                 ]
```
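This change, repeated across several plugins below, tracks a pandas 2.x deprecation: passing NumPy reducers such as `np.nanmean` to `.agg` triggers a FutureWarning, while the string aliases resolve to pandas' built-in NaN-skipping implementations. It also changes the generated column labels from `"nanmean"`/`"nanmin"`/`"nanmax"` to `"mean"`/`"min"`/`"max"`, which is why the rename maps and column lookups in later hunks are updated as well. A minimal sketch of the equivalence:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"DATE": ["2020-01-01"] * 2, "FOPR": [1.0, np.nan]})

# The string "mean" resolves to pandas' NaN-skipping mean, so it behaves
# like np.nanmean did here, without the FutureWarning newer pandas emits
# for NumPy callables passed to .agg:
stats = df.groupby("DATE")["FOPR"].agg([("Mean", "mean")])
print(stats)  # Mean == 1.0; the NaN is ignored
```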
webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_lazy.py

```diff
@@ -160,7 +160,9 @@ class ProviderImplArrowLazy(EnsembleSummaryProvider):
             f"{len(unique_column_names)} unique column names"
         )

-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)
```
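This hunk, and the identical ones in the next two files, follows a pyarrow API migration: newer pyarrow releases deprecate the boolean `promote` flag of `pa.concat_tables` in favor of `promote_options`, where `"default"` reproduces the schema unification that `promote=True` used to give. A sketch, assuming pyarrow ≥ 14:

```python
import pyarrow as pa

t1 = pa.table({"REAL": [0, 0], "FOPR": [1.0, 2.0]})
t2 = pa.table({"REAL": [1, 1]})  # realization missing the FOPR column

# promote_options="default" unifies the schemas, filling columns that are
# absent from a table with nulls -- the old promote=True behavior:
full = pa.concat_tables([t1, t2], promote_options="default")
print(full.column("FOPR"))  # 1.0, 2.0, null, null
```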
webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_presampled.py

```diff
@@ -214,7 +214,9 @@ class ProviderImplArrowPresampled(EnsembleSummaryProvider):
         )

         timer.lap_s()
-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)
```
webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_impl_arrow.py

```diff
@@ -101,7 +101,9 @@ class EnsembleTableProviderImplArrow(EnsembleTableProvider):
             f"{len(unique_column_names)} unique column names"
         )

-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)
```
webviz_subsurface/_utils/dataframe_utils.py

```diff
@@ -82,7 +82,7 @@ def make_date_column_datetime_object(df: pd.DataFrame) -> None:
     # pylint: disable = unidiomatic-typecheck
     if type(sampled_date_value) == pd.Timestamp:
         df["DATE"] = pd.Series(
-            df["DATE"].dt.to_pydatetime(), dtype=object, index=df.index
+            np.array(df["DATE"].dt.to_pydatetime()), dtype=object, index=df.index
         )
         return None

```
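The `np.array(...)` wrapper guards against another pandas deprecation: `.dt.to_pydatetime()` has historically returned an ndarray of `datetime.datetime` objects, and pandas warns that a future version will return a `Series` instead. Converting explicitly yields the same object array on either side of the change. A sketch:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"DATE": pd.to_datetime(["2020-01-01", "2020-07-01"])})

# np.array() gives an object array whether .dt.to_pydatetime() returns an
# ndarray (current pandas) or a Series (announced future behavior), so the
# column ends up holding plain datetime.datetime objects either way:
df["DATE"] = pd.Series(
    np.array(df["DATE"].dt.to_pydatetime()), dtype=object, index=df.index
)
print(type(df["DATE"].iloc[0]))  # <class 'datetime.datetime'>
```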
webviz_subsurface/_version.py (new file)

```diff
@@ -0,0 +1,34 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple
+    from typing import Union
+
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
+else:
+    VERSION_TUPLE = object
+    COMMIT_ID = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
+
+__version__ = version = '0.2.40'
+__version_tuple__ = version_tuple = (0, 2, 40)
+
+__commit_id__ = commit_id = None
```
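As its header notes, this module is generated by setuptools-scm at build time; the new file simply pins the released version. Downstream code can read it directly:

```python
# The generated module exposes the package version at import time:
from webviz_subsurface._version import __version__, version_tuple

print(__version__)    # "0.2.40"
print(version_tuple)  # (0, 2, 40)
```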
webviz_subsurface/plugins/_bhp_qc/views/_view_functions.py

```diff
@@ -41,15 +41,15 @@ def calc_statistics(df: pd.DataFrame) -> pd.DataFrame:
     # Calculate statistics, ignoring NaNs.
     stat_df = (
         df.groupby("ENSEMBLE")
-        .agg([np.nanmean, "count", np.nanstd, np.nanmin, np.nanmax, p10, p50, p90])
+        .agg(["mean", "count", "std", "min", "max", p10, p50, p90])
         .reset_index(drop=True, level="ENSEMBLE")
     )
     # Rename nanmin, nanmax and nanmean to min, max and mean.
     col_stat_label_map = {
-        "nanmin": "min",
-        "nanmax": "max",
-        "nanmean": "mean",
-        "nanstd": "std",
+        "min": "min",
+        "max": "max",
+        "mean": "mean",
+        "std": "std",
         "p10": "high_p10",
         "p90": "low_p90",
     }
```
webviz_subsurface/plugins/_co2_leakage/_utilities/co2volume.py

```diff
@@ -623,7 +623,7 @@ def _add_hover_info_in_field(
         for date in dates
     }
     prev_vals = {date: 0 for date in dates}
-    date_dict = spaced_dates(dates, 4)
+    date_dict = spaced_dates(dates, 4)  # type: ignore[arg-type]
     for name, color in zip(cat_ord["type"], colors):
         sub_df = df[df["type"] == name]
         for date in dates:
```
webviz_subsurface/plugins/_disk_usage.py

```diff
@@ -43,15 +43,25 @@ class DiskUsage(WebvizPluginABC):
         self.scratch_dir = scratch_dir
         self.date_input = date
         self.disk_usage = get_disk_usage(self.scratch_dir, self.date_input)
-        self.date = str(self.disk_usage["date"][0])
+
         self.theme = webviz_settings.theme

     @property
     def layout(self) -> html.Div:
+        if self.disk_usage.empty:
+            return html.Div(
+                [
+                    wcc.Header(
+                        f"No disk usage data found for {self.scratch_dir}.",
+                        style={"text-align": "center"},
+                    ),
+                ]
+            )
+        date = str(self.disk_usage["date"][0]) if not self.disk_usage.empty else None
         return html.Div(
             [
                 wcc.Header(
-                    f"Disk usage on {self.scratch_dir} per user as of {self.date}",
+                    f"Disk usage on {self.scratch_dir} per user as of {date}",
                     style={"text-align": "center"},
                 ),
                 wcc.FlexBox(
@@ -129,10 +139,10 @@ def get_disk_usage(scratch_dir: Path, date: Optional[str]) -> pd.DataFrame:
         df, date = _loop_dates(scratch_dir)
     else:
         df = _get_disk_usage_for_date(scratch_dir, date)
-    if df is None:
-        raise FileNotFoundError(
-            f"No disk usage file found for {date} in {scratch_dir}."
-        )
+
+    # Return early if no data is found
+    if df.empty:
+        return df

     df.rename(
         columns={"usageKB": "usageKiB"}, inplace=True
@@ -174,7 +184,7 @@ def get_disk_usage(scratch_dir: Path, date: Optional[str]) -> pd.DataFrame:
     return df.sort_values(by="usageGiB", axis=0, ascending=False)


-def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> Optional[pd.DataFrame]:
+def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> pd.DataFrame:
     csv_file = scratch_dir / ".disk_usage" / f"disk_usage_user_test_{date}.csv"
     if csv_file.exists():
         return pd.read_csv(csv_file)
@@ -182,7 +192,8 @@ def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> pd.DataFrame:
     csv_file = scratch_dir / ".disk_usage" / f"disk_usage_user_{date}.csv"
     if csv_file.exists():
         return pd.read_csv(csv_file)
-    return None
+    # Create empty DataFrame with expected columns if no file is found
+    return pd.DataFrame(columns=["userid", "usageGiB", "date"])


 def _loop_dates(scratch_dir: Path) -> Tuple[pd.DataFrame, str]:
```
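Taken together, these four hunks replace `None` returns and the `FileNotFoundError` with an empty, correctly-typed DataFrame, so both `get_disk_usage` and the plugin layout reduce to a single `df.empty` check. A minimal sketch of the sentinel pattern (the helper and path are hypothetical):

```python
import pandas as pd

# Sketch of the empty-DataFrame sentinel used above: a frame with the
# expected columns stands in for None, so every caller can use the same
# .empty check instead of None checks or try/except blocks.
def load_usage(csv_path: str) -> pd.DataFrame:
    try:
        return pd.read_csv(csv_path)
    except FileNotFoundError:
        return pd.DataFrame(columns=["userid", "usageGiB", "date"])

df = load_usage("missing.csv")  # hypothetical path
if df.empty:
    print("No disk usage data found")
```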
webviz_subsurface/plugins/_line_plotter_fmu/controllers/build_figure.py

```diff
@@ -256,14 +256,14 @@ def calc_series_statistics(
     stat_df = (
         df[["ENSEMBLE", refaxis] + vectors]
         .groupby(["ENSEMBLE", refaxis])
-        .agg([np.nanmean, np.nanmin, np.nanmax, p10, p90])
+        .agg(["mean", "min", "max", p10, p90])
         .reset_index()  # level=["label", refaxis], col_level=0)
     )
     # Rename nanmin, nanmax and nanmean to min, max and mean.
     col_stat_label_map = {
-        "nanmin": "min",
-        "nanmax": "max",
-        "nanmean": "mean",
+        "min": "min",
+        "max": "max",
+        "mean": "mean",
         "p10": "high_p10",
         "p90": "low_p90",
     }
```
webviz_subsurface/plugins/_map_viewer_fmu/map_viewer_fmu.py

```diff
@@ -106,7 +106,7 @@ color-tables.json for color_tables format.
         rel_surface_folder: str = "share/results/maps",
         color_tables: Path = None,
         hillshading_enabled: bool = None,
-        render_surfaces_as_images: bool = True,
+        render_surfaces_as_images: bool = False,
     ):
         super().__init__()
         self._hillshading = hillshading_enabled
```
webviz_subsurface/plugins/_parameter_analysis/_utils/_parameters_model.py

```diff
@@ -78,15 +78,15 @@ class ParametersModel:
         .groupby(["ENSEMBLE"])
         .agg(
             [
-                ("Avg", np.mean),
-                ("Stddev", np.std),
+                ("Avg", "mean"),
+                ("Stddev", "std"),
                 ("P10", lambda x: np.percentile(x, 10)),
                 ("P90", lambda x: np.percentile(x, 90)),
-                ("Min", np.min),
-                ("Max", np.max),
+                ("Min", "min"),
+                ("Max", "max"),
             ]
         )
-        .stack(0)
+        .stack(0, future_stack=True)
         .rename_axis(["ENSEMBLE", "PARAMETER"])
         .reset_index()
     )
```
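`future_stack=True` (available from pandas 2.1, also used in `_relative_permeability.py` below) opts into the new `stack` implementation ahead of the default switch, silencing the FutureWarning emitted by the legacy code path. A sketch:

```python
import pandas as pd

# Two statistics for one parameter, as a two-level column MultiIndex:
df = pd.DataFrame(
    [[1.0, 2.0]],
    columns=pd.MultiIndex.from_tuples([("PARAM_A", "Avg"), ("PARAM_A", "Max")]),
)

# Stacking level 0 moves the parameter name into the index; future_stack=True
# selects the implementation pandas will eventually make the default:
print(df.stack(0, future_stack=True))
```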
webviz_subsurface/plugins/_property_statistics/property_statistics.py

```diff
@@ -61,7 +61,7 @@ FMU format.
 **Using raw ensemble data stored in realization folders**
 * **`ensembles`:** Which ensembles in `shared_settings` to visualize.
 * **`rel_file_pattern`:** path to `.arrow` files with summary data.
-* **`statistic_file`:** Csv file for each realization with property statistics. See the \
+* **`statistics_file`:** Csv file for each realization with property statistics. See the \
 documentation in [fmu-tools](http://fmu-docs.equinor.com/) on how to generate this data.
 * **`column_keys`:** List of vectors to extract. If not given, all vectors \
 from the simulations will be extracted. Wild card asterisk `*` can be used.
```
webviz_subsurface/plugins/_relative_permeability.py

```diff
@@ -982,8 +982,8 @@ def add_statistic_traces(df, color_by, curves, sataxis, colors, nplots):
     ]
     df_stat = (
         satnum_df_shared_axis.groupby(sataxis)
-        .agg([np.nanmean, np.nanmin, np.nanmax, p10, p90])
-        .stack()
+        .agg(["mean", "min", "max", p10, p90])
+        .stack(future_stack=True)
         .swaplevel()
     )
     for curve_no, curve in enumerate(curves):
@@ -1032,7 +1032,7 @@ def _get_fanchart_traces(
     """Renders a fanchart"""

     # Retrieve indices from one of the keys in series
-    x = curve_stats["nanmax"].index.tolist()
+    x = curve_stats["max"].index.tolist()
     data = FanchartData(
         samples=x,
         low_high=LowHighData(
@@ -1042,10 +1042,10 @@ def _get_fanchart_traces(
             high_name="P10",
         ),
         minimum_maximum=MinMaxData(
-            minimum=curve_stats["nanmin"].values,
-            maximum=curve_stats["nanmax"].values,
+            minimum=curve_stats["min"].values,
+            maximum=curve_stats["max"].values,
         ),
-        free_line=FreeLineData("Mean", curve_stats["nanmean"].values),
+        free_line=FreeLineData("Mean", curve_stats["mean"].values),
     )

     hovertemplate = f"{curve} <br>" f"Ensemble: {ens}, Satnum: {satnum}"
```
webviz_subsurface/plugins/_reservoir_simulation_timeseries_regional.py

```diff
@@ -1015,10 +1015,10 @@ def render_table(
     table.append(
         {
             "Group": ens,
-            "Minimum": df["nanmin"].iat[0],
-            "Maximum": df["nanmax"].iat[0],
-            "Mean": df["nanmean"].iat[0],
-            "Stddev": df["nanstd"].iat[0],
+            "Minimum": df["min"].iat[0],
+            "Maximum": df["max"].iat[0],
+            "Mean": df["mean"].iat[0],
+            "Stddev": df["std"].iat[0],
             "P10": df["p10"].iat[0],
             "P90": df["p90"].iat[0],
         }
@@ -1031,10 +1031,10 @@ def render_table(
     table.append(
         {
             "Group": col.split("_filtered_on_")[-1],
-            "Minimum": df["nanmin"].iat[0],
-            "Maximum": df["nanmax"].iat[0],
-            "Mean": df["nanmean"].iat[0],
-            "Stddev": df["nanstd"].iat[0],
+            "Minimum": df["min"].iat[0],
+            "Maximum": df["max"].iat[0],
+            "Mean": df["mean"].iat[0],
+            "Stddev": df["std"].iat[0],
             "P10": df["p10"].iat[0],
             "P90": df["p90"].iat[0],
         }
@@ -1259,7 +1259,7 @@ def calc_statistics(df: pd.DataFrame) -> pd.DataFrame:
     stat_dfs.append(
         ens_df.drop(columns=["REAL", "ENSEMBLE"])
         .groupby("DATE", as_index=False)
-        .agg([np.nanmean, np.nanstd, np.nanmin, np.nanmax, p10, p90])
+        .agg(["mean", "std", "min", "max", p10, p90])
         .reset_index()
         .assign(ENSEMBLE=ens)
     )
@@ -1327,10 +1327,10 @@ def _get_fanchart_traces(
             high_name="P10",
         ),
         minimum_maximum=MinMaxData(
-            minimum=stat_df[column]["nanmin"].values,
-            maximum=stat_df[column]["nanmax"].values,
+            minimum=stat_df[column]["min"].values,
+            maximum=stat_df[column]["max"].values,
         ),
-        free_line=FreeLineData("Mean", stat_df[column]["nanmean"].values),
+        free_line=FreeLineData("Mean", stat_df[column]["mean"].values),
     )

     hovertemplate = f"{legend_group}"
```
webviz_subsurface/plugins/_running_time_analysis_fmu.py

```diff
@@ -602,7 +602,12 @@ def make_status_df(
     # Load each json-file to a DataFrame for the realization
     with open(row.FULLPATH) as fjson:
         status_dict = json.load(fjson)
-    real_df = pd.DataFrame(status_dict["jobs"])
+    if "steps" in status_dict:
+        real_df = pd.DataFrame(status_dict["steps"])
+    elif "jobs" in status_dict:
+        real_df = pd.DataFrame(status_dict["jobs"])
+    else:
+        raise KeyError(f"Neither 'steps' nor 'jobs' found in {status_file}")

     # If new ensemble, calculate ensemble scaled runtimes
     # for previous ensemble and reset temporary ensemble data
```
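The fallback chain handles both layouts of the realization status file, presumably tracking a rename in that format: newer files nest the forward-model entries under a `"steps"` key, older ones under `"jobs"`, and anything else fails loudly. A sketch with hypothetical file contents:

```python
import json
import pandas as pd

# Hypothetical status.json payloads: newer files use "steps", older "jobs".
new_style = '{"steps": [{"name": "COPY_FILE", "status": "Success"}]}'
old_style = '{"jobs": [{"name": "COPY_FILE", "status": "Success"}]}'

for raw in (new_style, old_style):
    status_dict = json.loads(raw)
    key = "steps" if "steps" in status_dict else "jobs"
    print(pd.DataFrame(status_dict[key]))  # same frame from either layout
```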
webviz_subsurface/plugins/_seismic_misfit.py

```diff
@@ -235,8 +235,7 @@ class SeismicMisfit(WebvizPluginABC):

         obsinfo = _compare_dfs_obs(self.dframeobs[attribute_name], self.ens_names)
         self.caseinfo = (
-            f"{self.caseinfo}Attribute: {attribute_name}"
-            f"\n{obsinfo}\n-----------\n"
+            f"{self.caseinfo}Attribute: {attribute_name}\n{obsinfo}\n-----------\n"
         )

         # get sorted list of unique region values
@@ -2441,7 +2440,7 @@ def update_obs_sim_map_plot(
         mode="markers",
         marker={
             "size": marker_size,
-            "color": ensdf_stat["coverage"],
+            "color": ensdf_stat[coverage],
             "cmin": -1.0,
             "cmax": 2.0,
             "colorscale": SEISMIC_COVERAGE,
```
webviz_subsurface/plugins/_simulation_time_series/_views/_subplot_view/_utils/vector_statistics.py

```diff
@@ -63,15 +63,15 @@ def create_vectors_statistics_df(vectors_df: pd.DataFrame) -> pd.DataFrame:
     statistics_df: pd.DataFrame = (
         vectors_df[["DATE"] + vector_names]
         .groupby(["DATE"])
-        .agg([np.nanmean, np.nanmin, np.nanmax, p10, p90, p50])
+        .agg(["mean", "min", "max", p10, p90, p50])
         .reset_index(level=["DATE"], col_level=0)
     )

     # Rename columns to StatisticsOptions enum types for strongly typed format
     col_stat_label_map = {
-        "nanmin": StatisticsOptions.MIN,
-        "nanmax": StatisticsOptions.MAX,
-        "nanmean": StatisticsOptions.MEAN,
+        "mean": StatisticsOptions.MEAN,
+        "min": StatisticsOptions.MIN,
+        "max": StatisticsOptions.MAX,
         "p10": StatisticsOptions.P10,
         "p90": StatisticsOptions.P90,
         "p50": StatisticsOptions.P50,
```
webviz_subsurface/plugins/_structural_uncertainty/views/intersection_and_map.py

```diff
@@ -115,7 +115,7 @@ def map_layout(
                     "disabled": False,
                     "label": "Hillshading",
                 },
-                **props
+                **props,
             ),
         ),
     ],
```
webviz_subsurface/plugins/_swatinit_qc/_business_logic.py

```diff
@@ -208,7 +208,7 @@ class SwatinitQcDataModel:
         qc_vols[qc_cat.value] = 0.0

     # Overwrite dict values with correct figures:
-    for qc_cat, qc_df in dframe.groupby("QC_FLAG"):
+    for qc_cat, qc_df in dframe.groupby("QC_FLAG", observed=False):
         qc_vols[qc_cat] = (
             (qc_df["SWAT"] - qc_df["SWATINIT"]) * qc_df["PORV"]
         ).sum()
```
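Passing `observed=False` explicitly pins the current behavior for the categorical `QC_FLAG` key: every category gets a group, even those with no rows, which matters here because `qc_vols` is pre-seeded with `0.0` per category. pandas 2.2 warns that the default will flip to `observed=True`. A sketch:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "QC_FLAG": pd.Categorical(
            ["PC_SCALED"], categories=["PC_SCALED", "SWL_TRUNC"]
        ),
        "PORV": [1.0],
    }
)

# observed=False yields one group per category, including the unobserved
# SWL_TRUNC (which sums to 0.0), and silences the FutureWarning about the
# default changing to observed=True:
print(df.groupby("QC_FLAG", observed=False)["PORV"].sum())
```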