webviz-subsurface 0.2.38-py3-none-any.whl → 0.2.40-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. webviz_subsurface/_components/tornado/_tornado_bar_chart.py +31 -11
  2. webviz_subsurface/_components/tornado/_tornado_data.py +20 -2
  3. webviz_subsurface/_figures/timeseries_figure.py +1 -1
  4. webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_lazy.py +3 -1
  5. webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_presampled.py +3 -1
  6. webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_factory.py +4 -0
  7. webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_impl_arrow.py +3 -1
  8. webviz_subsurface/_utils/dataframe_utils.py +1 -1
  9. webviz_subsurface/_utils/design_matrix.py +36 -0
  10. webviz_subsurface/_version.py +34 -0
  11. webviz_subsurface/plugins/_bhp_qc/views/_view_functions.py +5 -5
  12. webviz_subsurface/plugins/_co2_leakage/_utilities/co2volume.py +1 -1
  13. webviz_subsurface/plugins/_disk_usage.py +19 -8
  14. webviz_subsurface/plugins/_line_plotter_fmu/controllers/build_figure.py +4 -4
  15. webviz_subsurface/plugins/_map_viewer_fmu/map_viewer_fmu.py +1 -1
  16. webviz_subsurface/plugins/_parameter_analysis/_types.py +1 -0
  17. webviz_subsurface/plugins/_parameter_analysis/_utils/_parameters_model.py +15 -7
  18. webviz_subsurface/plugins/_parameter_analysis/_views/_parameter_distributions_view/_settings/_visualization_type.py +2 -1
  19. webviz_subsurface/plugins/_property_statistics/property_statistics.py +1 -1
  20. webviz_subsurface/plugins/_relative_permeability.py +6 -6
  21. webviz_subsurface/plugins/_reservoir_simulation_timeseries_regional.py +12 -12
  22. webviz_subsurface/plugins/_running_time_analysis_fmu.py +6 -1
  23. webviz_subsurface/plugins/_seismic_misfit.py +2 -3
  24. webviz_subsurface/plugins/_simulation_time_series/_views/_subplot_view/_utils/vector_statistics.py +4 -4
  25. webviz_subsurface/plugins/_structural_uncertainty/views/intersection_and_map.py +1 -1
  26. webviz_subsurface/plugins/_swatinit_qc/_business_logic.py +1 -1
  27. webviz_subsurface-0.2.40.dist-info/METADATA +822 -0
  28. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info}/RECORD +33 -83
  29. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info}/WHEEL +1 -1
  30. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info}/top_level.txt +0 -1
  31. tests/integration_tests/__init__.py +0 -0
  32. tests/integration_tests/test_parameter_filter.py +0 -28
  33. tests/integration_tests/test_surface_selector.py +0 -53
  34. tests/unit_tests/__init__.py +0 -0
  35. tests/unit_tests/abbreviations_tests/__init__.py +0 -0
  36. tests/unit_tests/abbreviations_tests/test_reservoir_simulation.py +0 -94
  37. tests/unit_tests/data_input/__init__.py +0 -0
  38. tests/unit_tests/data_input/test_calc_from_cumulatives.py +0 -178
  39. tests/unit_tests/data_input/test_image_processing.py +0 -11
  40. tests/unit_tests/mocks/__init__.py +0 -0
  41. tests/unit_tests/mocks/ensemble_summary_provider_dummy.py +0 -67
  42. tests/unit_tests/model_tests/__init__.py +0 -0
  43. tests/unit_tests/model_tests/test_ensemble_model.py +0 -176
  44. tests/unit_tests/model_tests/test_ensemble_set_model.py +0 -105
  45. tests/unit_tests/model_tests/test_gruptree_model.py +0 -89
  46. tests/unit_tests/model_tests/test_property_statistics_model.py +0 -42
  47. tests/unit_tests/model_tests/test_surface_set_model.py +0 -48
  48. tests/unit_tests/model_tests/test_well_attributes_model.py +0 -110
  49. tests/unit_tests/model_tests/test_well_set_model.py +0 -70
  50. tests/unit_tests/plugin_tests/__init__.py +0 -0
  51. tests/unit_tests/plugin_tests/test_grouptree.py +0 -175
  52. tests/unit_tests/plugin_tests/test_simulation_time_series/__init__.py +0 -0
  53. tests/unit_tests/plugin_tests/test_simulation_time_series/mocks/__init__.py +0 -0
  54. tests/unit_tests/plugin_tests/test_simulation_time_series/mocks/derived_vectors_accessor_ensemble_summary_provider_mock.py +0 -60
  55. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/__init__.py +0 -0
  56. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_create_vector_traces_utils.py +0 -530
  57. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_dataframe_utils.py +0 -119
  58. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_datetime_utils.py +0 -51
  59. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_delta_ensemble_utils.py +0 -222
  60. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_delta_ensemble_vectors_accessor_impl.py +0 -319
  61. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_ensemble_vectors_accessor_impl.py +0 -271
  62. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_ensemble_vectors_accessor_utils.py +0 -78
  63. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_derived_vector_accessor.py +0 -57
  64. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_ensemble_summary_provider_set_utils.py +0 -213
  65. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_from_timeseries_cumulatives.py +0 -322
  66. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_history_vectors.py +0 -201
  67. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_trace_line_shape.py +0 -56
  68. tests/unit_tests/plugin_tests/test_simulation_time_series/test_utils/test_vector_statistics.py +0 -171
  69. tests/unit_tests/plugin_tests/test_tornado_data.py +0 -121
  70. tests/unit_tests/plugin_tests/test_well_completions.py +0 -158
  71. tests/unit_tests/provider_tests/__init__.py +0 -0
  72. tests/unit_tests/provider_tests/test_ensemble_summary_provider.py +0 -255
  73. tests/unit_tests/provider_tests/test_ensemble_summary_provider_impl_arrow_lazy.py +0 -388
  74. tests/unit_tests/provider_tests/test_ensemble_summary_provider_impl_arrow_presampled.py +0 -160
  75. tests/unit_tests/provider_tests/test_ensemble_summary_provider_resampling.py +0 -320
  76. tests/unit_tests/provider_tests/test_ensemble_table_provider.py +0 -190
  77. tests/unit_tests/utils_tests/__init__.py +0 -0
  78. tests/unit_tests/utils_tests/test_dataframe_utils.py +0 -281
  79. tests/unit_tests/utils_tests/test_ensemble_summary_provider_set/__init__.py +0 -0
  80. tests/unit_tests/utils_tests/test_ensemble_summary_provider_set/test_ensemble_summary_provider_set.py +0 -306
  81. tests/unit_tests/utils_tests/test_formatting.py +0 -10
  82. tests/unit_tests/utils_tests/test_simulation_timeseries.py +0 -51
  83. webviz_subsurface-0.2.38.dist-info/METADATA +0 -147
  84. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info}/entry_points.txt +0 -0
  85. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info/licenses}/LICENSE +0 -0
  86. {webviz_subsurface-0.2.38.dist-info → webviz_subsurface-0.2.40.dist-info/licenses}/LICENSE.chromedriver +0 -0

webviz_subsurface/_components/tornado/_tornado_bar_chart.py
@@ -5,7 +5,7 @@ import plotly.graph_objects as go

 from webviz_subsurface._abbreviations.number_formatting import si_prefixed

-from ._tornado_data import TornadoData
+from ._tornado_data import SensitivityType, TornadoData


 class TornadoBarChart:
@@ -136,9 +136,25 @@ class TornadoBarChart:
             hovertext.append(text)
         return hovertext

+    def get_sensitivity_colors(self, case: str) -> List:
+        """Create color list for bars based on sensitivity type
+        If colors are set by sensitivity, just create a color per sensitivty.
+        If not handle scalar and mc sensitivities separately.
+        For scalar, that is sensitivities with two "cases", use separate colors for each case.
+        For mc, use one color.
+        """
+        if self._color_by_sens:
+            return self.create_color_list(self._tornadotable["sensname"])
+        colors = []
+        for _, row in self._tornadotable.iterrows():
+            if row["senstype"] == SensitivityType.MONTE_CARLO or case == "low":
+                colors.append(self._plotly_theme["layout"]["colorway"][0])
+            else:
+                colors.append(self._plotly_theme["layout"]["colorway"][1])
+        return colors
+
     @property
     def data(self) -> List:
-        colors = self.create_color_list(self._tornadotable["sensname"].unique())
         return [
             {
                 "type": "bar",
@@ -157,7 +173,7 @@ class TornadoBarChart:
                 "orientation": "h",
                 "marker": {
                     "line": {"width": 1.5, "color": "black"},
-                    "color": colors if self._color_by_sens else None,
+                    "color": self.get_sensitivity_colors("low"),
                 },
             },
             {
@@ -177,7 +193,7 @@ class TornadoBarChart:
                 "orientation": "h",
                 "marker": {
                     "line": {"width": 1.5, "color": "black"},
-                    "color": colors if self._color_by_sens else None,
+                    "color": self.get_sensitivity_colors("high"),
                 },
             },
         ]
@@ -193,6 +209,15 @@ class TornadoBarChart:

     @property
     def scatter_data(self) -> List[Dict]:
+        def get_color(case_name_arr: pd.Series, case_type_arr: pd.Series) -> List:
+            colors = []
+            for case_name, case_type in zip(case_name_arr, case_type_arr):
+                if case_name == "low" or case_type == SensitivityType.MONTE_CARLO:
+                    colors.append(self._plotly_theme["layout"]["colorway"][0])
+                else:
+                    colors.append(self._plotly_theme["layout"]["colorway"][1])
+            return colors
+
         return [
             {
                 "type": "scatter",
@@ -202,14 +227,9 @@ class TornadoBarChart:
                 "text": df["REAL"],
                 "hovertemplate": "REAL: <b>%{text}</b><br>"
                 + "X: <b>%{x:.1f}</b> <extra></extra>",
-                "marker": {
-                    "size": 15,
-                    "color": self._plotly_theme["layout"]["colorway"][0]
-                    if case == "low"
-                    else self._plotly_theme["layout"]["colorway"][1],
-                },
+                "marker": {"size": 15, "color": get_color(df["case"], df["casetype"])},
             }
-            for case, df in self._realtable.groupby("case")
+            for _, df in self._realtable.groupby("case")
         ]

     @property
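
Note: the refactor above relies on Plotly bar traces accepting either a single
color or a per-bar list in marker.color. A minimal standalone sketch
(hypothetical sensitivity names and colors, not taken from the plugin):

    import plotly.graph_objects as go

    fig = go.Figure(
        go.Bar(
            y=["SENS_A", "SENS_B", "SENS_C"],
            x=[-0.3, 0.5, 0.2],
            orientation="h",
            marker={"color": ["#1f77b4", "#ff7f0e", "#1f77b4"]},  # one entry per bar
        )
    )
    fig.show()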

webviz_subsurface/_components/tornado/_tornado_data.py
@@ -3,6 +3,15 @@ from typing import Dict, List, Optional, Union
 import numpy as np
 import pandas as pd

+from webviz_subsurface._utils.enum_shim import StrEnum
+
+
+class SensitivityType(StrEnum):
+    """Sensitivity types used in Tornado analysis."""
+
+    SCALAR = "scalar"
+    MONTE_CARLO = "mc"
+

 class TornadoData:
     REQUIRED_COLUMNS = ["REAL", "SENSNAME", "SENSCASE", "SENSTYPE", "VALUE"]
@@ -50,7 +59,11 @@ class TornadoData:
     def _create_real_df(self, dframe: pd.DataFrame) -> pd.DataFrame:
         """Make dataframe with value and case info per realization"""
         realdf = dframe[self.REQUIRED_COLUMNS].rename(
-            columns={"SENSNAME": "sensname", "SENSCASE": "senscase"}
+            columns={
+                "SENSNAME": "sensname",
+                "SENSCASE": "senscase",
+                "SENSTYPE": "senstype",
+            }
         )

         sensitivities = self._tornadotable["sensname"].unique()
@@ -61,7 +74,7 @@ class TornadoData:
                 casemask = realdf["REAL"].isin(val[f"real_{case}"])
                 realdf.loc[casemask, "case"] = case

-        mc_mask = realdf["SENSTYPE"] == "mc"
+        mc_mask = realdf["senstype"] == "mc"
         realdf["casetype"] = np.where(mc_mask, "mc", realdf["case"])
         realdf["sensname_case"] = np.where(
             mc_mask,
@@ -127,6 +140,7 @@ class TornadoData:
                             sens_case_df["VALUE"].mean()
                         ),
                         "reals": list(map(int, sens_case_df["REAL"])),
+                        "senstype": SensitivityType.SCALAR,
                     }
                 )
            # If `SENSTYPE` is monte carlo get p10, p90
@@ -162,6 +176,7 @@ class TornadoData:
                     "values": p90,
                     "values_ref": self._scale_to_ref(p90),
                     "reals": low_reals,
+                    "senstype": SensitivityType.MONTE_CARLO,
                 }
             )
             avg_per_sensitivity.append(
@@ -171,6 +186,7 @@ class TornadoData:
                     "values": p10,
                     "values_ref": self._scale_to_ref(p10),
                     "reals": high_reals,
+                    "senstype": SensitivityType.MONTE_CARLO,
                 }
             )

@@ -198,6 +214,7 @@ class TornadoData:
             high["reals"] = []
             high["senscase"] = None
             high["values"] = self.reference_average
+
         else:
             low = (
                 low.copy()
@@ -218,6 +235,7 @@ class TornadoData:
                 "true_low": low["values"],
                 "low_reals": low["reals"],
                 "sensname": sensname,
+                "senstype": sens_name_df["senstype"].unique()[0],
                 "high": self.calc_high_x(low["values_ref"], high["values_ref"]),
                 "high_base": self.calc_high_base(
                     low["values_ref"], high["values_ref"]

webviz_subsurface/_figures/timeseries_figure.py
@@ -211,7 +211,7 @@ class TimeSeriesFigure:
             .groupby(["DATE"])
             .agg(
                 [
-                    ("Mean", np.nanmean),
+                    ("Mean", "mean"),
                     ("P10", lambda x: np.nanpercentile(x, q=90)),
                     ("P90", lambda x: np.nanpercentile(x, q=10)),
                 ]
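
Note: this hunk and several below replace numpy reducers (np.nanmean,
np.nanstd, ...) passed to DataFrame.agg with pandas' string aggregator names.
Recent pandas versions deprecate passing the numpy functions; the string forms
skip NaN by default and label the result columns "mean"/"std"/"min"/"max"
instead of "nanmean"/"nanstd"/..., which is why the nan* rename maps and column
lookups in later hunks change as well. (Similarly, .stack(future_stack=True)
opts in to the new stack implementation that becomes the default in pandas 3.)
A minimal sketch with made-up data:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"DATE": ["d1", "d1", "d2"], "VALUE": [1.0, np.nan, 3.0]})
    stats = df.groupby("DATE").agg(
        [
            ("Mean", "mean"),  # was ("Mean", np.nanmean)
            ("P10", lambda x: np.nanpercentile(x, q=90)),
        ]
    )
    print(stats["VALUE"]["Mean"])  # NaN skipped: d1 -> 1.0, d2 -> 3.0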

webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_lazy.py
@@ -160,7 +160,9 @@ class ProviderImplArrowLazy(EnsembleSummaryProvider):
                 f"{len(unique_column_names)} unique column names"
             )

-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)
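
Note: pa.concat_tables(..., promote=True) was deprecated in pyarrow 14 and
removed in later releases; promote_options="default" is the replacement and
keeps the old behaviour of unifying differing schemas (missing columns are
filled with nulls). The same substitution appears in the presampled summary
provider and the table provider below. A minimal sketch:

    import pyarrow as pa

    t1 = pa.table({"a": [1, 2]})
    t2 = pa.table({"a": [3], "b": ["x"]})
    # "default" unifies the schemas; rows from t1 get null for column "b"
    full = pa.concat_tables([t1, t2], promote_options="default")
    print(full.schema.names)  # ['a', 'b']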

webviz_subsurface/_providers/ensemble_summary_provider/_provider_impl_arrow_presampled.py
@@ -214,7 +214,9 @@ class ProviderImplArrowPresampled(EnsembleSummaryProvider):
             )

         timer.lap_s()
-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)

webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_factory.py
@@ -10,6 +10,9 @@ from webviz_config.webviz_factory import WebvizFactory
 from webviz_config.webviz_factory_registry import WEBVIZ_FACTORY_REGISTRY
 from webviz_config.webviz_instance_info import WebvizRunMode

+from webviz_subsurface._utils.design_matrix import (
+    rename_design_matrix_parameter_columns,
+)
 from webviz_subsurface._utils.perf_timer import PerfTimer

 from ..ensemble_summary_provider._arrow_unsmry_import import (
@@ -283,6 +286,7 @@ class EnsembleTableProviderFactory(WebvizFactory):
             raise ValueError(
                 f"Failed to load 'parameter.txt' files for ensemble {ens_path}."
             )
+        ensemble_df = rename_design_matrix_parameter_columns(ensemble_df)

         elapsed_load_parameters_s = timer.lap_s()


webviz_subsurface/_providers/ensemble_table_provider/ensemble_table_provider_impl_arrow.py
@@ -101,7 +101,9 @@ class EnsembleTableProviderImplArrow(EnsembleTableProvider):
                 f"{len(unique_column_names)} unique column names"
             )

-        full_table = pa.concat_tables(per_real_tables.values(), promote=True)
+        full_table = pa.concat_tables(
+            per_real_tables.values(), promote_options="default"
+        )
         elapsed.concat_tables_s = timer.lap_s()

         real_arr = np.empty(full_table.num_rows, np.int32)

webviz_subsurface/_utils/dataframe_utils.py
@@ -82,7 +82,7 @@ def make_date_column_datetime_object(df: pd.DataFrame) -> None:
     # pylint: disable = unidiomatic-typecheck
     if type(sampled_date_value) == pd.Timestamp:
         df["DATE"] = pd.Series(
-            df["DATE"].dt.to_pydatetime(), dtype=object, index=df.index
+            np.array(df["DATE"].dt.to_pydatetime()), dtype=object, index=df.index
         )
         return None

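
Note: the np.array(...) wrapper appears aimed at the pandas 2.1+ deprecation of
Series.dt.to_pydatetime(), which is announced to return a Series of datetime
objects instead of an ndarray in a future release; pinning the ndarray keeps
the object-dtype Series construction stable. A sketch under that reading:

    import numpy as np
    import pandas as pd

    dates = pd.Series(pd.to_datetime(["2020-01-01", "2021-01-01"]))
    as_objects = pd.Series(
        np.array(dates.dt.to_pydatetime()), dtype=object, index=dates.index
    )
    print(type(as_objects.iloc[0]))  # <class 'datetime.datetime'>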

webviz_subsurface/_utils/design_matrix.py
@@ -0,0 +1,36 @@
+import logging
+
+import pandas as pd
+
+LOGGER = logging.getLogger(__name__)
+
+
+def rename_design_matrix_parameter_columns(parameter_df: pd.DataFrame) -> pd.DataFrame:
+    """Given a dataframe of parameters, checks if the DESIGN_MATRIX prefix is present.
+    If present assume this is a design matrix run. Return the dataframe with the prefix
+    removed. Also do a check if removing the prefix result in any duplicates.
+    If duplicates remove those and give a warning.
+    """
+
+    if any(col.startswith("DESIGN_MATRIX:") for col in parameter_df.columns):
+        original_columns = parameter_df.columns
+        stripped_columns = original_columns.str.replace(
+            r"^DESIGN_MATRIX:", "", regex=True
+        )
+        rename_map = {
+            old: new
+            for old, new in zip(original_columns, stripped_columns)
+            if old != new
+        }
+        conflict_names = set(rename_map.values()) & set(original_columns)
+        if conflict_names:
+            LOGGER.info(
+                "DESIGN_MATRIX run detected, but non design matrix parameters was found."
+            )
+            LOGGER.info(
+                f"The following parameters will be dropped: {sorted(conflict_names)}"
+            )
+            parameter_df = parameter_df.drop(columns=conflict_names)
+
+        parameter_df = parameter_df.rename(columns=rename_map)
+    return parameter_df
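
Note: a usage sketch for the new helper, with made-up parameter names. A column
whose stripped name collides with an existing unprefixed column is dropped
before renaming, as the conflict handling above describes:

    import pandas as pd
    from webviz_subsurface._utils.design_matrix import (
        rename_design_matrix_parameter_columns,
    )

    params = pd.DataFrame(
        {
            "DESIGN_MATRIX:FWL": [1700, 1710],
            "DESIGN_MATRIX:OWC": [2100, 2090],
            "FWL": [0.0, 0.0],  # conflicts after stripping -> dropped, with log message
        }
    )
    print(rename_design_matrix_parameter_columns(params).columns.tolist())
    # ['FWL', 'OWC']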

webviz_subsurface/_version.py
@@ -0,0 +1,34 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple
+    from typing import Union
+
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
+else:
+    VERSION_TUPLE = object
+    COMMIT_ID = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
+
+__version__ = version = '0.2.40'
+__version_tuple__ = version_tuple = (0, 2, 40)
+
+__commit_id__ = commit_id = None
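
Note: this is the version module that setuptools-scm generates at build time;
consumers can read it at runtime:

    from webviz_subsurface._version import __version__, version_tuple

    print(__version__)    # "0.2.40"
    print(version_tuple)  # (0, 2, 40)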

webviz_subsurface/plugins/_bhp_qc/views/_view_functions.py
@@ -41,15 +41,15 @@ def calc_statistics(df: pd.DataFrame) -> pd.DataFrame:
     # Calculate statistics, ignoring NaNs.
     stat_df = (
         df.groupby("ENSEMBLE")
-        .agg([np.nanmean, "count", np.nanstd, np.nanmin, np.nanmax, p10, p50, p90])
+        .agg(["mean", "count", "std", "min", "max", p10, p50, p90])
         .reset_index(drop=True, level="ENSEMBLE")
     )
     # Rename nanmin, nanmax and nanmean to min, max and mean.
     col_stat_label_map = {
-        "nanmin": "min",
-        "nanmax": "max",
-        "nanmean": "mean",
-        "nanstd": "std",
+        "min": "min",
+        "max": "max",
+        "mean": "mean",
+        "std": "std",
         "p10": "high_p10",
         "p90": "low_p90",
     }

webviz_subsurface/plugins/_co2_leakage/_utilities/co2volume.py
@@ -623,7 +623,7 @@ def _add_hover_info_in_field(
         for date in dates
     }
     prev_vals = {date: 0 for date in dates}
-    date_dict = spaced_dates(dates, 4)
+    date_dict = spaced_dates(dates, 4)  # type: ignore[arg-type]
     for name, color in zip(cat_ord["type"], colors):
         sub_df = df[df["type"] == name]
         for date in dates:

webviz_subsurface/plugins/_disk_usage.py
@@ -43,15 +43,25 @@ class DiskUsage(WebvizPluginABC):
         self.scratch_dir = scratch_dir
         self.date_input = date
         self.disk_usage = get_disk_usage(self.scratch_dir, self.date_input)
-        self.date = str(self.disk_usage["date"][0])
+
         self.theme = webviz_settings.theme

     @property
     def layout(self) -> html.Div:
+        if self.disk_usage.empty:
+            return html.Div(
+                [
+                    wcc.Header(
+                        f"No disk usage data found for {self.scratch_dir}.",
+                        style={"text-align": "center"},
+                    ),
+                ]
+            )
+        date = str(self.disk_usage["date"][0]) if not self.disk_usage.empty else None
         return html.Div(
             [
                 wcc.Header(
-                    f"Disk usage on {self.scratch_dir} per user as of {self.date}",
+                    f"Disk usage on {self.scratch_dir} per user as of {date}",
                     style={"text-align": "center"},
                 ),
                 wcc.FlexBox(
@@ -129,10 +139,10 @@ def get_disk_usage(scratch_dir: Path, date: Optional[str]) -> pd.DataFrame:
         df, date = _loop_dates(scratch_dir)
     else:
         df = _get_disk_usage_for_date(scratch_dir, date)
-        if df is None:
-            raise FileNotFoundError(
-                f"No disk usage file found for {date} in {scratch_dir}."
-            )
+
+    # Return early if no data is found
+    if df.empty:
+        return df

     df.rename(
         columns={"usageKB": "usageKiB"}, inplace=True
@@ -174,7 +184,7 @@ def get_disk_usage(scratch_dir: Path, date: Optional[str]) -> pd.DataFrame:
     return df.sort_values(by="usageGiB", axis=0, ascending=False)


-def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> Optional[pd.DataFrame]:
+def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> pd.DataFrame:
     csv_file = scratch_dir / ".disk_usage" / f"disk_usage_user_test_{date}.csv"
     if csv_file.exists():
         return pd.read_csv(csv_file)
@@ -182,7 +192,8 @@ def _get_disk_usage_for_date(scratch_dir: Path, date: str) -> Optional[pd.DataFrame]:
     csv_file = scratch_dir / ".disk_usage" / f"disk_usage_user_{date}.csv"
     if csv_file.exists():
         return pd.read_csv(csv_file)
-    return None
+    # Create empty DataFrame with expected columns if no file is found
+    return pd.DataFrame(columns=["userid", "usageGiB", "date"])


 def _loop_dates(scratch_dir: Path) -> Tuple[pd.DataFrame, str]:

webviz_subsurface/plugins/_line_plotter_fmu/controllers/build_figure.py
@@ -256,14 +256,14 @@ def calc_series_statistics(
     stat_df = (
         df[["ENSEMBLE", refaxis] + vectors]
         .groupby(["ENSEMBLE", refaxis])
-        .agg([np.nanmean, np.nanmin, np.nanmax, p10, p90])
+        .agg(["mean", "min", "max", p10, p90])
         .reset_index()  # level=["label", refaxis], col_level=0)
     )
     # Rename nanmin, nanmax and nanmean to min, max and mean.
     col_stat_label_map = {
-        "nanmin": "min",
-        "nanmax": "max",
-        "nanmean": "mean",
+        "min": "min",
+        "max": "max",
+        "mean": "mean",
         "p10": "high_p10",
         "p90": "low_p90",
     }

webviz_subsurface/plugins/_map_viewer_fmu/map_viewer_fmu.py
@@ -106,7 +106,7 @@ color-tables.json for color_tables format.
         rel_surface_folder: str = "share/results/maps",
         color_tables: Path = None,
         hillshading_enabled: bool = None,
-        render_surfaces_as_images: bool = True,
+        render_surfaces_as_images: bool = False,
     ):
         super().__init__()
         self._hillshading = hillshading_enabled

webviz_subsurface/plugins/_parameter_analysis/_types.py
@@ -2,6 +2,7 @@ from webviz_config.utils import StrEnum


 class VisualizationType(StrEnum):
+    HISTOGRAM = "histogram"
     DISTRIBUTION = "distribution"
     BOX = "box"
     STAT_TABLE = "stat-table"

webviz_subsurface/plugins/_parameter_analysis/_utils/_parameters_model.py
@@ -8,6 +8,8 @@ from webviz_config import WebvizConfigTheme
 from webviz_subsurface._figures import create_figure
 from webviz_subsurface._models.parameter_model import ParametersModel as Pmodel

+from .._types import VisualizationType
+

 class ParametersModel:
     """Class to process and visualize ensemble parameter data"""
@@ -76,15 +78,15 @@ class ParametersModel:
             .groupby(["ENSEMBLE"])
             .agg(
                 [
-                    ("Avg", np.mean),
-                    ("Stddev", np.std),
+                    ("Avg", "mean"),
+                    ("Stddev", "std"),
                     ("P10", lambda x: np.percentile(x, 10)),
                     ("P90", lambda x: np.percentile(x, 90)),
-                    ("Min", np.min),
-                    ("Max", np.max),
+                    ("Min", "min"),
+                    ("Max", "max"),
                 ]
             )
-            .stack(0)
+            .stack(0, future_stack=True)
             .rename_axis(["ENSEMBLE", "PARAMETER"])
             .reset_index()
         )
@@ -163,7 +165,7 @@ class ParametersModel:
         self,
         ensembles: list,
         parameters: List[Any],
-        plot_type: str = "distribution",
+        plot_type: VisualizationType = VisualizationType.DISTRIBUTION,
     ) -> go.Figure:
         """Create subplots for selected parameters"""
         df = self.dataframe_melted.copy()
@@ -171,7 +173,7 @@ class ParametersModel:
         df = df[df["PARAMETER"].isin(parameters)]
         df = self._sort_parameters_col(df, parameters)

-        return (
+        figure = (
             create_figure(
                 plot_type=plot_type,
                 data_frame=df,
@@ -179,6 +181,7 @@ class ParametersModel:
                 facet_col="PARAMETER",
                 color="ENSEMBLE",
                 color_discrete_sequence=self.colorway,
+                barmode="overlay",
             )
             .update_xaxes(matches=None)
             .for_each_trace(
@@ -189,6 +192,11 @@ class ParametersModel:
                 )
             )
         )
+        # Use bingroup=None so that Plotly calculates bins per trace
+        # This also means that individual ensembles will have separate binning.
+        if plot_type == VisualizationType.HISTOGRAM:
+            figure.update_traces(bingroup=None)
+        return figure

     def get_stat_value(self, parameter: str, ensemble: str, stat_column: str) -> float:
         """

webviz_subsurface/plugins/_parameter_analysis/_views/_parameter_distributions_view/_settings/_visualization_type.py
@@ -20,6 +20,7 @@ class ParamDistVisualizationType(SettingsGroupABC):
             wcc.RadioItems(
                 id=self.register_component_unique_id(self.Ids.VISUALIZATION_TYPE),
                 options=[
+                    {"label": "Histogram", "value": VisualizationType.HISTOGRAM},
                     {
                         "label": "Distribution plots",
                         "value": VisualizationType.DISTRIBUTION,
@@ -30,7 +31,7 @@ class ParamDistVisualizationType(SettingsGroupABC):
                         "value": VisualizationType.STAT_TABLE,
                     },
                 ],
-                value=VisualizationType.DISTRIBUTION,
+                value=VisualizationType.HISTOGRAM,
                 vertical=True,
             )
         ]

webviz_subsurface/plugins/_property_statistics/property_statistics.py
@@ -61,7 +61,7 @@ FMU format.
 **Using raw ensemble data stored in realization folders**
 * **`ensembles`:** Which ensembles in `shared_settings` to visualize.
 * **`rel_file_pattern`:** path to `.arrow` files with summary data.
-* **`statistic_file`:** Csv file for each realization with property statistics. See the \
+* **`statistics_file`:** Csv file for each realization with property statistics. See the \
 documentation in [fmu-tools](http://fmu-docs.equinor.com/) on how to generate this data.
 * **`column_keys`:** List of vectors to extract. If not given, all vectors \
 from the simulations will be extracted. Wild card asterisk `*` can be used.

webviz_subsurface/plugins/_relative_permeability.py
@@ -982,8 +982,8 @@ def add_statistic_traces(df, color_by, curves, sataxis, colors, nplots):
     ]
     df_stat = (
         satnum_df_shared_axis.groupby(sataxis)
-        .agg([np.nanmean, np.nanmin, np.nanmax, p10, p90])
-        .stack()
+        .agg(["mean", "min", "max", p10, p90])
+        .stack(future_stack=True)
         .swaplevel()
     )
     for curve_no, curve in enumerate(curves):
@@ -1032,7 +1032,7 @@ def _get_fanchart_traces(
     """Renders a fanchart"""

     # Retrieve indices from one of the keys in series
-    x = curve_stats["nanmax"].index.tolist()
+    x = curve_stats["max"].index.tolist()
     data = FanchartData(
         samples=x,
         low_high=LowHighData(
@@ -1042,10 +1042,10 @@ def _get_fanchart_traces(
             high_name="P10",
         ),
         minimum_maximum=MinMaxData(
-            minimum=curve_stats["nanmin"].values,
-            maximum=curve_stats["nanmax"].values,
+            minimum=curve_stats["min"].values,
+            maximum=curve_stats["max"].values,
         ),
-        free_line=FreeLineData("Mean", curve_stats["nanmean"].values),
+        free_line=FreeLineData("Mean", curve_stats["mean"].values),
     )

     hovertemplate = f"{curve} <br>" f"Ensemble: {ens}, Satnum: {satnum}"

webviz_subsurface/plugins/_reservoir_simulation_timeseries_regional.py
@@ -1015,10 +1015,10 @@ def render_table(
             table.append(
                 {
                     "Group": ens,
-                    "Minimum": df["nanmin"].iat[0],
-                    "Maximum": df["nanmax"].iat[0],
-                    "Mean": df["nanmean"].iat[0],
-                    "Stddev": df["nanstd"].iat[0],
+                    "Minimum": df["min"].iat[0],
+                    "Maximum": df["max"].iat[0],
+                    "Mean": df["mean"].iat[0],
+                    "Stddev": df["std"].iat[0],
                     "P10": df["p10"].iat[0],
                     "P90": df["p90"].iat[0],
                 }
@@ -1031,10 +1031,10 @@ def render_table(
             table.append(
                 {
                     "Group": col.split("_filtered_on_")[-1],
-                    "Minimum": df["nanmin"].iat[0],
-                    "Maximum": df["nanmax"].iat[0],
-                    "Mean": df["nanmean"].iat[0],
-                    "Stddev": df["nanstd"].iat[0],
+                    "Minimum": df["min"].iat[0],
+                    "Maximum": df["max"].iat[0],
+                    "Mean": df["mean"].iat[0],
+                    "Stddev": df["std"].iat[0],
                     "P10": df["p10"].iat[0],
                     "P90": df["p90"].iat[0],
                 }
@@ -1259,7 +1259,7 @@ def calc_statistics(df: pd.DataFrame) -> pd.DataFrame:
         stat_dfs.append(
             ens_df.drop(columns=["REAL", "ENSEMBLE"])
             .groupby("DATE", as_index=False)
-            .agg([np.nanmean, np.nanstd, np.nanmin, np.nanmax, p10, p90])
+            .agg(["mean", "std", "min", "max", p10, p90])
             .reset_index()
             .assign(ENSEMBLE=ens)
         )
@@ -1327,10 +1327,10 @@ def _get_fanchart_traces(
             high_name="P10",
         ),
         minimum_maximum=MinMaxData(
-            minimum=stat_df[column]["nanmin"].values,
-            maximum=stat_df[column]["nanmax"].values,
+            minimum=stat_df[column]["min"].values,
+            maximum=stat_df[column]["max"].values,
         ),
-        free_line=FreeLineData("Mean", stat_df[column]["nanmean"].values),
+        free_line=FreeLineData("Mean", stat_df[column]["mean"].values),
     )

     hovertemplate = f"{legend_group}"

webviz_subsurface/plugins/_running_time_analysis_fmu.py
@@ -602,7 +602,12 @@ def make_status_df(
         # Load each json-file to a DataFrame for the realization
         with open(row.FULLPATH) as fjson:
             status_dict = json.load(fjson)
-        real_df = pd.DataFrame(status_dict["jobs"])
+        if "steps" in status_dict:
+            real_df = pd.DataFrame(status_dict["steps"])
+        elif "jobs" in status_dict:
+            real_df = pd.DataFrame(status_dict["jobs"])
+        else:
+            raise KeyError(f"Neither 'steps' nor 'jobs' found in {status_file}")

         # If new ensemble, calculate ensemble scaled runtimes
         # for previous ensemble and reset temporary ensemble data