ert 18.0.8__py3-none-any.whl → 19.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. _ert/forward_model_runner/client.py +6 -2
  2. ert/__main__.py +20 -6
  3. ert/cli/main.py +7 -3
  4. ert/config/__init__.py +3 -4
  5. ert/config/_create_observation_dataframes.py +85 -59
  6. ert/config/_get_num_cpu.py +1 -1
  7. ert/config/_observations.py +106 -31
  8. ert/config/distribution.py +1 -1
  9. ert/config/ensemble_config.py +3 -3
  10. ert/config/ert_config.py +50 -0
  11. ert/config/{ext_param_config.py → everest_control.py} +8 -12
  12. ert/config/everest_response.py +3 -5
  13. ert/config/field.py +76 -14
  14. ert/config/forward_model_step.py +12 -9
  15. ert/config/gen_data_config.py +3 -4
  16. ert/config/gen_kw_config.py +2 -12
  17. ert/config/parameter_config.py +1 -16
  18. ert/config/parsing/_option_dict.py +10 -2
  19. ert/config/parsing/config_keywords.py +1 -0
  20. ert/config/parsing/config_schema.py +8 -0
  21. ert/config/parsing/config_schema_deprecations.py +3 -3
  22. ert/config/parsing/config_schema_item.py +12 -3
  23. ert/config/parsing/context_values.py +3 -3
  24. ert/config/parsing/file_context_token.py +1 -1
  25. ert/config/parsing/observations_parser.py +12 -2
  26. ert/config/parsing/queue_system.py +9 -0
  27. ert/config/queue_config.py +0 -1
  28. ert/config/response_config.py +0 -1
  29. ert/config/rft_config.py +78 -33
  30. ert/config/summary_config.py +1 -2
  31. ert/config/surface_config.py +59 -16
  32. ert/dark_storage/common.py +1 -1
  33. ert/dark_storage/compute/misfits.py +4 -1
  34. ert/dark_storage/endpoints/compute/misfits.py +4 -2
  35. ert/dark_storage/endpoints/experiment_server.py +12 -9
  36. ert/dark_storage/endpoints/experiments.py +2 -2
  37. ert/dark_storage/endpoints/observations.py +14 -4
  38. ert/dark_storage/endpoints/parameters.py +2 -18
  39. ert/dark_storage/endpoints/responses.py +10 -5
  40. ert/dark_storage/json_schema/experiment.py +1 -1
  41. ert/data/_measured_data.py +6 -5
  42. ert/ensemble_evaluator/config.py +2 -1
  43. ert/field_utils/field_utils.py +1 -1
  44. ert/field_utils/grdecl_io.py +26 -9
  45. ert/field_utils/roff_io.py +1 -1
  46. ert/gui/__init__.py +5 -2
  47. ert/gui/ertnotifier.py +1 -1
  48. ert/gui/ertwidgets/pathchooser.py +0 -3
  49. ert/gui/ertwidgets/suggestor/suggestor.py +63 -30
  50. ert/gui/main.py +27 -5
  51. ert/gui/main_window.py +0 -5
  52. ert/gui/simulation/experiment_panel.py +12 -3
  53. ert/gui/simulation/run_dialog.py +2 -16
  54. ert/gui/tools/manage_experiments/export_dialog.py +136 -0
  55. ert/gui/tools/manage_experiments/storage_info_widget.py +133 -28
  56. ert/gui/tools/plot/plot_api.py +24 -15
  57. ert/gui/tools/plot/plot_widget.py +19 -4
  58. ert/gui/tools/plot/plot_window.py +35 -18
  59. ert/gui/tools/plot/plottery/plots/__init__.py +2 -0
  60. ert/gui/tools/plot/plottery/plots/cesp.py +3 -1
  61. ert/gui/tools/plot/plottery/plots/distribution.py +6 -1
  62. ert/gui/tools/plot/plottery/plots/ensemble.py +3 -1
  63. ert/gui/tools/plot/plottery/plots/gaussian_kde.py +12 -2
  64. ert/gui/tools/plot/plottery/plots/histogram.py +3 -1
  65. ert/gui/tools/plot/plottery/plots/misfits.py +436 -0
  66. ert/gui/tools/plot/plottery/plots/observations.py +18 -4
  67. ert/gui/tools/plot/plottery/plots/statistics.py +3 -1
  68. ert/gui/tools/plot/plottery/plots/std_dev.py +3 -1
  69. ert/plugins/hook_implementations/workflows/csv_export.py +2 -3
  70. ert/plugins/plugin_manager.py +4 -0
  71. ert/resources/forward_models/run_reservoirsimulator.py +8 -3
  72. ert/run_models/_create_run_path.py +3 -3
  73. ert/run_models/everest_run_model.py +13 -11
  74. ert/run_models/initial_ensemble_run_model.py +2 -2
  75. ert/run_models/run_model.py +9 -0
  76. ert/services/_base_service.py +6 -5
  77. ert/services/ert_server.py +4 -4
  78. ert/shared/_doc_utils/__init__.py +4 -2
  79. ert/shared/net_utils.py +43 -18
  80. ert/shared/version.py +3 -3
  81. ert/storage/__init__.py +2 -0
  82. ert/storage/local_ensemble.py +25 -8
  83. ert/storage/local_experiment.py +2 -2
  84. ert/storage/local_storage.py +45 -25
  85. ert/storage/migration/to11.py +1 -1
  86. ert/storage/migration/to18.py +0 -1
  87. ert/storage/migration/to19.py +34 -0
  88. ert/storage/migration/to20.py +23 -0
  89. ert/storage/migration/to21.py +25 -0
  90. ert/storage/migration/to22.py +18 -0
  91. ert/storage/migration/to23.py +49 -0
  92. ert/workflow_runner.py +2 -1
  93. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/METADATA +1 -1
  94. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/RECORD +112 -110
  95. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/WHEEL +1 -1
  96. everest/bin/everlint_script.py +0 -2
  97. everest/bin/utils.py +2 -1
  98. everest/bin/visualization_script.py +4 -11
  99. everest/config/control_config.py +4 -4
  100. everest/config/control_variable_config.py +2 -2
  101. everest/config/everest_config.py +9 -0
  102. everest/config/utils.py +2 -2
  103. everest/config/validation_utils.py +7 -1
  104. everest/config_file_loader.py +0 -2
  105. everest/detached/client.py +3 -3
  106. everest/everest_storage.py +0 -2
  107. everest/gui/everest_client.py +2 -2
  108. everest/optimizer/everest2ropt.py +4 -4
  109. everest/optimizer/opt_model_transforms.py +2 -2
  110. ert/config/violations.py +0 -0
  111. ert/gui/tools/export/__init__.py +0 -3
  112. ert/gui/tools/export/export_panel.py +0 -83
  113. ert/gui/tools/export/export_tool.py +0 -69
  114. ert/gui/tools/export/exporter.py +0 -36
  115. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/entry_points.txt +0 -0
  116. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/licenses/COPYING +0 -0
  117. {ert-18.0.8.dist-info → ert-19.0.0.dist-info}/top_level.txt +0 -0
ert/config/parsing/observations_parser.py CHANGED
@@ -18,6 +18,7 @@ class ObservationType(StrEnum):
     HISTORY = "HISTORY_OBSERVATION"
     SUMMARY = "SUMMARY_OBSERVATION"
     GENERAL = "GENERAL_OBSERVATION"
+    RFT = "RFT_OBSERVATION"


 ObservationDict = dict[str, Any]
@@ -92,7 +93,7 @@ def parse_observations(content: str, filename: str) -> list[ObservationDict]:
         ), ["TYPE"]:
             message = (
                 f"Unknown observation type '{unexpected_token}', "
-                f"expected either 'GENERAL_OBSERVATION', "
+                f"expected either 'RFT_OBSERVATION', 'GENERAL_OBSERVATION', "
                 f"'SUMMARY_OBSERVATION' or 'HISTORY_OBSERVATION'."
             )
         case UnexpectedToken(token=unexpected_char, expected=allowed_chars), _:
@@ -122,7 +123,10 @@ observations_parser = Lark(
     r"""
     start: observation*
    ?observation: type OBSERVATION_NAME object? ";"
-    TYPE: "HISTORY_OBSERVATION" | "SUMMARY_OBSERVATION" | "GENERAL_OBSERVATION"
+    TYPE: "HISTORY_OBSERVATION"
+        | "SUMMARY_OBSERVATION"
+        | "GENERAL_OBSERVATION"
+        | "RFT_OBSERVATION"
     type: TYPE
    ?value: object
         | STRING
@@ -134,6 +138,7 @@ observations_parser = Lark(
     PARAMETER_NAME : CHAR+
     object : "{" [(declaration";")*] "}"
    ?declaration: "SEGMENT" STRING object -> segment
+        | "LOCALIZATION" object -> localization
        | pair
    pair : PARAMETER_NAME "=" value

@@ -189,6 +194,11 @@ class TreeToObservations(Transformer[FileContextToken, list[ObservationDict]]):
     def segment(tree):
         return (("SEGMENT", tree[0]), tree[1])

+    @staticmethod
+    @no_type_check
+    def localization(tree):
+        return ("LOCALIZATION", tree[0])
+
     @staticmethod
     @no_type_check
     def object(tree):
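For orientation, the grammar change above means an observation file can now declare RFT observations, optionally carrying a LOCALIZATION block. A minimal sketch that would parse under the new grammar (the key/value pairs inside the blocks are illustrative, not taken from ert's documentation; only the TYPE, SEGMENT, and LOCALIZATION keywords are fixed by the grammar):

    RFT_OBSERVATION MY_RFT_OBS
    {
        DATA  = rft;
        VALUE = 250.0;
        ERROR = 5.0;
        LOCALIZATION
        {
            MAIN_RANGE = 1000.0;
        };
    };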
ert/config/parsing/queue_system.py CHANGED
@@ -23,3 +23,12 @@ class QueueSystem(StrEnum):
     @staticmethod
     def ert_config_case() -> str:
         return "upper"
+
+    @property
+    def formatted_name(self) -> str:
+        return {
+            self.LSF: "LSF",
+            self.LOCAL: "Local",
+            self.TORQUE: "Torque/OpenPBS",
+            self.SLURM: "Slurm",
+        }[self]
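A quick sketch of the new property in use, assuming the module path from the file list above:

    from ert.config.parsing.queue_system import QueueSystem

    # Maps the upper-case config value to a human-readable display label.
    assert QueueSystem.TORQUE.formatted_name == "Torque/OpenPBS"
    assert QueueSystem.LOCAL.formatted_name == "Local"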
ert/config/queue_config.py CHANGED
@@ -51,7 +51,6 @@ class QueueOptions(
     BaseModelWithContextSupport,
     validate_assignment=True,
     extra="forbid",
-    use_enum_values=True,
     validate_default=True,
 ):
     name: QueueSystem
ert/config/response_config.py CHANGED
@@ -31,7 +31,6 @@ class ResponseMetadata(BaseModel):

 class ResponseConfig(BaseModel):
     type: str
-    name: str
     input_files: list[str] = Field(default_factory=list)
     keys: list[str] = Field(default_factory=list)
     has_finalized_keys: bool = False
ert/config/rft_config.py CHANGED
@@ -3,15 +3,16 @@ from __future__ import annotations
 import datetime
 import fnmatch
 import logging
+import os
 import re
 from collections import defaultdict
-from typing import Literal
+from typing import IO, Any, Literal

 import numpy as np
 import numpy.typing as npt
 import polars as pl
 from pydantic import Field
-from resfo_utilities import InvalidRFTError, RFTReader
+from resfo_utilities import CornerpointGrid, InvalidRFTError, RFTReader

 from ert.substitutions import substitute_runpath_name

@@ -27,6 +28,7 @@ class RFTConfig(ResponseConfig):
     name: str = "rft"
     has_finalized_keys: bool = False
     data_to_read: dict[str, dict[str, list[str]]] = Field(default_factory=dict)
+    locations: list[tuple[float, float, float]] = Field(default_factory=list)

     @property
     def metadata(self) -> list[ResponseMetadata]:
@@ -51,6 +53,20 @@

         return [f"{base}.RFT"]

+    def _find_indices(
+        self, egrid_file: str | os.PathLike[str] | IO[Any]
+    ) -> dict[tuple[int, int, int] | None, set[tuple[float, float, float]]]:
+        indices = defaultdict(set)
+        for a, b in zip(
+            CornerpointGrid.read_egrid(egrid_file).find_cell_containing_point(
+                self.locations
+            ),
+            self.locations,
+            strict=True,
+        ):
+            indices[a].add(b)
+        return indices
+
     def read_from_file(self, run_path: str, iens: int, iter_: int) -> pl.DataFrame:
         filename = substitute_runpath_name(self.input_files[0], iens, iter_)
         if filename.upper().endswith(".DATA"):
@@ -58,9 +74,20 @@
             # allowed to give REFCASE and ECLBASE both
             # with and without .DATA extensions
             filename = filename[:-5]
+        grid_filename = f"{run_path}/{filename}"
+        if grid_filename.upper().endswith(".RFT"):
+            grid_filename = grid_filename[:-4]
+        grid_filename += ".EGRID"
         fetched: dict[tuple[str, datetime.date], dict[str, npt.NDArray[np.float32]]] = (
             defaultdict(dict)
         )
+        indices = {}
+        if self.locations:
+            indices = self._find_indices(grid_filename)
+            if None in indices:
+                raise InvalidResponseFile(
+                    f"Did not find grid coordinate for location(s) {indices[None]}"
+                )
         # This is a somewhat complicated optimization in order to
         # support wildcards in well names, dates and properties
         # A python for loop is too slow so we use a compiled regex
@@ -72,6 +99,7 @@
                 "time": [],
                 "depth": [],
                 "values": [],
+                "location": [],
             }
         )

@@ -104,6 +132,7 @@
                 for time, props in inner_dict.items()
             )
         )
+        locations = {}
         try:
             with RFTReader.open(f"{run_path}/{filename}") as rft:
                 for entry in rft:
@@ -113,6 +142,15 @@
                         key = f"{well}{sep}{date}{sep}{rft_property}"
                         if matcher.fullmatch(key) is not None:
                             values = entry[rft_property]
+                            locations[well, date] = [
+                                list(
+                                    indices.get(
+                                        (c[0] - 1, c[1] - 1, c[2] - 1),
+                                        [(None, None, None)],
+                                    )
+                                )
+                                for c in entry.connections
+                            ]
                             if np.isdtype(values.dtype, np.float32):
                                 fetched[well, date][rft_property] = values
         except (FileNotFoundError, InvalidRFTError) as err:
@@ -127,41 +165,48 @@
                 "time": [],
                 "depth": [],
                 "values": [],
+                "location": [],
             }
         )

-        dfs = []
-
-        for (well, time), inner_dict in fetched.items():
-            wide = pl.DataFrame(
-                {k: pl.Series(v.astype("<f4")) for k, v in inner_dict.items()}
-            )
-
-            if wide.columns == ["DEPTH"]:
-                continue
-
-            if "DEPTH" not in wide.columns:
-                raise InvalidResponseFile(f"Could not find DEPTH in RFTFile {filename}")
-
-            # Unpivot all columns except DEPTH
-            long = wide.unpivot(
-                index="DEPTH",  # keep depth as column
-                # turn other prop values into response_key col
-                variable_name="response_key",
-                value_name="values",  # put values in own column
-            ).rename({"DEPTH": "depth"})
-
-            # Add wellname prefix to response_keys
-            long = long.with_columns(
-                (pl.lit(f"{well}:{time.isoformat()}:") + pl.col("response_key")).alias(
-                    "response_key"
-                ),
-                pl.lit(time).alias("time"),
+        try:
+            df = pl.concat(
+                [
+                    pl.DataFrame(
+                        {
+                            "response_key": [f"{well}:{time.isoformat()}:{prop}"],
+                            "time": [time],
+                            "depth": [fetched[well, time]["DEPTH"]],
+                            "values": [vals],
+                            "location": pl.Series(
+                                [
+                                    locations.get(
+                                        (well, time), [(None, None, None)] * len(vals)
+                                    )
+                                ],
+                                dtype=pl.Array(
+                                    pl.List(pl.Array(pl.Float32, 3)), len(vals)
+                                ),
+                            ),
+                        }
+                    )
+                    .explode("depth", "values", "location")
+                    .explode("location")
+                    for (well, time), inner_dict in fetched.items()
+                    for prop, vals in inner_dict.items()
+                    if prop != "DEPTH"
+                ]
             )
+        except KeyError as err:
+            raise InvalidResponseFile(
+                f"Could not find {err.args[0]} in RFTFile {filename}"
+            ) from err

-            dfs.append(long.select("response_key", "time", "depth", "values"))
-
-        return pl.concat(dfs)
+        return df.with_columns(
+            east=pl.col("location").arr.get(0),
+            north=pl.col("location").arr.get(1),
+            tvd=pl.col("location").arr.get(2),
+        ).drop("location")

     @property
     def response_type(self) -> str:
@@ -169,7 +214,7 @@

     @property
     def primary_key(self) -> list[str]:
-        return []
+        return ["east", "north", "tvd"]

     @classmethod
     def from_config_dict(cls, config_dict: ConfigDict) -> RFTConfig | None:
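Taken together, the rft_config.py changes let an RFTConfig carry a list of (east, north, tvd) locations: read_from_file resolves the runpath's .EGRID next to the .RFT file, maps each location to the grid cell containing it, attaches the matching coordinates to each RFT connection, and exposes east/north/tvd as the response's primary key. A hypothetical sketch (the data_to_read layout and all values are invented for illustration, not taken from ert's documentation):

    from ert.config.rft_config import RFTConfig

    config = RFTConfig(
        input_files=["ECLBASE.RFT"],  # sibling ECLBASE.EGRID supplies the grid geometry
        data_to_read={"WELL-1": {"2024-01-01": ["PRESSURE"]}},
        locations=[(462500.0, 5932000.0, 1650.0)],  # (east, north, tvd)
    )
    df = config.read_from_file("runpath/realization-0", iens=0, iter_=0)
    # df columns: response_key, time, depth, values, east, north, tvd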
ert/config/summary_config.py CHANGED
@@ -19,14 +19,13 @@ logger = logging.getLogger(__name__)

 class SummaryConfig(ResponseConfig):
     type: Literal["summary"] = "summary"
-    name: str = "summary"
     has_finalized_keys: bool = False

     @property
     def metadata(self) -> list[ResponseMetadata]:
         return [
             ResponseMetadata(
-                response_type=self.name,
+                response_type=self.type,
                 response_key=response_key,
                 filter_on=None,
                 finalized=self.has_finalized_keys,
ert/config/surface_config.py CHANGED
@@ -10,11 +10,16 @@ import xarray as xr
 from pydantic import field_serializer
 from surfio import IrapHeader, IrapSurface

+from ert.field_utils import (
+    calc_rho_for_2d_grid_layer,
+    transform_local_ellipse_angle_to_local_coords,
+    transform_positions_to_local_field_coordinates,
+)
 from ert.substitutions import substitute_runpath_name

 from ._str_to_bool import str_to_bool
 from .field import create_flattened_cube_graph
-from .parameter_config import InvalidParameterFile, ParameterConfig, ParameterMetadata
+from .parameter_config import InvalidParameterFile, ParameterConfig
 from .parsing import ConfigValidationError, ErrorInfo

 if TYPE_CHECKING:
@@ -46,6 +51,7 @@ class SurfaceMismatchError(InvalidParameterFile):

 class SurfaceConfig(ParameterConfig):
     type: Literal["surface"] = "surface"
+    dimensionality: Literal[2] = 2
     ncol: int
     nrow: int
     xori: float
@@ -70,21 +76,6 @@
     def parameter_keys(self) -> list[str]:
         return []

-    @property
-    def metadata(self) -> list[ParameterMetadata]:
-        return [
-            ParameterMetadata(
-                key=self.name,
-                dimensionality=2,
-                transformation=None,
-                userdata={
-                    "data_origin": "SURFACE",
-                    "nx": self.ncol,
-                    "ny": self.nrow,
-                },
-            )
-        ]
-
     @classmethod
     def from_config_list(cls, config_list: list[str | dict[str, str]]) -> Self:
         name = cast(str, config_list[0])
@@ -248,3 +239,55 @@
         this flattening process"""

         return create_flattened_cube_graph(px=self.ncol, py=self.nrow, pz=1)
+
+    def calc_rho_for_2d_grid_layer(
+        self,
+        obs_xpos: npt.NDArray[np.float64],
+        obs_ypos: npt.NDArray[np.float64],
+        obs_main_range: npt.NDArray[np.float64],
+        obs_perp_range: npt.NDArray[np.float64],
+        obs_anisotropy_angle: npt.NDArray[np.float64],
+    ) -> npt.NDArray[np.float64]:
+        """Function to calculate scaling values to be used in the RHO matrix
+        for distance-based localization.
+
+        Args:
+            obs_xpos: x-coordinates in global coordinates of observations
+            obs_ypos: y-coordinates in global coordinates of observations
+            obs_main_range: Size of influence ellipse main principal direction.
+            obs_perp_range: Size of influence ellipse second principal direction.
+            obs_anisotropy_angle: Rotation angle anticlockwise of main principal
+                direction of influence ellipse relative to global coordinate
+                system's x-axis.
+
+        Returns:
+            Scaling values (elements of the RHO matrix) as a numpy array
+            of shape=(nx, ny, nobservations)
+
+        """
+        # Transform observation positions to local surface coordinates
+        xpos, ypos = transform_positions_to_local_field_coordinates(
+            (self.xori, self.yori), self.rotation, obs_xpos, obs_ypos
+        )
+        # Transform ellipse orientation to local surface coordinates
+        rotation_angle_of_localization_ellipse = (
+            transform_local_ellipse_angle_to_local_coords(
+                self.rotation, obs_anisotropy_angle
+            )
+        )
+
+        # Assume the coordinate system is not flipped.
+        # This means the right_handed_grid_indexing is False
+        assert self.yflip == 1
+        return calc_rho_for_2d_grid_layer(
+            self.ncol,
+            self.nrow,
+            self.xinc,
+            self.yinc,
+            xpos,
+            ypos,
+            obs_main_range,
+            obs_perp_range,
+            rotation_angle_of_localization_ellipse,
+            right_handed_grid_indexing=False,
+        )
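A usage sketch for the new method, with invented observation positions and ranges (angle units follow whatever the underlying ert.field_utils implementation expects; surface_config is assumed to be an existing SurfaceConfig instance):

    import numpy as np

    # Two observations, each with a 1000 m x 500 m influence ellipse
    # rotated 30 degrees relative to the global x-axis.
    rho = surface_config.calc_rho_for_2d_grid_layer(
        obs_xpos=np.array([461000.0, 463500.0]),
        obs_ypos=np.array([5930000.0, 5931200.0]),
        obs_main_range=np.array([1000.0, 1000.0]),
        obs_perp_range=np.array([500.0, 500.0]),
        obs_anisotropy_angle=np.array([30.0, 30.0]),
    )
    # Per the docstring: shape (nx, ny, nobservations), here (ncol, nrow, 2).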
ert/dark_storage/common.py CHANGED
@@ -28,7 +28,7 @@ def get_storage() -> Storage:
     except ErtStorageException as err:
         logger.exception(f"Error accessing storage: {err!s}")
         raise InternalServerError("Error accessing storage") from None
-    _storage.refresh()
+    _storage.reload()
     return _storage

ert/dark_storage/compute/misfits.py CHANGED
@@ -29,7 +29,10 @@ def calculate_signed_chi_squared_misfits(
     for realization_index in reponses_dict:
         misfits_dict[realization_index] = _calculate_signed_chi_squared_misfit(
             observation["values"],
-            reponses_dict[realization_index].loc[:, observation.index].values.flatten(),
+            reponses_dict[realization_index]
+            .loc[:, observation.index]
+            .to_numpy()
+            .flatten(),
             observation["errors"],
         )

ert/dark_storage/endpoints/compute/misfits.py CHANGED
@@ -1,6 +1,6 @@
 import json
 from datetime import datetime
-from typing import Any
+from typing import Annotated, Any
 from uuid import UUID

 import pandas as pd
@@ -36,7 +36,9 @@ async def get_response_misfits(
     response_name: str,
     realization_index: int | None = None,
     summary_misfits: bool = False,
-    filter_on: str | None = Query(None, description="JSON string with filters"),
+    filter_on: Annotated[
+        str | None, Query(description="JSON string with filters")
+    ] = None,
 ) -> Response:
     ensemble = storage.get_ensemble(ensemble_id)
     dataframe = data_for_response(
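This endpoint, and several others below, migrate from the default-value style (param: T = Query(...) or = Depends(...)) to the Annotated style that FastAPI now recommends; both behave the same, but the Annotated form keeps the real Python default visible and plays better with type checkers. A self-contained sketch (route and parameter names are illustrative):

    from typing import Annotated

    from fastapi import FastAPI, Query

    app = FastAPI()

    @app.get("/items")
    def list_items(
        # Old: filter_on: str | None = Query(None, description="JSON string with filters")
        filter_on: Annotated[
            str | None, Query(description="JSON string with filters")
        ] = None,
    ) -> dict[str, str | None]:
        return {"filter_on": filter_on}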
ert/dark_storage/endpoints/experiment_server.py CHANGED
@@ -9,6 +9,7 @@ import traceback
 import uuid
 from base64 import b64decode
 from queue import SimpleQueue
+from typing import Annotated

 from fastapi import (
     APIRouter,
@@ -153,7 +154,7 @@ def _log(request: Request) -> None:

 @router.get("/")
 def get_status(
-    request: Request, credentials: HTTPBasicCredentials = Depends(security)
+    request: Request, credentials: Annotated[HTTPBasicCredentials, Depends(security)]
 ) -> PlainTextResponse:
     _log(request)
     _check_user(credentials)
@@ -162,7 +163,7 @@

 @router.get("/status")
 def experiment_status(
-    request: Request, credentials: HTTPBasicCredentials = Depends(security)
+    request: Request, credentials: Annotated[HTTPBasicCredentials, Depends(security)]
 ) -> ExperimentStatus:
     _log(request)
     _check_user(credentials)
@@ -171,7 +172,7 @@

 @router.post("/" + EverEndpoints.stop)
 def stop(
-    request: Request, credentials: HTTPBasicCredentials = Depends(security)
+    request: Request, credentials: Annotated[HTTPBasicCredentials, Depends(security)]
 ) -> Response:
     _log(request)
     _check_user(credentials)
@@ -185,7 +186,7 @@
 async def start_experiment(
     request: Request,
     background_tasks: BackgroundTasks,
-    credentials: HTTPBasicCredentials = Depends(security),
+    credentials: Annotated[HTTPBasicCredentials, Depends(security)],
 ) -> Response:
     _log(request)
     _check_user(credentials)
@@ -218,7 +219,7 @@

 @router.get("/" + EverEndpoints.config_path)
 async def config_path(
-    request: Request, credentials: HTTPBasicCredentials = Depends(security)
+    request: Request, credentials: Annotated[HTTPBasicCredentials, Depends(security)]
 ) -> JSONResponse:
     _log(request)
     _check_user(credentials)
@@ -237,7 +238,7 @@

 @router.get("/" + EverEndpoints.start_time)
 async def start_time(
-    request: Request, credentials: HTTPBasicCredentials = Depends(security)
+    request: Request, credentials: Annotated[HTTPBasicCredentials, Depends(security)]
 ) -> Response:
     _log(request)
     _check_user(credentials)
@@ -316,9 +317,11 @@ class ExperimentRunner:
         simulation_future = loop.run_in_executor(
             None,
             lambda: run_model.start_simulations_thread(
-                EvaluatorServerConfig()
-                if run_model.queue_config.queue_system == QueueSystem.LOCAL
-                else EvaluatorServerConfig(use_ipc_protocol=False)
+                EvaluatorServerConfig(
+                    use_ipc_protocol=run_model.queue_config.queue_system
+                    == QueueSystem.LOCAL,
+                    prioritize_private_ip_address=site_plugins.prioritize_private_ip_address,
+                )
             ),
         )
         while True:
ert/dark_storage/endpoints/experiments.py CHANGED
@@ -29,7 +29,7 @@ def get_experiments(
             priors=create_priors(experiment),
             userdata={},
             parameters={
-                group: [m.model_dump() for m in config.metadata]
+                group: config.model_dump()
                 for group, config in experiment.parameter_configuration.items()
                 if not isinstance(config, SurfaceConfig)
             },
@@ -65,7 +65,7 @@ def get_experiment_by_id(
             priors=create_priors(experiment),
             userdata={},
             parameters={
-                group: [m.model_dump() for m in config.metadata]
+                group: config.model_dump()
                 for group, config in experiment.parameter_configuration.items()
             },
             responses={
ert/dark_storage/endpoints/observations.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import logging
 import operator
-from typing import Any
+from typing import Annotated, Any
 from urllib.parse import unquote
 from uuid import UUID, uuid4

@@ -57,7 +57,9 @@ async def get_observations_for_response(
     storage: Storage = DEFAULT_STORAGE,
     ensemble_id: UUID,
     response_key: str,
-    filter_on: str | None = Query(None, description="JSON string with filters"),
+    filter_on: Annotated[
+        str | None, Query(description="JSON string with filters")
+    ] = None,
 ) -> list[js.ObservationOut]:
     response_key = unquote(response_key)
     try:
@@ -82,6 +84,7 @@ async def get_observations_for_response(
         ensemble.experiment,
         obs_keys,
         json.loads(filter_on) if filter_on is not None else None,
+        requested_response_type=response_type,
     )
     if not obss:
         return []
@@ -105,10 +108,17 @@ def _get_observations(
     experiment: Experiment,
     observation_keys: list[str] | None = None,
     filter_on: dict[str, Any] | None = None,
+    requested_response_type: str | None = None,
 ) -> list[dict[str, Any]]:
     observations = []

-    for response_type, df in experiment.observations.items():
+    for stored_response_type, df in experiment.observations.items():
+        if (
+            requested_response_type is not None
+            and stored_response_type != requested_response_type
+        ):
+            continue
+
         if observation_keys is not None:
             df = df.filter(pl.col("observation_key").is_in(observation_keys))
@@ -125,7 +135,7 @@ def _get_observations(
         if df.is_empty():
             continue

-        x_axis_fn = response_to_pandas_x_axis_fns[response_type]
+        x_axis_fn = response_to_pandas_x_axis_fns[stored_response_type]
         df = df.rename(
             {
                 "observation_key": "name",
ert/dark_storage/endpoints/parameters.py CHANGED
@@ -113,27 +113,11 @@ def get_parameter_std_dev(
     return Response(content=buffer.getvalue(), media_type="application/octet-stream")


-def _extract_parameter_group_and_key(key: str) -> tuple[str, str] | tuple[None, None]:
-    key = key.removeprefix("LOG10_")
-    if ":" not in key:
-        # Assume all incoming keys are in format group:key for now
-        return None, None
-
-    param_group, param_key = key.split(":", maxsplit=1)
-    return param_group, param_key
-
-
 def data_for_parameter(ensemble: Ensemble, key: str) -> pd.DataFrame:
-    group, _ = _extract_parameter_group_and_key(key)
-    if group is None:
-        logger.warning(
-            f"Parameter with key '{key}' does not have a group, "
-            "fetching data for all parameters"
-        )
     try:
-        df = ensemble.load_scalars(group)
+        df = ensemble.load_scalar_keys([key], transformed=True)
         if df.is_empty():
-            logger.warning(f"No data found for parameter '{group}:{key}'")
+            logger.warning(f"No data found for parameter '{key}'")
             return pd.DataFrame()
     except KeyError as e:
         logger.error(e)
ert/dark_storage/endpoints/responses.py CHANGED
@@ -45,7 +45,9 @@ async def get_response(
     storage: Storage = DEFAULT_STORAGE,
     ensemble_id: UUID,
     response_key: str,
-    filter_on: str | None = Query(None, description="JSON string with filters"),
+    filter_on: Annotated[
+        str | None, Query(description="JSON string with filters")
+    ] = None,
     accept: Annotated[str | None, Header()] = None,
 ) -> Response:
     try:
@@ -96,6 +98,7 @@ async def get_response(
 response_to_pandas_x_axis_fns: dict[str, Callable[[tuple[Any, ...]], Any]] = {
     "summary": lambda t: pd.Timestamp(t[2]).isoformat(),
     "gen_data": lambda t: str(t[3]),
+    "rft": lambda t: str(t[4]),
 }

@@ -147,8 +150,9 @@ def data_for_response(
         # This performs the same aggregation by mean of duplicate values
         # as in ert/analysis/_es_update.py
         df = df.groupby(["Date", "Realization"]).mean()
-        data = df.unstack(level="Date")
-        data.columns = data.columns.droplevel(0)
+        data = df.reset_index().pivot_table(
+            index="Realization", columns="Date", values=df.columns[0]
+        )
         return data.astype(float)

     if response_type == "rft":
@@ -159,8 +163,9 @@ def data_for_response(
             )
             .rename({"realization": "Realization"})
             .select(["Realization", "depth", "values"])
+            .unique()
             .to_pandas()
-            .pivot(index="Realization", columns="depth", values="values")
+            .pivot_table(index="Realization", columns="depth", values="values")
             .reset_index(drop=True)
         )

@@ -172,7 +177,7 @@ def data_for_response(
         assert "report_step" in filter_on
         report_step = int(filter_on["report_step"])
         vals = data.filter(pl.col("report_step").eq(report_step))
-        pivoted = vals.drop("response_key", "report_step").pivot(
+        pivoted = vals.drop("response_key", "report_step").pivot(  # noqa: PD010
            on="index", values="values"
         )
        data = pivoted.to_pandas().set_index("realization")
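The pivot-to-pivot_table switches above change behavior on duplicates: pandas' DataFrame.pivot raises ValueError when an (index, column) pair occurs more than once, while pivot_table aggregates duplicates (mean by default), matching the aggregation-by-mean comment earlier in data_for_response. A small illustration with invented data:

    import pandas as pd

    df = pd.DataFrame(
        {
            "Realization": [0, 0, 1],
            "depth": [1500.0, 1500.0, 1500.0],
            "values": [10.0, 12.0, 9.0],
        }
    )

    # df.pivot(index="Realization", columns="depth", values="values") raises
    # ValueError: Index contains duplicate entries, cannot reshape
    wide = df.pivot_table(index="Realization", columns="depth", values="values")
    # Duplicates for realization 0 are averaged to 11.0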
ert/dark_storage/json_schema/experiment.py CHANGED
@@ -24,6 +24,6 @@ class ExperimentOut(_Experiment):
     ensemble_ids: list[UUID]
     priors: Mapping[str, dict[str, Any]]
     userdata: Mapping[str, Any]
-    parameters: Mapping[str, list[dict[str, Any]]]
+    parameters: Mapping[str, dict[str, Any]]
     responses: Mapping[str, list[dict[str, Any]]]
     observations: Mapping[str, dict[str, list[str]]]
  observations: Mapping[str, dict[str, list[str]]]