ert-19.0.1-py3-none-any.whl → ert-20.0.0b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. ert/__main__.py +94 -63
  2. ert/analysis/_es_update.py +11 -14
  3. ert/cli/main.py +1 -1
  4. ert/config/__init__.py +3 -2
  5. ert/config/_create_observation_dataframes.py +52 -375
  6. ert/config/_observations.py +527 -200
  7. ert/config/_read_summary.py +4 -5
  8. ert/config/ert_config.py +52 -117
  9. ert/config/everest_control.py +40 -39
  10. ert/config/everest_response.py +3 -15
  11. ert/config/field.py +4 -76
  12. ert/config/forward_model_step.py +17 -1
  13. ert/config/gen_data_config.py +14 -17
  14. ert/config/observation_config_migrations.py +821 -0
  15. ert/config/parameter_config.py +18 -28
  16. ert/config/parsing/__init__.py +0 -1
  17. ert/config/parsing/_parse_zonemap.py +45 -0
  18. ert/config/parsing/config_keywords.py +1 -0
  19. ert/config/parsing/config_schema.py +2 -0
  20. ert/config/parsing/observations_parser.py +2 -0
  21. ert/config/response_config.py +5 -23
  22. ert/config/rft_config.py +129 -31
  23. ert/config/summary_config.py +1 -13
  24. ert/config/surface_config.py +0 -57
  25. ert/dark_storage/compute/misfits.py +0 -42
  26. ert/dark_storage/endpoints/__init__.py +0 -2
  27. ert/dark_storage/endpoints/experiments.py +2 -5
  28. ert/dark_storage/json_schema/experiment.py +1 -2
  29. ert/field_utils/__init__.py +0 -2
  30. ert/field_utils/field_utils.py +1 -117
  31. ert/gui/ertwidgets/listeditbox.py +9 -1
  32. ert/gui/ertwidgets/models/ertsummary.py +20 -6
  33. ert/gui/ertwidgets/pathchooser.py +9 -1
  34. ert/gui/ertwidgets/stringbox.py +11 -3
  35. ert/gui/ertwidgets/textbox.py +10 -3
  36. ert/gui/ertwidgets/validationsupport.py +19 -1
  37. ert/gui/main_window.py +11 -6
  38. ert/gui/simulation/experiment_panel.py +1 -1
  39. ert/gui/simulation/run_dialog.py +11 -1
  40. ert/gui/tools/manage_experiments/export_dialog.py +4 -0
  41. ert/gui/tools/manage_experiments/manage_experiments_panel.py +1 -0
  42. ert/gui/tools/manage_experiments/storage_info_widget.py +1 -1
  43. ert/gui/tools/manage_experiments/storage_widget.py +21 -4
  44. ert/gui/tools/plot/data_type_proxy_model.py +1 -1
  45. ert/gui/tools/plot/plot_api.py +35 -27
  46. ert/gui/tools/plot/plot_widget.py +5 -0
  47. ert/gui/tools/plot/plot_window.py +4 -7
  48. ert/run_models/ensemble_experiment.py +2 -9
  49. ert/run_models/ensemble_smoother.py +1 -9
  50. ert/run_models/everest_run_model.py +31 -23
  51. ert/run_models/initial_ensemble_run_model.py +19 -22
  52. ert/run_models/manual_update.py +11 -5
  53. ert/run_models/model_factory.py +7 -7
  54. ert/run_models/multiple_data_assimilation.py +3 -16
  55. ert/sample_prior.py +12 -14
  56. ert/scheduler/job.py +24 -4
  57. ert/services/__init__.py +7 -3
  58. ert/services/_storage_main.py +59 -22
  59. ert/services/ert_server.py +186 -24
  60. ert/shared/version.py +3 -3
  61. ert/storage/local_ensemble.py +50 -116
  62. ert/storage/local_experiment.py +94 -109
  63. ert/storage/local_storage.py +10 -12
  64. ert/storage/migration/to24.py +26 -0
  65. ert/storage/migration/to25.py +91 -0
  66. ert/utils/__init__.py +20 -0
  67. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/METADATA +4 -51
  68. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/RECORD +80 -83
  69. everest/bin/everest_script.py +5 -5
  70. everest/bin/kill_script.py +2 -2
  71. everest/bin/monitor_script.py +2 -2
  72. everest/bin/utils.py +4 -4
  73. everest/detached/everserver.py +6 -6
  74. everest/gui/everest_client.py +0 -6
  75. everest/gui/main_window.py +2 -2
  76. everest/util/__init__.py +1 -19
  77. ert/dark_storage/compute/__init__.py +0 -0
  78. ert/dark_storage/endpoints/compute/__init__.py +0 -0
  79. ert/dark_storage/endpoints/compute/misfits.py +0 -95
  80. ert/services/_base_service.py +0 -387
  81. ert/services/webviz_ert_service.py +0 -20
  82. ert/shared/storage/command.py +0 -38
  83. ert/shared/storage/extraction.py +0 -42
  84. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/WHEEL +0 -0
  85. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/entry_points.txt +0 -0
  86. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/licenses/COPYING +0 -0
  87. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/top_level.txt +0 -0
ert/gui/tools/plot/plot_api.py CHANGED
@@ -18,9 +18,12 @@ from pandas.api.types import is_numeric_dtype
  from pandas.errors import ParserError
  from resfo_utilities import history_key

- from ert.config import ParameterConfig, ResponseMetadata
- from ert.services import ErtServer
+ from ert.config import ParameterConfig
+ from ert.config.ensemble_config import ResponseConfig
+ from ert.config.known_response_types import KnownResponseTypes
+ from ert.services import create_ertserver_client
  from ert.storage.local_experiment import _parameters_adapter as parameter_config_adapter
+ from ert.storage.local_experiment import _responses_adapter as response_config_adapter
  from ert.storage.realization_storage_state import RealizationStorageState

  logger = logging.getLogger(__name__)
@@ -46,18 +49,18 @@ class PlotApiKeyDefinition(NamedTuple):
      metadata: dict[Any, Any]
      filter_on: dict[Any, Any] | None = None
      parameter: ParameterConfig | None = None
-     response_metadata: ResponseMetadata | None = None
+     response: ResponseConfig | None = None


  class PlotApi:
      def __init__(self, ens_path: Path) -> None:
-         self.ens_path = ens_path
+         self.ens_path: Path = ens_path
          self._all_ensembles: list[EnsembleObject] | None = None
          self._timeout = 120

      @property
      def api_version(self) -> str:
-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              try:
                  response = client.get("/version", timeout=self._timeout)
                  self._check_response(response)
@@ -83,7 +86,7 @@ class PlotApi:
              return self._all_ensembles

          self._all_ensembles = []
-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              try:
                  response = client.get("/experiments", timeout=self._timeout)
                  self._check_response(response)
@@ -139,7 +142,7 @@ class PlotApi:
          all_keys: dict[str, PlotApiKeyDefinition] = {}
          all_params = {}

-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              response = client.get("/experiments", timeout=self._timeout)
              self._check_response(response)

@@ -166,7 +169,7 @@ class PlotApi:
      def responses_api_key_defs(self) -> list[PlotApiKeyDefinition]:
          key_defs: dict[str, PlotApiKeyDefinition] = {}

-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              response = client.get("/experiments", timeout=self._timeout)
              self._check_response(response)

@@ -176,22 +179,26 @@ class PlotApi:
              key_defs[plot_key_def.key] = plot_key_def

          for experiment in response.json():
-             for response_type, response_metadatas in experiment[
-                 "responses"
-             ].items():
-                 for metadata in response_metadatas:
-                     key = metadata["response_key"]
+             for response_type, metadata in experiment["responses"].items():
+                 response_config: KnownResponseTypes = (
+                     response_config_adapter.validate_python(metadata)
+                 )
+                 keys = response_config.keys
+                 for key in keys:
                      has_obs = (
                          response_type in experiment["observations"]
                          and key in experiment["observations"][response_type]
                      )
-                     if metadata["filter_on"]:
+                     if response_config.filter_on is not None:
                          # Only assume one filter_on, this code is to be
                          # considered a bit "temp".
                          # In general, we could create a dropdown per
                          # filter_on on the frontend side
-                         for filter_key, values in metadata["filter_on"].items():
+
+                         filter_for_key = response_config.filter_on.get(key, {})
+                         for filter_key, values in filter_for_key.items():
                              for v in values:
+                                 filter_on = {filter_key: v}
                                  subkey = f"{key}@{v}"
                                  update_keydef(
                                      PlotApiKeyDefinition(
@@ -202,10 +209,8 @@ class PlotApi:
                                          metadata={
                                              "data_origin": response_type,
                                          },
-                                         filter_on={filter_key: v},
-                                         response_metadata=ResponseMetadata(
-                                             **metadata
-                                         ),
+                                         filter_on=filter_on,
+                                         response=response_config,
                                      )
                                  )

@@ -216,7 +221,7 @@ class PlotApi:
                                  observations=has_obs,
                                  dimensionality=2,
                                  metadata={"data_origin": response_type},
-                                 response_metadata=ResponseMetadata(**metadata),
+                                 response=response_config,
                              )
                          )

@@ -228,7 +233,9 @@ class PlotApi:
          response_key: str,
          filter_on: dict[str, Any] | None = None,
      ) -> pd.DataFrame:
-         with ErtServer.session(project=self.ens_path) as client:
+         if "@" in response_key:
+             response_key = response_key.split("@", maxsplit=1)[0]
+         with create_ertserver_client(self.ens_path) as client:
              response = client.get(
                  f"/ensembles/{ensemble_id}/responses/{PlotApi.escape(response_key)}",
                  headers={"accept": "application/x-parquet"},
@@ -256,7 +263,7 @@ class PlotApi:
              return df

      def data_for_parameter(self, ensemble_id: str, parameter_key: str) -> pd.DataFrame:
-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              parameter = client.get(
                  f"/ensembles/{ensemble_id}/parameters/{PlotApi.escape(parameter_key)}",
                  headers={"accept": "application/x-parquet"},
@@ -294,11 +301,12 @@ class PlotApi:
          )
          if not key_def:
              raise httpx.RequestError(f"Response key {key_def} not found")
-
-         assert key_def.response_metadata is not None
-         actual_response_key = key_def.response_metadata.response_key
+         assert key_def.response is not None
+         actual_response_key = key
+         if "@" in actual_response_key:
+             actual_response_key = key.split("@", maxsplit=1)[0]
          filter_on = key_def.filter_on
-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              response = client.get(
                  f"/ensembles/{ensemble.id}/responses/{PlotApi.escape(actual_response_key)}/observations",
                  timeout=self._timeout,
@@ -386,7 +394,7 @@ class PlotApi:
          if not ensemble:
              return np.array([])

-         with ErtServer.session(project=self.ens_path) as client:
+         with create_ertserver_client(self.ens_path) as client:
              response = client.get(
                  f"/ensembles/{ensemble.id}/parameters/{PlotApi.escape(key)}/std_dev",
                  params={"z": z},
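Note: the sketch below is not part of the package; it only illustrates the client migration running through plot_api.py above, where `ErtServer.session(project=...)` is replaced by `create_ertserver_client(...)` used as a context manager. The `/experiments` endpoint and the httpx-style response handling mirror the usage visible in the diff; the wrapper function itself is hypothetical.

    from pathlib import Path

    from ert.services import create_ertserver_client  # new in 20.0.0b1, replaces ErtServer.session


    def fetch_experiments(ens_path: Path, timeout: int = 120) -> list[dict]:
        # 19.0.1:   with ErtServer.session(project=ens_path) as client: ...
        # 20.0.0b1: the client is created directly from the storage path.
        with create_ertserver_client(ens_path) as client:
            response = client.get("/experiments", timeout=timeout)
            response.raise_for_status()  # assumption: the client yields httpx-style responses
            return response.json()
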
ert/gui/tools/plot/plot_widget.py CHANGED
@@ -153,6 +153,7 @@ class PlotWidget(QWidget):
          # only for histogram plot see _sync_log_checkbox
          self._log_checkbox.setVisible(False)
          self._log_checkbox.setToolTip("Toggle data domain to log scale and back")
+         self._log_checkbox.clicked.connect(self.logLogScaleButtonUsage)

          log_checkbox_row = QHBoxLayout()
          log_checkbox_row.addWidget(self._log_checkbox)
@@ -193,6 +194,10 @@ class PlotWidget(QWidget):
      def name(self) -> str:
          return self._name

+     def logLogScaleButtonUsage(self) -> None:
+         logger.info(f"Plotwidget utility used: 'Log scale button' in tab '{self.name}'")
+         self._log_checkbox.clicked.disconnect()  # Log only once
+
      def updatePlot(
          self,
          plot_context: "PlotContext",
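Note: a self-contained sketch (hypothetical widget, not from the package) of the log-once pattern added to PlotWidget above: the slot connected to `clicked` logs the first use and then disconnects itself.

    import logging

    from PyQt6.QtWidgets import QCheckBox

    logger = logging.getLogger(__name__)


    class UsageLoggingCheckBox(QCheckBox):
        """Logs only the first click, mirroring logLogScaleButtonUsage above."""

        def __init__(self, label: str) -> None:
            super().__init__(label)
            self.clicked.connect(self._log_first_use)

        def _log_first_use(self) -> None:
            logger.info(f"Checkbox '{self.text()}' used")
            self.clicked.disconnect(self._log_first_use)  # log only once
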
ert/gui/tools/plot/plot_window.py CHANGED
@@ -25,7 +25,7 @@ from PyQt6.QtWidgets import (
  from ert.config.field import Field
  from ert.dark_storage.common import get_storage_api_version
  from ert.gui.ertwidgets import CopyButton, showWaitCursorWhileWaiting
- from ert.services._base_service import ServerBootFail
+ from ert.services import ServerBootFail
  from ert.utils import log_duration

  from .customize import PlotCustomizer
@@ -267,10 +267,10 @@ class PlotWindow(QMainWindow):
          ensemble_to_data_map: dict[EnsembleObject, pd.DataFrame] = {}
          for ensemble in selected_ensembles:
              try:
-                 if key_def.response_metadata is not None:
+                 if key_def.response is not None:
                      ensemble_to_data_map[ensemble] = self._api.data_for_response(
                          ensemble_id=ensemble.id,
-                         response_key=key_def.response_metadata.response_key,
+                         response_key=key,
                          filter_on=key_def.filter_on,
                      )
                  elif (
@@ -348,10 +348,7 @@ class PlotWindow(QMainWindow):
                  handle_exception(e)
                  plot_context.history_data = None

-         if (
-             key_def.response_metadata is not None
-             and key_def.response_metadata.response_type == "rft"
-         ):
+         if key_def.response is not None and key_def.response.type == "rft":
              plot_context.setXLabel(key.split(":")[-1])
              plot_context.setYLabel("TVD")
              plot_context.depth_y_axis = True
ert/run_models/ensemble_experiment.py CHANGED
@@ -1,16 +1,14 @@
  from __future__ import annotations

  import logging
- from typing import ClassVar, cast
+ from typing import ClassVar
  from uuid import UUID

  from pydantic import PrivateAttr

  from ert.config import (
-     ParameterConfig,
      PostExperimentFixtures,
      PreExperimentFixtures,
-     ResponseConfig,
  )
  from ert.ensemble_evaluator import EvaluatorServerConfig
  from ert.run_models.initial_ensemble_run_model import (
@@ -54,13 +52,8 @@ class EnsembleExperiment(InitialEnsembleRunModel, EnsembleExperimentConfig):
          self.run_workflows(fixtures=PreExperimentFixtures(random_seed=self.random_seed))

          experiment_storage = self._storage.create_experiment(
-             parameters=cast(list[ParameterConfig], self.parameter_configuration),
-             observations={k: v.to_polars() for k, v in self.observations.items()}
-             if self.observations is not None
-             else None,
-             responses=cast(list[ResponseConfig], self.response_configuration),
+             experiment_config=self.model_dump(mode="json"),
              name=self.experiment_name,
-             templates=self.ert_templates,
          )

          ensemble_storage = self._storage.create_ensemble(
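Note: `create_experiment` now takes a single `experiment_config=self.model_dump(mode="json")` instead of separate parameter, response, observation and template arguments. The sketch below (hypothetical model, not the real run model config) only illustrates what pydantic's `mode="json"` does: non-JSON types such as datetimes are serialized so the resulting dict can be stored as-is.

    from datetime import datetime

    from pydantic import BaseModel


    class RunConfigLike(BaseModel):  # hypothetical stand-in for a run model config
        experiment_name: str
        started_at: datetime


    cfg = RunConfigLike(experiment_name="demo", started_at=datetime(2024, 1, 1))

    cfg.model_dump()             # {'experiment_name': 'demo', 'started_at': datetime.datetime(2024, 1, 1, 0, 0)}
    cfg.model_dump(mode="json")  # {'experiment_name': 'demo', 'started_at': '2024-01-01T00:00:00'}
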
ert/run_models/ensemble_smoother.py CHANGED
@@ -2,16 +2,13 @@ from __future__ import annotations

  import functools
  import logging
- from typing import cast

  import numpy as np
  from pydantic import PrivateAttr

  from ert.config import (
-     ParameterConfig,
      PostExperimentFixtures,
      PreExperimentFixtures,
-     ResponseConfig,
  )
  from ert.ensemble_evaluator import EvaluatorServerConfig
  from ert.run_models.initial_ensemble_run_model import (
@@ -48,13 +45,8 @@ class EnsembleSmoother(InitialEnsembleRunModel, UpdateRunModel, EnsembleSmoother
          self.run_workflows(fixtures=PreExperimentFixtures(random_seed=self.random_seed))

          experiment_storage = self._storage.create_experiment(
-             parameters=cast(list[ParameterConfig], self.parameter_configuration),
-             observations={k: v.to_polars() for k, v in self.observations.items()}
-             if self.observations is not None
-             else None,
-             responses=cast(list[ResponseConfig], self.response_configuration),
+             experiment_config=self.model_dump(mode="json"),
              name=self.experiment_name,
-             templates=self.ert_templates,
          )

          prior = self._storage.create_ensemble(
ert/run_models/everest_run_model.py CHANGED
@@ -17,11 +17,11 @@ from enum import IntEnum, auto
  from functools import cached_property
  from pathlib import Path
  from types import TracebackType
- from typing import TYPE_CHECKING, Any, Protocol, cast
+ from typing import TYPE_CHECKING, Annotated, Any, Protocol

  import numpy as np
  from numpy.typing import NDArray
- from pydantic import PrivateAttr, ValidationError
+ from pydantic import Field, PrivateAttr, TypeAdapter, ValidationError
  from ropt.enums import ExitCode as RoptExitCode
  from ropt.evaluator import EvaluatorContext, EvaluatorResult
  from ropt.results import FunctionResults, Results
@@ -36,7 +36,6 @@ from ert.config import (
      GenDataConfig,
      HookRuntime,
      KnownQueueOptionsAdapter,
-     ParameterConfig,
      QueueConfig,
      ResponseConfig,
      SummaryConfig,
@@ -222,12 +221,24 @@ def _get_workflows(
      return res_hooks, res_workflows


+ EverestResponseTypes = (
+     EverestObjectivesConfig | EverestConstraintsConfig | SummaryConfig | GenDataConfig
+ )
+
+ EverestResponseTypesAdapter = TypeAdapter(  # type: ignore
+     Annotated[
+         EverestResponseTypes,
+         Field(discriminator="type"),
+     ]
+ )
+
+
  class EverestRunModelConfig(RunModelConfig):
      optimization_output_dir: str
      simulation_dir: str

-     parameter_configuration: list[ParameterConfig]
-     response_configuration: list[ResponseConfig]
+     parameter_configuration: list[EverestControl]
+     response_configuration: list[EverestResponseTypes]
      input_constraints: list[InputConstraintConfig]
      optimization: OptimizationConfig
@@ -542,7 +553,7 @@ class EverestRunModel(RunModel, EverestRunModelConfig):

          # There will and must always be one EverestControl config for an
          # Everest optimization.
-         return cast(list[EverestControl], controls)
+         return controls

      @cached_property
      def _transforms(self) -> EverestOptModelTransforms:
@@ -676,9 +687,7 @@ class EverestRunModel(RunModel, EverestRunModelConfig):
          self._eval_server_cfg = evaluator_server_config

          self._experiment = self._experiment or self._storage.create_experiment(
-             name=self.experiment_name,
-             parameters=self.parameter_configuration,
-             responses=self.response_configuration,
+             name=self.experiment_name, experiment_config=self.model_dump(mode="json")
          )

          self._experiment.status = ExperimentStatus(
@@ -764,7 +773,7 @@ class EverestRunModel(RunModel, EverestRunModelConfig):

      def _create_optimizer(self) -> tuple[BasicOptimizer, list[float]]:
          enopt_config, initial_guesses = everest2ropt(
-             cast(list[EverestControl], self.parameter_configuration),
+             self.parameter_configuration,
              self.objectives_config,
              self.input_constraints,
              self.output_constraints_config,
@@ -831,19 +840,18 @@ class EverestRunModel(RunModel, EverestRunModelConfig):

          ensemble.save_everest_realization_info(realization_info)

-         for sim_id in range(sim_to_control_vector.shape[0]):
-             sim_controls = sim_to_control_vector[sim_id]
-             offset = 0
-             for control_config in self._everest_control_configs:
-                 n_param_keys = len(control_config.parameter_keys)
-
-                 # Save controls to ensemble
-                 ensemble.save_parameters_numpy(
-                     sim_controls[offset : (offset + n_param_keys)].reshape(-1, 1),
-                     control_config.name,
-                     np.array([sim_id]),
-                 )
-                 offset += n_param_keys
+         iens = sim_to_control_vector.shape[0]
+         offset = 0
+         for control_config in self._everest_control_configs:
+             n_param_keys = len(control_config.parameter_keys)
+             name = control_config.name
+             parameters = sim_to_control_vector[:, offset : offset + n_param_keys]
+             ensemble.save_parameters_numpy(
+                 parameters.reshape(-1, n_param_keys),
+                 name,
+                 np.arange(iens),
+             )
+             offset += n_param_keys

          # Evaluate the batch:
          run_args = self._get_run_args(
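Note: `EverestResponseTypesAdapter` above is a pydantic discriminated-union `TypeAdapter`. The self-contained sketch below (hypothetical models, not the real ert config classes) shows how such an adapter routes a plain dict to the right model based on its `type` field.

    from typing import Annotated, Literal

    from pydantic import BaseModel, Field, TypeAdapter


    class SummaryLike(BaseModel):
        type: Literal["summary"]
        keys: list[str]


    class GenDataLike(BaseModel):
        type: Literal["gen_data"]
        keys: list[str]


    ResponseAdapter = TypeAdapter(
        Annotated[SummaryLike | GenDataLike, Field(discriminator="type")]
    )

    # The "type" field decides which model validates the input.
    config = ResponseAdapter.validate_python({"type": "summary", "keys": ["FOPR"]})
    assert isinstance(config, SummaryLike)
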
ert/run_models/initial_ensemble_run_model.py CHANGED
@@ -3,15 +3,17 @@ from typing import Annotated, Any, Literal, Self
  import numpy as np
  import polars as pl
  from polars.datatypes import DataTypeClass
- from pydantic import BaseModel, Field, field_validator
+ from pydantic import BaseModel, Field

  from ert.config import (
      EverestControl,
      GenKwConfig,
      KnownResponseTypes,
+     Observation,
      SurfaceConfig,
  )
  from ert.config import Field as FieldConfig
+ from ert.config._create_observation_dataframes import create_observation_dataframes
  from ert.ensemble_evaluator.config import EvaluatorServerConfig
  from ert.run_arg import create_run_arguments
  from ert.run_models.run_model import RunModel, RunModelConfig
@@ -67,27 +69,7 @@ class InitialEnsembleRunModelConfig(RunModelConfig):
          ]
      ]
      ert_templates: list[tuple[str, str]]
-     observations: dict[str, DictEncodedDataFrame] | None = None
-
-     @field_validator("observations", mode="before")
-     @classmethod
-     def make_dict_encoded_observations(
-         cls, v: dict[str, pl.DataFrame | DictEncodedDataFrame | dict[str, Any]] | None
-     ) -> dict[str, DictEncodedDataFrame] | None:
-         if v is None:
-             return None
-
-         encoded = {}
-         for k, df in v.items():
-             match df:
-                 case DictEncodedDataFrame():
-                     encoded[k] = df
-                 case pl.DataFrame():
-                     encoded[k] = DictEncodedDataFrame.from_polars(df)
-                 case dict():
-                     encoded[k] = DictEncodedDataFrame.model_validate(df)
-
-         return encoded
+     observations: list[Observation] | None = None


  class InitialEnsembleRunModel(RunModel, InitialEnsembleRunModelConfig):
@@ -101,6 +83,7 @@ class InitialEnsembleRunModel(RunModel, InitialEnsembleRunModelConfig):
              np.where(self.active_realizations)[0],
              parameters=[param.name for param in self.parameter_configuration],
              random_seed=self.random_seed,
+             num_realizations=self.runpath_config.num_realizations,
              design_matrix_df=self.design_matrix.to_polars()
              if self.design_matrix is not None
              else None,
@@ -117,3 +100,17 @@ class InitialEnsembleRunModel(RunModel, InitialEnsembleRunModelConfig):
              evaluator_server_config,
          )
          return ensemble_storage
+
+     def observation_dataframes(self) -> dict[str, pl.DataFrame]:
+         if self.observations is None:
+             return {}
+
+         rft_config = next(
+             (r for r in self.response_configuration if r.type == "rft"),
+             None,
+         )
+
+         return create_observation_dataframes(
+             observations=self.observations,
+             rft_config=rft_config,
+         )
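Note: `observation_dataframes` above picks the (at most one) RFT response config with `next(generator, default)`. A minimal, self-contained illustration of that idiom with hypothetical types:

    from dataclasses import dataclass


    @dataclass
    class ResponseLike:  # hypothetical stand-in for a response config
        type: str


    configs = [ResponseLike("summary"), ResponseLike("gen_data"), ResponseLike("rft")]

    # First element matching the predicate, or None if there is no match.
    rft_config = next((r for r in configs if r.type == "rft"), None)
    assert rft_config is not None and rft_config.type == "rft"
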
ert/run_models/manual_update.py CHANGED
@@ -46,13 +46,19 @@ class ManualUpdate(UpdateRunModel, ManualUpdateConfig):
          self.set_env_key("_ERT_EXPERIMENT_ID", str(prior_experiment.id))
          self.set_env_key("_ERT_ENSEMBLE_ID", str(self._prior.id))

+         experiment_config = self.model_dump(mode="json") | {
+             "parameter_configuration": prior_experiment.experiment_config[
+                 "parameter_configuration"
+             ],
+             "response_configuration": prior_experiment.experiment_config[
+                 "response_configuration"
+             ],
+             "observations": prior_experiment.experiment_config["observations"],
+         }
+
          target_experiment = self._storage.create_experiment(
-             parameters=list(prior_experiment.parameter_configuration.values()),
-             responses=list(prior_experiment.response_configuration.values()),
-             observations=prior_experiment.observations,
-             simulation_arguments=prior_experiment.metadata,
+             experiment_config=experiment_config,
              name=f"Manual update of {self._prior.name}",
-             templates=self.ert_templates,
          )
          self.update(
              self._prior,
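Note: the `experiment_config` above is built with the dict union operator `|`, where the right-hand side overrides keys from the model dump. A minimal illustration with made-up values (the key names match the diff, the contents are invented):

    model_dump = {
        "experiment_name": "manual update",
        "parameter_configuration": [],
        "observations": None,
    }
    prior_config = {
        "parameter_configuration": [{"name": "PORO", "type": "gen_kw"}],
        "observations": [],
    }

    experiment_config = model_dump | {
        "parameter_configuration": prior_config["parameter_configuration"],
        "observations": prior_config["observations"],
    }

    assert experiment_config["experiment_name"] == "manual update"  # kept from the left side
    assert experiment_config["parameter_configuration"]             # overridden by the prior
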
ert/run_models/model_factory.py CHANGED
@@ -153,7 +153,7 @@ def _setup_single_test_run(
          log_path=config.analysis_config.log_path,
          storage_path=config.ens_path,
          queue_config=config.queue_config.create_local_copy(),
-         observations=config.observations,
+         observations=config.observation_declarations,
      )

      return SingleTestRun(
@@ -212,7 +212,7 @@ def _setup_ensemble_experiment(
          log_path=config.analysis_config.log_path,
          storage_path=config.ens_path,
          queue_config=config.queue_config,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )

      return EnsembleExperiment(
@@ -305,7 +305,7 @@ def _setup_manual_update(
          hooked_workflows=config.hooked_workflows,
          log_path=config.analysis_config.log_path,
          ert_templates=config.ert_templates,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )
      return ManualUpdate(**runmodel_config.model_dump(), status_queue=status_queue)

@@ -343,7 +343,7 @@ def _setup_manual_update_enif(
          substitutions=config.substitutions,
          hooked_workflows=config.hooked_workflows,
          log_path=config.analysis_config.log_path,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )


@@ -389,7 +389,7 @@ def _setup_ensemble_smoother(
          substitutions=config.substitutions,
          hooked_workflows=config.hooked_workflows,
          log_path=config.analysis_config.log_path,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )
      return EnsembleSmoother(**runmodel_config.model_dump(), status_queue=status_queue)

@@ -435,7 +435,7 @@ def _setup_ensemble_information_filter(
          substitutions=config.substitutions,
          hooked_workflows=config.hooked_workflows,
          log_path=config.analysis_config.log_path,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )
      return EnsembleInformationFilter(
          **runmodel_config.model_dump(), status_queue=status_queue
@@ -507,7 +507,7 @@ def _setup_multiple_data_assimilation(
          substitutions=config.substitutions,
          hooked_workflows=config.hooked_workflows,
          log_path=config.analysis_config.log_path,
-         observations=config.observations,
+         observations=config.observation_declarations,
      )
      return MultipleDataAssimilation(
          **runmodel_config.model_dump(), status_queue=status_queue
ert/run_models/multiple_data_assimilation.py CHANGED
@@ -2,16 +2,14 @@ from __future__ import annotations

  import functools
  import logging
- from typing import Any, ClassVar, cast
+ from typing import Any, ClassVar
  from uuid import UUID

  from pydantic import PrivateAttr

  from ert.config import (
-     ParameterConfig,
      PostExperimentFixtures,
      PreExperimentFixtures,
-     ResponseConfig,
  )
  from ert.ensemble_evaluator import EvaluatorServerConfig
  from ert.run_models.initial_ensemble_run_model import (
@@ -97,12 +95,8 @@ class MultipleDataAssimilation(
                      f"restart iteration = {prior.iteration + 1}"
                  )
              target_experiment = self._storage.create_experiment(
-                 parameters=list(prior.experiment.parameter_configuration.values()),
-                 responses=list(prior.experiment.response_configuration.values()),
-                 observations=prior.experiment.observations,
-                 simulation_arguments=prior.experiment.metadata,
+                 experiment_config=self.model_dump(mode="json"),
                  name=f"Restart from {prior.name}",
-                 templates=self.ert_templates,
              )

          except (KeyError, ValueError) as err:
@@ -113,16 +107,9 @@
              self.run_workflows(
                  fixtures=PreExperimentFixtures(random_seed=self.random_seed),
              )
-             sim_args = {"weights": self.weights}
              experiment_storage = self._storage.create_experiment(
-                 parameters=cast(list[ParameterConfig], self.parameter_configuration),
-                 observations={k: v.to_polars() for k, v in self.observations.items()}
-                 if self.observations is not None
-                 else None,
-                 responses=cast(list[ResponseConfig], self.response_configuration),
+                 experiment_config=self.model_dump(mode="json"),
                  name=self.experiment_name,
-                 templates=self.ert_templates,
-                 simulation_arguments=sim_args,
              )

              prior = self._storage.create_ensemble(
ert/sample_prior.py CHANGED
@@ -21,6 +21,7 @@ def sample_prior(
      ensemble: Ensemble,
      active_realizations: Iterable[int],
      random_seed: int,
+     num_realizations: int,
      parameters: list[str] | None = None,
      design_matrix_df: pl.DataFrame | None = None,
  ) -> None:
@@ -65,21 +66,18 @@
                  f"Sampling parameter {config_node.name} "
                  f"for realizations {active_realizations}"
              )
-             datasets = [
-                 Ensemble.sample_parameter(
-                     config_node,
-                     realization_nr,
-                     random_seed=random_seed,
-                 )
-                 for realization_nr in active_realizations
-             ]
-             if datasets:
-                 dataset = pl.concat(datasets, how="vertical")
+             dataset = Ensemble.sample_parameter(
+                 config_node,
+                 list(active_realizations),
+                 random_seed=random_seed,
+                 num_realizations=num_realizations,
+             )
+             if not (dataset is None or dataset.is_empty()):
+                 if complete_dataset is None:
+                     complete_dataset = dataset
+                 elif dataset is not None:
+                     complete_dataset = complete_dataset.join(dataset, on="realization")

-             if complete_dataset is None:
-                 complete_dataset = dataset
-             elif dataset is not None:
-                 complete_dataset = complete_dataset.join(dataset, on="realization")
          else:
              for realization_nr in active_realizations:
                  ds = config_node.read_from_runpath(Path(), realization_nr, 0)
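Note: after the change above, each parameter group is sampled once for all active realizations and the per-parameter frames are combined by joining on the "realization" column. A self-contained polars illustration (column names are invented):

    import polars as pl

    poro = pl.DataFrame({"realization": [0, 1, 2], "PORO": [0.31, 0.28, 0.35]})
    perm = pl.DataFrame({"realization": [0, 1, 2], "PERMX": [120.0, 95.0, 210.0]})

    complete_dataset: pl.DataFrame | None = None
    for dataset in (poro, perm):
        if dataset.is_empty():
            continue
        complete_dataset = (
            dataset
            if complete_dataset is None
            else complete_dataset.join(dataset, on="realization")
        )

    print(complete_dataset)  # one row per realization, one column per sampled parameter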