ert 19.0.1__py3-none-any.whl → 20.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. ert/__main__.py +94 -63
  2. ert/analysis/_es_update.py +11 -14
  3. ert/cli/main.py +1 -1
  4. ert/config/__init__.py +3 -2
  5. ert/config/_create_observation_dataframes.py +52 -375
  6. ert/config/_observations.py +527 -200
  7. ert/config/_read_summary.py +4 -5
  8. ert/config/ert_config.py +52 -117
  9. ert/config/everest_control.py +40 -39
  10. ert/config/everest_response.py +3 -15
  11. ert/config/field.py +4 -76
  12. ert/config/forward_model_step.py +17 -1
  13. ert/config/gen_data_config.py +14 -17
  14. ert/config/observation_config_migrations.py +821 -0
  15. ert/config/parameter_config.py +18 -28
  16. ert/config/parsing/__init__.py +0 -1
  17. ert/config/parsing/_parse_zonemap.py +45 -0
  18. ert/config/parsing/config_keywords.py +1 -0
  19. ert/config/parsing/config_schema.py +2 -0
  20. ert/config/parsing/observations_parser.py +2 -0
  21. ert/config/response_config.py +5 -23
  22. ert/config/rft_config.py +129 -31
  23. ert/config/summary_config.py +1 -13
  24. ert/config/surface_config.py +0 -57
  25. ert/dark_storage/compute/misfits.py +0 -42
  26. ert/dark_storage/endpoints/__init__.py +0 -2
  27. ert/dark_storage/endpoints/experiments.py +2 -5
  28. ert/dark_storage/json_schema/experiment.py +1 -2
  29. ert/field_utils/__init__.py +0 -2
  30. ert/field_utils/field_utils.py +1 -117
  31. ert/gui/ertwidgets/listeditbox.py +9 -1
  32. ert/gui/ertwidgets/models/ertsummary.py +20 -6
  33. ert/gui/ertwidgets/pathchooser.py +9 -1
  34. ert/gui/ertwidgets/stringbox.py +11 -3
  35. ert/gui/ertwidgets/textbox.py +10 -3
  36. ert/gui/ertwidgets/validationsupport.py +19 -1
  37. ert/gui/main_window.py +11 -6
  38. ert/gui/simulation/experiment_panel.py +1 -1
  39. ert/gui/simulation/run_dialog.py +11 -1
  40. ert/gui/tools/manage_experiments/export_dialog.py +4 -0
  41. ert/gui/tools/manage_experiments/manage_experiments_panel.py +1 -0
  42. ert/gui/tools/manage_experiments/storage_info_widget.py +1 -1
  43. ert/gui/tools/manage_experiments/storage_widget.py +21 -4
  44. ert/gui/tools/plot/data_type_proxy_model.py +1 -1
  45. ert/gui/tools/plot/plot_api.py +35 -27
  46. ert/gui/tools/plot/plot_widget.py +5 -0
  47. ert/gui/tools/plot/plot_window.py +4 -7
  48. ert/run_models/ensemble_experiment.py +2 -9
  49. ert/run_models/ensemble_smoother.py +1 -9
  50. ert/run_models/everest_run_model.py +31 -23
  51. ert/run_models/initial_ensemble_run_model.py +19 -22
  52. ert/run_models/manual_update.py +11 -5
  53. ert/run_models/model_factory.py +7 -7
  54. ert/run_models/multiple_data_assimilation.py +3 -16
  55. ert/sample_prior.py +12 -14
  56. ert/scheduler/job.py +24 -4
  57. ert/services/__init__.py +7 -3
  58. ert/services/_storage_main.py +59 -22
  59. ert/services/ert_server.py +186 -24
  60. ert/shared/version.py +3 -3
  61. ert/storage/local_ensemble.py +50 -116
  62. ert/storage/local_experiment.py +94 -109
  63. ert/storage/local_storage.py +10 -12
  64. ert/storage/migration/to24.py +26 -0
  65. ert/storage/migration/to25.py +91 -0
  66. ert/utils/__init__.py +20 -0
  67. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/METADATA +4 -51
  68. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/RECORD +80 -83
  69. everest/bin/everest_script.py +5 -5
  70. everest/bin/kill_script.py +2 -2
  71. everest/bin/monitor_script.py +2 -2
  72. everest/bin/utils.py +4 -4
  73. everest/detached/everserver.py +6 -6
  74. everest/gui/everest_client.py +0 -6
  75. everest/gui/main_window.py +2 -2
  76. everest/util/__init__.py +1 -19
  77. ert/dark_storage/compute/__init__.py +0 -0
  78. ert/dark_storage/endpoints/compute/__init__.py +0 -0
  79. ert/dark_storage/endpoints/compute/misfits.py +0 -95
  80. ert/services/_base_service.py +0 -387
  81. ert/services/webviz_ert_service.py +0 -20
  82. ert/shared/storage/command.py +0 -38
  83. ert/shared/storage/extraction.py +0 -42
  84. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/WHEEL +0 -0
  85. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/entry_points.txt +0 -0
  86. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/licenses/COPYING +0 -0
  87. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/top_level.txt +0 -0
ert/storage/local_ensemble.py
@@ -15,15 +15,18 @@ from typing import TYPE_CHECKING
 from uuid import UUID
 
 import numpy as np
-import pandas as pd
 import polars as pl
 import resfo
 import xarray as xr
 from pydantic import BaseModel
 from typing_extensions import TypedDict
 
-from ert.config import ParameterCardinality, ParameterConfig, SummaryConfig
-from ert.config.response_config import InvalidResponseFile
+from ert.config import (
+    InvalidResponseFile,
+    ParameterCardinality,
+    ParameterConfig,
+    SummaryConfig,
+)
 from ert.substitutions import substitute_runpath_name
 
 from .load_status import LoadResult
@@ -570,30 +573,51 @@ class LocalEnsemble(BaseMode):
     ) -> pl.DataFrame:
         if keys is None:
             keys = self.experiment.parameter_keys
-        elif set(keys) - set(self.experiment.parameter_keys):
-            missing = set(keys) - set(self.experiment.parameter_keys)
-            raise KeyError(f"Parameters not registered to the experiment: {missing}")
 
         df_lazy = self._load_parameters_lazy(SCALAR_FILENAME)
-        df_lazy = df_lazy.select(["realization", *keys])
+        names = df_lazy.collect_schema().names()
+        matches = [key for key in keys if any(key in item for item in names)]
+        if len(matches) != len(keys):
+            missing = set(keys) - set(matches)
+            raise KeyError(f"Parameters not registered to the experiment: {missing}")
+
+        parameter_keys = [
+            key
+            for e in keys
+            for key in self.experiment.parameter_configuration[e].parameter_keys
+        ]
+        df_lazy_filtered = df_lazy.select(["realization", *parameter_keys])
+
         if realizations is not None:
             if isinstance(realizations, int):
                 realizations = np.array([realizations])
-            df_lazy = df_lazy.filter(pl.col("realization").is_in(realizations))
-        df = df_lazy.collect(engine="streaming")
+            df_lazy_filtered = df_lazy_filtered.filter(
+                pl.col("realization").is_in(realizations)
+            )
+        df = df_lazy_filtered.collect(engine="streaming")
         if df.is_empty():
             raise IndexError(
                 f"No matching realizations {realizations} found for {keys}"
             )
 
         if transformed:
+            tmp_configuration: dict[str, ParameterConfig] = {}
+            for key in keys:
+                for col in df.columns:
+                    if col == "realization":
+                        continue
+                    if col.startswith(key):
+                        tmp_configuration[col] = (
+                            self.experiment.parameter_configuration[key]
+                        )
+
             df = df.with_columns(
                 [
                     pl.col(col)
                     .map_elements(
-                        self.experiment.parameter_configuration[col].transform_data(),
-                        return_dtype=df[col].dtype,
+                        tmp_configuration[col].transform_data(),
                     )
+                    .cast(df[col].dtype)
                     .alias(col)
                     for col in df.columns
                     if col != "realization"
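Note on the hunk above: load_scalar_keys now accepts configuration-level keys and expands each one to the individual parameter columns stored in the scalar parquet file, rather than selecting the requested names directly. A minimal sketch of that lookup, independent of ert — the column names (COEFFS:a, COEFFS:b) are hypothetical stand-ins, and the final prefix match approximates what ert reads from ParameterConfig.parameter_keys:

    import polars as pl

    # Hypothetical scalar dataset: one column per individual parameter key.
    df_lazy = pl.LazyFrame(
        {"realization": [0, 1], "COEFFS:a": [0.1, 0.2], "COEFFS:b": [1.0, 2.0]}
    )
    keys = ["COEFFS"]

    # A requested config key counts as present if it occurs in any column name.
    names = df_lazy.collect_schema().names()
    matches = [key for key in keys if any(key in item for item in names)]
    if len(matches) != len(keys):
        missing = set(keys) - set(matches)
        raise KeyError(f"Parameters not registered to the experiment: {missing}")

    # ert expands via ParameterConfig.parameter_keys; prefix-matching the
    # stored columns gives the same result for this toy dataset.
    parameter_keys = [c for c in names if any(c.startswith(k) for k in keys)]
    print(df_lazy.select(["realization", *parameter_keys]).collect())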
@@ -618,13 +642,11 @@ class LocalEnsemble(BaseMode):
             for p in self.experiment.parameter_configuration.values()
             if group in {p.name, p.group_name}
         ]
-
         if not cfgs:
             raise KeyError(f"{group} is not registered to the experiment.")
 
         # if group refers to a group name, we expect the same cardinality
         cardinality = next(cfg.cardinality for cfg in cfgs)
-
         if cardinality == ParameterCardinality.multiple_configs_per_ensemble_dataset:
             return self.load_scalar_keys(
                 [cfg.name for cfg in cfgs], realizations, transformed
@@ -741,20 +763,21 @@ class LocalEnsemble(BaseMode):
     @staticmethod
     def sample_parameter(
         parameter: ParameterConfig,
-        real_nr: int,
+        active_realizations: list[int],
         random_seed: int,
+        num_realizations: int,
     ) -> pl.DataFrame:
-        parameter_value = parameter.sample_value(
-            str(random_seed),
-            real_nr,
+        parameter_values = parameter.sample_values(
+            str(random_seed), active_realizations, num_realizations=num_realizations
         )
 
-        parameter_dict = {parameter.name: parameter_value[0]}
-        parameter_dict["realization"] = real_nr
-        return pl.DataFrame(
-            parameter_dict,
-            schema={parameter.name: pl.Float64, "realization": pl.Int64},
+        parameters = pl.DataFrame(
+            {parameter.name: parameter_values},
+            schema={parameter.name: pl.Float64},
        )
+        realizations_series = pl.Series("realization", list(active_realizations))
+
+        return parameters.with_columns(realizations_series)
 
     def load_responses(self, key: str, realizations: tuple[int, ...]) -> pl.DataFrame:
         """Load responses for key and realizations into xarray Dataset.
@@ -1054,10 +1077,12 @@ class LocalEnsemble(BaseMode):
                 for col, observed_values in observed_cols.items():
                     if col != "time":
                         responses = responses.filter(
-                            pl.col(col).is_in(observed_values.implode())
+                            pl.col(col).is_in(
+                                observed_values.implode(), nulls_equal=True
+                            )
                         )
 
-                pivoted = responses.collect(engine="streaming").pivot(  # noqa: PD010
+                pivoted = responses.collect(engine="streaming").pivot(
                     on="realization",
                     index=["response_key", *response_cls.primary_key],
                     values="values",
@@ -1097,6 +1122,7 @@ class LocalEnsemble(BaseMode):
                     pivoted,
                     how="left",
                     on=["response_key", *response_cls.primary_key],
+                    nulls_equal=True,
                 )
 
                 # Do not drop primary keys which
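Both nulls_equal changes above opt into treating null keys as equal to each other, which matters once the RFT zone column may legitimately hold nulls (see the to24 migration at the end of this diff). A small demonstration of the difference; the keyword argument exists on both is_in and join in recent polars releases, though the exact minimum version is not stated here:

    import polars as pl

    left = pl.DataFrame({"zone": ["A", None], "values": [1.0, 2.0]})
    right = pl.DataFrame({"zone": ["A", None], "obs": [0.9, 2.1]})

    # Default: null keys never match, so the zone=None row gets obs=null.
    print(left.join(right, on="zone", how="left"))

    # With nulls_equal=True the null keys match each other, so obs=2.1.
    print(left.join(right, on="zone", how="left", nulls_equal=True))

    # Same idea for membership tests: the null row now passes the filter.
    print(left.filter(pl.col("zone").is_in(["A", None], nulls_equal=True)))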
@@ -1190,98 +1216,6 @@ class LocalEnsemble(BaseMode):
             self._path / "index.json", self._index.model_dump_json().encode("utf-8")
         )
 
-    @property
-    def all_parameters_and_gen_data(self) -> pl.DataFrame | None:
-        """
-        Only for Everest wrt objectives/constraints,
-        disregards summary data and primary key values
-        """
-        param_dfs = []
-        for param_group in self.experiment.parameter_configuration:
-            params_pd = self.load_parameters(param_group)["values"].to_pandas()
-
-            assert isinstance(params_pd, pd.DataFrame)
-            params_pd = params_pd.reset_index()
-            param_df = pl.from_pandas(params_pd)
-
-            param_columns = [c for c in param_df.columns if c != "realizations"]
-            param_df = param_df.rename(
-                {
-                    **{
-                        c: param_group + "." + c.replace("\0", ".")
-                        for c in param_columns
-                    },
-                    "realizations": "realization",
-                }
-            )
-            param_df = param_df.cast(
-                {
-                    "realization": pl.UInt16,
-                    **{c: pl.Float64 for c in param_df.columns if c != "realization"},
-                }
-            )
-            param_dfs.append(param_df)
-
-        responses = self.load_responses(
-            "gen_data", tuple(self.get_realization_list_with_responses())
-        )
-
-        if responses is None:
-            return pl.concat(param_dfs)
-
-        params_wide = pl.concat(
-            [
-                (
-                    pdf.sort("realization").drop("realization")
-                    if i > 0
-                    else pdf.sort("realization")
-                )
-                for i, pdf in enumerate(param_dfs)
-            ],
-            how="horizontal",
-        )
-
-        responses_wide = responses["realization", "response_key", "values"].pivot(  # noqa: PD010
-            on="response_key", values="values"
-        )
-
-        # If responses are missing for some realizations, this _left_ join will
-        # put null (polars) which maps to nan when doing .to_numpy() into the
-        # response columns for those realizations
-        params_and_responses = params_wide.join(
-            responses_wide, on="realization", how="left"
-        ).with_columns(pl.lit(self.iteration).alias("batch"))
-
-        assert self.everest_realization_info is not None
-
-        model_realization_mapping = {
-            k: v["model_realization"] for k, v in self.everest_realization_info.items()
-        }
-        perturbation_mapping = {
-            k: v["perturbation"] for k, v in self.everest_realization_info.items()
-        }
-
-        params_and_responses = params_and_responses.with_columns(
-            pl.col("realization")
-            .replace(model_realization_mapping)
-            .alias("model_realization"),
-            pl.col("realization")
-            .cast(pl.Int32)
-            .replace(perturbation_mapping)
-            .alias("perturbation"),
-        )
-
-        column_order = [
-            "batch",
-            "model_realization",
-            "perturbation",
-            "realization",
-            *[c for c in responses_wide.columns if c != "realization"],
-            *[c for c in params_wide.columns if c != "realization"],
-        ]
-
-        return params_and_responses[column_order]
-
 
 async def _read_parameters(
     run_path: str,
ert/storage/local_experiment.py
@@ -24,7 +24,10 @@ from ert.config import (
     SurfaceConfig,
 )
 from ert.config import Field as FieldConfig
-from ert.config.parsing.context_values import ContextBoolEncoder
+from ert.config._create_observation_dataframes import (
+    create_observation_dataframes,
+)
+from ert.config._observations import Observation
 
 from .mode import BaseMode, Mode, require_write
 
@@ -54,6 +57,7 @@ class _Index(BaseModel):
     # from a different experiment. For example, a manual update
     # is a separate experiment from the one that created the prior.
     ensembles: list[UUID]
+    experiment: dict[str, Any] = {}
     status: ExperimentStatus | None = Field(default=None)
 
 
@@ -80,9 +84,6 @@ class LocalExperiment(BaseMode):
     arguments. Provides methods to create and access associated ensembles.
     """
 
-    _parameter_file = Path("parameter.json")
-    _responses_file = Path("responses.json")
-    _metadata_file = Path("metadata.json")
     _templates_file = Path("templates.json")
     _index_file = Path("index.json")
 
@@ -118,63 +119,21 @@ class LocalExperiment(BaseMode):
         storage: LocalStorage,
         uuid: UUID,
         path: Path,
-        *,
-        parameters: list[ParameterConfig] | None = None,
-        responses: list[ResponseConfig] | None = None,
-        observations: dict[str, pl.DataFrame] | None = None,
-        simulation_arguments: dict[Any, Any] | None = None,
+        experiment_config: dict[str, Any],
         name: str | None = None,
-        templates: list[tuple[str, str]] | None = None,
     ) -> LocalExperiment:
-        """
-        Create a new LocalExperiment and store its configuration data.
-
-        Parameters
-        ----------
-        storage : LocalStorage
-            Storage instance for experiment creation.
-        uuid : UUID
-            Unique identifier for the new experiment.
-        path : Path
-            File system path for storing experiment data.
-        parameters : list of ParameterConfig, optional
-            List of parameter configurations.
-        responses : list of ResponseConfig, optional
-            List of response configurations.
-        observations : dict of str to encoded observation datasets, optional
-            Observations dictionary.
-        simulation_arguments : SimulationArguments, optional
-            Simulation arguments for the experiment.
-        name : str, optional
-            Experiment name. Defaults to current date if None.
-        templates : list of tuple[str, str], optional
-            Run templates for the experiment. Defaults to None.
-
-        Returns
-        -------
-        local_experiment : LocalExperiment
-            Instance of the newly created experiment.
-        """
         if name is None:
             name = datetime.today().isoformat()
 
         storage._write_transaction(
             path / cls._index_file,
-            _Index(id=uuid, name=name, ensembles=[])
+            _Index(id=uuid, name=name, ensembles=[], experiment=experiment_config)
             .model_dump_json(indent=2, exclude_none=True)
             .encode("utf-8"),
         )
 
-        parameter_data = {}
-        for parameter in parameters or []:
-            parameter.save_experiment_data(path)
-            parameter_data.update({parameter.name: parameter.model_dump(mode="json")})
-        storage._write_transaction(
-            path / cls._parameter_file,
-            json.dumps(parameter_data, indent=2).encode("utf-8"),
-        )
-
-        if templates:
+        templates = experiment_config.get("ert_templates")
+        if templates is not None:
             templates_path = path / "templates"
             templates_path.mkdir(parents=True, exist_ok=True)
             templates_abs: list[tuple[str, str]] = []
@@ -191,27 +150,29 @@ class LocalExperiment(BaseMode):
                 json.dumps(templates_abs).encode("utf-8"),
             )
 
-        response_data = {}
-        for response in responses or []:
-            response_data.update({response.type: response.model_dump(mode="json")})
-        storage._write_transaction(
-            path / cls._responses_file,
-            json.dumps(response_data, default=str, indent=2).encode("utf-8"),
-        )
-
-        if observations:
+        observation_declarations = experiment_config.get("observations")
+        if observation_declarations:
             output_path = path / "observations"
-            output_path.mkdir()
-            for response_type, dataset in observations.items():
-                storage._to_parquet_transaction(
-                    output_path / f"{response_type}", dataset
-                )
+            output_path.mkdir(parents=True, exist_ok=True)
 
-        simulation_data = simulation_arguments or {}
-        storage._write_transaction(
-            path / cls._metadata_file,
-            json.dumps(simulation_data, cls=ContextBoolEncoder).encode("utf-8"),
-        )
+            responses_list = experiment_config.get("response_configuration", [])
+            rft_config_json = next(
+                (r for r in responses_list if r.get("type") == "rft"), None
+            )
+            rft_config = (
+                _responses_adapter.validate_python(rft_config_json)
+                if rft_config_json is not None
+                else None
+            )
+
+            obs_adapter = TypeAdapter(Observation)  # type: ignore
+            obs_objs: list[Observation] = []
+            for od in observation_declarations:
+                obs_objs.append(obs_adapter.validate_python(od))
+
+            datasets = create_observation_dataframes(obs_objs, rft_config)
+            for response_type, df in datasets.items():
+                storage._to_parquet_transaction(output_path / response_type, df)
 
         return cls(storage, path, Mode.WRITE)
 
@@ -285,16 +246,10 @@ class LocalExperiment(BaseMode):
                 return ens
         raise KeyError(f"Ensemble with name '{name}' not found")
 
-    @property
-    def metadata(self) -> dict[str, Any]:
-        path = self.mount_point / self._metadata_file
-        if not path.exists():
-            raise ValueError(f"{self._metadata_file!s} does not exist")
-        return json.loads(path.read_text(encoding="utf-8"))
-
     @property
     def relative_weights(self) -> str:
-        return self.metadata.get("weights", "")
+        assert self.experiment_config is not None
+        return self.experiment_config.get("weights", "")
 
     @property
     def name(self) -> str:
@@ -324,9 +279,8 @@ class LocalExperiment(BaseMode):
 
     @property
     def parameter_info(self) -> dict[str, Any]:
-        return json.loads(
-            (self.mount_point / self._parameter_file).read_text(encoding="utf-8")
-        )
+        parameters_list = self.experiment_config.get("parameter_configuration", [])
+        return {parameter["name"]: parameter for parameter in parameters_list}
 
     @property
     def templates_configuration(self) -> list[tuple[str, str]]:
@@ -348,9 +302,8 @@ class LocalExperiment(BaseMode):
 
     @property
     def response_info(self) -> dict[str, Any]:
-        return json.loads(
-            (self.mount_point / self._responses_file).read_text(encoding="utf-8")
-        )
+        responses_list = self.experiment_config.get("response_configuration", [])
+        return {response["type"]: response for response in responses_list}
 
     def get_surface(self, name: str) -> IrapSurface:
         """
@@ -420,10 +373,50 @@ class LocalExperiment(BaseMode):
 
     @cached_property
     def observations(self) -> dict[str, pl.DataFrame]:
-        observations = sorted(self.mount_point.glob("observations/*"))
+        obs_dir = self.mount_point / "observations"
+
+        if obs_dir.exists():
+            datasets: dict[str, pl.DataFrame] = {}
+            for p in obs_dir.iterdir():
+                if not p.is_file():
+                    continue
+                try:
+                    df = pl.read_parquet(p)
+                except Exception:
+                    continue
+                datasets[p.stem] = df
+            return datasets
+
+        serialized_observations = self.experiment_config.get("observations", None)
+        if not serialized_observations:
+            return {}
+
+        output_path = self.mount_point / "observations"
+        output_path.mkdir(parents=True, exist_ok=True)
+
+        rft_cfg = None
+        try:
+            responses_list = self.experiment_config.get("response_configuration", [])
+            for r in responses_list:
+                if r.get("type") == "rft":
+                    rft_cfg = _responses_adapter.validate_python(r)
+                    break
+        except Exception:
+            rft_cfg = None
+
+        obs_adapter = TypeAdapter(Observation)  # type: ignore
+        obs_objs: list[Observation] = []
+        for od in serialized_observations:
+            obs_objs.append(obs_adapter.validate_python(od))
+
+        datasets = create_observation_dataframes(obs_objs, rft_cfg)
+        for response_type, df in datasets.items():
+            self._storage._to_parquet_transaction(output_path / response_type, df)
+
         return {
-            observation.name: pl.read_parquet(f"{observation}")
-            for observation in observations
+            p.stem: pl.read_parquet(p)
+            for p in (self.mount_point / "observations").iterdir()
+            if p.is_file()
         }
 
     @cached_property
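The property above re-validates serialized observation declarations through a pydantic TypeAdapter before rebuilding the parquet files. A toy round-trip showing the pattern; ToyObservation is a stand-in for ert's Observation union, not its actual schema:

    from pydantic import BaseModel, TypeAdapter

    class ToyObservation(BaseModel):
        name: str
        value: float
        error: float

    adapter = TypeAdapter(ToyObservation)
    declaration = {"name": "WOPR:OP1", "value": 0.5, "error": 0.05}
    obs = adapter.validate_python(declaration)  # raises on malformed input
    print(obs)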
@@ -489,18 +482,22 @@ class LocalExperiment(BaseMode):
         )
 
         config = responses_configuration[response_type]
+
         config.keys = sorted(response_keys)
         config.has_finalized_keys = True
+
+        response_index = next(
+            i
+            for i, c in enumerate(self.experiment_config["response_configuration"])
+            if c["type"] == response_type
+        )
+        self.experiment_config["response_configuration"][response_index] = (
+            config.model_dump(mode="json")
+        )
+
         self._storage._write_transaction(
-            self._path / self._responses_file,
-            json.dumps(
-                {
-                    c.type: c.model_dump(mode="json")
-                    for c in responses_configuration.values()
-                },
-                default=str,
-                indent=2,
-            ).encode("utf-8"),
+            self._path / self._index_file,
+            self._index.model_dump_json(indent=2).encode("utf-8"),
         )
 
         if self.response_key_to_response_type is not None:
@@ -510,17 +507,5 @@ class LocalExperiment(BaseMode):
         del self.response_type_to_response_keys
 
     @property
-    def all_parameters_and_gen_data(self) -> pl.DataFrame | None:
-        if not self.ensembles:
-            return None
-
-        ensemble_dfs = [
-            e.all_parameters_and_gen_data
-            for e in self.ensembles
-            if e.all_parameters_and_gen_data is not None
-        ]
-
-        if not ensemble_dfs:
-            return None
-
-        return pl.concat(ensemble_dfs)
+    def experiment_config(self) -> dict[str, Any]:
+        return self._index.experiment
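With metadata.json, parameter.json, and responses.json gone, index.json becomes the single source of experiment configuration. Roughly the shape implied by the reads in the hunks above — the field names come from the diff, but the concrete values are invented for illustration:

    example_index = {
        "id": "00000000-0000-0000-0000-000000000000",  # invented UUID
        "name": "2025-01-01T00:00:00",
        "ensembles": [],
        "experiment": {
            "parameter_configuration": [{"name": "MY_PARAM"}],
            "response_configuration": [{"type": "gen_data"}],
            "observations": [],
            "ert_templates": None,
            "weights": "4, 2, 1",
        },
        "status": None,
    }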
ert/storage/local_storage.py
@@ -21,7 +21,7 @@ from filelock import FileLock, Timeout
 from pydantic import BaseModel, Field
 
 import ert.storage
-from ert.config import ErtConfig, ParameterConfig, ResponseConfig
+from ert.config import ErtConfig
 from ert.shared import __version__
 
 from .local_ensemble import LocalEnsemble
@@ -31,7 +31,7 @@ from .realization_storage_state import RealizationStorageState
 
 logger = logging.getLogger(__name__)
 
-_LOCAL_STORAGE_VERSION = 23
+_LOCAL_STORAGE_VERSION = 25
 
 
 class _Migrations(BaseModel):
@@ -322,12 +322,8 @@ class LocalStorage(BaseMode):
     @require_write
     def create_experiment(
         self,
-        parameters: list[ParameterConfig] | None = None,
-        responses: list[ResponseConfig] | None = None,
-        observations: dict[str, pl.DataFrame] | None = None,
-        simulation_arguments: dict[Any, Any] | None = None,
+        experiment_config: dict[str, Any] | None = None,
         name: str | None = None,
-        templates: list[tuple[str, str]] | None = None,
     ) -> LocalExperiment:
         """
         Creates a new experiment in the storage.
@@ -352,6 +348,8 @@ class LocalStorage(BaseMode):
         local_experiment : LocalExperiment
             The newly created experiment.
         """
+        if experiment_config is None:
+            experiment_config = {}
 
         exp_id = uuid4()
         path = self._experiment_path(exp_id)
@@ -361,12 +359,8 @@ class LocalStorage(BaseMode):
             self,
             exp_id,
             path,
-            parameters=parameters,
-            responses=responses,
-            observations=observations,
-            simulation_arguments=simulation_arguments,
+            experiment_config=experiment_config,
             name=name,
-            templates=templates,
         )
 
         self._experiments[exp.id] = exp
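Callers now pass one dict instead of six keyword arguments. A hedged usage sketch, assuming ert's open_storage helper; the storage path is hypothetical and the nested keys follow the reads shown in local_experiment.py above:

    from ert.storage import open_storage

    with open_storage("storage", mode="w") as storage:
        experiment = storage.create_experiment(
            experiment_config={
                "parameter_configuration": [],
                "response_configuration": [],
                "observations": [],
            },
            name="my_experiment",
        )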
@@ -519,6 +513,8 @@ class LocalStorage(BaseMode):
             to21,
             to22,
             to23,
+            to24,
+            to25,
         )
 
         try:
@@ -571,6 +567,8 @@ class LocalStorage(BaseMode):
             20: to21,
             21: to22,
             22: to23,
+            23: to24,
+            24: to25,
         }
         for from_version in range(version, _LOCAL_STORAGE_VERSION):
             migrations[from_version].migrate(self.path)
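The dispatch is strictly sequential: a storage at version 23 runs to24 and then to25 before reaching _LOCAL_STORAGE_VERSION (25). Spelled out as a standalone illustration of the loop above:

    migrations = {23: "to24", 24: "to25"}  # module names, per the hunk above
    version = 23
    for from_version in range(version, 25):
        print(f"running {migrations[from_version]}")  # to24, then to25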
ert/storage/migration/to24.py (new file)
@@ -0,0 +1,26 @@
+from pathlib import Path
+
+import polars as pl
+
+info = "Add default None values to RFT observations and responses"
+
+
+def migrate(path: Path) -> None:
+    for rft_obs in path.glob("experiments/*/observations/rft"):
+        rft_obs_df = pl.read_parquet(rft_obs)
+
+        if "zone" not in rft_obs_df.columns:
+            rft_obs_df = rft_obs_df.with_columns(
+                pl.lit(None, dtype=pl.String).alias("zone")
+            )
+
+        rft_obs_df.write_parquet(rft_obs)
+
+    for rft_response in path.glob("ensembles/*/*/rft.parquet"):
+        rft_response_df = pl.read_parquet(rft_response)
+
+        if "zone" not in rft_response_df.columns:
+            rft_response_df = rft_response_df.with_columns(
+                pl.lit(None, dtype=pl.String).alias("zone")
+            )
+        rft_response_df.write_parquet(rft_response)
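A quick standalone check of what this migration does to a frame that lacks the zone column; the well/pressure columns are invented:

    import polars as pl

    df = pl.DataFrame({"well": ["W1"], "pressure": [211.5]})
    if "zone" not in df.columns:
        df = df.with_columns(pl.lit(None, dtype=pl.String).alias("zone"))
    print(df.schema)  # {'well': String, 'pressure': Float64, 'zone': String}
    # The single row now carries zone = null, matching what to24 writes back.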