ert-17.0.0-py3-none-any.whl → ert-19.0.0rc2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _ert/events.py +19 -2
- _ert/forward_model_runner/client.py +6 -2
- ert/__main__.py +28 -13
- ert/analysis/_enif_update.py +8 -4
- ert/analysis/_es_update.py +19 -6
- ert/analysis/_update_commons.py +16 -6
- ert/cli/main.py +13 -6
- ert/cli/monitor.py +7 -0
- ert/config/__init__.py +15 -6
- ert/config/_create_observation_dataframes.py +117 -20
- ert/config/_get_num_cpu.py +1 -1
- ert/config/_observations.py +91 -2
- ert/config/_read_summary.py +8 -6
- ert/config/design_matrix.py +51 -24
- ert/config/distribution.py +1 -1
- ert/config/ensemble_config.py +9 -17
- ert/config/ert_config.py +103 -19
- ert/config/everest_control.py +234 -0
- ert/config/{everest_objective_config.py → everest_response.py} +24 -15
- ert/config/field.py +96 -84
- ert/config/forward_model_step.py +122 -17
- ert/config/gen_data_config.py +5 -10
- ert/config/gen_kw_config.py +5 -35
- ert/config/known_response_types.py +14 -0
- ert/config/parameter_config.py +1 -33
- ert/config/parsing/_option_dict.py +10 -2
- ert/config/parsing/config_keywords.py +2 -0
- ert/config/parsing/config_schema.py +23 -3
- ert/config/parsing/config_schema_deprecations.py +3 -14
- ert/config/parsing/config_schema_item.py +26 -11
- ert/config/parsing/context_values.py +3 -3
- ert/config/parsing/file_context_token.py +1 -1
- ert/config/parsing/observations_parser.py +6 -2
- ert/config/parsing/queue_system.py +9 -0
- ert/config/parsing/schema_item_type.py +1 -0
- ert/config/queue_config.py +4 -5
- ert/config/response_config.py +0 -8
- ert/config/rft_config.py +275 -0
- ert/config/summary_config.py +3 -8
- ert/config/surface_config.py +59 -16
- ert/config/workflow_fixtures.py +2 -1
- ert/dark_storage/client/__init__.py +2 -2
- ert/dark_storage/client/_session.py +4 -4
- ert/dark_storage/client/client.py +2 -2
- ert/dark_storage/common.py +1 -1
- ert/dark_storage/compute/misfits.py +11 -7
- ert/dark_storage/endpoints/compute/misfits.py +6 -4
- ert/dark_storage/endpoints/experiment_server.py +12 -9
- ert/dark_storage/endpoints/experiments.py +2 -2
- ert/dark_storage/endpoints/observations.py +8 -6
- ert/dark_storage/endpoints/parameters.py +2 -18
- ert/dark_storage/endpoints/responses.py +24 -5
- ert/dark_storage/json_schema/experiment.py +1 -1
- ert/data/_measured_data.py +6 -5
- ert/ensemble_evaluator/__init__.py +8 -1
- ert/ensemble_evaluator/config.py +2 -1
- ert/ensemble_evaluator/evaluator.py +81 -29
- ert/ensemble_evaluator/event.py +6 -0
- ert/ensemble_evaluator/snapshot.py +3 -1
- ert/ensemble_evaluator/state.py +1 -0
- ert/field_utils/__init__.py +8 -0
- ert/field_utils/field_utils.py +212 -3
- ert/field_utils/roff_io.py +1 -1
- ert/gui/__init__.py +5 -2
- ert/gui/ertnotifier.py +1 -1
- ert/gui/ertwidgets/__init__.py +23 -16
- ert/gui/ertwidgets/analysismoduleedit.py +2 -2
- ert/gui/ertwidgets/checklist.py +1 -1
- ert/gui/ertwidgets/create_experiment_dialog.py +3 -1
- ert/gui/ertwidgets/ensembleselector.py +2 -2
- ert/gui/ertwidgets/models/__init__.py +2 -0
- ert/gui/ertwidgets/models/activerealizationsmodel.py +2 -1
- ert/gui/ertwidgets/models/path_model.py +1 -1
- ert/gui/ertwidgets/models/targetensemblemodel.py +2 -1
- ert/gui/ertwidgets/models/text_model.py +1 -1
- ert/gui/ertwidgets/pathchooser.py +0 -3
- ert/gui/ertwidgets/searchbox.py +13 -4
- ert/gui/{suggestor → ertwidgets/suggestor}/_suggestor_message.py +13 -4
- ert/gui/{suggestor → ertwidgets/suggestor}/suggestor.py +63 -30
- ert/gui/main.py +37 -8
- ert/gui/main_window.py +1 -7
- ert/gui/simulation/ensemble_experiment_panel.py +1 -1
- ert/gui/simulation/ensemble_information_filter_panel.py +1 -1
- ert/gui/simulation/ensemble_smoother_panel.py +1 -1
- ert/gui/simulation/evaluate_ensemble_panel.py +1 -1
- ert/gui/simulation/experiment_panel.py +16 -3
- ert/gui/simulation/manual_update_panel.py +31 -8
- ert/gui/simulation/multiple_data_assimilation_panel.py +12 -8
- ert/gui/simulation/run_dialog.py +27 -20
- ert/gui/simulation/single_test_run_panel.py +2 -2
- ert/gui/summarypanel.py +20 -1
- ert/gui/tools/load_results/load_results_panel.py +1 -1
- ert/gui/tools/manage_experiments/export_dialog.py +136 -0
- ert/gui/tools/manage_experiments/storage_info_widget.py +121 -16
- ert/gui/tools/manage_experiments/storage_widget.py +1 -2
- ert/gui/tools/plot/plot_api.py +37 -25
- ert/gui/tools/plot/plot_widget.py +10 -2
- ert/gui/tools/plot/plot_window.py +38 -18
- ert/gui/tools/plot/plottery/plot_config.py +2 -0
- ert/gui/tools/plot/plottery/plot_context.py +14 -0
- ert/gui/tools/plot/plottery/plots/__init__.py +2 -0
- ert/gui/tools/plot/plottery/plots/cesp.py +3 -1
- ert/gui/tools/plot/plottery/plots/distribution.py +6 -1
- ert/gui/tools/plot/plottery/plots/ensemble.py +12 -3
- ert/gui/tools/plot/plottery/plots/gaussian_kde.py +12 -2
- ert/gui/tools/plot/plottery/plots/histogram.py +3 -1
- ert/gui/tools/plot/plottery/plots/misfits.py +436 -0
- ert/gui/tools/plot/plottery/plots/observations.py +18 -4
- ert/gui/tools/plot/plottery/plots/statistics.py +62 -20
- ert/gui/tools/plot/plottery/plots/std_dev.py +3 -1
- ert/mode_definitions.py +2 -0
- ert/plugins/__init__.py +0 -1
- ert/plugins/hook_implementations/workflows/csv_export.py +2 -3
- ert/plugins/hook_implementations/workflows/gen_data_rft_export.py +10 -2
- ert/plugins/hook_specifications/__init__.py +0 -2
- ert/plugins/hook_specifications/jobs.py +0 -9
- ert/plugins/plugin_manager.py +6 -33
- ert/resources/forward_models/run_reservoirsimulator.py +8 -3
- ert/resources/shell_scripts/delete_directory.py +2 -2
- ert/run_models/__init__.py +18 -5
- ert/run_models/_create_run_path.py +131 -37
- ert/run_models/ensemble_experiment.py +10 -4
- ert/run_models/ensemble_information_filter.py +8 -1
- ert/run_models/ensemble_smoother.py +9 -3
- ert/run_models/evaluate_ensemble.py +8 -6
- ert/run_models/event.py +7 -3
- ert/run_models/everest_run_model.py +159 -46
- ert/run_models/initial_ensemble_run_model.py +25 -24
- ert/run_models/manual_update.py +6 -3
- ert/run_models/manual_update_enif.py +37 -0
- ert/run_models/model_factory.py +81 -21
- ert/run_models/multiple_data_assimilation.py +22 -11
- ert/run_models/run_model.py +64 -55
- ert/run_models/single_test_run.py +7 -4
- ert/run_models/update_run_model.py +4 -2
- ert/runpaths.py +5 -6
- ert/sample_prior.py +9 -4
- ert/scheduler/driver.py +37 -0
- ert/scheduler/event.py +3 -1
- ert/scheduler/job.py +23 -13
- ert/scheduler/lsf_driver.py +6 -2
- ert/scheduler/openpbs_driver.py +7 -1
- ert/scheduler/scheduler.py +5 -0
- ert/scheduler/slurm_driver.py +6 -2
- ert/services/__init__.py +2 -2
- ert/services/_base_service.py +37 -20
- ert/services/ert_server.py +317 -0
- ert/shared/_doc_utils/__init__.py +4 -2
- ert/shared/_doc_utils/ert_jobs.py +1 -4
- ert/shared/net_utils.py +43 -18
- ert/shared/storage/connection.py +3 -3
- ert/shared/version.py +3 -3
- ert/storage/__init__.py +2 -0
- ert/storage/local_ensemble.py +38 -12
- ert/storage/local_experiment.py +8 -16
- ert/storage/local_storage.py +68 -42
- ert/storage/migration/to11.py +1 -1
- ert/storage/migration/to16.py +38 -0
- ert/storage/migration/to17.py +42 -0
- ert/storage/migration/to18.py +11 -0
- ert/storage/migration/to19.py +34 -0
- ert/storage/migration/to20.py +23 -0
- ert/storage/migration/to21.py +25 -0
- ert/storage/migration/to8.py +4 -4
- ert/substitutions.py +12 -28
- ert/validation/active_range.py +7 -7
- ert/validation/rangestring.py +16 -16
- ert/workflow_runner.py +2 -1
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/METADATA +9 -8
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/RECORD +208 -205
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/WHEEL +1 -1
- everest/api/everest_data_api.py +14 -1
- everest/bin/config_branch_script.py +3 -6
- everest/bin/everconfigdump_script.py +1 -9
- everest/bin/everest_script.py +21 -11
- everest/bin/everlint_script.py +0 -2
- everest/bin/kill_script.py +2 -2
- everest/bin/monitor_script.py +2 -2
- everest/bin/utils.py +8 -4
- everest/bin/visualization_script.py +6 -14
- everest/config/__init__.py +4 -1
- everest/config/control_config.py +81 -6
- everest/config/control_variable_config.py +4 -3
- everest/config/everest_config.py +75 -42
- everest/config/forward_model_config.py +5 -3
- everest/config/install_data_config.py +7 -5
- everest/config/install_job_config.py +7 -3
- everest/config/install_template_config.py +3 -3
- everest/config/optimization_config.py +19 -6
- everest/config/output_constraint_config.py +8 -2
- everest/config/server_config.py +6 -49
- everest/config/utils.py +25 -105
- everest/config/validation_utils.py +17 -11
- everest/config_file_loader.py +13 -4
- everest/detached/client.py +3 -3
- everest/detached/everserver.py +7 -8
- everest/everest_storage.py +6 -12
- everest/gui/everest_client.py +2 -3
- everest/gui/main_window.py +2 -2
- everest/optimizer/everest2ropt.py +59 -32
- everest/optimizer/opt_model_transforms.py +12 -13
- everest/optimizer/utils.py +0 -29
- everest/strings.py +0 -5
- ert/config/everest_constraints_config.py +0 -95
- ert/config/ext_param_config.py +0 -106
- ert/gui/tools/export/__init__.py +0 -3
- ert/gui/tools/export/export_panel.py +0 -83
- ert/gui/tools/export/export_tool.py +0 -69
- ert/gui/tools/export/exporter.py +0 -36
- ert/services/storage_service.py +0 -127
- everest/config/sampler_config.py +0 -103
- everest/simulator/__init__.py +0 -88
- everest/simulator/everest_to_ert.py +0 -51
- /ert/gui/{suggestor → ertwidgets/suggestor}/__init__.py +0 -0
- /ert/gui/{suggestor → ertwidgets/suggestor}/_colors.py +0 -0
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/entry_points.txt +0 -0
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/licenses/COPYING +0 -0
- {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/top_level.txt +0 -0
ert/config/_create_observation_dataframes.py
CHANGED

@@ -18,6 +18,7 @@ from ._observations import (
     Observation,
     ObservationDate,
     ObservationError,
+    RFTObservation,
     SummaryObservation,
 )
 from .gen_data_config import GenDataConfig
@@ -28,18 +29,21 @@ from .parsing import (
     ObservationConfigError,
 )
 from .refcase import Refcase
+from .rft_config import RFTConfig

 if TYPE_CHECKING:
     import numpy.typing as npt


 DEFAULT_TIME_DELTA = timedelta(seconds=30)
+DEFAULT_LOCATION_RANGE_M = 3000


 def create_observation_dataframes(
     observations: Sequence[Observation],
     refcase: Refcase | None,
     gen_data_config: GenDataConfig | None,
+    rft_config: RFTConfig | None,
     time_map: list[datetime] | None,
     history: HistorySource,
 ) -> dict[str, pl.DataFrame]:
@@ -55,7 +59,6 @@ def create_observation_dataframes(
     config_errors: list[ErrorInfo] = []
     grouped: dict[str, list[pl.DataFrame]] = defaultdict(list)
     for obs in observations:
-        obs_name = obs.name
         try:
             match obs:
                 case HistoryObservation():
@@ -63,7 +66,7 @@ def create_observation_dataframes(
                     _handle_history_observation(
                         refcase,
                         obs,
-                        obs_name,
+                        obs.name,
                         history,
                         time_len,
                     )
@@ -72,7 +75,7 @@ def create_observation_dataframes(
                     grouped["summary"].append(
                         _handle_summary_observation(
                             obs,
-                            obs_name,
+                            obs.name,
                             obs_time_list,
                             bool(refcase),
                         )
@@ -82,11 +85,18 @@ def create_observation_dataframes(
                         _handle_general_observation(
                             gen_data_config,
                             obs,
-                            obs_name,
+                            obs.name,
                             obs_time_list,
                             bool(refcase),
                         )
                     )
+                case RFTObservation():
+                    if rft_config is None:
+                        raise TypeError(
+                            "create_observation_dataframes requires "
+                            "rft_config is not None when using RFTObservation"
+                        )
+                    grouped["rft"].append(_handle_rft_observation(rft_config, obs))
                 case default:
                     assert_never(default)
         except ObservationConfigError as err:
@@ -286,6 +296,43 @@ def _get_restart(
     ) from err


+def _has_localization(summary_dict: SummaryObservation) -> bool:
+    return any(
+        [
+            summary_dict.location_x is not None,
+            summary_dict.location_y is not None,
+            summary_dict.location_range is not None,
+        ]
+    )
+
+
+def _validate_localization_values(summary_dict: SummaryObservation) -> None:
+    """The user must provide LOCATION_X and LOCATION_Y to use localization, while
+    unprovided LOCATION_RANGE should default to some value.
+
+    This method assumes the summary dict contains at least one LOCATION key.
+    """
+    if summary_dict.location_x is None or summary_dict.location_y is None:
+        loc_values = {
+            "LOCATION_X": summary_dict.location_x,
+            "LOCATION_Y": summary_dict.location_y,
+            "LOCATION_RANGE": summary_dict.location_range,
+        }
+        provided_loc_values = {k: v for k, v in loc_values.items() if v is not None}
+
+        provided_loc_values_string = ", ".join(
+            key.upper() for key in provided_loc_values
+        )
+        raise ObservationConfigError.with_context(
+            f"Localization for observation {summary_dict.name} is misconfigured.\n"
+            f"Only {provided_loc_values_string} were provided. To enable "
+            f"localization for an observation, ensure that both LOCATION_X and "
+            f"LOCATION_Y are defined - or remove LOCATION keywords to disable "
+            f"localization.",
+            summary_dict,
+        )
+
+
 def _handle_summary_observation(
     summary_dict: SummaryObservation,
     obs_key: str,
@@ -323,15 +370,23 @@ def _handle_summary_observation(
             "Observation uncertainty must be strictly > 0", summary_key
         ) from None

-
-
-
-
-
-
-
-
-    )
+    data_dict = {
+        "response_key": [summary_key],
+        "observation_key": [obs_key],
+        "time": pl.Series([date]).dt.cast_time_unit("ms"),
+        "observations": pl.Series([value], dtype=pl.Float32),
+        "std": pl.Series([std_dev], dtype=pl.Float32),
+    }
+
+    if _has_localization(summary_dict):
+        _validate_localization_values(summary_dict)
+        data_dict["location_x"] = summary_dict.location_x
+        data_dict["location_y"] = summary_dict.location_y
+        data_dict["location_range"] = (
+            summary_dict.location_range or DEFAULT_LOCATION_RANGE_M
+        )
+
+    return pl.DataFrame(data_dict)


 def _handle_general_observation(
@@ -414,10 +469,8 @@ def _handle_general_observation(
         stds = file_values[1::2]

     else:
-        assert (
-            general_observation.value is not None
-            and general_observation.error is not None
-        )
+        assert general_observation.value is not None
+        assert general_observation.error is not None
         values = np.array([general_observation.value])
         stds = np.array([general_observation.error])

@@ -439,9 +492,11 @@ def _handle_general_observation(
         raise ObservationConfigError.with_context(
             f"Values ({values}), error ({stds}) and "
             f"index list ({indices}) must be of equal length",
-
-
-
+            (
+                general_observation.obs_file
+                if general_observation.obs_file is not None
+                else ""
+            ),
         )

     if np.any(stds <= 0):
@@ -461,3 +516,45 @@ def _handle_general_observation(
             "std": pl.Series(stds, dtype=pl.Float32),
         }
     )
+
+
+def _handle_rft_observation(
+    rft_config: RFTConfig,
+    rft_observation: RFTObservation,
+) -> pl.DataFrame:
+    location = (rft_observation.east, rft_observation.north, rft_observation.tvd)
+    if location not in rft_config.locations:
+        rft_config.locations.append(location)
+
+    data_to_read = rft_config.data_to_read
+    if rft_observation.well not in data_to_read:
+        rft_config.data_to_read[rft_observation.well] = {}
+
+    well_dict = data_to_read[rft_observation.well]
+    if rft_observation.date not in well_dict:
+        well_dict[rft_observation.date] = []
+
+    property_list = well_dict[rft_observation.date]
+    if rft_observation.property not in property_list:
+        property_list.append(rft_observation.property)
+
+    if rft_observation.error <= 0.0:
+        raise ObservationConfigError.with_context(
+            "Observation uncertainty must be strictly > 0", rft_observation.well
+        )
+
+    return pl.DataFrame(
+        {
+            "response_key": (
+                f"{rft_observation.well}:"
+                f"{rft_observation.date}:"
+                f"{rft_observation.property}"
+            ),
+            "observation_key": rft_observation.name,
+            "east": pl.Series([location[0]], dtype=pl.Float32),
+            "north": pl.Series([location[1]], dtype=pl.Float32),
+            "tvd": pl.Series([location[2]], dtype=pl.Float32),
+            "observations": pl.Series([rft_observation.value], dtype=pl.Float32),
+            "std": pl.Series([rft_observation.error], dtype=pl.Float32),
+        }
+    )
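For orientation, a minimal polars sketch (not taken from the package) of the single-row frame that _handle_summary_observation now returns when the optional localization keys are present. The summary key, date and coordinates are invented, and location_range falls back to DEFAULT_LOCATION_RANGE_M (3000) when LOCATION_RANGE is omitted:

    from datetime import datetime

    import polars as pl

    # Hypothetical localized summary observation; column names mirror data_dict above.
    frame = pl.DataFrame(
        {
            "response_key": ["FOPR"],
            "observation_key": ["FOPR_2010"],
            "time": pl.Series([datetime(2010, 3, 31)]).dt.cast_time_unit("ms"),
            "observations": pl.Series([1500.0], dtype=pl.Float32),
            "std": pl.Series([75.0], dtype=pl.Float32),
            "location_x": [452300.0],
            "location_y": [6781200.0],
            "location_range": [3000.0],  # default range when LOCATION_RANGE is not given
        }
    )
    print(frame)
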
ert/config/_get_num_cpu.py
CHANGED
@@ -156,7 +156,7 @@ class _Parser:
     def next_line(self) -> Iterator[str]: ...

     @overload
-    def next_line(self, __default: T) -> Iterator[str] | T: ...
+    def next_line(self, __default: T, /) -> Iterator[str] | T: ...

     def next_line(self, *args: T) -> Iterator[str] | T:
         self.line_number += 1
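The only change here adds the explicit "/" marker to the overload, making the parameter positional-only in a way that matches the *args implementation. A tiny standalone illustration of the "/" semantics (the function name and body are made up, not from the package):

    def first_or(default: int, /) -> int:
        # "default" is positional-only because it precedes "/".
        return default

    first_or(3)            # OK: positional call
    # first_or(default=3)  # TypeError: positional-only argument passed as keyword
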
ert/config/_observations.py
CHANGED
@@ -90,10 +90,13 @@ class _SummaryValues:
     name: str
     value: float
     key: str  #: The :term:`summary key` in the summary response
+    location_x: float | None = None
+    location_y: float | None = None
+    location_range: float | None = None


 @dataclass
-class SummaryObservation(ObservationDate, ObservationError, _SummaryValues):
+class SummaryObservation(ObservationDate, _SummaryValues, ObservationError):
     @classmethod
     def from_obs_dict(cls, directory: str, observation_dict: ObservationDict) -> Self:
         error_mode = ErrorModes.ABS
@@ -101,6 +104,7 @@ class SummaryObservation(ObservationDate, ObservationError, _SummaryValues):

         date_dict: ObservationDate = ObservationDate()
         float_values: dict[str, float] = {"ERROR_MIN": 0.1}
+        localization_values: dict[str, float] = {}
         for key, value in observation_dict.items():
             match key:
                 case "type" | "name":
@@ -121,6 +125,12 @@ class SummaryObservation(ObservationDate, ObservationError, _SummaryValues):
                     summary_key = value
                 case "DATE":
                     date_dict.date = value
+                case "LOCATION_X":
+                    localization_values["x"] = validate_float(value, key)
+                case "LOCATION_Y":
+                    localization_values["y"] = validate_float(value, key)
+                case "LOCATION_RANGE":
+                    localization_values["range"] = validate_float(value, key)
                 case _:
                     raise _unknown_key_error(str(key), observation_dict["name"])
         if "VALUE" not in float_values:
@@ -137,6 +147,9 @@ class SummaryObservation(ObservationDate, ObservationError, _SummaryValues):
             error_min=float_values["ERROR_MIN"],
             key=summary_key,
             value=float_values["VALUE"],
+            location_x=localization_values.get("x"),
+            location_y=localization_values.get("y"),
+            location_range=localization_values.get("range"),
             **date_dict.__dict__,
         )

@@ -201,12 +214,88 @@ class GeneralObservation(ObservationDate, _GeneralObservation):
         return output


-
+@dataclass
+class RFTObservation:
+    name: str
+    well: str
+    date: str
+    property: str
+    value: float
+    error: float
+    north: float
+    east: float
+    tvd: float
+
+    @classmethod
+    def from_obs_dict(cls, directory: str, observation_dict: ObservationDict) -> Self:
+        well = None
+        observed_property = None
+        observed_value = None
+        error = None
+        date = None
+        north = None
+        east = None
+        tvd = None
+        for key, value in observation_dict.items():
+            match key:
+                case "type" | "name":
+                    pass
+                case "WELL":
+                    well = value
+                case "PROPERTY":
+                    observed_property = value
+                case "VALUE":
+                    observed_value = validate_float(value, key)
+                case "ERROR":
+                    error = validate_float(value, key)
+                case "DATE":
+                    date = value
+                case "NORTH":
+                    north = validate_float(value, key)
+                case "EAST":
+                    east = validate_float(value, key)
+                case "TVD":
+                    tvd = validate_float(value, key)
+                case _:
+                    raise _unknown_key_error(str(key), observation_dict["name"])
+        if well is None:
+            raise _missing_value_error(observation_dict["name"], "WELL")
+        if observed_value is None:
+            raise _missing_value_error(observation_dict["name"], "VALUE")
+        if observed_property is None:
+            raise _missing_value_error(observation_dict["name"], "PROPERTY")
+        if error is None:
+            raise _missing_value_error(observation_dict["name"], "ERROR")
+        if date is None:
+            raise _missing_value_error(observation_dict["name"], "DATE")
+        if north is None:
+            raise _missing_value_error(observation_dict["name"], "NORTH")
+        if east is None:
+            raise _missing_value_error(observation_dict["name"], "EAST")
+        if tvd is None:
+            raise _missing_value_error(observation_dict["name"], "TVD")
+        return cls(
+            observation_dict["name"],
+            well,
+            date,
+            observed_property,
+            observed_value,
+            error,
+            north,
+            east,
+            tvd,
+        )
+
+
+Observation = (
+    HistoryObservation | SummaryObservation | GeneralObservation | RFTObservation
+)

 _TYPE_TO_CLASS: dict[ObservationType, type[Observation]] = {
     ObservationType.HISTORY: HistoryObservation,
     ObservationType.SUMMARY: SummaryObservation,
     ObservationType.GENERAL: GeneralObservation,
+    ObservationType.RFT: RFTObservation,
 }

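To make the new keys concrete, below are hypothetical observation dictionaries of the kind from_obs_dict parses above. Only the key names (LOCATION_X, LOCATION_Y, LOCATION_RANGE for summary observations; WELL, DATE, PROPERTY, VALUE, ERROR, NORTH, EAST, TVD for RFT observations) come from the diff; the names and numbers are invented:

    # Hypothetical parsed SUMMARY observation with the new localization keys.
    summary_obs_dict = {
        "name": "FOPR_2010",
        "KEY": "FOPR",
        "VALUE": "1500",
        "ERROR": "75",
        "DATE": "2010-03-31",
        "LOCATION_X": "452300",
        "LOCATION_Y": "6781200",
        "LOCATION_RANGE": "3000",
    }

    # Hypothetical parsed RFT observation; every keyword below is required,
    # otherwise from_obs_dict raises a missing-value error.
    rft_obs_dict = {
        "name": "RFT_WELL_A",
        "WELL": "WELL_A",
        "DATE": "2010-03-31",
        "PROPERTY": "PRESSURE",
        "VALUE": "282.0",
        "ERROR": "3.0",
        "NORTH": "6781200",
        "EAST": "452300",
        "TVD": "2560",
    }
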
ert/config/_read_summary.py
CHANGED
@@ -12,6 +12,7 @@ import warnings
 from collections.abc import Callable, Sequence
 from datetime import datetime, timedelta
 from enum import Enum, auto
+from functools import lru_cache

 import numpy as np
 import numpy.typing as npt
@@ -83,29 +84,30 @@ class DateUnit(Enum):
         raise InvalidResponseFile(f"Unknown date unit {val}")


+@lru_cache
 def _fetch_keys_to_matcher(fetch_keys: Sequence[str]) -> Callable[[str], bool]:
     """
     Transform the list of keys (with * used as repeated wildcard) into
     a matcher.

-    >>> match = _fetch_keys_to_matcher(
+    >>> match = _fetch_keys_to_matcher(("",))
     >>> match("FOPR")
     False

-    >>> match = _fetch_keys_to_matcher(
+    >>> match = _fetch_keys_to_matcher(("*",))
     >>> match("FOPR"), match("FO*")
     (True, True)


-    >>> match = _fetch_keys_to_matcher(
+    >>> match = _fetch_keys_to_matcher(("F*PR",))
     >>> match("WOPR"), match("FOPR"), match("FGPR"), match("SOIL")
     (False, True, True, False)

-    >>> match = _fetch_keys_to_matcher(
+    >>> match = _fetch_keys_to_matcher(("WGOR:*",))
     >>> match("FOPR"), match("WGOR:OP1"), match("WGOR:OP2"), match("WGOR")
     (False, True, True, False)

-    >>> match = _fetch_keys_to_matcher(
+    >>> match = _fetch_keys_to_matcher(("FOPR", "FGPR"))
     >>> match("FOPR"), match("FGPR"), match("WGOR:OP2"), match("WGOR")
     (True, True, False, False)
     """
@@ -138,7 +140,7 @@ def _read_spec(
     date_index = None
     date_unit_str = None

-    should_load_key = _fetch_keys_to_matcher(fetch_keys)
+    should_load_key = _fetch_keys_to_matcher(tuple(fetch_keys))

     for i, kw in enumerate(keywords):
         try:
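The tuple(fetch_keys) conversion at the call site is needed because functools.lru_cache hashes its arguments: lists are unhashable, tuples are. A standalone sketch of that constraint (the function here is illustrative, not the one above):

    from functools import lru_cache

    @lru_cache
    def join_keys(keys: tuple[str, ...]) -> str:
        # Cached on the argument value, so the argument must be hashable.
        return "|".join(keys)

    join_keys(("FOPR", "FGPR"))    # fine: tuples are hashable
    # join_keys(["FOPR", "FGPR"])  # TypeError: unhashable type: 'list'
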
ert/config/design_matrix.py
CHANGED
@@ -25,6 +25,7 @@ class DesignMatrix:
     xls_filename: Path
     design_sheet: str
     default_sheet: str | None
+    priority_source: str = "design_matrix"

     def __post_init__(self) -> None:
         try:
@@ -33,6 +34,9 @@ class DesignMatrix:
                 self.design_matrix_df,
                 self.parameter_configurations,
             ) = self.read_and_validate_design_matrix()
+            self.parameter_priority = {
+                cfg.name: self.priority_source for cfg in self.parameter_configurations
+            }
         except (ValueError, AttributeError) as exc:
             raise ConfigValidationError.with_context(
                 f"Error reading design matrix {self.xls_filename}"
@@ -45,7 +49,7 @@ class DesignMatrix:
     def from_config_list(cls, config_list: list[str | dict[str, str]]) -> DesignMatrix:
         filename = Path(cast(str, config_list[0]))
         options = cast(dict[str, str], config_list[1])
-        valid_options = ["DESIGN_SHEET", "DEFAULT_SHEET"]
+        valid_options = ["DESIGN_SHEET", "DEFAULT_SHEET", "PRIORITY"]
         option_errors = [
             ErrorInfo(
                 f"Option {option} is not a valid DESIGN_MATRIX option. "
@@ -59,6 +63,7 @@ class DesignMatrix:
             raise ConfigValidationError.from_collected(option_errors)
         design_sheet = options.get("DESIGN_SHEET", "DesignSheet")
         default_sheet = options.get("DEFAULT_SHEET", None)
+        priority_source = options.get("PRIORITY", DataSource.DESIGN_MATRIX)
         errors = []
         if filename.suffix not in {
             ".xlsx",
@@ -75,6 +80,13 @@ class DesignMatrix:
                     "DESIGN_SHEET and DEFAULT_SHEET can not point to the same sheet."
                 ).set_context(config_list)
             )
+        if priority_source not in {DataSource.DESIGN_MATRIX, DataSource.SAMPLED}:
+            errors.append(
+                ErrorInfo(
+                    f"PRIORITY must be either '{DataSource.DESIGN_MATRIX}'"
+                    f" or '{DataSource.SAMPLED}' priority is '{priority_source}'"
+                ).set_context(config_list)
+            )
         if errors:
             raise ConfigValidationError.from_collected(errors)
         assert design_sheet is not None
@@ -82,6 +94,7 @@ class DesignMatrix:
             xls_filename=filename,
             design_sheet=design_sheet,
             default_sheet=default_sheet,
+            priority_source=priority_source,
         )

     def merge_with_other(self, dm_other: DesignMatrix) -> None:
@@ -99,24 +112,17 @@ class DesignMatrix:
         common_keys = set(
             self.design_matrix_df.select(pl.exclude("realization")).columns
         ) & set(dm_other.design_matrix_df.columns)
-        non_identical_cols = set()
         if common_keys:
-
-
-
-
-
-
-
-
-                    f"Design Matrices '{self.xls_filename.name} "
-                    f"({self.design_sheet} {self.default_sheet or ''})' and "
-                    f"'{dm_other.xls_filename.name} ({dm_other.design_sheet} "
-                    f"{dm_other.default_sheet or ''})' "
-                    "contains non identical columns with the same name: "
-                    f"{non_identical_cols}!"
-                )
+            errors.append(
+                ErrorInfo(
+                    f"Design Matrices '{self.xls_filename.name} "
+                    f"({self.design_sheet} {self.default_sheet or ''})' and "
+                    f"'{dm_other.xls_filename.name} ({dm_other.design_sheet} "
+                    f"{dm_other.default_sheet or ''})' "
+                    "contains columns with the same name: "
+                    f"{common_keys}!"
                 )
+            )

         if errors:
             raise ConfigValidationError.from_collected(errors)
@@ -125,9 +131,7 @@ class DesignMatrix:
         self.design_matrix_df = pl.concat(
             [
                 self.design_matrix_df,
-                dm_other.design_matrix_df.select(
-                    pl.exclude([*list(common_keys), "realization"])
-                ),
+                dm_other.design_matrix_df.select(pl.exclude(["realization"])),
             ],
             how="horizontal",
         )
@@ -145,6 +149,7 @@ class DesignMatrix:
             for cfg in dm_other.parameter_configurations
             if cfg.name not in common_keys
         )
+        self.parameter_priority.update(dm_other.parameter_priority)

     def merge_with_existing_parameters(
         self, existing_parameters: list[ParameterConfig]
@@ -166,11 +171,33 @@ class DesignMatrix:

         for param_cfg in existing_parameters:
             if isinstance(param_cfg, GenKwConfig) and param_cfg.name in design_cfgs:
-                param_cfg.input_source = DataSource.DESIGN_MATRIX
-                param_cfg.update = False
-                param_cfg.distribution = RawSettings()
                 del design_cfgs[param_cfg.name]
-
+                input_source = DataSource(
+                    self.parameter_priority.get(
+                        param_cfg.name, DataSource.DESIGN_MATRIX.value
+                    )
+                )
+                new_param_configs += [
+                    GenKwConfig(
+                        name=param_cfg.name,
+                        update=(
+                            input_source == DataSource.SAMPLED and param_cfg.update
+                        ),
+                        distribution=(
+                            RawSettings()
+                            if input_source == DataSource.DESIGN_MATRIX
+                            else param_cfg.distribution
+                        ),
+                        group=(
+                            DESIGN_MATRIX_GROUP
+                            if input_source == DataSource.DESIGN_MATRIX
+                            else param_cfg.group
+                        ),
+                        input_source=input_source,
+                    ),
+                ]
+            else:
+                new_param_configs += [param_cfg]
         if design_cfgs.values():
             new_param_configs += list(design_cfgs.values())
         return new_param_configs
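A hedged usage sketch of the new PRIORITY option: from_config_list receives the file name plus an option dict, and PRIORITY must map to one of the two DataSource members checked above. The file and sheet names below are invented; "design_matrix" is the default string value shown in the diff, while the alternative sampled source must use whatever string DataSource.SAMPLED carries, which this diff does not show:

    from ert.config.design_matrix import DesignMatrix

    # Hypothetical parsed DESIGN_MATRIX keyword: [filename, {OPTION: value, ...}].
    dm = DesignMatrix.from_config_list(
        [
            "poly_design.xlsx",
            {
                "DESIGN_SHEET": "DesignSheet",
                "DEFAULT_SHEET": "DefaultSheet",
                "PRIORITY": "design_matrix",  # keep design-matrix values over sampled priors
            },
        ]
    )
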
ert/config/distribution.py
CHANGED
@@ -17,7 +17,7 @@ class TransSettingsValidation(BaseModel):
     model_config = {"extra": "forbid"}

     @classmethod
-    def create(cls
+    def create(cls, *args: Any, **kwargs: Any) -> Self:
         return cls(*args, **kwargs)

     @classmethod
ert/config/ensemble_config.py
CHANGED
@@ -7,30 +7,22 @@ from typing import Self

 from pydantic import BaseModel, Field, model_validator

-from .
+from .everest_control import EverestControl
 from .field import Field as FieldConfig
-from .gen_data_config import GenDataConfig
 from .gen_kw_config import GenKwConfig
+from .known_response_types import KNOWN_ERT_RESPONSE_TYPES, KnownErtResponseTypes
 from .parameter_config import ParameterConfig
 from .parsing import ConfigDict, ConfigKeys, ConfigValidationError
 from .response_config import ResponseConfig
-from .summary_config import SummaryConfig
 from .surface_config import SurfaceConfig

-KnownResponseTypes = SummaryConfig | GenDataConfig
-
-_KNOWN_RESPONSE_TYPES = (
-    SummaryConfig,
-    GenDataConfig,
-)
-
 logger = logging.getLogger(__name__)


 class EnsembleConfig(BaseModel):
-    response_configs: dict[str,
+    response_configs: dict[str, KnownErtResponseTypes] = Field(default_factory=dict)
     parameter_configs: dict[
-        str, GenKwConfig | FieldConfig | SurfaceConfig |
+        str, GenKwConfig | FieldConfig | SurfaceConfig | EverestControl
     ] = Field(default_factory=dict)

     @model_validator(mode="after")
@@ -131,16 +123,16 @@ class EnsembleConfig(BaseModel):
             + [make_field(f) for f in field_list]
         )
         EnsembleConfig._check_for_duplicate_gen_kw_param_names(gen_kw_cfgs)
-        response_configs: list[
+        response_configs: list[KnownErtResponseTypes] = []

-        for config_cls in
+        for config_cls in KNOWN_ERT_RESPONSE_TYPES:
             instance = config_cls.from_config_dict(config_dict)

             if instance is not None and instance.keys:
                 response_configs.append(instance)

         return cls(
-            response_configs={response.
+            response_configs={response.type: response for response in response_configs},
             parameter_configs={
                 parameter.name: parameter for parameter in parameter_configs
             },
@@ -151,13 +143,13 @@ class EnsembleConfig(BaseModel):
             return self.parameter_configs[key]
         elif key in self.response_configs:
             return self.response_configs[key]
-        elif
+        elif config := next(
             (c for c in self.response_configs.values() if key in c.keys), None
         ):
             # Only hit by blockfs migration
             # returns the same config for one call per
             # response type. Is later deduped before saving to json
-            return
+            return config
         else:
             raise KeyError(f"The key:{key} is not in the ensemble configuration")

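The new ert/config/known_response_types.py module itself is not shown in this diff. Judging only from how its two exports are used above (a tuple of response config classes to iterate over, and a union type for annotations), a plausible shape would be the sketch below; the exact membership, in particular whether the new RFTConfig is included, is an assumption, not the actual module contents:

    # Hypothetical reconstruction of ert/config/known_response_types.py.
    from .gen_data_config import GenDataConfig
    from .rft_config import RFTConfig
    from .summary_config import SummaryConfig

    KnownErtResponseTypes = SummaryConfig | GenDataConfig | RFTConfig
    KNOWN_ERT_RESPONSE_TYPES = (SummaryConfig, GenDataConfig, RFTConfig)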