ert 17.1.9-py3-none-any.whl → 18.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _ert/events.py +19 -2
- ert/__main__.py +8 -7
- ert/analysis/_update_commons.py +12 -3
- ert/cli/main.py +6 -3
- ert/cli/monitor.py +7 -0
- ert/config/__init__.py +13 -3
- ert/config/_create_observation_dataframes.py +60 -12
- ert/config/_observations.py +14 -1
- ert/config/_read_summary.py +8 -6
- ert/config/ensemble_config.py +6 -14
- ert/config/ert_config.py +19 -13
- ert/config/{everest_objective_config.py → everest_response.py} +23 -12
- ert/config/ext_param_config.py +133 -1
- ert/config/field.py +12 -8
- ert/config/forward_model_step.py +108 -6
- ert/config/gen_data_config.py +2 -6
- ert/config/gen_kw_config.py +0 -9
- ert/config/known_response_types.py +14 -0
- ert/config/parameter_config.py +0 -17
- ert/config/parsing/config_keywords.py +1 -0
- ert/config/parsing/config_schema.py +12 -0
- ert/config/parsing/config_schema_deprecations.py +11 -0
- ert/config/parsing/config_schema_item.py +1 -1
- ert/config/queue_config.py +4 -4
- ert/config/response_config.py +0 -7
- ert/config/rft_config.py +230 -0
- ert/config/summary_config.py +2 -6
- ert/config/violations.py +0 -0
- ert/config/workflow_fixtures.py +2 -1
- ert/dark_storage/client/__init__.py +2 -2
- ert/dark_storage/client/_session.py +4 -4
- ert/dark_storage/client/client.py +2 -2
- ert/dark_storage/compute/misfits.py +7 -6
- ert/dark_storage/endpoints/compute/misfits.py +2 -2
- ert/dark_storage/endpoints/observations.py +4 -4
- ert/dark_storage/endpoints/responses.py +15 -1
- ert/ensemble_evaluator/__init__.py +8 -1
- ert/ensemble_evaluator/evaluator.py +81 -29
- ert/ensemble_evaluator/event.py +6 -0
- ert/ensemble_evaluator/snapshot.py +3 -1
- ert/ensemble_evaluator/state.py +1 -0
- ert/field_utils/__init__.py +8 -0
- ert/field_utils/field_utils.py +211 -1
- ert/gui/ertwidgets/__init__.py +23 -16
- ert/gui/ertwidgets/analysismoduleedit.py +2 -2
- ert/gui/ertwidgets/checklist.py +1 -1
- ert/gui/ertwidgets/create_experiment_dialog.py +3 -1
- ert/gui/ertwidgets/ensembleselector.py +2 -2
- ert/gui/ertwidgets/models/__init__.py +2 -0
- ert/gui/ertwidgets/models/activerealizationsmodel.py +2 -1
- ert/gui/ertwidgets/models/path_model.py +1 -1
- ert/gui/ertwidgets/models/targetensemblemodel.py +2 -1
- ert/gui/ertwidgets/models/text_model.py +1 -1
- ert/gui/ertwidgets/searchbox.py +13 -4
- ert/gui/{suggestor → ertwidgets/suggestor}/_suggestor_message.py +13 -4
- ert/gui/main.py +11 -6
- ert/gui/main_window.py +1 -2
- ert/gui/simulation/ensemble_experiment_panel.py +1 -1
- ert/gui/simulation/ensemble_information_filter_panel.py +1 -1
- ert/gui/simulation/ensemble_smoother_panel.py +1 -1
- ert/gui/simulation/evaluate_ensemble_panel.py +1 -1
- ert/gui/simulation/experiment_panel.py +1 -1
- ert/gui/simulation/manual_update_panel.py +31 -8
- ert/gui/simulation/multiple_data_assimilation_panel.py +12 -8
- ert/gui/simulation/run_dialog.py +25 -4
- ert/gui/simulation/single_test_run_panel.py +2 -2
- ert/gui/summarypanel.py +1 -1
- ert/gui/tools/load_results/load_results_panel.py +1 -1
- ert/gui/tools/manage_experiments/storage_info_widget.py +7 -7
- ert/gui/tools/manage_experiments/storage_widget.py +1 -2
- ert/gui/tools/plot/plot_api.py +13 -10
- ert/gui/tools/plot/plot_window.py +12 -0
- ert/gui/tools/plot/plottery/plot_config.py +2 -0
- ert/gui/tools/plot/plottery/plot_context.py +14 -0
- ert/gui/tools/plot/plottery/plots/ensemble.py +9 -2
- ert/gui/tools/plot/plottery/plots/statistics.py +59 -19
- ert/mode_definitions.py +2 -0
- ert/plugins/__init__.py +0 -1
- ert/plugins/hook_implementations/workflows/gen_data_rft_export.py +10 -2
- ert/plugins/hook_specifications/__init__.py +0 -2
- ert/plugins/hook_specifications/jobs.py +0 -9
- ert/plugins/plugin_manager.py +2 -33
- ert/resources/shell_scripts/delete_directory.py +2 -2
- ert/run_models/__init__.py +18 -5
- ert/run_models/_create_run_path.py +33 -21
- ert/run_models/ensemble_experiment.py +10 -4
- ert/run_models/ensemble_information_filter.py +8 -1
- ert/run_models/ensemble_smoother.py +9 -3
- ert/run_models/evaluate_ensemble.py +8 -6
- ert/run_models/event.py +7 -3
- ert/run_models/everest_run_model.py +155 -44
- ert/run_models/initial_ensemble_run_model.py +23 -22
- ert/run_models/manual_update.py +4 -2
- ert/run_models/manual_update_enif.py +37 -0
- ert/run_models/model_factory.py +81 -22
- ert/run_models/multiple_data_assimilation.py +21 -10
- ert/run_models/run_model.py +54 -34
- ert/run_models/single_test_run.py +7 -4
- ert/run_models/update_run_model.py +4 -2
- ert/runpaths.py +5 -6
- ert/sample_prior.py +9 -4
- ert/scheduler/driver.py +37 -0
- ert/scheduler/event.py +3 -1
- ert/scheduler/job.py +23 -13
- ert/scheduler/lsf_driver.py +6 -2
- ert/scheduler/openpbs_driver.py +7 -1
- ert/scheduler/scheduler.py +5 -0
- ert/scheduler/slurm_driver.py +6 -2
- ert/services/__init__.py +2 -2
- ert/services/_base_service.py +31 -15
- ert/services/ert_server.py +317 -0
- ert/shared/_doc_utils/ert_jobs.py +1 -4
- ert/shared/storage/connection.py +3 -3
- ert/shared/version.py +3 -3
- ert/storage/local_ensemble.py +25 -5
- ert/storage/local_experiment.py +6 -14
- ert/storage/local_storage.py +35 -30
- ert/storage/migration/to18.py +12 -0
- ert/storage/migration/to8.py +4 -4
- ert/substitutions.py +12 -28
- ert/validation/active_range.py +7 -7
- ert/validation/rangestring.py +16 -16
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/METADATA +8 -7
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/RECORD +160 -159
- everest/api/everest_data_api.py +1 -14
- everest/bin/config_branch_script.py +3 -6
- everest/bin/everconfigdump_script.py +1 -9
- everest/bin/everest_script.py +21 -11
- everest/bin/kill_script.py +2 -2
- everest/bin/monitor_script.py +2 -2
- everest/bin/utils.py +6 -3
- everest/config/__init__.py +4 -1
- everest/config/control_config.py +61 -2
- everest/config/control_variable_config.py +2 -1
- everest/config/everest_config.py +38 -16
- everest/config/forward_model_config.py +5 -3
- everest/config/install_data_config.py +7 -5
- everest/config/install_job_config.py +7 -3
- everest/config/install_template_config.py +3 -3
- everest/config/optimization_config.py +19 -6
- everest/config/output_constraint_config.py +8 -2
- everest/config/server_config.py +6 -49
- everest/config/utils.py +25 -105
- everest/config/validation_utils.py +10 -10
- everest/config_file_loader.py +13 -2
- everest/detached/everserver.py +7 -8
- everest/everest_storage.py +6 -10
- everest/gui/everest_client.py +0 -1
- everest/gui/main_window.py +2 -2
- everest/optimizer/everest2ropt.py +59 -32
- everest/optimizer/opt_model_transforms.py +12 -13
- everest/optimizer/utils.py +0 -29
- everest/strings.py +0 -5
- ert/config/everest_constraints_config.py +0 -95
- ert/services/storage_service.py +0 -127
- everest/config/sampler_config.py +0 -103
- everest/simulator/__init__.py +0 -88
- everest/simulator/everest_to_ert.py +0 -51
- /ert/gui/{suggestor → ertwidgets/suggestor}/__init__.py +0 -0
- /ert/gui/{suggestor → ertwidgets/suggestor}/_colors.py +0 -0
- /ert/gui/{suggestor → ertwidgets/suggestor}/suggestor.py +0 -0
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/WHEEL +0 -0
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/entry_points.txt +0 -0
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/licenses/COPYING +0 -0
- {ert-17.1.9.dist-info → ert-18.0.0.dist-info}/top_level.txt +0 -0
everest/optimizer/utils.py
CHANGED
@@ -1,29 +0,0 @@
-import importlib
-
-from ropt.plugins import PluginManager
-
-
-def get_ropt_plugin_manager() -> PluginManager:
-    # To check the validity of optimization and sampler backends and their
-    # supported algorithms or methods, an instance of a ropt PluginManager is
-    # needed. Everest also needs a ropt plugin manager at runtime which may add
-    # additional optimization and/or sampler backends. To be sure that these
-    # added backends are detected, all code should use this function to access
-    # the plugin manager. Any optimizer/sampler plugins that need to be added at
-    # runtime should be added in this function.
-    #
-    # Note: backends can also be added via the Python entrypoints mechanism,
-    # these are detected by default and do not need to be added here.
-
-    try:
-        return PluginManager()
-    except Exception as exc:
-        ert_version = importlib.metadata.version("ert")
-        ropt_version = importlib.metadata.version("ropt")
-        msg = (
-            f"Error while initializing ropt:\n\n{exc}.\n\n"
-            "Check the everest installation, there may a be version mismatch.\n"
-            f"    (ERT: {ert_version}, ropt: {ropt_version})\n"
-            "If the everest installation is correct, please report this as a bug."
-        )
-        raise RuntimeError(msg) from exc
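The removed helper wrapped construction of ropt's PluginManager with a version-aware error message. As a rough sketch, assuming callers in 18.0.0 now construct the plugin manager directly (the actual replacement call site is not part of this diff, and the helper name below is hypothetical):

import importlib.metadata

from ropt.plugins import PluginManager


def make_plugin_manager() -> PluginManager:
    # Hypothetical stand-in for the removed get_ropt_plugin_manager();
    # reports the installed ert/ropt versions when plugin discovery fails.
    try:
        return PluginManager()
    except Exception as exc:
        ert_version = importlib.metadata.version("ert")
        ropt_version = importlib.metadata.version("ropt")
        raise RuntimeError(
            f"Error while initializing ropt (ERT: {ert_version}, "
            f"ropt: {ropt_version}): {exc}"
        ) from exc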
everest/strings.py
CHANGED
@@ -1,8 +1,5 @@
 from enum import StrEnum
 
-CERTIFICATE_DIR = "cert"
-
-DETACHED_NODE_DIR = "detached_node_output"
 DEFAULT_OUTPUT_DIR = "everest_output"
 DEFAULT_LOGGING_FORMAT = "%(asctime)s %(name)s %(levelname)s: %(message)s"
 
@@ -10,8 +7,6 @@ EVEREST = "everest"
 EVERSERVER = "everserver"
 EXPERIMENT_SERVER = "experiment_server"
 
-HOSTFILE_NAME = "storage_server.json"
-
 NAME = "name"
 
 OPTIMIZATION_OUTPUT_DIR = "optimization_output"
ert/config/everest_constraints_config.py
DELETED
@@ -1,95 +0,0 @@
-from pathlib import Path
-from typing import Literal, Self
-
-import numpy as np
-import polars as pl
-
-from ert.substitutions import substitute_runpath_name
-
-from .parsing import ConfigDict
-from .response_config import InvalidResponseFile, ResponseConfig, ResponseMetadata
-from .responses_index import responses_index
-
-
-class EverestConstraintsConfig(ResponseConfig):
-    @property
-    def metadata(self) -> list[ResponseMetadata]:
-        return [
-            ResponseMetadata(
-                response_type=self.name,
-                response_key=response_key,
-                finalized=self.has_finalized_keys,
-                filter_on=None,
-            )
-            for response_key in self.keys
-        ]
-
-    type: Literal["everest_constraints"] = "everest_constraints"
-    name: str = "everest_constraints"
-    has_finalized_keys: bool = True
-
-    @property
-    def expected_input_files(self) -> list[str]:
-        return self.input_files
-
-    @classmethod
-    def from_config_dict(cls, config_dict: ConfigDict) -> Self:
-        raise NotImplementedError("Should only be directly initialized")
-
-    def read_from_file(self, run_path: str, iens: int, iter_: int) -> pl.DataFrame:
-        def _read_file(filename: Path) -> pl.DataFrame:
-            try:
-                data = np.loadtxt(filename, ndmin=1)
-            except ValueError as err:
-                raise InvalidResponseFile(str(err)) from err
-            return pl.DataFrame(
-                {
-                    "values": pl.Series(data, dtype=pl.Float32),
-                }
-            )
-
-        errors = []
-
-        run_path_ = Path(run_path)
-        datasets_per_name = []
-
-        for name, input_file in zip(self.keys, self.input_files, strict=False):
-            datasets = []
-            try:
-                filename = substitute_runpath_name(input_file, iens, iter_)
-                datasets.append(_read_file(run_path_ / filename))
-            except (InvalidResponseFile, FileNotFoundError) as err:
-                errors.append(err)
-
-            if len(datasets) > 0:
-                combined_ds = pl.concat(datasets)
-                combined_ds.insert_column(
-                    0, pl.Series("response_key", [name] * len(combined_ds))
-                )
-                datasets_per_name.append(combined_ds)
-
-        if errors:
-            if all(isinstance(err, FileNotFoundError) for err in errors):
-                raise FileNotFoundError(
-                    "Could not find one or more files/directories while reading "
-                    f"{self.name}: {','.join([str(err) for err in errors])}"
-                )
-            else:
-                raise InvalidResponseFile(
-                    "Error reading "
-                    f"{self.name}, errors: {','.join([str(err) for err in errors])}"
-                )
-
-        combined = pl.concat(datasets_per_name)
-        return combined
-
-    @property
-    def response_type(self) -> str:
-        return "everest_constraints"
-
-    @property
-    def primary_key(self) -> list[str]:
-        return []
-
-
-responses_index.add_response_type(EverestConstraintsConfig)
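The deleted read_from_file shows how Everest constraint responses were loaded: each input file holds one float per line, read with numpy.loadtxt and wrapped in a polars frame tagged with its response key. A self-contained sketch of that pattern (the function name, file path and key are placeholders, not from ert):

from pathlib import Path

import numpy as np
import polars as pl


def read_constraint_file(path: Path, response_key: str) -> pl.DataFrame:
    # One float per line -> a Float32 "values" column, prefixed with the key,
    # mirroring the removed _read_file / insert_column logic above.
    data = np.loadtxt(path, ndmin=1)
    frame = pl.DataFrame({"values": pl.Series(data, dtype=pl.Float32)})
    frame.insert_column(0, pl.Series("response_key", [response_key] * len(frame)))
    return frame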
ert/services/storage_service.py
DELETED
@@ -1,127 +0,0 @@
-from __future__ import annotations
-
-import logging
-import os
-from collections.abc import Mapping, Sequence
-from json.decoder import JSONDecodeError
-from typing import Any
-
-import requests
-
-from ert.dark_storage.client import Client, ConnInfo
-from ert.services._base_service import BaseService, _Context, local_exec_args
-from ert.trace import get_traceparent
-
-
-class StorageService(BaseService):
-    service_name = "storage"
-
-    def __init__(
-        self,
-        exec_args: Sequence[str] = (),
-        timeout: int = 120,
-        parent_pid: int | None = None,
-        conn_info: Mapping[str, Any] | Exception | None = None,
-        project: str | None = None,
-        verbose: bool = False,
-        traceparent: str | None = "inherit_parent",
-        logging_config: str | None = None,
-    ) -> None:
-        self._url: str | None = None
-
-        exec_args = local_exec_args("storage")
-
-        exec_args.extend(["--project", str(project)])
-        if verbose:
-            exec_args.append("--verbose")
-        if logging_config:
-            exec_args.extend(["--logging-config", str(logging_config)])
-        if traceparent:
-            traceparent = (
-                get_traceparent() if traceparent == "inherit_parent" else traceparent
-            )
-            exec_args.extend(["--traceparent", str(traceparent)])
-        if parent_pid is not None:
-            exec_args.extend(["--parent_pid", str(parent_pid)])
-
-        if (
-            conn_info is not None
-            and isinstance(conn_info, Mapping)
-            and "urls" not in conn_info
-        ):
-            raise KeyError("urls not found in conn_info")
-        super().__init__(exec_args, timeout, conn_info, project)
-
-    def fetch_auth(self) -> tuple[str, Any]:
-        """
-        Returns a tuple of username and password, compatible with requests' `auth`
-        kwarg.
-
-        Blocks while the server is starting.
-        """
-        return ("__token__", self.fetch_conn_info()["authtoken"])
-
-    @classmethod
-    def init_service(cls, *args: Any, **kwargs: Any) -> _Context[StorageService]:
-        try:
-            service = cls.connect(timeout=0, project=kwargs.get("project", os.getcwd()))
-            # Check the server is up and running
-            _ = service.fetch_url()
-        except (TimeoutError, JSONDecodeError, KeyError) as e:
-            logging.getLogger(__name__).warning(
-                "Failed locating existing storage service due to "
-                f"{type(e).__name__}: {e}, starting new service"
-            )
-            return cls.start_server(*args, **kwargs)
-        except PermissionError as pe:
-            logging.getLogger(__name__).error(
-                f"{type(pe).__name__}: {pe}, cannot connect to storage service "
-                f"due to permission issues."
-            )
-            raise pe
-        return _Context(service)
-
-    def fetch_url(self) -> str:
-        """Returns the url. Blocks while the server is starting"""
-        if self._url is not None:
-            return self._url
-
-        for url in self.fetch_conn_info()["urls"]:
-            con_info = self.fetch_conn_info()
-            try:
-                resp = requests.get(
-                    f"{url}/healthcheck",
-                    auth=self.fetch_auth(),
-                    verify=con_info["cert"],
-                )
-                logging.getLogger(__name__).info(
-                    f"Connecting to {url} got status: "
-                    f"{resp.status_code}, {resp.headers}, {resp.reason}, {resp.text}"
-                )
-                if resp.status_code == 200:
-                    self._url = url
-                    return str(url)
-
-            except requests.ConnectionError as ce:
-                logging.getLogger(__name__).info(
-                    f"Could not connect to {url}, but will try something else. "
-                    f"Error: {ce}"
-                )
-        raise TimeoutError(
-            "None of the URLs provided for the ert storage server worked."
-        )
-
-    @classmethod
-    def session(cls, project: os.PathLike[str], timeout: int | None = None) -> Client:
-        """
-        Start a HTTP transaction with the server
-        """
-        inst = cls.connect(timeout=timeout, project=project)
-        info = inst.fetch_conn_info()
-        return Client(
-            conn_info=ConnInfo(
-                base_url=inst.fetch_url(),
-                auth_token=inst.fetch_auth()[1],
-                cert=info["cert"],
-            )
-        )
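The removed StorageService.fetch_url cycled through the advertised URLs and kept the first one whose /healthcheck endpoint answered 200 using token auth and the server certificate. The file list above adds ert/services/ert_server.py (+317 lines), which presumably takes over this role; the sketch below only restates the removed probe pattern with placeholder url/token/cert values and does not reflect the new module's API:

import requests


def storage_is_alive(url: str, token: str, cert: str | bool) -> bool:
    # Mirrors the removed healthcheck probe: token auth ("__token__", <authtoken>)
    # and TLS verification against the advertised certificate.
    try:
        resp = requests.get(
            f"{url}/healthcheck", auth=("__token__", token), verify=cert
        )
        return resp.status_code == 200
    except requests.ConnectionError:
        return False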
everest/config/sampler_config.py
DELETED
@@ -1,103 +0,0 @@
-import logging
-from textwrap import dedent
-from typing import Any, Self
-
-from pydantic import BaseModel, ConfigDict, Field, model_validator
-
-from everest.optimizer.utils import get_ropt_plugin_manager
-from everest.strings import EVEREST
-
-
-class SamplerConfig(BaseModel):
-    backend: str | None = Field(
-        default=None,
-        description=dedent(
-            """
-            [Deprecated]
-
-            The correct backend will be inferred by the method. If several backends
-            have a method named `A`, pick a specific backend `B` by putting `B/A` in
-            the `method` field.
-            """
-        ),
-    )
-    method: str = Field(
-        default="norm",
-        description=dedent(
-            """
-            The sampling method or distribution used by the sampler backend.
-
-            The set of available methods depends on the sampler backend used. By
-            default a plugin based on `scipy.stats` is used, implementing the
-            following methods:
-
-            - From Probability Distributions:
-              - `norm`: Samples from a standard normal distribution (mean 0,
-                standard deviation 1).
-              - `truncnorm`: Samples from a truncated normal distribution
-                (mean 0, std. dev. 1), truncated to the range `[-1, 1]`.
-              - `uniform`: Samples from a uniform distribution in the range
-                `[-1, 1]`.
-
-            - From Quasi-Monte Carlo Sequences:
-              - `sobol`: Uses Sobol' sequences.
-              - `halton`: Uses Halton sequences.
-              - `lhs`: Uses Latin Hypercube Sampling.
-
-            Note: QMC samples are generated in the unit hypercube `[0, 1]^d`
-            and then scaled to the hypercube `[-1, 1]^d`.
-            """
-        ),
-    )
-    options: dict[str, Any] | None = Field(
-        default=None,
-        description=dedent(
-            """
-            Specifies a dict of optional parameters for the sampler backend.
-
-            This dict of values is passed unchanged to the selected method in
-            the backend.
-            """
-        ),
-    )
-    shared: bool | None = Field(
-        default=None,
-        description=dedent(
-            """
-            Whether to share perturbations between realizations.
-            """
-        ),
-    )
-    model_config = ConfigDict(extra="forbid")
-
-    @model_validator(mode="after")
-    def validate_backend_and_method(self) -> Self:
-        if self.backend is not None:
-            message = (
-                "sampler.backend is deprecated. "
-                "The correct backend will be inferred by the method. "
-                "If several backends have a method named A, you need to pick "
-                "a specific backend B by putting B/A in sampler.method."
-            )
-            print(message)
-            logging.getLogger(EVEREST).warning(message)
-
-        # Update the default for backends that are not scipy:
-        if (
-            self.backend not in {None, "scipy"}
-            and "method" not in self.model_fields_set
-        ):
-            self.method = "default"
-
-        if self.backend is not None:
-            self.method = f"{self.backend}/{self.method}"
-
-        if (
-            get_ropt_plugin_manager().get_plugin_name("sampler", f"{self.method}")
-            is None
-        ):
-            raise ValueError(f"Sampler method '{self.method}' not found")
-
-        self.backend = None
-
-        return self
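The removed validator folded the deprecated backend field into method as "<backend>/<method>" and switched the implicit default method from "norm" to "default" when a non-scipy backend was named. A standalone sketch of just that merging rule (the real model also verified the result against ropt's sampler plugins):

def merge_backend_into_method(backend: str | None, method: str | None) -> str:
    # Sketch of the removed SamplerConfig.validate_backend_and_method logic;
    # the deprecation warning and the ropt plugin lookup are omitted.
    if method is None:
        method = "norm" if backend in {None, "scipy"} else "default"
    return method if backend is None else f"{backend}/{method}"


assert merge_backend_into_method(None, None) == "norm"
assert merge_backend_into_method("scipy", "sobol") == "scipy/sobol"
assert merge_backend_into_method("other_backend", None) == "other_backend/default"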
everest/simulator/__init__.py
DELETED
@@ -1,88 +0,0 @@
-JOB_SUCCESS = "Finished"
-JOB_WAITING = "Waiting"
-JOB_RUNNING = "Running"
-JOB_FAILURE = "Failed"
-
-
-DEFAULT_DATA_SUMMARY_KEYS = ["YEAR", "YEARS", "TCPU", "TCPUDAY", "MONTH", "DAY"]
-
-
-DEFAULT_FIELD_SUMMARY_KEYS = [
-    "FOPR",
-    "FOPT",
-    "FOIR",
-    "FOIT",
-    "FWPR",
-    "FWPT",
-    "FWIR",
-    "FWIT",
-    "FGPR",
-    "FGPT",
-    "FGIR",
-    "FGIT",
-    "FVPR",
-    "FVPT",
-    "FVIR",
-    "FVIT",
-    "FWCT",
-    "FGOR",
-    "FOIP",
-    "FOIPL",
-    "FOIPG",
-    "FWIP",
-    "FGIP",
-    "FGIPL",
-    "FGIPG",
-    "FPR",
-    "FAQR",
-    "FAQRG",
-    "FAQT",
-    "FAQTG",
-    "FWGR",
-]
-
-
-_DEFAULT_WELL_SUMMARY_KEYS = [
-    "WOPR",
-    "WOPT",
-    "WOIR",
-    "WOIT",
-    "WWPR",
-    "WWPT",
-    "WWIR",
-    "WWIT",
-    "WGPR",
-    "WGPT",
-    "WGIR",
-    "WGIT",
-    "WVPR",
-    "WVPT",
-    "WVIR",
-    "WVIT",
-    "WWCT",
-    "WGOR",
-    "WWGR",
-    "WBHP",
-    "WTHP",
-    "WPI",
-]
-
-_EXCLUDED_TARGET_KEYS = "WGOR"
-
-_DEFAULT_WELL_TARGET_SUMMARY_KEYS = [
-    well_key + "T"
-    for well_key in _DEFAULT_WELL_SUMMARY_KEYS
-    if well_key.endswith("R") and well_key not in _EXCLUDED_TARGET_KEYS
-]
-
-DEFAULT_WELL_SUMMARY_KEYS = (
-    _DEFAULT_WELL_SUMMARY_KEYS + _DEFAULT_WELL_TARGET_SUMMARY_KEYS
-)
-
-
-__all__ = [
-    "JOB_FAILURE",
-    "JOB_RUNNING",
-    "JOB_SUCCESS",
-    "JOB_WAITING",
-]
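The deleted module derived cumulative well keys by appending "T" to every default well rate key ending in "R", excluding WGOR (note that the original membership test is a substring check against the string "WGOR"). A small worked sketch on a subset of the keys listed above:

# Worked sketch of the removed _DEFAULT_WELL_TARGET_SUMMARY_KEYS derivation.
_EXCLUDED_TARGET_KEYS = "WGOR"
rate_keys = ["WOPR", "WWIR", "WGOR", "WBHP"]
target_keys = [
    key + "T"
    for key in rate_keys
    if key.endswith("R") and key not in _EXCLUDED_TARGET_KEYS
]
assert target_keys == ["WOPRT", "WWIRT"]  # WGOR is excluded, WBHP does not end in "R"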
everest/simulator/everest_to_ert.py
DELETED
@@ -1,51 +0,0 @@
-import itertools
-from typing import cast
-
-import everest
-from everest.config import EverestConfig
-from everest.config.forward_model_config import SummaryResults
-
-
-def extract_summary_keys(ever_config: EverestConfig) -> list[str]:
-    summary_fms = [
-        fm
-        for fm in ever_config.forward_model
-        if fm.results is not None and fm.results.type == "summary"
-    ]
-
-    if not summary_fms:
-        return []
-
-    summary_fm = summary_fms[0]
-    assert summary_fm.results is not None
-
-    smry_results = cast(SummaryResults, summary_fm.results)
-
-    requested_keys: list[str] = ["*"] if smry_results.keys == "*" else smry_results.keys
-
-    data_keys = everest.simulator.DEFAULT_DATA_SUMMARY_KEYS
-    field_keys = everest.simulator.DEFAULT_FIELD_SUMMARY_KEYS
-    well_sum_keys = everest.simulator.DEFAULT_WELL_SUMMARY_KEYS
-    deprecated_user_specified_keys = (
-        [] if ever_config.export is None else ever_config.export.keywords
-    )
-
-    wells = (
-        [
-            variable.name
-            for control in ever_config.controls
-            for variable in control.variables
-            if control.type == "well_control"
-        ]
-        if ever_config.wells is None
-        else [w.name for w in ever_config.wells]
-    )
-
-    well_keys = [
-        f"{sum_key}:{wname}"
-        for (sum_key, wname) in itertools.product(well_sum_keys, wells)
-    ]
-
-    all_keys = data_keys + field_keys + well_keys + deprecated_user_specified_keys
-
-    return list(set(all_keys + requested_keys))
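The removed extract_summary_keys assembled the requested summary vectors as the union of the data and field defaults, any user-specified keywords, and the cross product of the default well keys with the configured well names (key K and well W become "K:W"). A small sketch of that expansion step, with made-up well names:

import itertools

well_sum_keys = ["WOPR", "WWCT"]
wells = ["PROD1", "INJ1"]  # illustrative names, not from any Everest config
well_keys = [
    f"{key}:{well}" for key, well in itertools.product(well_sum_keys, wells)
]
assert well_keys == ["WOPR:PROD1", "WOPR:INJ1", "WWCT:PROD1", "WWCT:INJ1"]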
/ert/gui/{suggestor → ertwidgets/suggestor}/__init__.py
File without changes
/ert/gui/{suggestor → ertwidgets/suggestor}/_colors.py
File without changes
/ert/gui/{suggestor → ertwidgets/suggestor}/suggestor.py
File without changes
{ert-17.1.9.dist-info → ert-18.0.0.dist-info}/WHEEL
File without changes
{ert-17.1.9.dist-info → ert-18.0.0.dist-info}/entry_points.txt
File without changes
{ert-17.1.9.dist-info → ert-18.0.0.dist-info}/licenses/COPYING
File without changes
{ert-17.1.9.dist-info → ert-18.0.0.dist-info}/top_level.txt
File without changes