ert 16.0.2-py3-none-any.whl → 17.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _ert/forward_model_runner/fm_dispatch.py +9 -6
- _ert/forward_model_runner/reporting/event.py +1 -0
- _ert/forward_model_runner/runner.py +1 -2
- _ert/utils.py +12 -0
- ert/__main__.py +30 -25
- ert/base_model_context.py +1 -1
- ert/cli/main.py +4 -6
- ert/config/__init__.py +2 -0
- ert/config/_create_observation_dataframes.py +1 -1
- ert/config/_read_summary.py +65 -321
- ert/config/design_matrix.py +12 -0
- ert/config/ert_config.py +52 -39
- ert/config/everest_objective_config.py +3 -0
- ert/config/ext_param_config.py +1 -2
- ert/config/field.py +33 -44
- ert/config/gen_kw_config.py +16 -22
- ert/config/parameter_config.py +1 -17
- ert/config/parsing/config_errors.py +1 -1
- ert/config/parsing/config_keywords.py +0 -1
- ert/config/parsing/config_schema.py +0 -8
- ert/config/parsing/config_schema_deprecations.py +21 -23
- ert/config/queue_config.py +38 -45
- ert/config/surface_config.py +32 -41
- ert/config/workflow_job.py +135 -54
- ert/dark_storage/common.py +21 -2
- ert/dark_storage/endpoints/__init__.py +2 -0
- ert/dark_storage/endpoints/ensembles.py +4 -0
- ert/dark_storage/endpoints/experiment_server.py +18 -15
- ert/dark_storage/endpoints/parameters.py +8 -0
- ert/dark_storage/endpoints/version.py +16 -0
- ert/dark_storage/json_schema/ensemble.py +3 -0
- ert/field_utils/field_utils.py +16 -12
- ert/field_utils/grdecl_io.py +1 -1
- ert/gui/ertwidgets/closabledialog.py +2 -0
- ert/gui/ertwidgets/copyablelabel.py +2 -0
- ert/gui/ertwidgets/listeditbox.py +2 -0
- ert/gui/ertwidgets/models/activerealizationsmodel.py +3 -0
- ert/gui/ertwidgets/models/ertsummary.py +5 -8
- ert/gui/ertwidgets/models/targetensemblemodel.py +3 -0
- ert/gui/ertwidgets/models/text_model.py +3 -0
- ert/gui/ertwidgets/searchbox.py +4 -0
- ert/gui/ertwidgets/stringbox.py +2 -0
- ert/gui/main.py +4 -5
- ert/gui/main_window.py +3 -2
- ert/gui/model/fm_step_list.py +3 -0
- ert/gui/model/real_list.py +1 -0
- ert/gui/model/snapshot.py +1 -0
- ert/gui/simulation/combobox_with_description.py +3 -0
- ert/gui/simulation/ensemble_experiment_panel.py +7 -1
- ert/gui/simulation/ensemble_information_filter_panel.py +6 -1
- ert/gui/simulation/ensemble_smoother_panel.py +7 -1
- ert/gui/simulation/evaluate_ensemble_panel.py +16 -6
- ert/gui/simulation/experiment_panel.py +2 -3
- ert/gui/simulation/manual_update_panel.py +4 -2
- ert/gui/simulation/multiple_data_assimilation_panel.py +6 -2
- ert/gui/simulation/run_dialog.py +21 -0
- ert/gui/simulation/single_test_run_panel.py +4 -1
- ert/gui/simulation/view/progress_widget.py +2 -0
- ert/gui/simulation/view/realization.py +5 -1
- ert/gui/simulation/view/update.py +2 -0
- ert/gui/summarypanel.py +1 -1
- ert/gui/tools/event_viewer/panel.py +3 -4
- ert/gui/tools/event_viewer/tool.py +2 -0
- ert/gui/tools/export/export_tool.py +2 -0
- ert/gui/tools/file/file_dialog.py +6 -2
- ert/gui/tools/load_results/load_results_tool.py +2 -0
- ert/gui/tools/manage_experiments/manage_experiments_panel.py +2 -0
- ert/gui/tools/manage_experiments/storage_widget.py +3 -1
- ert/gui/tools/plot/customize/color_chooser.py +5 -2
- ert/gui/tools/plot/customize/customize_plot_dialog.py +2 -0
- ert/gui/tools/plot/customize/default_customization_view.py +4 -0
- ert/gui/tools/plot/customize/limits_customization_view.py +3 -0
- ert/gui/tools/plot/customize/statistics_customization_view.py +3 -0
- ert/gui/tools/plot/customize/style_chooser.py +2 -0
- ert/gui/tools/plot/customize/style_customization_view.py +3 -0
- ert/gui/tools/plot/data_type_keys_widget.py +2 -0
- ert/gui/tools/plot/data_type_proxy_model.py +3 -0
- ert/gui/tools/plot/plot_api.py +26 -3
- ert/gui/tools/plot/plot_ensemble_selection_widget.py +17 -10
- ert/gui/tools/plot/plot_widget.py +41 -2
- ert/gui/tools/plot/plot_window.py +116 -76
- ert/gui/tools/plot/plottery/plot_context.py +9 -0
- ert/gui/tools/plot/plottery/plots/ensemble.py +1 -2
- ert/gui/tools/plot/plottery/plots/histogram.py +34 -5
- ert/gui/tools/plot/widgets/clearable_line_edit.py +9 -0
- ert/gui/tools/plot/widgets/filter_popup.py +2 -0
- ert/gui/tools/plot/widgets/filterable_kw_list_model.py +3 -0
- ert/gui/tools/plugins/plugin.py +1 -1
- ert/gui/tools/plugins/plugins_tool.py +2 -0
- ert/gui/tools/plugins/process_job_dialog.py +3 -0
- ert/gui/tools/workflows/workflow_dialog.py +2 -0
- ert/gui/tools/workflows/workflows_tool.py +2 -0
- ert/libres_facade.py +5 -7
- ert/logging/__init__.py +4 -1
- ert/plugins/__init__.py +4 -5
- ert/plugins/hook_specifications/__init__.py +0 -8
- ert/plugins/plugin_manager.py +52 -96
- ert/resources/forward_models/run_reservoirsimulator.py +0 -1
- ert/resources/forward_models/template_render.py +10 -10
- ert/run_models/__init__.py +6 -1
- ert/run_models/_create_run_path.py +2 -1
- ert/run_models/everest_run_model.py +182 -71
- ert/run_models/run_model.py +9 -19
- ert/scheduler/__init__.py +10 -5
- ert/scheduler/driver.py +3 -0
- ert/scheduler/lsf_driver.py +9 -3
- ert/scheduler/openpbs_driver.py +3 -3
- ert/scheduler/slurm_driver.py +14 -3
- ert/services/_storage_main.py +20 -18
- ert/shared/net_utils.py +16 -29
- ert/shared/version.py +3 -3
- ert/storage/__init__.py +12 -1
- ert/storage/local_ensemble.py +6 -1
- ert/storage/local_experiment.py +46 -18
- ert/storage/local_storage.py +23 -17
- ert/storage/migration/to10.py +3 -2
- ert/storage/migration/to11.py +8 -9
- ert/storage/migration/to12.py +19 -20
- ert/storage/migration/to13.py +28 -27
- ert/storage/migration/to14.py +3 -3
- ert/storage/migration/to15.py +25 -0
- ert/storage/migration/to6.py +3 -2
- ert/storage/migration/to7.py +12 -13
- ert/storage/migration/to8.py +5 -7
- ert/storage/migration/to9.py +5 -4
- ert/storage/realization_storage_state.py +7 -7
- ert/validation/ensemble_realizations_argument.py +4 -2
- ert/workflow_runner.py +4 -2
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/METADATA +16 -11
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/RECORD +160 -159
- everest/assets/everest_logo.svg +406 -0
- everest/bin/config_branch_script.py +28 -9
- everest/bin/everconfigdump_script.py +1 -1
- everest/bin/everest_script.py +32 -22
- everest/bin/everlint_script.py +3 -3
- everest/bin/kill_script.py +5 -3
- everest/bin/main.py +11 -24
- everest/bin/monitor_script.py +63 -34
- everest/bin/utils.py +50 -39
- everest/bin/visualization_script.py +19 -2
- everest/config/everest_config.py +28 -38
- everest/config/install_job_config.py +39 -1
- everest/config/server_config.py +0 -6
- everest/config/simulator_config.py +62 -17
- everest/config/validation_utils.py +17 -4
- everest/config_file_loader.py +17 -17
- everest/detached/__init__.py +0 -6
- everest/detached/client.py +4 -49
- everest/detached/everserver.py +13 -38
- everest/everest_storage.py +19 -29
- everest/optimizer/everest2ropt.py +10 -11
- everest/optimizer/opt_model_transforms.py +4 -8
- everest/plugins/hook_specs.py +0 -24
- everest/simulator/everest_to_ert.py +1 -202
- everest/strings.py +1 -1
- everest/util/__init__.py +3 -1
- ert/plugins/hook_specifications/ecl_config.py +0 -29
- ert/summary_key_type.py +0 -234
- everest/bin/everexport_script.py +0 -53
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/WHEEL +0 -0
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/entry_points.txt +0 -0
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/licenses/COPYING +0 -0
- {ert-16.0.2.dist-info → ert-17.0.0.dist-info}/top_level.txt +0 -0
everest/config/simulator_config.py
CHANGED
@@ -75,9 +75,22 @@ class SimulatorConfig(BaseModelWithContextSupport, extra="forbid"):
         default=None,
         description=dedent(
             """
-
+            Amount of memory to set aside for a forward model.
 
-
+            This information is propagated to the queue system as the amount of memory
+            to reserve/book for a realization to complete. It is up to the configuration
+            of the queuing system how to treat this information, but usually it will
+            stop more realizations being assigned to a compute node if the compute nodes
+            memory is already fully booked.
+
+            Setting this number lower than the peak memory consumption of each
+            realization puts the realization at risk of being killed in an out-of-memory
+            situation. Setting this number higher than needed will give longer wait
+            times in the queue.
+
+            For the local queue system, this keyword has no effect. In that scenario,
+            you can use `max_running` to choke the memory consumption.
+            scheduling of compute jobs.
 
             `max_memory` may be an integer value, indicating the number of
             bytes, or a string consisting of a number followed by a unit. The
@@ -94,9 +107,6 @@ class SimulatorConfig(BaseModelWithContextSupport, extra="forbid"):
             Spaces between the number and the unit are ignored, and so are any
             characters after the first. For example: 2g, 2G, and 2 GB all
             resolve to the same value: 2 gigabytes, equaling 2 * 1024**3 bytes.
-
-            If not set, or a set to zero, the allowed amount of memory is
-            unlimited.
             """
         ),
     )
@@ -132,17 +142,40 @@ class SimulatorConfig(BaseModelWithContextSupport, extra="forbid"):
         ),
     )
 
-    @
+    @model_validator(mode="before")
     @classmethod
-    def
-
+    def apply_site_or_default_queue_if_no_user_queue(
+        cls, data: dict[str, Any], info: ValidationInfo
+    ) -> Any:
+        queue_system = data.get("queue_system")
+        if queue_system is None:
             options = None
             if info.context:
                 options = info.context.queue_options
-            return options or LocalQueueOptions(max_running=8)
-        return v
 
-
+            defaulted_queue_options = (
+                options.model_dump()
+                if options is not None
+                else LocalQueueOptions(max_running=8).model_dump()
+            )
+
+            user_configured_max_memory = data.get("max_memory")
+            if user_configured_max_memory is not None:
+                cls.validate_max_memory(max_memory=user_configured_max_memory)
+                defaulted_queue_options["realization_memory"] = (
+                    user_configured_max_memory
+                )
+
+            user_configured_cores_per_node = data.get("cores_per_node")
+            if user_configured_cores_per_node is not None:
+                defaulted_queue_options["num_cpu"] = user_configured_cores_per_node
+
+            data["queue_system"] = defaulted_queue_options
+            data["max_memory"] = None
+
+        return data
+
+    @field_validator("max_memory", mode="before")
     @classmethod
     def validate_max_memory(cls, max_memory: int | str | None) -> str | None:
         if max_memory is None:
@@ -163,14 +196,26 @@ class SimulatorConfig(BaseModelWithContextSupport, extra="forbid"):
 
     @model_validator(mode="after")
     def update_max_memory(config: "SimulatorConfig") -> "SimulatorConfig":
+        if config.max_memory is None:
+            return config
+        parsed_max_memory = (
+            parse_string_to_bytes(config.max_memory)
+            if type(config.max_memory) is str
+            else int(config.max_memory)
+        )
         if (
-            config.
-            and config.queue_system is not None
+            config.queue_system is not None
            and config.queue_system.realization_memory == 0
         ):
-            config.queue_system.realization_memory =
-
-
-
+            config.queue_system.realization_memory = parsed_max_memory
+        elif (
+            config.queue_system is not None
+            and config.queue_system.realization_memory > 0
+            and config.queue_system.realization_memory != parsed_max_memory
+        ):
+            raise ConfigValidationError(
+                "Ambiguous configuration of realization_memory. "
+                "Specify either max_memory or realization_memory, not both"
+            )
+
         return config
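Note: the new description and validators above document that `max_memory` accepts either an integer byte count or a number followed by a unit (2g, 2G and 2 GB all mean 2 * 1024**3 bytes), and that the parsed value is forwarded to the queue system as `realization_memory`. Below is a minimal, self-contained sketch of that unit handling; `memory_to_bytes` and `_UNIT_FACTORS` are illustrative names only, the real conversion in ert is done by the `parse_string_to_bytes` helper referenced in the hunk.

# Standalone sketch of the documented unit handling; not the ert implementation.
import re

_UNIT_FACTORS = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}

def memory_to_bytes(value: int | str) -> int:
    """Interpret an integer as bytes, or a 'number + unit' string such as '2g' or '2 GB'."""
    if isinstance(value, int):
        return value
    match = re.fullmatch(r"\s*(\d+)\s*([bkmgtBKMGT]?).*", value)
    if match is None:
        raise ValueError(f"Cannot parse memory amount: {value!r}")
    number, unit = match.groups()
    # Spaces and any characters after the first unit letter are ignored,
    # so '2g', '2G' and '2 GB' all resolve to 2 * 1024**3 bytes.
    return int(number) * _UNIT_FACTORS[unit.lower() or "b"]

assert memory_to_bytes("2 GB") == 2 * 1024**3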
everest/config/validation_utils.py
CHANGED
@@ -15,7 +15,7 @@ from everest.util.forward_models import (
     parse_forward_model_file,
 )
 
-from .install_job_config import
+from .install_job_config import InstallForwardModelStepConfig
 
 _VARIABLE_ERROR_MESSAGE = (
     "Variable {name} must define {variable_type} value either"
@@ -39,6 +39,8 @@ _RESERVED_WORDS = [
     "total_objective_value",
 ]
 
+_OVERWRITE_MESSAGE = "Are you overwriting other parts of install_data?"
+
 
 class InstallDataContext:
     def __init__(
@@ -82,9 +84,20 @@ class InstallDataContext:
 
         tmp_target = Path(self._temp_dir.name) / Path(target)
         if tmp_target.exists():
+            if tmp_target.is_dir() and not tmp_target.is_symlink():
+                raise ValueError(
+                    "Cannot make symlink due to existing directory at target location"
+                    f" {tmp_target}. " + _OVERWRITE_MESSAGE
+                )
             tmp_target.unlink()
-
-
+        try:
+            tmp_target.parent.mkdir(parents=True, exist_ok=True)
+            tmp_target.symlink_to(as_abs_path(source, self._config_dir))
+        except FileExistsError as err:
+            raise ValueError(
+                f"Cannot install data {source} into {target} due to existing "
+                "file or directory. " + _OVERWRITE_MESSAGE
+            ) from err
 
     def add_links_for_realization(self, realization: int) -> None:
         for data in self._install_data:
@@ -284,7 +297,7 @@ def check_writeable_path(path_source: str, config_path: Path) -> None:
 
 
 def validate_forward_model_configs(
-    forward_model: list[str], install_jobs: list[
+    forward_model: list[str], install_jobs: list[InstallForwardModelStepConfig]
 ) -> None:
     if not forward_model:
         return
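Note: the InstallDataContext change above adds two guards around symlink creation: a real directory already present at the target is never silently replaced, and a FileExistsError from symlink_to is re-raised as a ValueError carrying the overwrite hint. A minimal sketch of the same pattern, with illustrative names (`link_into_tree` is not the real method; the actual logic lives in everest/config/validation_utils.py):

from pathlib import Path

OVERWRITE_MESSAGE = "Are you overwriting other parts of install_data?"

def link_into_tree(source: Path, target: Path) -> None:
    """Create target as a symlink to source, refusing to clobber a real directory."""
    if target.exists():
        if target.is_dir() and not target.is_symlink():
            # A genuine directory at the target is never silently replaced.
            raise ValueError(
                f"Cannot make symlink due to existing directory at {target}. "
                + OVERWRITE_MESSAGE
            )
        target.unlink()  # stale file or symlink: replace it
    try:
        target.parent.mkdir(parents=True, exist_ok=True)
        target.symlink_to(source)
    except FileExistsError as err:
        raise ValueError(
            f"Cannot install data {source} into {target} due to an existing "
            "file or directory. " + OVERWRITE_MESSAGE
        ) from err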
everest/config_file_loader.py
CHANGED
@@ -4,6 +4,7 @@ import logging
 import os
 import re
 from io import StringIO
+from pathlib import Path
 from typing import Any
 
 import jinja2
@@ -31,23 +32,22 @@ ERT_CONFIG_TEMPLATES = {
 
 
 def load_yaml(file_name: str, safe: bool = False) -> dict[str, Any]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            raise YAMLError(str(exc)) from exc
+    input_data = Path(file_name).read_text(encoding="utf-8").splitlines()
+    try:
+        yaml = YAML(typ="safe", pure=True) if safe else YAML()
+        yaml.preserve_quotes = True
+        return yaml.load("\n".join(input_data))
+    except YAMLError as exc:
+        if hasattr(exc, "problem_mark"):
+            mark = exc.problem_mark
+            raise YAMLError(
+                str(exc)
+                + "\nError in line: {}\n {}^)".format(
+                    input_data[mark.line], " " * mark.column
+                )
+            ) from exc
+        else:
+            raise YAMLError(str(exc)) from exc
 
 
 def _get_definitions(
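Note: the rewritten load_yaml reads the file with pathlib and, when the parser attaches a problem_mark to the error, re-raises it with the offending line and a caret-style column pointer appended. A short sketch of that annotation idea, assuming ruamel.yaml (which is where the YAML(typ="safe", pure=True) constructor and the problem_mark attribute come from); `load_yaml_with_context` is an illustrative name, not the function in the diff:

from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError

def load_yaml_with_context(text: str) -> dict:
    lines = text.splitlines()
    try:
        return YAML(typ="safe", pure=True).load(text)
    except YAMLError as exc:
        if hasattr(exc, "problem_mark") and exc.problem_mark is not None:
            mark = exc.problem_mark
            # Point at the offending line and column in the re-raised error.
            raise YAMLError(
                f"{exc}\nError in line: {lines[mark.line]}\n {' ' * mark.column}^"
            ) from exc
        raise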
everest/detached/__init__.py
CHANGED
@@ -4,28 +4,22 @@ Client methods for interacting with everserver
 
 from .client import (
     PROXY,
-    ExperimentState,
-    everserver_status,
     server_is_running,
     start_experiment,
     start_monitor,
     start_server,
     stop_server,
-    update_everserver_status,
     wait_for_server,
     wait_for_server_to_stop,
 )
 
 __all__ = [
     "PROXY",
-    "ExperimentState",
-    "everserver_status",
     "server_is_running",
     "start_experiment",
     "start_monitor",
     "start_server",
     "stop_server",
-    "update_everserver_status",
     "wait_for_server",
     "wait_for_server_to_stop",
 ]
everest/detached/client.py
CHANGED
@@ -1,7 +1,5 @@
 import asyncio
-import json
 import logging
-import os
 import re
 import ssl
 import time
@@ -17,9 +15,7 @@ from websockets import ConnectionClosedError, ConnectionClosedOK
 from websockets.sync.client import connect
 
 from ert.dark_storage.client import Client
-from ert.ensemble_evaluator import EndEvent
 from ert.run_models.event import EverestBatchResultEvent, status_event_from_json
-from ert.run_models.run_model import ExperimentState
 from ert.scheduler import create_driver
 from ert.scheduler.driver import Driver, FailedSubmit
 from ert.scheduler.event import StartedEvent
@@ -49,7 +45,7 @@ async def start_server(config: EverestConfig, logging_level: int) -> Driver:
     """
     Start an Everest server running the optimization defined in the config
     """
-    driver = create_driver(config.server.queue_system)  # type: ignore
+    driver = create_driver(config.server.queue_system, poll_period=0.1)  # type: ignore
     try:
         args = [
             "--output-dir",
@@ -236,9 +232,7 @@ def start_monitor(
             try:
                 message = websocket.recv(timeout=1.0)
                 event = status_event_from_json(message)
-                if isinstance(event,
-                    print(event.msg)
-                elif isinstance(event, EverestBatchResultEvent):
+                if isinstance(event, EverestBatchResultEvent):
                     if event.result_type == "FunctionResult":
                         callback(
                             {
@@ -261,45 +255,6 @@ def start_monitor(
                 logger.error("Error when processing event %s", exc_info=e)
 
             time.sleep(polling_interval)
-    except Exception:
-        logger.debug(traceback.format_exc())
-
 
-
-
-) -> None:
-    """Update the everest server status with new status information"""
-    new_status = {"status": status, "message": message}
-    path = everserver_status_path
-    if not os.path.exists(os.path.dirname(path)):
-        os.makedirs(os.path.dirname(path))
-        with open(path, "w", encoding="utf-8") as outfile:
-            json.dump(new_status, outfile)
-    elif os.path.exists(path):
-        server_status = everserver_status(path)
-        if server_status["message"] is not None:
-            if message is not None:
-                new_status["message"] = "{}\n{}".format(
-                    server_status["message"], message
-                )
-            else:
-                new_status["message"] = server_status["message"]
-        with open(path, "w", encoding="utf-8") as outfile:
-            json.dump(new_status, outfile)
-
-
-def everserver_status(everserver_status_path: str) -> dict[str, Any]:
-    """Returns a dictionary representing the everest server status. If the
-    status file is not found we assume the server has never ran before, and will
-    return a status of ServerStatus.never_run
-
-    Example: {
-        'status': ServerStatus.completed
-        'message': None
-    }
-    """
-    if os.path.exists(everserver_status_path):
-        with open(everserver_status_path, encoding="utf-8") as f:
-            return json.load(f)
-    else:
-        return {"status": ExperimentState.never_run, "message": None}
+    except Exception:
+        logger.exception(traceback.format_exc())
everest/detached/everserver.py
CHANGED
@@ -4,7 +4,6 @@ import logging.config
 import os
 import pathlib
 import time
-import traceback
 from pathlib import Path
 from tempfile import NamedTemporaryFile
 from typing import Any
@@ -13,20 +12,17 @@ import yaml
 from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
 
 from ert.plugins.plugin_manager import ErtPluginManager
-from ert.run_models.run_model import ExperimentStatus
 from ert.services import StorageService
 from ert.services._base_service import BaseServiceExit
+from ert.storage import ExperimentStatus
+from ert.storage.local_experiment import ExperimentState
 from ert.trace import tracer
 from everest.config import ServerConfig
-from everest.detached import (
-    ExperimentState,
-    everserver_status,
-    update_everserver_status,
-)
 from everest.strings import (
     DEFAULT_LOGGING_FORMAT,
     EVEREST,
     EVERSERVER,
+    EXPERIMENT_SERVER,
     OPTIMIZATION_LOG_DIR,
 )
 from everest.util import makedirs_if_needed, version_info
@@ -67,6 +63,11 @@ def _configure_loggers(
             "level": logging_level,
             "propagate": False,
         },
+        EXPERIMENT_SERVER: {
+            "handlers": ["endpoint_log"],
+            "level": logging_level,
+            "propagate": False,
+        },
         EVEREST: {
             "handlers": ["everest_log"],
             "level": logging_level,
@@ -93,8 +94,9 @@ def _configure_loggers(
 
     yaml.add_representer(pathlib.PosixPath, path_representer)
     if output_file:
-
-        yaml.dump(logging_config,
+        Path(output_file).write_text(
+            yaml.dump(logging_config, default_flow_style=False), encoding="utf-8"
+        )
     logging.config.dictConfig(logging_config)
 
     plugin_manager = ErtPluginManager()
@@ -135,8 +137,6 @@ def main() -> None:
 
     output_dir = options.output_dir
 
-    status_path = ServerConfig.get_everserver_status_path(output_dir)
-
     ctx = (
         TraceContextTextMapPropagator().extract(
             carrier={"traceparent": options.traceparent}
@@ -158,7 +158,6 @@ def main() -> None:
     )
 
     logging.getLogger(EVERSERVER).info("Everserver starting ...")
-    update_everserver_status(status_path, ExperimentState.pending)
     logger.info(version_info())
     logger.info(f"Output directory: {output_dir}")
     # Starting the server
@@ -168,8 +167,7 @@ def main() -> None:
             timeout=240, project=server_path, logging_config=log_file.name
         ) as server:
             server.fetch_conn_info()
-            with StorageService.session(project=server_path) as client:
-                update_everserver_status(status_path, ExperimentState.running)
+            with StorageService.session(project=Path(server_path)) as client:
                 done = False
                 while not done:
                     response = client.get(
@@ -181,33 +179,10 @@ def main() -> None:
                         ExperimentState.running,
                     }
                     time.sleep(0.5)
-            if status.status == ExperimentState.completed:
-                update_everserver_status(
-                    status_path,
-                    ExperimentState.completed,
-                    message=status.message,
-                )
-            elif status.status == ExperimentState.stopped:
-                update_everserver_status(
-                    status_path,
-                    ExperimentState.stopped,
-                    message=status.message,
-                )
-            elif status.status == ExperimentState.failed:
-                update_everserver_status(
-                    status_path, ExperimentState.failed, message=status.message
-                )
     except BaseServiceExit:
         # Server exit, happens on normal shutdown and keyboard interrupt
-
-        if server_status["status"] == ExperimentState.running:
-            update_everserver_status(status_path, ExperimentState.stopped)
+        logging.getLogger(EVERSERVER).info("Everserver stopped by user")
     except Exception as e:
-        update_everserver_status(
-            status_path,
-            ExperimentState.failed,
-            message=traceback.format_exc(),
-        )
         logging.getLogger(EVERSERVER).exception(e)
 
 
everest/everest_storage.py
CHANGED
@@ -12,7 +12,7 @@ import numpy as np
 import polars as pl
 from ropt.results import FunctionResults, GradientResults, Results
 
-from
+from ert.config import EverestObjectivesConfig
 from everest.config.output_constraint_config import OutputConstraintConfig
 from everest.strings import EVEREST
 
@@ -153,16 +153,12 @@ class BatchStorageData:
 
     @cached_property
     def is_improvement(self) -> bool:
-
-        info = json.load(f)
-
+        info = json.loads((self._path / "batch.json").read_text(encoding="utf-8"))
         return bool(info["is_improvement"])
 
     @cached_property
     def batch_id(self) -> bool:
-
-        info = json.load(f)
-
+        info = json.loads((self._path / "batch.json").read_text(encoding="utf-8"))
         return info["batch_id"]
 
     def write_metadata(self, is_improvement: bool) -> None:
@@ -170,15 +166,9 @@ class BatchStorageData:
         if "is_improvement" in self.__dict__:
             del self.is_improvement
 
-
-
-
-
-        with open(self._path / "batch.json", "w", encoding="utf-8") as f:
-            json.dump(
-                info,
-                f,
-            )
+        info = json.loads((self._path / "batch.json").read_text(encoding="utf-8"))
+        info["is_improvement"] = is_improvement
+        (self._path / "batch.json").write_text(json.dumps(info), encoding="utf-8")
 
 
 class FunctionBatchStorageData(BatchStorageData):
@@ -541,7 +531,7 @@ class EverestStorage:
     def init(
         self,
         formatted_control_names: list[str],
-        objective_functions:
+        objective_functions: EverestObjectivesConfig,
         output_constraints: list[OutputConstraintConfig] | None,
         realizations: list[int],
     ) -> None:
@@ -554,18 +544,21 @@ class EverestStorage:
        # TODO: The weight and normalization keys are only used by the everest api,
        # with everviz. They should be removed in the long run.
        weights = np.fromiter(
-            (
+            (
+                1.0 if weight is None else weight
+                for weight in objective_functions.weights
+            ),
            dtype=np.float64,
        )
 
        objective_functions_dataframe = pl.DataFrame(
            {
-                "objective_name":
+                "objective_name": objective_functions.keys,
                "weight": pl.Series(weights / sum(weights), dtype=pl.Float64),
                "scale": pl.Series(
                    [
-                        1.0 if
-                        for
+                        1.0 if scale is None else scale
+                        for scale in objective_functions.scales
                    ],
                    dtype=pl.Float64,
                ),
@@ -906,18 +899,15 @@ class EverestStorage:
        for batch_id, batch_dict in batch_dicts.items():
            target_ensemble = self._experiment.get_ensemble_by_name(f"batch_{batch_id}")
 
-
-
-                "w+",
-                encoding="utf-8",
-            ) as f:
-                json.dump(
+            (target_ensemble.optimizer_mount_point / "batch.json").write_text(
+                json.dumps(
                    {
                        "batch_id": batch_id,
                        "is_improvement": False,
                    },
-
-
+                ),
+                encoding="utf-8",
+            )
 
            batch_data = BatchStorageData(path=target_ensemble.optimizer_mount_point)
 
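Note: the storage changes above replace open()/json.load round-trips with pathlib read_text/write_text; write_metadata now reads batch.json, updates is_improvement, and writes the file back. A small sketch of that read-modify-write pattern, with an illustrative function name and path (not the actual BatchStorageData method):

import json
from pathlib import Path

def set_is_improvement(batch_dir: Path, is_improvement: bool) -> None:
    # Read batch.json, flip the flag, and write it back in place.
    batch_file = batch_dir / "batch.json"
    info = json.loads(batch_file.read_text(encoding="utf-8"))
    info["is_improvement"] = is_improvement
    batch_file.write_text(json.dumps(info), encoding="utf-8")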
everest/optimizer/everest2ropt.py
CHANGED
@@ -3,11 +3,11 @@ from typing import Any
 
 from ropt.enums import PerturbationType, VariableType
 
+from ert.config import EverestObjectivesConfig
 from everest.config import (
     ControlConfig,
     InputConstraintConfig,
     ModelConfig,
-    ObjectiveFunctionConfig,
     OptimizationConfig,
     OutputConstraintConfig,
 )
@@ -48,16 +48,15 @@ def _parse_controls(
 
 
 def _parse_objectives(
-    objective_functions:
+    objective_functions: EverestObjectivesConfig,
 ) -> tuple[dict[str, Any], list[dict[str, Any]]]:
-    weights: list[float] = [
+    weights: list[float] = [
+        1.0 if weight is None else weight for weight in objective_functions.weights
+    ]
     function_estimator_indices: list[int] = []
     function_estimators: list = []  # type: ignore
 
-    for
-        assert isinstance(objective.name, str)
-        weights.append(objective.weight or 1.0)
-
+    for objective_type in objective_functions.objective_types:
        # If any objective specifies an objective type, we have to specify
        # function estimators in ropt to implement these types. This is done by
        # supplying a list of estimators and for each objective an index into
@@ -66,14 +65,14 @@ def _parse_objectives(
            (
                idx
                for idx, estimator in enumerate(function_estimators)
-                if estimator["method"] ==
+                if estimator["method"] == objective_type
            ),
            None,
        )
        # If not, make a new estimator:
        if function_estimator_idx is None:
            function_estimator_idx = len(function_estimators)
-            function_estimators.append({"method":
+            function_estimators.append({"method": objective_type})
        function_estimator_indices.append(function_estimator_idx)
 
    ropt_objectives: dict[str, Any] = {"weights": weights}
@@ -250,7 +249,7 @@ def _parse_optimization(
 
 def everest2ropt(
    controls: list[ControlConfig],
-    objective_functions:
+    objective_functions: EverestObjectivesConfig,
    input_constraints: list[InputConstraintConfig],
    output_constraints: list[OutputConstraintConfig],
    optimization: OptimizationConfig | None,
@@ -297,7 +296,7 @@ def everest2ropt(
        "variable": [
            name for config in controls for name in config.formatted_control_names
        ],
-        "objective":
+        "objective": objective_functions.keys,
        "nonlinear_constraint": [
            constraint.name for constraint in output_constraints
        ],
everest/optimizer/opt_model_transforms.py
CHANGED
@@ -11,11 +11,11 @@ from ropt.transforms.base import (
     VariableTransform,
 )
 
+from ert.config import EverestObjectivesConfig
 from everest.config import (
     ControlConfig,
     InputConstraintConfig,
     ModelConfig,
-    ObjectiveFunctionConfig,
     OutputConstraintConfig,
 )
 from everest.config.utils import FlattenedControls
@@ -430,7 +430,7 @@ class ConstraintScaler(NonLinearConstraintTransform):
 
 def get_optimization_domain_transforms(
     controls: list[ControlConfig],
-    objectives:
+    objectives: EverestObjectivesConfig,
     input_constraints: list[InputConstraintConfig] | None,
     output_constraints: list[OutputConstraintConfig] | None,
     model: ModelConfig,
@@ -461,14 +461,10 @@ def get_optimization_domain_transforms(
 
     objective_scaler = ObjectiveScaler(
         auto_scale=auto_scale,
-        scales=[
-            1.0 if objective.scale is None else objective.scale
-            for objective in objectives
-        ],
+        scales=[1.0 if scale is None else scale for scale in objectives.scales],
         realization_weights=realization_weights,
         objective_weights=[
-            1.0 if
-            for objective in objectives
+            1.0 if weight is None else weight for weight in objectives.weights
         ],
     )
 