ert-17.0.0-py3-none-any.whl → ert-19.0.0rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (218)
  1. _ert/events.py +19 -2
  2. _ert/forward_model_runner/client.py +6 -2
  3. ert/__main__.py +28 -13
  4. ert/analysis/_enif_update.py +8 -4
  5. ert/analysis/_es_update.py +19 -6
  6. ert/analysis/_update_commons.py +16 -6
  7. ert/cli/main.py +13 -6
  8. ert/cli/monitor.py +7 -0
  9. ert/config/__init__.py +15 -6
  10. ert/config/_create_observation_dataframes.py +117 -20
  11. ert/config/_get_num_cpu.py +1 -1
  12. ert/config/_observations.py +91 -2
  13. ert/config/_read_summary.py +8 -6
  14. ert/config/design_matrix.py +51 -24
  15. ert/config/distribution.py +1 -1
  16. ert/config/ensemble_config.py +9 -17
  17. ert/config/ert_config.py +103 -19
  18. ert/config/everest_control.py +234 -0
  19. ert/config/{everest_objective_config.py → everest_response.py} +24 -15
  20. ert/config/field.py +96 -84
  21. ert/config/forward_model_step.py +122 -17
  22. ert/config/gen_data_config.py +5 -10
  23. ert/config/gen_kw_config.py +5 -35
  24. ert/config/known_response_types.py +14 -0
  25. ert/config/parameter_config.py +1 -33
  26. ert/config/parsing/_option_dict.py +10 -2
  27. ert/config/parsing/config_keywords.py +2 -0
  28. ert/config/parsing/config_schema.py +23 -3
  29. ert/config/parsing/config_schema_deprecations.py +3 -14
  30. ert/config/parsing/config_schema_item.py +26 -11
  31. ert/config/parsing/context_values.py +3 -3
  32. ert/config/parsing/file_context_token.py +1 -1
  33. ert/config/parsing/observations_parser.py +6 -2
  34. ert/config/parsing/queue_system.py +9 -0
  35. ert/config/parsing/schema_item_type.py +1 -0
  36. ert/config/queue_config.py +4 -5
  37. ert/config/response_config.py +0 -8
  38. ert/config/rft_config.py +275 -0
  39. ert/config/summary_config.py +3 -8
  40. ert/config/surface_config.py +59 -16
  41. ert/config/workflow_fixtures.py +2 -1
  42. ert/dark_storage/client/__init__.py +2 -2
  43. ert/dark_storage/client/_session.py +4 -4
  44. ert/dark_storage/client/client.py +2 -2
  45. ert/dark_storage/common.py +1 -1
  46. ert/dark_storage/compute/misfits.py +11 -7
  47. ert/dark_storage/endpoints/compute/misfits.py +6 -4
  48. ert/dark_storage/endpoints/experiment_server.py +12 -9
  49. ert/dark_storage/endpoints/experiments.py +2 -2
  50. ert/dark_storage/endpoints/observations.py +8 -6
  51. ert/dark_storage/endpoints/parameters.py +2 -18
  52. ert/dark_storage/endpoints/responses.py +24 -5
  53. ert/dark_storage/json_schema/experiment.py +1 -1
  54. ert/data/_measured_data.py +6 -5
  55. ert/ensemble_evaluator/__init__.py +8 -1
  56. ert/ensemble_evaluator/config.py +2 -1
  57. ert/ensemble_evaluator/evaluator.py +81 -29
  58. ert/ensemble_evaluator/event.py +6 -0
  59. ert/ensemble_evaluator/snapshot.py +3 -1
  60. ert/ensemble_evaluator/state.py +1 -0
  61. ert/field_utils/__init__.py +8 -0
  62. ert/field_utils/field_utils.py +212 -3
  63. ert/field_utils/roff_io.py +1 -1
  64. ert/gui/__init__.py +5 -2
  65. ert/gui/ertnotifier.py +1 -1
  66. ert/gui/ertwidgets/__init__.py +23 -16
  67. ert/gui/ertwidgets/analysismoduleedit.py +2 -2
  68. ert/gui/ertwidgets/checklist.py +1 -1
  69. ert/gui/ertwidgets/create_experiment_dialog.py +3 -1
  70. ert/gui/ertwidgets/ensembleselector.py +2 -2
  71. ert/gui/ertwidgets/models/__init__.py +2 -0
  72. ert/gui/ertwidgets/models/activerealizationsmodel.py +2 -1
  73. ert/gui/ertwidgets/models/path_model.py +1 -1
  74. ert/gui/ertwidgets/models/targetensemblemodel.py +2 -1
  75. ert/gui/ertwidgets/models/text_model.py +1 -1
  76. ert/gui/ertwidgets/pathchooser.py +0 -3
  77. ert/gui/ertwidgets/searchbox.py +13 -4
  78. ert/gui/{suggestor → ertwidgets/suggestor}/_suggestor_message.py +13 -4
  79. ert/gui/{suggestor → ertwidgets/suggestor}/suggestor.py +63 -30
  80. ert/gui/main.py +37 -8
  81. ert/gui/main_window.py +1 -7
  82. ert/gui/simulation/ensemble_experiment_panel.py +1 -1
  83. ert/gui/simulation/ensemble_information_filter_panel.py +1 -1
  84. ert/gui/simulation/ensemble_smoother_panel.py +1 -1
  85. ert/gui/simulation/evaluate_ensemble_panel.py +1 -1
  86. ert/gui/simulation/experiment_panel.py +16 -3
  87. ert/gui/simulation/manual_update_panel.py +31 -8
  88. ert/gui/simulation/multiple_data_assimilation_panel.py +12 -8
  89. ert/gui/simulation/run_dialog.py +27 -20
  90. ert/gui/simulation/single_test_run_panel.py +2 -2
  91. ert/gui/summarypanel.py +20 -1
  92. ert/gui/tools/load_results/load_results_panel.py +1 -1
  93. ert/gui/tools/manage_experiments/export_dialog.py +136 -0
  94. ert/gui/tools/manage_experiments/storage_info_widget.py +121 -16
  95. ert/gui/tools/manage_experiments/storage_widget.py +1 -2
  96. ert/gui/tools/plot/plot_api.py +37 -25
  97. ert/gui/tools/plot/plot_widget.py +10 -2
  98. ert/gui/tools/plot/plot_window.py +38 -18
  99. ert/gui/tools/plot/plottery/plot_config.py +2 -0
  100. ert/gui/tools/plot/plottery/plot_context.py +14 -0
  101. ert/gui/tools/plot/plottery/plots/__init__.py +2 -0
  102. ert/gui/tools/plot/plottery/plots/cesp.py +3 -1
  103. ert/gui/tools/plot/plottery/plots/distribution.py +6 -1
  104. ert/gui/tools/plot/plottery/plots/ensemble.py +12 -3
  105. ert/gui/tools/plot/plottery/plots/gaussian_kde.py +12 -2
  106. ert/gui/tools/plot/plottery/plots/histogram.py +3 -1
  107. ert/gui/tools/plot/plottery/plots/misfits.py +436 -0
  108. ert/gui/tools/plot/plottery/plots/observations.py +18 -4
  109. ert/gui/tools/plot/plottery/plots/statistics.py +62 -20
  110. ert/gui/tools/plot/plottery/plots/std_dev.py +3 -1
  111. ert/mode_definitions.py +2 -0
  112. ert/plugins/__init__.py +0 -1
  113. ert/plugins/hook_implementations/workflows/csv_export.py +2 -3
  114. ert/plugins/hook_implementations/workflows/gen_data_rft_export.py +10 -2
  115. ert/plugins/hook_specifications/__init__.py +0 -2
  116. ert/plugins/hook_specifications/jobs.py +0 -9
  117. ert/plugins/plugin_manager.py +6 -33
  118. ert/resources/forward_models/run_reservoirsimulator.py +8 -3
  119. ert/resources/shell_scripts/delete_directory.py +2 -2
  120. ert/run_models/__init__.py +18 -5
  121. ert/run_models/_create_run_path.py +131 -37
  122. ert/run_models/ensemble_experiment.py +10 -4
  123. ert/run_models/ensemble_information_filter.py +8 -1
  124. ert/run_models/ensemble_smoother.py +9 -3
  125. ert/run_models/evaluate_ensemble.py +8 -6
  126. ert/run_models/event.py +7 -3
  127. ert/run_models/everest_run_model.py +159 -46
  128. ert/run_models/initial_ensemble_run_model.py +25 -24
  129. ert/run_models/manual_update.py +6 -3
  130. ert/run_models/manual_update_enif.py +37 -0
  131. ert/run_models/model_factory.py +81 -21
  132. ert/run_models/multiple_data_assimilation.py +22 -11
  133. ert/run_models/run_model.py +64 -55
  134. ert/run_models/single_test_run.py +7 -4
  135. ert/run_models/update_run_model.py +4 -2
  136. ert/runpaths.py +5 -6
  137. ert/sample_prior.py +9 -4
  138. ert/scheduler/driver.py +37 -0
  139. ert/scheduler/event.py +3 -1
  140. ert/scheduler/job.py +23 -13
  141. ert/scheduler/lsf_driver.py +6 -2
  142. ert/scheduler/openpbs_driver.py +7 -1
  143. ert/scheduler/scheduler.py +5 -0
  144. ert/scheduler/slurm_driver.py +6 -2
  145. ert/services/__init__.py +2 -2
  146. ert/services/_base_service.py +37 -20
  147. ert/services/ert_server.py +317 -0
  148. ert/shared/_doc_utils/__init__.py +4 -2
  149. ert/shared/_doc_utils/ert_jobs.py +1 -4
  150. ert/shared/net_utils.py +43 -18
  151. ert/shared/storage/connection.py +3 -3
  152. ert/shared/version.py +3 -3
  153. ert/storage/__init__.py +2 -0
  154. ert/storage/local_ensemble.py +38 -12
  155. ert/storage/local_experiment.py +8 -16
  156. ert/storage/local_storage.py +68 -42
  157. ert/storage/migration/to11.py +1 -1
  158. ert/storage/migration/to16.py +38 -0
  159. ert/storage/migration/to17.py +42 -0
  160. ert/storage/migration/to18.py +11 -0
  161. ert/storage/migration/to19.py +34 -0
  162. ert/storage/migration/to20.py +23 -0
  163. ert/storage/migration/to21.py +25 -0
  164. ert/storage/migration/to8.py +4 -4
  165. ert/substitutions.py +12 -28
  166. ert/validation/active_range.py +7 -7
  167. ert/validation/rangestring.py +16 -16
  168. ert/workflow_runner.py +2 -1
  169. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/METADATA +9 -8
  170. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/RECORD +208 -205
  171. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/WHEEL +1 -1
  172. everest/api/everest_data_api.py +14 -1
  173. everest/bin/config_branch_script.py +3 -6
  174. everest/bin/everconfigdump_script.py +1 -9
  175. everest/bin/everest_script.py +21 -11
  176. everest/bin/everlint_script.py +0 -2
  177. everest/bin/kill_script.py +2 -2
  178. everest/bin/monitor_script.py +2 -2
  179. everest/bin/utils.py +8 -4
  180. everest/bin/visualization_script.py +6 -14
  181. everest/config/__init__.py +4 -1
  182. everest/config/control_config.py +81 -6
  183. everest/config/control_variable_config.py +4 -3
  184. everest/config/everest_config.py +75 -42
  185. everest/config/forward_model_config.py +5 -3
  186. everest/config/install_data_config.py +7 -5
  187. everest/config/install_job_config.py +7 -3
  188. everest/config/install_template_config.py +3 -3
  189. everest/config/optimization_config.py +19 -6
  190. everest/config/output_constraint_config.py +8 -2
  191. everest/config/server_config.py +6 -49
  192. everest/config/utils.py +25 -105
  193. everest/config/validation_utils.py +17 -11
  194. everest/config_file_loader.py +13 -4
  195. everest/detached/client.py +3 -3
  196. everest/detached/everserver.py +7 -8
  197. everest/everest_storage.py +6 -12
  198. everest/gui/everest_client.py +2 -3
  199. everest/gui/main_window.py +2 -2
  200. everest/optimizer/everest2ropt.py +59 -32
  201. everest/optimizer/opt_model_transforms.py +12 -13
  202. everest/optimizer/utils.py +0 -29
  203. everest/strings.py +0 -5
  204. ert/config/everest_constraints_config.py +0 -95
  205. ert/config/ext_param_config.py +0 -106
  206. ert/gui/tools/export/__init__.py +0 -3
  207. ert/gui/tools/export/export_panel.py +0 -83
  208. ert/gui/tools/export/export_tool.py +0 -69
  209. ert/gui/tools/export/exporter.py +0 -36
  210. ert/services/storage_service.py +0 -127
  211. everest/config/sampler_config.py +0 -103
  212. everest/simulator/__init__.py +0 -88
  213. everest/simulator/everest_to_ert.py +0 -51
  214. /ert/gui/{suggestor → ertwidgets/suggestor}/__init__.py +0 -0
  215. /ert/gui/{suggestor → ertwidgets/suggestor}/_colors.py +0 -0
  216. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/entry_points.txt +0 -0
  217. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/licenses/COPYING +0 -0
  218. {ert-17.0.0.dist-info → ert-19.0.0rc2.dist-info}/top_level.txt +0 -0

ert/config/parsing/_option_dict.py
@@ -48,13 +48,21 @@ def option_dict(option_list: Sequence[str], offset: int) -> dict[str, str]:
         if len(option_pair.split(":")) == 2:
             key, val = option_pair.split(":")
             if val and key:
+                if key in result:
+                    raise ConfigValidationError.with_context(
+                        f"Option {key} occured multiple times.", option_pair
+                    )
                 result[key] = val
             else:
                 raise ConfigValidationError.with_context(
-                    f"Invalid argument {option_pair!r}", option_pair
+                    "Option argument should be of the form 'key':'value', "
+                    f"got {option_pair!r}",
+                    option_pair,
                 )
         else:
             raise ConfigValidationError.with_context(
-                f"Invalid argument {option_pair!r}", option_pair
+                "Option argument should be of the form 'key':'value', "
+                f"got {option_pair!r}",
+                option_pair,
             )
     return result
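
The duplicate-key rejection introduced above can be illustrated with a small standalone sketch (a simplified reimplementation, not code from the package; it raises a plain ValueError where ert raises ConfigValidationError):

def parse_option_pairs(option_list):
    result = {}
    for option_pair in option_list:
        parts = option_pair.split(":")
        # Each argument must be a single non-empty 'key:value' pair
        if len(parts) != 2 or not all(parts):
            raise ValueError(
                f"Option argument should be of the form 'key':'value', got {option_pair!r}"
            )
        key, val = parts
        # New behaviour: repeating a key is an error instead of a silent overwrite
        if key in result:
            raise ValueError(f"Option {key} occurred multiple times.")
        result[key] = val
    return result

parse_option_pairs(["MIN:0", "MAX:1"])  # {'MIN': '0', 'MAX': '1'}
parse_option_pairs(["MIN:0", "MIN:2"])  # raises ValueError
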

ert/config/parsing/config_keywords.py
@@ -42,6 +42,7 @@ class ConfigKeys(StrEnum):
     SETENV = "SETENV"
     STD_CUTOFF = "STD_CUTOFF"
     SUMMARY = "SUMMARY"
+    RFT = "RFT"
     SURFACE = "SURFACE"
     UPDATE_LOG_PATH = "UPDATE_LOG_PATH"
     RANDOM_SEED = "RANDOM_SEED"
@@ -55,6 +56,7 @@ class ConfigKeys(StrEnum):
     REALIZATION_MEMORY = "REALIZATION_MEMORY"
     SUBMIT_SLEEP = "SUBMIT_SLEEP"
     MAX_RUNNING = "MAX_RUNNING"
+    PRIORITIZE_PRIVATE_IP_ADDRESS = "PRIORITIZE_PRIVATE_IP_ADDRESS"
 
     def __repr__(self) -> str:
         return f"{self.value!r}"

ert/config/parsing/config_schema.py
@@ -4,8 +4,8 @@ from .config_schema_deprecations import deprecated_keywords_list
 from .config_schema_item import (
     SchemaItem,
     Varies,
+    existing_file_keyword,
     existing_path_inline_keyword,
-    existing_path_keyword,
     float_keyword,
     int_keyword,
     path_keyword,
@@ -63,6 +63,17 @@ def data_kw_keyword() -> SchemaItem:
     )
 
 
+def rft_keyword() -> SchemaItem:
+    return SchemaItem(
+        kw=ConfigKeys.RFT,
+        required_set=False,
+        multi_occurrence=True,
+        options_after=0,
+        argc_min=1,
+        argc_max=1,
+    )
+
+
 def define_keyword() -> SchemaItem:
     return SchemaItem(
         kw=ConfigKeys.DEFINE,
@@ -115,6 +126,13 @@ def hook_workflow_keyword() -> SchemaItem:
     )
 
 
+def prioritize_private_ip_address_keyword() -> SchemaItem:
+    return SchemaItem(
+        kw=ConfigKeys.PRIORITIZE_PRIVATE_IP_ADDRESS,
+        type_map=[SchemaItemType.BOOL],
+    )
+
+
 def set_env_keyword() -> SchemaItem:
     # You can set environment variables which will be applied to the run-time
     # environment.
@@ -303,11 +321,12 @@ def init_user_config_schema() -> ConfigSchemaDict:
         gen_kw_keyword(),
         gen_data_keyword(),
         summary_keyword(),
+        rft_keyword(),
         surface_keyword(),
         field_keyword(),
         single_arg_keyword(ConfigKeys.ECLBASE),
-        existing_path_keyword(ConfigKeys.DATA_FILE),
-        existing_path_keyword(ConfigKeys.GRID),
+        existing_file_keyword(ConfigKeys.DATA_FILE),
+        existing_file_keyword(ConfigKeys.GRID),
         path_keyword(ConfigKeys.REFCASE),
         int_keyword(ConfigKeys.RANDOM_SEED),
         num_realizations_keyword(),
@@ -337,6 +356,7 @@ def init_user_config_schema() -> ConfigSchemaDict:
         install_job_keyword(),
         install_job_directory_keyword(),
         hook_workflow_keyword(),
+        prioritize_private_ip_address_keyword(),
     ]:
         schema[item.kw] = item
         if item.kw in ConfigAliases:

ert/config/parsing/config_schema_deprecations.py
@@ -39,9 +39,9 @@ deprecated_keywords_list = [
         keyword=kw,
         message=partial(
             lambda line, kw: f"Using {kw} with substitution strings "
-            + "that are not of the form '<KEY>' is deprecated. "
-            + f"Please change {line[0]} to "
-            + f"<{line[0].replace('<', '').replace('>', '')}>",
+            "that are not of the form '<KEY>' is deprecated. "
+            f"Please change {line[0]} to "
+            f"<{line[0].replace('<', '').replace('>', '')}>",
             kw=kw,
         ),
         check=lambda line: not DeprecationInfo.is_angle_bracketed(str(line[0])),
@@ -217,15 +217,4 @@ deprecated_keywords_list = [
         ),
         check=lambda line: line[0] == "DESIGN2PARAMS",
     ),
-    DeprecationInfo(
-        keyword="FORWARD_MODEL",
-        message=(
-            "FORWARD_MODEL DESIGN_KW will be replaced with RUN_TEMPLATE. "
-            "DESIGN2PARAMS has been replaced by DESIGN_MATRIX, so the "
-            "parameters are already available for magic string replacement "
-            "with the RUN_TEMPLATE keyword. Please use this format: "
-            "'RUN_TEMPLATE my_text_file_template.txt my_text_output_file.txt'"
-        ),
-        check=lambda line: line[0] == "DESIGN_KW",
-    ),
 ]

ert/config/parsing/config_schema_item.py
@@ -37,25 +37,34 @@ class SchemaItem:
 
     # The minimum number of arguments
     argc_min: NonNegativeInt = 1
+
     # The maximum number of arguments: None means no upper limit
     argc_max: NonNegativeInt | None = 1
+
     # A list of types for the items. Set along with argc_minmax()
     type_map: list[SchemaItemType | EnumType | None] = Field(default_factory=list)
+
     # A list of item's which must also be set (if this item is set). (can be NULL)
     required_children: list[str] = Field(default_factory=list)
+
     # Information about the deprecation if deprecated
     deprecation_info: list[DeprecationInfo] = Field(default_factory=list)
-    # if positive, arguments after this count will be concatenated with a " " between
+
+    # If positive, arguments after this count will be concatenated with a " " between
     join_after: PositiveInt | None = None
-    # if positive, arguments after this count will be interpreted as options
-    options_after: PositiveInt | Varies | None = None
-    # if true, will accumulate many values set for key, otherwise each entry will
+
+    # If positive, arguments after this count will be interpreted as options
+    options_after: NonNegativeInt | Varies | None = None
+
+    # If true, will accumulate many values set for key, otherwise each entry will
     # overwrite any previous value set
     multi_occurrence: bool = False
+
     # Only applies to SchemaItemType.EXISTING_PATH_INLINE where
     # the contents is then parsed
     parser: Callable[[str, str], Any] = lambda x, y: y
     expand_envvar: bool = True
+
     # Index of tokens to do substitution from until end
     substitute_from: NonNegativeInt = 1
     required_set: bool = False
@@ -155,9 +164,9 @@ class SchemaItem:
                     f"value as argument {index + 1!r}",
                     token,
                 )
-
             case (
                 SchemaItemType.PATH
+                | SchemaItemType.EXISTING_FILE
                 | SchemaItemType.EXISTING_PATH
                 | SchemaItemType.EXISTING_PATH_INLINE
             ):
@@ -166,10 +175,14 @@ class SchemaItem:
                 path = os.path.normpath(
                     os.path.join(os.path.dirname(token.filename), token)
                 )
-                if val_type in {
-                    SchemaItemType.EXISTING_PATH,
-                    SchemaItemType.EXISTING_PATH_INLINE,
-                }:
+                if val_type != SchemaItemType.PATH:
+                    if val_type == SchemaItemType.EXISTING_FILE and not os.path.isfile(
+                        str(path)
+                    ):
+                        raise ConfigValidationError.with_context(
+                            f"{self.kw} {token} is not a file.",
+                            token,
+                        )
                     if not os.path.exists(str(path)):
                         err = f'Cannot find file or directory "{token.value}". '
                         if path != token:
@@ -311,6 +324,8 @@ class SchemaItem:
         self, line: Sequence[FileContextToken]
     ) -> Sequence[FileContextToken | dict[FileContextToken, FileContextToken]]:
         n = self.options_after
+        if not line:
+            return []
         if isinstance(n, Varies):
             args, kwargs = parse_variable_options(list(line), n.max_positionals)
             return [*args, kwargs]  # type: ignore
@@ -343,8 +358,8 @@ def path_keyword(keyword: str) -> SchemaItem:
     return SchemaItem(kw=keyword, type_map=[SchemaItemType.PATH])
 
 
-def existing_path_keyword(keyword: str) -> SchemaItem:
-    return SchemaItem(kw=keyword, type_map=[SchemaItemType.EXISTING_PATH])
+def existing_file_keyword(keyword: str) -> SchemaItem:
+    return SchemaItem(kw=keyword, type_map=[SchemaItemType.EXISTING_FILE])
 
 
 def existing_path_inline_keyword(

ert/config/parsing/context_values.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from json import JSONEncoder
-from typing import Any, TypeVar, no_type_check
+from typing import Any, Self, TypeVar, no_type_check
 
 from .file_context_token import FileContextToken
 
@@ -37,7 +37,7 @@ class ContextBool:
 
 
 class ContextInt(int):
-    def __new__(cls, val: int, token: FileContextToken) -> ContextInt:
+    def __new__(cls, val: int, token: FileContextToken) -> Self:
         obj = super().__new__(cls, val)
         obj.token = token
         return obj
@@ -50,7 +50,7 @@ class ContextInt(int):
 
 
 class ContextFloat(float):
-    def __new__(cls, val: float, token: FileContextToken) -> ContextFloat:
+    def __new__(cls, val: float, token: FileContextToken) -> Self:
         obj = super().__new__(cls, val)
         obj.token = token
         return obj

ert/config/parsing/file_context_token.py
@@ -12,7 +12,7 @@ class FileContextToken(Token):
 
     filename: str
 
-    def __new__(cls, token: Token, filename: str) -> FileContextToken:
+    def __new__(cls, token: Token, filename: str) -> FileContextToken:  # noqa: PYI034
         inst = super().__new__(
             cls,
             token.type,

ert/config/parsing/observations_parser.py
@@ -18,6 +18,7 @@ class ObservationType(StrEnum):
     HISTORY = "HISTORY_OBSERVATION"
     SUMMARY = "SUMMARY_OBSERVATION"
     GENERAL = "GENERAL_OBSERVATION"
+    RFT = "RFT_OBSERVATION"
 
 
 ObservationDict = dict[str, Any]
@@ -92,7 +93,7 @@ def parse_observations(content: str, filename: str) -> list[ObservationDict]:
             ), ["TYPE"]:
                 message = (
                     f"Unknown observation type '{unexpected_token}', "
-                    f"expected either 'GENERAL_OBSERVATION', "
+                    f"expected either 'RFT_OBSERVATION', 'GENERAL_OBSERVATION', "
                     f"'SUMMARY_OBSERVATION' or 'HISTORY_OBSERVATION'."
                 )
             case UnexpectedToken(token=unexpected_char, expected=allowed_chars), _:
@@ -122,7 +123,10 @@ observations_parser = Lark(
     r"""
     start: observation*
     ?observation: type OBSERVATION_NAME object? ";"
-    TYPE: "HISTORY_OBSERVATION" | "SUMMARY_OBSERVATION" | "GENERAL_OBSERVATION"
+    TYPE: "HISTORY_OBSERVATION"
+        | "SUMMARY_OBSERVATION"
+        | "GENERAL_OBSERVATION"
+        | "RFT_OBSERVATION"
     type: TYPE
     ?value: object
         | STRING

ert/config/parsing/queue_system.py
@@ -23,3 +23,12 @@ class QueueSystem(StrEnum):
     @staticmethod
     def ert_config_case() -> str:
         return "upper"
+
+    @property
+    def formatted_name(self) -> str:
+        return {
+            self.LSF: "LSF",
+            self.LOCAL: "Local",
+            self.TORQUE: "Torque/OpenPBS",
+            self.SLURM: "Slurm",
+        }[self]
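
A brief usage sketch of the new property (assumed; the module path is taken from the file list above, and the expected strings come directly from the mapping in the hunk):

from ert.config.parsing.queue_system import QueueSystem

# formatted_name maps each queue system enum member to a display label
assert QueueSystem.TORQUE.formatted_name == "Torque/OpenPBS"
assert QueueSystem.LOCAL.formatted_name == "Local"
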

ert/config/parsing/schema_item_type.py
@@ -9,6 +9,7 @@ class SchemaItemType(StrEnum):
     POSITIVE_FLOAT = "POSITIVE_FLOAT"
     PATH = "PATH"
     EXISTING_PATH = "EXISTING_PATH"
+    EXISTING_FILE = "EXISTING_FILE"
     # EXISTING_PATH_INLINE is a directive to the
     # schema validation to inline the contents of
     # the file.

ert/config/queue_config.py
@@ -51,7 +51,6 @@ class QueueOptions(
     BaseModelWithContextSupport,
     validate_assignment=True,
     extra="forbid",
-    use_enum_values=True,
     validate_default=True,
 ):
     name: QueueSystem
@@ -478,11 +477,11 @@ class QueueConfig(BaseModelWithContextSupport):
 
         # validate all queue options for the unselected queues
         # and show a warning
-        for _queue_system in QueueSystem:
-            if _queue_system != selected_queue_system:
+        for queue_system in QueueSystem:
+            if queue_system != selected_queue_system:
                 _ = QueueOptions.create_queue_options(
-                    _queue_system,
-                    grouped_queue_options[_queue_system],
+                    queue_system,
+                    grouped_queue_options[queue_system],
                     False,
                 )
 

ert/config/response_config.py
@@ -31,7 +31,6 @@ class ResponseMetadata(BaseModel):
 
 class ResponseConfig(BaseModel):
     type: str
-    name: str
     input_files: list[str] = Field(default_factory=list)
     keys: list[str] = Field(default_factory=list)
     has_finalized_keys: bool = False
@@ -60,13 +59,6 @@ class ResponseConfig(BaseModel):
     def expected_input_files(self) -> list[str]:
         """Returns a list of filenames expected to be produced by the forward model"""
 
-    @property
-    @abstractmethod
-    def response_type(self) -> str:
-        """Label to identify what kind of response it is.
-        Must not overlap with that of other response configs."""
-        ...
-
     @property
     @abstractmethod
     def primary_key(self) -> list[str]:

ert/config/rft_config.py (new file)
@@ -0,0 +1,275 @@
+from __future__ import annotations
+
+import datetime
+import fnmatch
+import logging
+import os
+import re
+from collections import defaultdict
+from typing import IO, Any, Literal
+
+import numpy as np
+import numpy.typing as npt
+import polars as pl
+from pydantic import Field
+from resfo_utilities import CornerpointGrid, InvalidRFTError, RFTReader
+
+from ert.substitutions import substitute_runpath_name
+
+from .parsing import ConfigDict, ConfigKeys, ConfigValidationError, ConfigWarning
+from .response_config import InvalidResponseFile, ResponseConfig, ResponseMetadata
+from .responses_index import responses_index
+
+logger = logging.getLogger(__name__)
+
+
+class RFTConfig(ResponseConfig):
+    type: Literal["rft"] = "rft"
+    name: str = "rft"
+    has_finalized_keys: bool = False
+    data_to_read: dict[str, dict[str, list[str]]] = Field(default_factory=dict)
+    locations: list[tuple[float, float, float]] = Field(default_factory=list)
+
+    @property
+    def metadata(self) -> list[ResponseMetadata]:
+        return [
+            ResponseMetadata(
+                response_type=self.name,
+                response_key=response_key,
+                filter_on=None,
+                finalized=self.has_finalized_keys,
+            )
+            for response_key in self.keys
+        ]
+
+    @property
+    def expected_input_files(self) -> list[str]:
+        base = self.input_files[0]
+        if base.upper().endswith(".DATA"):
+            # For backwards compatibility, it is
+            # allowed to give REFCASE and ECLBASE both
+            # with and without .DATA extensions
+            base = base[:-5]
+
+        return [f"{base}.RFT"]
+
+    def _find_indices(
+        self, egrid_file: str | os.PathLike[str] | IO[Any]
+    ) -> dict[tuple[int, int, int] | None, set[tuple[float, float, float]]]:
+        indices = defaultdict(set)
+        for a, b in zip(
+            CornerpointGrid.read_egrid(egrid_file).find_cell_containing_point(
+                self.locations
+            ),
+            self.locations,
+            strict=True,
+        ):
+            indices[a].add(b)
+        return indices
+
+    def read_from_file(self, run_path: str, iens: int, iter_: int) -> pl.DataFrame:
+        filename = substitute_runpath_name(self.input_files[0], iens, iter_)
+        if filename.upper().endswith(".DATA"):
+            # For backwards compatibility, it is
+            # allowed to give REFCASE and ECLBASE both
+            # with and without .DATA extensions
+            filename = filename[:-5]
+        grid_filename = f"{run_path}/{filename}"
+        if grid_filename.upper().endswith(".RFT"):
+            grid_filename = grid_filename[:-4]
+        grid_filename += ".EGRID"
+        fetched: dict[tuple[str, datetime.date], dict[str, npt.NDArray[np.float32]]] = (
+            defaultdict(dict)
+        )
+        indices = {}
+        if self.locations:
+            indices = self._find_indices(grid_filename)
+            if None in indices:
+                raise InvalidResponseFile(
+                    f"Did not find grid coordinate for location(s) {indices[None]}"
+                )
+        # This is a somewhat complicated optimization in order to
+        # support wildcards in well names, dates and properties
+        # A python for loop is too slow so we use a compiled regex
+        # instead
+        if not self.data_to_read:
+            return pl.DataFrame(
+                {
+                    "response_key": [],
+                    "time": [],
+                    "depth": [],
+                    "values": [],
+                    "location": [],
+                }
+            )
+
+        sep = "\x31"
+
+        def _translate(pat: str) -> str:
+            """Translates fnmatch pattern to match anywhere"""
+            return fnmatch.translate(pat).replace("\\z", "").replace("\\Z", "")
+
+        def _props_matcher(props: list[str]) -> str:
+            """Regex for matching given props _and_ DEPTH"""
+            pattern = f"({'|'.join(_translate(p) for p in props)})"
+            if re.fullmatch(pattern, "DEPTH") is None:
+                return f"({'|'.join(_translate(p) for p in [*props, 'DEPTH'])})"
+            else:
+                return pattern
+
+        matcher = re.compile(
+            "|".join(
+                "("
+                + re.escape(sep).join(
+                    (
+                        _translate(well),
+                        _translate(time),
+                        _props_matcher(props),
+                    )
+                )
+                + ")"
+                for well, inner_dict in self.data_to_read.items()
+                for time, props in inner_dict.items()
+            )
+        )
+        locations = {}
+        try:
+            with RFTReader.open(f"{run_path}/{filename}") as rft:
+                for entry in rft:
+                    date = entry.date
+                    well = entry.well
+                    for rft_property in entry:
+                        key = f"{well}{sep}{date}{sep}{rft_property}"
+                        if matcher.fullmatch(key) is not None:
+                            values = entry[rft_property]
+                            locations[well, date] = [
+                                list(
+                                    indices.get(
+                                        (c[0] - 1, c[1] - 1, c[2] - 1),
+                                        [(None, None, None)],
+                                    )
+                                )
+                                for c in entry.connections
+                            ]
+                            if np.isdtype(values.dtype, np.float32):
+                                fetched[well, date][rft_property] = values
+        except (FileNotFoundError, InvalidRFTError) as err:
+            raise InvalidResponseFile(
+                f"Could not read RFT from {run_path}/{filename}: {err}"
+            ) from err
+
+        if not fetched:
+            return pl.DataFrame(
+                {
+                    "response_key": [],
+                    "time": [],
+                    "depth": [],
+                    "values": [],
+                    "location": [],
+                }
+            )
+
+        try:
+            df = pl.concat(
+                [
+                    pl.DataFrame(
+                        {
+                            "response_key": [f"{well}:{time.isoformat()}:{prop}"],
+                            "time": [time],
+                            "depth": [fetched[well, time]["DEPTH"]],
+                            "values": [vals],
+                            "location": pl.Series(
+                                [
+                                    locations.get(
+                                        (well, time), [(None, None, None)] * len(vals)
+                                    )
+                                ],
+                                dtype=pl.Array(
+                                    pl.List(pl.Array(pl.Float32, 3)), len(vals)
+                                ),
+                            ),
+                        }
+                    )
+                    .explode("depth", "values", "location")
+                    .explode("location")
+                    for (well, time), inner_dict in fetched.items()
+                    for prop, vals in inner_dict.items()
+                    if prop != "DEPTH"
+                ]
+            )
+        except KeyError as err:
+            raise InvalidResponseFile(
+                f"Could not find {err.args[0]} in RFTFile {filename}"
+            ) from err
+
+        return df.with_columns(
+            east=pl.col("location").arr.get(0),
+            north=pl.col("location").arr.get(1),
+            tvd=pl.col("location").arr.get(2),
+        ).drop("location")
+
+    @property
+    def response_type(self) -> str:
+        return "rft"
+
+    @property
+    def primary_key(self) -> list[str]:
+        return ["east", "north", "tvd"]
+
+    @classmethod
+    def from_config_dict(cls, config_dict: ConfigDict) -> RFTConfig | None:
+        if rfts := config_dict.get(ConfigKeys.RFT, []):
+            eclbase: str | None = config_dict.get("ECLBASE")
+            if eclbase is None:
+                raise ConfigValidationError(
+                    "In order to use rft responses, ECLBASE has to be set."
+                )
+            fm_steps = config_dict.get(ConfigKeys.FORWARD_MODEL, [])
+            names = [fm_step[0] for fm_step in fm_steps]
+            simulation_step_exists = any(
+                any(sim in name.lower() for sim in ["eclipse", "flow"])
+                for name in names
+            )
+            if not simulation_step_exists:
+                ConfigWarning.warn(
+                    "Config contains a RFT key but no forward model "
+                    "step known to generate rft files"
+                )
+
+            declared_data: dict[str, dict[datetime.date, list[str]]] = defaultdict(
+                lambda: defaultdict(list)
+            )
+            for rft in rfts:
+                for expected in ["WELL", "DATE", "PROPERTIES"]:
+                    if expected not in rft:
+                        raise ConfigValidationError.with_context(
+                            f"For RFT keyword {expected} must be specified.", rft
+                        )
+                well = rft["WELL"]
+                props = [p.strip() for p in rft["PROPERTIES"].split(",")]
+                time = rft["DATE"]
+                declared_data[well][time] += props
+            data_to_read = {
+                well: {time: sorted(set(p)) for time, p in inner_dict.items()}
+                for well, inner_dict in declared_data.items()
+            }
+            keys = sorted(
+                {
+                    f"{well}:{time}:{p}"
+                    for well, inner_dict in declared_data.items()
+                    for time, props in inner_dict.items()
+                    for p in props
+                }
+            )
+
+            return cls(
+                name="rft",
+                input_files=[eclbase.replace("%d", "<IENS>")],
+                keys=keys,
+                data_to_read=data_to_read,
+            )
+
+        return None
+
+
+responses_index.add_response_type(RFTConfig)
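
To illustrate how from_config_dict above folds repeated RFT configuration entries into data_to_read, here is a small standalone sketch (the WELL/DATE/PROPERTIES values are hypothetical; the grouping logic mirrors the declared_data/data_to_read code in the new file):

from collections import defaultdict

# Two hypothetical parsed RFT entries for the same well and date
rfts = [
    {"WELL": "OP_1", "DATE": "2020-01-01", "PROPERTIES": "PRESSURE,SGAS"},
    {"WELL": "OP_1", "DATE": "2020-01-01", "PROPERTIES": "SWAT"},
]

declared_data = defaultdict(lambda: defaultdict(list))
for rft in rfts:
    props = [p.strip() for p in rft["PROPERTIES"].split(",")]
    declared_data[rft["WELL"]][rft["DATE"]] += props

# Duplicates are collapsed and properties sorted per (well, date), as in RFTConfig
data_to_read = {
    well: {date: sorted(set(props)) for date, props in inner.items()}
    for well, inner in declared_data.items()
}
print(data_to_read)
# {'OP_1': {'2020-01-01': ['PRESSURE', 'SGAS', 'SWAT']}}
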

ert/config/summary_config.py
@@ -19,14 +19,13 @@ logger = logging.getLogger(__name__)
 
 class SummaryConfig(ResponseConfig):
     type: Literal["summary"] = "summary"
-    name: str = "summary"
     has_finalized_keys: bool = False
 
     @property
     def metadata(self) -> list[ResponseMetadata]:
         return [
             ResponseMetadata(
-                response_type=self.name,
+                response_type=self.type,
                 response_key=response_key,
                 filter_on=None,
                 finalized=self.has_finalized_keys,
@@ -72,10 +71,6 @@ class SummaryConfig(ResponseConfig):
         df = df.sort(by=["time"])
         return df
 
-    @property
-    def response_type(self) -> str:
-        return "summary"
-
     @property
     def primary_key(self) -> list[str]:
         return ["time"]
@@ -91,8 +86,8 @@ class SummaryConfig(ResponseConfig):
         fm_steps = config_dict.get(ConfigKeys.FORWARD_MODEL, [])
         names = [fm_step[0] for fm_step in fm_steps]
         simulation_step_exists = any(
-            any(sim in _name.lower() for sim in ["eclipse", "flow"])
-            for _name in names
+            any(sim in name.lower() for sim in ["eclipse", "flow"])
+            for name in names
         )
         if not simulation_step_exists:
             ConfigWarning.warn(