ert 18.0.9-py3-none-any.whl → 19.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. _ert/forward_model_runner/client.py +6 -2
  2. ert/__main__.py +20 -6
  3. ert/cli/main.py +7 -3
  4. ert/config/__init__.py +3 -4
  5. ert/config/_create_observation_dataframes.py +85 -59
  6. ert/config/_get_num_cpu.py +1 -1
  7. ert/config/_observations.py +106 -31
  8. ert/config/distribution.py +1 -1
  9. ert/config/ensemble_config.py +3 -3
  10. ert/config/ert_config.py +50 -0
  11. ert/config/{ext_param_config.py → everest_control.py} +8 -12
  12. ert/config/everest_response.py +3 -5
  13. ert/config/field.py +76 -14
  14. ert/config/forward_model_step.py +12 -9
  15. ert/config/gen_data_config.py +3 -4
  16. ert/config/gen_kw_config.py +2 -12
  17. ert/config/parameter_config.py +1 -16
  18. ert/config/parsing/_option_dict.py +10 -2
  19. ert/config/parsing/config_keywords.py +1 -0
  20. ert/config/parsing/config_schema.py +8 -0
  21. ert/config/parsing/config_schema_deprecations.py +3 -3
  22. ert/config/parsing/config_schema_item.py +12 -3
  23. ert/config/parsing/context_values.py +3 -3
  24. ert/config/parsing/file_context_token.py +1 -1
  25. ert/config/parsing/observations_parser.py +12 -2
  26. ert/config/parsing/queue_system.py +9 -0
  27. ert/config/queue_config.py +0 -1
  28. ert/config/response_config.py +0 -1
  29. ert/config/rft_config.py +78 -33
  30. ert/config/summary_config.py +1 -2
  31. ert/config/surface_config.py +59 -16
  32. ert/dark_storage/common.py +1 -1
  33. ert/dark_storage/compute/misfits.py +4 -1
  34. ert/dark_storage/endpoints/compute/misfits.py +4 -2
  35. ert/dark_storage/endpoints/experiment_server.py +12 -9
  36. ert/dark_storage/endpoints/experiments.py +2 -2
  37. ert/dark_storage/endpoints/observations.py +14 -4
  38. ert/dark_storage/endpoints/parameters.py +2 -18
  39. ert/dark_storage/endpoints/responses.py +10 -5
  40. ert/dark_storage/json_schema/experiment.py +1 -1
  41. ert/data/_measured_data.py +6 -5
  42. ert/ensemble_evaluator/config.py +2 -1
  43. ert/field_utils/field_utils.py +1 -1
  44. ert/field_utils/roff_io.py +1 -1
  45. ert/gui/__init__.py +5 -2
  46. ert/gui/ertnotifier.py +1 -1
  47. ert/gui/ertwidgets/pathchooser.py +0 -3
  48. ert/gui/ertwidgets/suggestor/suggestor.py +63 -30
  49. ert/gui/main.py +27 -5
  50. ert/gui/main_window.py +0 -5
  51. ert/gui/simulation/experiment_panel.py +12 -3
  52. ert/gui/simulation/run_dialog.py +2 -16
  53. ert/gui/tools/manage_experiments/export_dialog.py +136 -0
  54. ert/gui/tools/manage_experiments/storage_info_widget.py +133 -28
  55. ert/gui/tools/plot/plot_api.py +24 -15
  56. ert/gui/tools/plot/plot_widget.py +19 -4
  57. ert/gui/tools/plot/plot_window.py +35 -18
  58. ert/gui/tools/plot/plottery/plots/__init__.py +2 -0
  59. ert/gui/tools/plot/plottery/plots/cesp.py +3 -1
  60. ert/gui/tools/plot/plottery/plots/distribution.py +6 -1
  61. ert/gui/tools/plot/plottery/plots/ensemble.py +3 -1
  62. ert/gui/tools/plot/plottery/plots/gaussian_kde.py +12 -2
  63. ert/gui/tools/plot/plottery/plots/histogram.py +3 -1
  64. ert/gui/tools/plot/plottery/plots/misfits.py +436 -0
  65. ert/gui/tools/plot/plottery/plots/observations.py +18 -4
  66. ert/gui/tools/plot/plottery/plots/statistics.py +3 -1
  67. ert/gui/tools/plot/plottery/plots/std_dev.py +3 -1
  68. ert/plugins/hook_implementations/workflows/csv_export.py +2 -3
  69. ert/plugins/plugin_manager.py +4 -0
  70. ert/resources/forward_models/run_reservoirsimulator.py +8 -3
  71. ert/run_models/_create_run_path.py +3 -3
  72. ert/run_models/everest_run_model.py +13 -11
  73. ert/run_models/initial_ensemble_run_model.py +2 -2
  74. ert/run_models/run_model.py +9 -0
  75. ert/services/_base_service.py +6 -5
  76. ert/services/ert_server.py +4 -4
  77. ert/shared/_doc_utils/__init__.py +4 -2
  78. ert/shared/net_utils.py +43 -18
  79. ert/shared/version.py +3 -3
  80. ert/storage/__init__.py +2 -0
  81. ert/storage/local_ensemble.py +25 -8
  82. ert/storage/local_experiment.py +2 -2
  83. ert/storage/local_storage.py +45 -25
  84. ert/storage/migration/to11.py +1 -1
  85. ert/storage/migration/to18.py +0 -1
  86. ert/storage/migration/to19.py +34 -0
  87. ert/storage/migration/to20.py +23 -0
  88. ert/storage/migration/to21.py +25 -0
  89. ert/storage/migration/to22.py +18 -0
  90. ert/storage/migration/to23.py +49 -0
  91. ert/workflow_runner.py +2 -1
  92. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/METADATA +1 -1
  93. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/RECORD +111 -109
  94. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/WHEEL +1 -1
  95. everest/bin/everlint_script.py +0 -2
  96. everest/bin/utils.py +2 -1
  97. everest/bin/visualization_script.py +4 -11
  98. everest/config/control_config.py +4 -4
  99. everest/config/control_variable_config.py +2 -2
  100. everest/config/everest_config.py +9 -0
  101. everest/config/utils.py +2 -2
  102. everest/config/validation_utils.py +7 -1
  103. everest/config_file_loader.py +0 -2
  104. everest/detached/client.py +3 -3
  105. everest/everest_storage.py +0 -2
  106. everest/gui/everest_client.py +2 -2
  107. everest/optimizer/everest2ropt.py +4 -4
  108. everest/optimizer/opt_model_transforms.py +2 -2
  109. ert/config/violations.py +0 -0
  110. ert/gui/tools/export/__init__.py +0 -3
  111. ert/gui/tools/export/export_panel.py +0 -83
  112. ert/gui/tools/export/export_tool.py +0 -69
  113. ert/gui/tools/export/exporter.py +0 -36
  114. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/entry_points.txt +0 -0
  115. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/licenses/COPYING +0 -0
  116. {ert-18.0.9.dist-info → ert-19.0.0.dist-info}/top_level.txt +0 -0
ert/config/ert_config.py CHANGED
@@ -25,6 +25,7 @@ from ._design_matrix_validator import DesignMatrixValidator
  from ._observations import (
      HistoryObservation,
      Observation,
+     RFTObservation,
      SummaryObservation,
      make_observations,
  )
@@ -62,6 +63,7 @@ from .parsing import (
  from .parsing.observations_parser import ObservationDict
  from .queue_config import KnownQueueOptions, QueueConfig
  from .refcase import Refcase
+ from .rft_config import RFTConfig
  from .workflow import Workflow
  from .workflow_fixtures import fixtures_per_hook
  from .workflow_job import (
@@ -715,6 +717,7 @@ class ErtConfig(BaseModel):
      QUEUE_OPTIONS: ClassVar[KnownQueueOptions | None] = None
      RESERVED_KEYWORDS: ClassVar[list[str]] = RESERVED_KEYWORDS
      ENV_VARS: ClassVar[dict[str, str]] = {}
+     PRIORITIZE_PRIVATE_IP_ADDRESS: ClassVar[bool] = False

      substitutions: dict[str, str] = Field(default_factory=dict)
      ensemble_config: EnsembleConfig = Field(default_factory=EnsembleConfig)
@@ -729,6 +732,7 @@
          default_factory=lambda: defaultdict(lambda: cast(list[Workflow], []))
      )
      runpath_file: Path = Path(DEFAULT_RUNPATH_FILE)
+     prioritize_private_ip_address: bool = False

      ert_templates: list[tuple[str, str]] = Field(default_factory=list)

@@ -745,6 +749,18 @@
      @property
      def observations(self) -> dict[str, pl.DataFrame]:
          if self._observations is None:
+             has_rft_observations = any(
+                 isinstance(o, RFTObservation) for o in self.observation_declarations
+             )
+             if (
+                 has_rft_observations
+                 and "rft" not in self.ensemble_config.response_configs
+             ):
+                 self.ensemble_config.response_configs["rft"] = RFTConfig(
+                     input_files=[self.runpath_config.eclbase_format_string],
+                     data_to_read={},
+                     locations=[],
+                 )
              computed = create_observation_dataframes(
                  self.observation_declarations,
                  self.refcase,
@@ -752,6 +768,10 @@
                      GenDataConfig | None,
                      self.ensemble_config.response_configs.get("gen_data", None),
                  ),
+                 cast(
+                     RFTConfig | None,
+                     self.ensemble_config.response_configs.get("rft", None),
+                 ),
                  self.time_map,
                  self.history_source,
              )
@@ -848,6 +868,9 @@
              )
              ENV_VARS = dict(runtime_plugins.environment_variables)
              QUEUE_OPTIONS = runtime_plugins.queue_options
+             PRIORITIZE_PRIVATE_IP_ADDRESS = (
+                 runtime_plugins.prioritize_private_ip_address
+             )

          ErtConfigWithPlugins.model_rebuild()
          assert issubclass(ErtConfigWithPlugins, ErtConfig)
@@ -1100,6 +1123,19 @@
              user_configured_.add(key)
              env_vars[key] = substituter.substitute(val)

+         prioritize_private_ip_address: bool = cls.PRIORITIZE_PRIVATE_IP_ADDRESS
+         if ConfigKeys.PRIORITIZE_PRIVATE_IP_ADDRESS in config_dict:
+             user_prioritize_private_ip_address = bool(
+                 config_dict[ConfigKeys.PRIORITIZE_PRIVATE_IP_ADDRESS]
+             )
+             if prioritize_private_ip_address != user_prioritize_private_ip_address:
+                 logger.warning(
+                     "PRIORITIZE_PRIVATE_IP_ADDRESS was overwritten by user: "
+                     f"{prioritize_private_ip_address} -> "
+                     f"{user_prioritize_private_ip_address}"
+                 )
+             prioritize_private_ip_address = user_prioritize_private_ip_address
+
          try:
              refcase = Refcase.from_config_dict(config_dict)
              cls_config = cls(
@@ -1126,11 +1162,21 @@
                  time_map=time_map,
                  history_source=history_source,
                  refcase=refcase,
+                 prioritize_private_ip_address=prioritize_private_ip_address,
              )

              # The observations are created here because create_observation_dataframes
              # will perform additonal validation which needs the context in
              # obs_configs which is stripped by pydantic
+             has_rft_observations = any(
+                 isinstance(o, RFTObservation) for o in obs_configs
+             )
+             if has_rft_observations and "rft" not in ensemble_config.response_configs:
+                 ensemble_config.response_configs["rft"] = RFTConfig(
+                     input_files=[eclbase],
+                     data_to_read={},
+                     locations=[],
+                 )
              cls_config._observations = create_observation_dataframes(
                  obs_configs,
                  refcase,
@@ -1138,6 +1184,10 @@
                      GenDataConfig | None,
                      ensemble_config.response_configs.get("gen_data", None),
                  ),
+                 cast(
+                     RFTConfig | None,
+                     ensemble_config.response_configs.get("rft", None),
+                 ),
                  time_map,
                  history_source,
              )
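The hunks above add a plugin-provided default (the PRIORITIZE_PRIVATE_IP_ADDRESS class variable), a user override read from the new config keyword (with a warning logged when the two disagree), and a prioritize_private_ip_address model field carrying the result. A minimal usage sketch follows; the config file name is hypothetical and ErtConfig.from_file is assumed to be the usual loader, only the new field itself comes from this diff:

    from ert.config import ErtConfig

    # "model.ert" is a placeholder; it may contain the new keyword, e.g.:
    #     PRIORITIZE_PRIVATE_IP_ADDRESS TRUE
    config = ErtConfig.from_file("model.ert")

    # New in 19.0.0: the resolved value (plugin default, possibly overridden
    # by the user keyword) is exposed as a plain field on the config model.
    print(config.prioritize_private_ip_address)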
ert/config/{ext_param_config.py → everest_control.py} RENAMED
@@ -17,7 +17,7 @@ from ropt.workflow import find_sampler_plugin

  from ert.substitutions import substitute_runpath_name

- from .parameter_config import ParameterConfig, ParameterMetadata
+ from .parameter_config import ParameterConfig

  if TYPE_CHECKING:
      import numpy.typing as npt
@@ -106,7 +106,7 @@ class SamplerConfig(BaseModel):
              # from "everest" here as per old behavior.
              # Can consider logging this as if from ERT,
              # which is valid if we store SamplerConfig as part of
-             # ExtParam configs.
+             # EverestControl configs.
              logging.getLogger("everest").warning(message)

          # Update the default for backends that are not scipy:
@@ -142,22 +142,14 @@
          return self


- class ExtParamConfig(ParameterConfig):
-     """Create an ExtParamConfig for @key with the given @input_keys
+ class EverestControl(ParameterConfig):
+     """Create an EverestControl for @key with the given @input_keys

      @input_keys can be either a list of keys as strings or a dict with
      keys as strings and a list of suffixes for each key.
      If a list of strings is given, the order is preserved.
      """

-     @property
-     def parameter_keys(self) -> list[str]:
-         return self.input_keys
-
-     @property
-     def metadata(self) -> list[ParameterMetadata]:
-         return []
-
      type: Literal["everest_parameters"] = "everest_parameters"
      input_keys: list[str] = field(default_factory=list)
      forward_init: bool = False
@@ -179,6 +171,10 @@ class ExtParamConfig(ParameterConfig):
      # "dotdash" notation is removed for everest controls via everest config.
      input_keys_dotdash: list[str] = field(default_factory=list)

+     @property
+     def parameter_keys(self) -> list[str]:
+         return self.input_keys
+
      def read_from_runpath(
          self, run_path: Path, real_nr: int, iteration: int
      ) -> xr.Dataset:
ert/config/everest_response.py CHANGED
@@ -25,7 +25,7 @@ class EverestResponse(ResponseConfig):
      def metadata(self) -> list[ResponseMetadata]:
          return [
              ResponseMetadata(
-                 response_type=self.name,
+                 response_type=self.type,
                  response_key=response_key,
                  finalized=self.has_finalized_keys,
                  filter_on=None,
@@ -76,12 +76,12 @@
            if all(isinstance(err, FileNotFoundError) for err in errors):
                raise FileNotFoundError(
                    "Could not find one or more files/directories while reading "
-                     f"{self.name}: {','.join([str(err) for err in errors])}"
+                     f"{self.type}: {','.join([str(err) for err in errors])}"
                )
            else:
                raise InvalidResponseFile(
                    "Error reading "
-                     f"{self.name}, errors: {','.join([str(err) for err in errors])}"
+                     f"{self.type}, errors: {','.join([str(err) for err in errors])}"
                )

        combined = pl.concat(datasets_per_name)
@@ -90,7 +90,6 @@

  class EverestConstraintsConfig(EverestResponse):
      type: Literal["everest_constraints"] = "everest_constraints"
-     name: str = "everest_constraints"
      targets: list[float | None]
      upper_bounds: list[float]
      lower_bounds: list[float]
@@ -101,7 +100,6 @@ responses_index.add_response_type(EverestConstraintsConfig)

  class EverestObjectivesConfig(EverestResponse):
      type: Literal["everest_objectives"] = "everest_objectives"
-     name: str = "everest_objectives"
      weights: list[float | None]
      objective_types: list[Literal["mean", "stddev"]]

ert/config/field.py CHANGED
@@ -17,16 +17,19 @@ from ert.field_utils import (
      ErtboxParameters,
      FieldFileFormat,
      Shape,
+     calc_rho_for_2d_grid_layer,
      calculate_ertbox_parameters,
      get_shape,
      read_field,
      save_field,
+     transform_local_ellipse_angle_to_local_coords,
+     transform_positions_to_local_field_coordinates,
  )
  from ert.substitutions import substitute_runpath_name
  from ert.utils import log_duration

  from ._str_to_bool import str_to_bool
- from .parameter_config import ParameterConfig, ParameterMetadata
+ from .parameter_config import ParameterConfig
  from .parsing import ConfigValidationError, ConfigWarning

  if TYPE_CHECKING:
@@ -67,6 +70,7 @@ def create_flattened_cube_graph(px: int, py: int, pz: int) -> nx.Graph[int]:

  class Field(ParameterConfig):
      type: Literal["field"] = "field"
+     dimensionality: Literal[3] = 3
      ertbox_params: ErtboxParameters
      file_format: FieldFileFormat
      output_transformation: str | None
@@ -85,17 +89,6 @@
      def parameter_keys(self) -> list[str]:
          return []

-     @property
-     def metadata(self) -> list[ParameterMetadata]:
-         return [
-             ParameterMetadata(
-                 key=self.name,
-                 transformation=self.output_transformation,
-                 dimensionality=3,
-                 userdata={"data_origin": "FIELD", "ertbox_params": self.ertbox_params},
-             )
-         ]
-
      @classmethod
      def from_config_list(
          cls,
@@ -277,7 +270,7 @@
          ensemble_size = len(ds.realizations)
          da = xr.DataArray(
              [
-                 np.ma.MaskedArray(data=d).compressed()  # type: ignore
+                 np.ma.MaskedArray(data=d).compressed()
                  for d in ds["values"].values.reshape(ensemble_size, -1)
              ]
          )
@@ -291,7 +284,7 @@
      def _transform_data(
          self, data_array: xr.DataArray
      ) -> np.ma.MaskedArray[Any, np.dtype[np.float32]]:
-         return np.ma.MaskedArray(  # type: ignore
+         return np.ma.MaskedArray(
              _field_truncate(
                  field_transform(
                      data_array,
@@ -325,6 +318,75 @@
      def nz(self) -> int:
          return self.ertbox_params.nz

+     def calc_rho_for_2d_grid_layer(
+         self,
+         obs_xpos: npt.NDArray[np.float64],
+         obs_ypos: npt.NDArray[np.float64],
+         obs_main_range: npt.NDArray[np.float64],
+         obs_perp_range: npt.NDArray[np.float64],
+         obs_anisotropy_angle: npt.NDArray[np.float64],
+         right_handed_grid_indexing: bool = True,
+     ) -> npt.NDArray[np.float64]:
+         """Function to calculate scaling values to be used in the RHO matrix
+         for distance-based localization.
+
+         Args:
+             obs_xpos: x-coordinates in global coordinates of observations
+             obs_ypos: y-coordinates in global coordinates of observations
+             obs_main_range: Size of influence ellipse main principal direction.
+             obs_perp_range: Size of influence ellipse second principal direction.
+             obs_anisotropy_angle: Rotation angle anticlock wise of main principal
+                 direction of influence ellipse relative to global coordinate
+                 system's x-axis.
+             right_handed_grid_indexing: When this is True the field parameters
+                 grid index order counts J-index down from ny-1 to 0.
+                 If the value is False, the grid index order is to count J index
+                 from 0 to ny-1. As standard for 3D field parameters,
+                 the grid index order follows the right_handed grid indexing.
+
+         Returns:
+             Scaling values (elements of the RHO matrix) as a numpy array
+             of shape=(nx,ny,nobservations)
+
+         """
+         # Can only be used if ertbox coordinate system is defined
+         assert self.ertbox_params.xinc is not None, (
+             "Parameter for grid resolution must be defined"
+         )
+         assert self.ertbox_params.yinc is not None, (
+             "Parameter for grid resolution must be defined"
+         )
+         assert self.ertbox_params.origin is not None, (
+             "Parameter for grid origin must be defined"
+         )
+         assert self.ertbox_params.rotation_angle is not None, (
+             "Parameter for grid rotation must be defined"
+         )
+         # Transform positions of observations into local coordinates
+         xpos, ypos = transform_positions_to_local_field_coordinates(
+             self.ertbox_params.origin,
+             self.ertbox_params.rotation_angle,
+             obs_xpos,
+             obs_ypos,
+         )
+         # Transform localization ellipse orientation to local coordinates
+         ellipse_rotation = transform_local_ellipse_angle_to_local_coords(
+             self.ertbox_params.rotation_angle, obs_anisotropy_angle
+         )
+
+         return calc_rho_for_2d_grid_layer(
+             self.ertbox_params.nx,
+             self.ertbox_params.ny,
+             self.ertbox_params.xinc,
+             self.ertbox_params.yinc,
+             xpos,
+             ypos,
+             obs_main_range,
+             obs_perp_range,
+             ellipse_rotation,
+             right_handed_grid_indexing=right_handed_grid_indexing,
+         )
+

  TRANSFORM_FUNCTIONS: Final[dict[str, Callable[[Any], Any]]] = {
      "LN": np.log,
ert/config/forward_model_step.py CHANGED
@@ -101,15 +101,18 @@ class ForwardModelStepDocumentation(BaseModel):
      source_function_name: str = Field(default="ert")
      description: str = Field(default="No description")
      examples: str = Field(default="No examples")
-     category: (
-         Literal[
-             "utility.file_system",
-             "simulators.reservoir",
-             "modelling.reservoir",
-             "utility.templating",
-         ]
-         | str
-     ) = Field(default="Uncategorized")
+     category: Annotated[
+         str,
+         Field(
+             default="Uncategorized",
+             examples=[
+                 "utility.file_system",
+                 "simulators.reservoir",
+                 "modelling.reservoir",
+                 "utility.templating",
+             ],
+         ),
+     ]


  class ForwardModelStep(BaseModelWithContextSupport):
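The category field changes from a Literal-union-or-str annotation to a plain str annotated with Field metadata, so any category string validates and the previously hard-coded values survive only as JSON-schema examples. A standalone pydantic v2 sketch of the same pattern; the StepDoc class is illustrative, not part of ert:

    from typing import Annotated

    from pydantic import BaseModel, Field


    class StepDoc(BaseModel):
        # Any string is accepted; the examples only show up in the generated schema.
        category: Annotated[
            str,
            Field(
                default="Uncategorized",
                examples=["utility.file_system", "simulators.reservoir"],
            ),
        ]


    print(StepDoc().category)                      # Uncategorized
    print(StepDoc(category="my.custom").category)  # my.custom
    print(StepDoc.model_json_schema()["properties"]["category"]["examples"])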
ert/config/gen_data_config.py CHANGED
@@ -21,7 +21,6 @@ from .responses_index import responses_index

  class GenDataConfig(ResponseConfig):
      type: Literal["gen_data"] = "gen_data"
-     name: str = "gen_data"
      report_steps_list: list[list[int] | None] = Field(default_factory=list)
      has_finalized_keys: bool = True

@@ -29,7 +28,7 @@
      def metadata(self) -> list[ResponseMetadata]:
          return [
              ResponseMetadata(
-                 response_type=self.name,
+                 response_type=self.type,
                  response_key=response_key,
                  finalized=self.has_finalized_keys,
                  filter_on={"report_step": report_steps}
@@ -198,12 +197,12 @@
            if all(isinstance(err, FileNotFoundError) for err in errors):
                raise FileNotFoundError(
                    "Could not find one or more files/directories while reading "
-                     f"GEN_DATA {self.name}: {','.join([str(err) for err in errors])}"
+                     f"GEN_DATA: {','.join([str(err) for err in errors])}"
                )
            else:
                raise InvalidResponseFile(
                    "Error reading GEN_DATA "
-                     f"{self.name}, errors: {','.join([str(err) for err in errors])}"
+                     f"{self.type}, errors: {','.join([str(err) for err in errors])}"
                )

        combined = pl.concat(datasets_per_name)
ert/config/gen_kw_config.py CHANGED
@@ -15,7 +15,7 @@ from typing_extensions import TypedDict

  from ._str_to_bool import str_to_bool
  from .distribution import DISTRIBUTION_CLASSES, DistributionSettings, get_distribution
- from .parameter_config import ParameterCardinality, ParameterConfig, ParameterMetadata
+ from .parameter_config import ParameterCardinality, ParameterConfig
  from .parsing import ConfigValidationError, ConfigWarning

  if TYPE_CHECKING:
@@ -53,6 +53,7 @@ class DataSource(StrEnum):

  class GenKwConfig(ParameterConfig):
      type: Literal["gen_kw"] = "gen_kw"
+     dimensionality: Literal[1] = 1
      distribution: DistributionSettings
      forward_init: bool = False
      update: bool = True
@@ -73,17 +74,6 @@
      def cardinality(self) -> ParameterCardinality:
          return ParameterCardinality.multiple_configs_per_ensemble_dataset

-     @property
-     def metadata(self) -> list[ParameterMetadata]:
-         return [
-             ParameterMetadata(
-                 key=f"{self.group}:{self.name}",
-                 transformation=self.distribution.name.upper(),
-                 dimensionality=1,
-                 userdata={"data_origin": "GEN_KW"},
-             )
-         ]
-
      @classmethod
      def templates_from_config(
          cls, gen_kw: list[str | dict[str, str]]
ert/config/parameter_config.py CHANGED
@@ -5,7 +5,7 @@ from collections.abc import Callable, Iterator
  from enum import StrEnum, auto
  from hashlib import sha256
  from pathlib import Path
- from typing import TYPE_CHECKING, Any, Literal
+ from typing import TYPE_CHECKING

  import networkx as nx
  import numpy as np
@@ -39,13 +39,6 @@ class ParameterCardinality(StrEnum):
      one_config_per_realization_dataset = auto()


- class ParameterMetadata(BaseModel):
-     key: str
-     transformation: str | None
-     dimensionality: Literal[1, 2, 3] = 1
-     userdata: dict[str, Any]
-
-
  class ParameterConfig(BaseModel):
      type: str
      name: str
@@ -59,14 +52,6 @@ class ParameterConfig(BaseModel):
          Returns a list of parameter keys within this parameter group
          """

-     @property
-     @abstractmethod
-     def metadata(self) -> list[ParameterMetadata]:
-         """
-         Returns metadata describing this parameter
-
-         """
-
      @abstractmethod
      def __len__(self) -> int:
          """Number of parameters"""
ert/config/parsing/_option_dict.py CHANGED
@@ -48,13 +48,21 @@ def option_dict(option_list: Sequence[str], offset: int) -> dict[str, str]:
          if len(option_pair.split(":")) == 2:
              key, val = option_pair.split(":")
              if val and key:
+                 if key in result:
+                     raise ConfigValidationError.with_context(
+                         f"Option {key} occured multiple times.", option_pair
+                     )
                  result[key] = val
              else:
                  raise ConfigValidationError.with_context(
-                     f"Invalid argument {option_pair!r}", option_pair
+                     "Option argument should be of the form 'key':'value', "
+                     f"got {option_pair!r}",
+                     option_pair,
                  )
          else:
              raise ConfigValidationError.with_context(
-                 f"Invalid argument {option_pair!r}", option_pair
+                 "Option argument should be of the form 'key':'value', "
+                 f"got {option_pair!r}",
+                 option_pair,
              )
      return result
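option_dict now rejects repeated option keys and reports malformed pairs with a message spelling out the expected 'key':'value' form. A behaviour sketch, assuming an offset of 0 means the whole argument list is treated as options (only the per-pair handling is visible in the hunk above):

    from ert.config.parsing import ConfigValidationError
    from ert.config.parsing._option_dict import option_dict

    # Well-formed options parse into a plain dict.
    print(option_dict(["MIN:0", "MAX:1"], 0))  # {'MIN': '0', 'MAX': '1'}

    # Repeating a key is now a validation error instead of a silent overwrite.
    try:
        option_dict(["MIN:0", "MIN:1"], 0)
    except ConfigValidationError as err:
        print(err)

    # A pair without a value hits the new "should be of the form 'key':'value'" message.
    try:
        option_dict(["MIN:"], 0)
    except ConfigValidationError as err:
        print(err)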
ert/config/parsing/config_keywords.py CHANGED
@@ -56,6 +56,7 @@ class ConfigKeys(StrEnum):
      REALIZATION_MEMORY = "REALIZATION_MEMORY"
      SUBMIT_SLEEP = "SUBMIT_SLEEP"
      MAX_RUNNING = "MAX_RUNNING"
+     PRIORITIZE_PRIVATE_IP_ADDRESS = "PRIORITIZE_PRIVATE_IP_ADDRESS"

      def __repr__(self) -> str:
          return f"{self.value!r}"
ert/config/parsing/config_schema.py CHANGED
@@ -126,6 +126,13 @@ def hook_workflow_keyword() -> SchemaItem:
      )


+ def prioritize_private_ip_address_keyword() -> SchemaItem:
+     return SchemaItem(
+         kw=ConfigKeys.PRIORITIZE_PRIVATE_IP_ADDRESS,
+         type_map=[SchemaItemType.BOOL],
+     )
+
+
  def set_env_keyword() -> SchemaItem:
      # You can set environment variables which will be applied to the run-time
      # environment.
@@ -349,6 +356,7 @@ def init_user_config_schema() -> ConfigSchemaDict:
          install_job_keyword(),
          install_job_directory_keyword(),
          hook_workflow_keyword(),
+         prioritize_private_ip_address_keyword(),
      ]:
          schema[item.kw] = item
          if item.kw in ConfigAliases:
ert/config/parsing/config_schema_deprecations.py CHANGED
@@ -39,9 +39,9 @@ deprecated_keywords_list = [
          keyword=kw,
          message=partial(
              lambda line, kw: f"Using {kw} with substitution strings "
-             + "that are not of the form '<KEY>' is deprecated. "
-             + f"Please change {line[0]} to "
-             + f"<{line[0].replace('<', '').replace('>', '')}>",
+             "that are not of the form '<KEY>' is deprecated. "
+             f"Please change {line[0]} to "
+             f"<{line[0].replace('<', '').replace('>', '')}>",
              kw=kw,
          ),
          check=lambda line: not DeprecationInfo.is_angle_bracketed(str(line[0])),
ert/config/parsing/config_schema_item.py CHANGED
@@ -37,25 +37,34 @@ class SchemaItem:

      # The minimum number of arguments
      argc_min: NonNegativeInt = 1
+
      # The maximum number of arguments: None means no upper limit
      argc_max: NonNegativeInt | None = 1
+
      # A list of types for the items. Set along with argc_minmax()
      type_map: list[SchemaItemType | EnumType | None] = Field(default_factory=list)
+
      # A list of item's which must also be set (if this item is set). (can be NULL)
      required_children: list[str] = Field(default_factory=list)
+
      # Information about the deprecation if deprecated
      deprecation_info: list[DeprecationInfo] = Field(default_factory=list)
-     # if positive, arguments after this count will be concatenated with a " " between
+
+     # If positive, arguments after this count will be concatenated with a " " between
      join_after: PositiveInt | None = None
-     # if positive, arguments after this count will be interpreted as options
+
+     # If positive, arguments after this count will be interpreted as options
      options_after: NonNegativeInt | Varies | None = None
-     # if true, will accumulate many values set for key, otherwise each entry will
+
+     # If true, will accumulate many values set for key, otherwise each entry will
      # overwrite any previous value set
      multi_occurrence: bool = False
+
      # Only applies to SchemaItemType.EXISTING_PATH_INLINE where
      # the contents is then parsed
      parser: Callable[[str, str], Any] = lambda x, y: y
      expand_envvar: bool = True
+
      # Index of tokens to do substitution from until end
      substitute_from: NonNegativeInt = 1
      required_set: bool = False
ert/config/parsing/context_values.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations

  from json import JSONEncoder
- from typing import Any, TypeVar, no_type_check
+ from typing import Any, Self, TypeVar, no_type_check

  from .file_context_token import FileContextToken

@@ -37,7 +37,7 @@ class ContextBool:


  class ContextInt(int):
-     def __new__(cls, val: int, token: FileContextToken) -> ContextInt:
+     def __new__(cls, val: int, token: FileContextToken) -> Self:
          obj = super().__new__(cls, val)
          obj.token = token
          return obj
@@ -50,7 +50,7 @@


  class ContextFloat(float):
-     def __new__(cls, val: float, token: FileContextToken) -> ContextFloat:
+     def __new__(cls, val: float, token: FileContextToken) -> Self:
          obj = super().__new__(cls, val)
          obj.token = token
          return obj
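ContextInt and ContextFloat now annotate __new__ with typing.Self (Python 3.11+) rather than the concrete class name, which types subclass instantiation correctly without forward references. A minimal standalone sketch of the same pattern; TaggedInt is illustrative, not part of ert:

    from typing import Self


    class TaggedInt(int):
        tag: str

        # Self ties the return type to whichever (sub)class is being instantiated.
        def __new__(cls, val: int, tag: str) -> Self:
            obj = super().__new__(cls, val)
            obj.tag = tag
            return obj


    x = TaggedInt(42, tag="answer")
    print(x + 1, x.tag)  # 43 answer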
ert/config/parsing/file_context_token.py CHANGED
@@ -12,7 +12,7 @@ class FileContextToken(Token):

      filename: str

-     def __new__(cls, token: Token, filename: str) -> FileContextToken:
+     def __new__(cls, token: Token, filename: str) -> FileContextToken:  # noqa: PYI034
          inst = super().__new__(
              cls,
              token.type,