ert 19.0.1__py3-none-any.whl → 20.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. ert/__main__.py +94 -63
  2. ert/analysis/_es_update.py +11 -14
  3. ert/cli/main.py +1 -1
  4. ert/config/__init__.py +3 -2
  5. ert/config/_create_observation_dataframes.py +52 -375
  6. ert/config/_observations.py +527 -200
  7. ert/config/_read_summary.py +4 -5
  8. ert/config/ert_config.py +52 -117
  9. ert/config/everest_control.py +40 -39
  10. ert/config/everest_response.py +3 -15
  11. ert/config/field.py +4 -76
  12. ert/config/forward_model_step.py +17 -1
  13. ert/config/gen_data_config.py +14 -17
  14. ert/config/observation_config_migrations.py +821 -0
  15. ert/config/parameter_config.py +18 -28
  16. ert/config/parsing/__init__.py +0 -1
  17. ert/config/parsing/_parse_zonemap.py +45 -0
  18. ert/config/parsing/config_keywords.py +1 -0
  19. ert/config/parsing/config_schema.py +2 -0
  20. ert/config/parsing/observations_parser.py +2 -0
  21. ert/config/response_config.py +5 -23
  22. ert/config/rft_config.py +129 -31
  23. ert/config/summary_config.py +1 -13
  24. ert/config/surface_config.py +0 -57
  25. ert/dark_storage/compute/misfits.py +0 -42
  26. ert/dark_storage/endpoints/__init__.py +0 -2
  27. ert/dark_storage/endpoints/experiments.py +2 -5
  28. ert/dark_storage/json_schema/experiment.py +1 -2
  29. ert/field_utils/__init__.py +0 -2
  30. ert/field_utils/field_utils.py +1 -117
  31. ert/gui/ertwidgets/listeditbox.py +9 -1
  32. ert/gui/ertwidgets/models/ertsummary.py +20 -6
  33. ert/gui/ertwidgets/pathchooser.py +9 -1
  34. ert/gui/ertwidgets/stringbox.py +11 -3
  35. ert/gui/ertwidgets/textbox.py +10 -3
  36. ert/gui/ertwidgets/validationsupport.py +19 -1
  37. ert/gui/main_window.py +11 -6
  38. ert/gui/simulation/experiment_panel.py +1 -1
  39. ert/gui/simulation/run_dialog.py +11 -1
  40. ert/gui/tools/manage_experiments/export_dialog.py +4 -0
  41. ert/gui/tools/manage_experiments/manage_experiments_panel.py +1 -0
  42. ert/gui/tools/manage_experiments/storage_info_widget.py +1 -1
  43. ert/gui/tools/manage_experiments/storage_widget.py +21 -4
  44. ert/gui/tools/plot/data_type_proxy_model.py +1 -1
  45. ert/gui/tools/plot/plot_api.py +35 -27
  46. ert/gui/tools/plot/plot_widget.py +5 -0
  47. ert/gui/tools/plot/plot_window.py +4 -7
  48. ert/run_models/ensemble_experiment.py +2 -9
  49. ert/run_models/ensemble_smoother.py +1 -9
  50. ert/run_models/everest_run_model.py +31 -23
  51. ert/run_models/initial_ensemble_run_model.py +19 -22
  52. ert/run_models/manual_update.py +11 -5
  53. ert/run_models/model_factory.py +7 -7
  54. ert/run_models/multiple_data_assimilation.py +3 -16
  55. ert/sample_prior.py +12 -14
  56. ert/scheduler/job.py +24 -4
  57. ert/services/__init__.py +7 -3
  58. ert/services/_storage_main.py +59 -22
  59. ert/services/ert_server.py +186 -24
  60. ert/shared/version.py +3 -3
  61. ert/storage/local_ensemble.py +50 -116
  62. ert/storage/local_experiment.py +94 -109
  63. ert/storage/local_storage.py +10 -12
  64. ert/storage/migration/to24.py +26 -0
  65. ert/storage/migration/to25.py +91 -0
  66. ert/utils/__init__.py +20 -0
  67. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/METADATA +4 -51
  68. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/RECORD +80 -83
  69. everest/bin/everest_script.py +5 -5
  70. everest/bin/kill_script.py +2 -2
  71. everest/bin/monitor_script.py +2 -2
  72. everest/bin/utils.py +4 -4
  73. everest/detached/everserver.py +6 -6
  74. everest/gui/everest_client.py +0 -6
  75. everest/gui/main_window.py +2 -2
  76. everest/util/__init__.py +1 -19
  77. ert/dark_storage/compute/__init__.py +0 -0
  78. ert/dark_storage/endpoints/compute/__init__.py +0 -0
  79. ert/dark_storage/endpoints/compute/misfits.py +0 -95
  80. ert/services/_base_service.py +0 -387
  81. ert/services/webviz_ert_service.py +0 -20
  82. ert/shared/storage/command.py +0 -38
  83. ert/shared/storage/extraction.py +0 -42
  84. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/WHEEL +0 -0
  85. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/entry_points.txt +0 -0
  86. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/licenses/COPYING +0 -0
  87. {ert-19.0.1.dist-info → ert-20.0.0b1.dist-info}/top_level.txt +0 -0
ert/config/_read_summary.py CHANGED
@@ -8,7 +8,6 @@ from __future__ import annotations
 
 import fnmatch
 import re
-import warnings
 from collections.abc import Callable, Sequence
 from datetime import datetime, timedelta
 from enum import Enum, auto
@@ -158,10 +157,10 @@ def _read_spec(
             if kw.summary_variable == "TIME":
                 date_index = i
                 date_unit_str = kw.unit
-        except InvalidSummaryKeyError as err:
-            warnings.warn(
-                f"Found {err} in summary specification, key not loaded", stacklevel=2
-            )
+        except InvalidSummaryKeyError:
+            # InvalidSummaryKeyError will happen under normal conditions when
+            # the the number of wells set for WELLDIMS in the .DATA file is
+            # larger than the number of declared wells/groups/etc. These are skipped.
            continue
 
        if should_load_key(key):
ert/config/ert_config.py CHANGED
@@ -7,23 +7,21 @@ import pprint
 import re
 from collections import Counter, defaultdict
 from collections.abc import Mapping
-from datetime import datetime
 from functools import cached_property
 from os import path
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, ClassVar, Self, cast, overload
 
-import polars as pl
 from numpy.random import SeedSequence
-from pydantic import BaseModel, Field, PrivateAttr, model_validator
+from pydantic import BaseModel, Field, model_validator
 from pydantic import ValidationError as PydanticValidationError
 
+from ert.config._create_observation_dataframes import create_observation_dataframes
 from ert.substitutions import Substitutions
 
-from ._create_observation_dataframes import create_observation_dataframes
 from ._design_matrix_validator import DesignMatrixValidator
 from ._observations import (
-    HistoryObservation,
+    GeneralObservation,
     Observation,
     RFTObservation,
     SummaryObservation,
@@ -52,7 +50,6 @@ from .parsing import (
     ConfigWarning,
     ErrorInfo,
     ForwardModelStepKeys,
-    HistorySource,
     HookRuntime,
     ObservationConfigError,
     init_forward_model_schema,
@@ -62,7 +59,6 @@ from .parsing import (
 )
 from .parsing.observations_parser import ObservationDict
 from .queue_config import KnownQueueOptions, QueueConfig
-from .refcase import Refcase
 from .rft_config import RFTConfig
 from .workflow import Workflow
 from .workflow_fixtures import fixtures_per_hook
@@ -104,23 +100,6 @@ def _seed_sequence(seed: int | None) -> int:
     return int_seed
 
 
-def _read_time_map(file_contents: str) -> list[datetime]:
-    def str_to_datetime(date_str: str) -> datetime:
-        try:
-            return datetime.fromisoformat(date_str)
-        except ValueError:
-            logger.warning(
-                "DD/MM/YYYY date format is deprecated"
-                ", please use ISO date format YYYY-MM-DD."
-            )
-            return datetime.strptime(date_str, "%d/%m/%Y")
-
-    dates = []
-    for line in file_contents.splitlines():
-        dates.append(str_to_datetime(line.strip()))
-    return dates
-
-
 def create_forward_model_json(
     context: dict[str, str],
     forward_model_steps: list[ForwardModelStep],
@@ -688,14 +667,6 @@ def log_observation_keys(
         if key not in {"name", "type"}
     )
 
-    if "HISTORY_OBSERVATION" in observation_type_counts:
-        msg = (
-            "HISTORY_OBSERVATION is deprecated and will be removed. "
-            "Please use SUMMARY_OBSERVATION instead."
-        )
-        ConfigWarning.warn(msg)
-        logger.warning(msg)
-
     logger.info(
         f"Count of observation types:\n\t{dict(observation_type_counts)}\n"
         f"Count of observation keywords:\n\t{dict(observation_keyword_counts)}"
@@ -739,43 +710,7 @@ class ErtConfig(BaseModel):
     user_config_file: str = "no_config"
     config_path: str = Field(init=False, default="")
     observation_declarations: list[Observation] = Field(default_factory=list)
-    time_map: list[datetime] | None = None
-    history_source: HistorySource = HistorySource.REFCASE_HISTORY
-    refcase: Refcase | None = None
-    _observations: dict[str, pl.DataFrame] | None = PrivateAttr(None)
-
-    @property
-    def observations(self) -> dict[str, pl.DataFrame]:
-        if self._observations is None:
-            has_rft_observations = any(
-                isinstance(o, RFTObservation) for o in self.observation_declarations
-            )
-            if (
-                has_rft_observations
-                and "rft" not in self.ensemble_config.response_configs
-            ):
-                self.ensemble_config.response_configs["rft"] = RFTConfig(
-                    input_files=[self.runpath_config.eclbase_format_string],
-                    data_to_read={},
-                    locations=[],
-                )
-            computed = create_observation_dataframes(
-                self.observation_declarations,
-                self.refcase,
-                cast(
-                    GenDataConfig | None,
-                    self.ensemble_config.response_configs.get("gen_data", None),
-                ),
-                cast(
-                    RFTConfig | None,
-                    self.ensemble_config.response_configs.get("rft", None),
-                ),
-                self.time_map,
-                self.history_source,
-            )
-            self._observations = computed
-            return computed
-        return self._observations
+    zonemap: dict[int, list[str]] = Field(default_factory=dict)
 
     @model_validator(mode="after")
     def set_fields(self) -> Self:
@@ -830,27 +765,45 @@
             )
         return self
 
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, ErtConfig):
-            return False
-
-        for attr in vars(self):
-            if attr == "observations":
-                if self.observations.keys() != other.observations.keys():
-                    return False
-
-                if not all(
-                    self.observations[k].equals(other.observations[k])
-                    for k in self.observations
-                ):
-                    return False
+    @model_validator(mode="after")
+    def validate_observations_against_responses(self) -> Self:
+        gen_data_config = cast(
+            GenDataConfig | None,
+            self.ensemble_config.response_configs.get("gen_data", None),
+        )
 
-                continue
+        errors: list[ErrorInfo] = []
+        for obs in self.observation_declarations:
+            if isinstance(obs, GeneralObservation):
+                response_key = obs.data
+                if gen_data_config is None or response_key not in gen_data_config.keys:
+                    errors.append(
+                        ErrorInfo(
+                            message=(
+                                f"Problem with GENERAL_OBSERVATION {obs.name}:"
+                                f" No GEN_DATA with name {response_key!r} found"
+                            )
+                        ).set_context(response_key)
+                    )
+                    continue
+                assert isinstance(gen_data_config, GenDataConfig)
+                _, report_steps = gen_data_config.get_args_for_key(response_key)
+                response_report_steps = [] if report_steps is None else report_steps
+                if response_report_steps and obs.restart not in response_report_steps:
+                    errors.append(
+                        ErrorInfo(
+                            message=(
+                                f"The GEN_DATA node:{response_key} is not configured "
+                                f"to load from report step:{obs.restart} for the "
+                                f"observation:{obs.name}"
+                            )
+                        ).set_context(response_key)
+                    )
 
-            if getattr(self, attr) != getattr(other, attr):
-                return False
+        if errors:
+            raise ConfigValidationError.from_collected(errors)
 
-        return True
+        return self
 
     @staticmethod
     def with_plugins(runtime_plugins: ErtRuntimePlugins) -> type[ErtConfig]:
@@ -1024,7 +977,7 @@ class ErtConfig(BaseModel):
             summary_obs = {
                 obs.key
                 for obs in obs_configs
-                if isinstance(obs, HistoryObservation | SummaryObservation)
+                if isinstance(obs, SummaryObservation)
             }
             if summary_obs:
                 summary_keys = ErtConfig._read_summary_keys(config_dict)
@@ -1032,16 +985,6 @@ class ErtConfig(BaseModel):
                     [key] for key in summary_obs if key not in summary_keys
                 ]
             ensemble_config = EnsembleConfig.from_dict(config_dict=config_dict)
-            time_map = None
-            if time_map_args := config_dict.get(ConfigKeys.TIME_MAP):
-                time_map_file, time_map_contents = time_map_args
-                try:
-                    time_map = _read_time_map(time_map_contents)
-                except ValueError as err:
-                    raise ConfigValidationError.with_context(
-                        f"Could not read timemap file {time_map_file}: {err}",
-                        time_map_file,
-                    ) from err
         except ConfigValidationError as err:
             errors.append(err)
         except PydanticValidationError as err:
@@ -1094,9 +1037,6 @@ class ErtConfig(BaseModel):
 
         env_vars = {}
         substituter = Substitutions(substitutions)
-        history_source = config_dict.get(
-            ConfigKeys.HISTORY_SOURCE, HistorySource.REFCASE_HISTORY
-        )
 
         # Insert env vars from plugins/site config
         for key, val in cls.ENV_VARS.items():
@@ -1118,8 +1058,10 @@ class ErtConfig(BaseModel):
                 user_configured_.add(key)
             env_vars[key] = substituter.substitute(val)
 
+        if errors:
+            raise ObservationConfigError.from_collected(errors)
+
         try:
-            refcase = Refcase.from_config_dict(config_dict)
             cls_config = cls(
                 substitutions=substitutions,
                 ensemble_config=ensemble_config,
@@ -1141,9 +1083,7 @@ class ErtConfig(BaseModel):
                 runpath_config=model_config,
                 user_config_file=config_file_path,
                 observation_declarations=list(obs_configs),
-                time_map=time_map,
-                history_source=history_source,
-                refcase=refcase,
+                zonemap=config_dict.get(ConfigKeys.ZONEMAP, ("", {}))[1],
             )
 
             # The observations are created here because create_observation_dataframes
@@ -1157,20 +1097,15 @@ class ErtConfig(BaseModel):
                     input_files=[eclbase],
                     data_to_read={},
                     locations=[],
+                    zonemap=cls_config.zonemap,
                 )
-            cls_config._observations = create_observation_dataframes(
+
+            # PS:
+            # This mutates the rft config and is necessary for the moment
+            # Consider changing this pattern
+            _ = create_observation_dataframes(
                 obs_configs,
-                refcase,
-                cast(
-                    GenDataConfig | None,
-                    ensemble_config.response_configs.get("gen_data", None),
-                ),
-                cast(
-                    RFTConfig | None,
-                    ensemble_config.response_configs.get("rft", None),
-                ),
-                time_map,
-                history_source,
+                cast(RFTConfig | None, ensemble_config.response_configs.get("rft")),
             )
         except PydanticValidationError as err:
             raise ConfigValidationError.from_pydantic(err) from err
ert/config/everest_control.py CHANGED
@@ -11,13 +11,14 @@ from typing import TYPE_CHECKING, Any, Literal, Self
 
 import networkx as nx
 import numpy as np
+import polars as pl
 import xarray as xr
 from pydantic import BaseModel, ConfigDict, Field, model_validator
 from ropt.workflow import find_sampler_plugin
 
 from ert.substitutions import substitute_runpath_name
 
-from .parameter_config import ParameterConfig
+from .parameter_config import ParameterCardinality, ParameterConfig
 
 if TYPE_CHECKING:
     import numpy.typing as npt
@@ -175,31 +176,47 @@ class EverestControl(ParameterConfig):
     def parameter_keys(self) -> list[str]:
         return self.input_keys
 
+    @property
+    def cardinality(self) -> ParameterCardinality:
+        return ParameterCardinality.multiple_configs_per_ensemble_dataset
+
     def read_from_runpath(
         self, run_path: Path, real_nr: int, iteration: int
     ) -> xr.Dataset:
         raise NotImplementedError
 
+    def load_parameters(
+        self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
+    ) -> npt.NDArray[np.float64]:
+        raise NotImplementedError
+
+    def load_parameter_graph(self) -> nx.Graph[int]:
+        raise NotImplementedError
+
+    def __len__(self) -> int:
+        return len(self.input_keys)
+
     def write_to_runpath(
         self, run_path: Path, real_nr: int, ensemble: Ensemble
     ) -> None:
-        file_path = run_path / substitute_runpath_name(
+        file_path: Path = run_path / substitute_runpath_name(
             self.output_file, real_nr, ensemble.iteration
         )
         Path.mkdir(file_path.parent, exist_ok=True, parents=True)
 
-        data: MutableDataType = {}
-        for da in ensemble.load_parameters(self.name, real_nr)["values"]:
-            assert isinstance(da, xr.DataArray)
-            name = str(da.names.values)
-            try:
-                outer, inner = name.split("\0")
-
-                if outer not in data:
-                    data[outer] = {}
-                data[outer][inner] = float(da)  # type: ignore
-            except ValueError:
-                data[name] = float(da)
+        data: dict[str, Any] = {}
+        df = ensemble.load_parameters(self.name, real_nr)
+        assert isinstance(df, pl.DataFrame)
+        df = df.drop("realization")
+        df = df.rename({col: col.replace(f"{self.name}.", "", 1) for col in df.columns})
+        for c in df.columns:
+            if "." in c:
+                top_key, sub_key = c.split(".", 1)
+                if top_key not in data:
+                    data[top_key] = {}
+                data[top_key][sub_key] = df[c].item()
+            else:
+                data[c] = df[c].item()
 
         file_path.write_text(json.dumps(data), encoding="utf-8")
 
@@ -207,28 +224,12 @@
         self,
         from_data: npt.NDArray[np.float64],
         iens_active_index: npt.NDArray[np.int_],
-    ) -> Iterator[tuple[int, xr.Dataset]]:
-        for i, realization in enumerate(iens_active_index):
-            yield (
-                int(realization),
-                xr.Dataset(
-                    {
-                        "values": ("names", from_data[:, i]),
-                        "names": [
-                            x.split(f"{self.name}.")[1].replace(".", "\0")
-                            for x in self.parameter_keys
-                        ],
-                    }
-                ),
-            )
-
-    def load_parameters(
-        self, ensemble: Ensemble, realizations: npt.NDArray[np.int_]
-    ) -> npt.NDArray[np.float64]:
-        raise NotImplementedError
-
-    def load_parameter_graph(self) -> nx.Graph[int]:
-        raise NotImplementedError
-
-    def __len__(self) -> int:
-        return len(self.input_keys)
+    ) -> Iterator[tuple[None, pl.DataFrame]]:
+        df = pl.DataFrame(
+            {
+                "realization": iens_active_index,
+                **{k: from_data[:, i] for i, k in enumerate(self.parameter_keys)},
+            },
+            strict=False,
+        )
+        yield None, df
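Note: the following is a minimal, standalone sketch (not part of the diff) of the new polars-based control layout: create_storage_datasets now yields a single wide DataFrame with a "realization" column plus one column per control key, and write_to_runpath re-nests dotted column names into the JSON written to the runpath. The control names below are invented for illustration.

    import json

    import polars as pl

    # Hypothetical controls: one row per realization, one column per control key
    df = pl.DataFrame(
        {
            "realization": [0, 1],
            "well_rate.W1": [1.0, 2.0],
            "well_rate.W2": [3.0, 4.0],
            "bhp": [5.0, 6.0],
        }
    )

    # Re-nest dotted column names the same way the new write_to_runpath does
    row = df.filter(pl.col("realization") == 0).drop("realization")
    data: dict = {}
    for c in row.columns:
        if "." in c:
            top_key, sub_key = c.split(".", 1)
            data.setdefault(top_key, {})[sub_key] = row[c].item()
        else:
            data[c] = row[c].item()

    print(json.dumps(data))  # {"well_rate": {"W1": 1.0, "W2": 3.0}, "bhp": 5.0}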
ert/config/everest_response.py CHANGED
@@ -7,7 +7,7 @@ import polars as pl
 from ert.substitutions import substitute_runpath_name
 
 from .parsing import ConfigDict
-from .response_config import InvalidResponseFile, ResponseConfig, ResponseMetadata
+from .response_config import InvalidResponseFile, ResponseConfig
 from .responses_index import responses_index
 
 
@@ -21,18 +21,6 @@ class EverestResponse(ResponseConfig):
     def primary_key(self) -> list[str]:
         return []
 
-    @property
-    def metadata(self) -> list[ResponseMetadata]:
-        return [
-            ResponseMetadata(
-                response_type=self.type,
-                response_key=response_key,
-                finalized=self.has_finalized_keys,
-                filter_on=None,
-            )
-            for response_key in self.keys
-        ]
-
     @property
     def expected_input_files(self) -> list[str]:
         return self.input_files
@@ -91,8 +79,8 @@ class EverestConstraintsConfig(EverestResponse):
 class EverestConstraintsConfig(EverestResponse):
     type: Literal["everest_constraints"] = "everest_constraints"
     targets: list[float | None]
-    upper_bounds: list[float]
-    lower_bounds: list[float]
+    upper_bounds: list[float | None]
+    lower_bounds: list[float | None]
 
 
 responses_index.add_response_type(EverestConstraintsConfig)
ert/config/field.py CHANGED
@@ -17,13 +17,10 @@ from ert.field_utils import (
     ErtboxParameters,
     FieldFileFormat,
     Shape,
-    calc_rho_for_2d_grid_layer,
     calculate_ertbox_parameters,
     get_shape,
     read_field,
     save_field,
-    transform_local_ellipse_angle_to_local_coords,
-    transform_positions_to_local_field_coordinates,
 )
 from ert.substitutions import substitute_runpath_name
 from ert.utils import log_duration
@@ -73,10 +70,10 @@ class Field(ParameterConfig):
     dimensionality: Literal[3] = 3
     ertbox_params: ErtboxParameters
     file_format: FieldFileFormat
-    output_transformation: str | None
-    input_transformation: str | None
-    truncation_min: float | None
-    truncation_max: float | None
+    output_transformation: str | None = None
+    input_transformation: str | None = None
+    truncation_min: float | None = None
+    truncation_max: float | None = None
     forward_init_file: str
     output_file: Path
     grid_file: str
@@ -318,75 +315,6 @@ class Field(ParameterConfig):
     def nz(self) -> int:
         return self.ertbox_params.nz
 
-    def calc_rho_for_2d_grid_layer(
-        self,
-        obs_xpos: npt.NDArray[np.float64],
-        obs_ypos: npt.NDArray[np.float64],
-        obs_main_range: npt.NDArray[np.float64],
-        obs_perp_range: npt.NDArray[np.float64],
-        obs_anisotropy_angle: npt.NDArray[np.float64],
-        right_handed_grid_indexing: bool = True,
-    ) -> npt.NDArray[np.float64]:
-        """Function to calculate scaling values to be used in the RHO matrix
-        for distance-based localization.
-
-        Args:
-            obs_xpos: x-coordinates in global coordinates of observations
-            obs_ypos: y-coordinates in global coordinates of observations
-            obs_main_range: Size of influence ellipse main principal direction.
-            obs_perp_range: Size of influence ellipse second principal direction.
-            obs_anisotropy_angle: Rotation angle anticlock wise of main principal
-                direction of influence ellipse relative to global coordinate
-                system's x-axis.
-            right_handed_grid_indexing: When this is True the field parameters
-                grid index order counts J-index down from ny-1 to 0.
-                If the value is False, the grid index order is to count J index
-                from 0 to ny-1. As standard for 3D field parameters,
-                the grid index order follows the right_handed grid indexing.
-
-        Returns:
-            Scaling values (elements of the RHO matrix) as a numpy array
-            of shape=(nx,ny,nobservations)
-
-        """
-        # Can only be used if ertbox coordinate system is defined
-        assert self.ertbox_params.xinc is not None, (
-            "Parameter for grid resolution must be defined"
-        )
-        assert self.ertbox_params.yinc is not None, (
-            "Parameter for grid resolution must be defined"
-        )
-        assert self.ertbox_params.origin is not None, (
-            "Parameter for grid origin must be defined"
-        )
-        assert self.ertbox_params.rotation_angle is not None, (
-            "Parameter for grid rotation must be defined"
-        )
-        # Transform positions of observations into local coordinates
-        xpos, ypos = transform_positions_to_local_field_coordinates(
-            self.ertbox_params.origin,
-            self.ertbox_params.rotation_angle,
-            obs_xpos,
-            obs_ypos,
-        )
-        # Transform localization ellipse orientation to local coordinates
-        ellipse_rotation = transform_local_ellipse_angle_to_local_coords(
-            self.ertbox_params.rotation_angle, obs_anisotropy_angle
-        )
-
-        return calc_rho_for_2d_grid_layer(
-            self.ertbox_params.nx,
-            self.ertbox_params.ny,
-            self.ertbox_params.xinc,
-            self.ertbox_params.yinc,
-            xpos,
-            ypos,
-            obs_main_range,
-            obs_perp_range,
-            ellipse_rotation,
-            right_handed_grid_indexing=right_handed_grid_indexing,
-        )
-
 
 TRANSFORM_FUNCTIONS: Final[dict[str, Callable[[Any], Any]]] = {
     "LN": np.log,
ert/config/forward_model_step.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import inspect
 import logging
 from typing import (
     TYPE_CHECKING,
@@ -95,9 +96,24 @@ class ForwardModelStepOptions(TypedDict, total=False):
     required_keywords: NotRequired[list[str]]
 
 
+def _get_source_package() -> str:
+    """Return the top-level package name of the calling forward model step.
+
+    Finds the documentation() call (stack[2]) under the forward model step class
+    and return its parent module
+    """
+    stack = inspect.stack()
+    if len(stack) > 2:
+        caller_frame = stack[2]
+        caller_module = inspect.getmodule(caller_frame.frame)
+        if caller_module:
+            return caller_module.__name__.split(".")[0]
+    return "not found"
+
+
 class ForwardModelStepDocumentation(BaseModel):
     config_file: str | None = Field(default=None)
-    source_package: str = Field(default="ert")
+    source_package: str = Field(default_factory=_get_source_package)
     source_function_name: str = Field(default="ert")
     description: str = Field(default="No description")
     examples: str = Field(default="No examples")
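Note: a minimal sketch (not part of the diff) of the stack-inspection technique behind the new default_factory: a factory that reports the top-level package of the module that triggered it. The frame depth below is illustrative; the real _get_source_package looks two frames up so that it lands on the plugin's documentation() call rather than on pydantic's Field machinery. The function and module names here are hypothetical.

    import inspect

    def calling_package(depth: int = 1) -> str:
        # Return the top-level package of the module `depth` frames up the stack.
        stack = inspect.stack()
        if len(stack) > depth:
            module = inspect.getmodule(stack[depth].frame)
            if module is not None:
                return module.__name__.split(".")[0]
        return "not found"

    # Called from a module my_plugin.steps this would return "my_plugin";
    # run directly as a script it returns "__main__".
    print(calling_package())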
ert/config/gen_data_config.py CHANGED
@@ -14,7 +14,6 @@ from .parsing import ConfigDict, ConfigValidationError, ConfigWarning, ErrorInfo
 from .response_config import (
     InvalidResponseFile,
     ResponseConfig,
-    ResponseMetadata,
 )
 from .responses_index import responses_index
 
@@ -24,22 +23,6 @@ class GenDataConfig(ResponseConfig):
     report_steps_list: list[list[int] | None] = Field(default_factory=list)
     has_finalized_keys: bool = True
 
-    @property
-    def metadata(self) -> list[ResponseMetadata]:
-        return [
-            ResponseMetadata(
-                response_type=self.type,
-                response_key=response_key,
-                finalized=self.has_finalized_keys,
-                filter_on={"report_step": report_steps}
-                if report_steps is not None
-                else {"report_step": [0]},
-            )
-            for response_key, report_steps in zip(
-                self.keys, self.report_steps_list, strict=False
-            )
-        ]
-
     def model_post_init(self, ctx: Any) -> None:
         if len(self.report_steps_list) == 0:
             self.report_steps_list = [[0] for _ in self.keys]
@@ -48,6 +31,20 @@ class GenDataConfig(ResponseConfig):
             if report_steps is not None:
                 report_steps.sort()
 
+    @property
+    def filter_on(self) -> dict[str, dict[str, list[int]]]:
+        """Filters for this response.
+
+        For ``GEN_DATA`` this is always supported: return
+        ``{response_key: {"report_step": [allowed_steps...]}}``.
+        """
+        return {
+            response_key: {"report_step": report_steps or [0]}
+            for response_key, report_steps in zip(
+                self.keys, self.report_steps_list, strict=False
+            )
+        }
+
     @property
     def expected_input_files(self) -> list[str]:
         expected_files = []
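Note: a minimal sketch (not part of the diff) of the mapping shape the new filter_on property produces, using invented GEN_DATA keys; a None entry in report_steps_list falls back to report step 0.

    keys = ["WPR_DIFF_1", "POLY_RES"]  # hypothetical response keys
    report_steps_list = [[199], None]  # None: no explicit report steps configured

    filter_on = {
        response_key: {"report_step": report_steps or [0]}
        for response_key, report_steps in zip(keys, report_steps_list, strict=False)
    }

    assert filter_on == {
        "WPR_DIFF_1": {"report_step": [199]},
        "POLY_RES": {"report_step": [0]},
    }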