resfo_utilities-0.3.0b0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of resfo-utilities might be problematic.

@@ -0,0 +1,594 @@
+ from dataclasses import dataclass
+ import os
+ from collections.abc import Iterable, Iterator
+ from typing import Any, IO, overload, Callable, TypeAlias
+ from datetime import datetime
+ from itertools import tee
+ import re
+ from functools import partial
+ import warnings
+ from natsort import natsorted
+ import numpy.typing as npt
+ import resfo
+ import numpy as np
+
+
+ class InvalidSummaryError(ValueError):
+     """Raised when a given summary file is not valid.
+
+     Can be raised either when the file can't be read (eg. a directory)
+     or its contents are not valid.
+     """
+
+
+ @dataclass
+ class SummaryKeyword:
+     """One member of the KEYWORDS array.
+
+     Attributes:
+         summary_variable:
+             The variable name, eg. WOPR or FOPT.
+         number:
+             A number associated with the keyword,
+             eg. for block variables it is the index of the block.
+         name:
+             A name associated with the keyword,
+             eg. for well variables it is the name of the well.
+         lgr_name:
+             If a local variable then the name of the Local Grid
+             Refinement.
+         li:
+             The i index of the host cell for the LGR.
+         lj:
+             The j index of the host cell for the LGR.
+         lk:
+             The k index of the host cell for the LGR.
+         unit:
+             The units for the value of the keyword, eg. for
+             FOPR it may be SM3/DAY.
+     """
+
+     summary_variable: str
+     number: int | None = None
+     name: str | None = None
+     lgr_name: str | None = None
+     li: int | None = None
+     lj: int | None = None
+     lk: int | None = None
+     unit: str | None = None
+
+
+ FileOpener: TypeAlias = Callable[[], IO[Any]]
+
+
+ class SummaryReader:
+     """Reader for summary files.
+
+     The result of running a reservoir simulator is a number of time vectors
+     which are written to summary files.
+
+     Each file is opened when the corresponding data is requested, so asking for
+     the properties of SummaryReader may raise `InvalidSummaryError` if the
+     corresponding file or its content is invalid.
+     """
+
+     @overload
+     def __init__(
+         self,
+         *,
+         case_path: str | os.PathLike[str],
+         smspec: None = None,
+         summaries: None = None,
+     ):
+         pass
+
+     @overload
+     def __init__(
+         self,
+         *,
+         smspec: FileOpener,
+         summaries: Iterable[FileOpener],
+         case_path: None = None,
+     ):
+         pass
+
+     def __init__(
+         self,
+         *,
+         case_path: str | os.PathLike[str] | None = None,
+         smspec: FileOpener | None = None,
+         summaries: Iterable[FileOpener] | None = None,
+     ):
+         """
+         Args:
+             case_path:
+                 The path to one summary file or the basename
+                 of several summary files. By giving just the base name,
+                 eg. `path/to/CASE`, SummaryReader will look for summary
+                 files named eg. `path/to/CASE.SMSPEC`, `path/to/CASE.UNSMRY`,
+                 `path/to/CASE.FSMSPEC`, `path/to/CASE.S0001`, etc. depending
+                 on whether the summary is formatted and unified.
+
+                 Summary files are looked for in the following order:
+                 unformatted before formatted, and unified before split,
+                 ie.: UNSMRY, Snnnn, FUNSMRY, and then Annnn.
+
+                 By giving an extension, only summary files that match the given
+                 formatted or unified combination are looked for, ie. if
+                 case_path="CASE.UNSMRY" then only "CASE.SMSPEC" and "CASE.UNSMRY"
+                 will be opened.
+         Raises:
+             FileNotFoundError:
+                 If the required summary files for the given case_path
+                 do not exist.
+         """
+         if case_path is None and (smspec is None or summaries is None):
+             raise ValueError(
+                 "SummaryReader must be initialized with"
+                 " either case_path or both smspec and summaries."
+             )
+         if case_path is not None and (smspec is not None or summaries is not None):
+             raise ValueError(
+                 "SummaryReader must be initialized with"
+                 " either case_path or smspec and summaries, not both."
+             )
+
+         if case_path is not None:
+             self._smspec, self._summaries = self._get_file_openers(case_path)
+         else:
+             assert smspec is not None
+             assert summaries is not None
+             self._smspec = smspec
+             self._summaries = summaries
+
+         self._start_date: datetime | None = None
+         self._summary_keywords: list[SummaryKeyword] | None = None
+         self._dimensions: tuple[int, int, int] | None = None
+         self._restart: str | None = None
+         self._have_read_smspec = False
+
+     @property
+     def smspec_filename(self) -> str:
+         """The filename of the summary spec file.
+
+         e.g. "CASE.SMSPEC"
+
+         """
+         return self._spec_filename
+
+     @property
+     def summary_filenames(self) -> Iterator[str]:
+         """The filenames of the summary files.
+
+         e.g. ["CASE.UNSMRY"] for unified or
+         ["CASE.S0001", "CASE.S0002"] for split.
+
+         """
+         return iter(self._summary_filenames)
+
+     @property
+     def start_date(self) -> datetime:
+         """The start date of the simulation."""
+         if self._start_date is not None:
+             return self._start_date
+         self._start_date, self._summary_keywords, self._dimensions, self._restart = (
+             _read_spec(self._smspec)
+         )
+         self._have_read_smspec = True
+         assert self._start_date is not None
+         return self._start_date
+
+     @property
+     def summary_keywords(self) -> list[SummaryKeyword]:
+         """The list of keywords in the summary."""
+         if self._summary_keywords is not None:
+             return self._summary_keywords
+         self._start_date, self._summary_keywords, self._dimensions, self._restart = (
+             _read_spec(self._smspec)
+         )
+         self._have_read_smspec = True
+         assert self._summary_keywords is not None
+         return self._summary_keywords
+
+     @property
+     def dimensions(self) -> tuple[int, int, int] | None:
+         """The dimensions of the grid used in the simulation."""
+         if self._have_read_smspec:
+             return self._dimensions
+         self._start_date, self._summary_keywords, self._dimensions, self._restart = (
+             _read_spec(self._smspec)
+         )
+         self._have_read_smspec = True
+         return self._dimensions
+
+     @property
+     def restart(self) -> str | None:
+         """The name of the case the simulation was restarted from (if any)."""
+         if self._have_read_smspec:
+             return self._restart
+         self._start_date, self._summary_keywords, self._dimensions, self._restart = (
+             _read_spec(self._smspec)
+         )
+         self._have_read_smspec = True
+         return self._restart
+
+     def values(
+         self, report_step_only: bool = True
+     ) -> Iterator[npt.NDArray[np.float32]]:
+         """Iterate over the values for the summary keywords.
+
+         Args:
+             report_step_only: If ``True``, yield only at report steps (``DATES``).
+         Yields:
+             arrays of the keyword values in the order of
+             the summary_keywords.
+         Raises:
+             InvalidSummaryError:
+                 If the summary files cannot be read from or contain invalid
+                 contents.
+         """
+
+         last_params = None
+         try:
+             for smry_opener in self._summaries:
+                 with smry_opener() as smry:
+                     summary_name = _stream_name(smry)
+
+                     def read_params() -> Iterator[npt.NDArray[np.float32]]:
+                         nonlocal last_params
+                         if last_params is not None:
+                             vals = _validate_array(
+                                 "PARAMS", summary_name, last_params.read_array()
+                             )
+                             last_params = None
+                             yield vals
+
+                     for entry in resfo.lazy_read(smry):
+                         kw = entry.read_keyword()
+                         if last_params and not report_step_only:
+                             yield from read_params()
+                         if kw == "PARAMS  ":
+                             last_params = entry
+                         if report_step_only and kw == "SEQHDR  ":
+                             yield from read_params()
+                     yield from read_params()
+         except OSError as err:
+             raise InvalidSummaryError(
+                 f"Could not read from summary file {err.filename}: {err.strerror}"
+             ) from err
+         except resfo.ResfoParsingError as err:
+             raise InvalidSummaryError(
+                 f"Summary files contained invalid contents: {err}"
+             ) from err
+
+     def _get_file_openers(
+         self,
+         case_path: str | os.PathLike[str],
+     ) -> tuple[FileOpener, Iterable[FileOpener]]:
+         self.case_path = case_path
+         self._summary_filenames, self._spec_filename = _get_summary_filenames(case_path)
+         mode = "rt" if self._spec_filename.lower().endswith("fsmspec") else "rb"
+
+         def opener(s: str | os.PathLike[str]) -> FileOpener:
+             def inner() -> IO[Any]:
+                 return open(os.path.abspath(s), mode)
+
+             return inner
+
+         return (
+             opener(self._spec_filename),
+             [opener(s) for s in self._summary_filenames],
+         )
+
+
+ def _read_spec(
+     spec_opener: FileOpener,
+ ) -> tuple[datetime, list[SummaryKeyword], tuple[int, int, int] | None, str | None]:
+     """Read an SMSPEC file and return start date, keywords, dimensions and restart.
+
+     This function performs validation and reads the start date, the summary
+     keywords, the grid dimensions and the restart case.
+
+     Args:
+         spec_opener: A function that returns a file-like object for the
+             SMSPEC (binary or text depending on format).
+
+     Returns:
+         tuple of the start date, list of summary keywords, dimensions,
+         and restart case path.
+
+     Raises:
+         InvalidSummaryError: On malformed content (e.g. missing STARTDAT
+             or KEYWORDS) or if parsing of the smspec fails.
+     """
+     start_date = None
+     num_keywords = None
+     dimensions = None
+     wgnames = None
+     spec_name = ""
+     try:
+         with spec_opener() as spec:
+             spec_name = _stream_name(spec)
+
+             arrays: dict[str, npt.NDArray[Any] | None] = dict.fromkeys(
+                 [
+                     "NUMS    ",
+                     "KEYWORDS",
+                     "NUMLX   ",
+                     "NUMLY   ",
+                     "NUMLZ   ",
+                     "LGRS    ",
+                     "UNITS   ",
+                     "RESTART ",
+                 ],
+                 None,
+             )
+             for entry in resfo.lazy_read(spec):
+                 # If we have found all values we are looking for
+                 # we stop reading
+                 if all(
+                     p is not None
+                     for p in [start_date, num_keywords, dimensions, *arrays.values()]
+                 ):
+                     break
+                 kw = entry.read_keyword()
+                 if kw in arrays:
+                     arrays[kw] = _validate_array(kw, spec_name, entry.read_array())
+                 # "NAMES   " is an alias for "WGNAMES "
+                 # if kw is one of either, we set wgnames
+                 if kw in {"WGNAMES ", "NAMES   "}:
+                     wgnames = _validate_array(kw, spec_name, entry.read_array())
+                 if kw == "DIMENS  ":
+                     vals = _validate_array(kw, spec_name, entry.read_array())
+                     size = len(vals)
+                     num_keywords = vals[0] if size > 0 else None
+                     dimensions = tuple(vals[1:4]) if size > 3 else None
+                 if kw == "STARTDAT":
+                     vals = _validate_array(kw, spec_name, entry.read_array())
+                     size = len(vals)
+                     day = vals[0] if size > 0 else 0
+                     month = vals[1] if size > 1 else 0
+                     year = vals[2] if size > 2 else 0
+                     hour = vals[3] if size > 3 else 0
+                     minute = vals[4] if size > 4 else 0
+                     microsecond = vals[5] if size > 5 else 0
+                     try:
+                         start_date = datetime(
+                             day=day,
+                             month=month,
+                             year=year,
+                             hour=hour,
+                             minute=minute,
+                             second=microsecond // 10**6,
+                             microsecond=microsecond % 10**6,
+                         )
+                     except Exception as err:
+                         raise InvalidSummaryError(
+                             f"SMSPEC {spec_name} contains invalid STARTDAT: {err}"
+                         ) from err
+     except OSError as err:
+         raise InvalidSummaryError(
+             f"Could not read from summary spec {err.filename}: {err.strerror}"
+         ) from err
+     except resfo.ResfoParsingError as err:
+         raise InvalidSummaryError(
+             f"Summary spec contained invalid contents: {err}"
+         ) from err
+
+     keywords = arrays["KEYWORDS"]
+     nums = arrays["NUMS    "]
+     numlx = arrays["NUMLX   "]
+     numly = arrays["NUMLY   "]
+     numlz = arrays["NUMLZ   "]
+     lgr_names = arrays["LGRS    "]
+     units = arrays["UNITS   "]
+
+     if start_date is None:
+         raise InvalidSummaryError(f"Keyword startdat missing in {spec_name}")
+     if keywords is None:
+         raise InvalidSummaryError(f"Keywords missing in {spec_name}")
+     if num_keywords is None:
+         num_keywords = len(keywords)
+         warnings.warn(
+             "SMSPEC did not contain num_keywords in DIMENS."
+             f" Using length of KEYWORDS: {num_keywords}."
+         )
+     elif num_keywords > len(keywords):
+         warnings.warn(
+             f"number of keywords given in DIMENS {num_keywords} is larger than the "
+             f"length of KEYWORDS {len(keywords)}, truncating size to match.",
+         )
+         num_keywords = len(keywords)
+
+     summary_keywords: list[SummaryKeyword] = []
+
+     def optional_get(arr: npt.NDArray[Any] | None, idx: int) -> Any:
+         if arr is None:
+             return None
+         if len(arr) <= idx:
+             return None
+         return arr[idx]
+
+     def decode_if_byte(key: bytes | str) -> str:
+         return key.decode() if isinstance(key, bytes) else key
+
+     @overload
+     def key2str(key: bytes | str) -> str:
+         pass
+
+     @overload
+     def key2str(key: None) -> None:
+         pass
+
+     def key2str(key: bytes | str | None) -> str | None:
+         if key is None:
+             return None
+         return decode_if_byte(key).strip()
+
+     for i in range(num_keywords):
+         summary_keywords.append(
+             SummaryKeyword(
+                 summary_variable=key2str(keywords[i]),
+                 number=optional_get(nums, i),
+                 name=key2str(optional_get(wgnames, i)),
+                 lgr_name=key2str(optional_get(lgr_names, i)),
+                 li=optional_get(numlx, i),
+                 lj=optional_get(numly, i),
+                 lk=optional_get(numlz, i),
+                 unit=key2str(optional_get(units, i)),
+             )
+         )
+
+     restart_arr = arrays["RESTART "]
+     restart = None
+     if restart_arr is not None:
+         restart = "".join(decode_if_byte(s) for s in restart_arr).strip()
+         if restart and not os.path.isabs(restart):
+             restart = os.path.join(os.path.dirname(spec_name), restart)
+
+     return (
+         start_date,
+         summary_keywords,
+         dimensions,
+         restart,
+     )
+
+
+ def _validate_array(
+     kw: str, filename: str, vals: npt.NDArray[Any] | resfo.MessType
+ ) -> npt.NDArray[Any]:
+     if isinstance(vals, resfo.MessType):
+         raise InvalidSummaryError(f"{kw.strip()} in {filename} has incorrect type MESS")
+     return vals
+
+
+ def _has_extension(path: str, ext: str) -> bool:
+     """
+     >>> _has_extension("ECLBASE.SMSPEC", "smspec")
+     True
+     >>> _has_extension("BASE.SMSPEC", "smspec")
+     True
+     >>> _has_extension("BASE.FUNSMRY", "smspec")
+     False
+     >>> _has_extension("ECLBASE.smspec", "smspec")
+     True
+     >>> _has_extension("ECLBASE.tar.gz.smspec", "smspec")
+     True
+
+     Args:
+         path: File name to check.
+         ext: Allowed extension regex.
+
+     Returns:
+         ``True`` if the file's extension matches the regex ``ext``.
+     """
+     if "." not in path:
+         return False
+     splitted = path.split(".")
+     return re.fullmatch(ext, splitted[-1].lower()) is not None
+
+
+ def _is_base_with_extension(base: str, path: str, ext: str) -> bool:
+     """
+     >>> _is_base_with_extension("ECLBASE", "ECLBASE.SMSPEC", "smspec")
+     True
+     >>> _is_base_with_extension("ECLBASE", "BASE.SMSPEC", "smspec")
+     False
+     >>> _is_base_with_extension("ECLBASE", "BASE.FUNSMRY", "smspec")
+     False
+     >>> _is_base_with_extension("ECLBASE", "ECLBASE.smspec", "smspec")
+     True
+     >>> _is_base_with_extension("ECLBASE.tar.gz", "ECLBASE.tar.gz.smspec", "smspec")
+     True
+
+     Args:
+         base: Basename without extension.
+         path: Candidate path.
+         ext: Allowed extension regex pattern.
+
+     Returns:
+         ``True`` if ``path`` is ``base`` followed by an extension matching ``ext``.
+     """
+     if "." not in path:
+         return False
+     splitted = path.split(".")
+     return (
+         ".".join(splitted[0:-1]) == base
+         and re.fullmatch(ext, splitted[-1].lower()) is not None
+     )
+
+
+ ANY_SUMMARY_EXTENSION = r"unsmry|smspec|funsmry|fsmspec|s\d\d\d\d|a\d\d\d\d"
+
+
+ def _get_summary_filenames(filepath: str | os.PathLike[str]) -> tuple[list[str], str]:
+     directory, file_name = os.path.split(filepath)
+     if "." in file_name:
+         case_name = ".".join(file_name.split(".")[:-1])
+     else:
+         case_name = file_name
+     specified_formatted = _has_extension(file_name, r"funsmry|fsmspec|a\d\d\d\d")
+     specified_unformatted = _has_extension(file_name, r"unsmry|smspec|s\d\d\d\d")
+     specified_unified = _has_extension(file_name, "funsmry")
+     specified_split = _has_extension(file_name, r"x\d\d\d\d|a\d\d\d\d")
+     spec_candidates, smry_candidates = tee(
+         map(
+             lambda x: os.path.join(directory, x),
+             filter(
+                 lambda x: _is_base_with_extension(
+                     path=x, base=case_name, ext=ANY_SUMMARY_EXTENSION
+                 ),
+                 os.listdir(directory or "."),
+             ),
+         )
+     )
+
+     def filter_extension(ext: str, lst: Iterable[str]) -> Iterator[str]:
+         return filter(partial(_has_extension, ext=ext), lst)
+
+     smry_candidates = filter_extension(
+         r"unsmry|funsmry|s\d\d\d\d|a\d\d\d\d", smry_candidates
+     )
+     if specified_split:
+         smry_candidates = filter_extension(r"s\d\d\d\d|a\d\d\d\d", smry_candidates)
+     if specified_unified:
+         smry_candidates = filter_extension("unsmry|funsmry", smry_candidates)
+     if specified_formatted:
+         smry_candidates = filter_extension("funsmry", smry_candidates)
+     if specified_unformatted:
+         smry_candidates = filter_extension("unsmry", smry_candidates)
+     all_summary = natsorted(list(smry_candidates))
+     summary = []
+     pat = None
+     for pat in ("unsmry", r"s\d\d\d\d", "funsmry", r"a\d\d\d\d"):
+         summary = list(filter_extension(pat, all_summary))
+         if summary:
+             break
+
+     if len(summary) != len(all_summary):
+         warnings.warn(f"More than one type of summary file, found {all_summary}")
+     if not summary:
+         raise FileNotFoundError(f"Could not find any summary files matching {filepath}")
+
+     if pat in ("unsmry", r"s\d\d\d\d"):
+         spec_candidates = filter_extension("smspec", spec_candidates)
+     else:
+         spec_candidates = filter_extension("fsmspec", spec_candidates)
+
+     spec = list(spec_candidates)
+     if len(spec) > 1:
+         warnings.warn(f"More than one type of summary spec file, found {spec}")
+
+     if not spec:
+         raise FileNotFoundError(f"Could not find any summary spec matching {filepath}")
+     return summary, spec[-1]
+
+
+ def _stream_name(stream: IO[Any]) -> str:
+     """
+     Returns:
+         The filename for an IO stream or 'unknown stream' if there is no filename
+         attached to the stream (which is the case for eg. `StringIO` and `BytesIO`).
+     """
+     return getattr(stream, "name", "unknown stream")
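
A minimal usage sketch of the reader above (illustrative only, not part of the package). It assumes a case with `CASE.SMSPEC` and `CASE.UNSMRY` on disk under a placeholder path, and that `SummaryReader` and `InvalidSummaryError` are importable from the package root, as the testing module below does for `SummaryReader`:

    from resfo_utilities import SummaryReader, InvalidSummaryError

    reader = SummaryReader(case_path="path/to/CASE")   # or eg. "path/to/CASE.UNSMRY"
    try:
        print(reader.start_date)                       # parsed from STARTDAT in the SMSPEC
        keywords = reader.summary_keywords             # one SummaryKeyword per KEYWORDS entry
        for row in reader.values():                    # one float32 array per report step
            # values() yields values in the order of summary_keywords
            for kw, value in zip(keywords, row):
                print(kw.summary_variable, kw.name, value, kw.unit)
    except InvalidSummaryError as err:
        print(f"invalid summary: {err}")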
@@ -0,0 +1,88 @@
+ """
+ The testing module implements hypothesis generators for data commonly
+ found in reservoir simulator output.
+
+ The basic usage is to use either the ``egrids`` generator::
+
+     from hypothesis import given
+     from resfo_utilities import egrids, EGrid
+
+     @given(egrids)
+     def test_egrid(egrid: EGrid):
+         print(egrid.shape)  # tuple ni,nj,nk
+         egrid.to_file("MY_CASE.EGRID")
+
+ or the ``summaries`` generator::
+
+     from resfo_utilities import SummaryReader
+     from resfo_utilities.testing import summaries
+     from io import BytesIO
+     from hypothesis import given
+
+     @given(summary=summaries())
+     def test_that_the_read_values_matches_those_in_the_input(summary):
+         smspec, unsmry = summary
+         smspec_buf = BytesIO()
+         unsmry_buf = BytesIO()
+         smspec.to_file(smspec_buf)
+         unsmry.to_file(unsmry_buf)
+         smspec_buf.seek(0)
+         unsmry_buf.seek(0)
+
+         summary = SummaryReader(smspec=lambda: smspec_buf, summaries=[lambda: unsmry_buf])
+ """
+
+ from ._egrid_generator import (
+     Units,
+     GridRelative,
+     GridUnit,
+     CoordinateType,
+     TypeOfGrid,
+     RockModel,
+     GridFormat,
+     Filehead,
+     GridHead,
+     GlobalGrid,
+     EGrid,
+     egrids,
+ )
+
+ from ._summary_generator import (
+     summary_variables,
+     UnitSystem,
+     Simulator,
+     SmspecIntehead,
+     Date,
+     Smspec,
+     smspecs,
+     SummaryMiniStep,
+     SummaryStep,
+     Unsmry,
+     summaries,
+ )
+
+ __all__ = [
+     "Units",
+     "GridRelative",
+     "GridUnit",
+     "CoordinateType",
+     "TypeOfGrid",
+     "RockModel",
+     "GridFormat",
+     "Filehead",
+     "GridHead",
+     "GlobalGrid",
+     "EGrid",
+     "egrids",
+     "summary_variables",
+     "UnitSystem",
+     "Simulator",
+     "SmspecIntehead",
+     "Date",
+     "Smspec",
+     "smspecs",
+     "SummaryMiniStep",
+     "SummaryStep",
+     "Unsmry",
+     "summaries",
+ ]
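
The docstring example above stops after constructing the reader. One way the round trip could continue, as an illustrative sketch that reuses the buffers from that example and only the SummaryReader API from the first file (the per-row length check is an assumption about the generated data, not a documented guarantee):

    import numpy as np

    reader = SummaryReader(smspec=lambda: smspec_buf, summaries=[lambda: unsmry_buf])
    keywords = reader.summary_keywords                  # parsed back from the generated SMSPEC
    for row in reader.values(report_step_only=False):   # every PARAMS vector, not only report steps
        assert row.dtype == np.float32                  # values() is typed to yield float32 arrays
        assert len(row) == len(keywords)                # assumption: one value per summary keyword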