pastastore 1.10.2__py3-none-any.whl → 1.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pastastore/styling.py CHANGED
@@ -55,16 +55,18 @@ def boolean_styler(b):
 
     >>> df.style.map(boolean_styler, subset=["some column"])
     """
-    if b:
+    if b is True or b is np.True_:
         return (
             f"background-color: {rgb2hex((231 / 255, 255 / 255, 239 / 255))}; "
             "color: darkgreen"
         )
-    else:
+    elif b is False or b is np.False_:
         return (
             f"background-color: {rgb2hex((255 / 255, 238 / 255, 238 / 255))}; "
             "color: darkred"
         )
+    else:
+        return "background-color: White; color: Black"
 
 
 def boolean_row_styler(row, column):
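Note that boolean_styler previously used a bare "if b:", which also sent truthy non-booleans (including NaN, which is truthy) down the green branch. After this change only genuine booleans are colored and anything else falls back to a neutral style. A minimal sketch of the new behavior (the DataFrame and column name are made up for illustration):

    import numpy as np
    import pandas as pd

    from pastastore.styling import boolean_styler

    df = pd.DataFrame({"check_passed": [True, False, np.nan]})
    # True -> green, False -> red, NaN -> white background, black text
    styled = df.style.map(boolean_styler, subset=["check_passed"])
    styled.to_html()  # or display directly in a notebook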
pastastore/typing.py ADDED
@@ -0,0 +1,12 @@
+"""Typing definitions for PastasStore."""
+
+from typing import Literal, Union
+
+import pandas as pd
+
+FrameOrSeriesUnion = Union[pd.DataFrame, pd.Series]
+
+# Literal types for library names
+TimeSeriesLibs = Literal["oseries", "stresses"]
+PastasLibs = Literal["oseries", "stresses", "models"]
+AllLibs = Literal["oseries", "stresses", "models", "oseries_models", "stresses_models"]
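These Literal aliases document which library names are valid and let a static type checker catch typos in libname arguments. A small sketch of how they can be used (the function below is hypothetical, not part of the package):

    from pastastore.typing import PastasLibs

    def describe_library(libname: PastasLibs) -> str:
        # mypy/pyright restrict libname to "oseries", "stresses" or "models"
        return f"library: {libname}"

    describe_library("oseries")   # accepted
    describe_library("osersies")  # flagged by the type checker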
pastastore/util.py CHANGED
@@ -1,27 +1,287 @@
 """Useful utilities for pastastore."""
 
+import json
+import logging
 import os
 import shutil
+import sys
+from pathlib import Path
 from typing import Dict, List, Optional, Union
 
 import numpy as np
 import pandas as pd
+from colorama import Back, Fore, Style
 from numpy.lib._iotools import NameValidator
 from pandas.testing import assert_series_equal
+from pastas import Model
+from pastas.io.pas import PastasEncoder
 from pastas.stats.tests import runs_test, stoffer_toloi
 from tqdm.auto import tqdm
 
-from pastastore.version import PASTAS_LEQ_022
+from pastastore.styling import boolean_row_styler
+from pastastore.typing import TimeSeriesLibs
+
+logger = logging.getLogger(__name__)
 
 
 def _custom_warning(message, category=UserWarning, filename="", lineno=-1, *args):
     print(f"{filename}:{lineno}: {category.__name__}: {message}")
 
 
+class ZipUtils:
+    """Utility class for zip file operations."""
+
+    def __init__(self, pstore):
+        self.pstore = pstore
+
+    def _stored_series_to_json(
+        self,
+        libname: TimeSeriesLibs,
+        names: Optional[Union[list, str]] = None,
+        squeeze: bool = True,
+        progressbar: bool = False,
+    ):
+        """Write stored series to JSON.
+
+        Parameters
+        ----------
+        libname : str
+            library name
+        names : Optional[Union[list, str]], optional
+            names of series, by default None
+        squeeze : bool, optional
+            return single entry as json string instead
+            of list, by default True
+        progressbar : bool, optional
+            show progressbar, by default False
+
+        Returns
+        -------
+        files : list or str
+            list of series converted to JSON string or single string
+            if single entry is returned and squeeze is True
+        """
+        names = self.pstore.parse_names(names, libname=libname)
+        files = []
+        for n in tqdm(names, desc=libname) if progressbar else names:
+            s = self.pstore.conn._get_series(libname, n, progressbar=False)
+            if isinstance(s, pd.Series):
+                s = s.to_frame()
+            try:
+                sjson = s.to_json(orient="columns")
+            except ValueError as e:
+                msg = (
+                    f"DatetimeIndex of '{n}' probably contains NaT "
+                    "or duplicate timestamps!"
+                )
+                raise ValueError(msg) from e
+            files.append(sjson)
+        if len(files) == 1 and squeeze:
+            return files[0]
+        else:
+            return files
+
+    def _stored_metadata_to_json(
+        self,
+        libname: TimeSeriesLibs,
+        names: Optional[Union[list, str]] = None,
+        squeeze: bool = True,
+        progressbar: bool = False,
+    ):
+        """Write metadata from stored series to JSON.
+
+        Parameters
+        ----------
+        libname : str
+            library containing series
+        names : Optional[Union[list, str]], optional
+            names to parse, by default None
+        squeeze : bool, optional
+            return single entry as json string instead of list, by default True
+        progressbar : bool, optional
+            show progressbar, by default False
+
+        Returns
+        -------
+        files : list or str
+            list of json string
+        """
+        names = self.pstore.parse_names(names, libname=libname)
+        files = []
+        for n in tqdm(names, desc=libname) if progressbar else names:
+            meta = self.pstore.get_metadata(libname, n, as_frame=False)
+            meta_json = json.dumps(meta, cls=PastasEncoder, indent=4)
+            files.append(meta_json)
+        if len(files) == 1 and squeeze:
+            return files[0]
+        else:
+            return files
+
+    def series_to_archive(
+        self,
+        archive,
+        libname: TimeSeriesLibs,
+        names: Optional[Union[list, str]] = None,
+        progressbar: bool = True,
+    ):
+        """Write DataFrame or Series to zipfile (internal method).
+
+        Parameters
+        ----------
+        archive : zipfile.ZipFile
+            reference to an archive to write data to
+        libname : str
+            name of the library to write to zipfile
+        names : str or list of str, optional
+            names of the time series to write to archive, by default None,
+            which writes all time series to archive
+        progressbar : bool, optional
+            show progressbar, by default True
+        """
+        names = self.pstore.parse_names(names, libname=libname)
+        for n in tqdm(names, desc=libname) if progressbar else names:
+            sjson = self._stored_series_to_json(
+                libname, names=n, progressbar=False, squeeze=True
+            )
+            meta_json = self._stored_metadata_to_json(
+                libname, names=n, progressbar=False, squeeze=True
+            )
+            archive.writestr(f"{libname}/{n}.pas", sjson)
+            archive.writestr(f"{libname}/{n}_meta.pas", meta_json)
+
+    def models_to_archive(self, archive, names=None, progressbar=True):
+        """Write pastas.Model to zipfile (internal method).
+
+        Parameters
+        ----------
+        archive : zipfile.ZipFile
+            reference to an archive to write data to
+        names : str or list of str, optional
+            names of the models to write to archive, by default None,
+            which writes all models to archive
+        progressbar : bool, optional
+            show progressbar, by default True
+        """
+        names = self.pstore.parse_names(names, libname="models")
+        for n in tqdm(names, desc="models") if progressbar else names:
+            m = self.pstore.get_models(n, return_dict=True)
+            jsondict = json.dumps(m, cls=PastasEncoder, indent=4)
+            archive.writestr(f"models/{n}.pas", jsondict)
+
+
+class ColoredFormatter(logging.Formatter):
+    """Colored log formatter.
+
+    Taken from
+    https://gist.github.com/joshbode/58fac7ababc700f51e2a9ecdebe563ad
+    """
+
+    def __init__(
+        self, *args, colors: Optional[Dict[str, str]] = None, **kwargs
+    ) -> None:
+        """Initialize the formatter with specified format strings."""
+        super().__init__(*args, **kwargs)
+
+        self.colors = colors if colors else {}
+
+    def format(self, record) -> str:
+        """Format the specified record as text."""
+        record.color = self.colors.get(record.levelname, "")
+        record.reset = Style.RESET_ALL
+
+        return super().format(record)
+
+
+def get_color_logger(level="INFO", logger_name=None):
+    """Get a logger with colored output.
+
+    Parameters
+    ----------
+    level : str, optional
+        The logging level to set for the logger. Default is "INFO".
+
+    Returns
+    -------
+    logger : logging.Logger
+        The configured logger object.
+    """
+    if level == "DEBUG":
+        FORMAT = "{color}{levelname}:{name}.{funcName}:{lineno}:{message}{reset}"
+    else:
+        FORMAT = "{color}{message}{reset}"
+    formatter = ColoredFormatter(
+        FORMAT,
+        style="{",
+        datefmt="%Y-%m-%d %H:%M:%S",
+        colors={
+            "DEBUG": Fore.CYAN,
+            "INFO": Fore.GREEN,
+            "WARNING": Fore.YELLOW,
+            "ERROR": Fore.RED,
+            "CRITICAL": Fore.RED + Back.WHITE + Style.BRIGHT,
+        },
+    )
+
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setFormatter(formatter)
+
+    logger = logging.getLogger(logger_name)
+    logger.handlers[:] = []
+    logger.addHandler(handler)
+    logger.setLevel(getattr(logging, level))
+
+    logging.captureWarnings(True)
+    return logger
+
+
 class ItemInLibraryException(Exception):
     """Exception when item is already in library."""
 
-    pass
+
+class SeriesUsedByModel(Exception):
+    """Exception raised when a series is used by a model."""
+
+
+def series_from_json(fjson: str, squeeze: bool = True):
+    """Load time series from JSON.
+
+    Parameters
+    ----------
+    fjson : str
+        path to file
+    squeeze : bool, optional
+        squeeze time series object to obtain pandas Series
+
+    Returns
+    -------
+    s : pd.DataFrame
+        DataFrame containing time series
+    """
+    s = pd.read_json(fjson, orient="columns", precise_float=True, dtype=False)
+    if not isinstance(s.index, pd.DatetimeIndex):
+        s.index = pd.to_datetime(s.index, unit="ms")
+    s = s.sort_index()  # needed for some reason ...
+    if squeeze:
+        return s.squeeze(axis="columns")
+    return s
+
+
+def metadata_from_json(fjson: str):
+    """Load metadata dictionary from JSON.
+
+    Parameters
+    ----------
+    fjson : str
+        path to file
+
+    Returns
+    -------
+    meta : dict
+        dictionary containing metadata
+    """
+    with open(fjson, "r", encoding="utf-8") as f:
+        meta = json.load(f)
+    return meta
 
 
 def delete_arcticdb_connector(
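The new get_color_logger helper is self-contained: it installs a colorama-based formatter on a stdout handler, replaces any existing handlers, and routes captured warnings through logging. A short usage sketch (the logger name and messages are arbitrary):

    from pastastore.util import get_color_logger

    logger = get_color_logger(level="DEBUG", logger_name="pastastore")
    logger.debug("cyan, with name/function/lineno shown at DEBUG level")
    logger.info("green")
    logger.warning("yellow")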
@@ -52,9 +312,10 @@ def delete_arcticdb_connector(
     elif name is None or uri is None:
         raise ValueError("Provide 'name' and 'uri' OR 'conn'!")
 
+    # connect to arcticdb
    arc = arcticdb.Arctic(uri)
 
-    print(f"Deleting ArcticDBConnector database: '{name}' ... ", end="")
+    logger.info("Deleting ArcticDBConnector database: '%s' ... ", name)
     # get library names
     if libraries is None:
         libs = []
@@ -68,53 +329,49 @@ def delete_arcticdb_connector(
 
     for lib in libs:
         arc.delete_library(lib)
-        # shutil.rmtree(os.path.join(conn.uri.split("//")[-1], lib))
 
         if libraries is not None:
-            print()
-            print(f" - deleted: {lib}")
+            logger.info(" - deleted: %s", lib)
 
     # delete .pastastore file if entire pastastore is deleted
    remaining_libs = [
         ilib for ilib in arc.list_libraries() if ilib.split(".")[0] == name
     ]
     if remaining_libs == 0:
-        os.unlink(os.path.join(uri.split("//")[-1], f"{name}.pastastore"))
+        os.unlink(uri.split("//")[-1] / f"{name}.pastastore")
 
     # check if any remaining libraries in lmdb dir, if none, delete entire folder
     remaining = arc.list_libraries()
     if len(remaining) == 0:
-        shutil.rmtree(os.path.join(conn.uri.split("//")[-1]))
+        shutil.rmtree(Path(conn.uri.split("://")[-1]))
 
-    print("Done!")
+    logger.info("Done!")
 
 
 def delete_dict_connector(conn, libraries: Optional[List[str]] = None) -> None:
     """Delete DictConnector object."""
-    print(f"Deleting DictConnector: '{conn.name}' ... ", end="")
+    logger.info("Deleting DictConnector: '%s' ... ", conn.name)
     if libraries is None:
         del conn
-        print(" Done!")
+        logger.info("Done!")
     else:
         for lib in libraries:
-            print()
             delattr(conn, f"lib_{conn.libname[lib]}")
-            print(f" - deleted: {lib}")
-        print("Done!")
+            logger.info(" - deleted: %s", lib)
+        logger.info("Done!")
 
 
 def delete_pas_connector(conn, libraries: Optional[List[str]] = None) -> None:
     """Delete PasConnector object."""
-    print(f"Deleting PasConnector database: '{conn.name}' ... ", end="")
+    logger.info("Deleting PasConnector database: '%s' ... ", conn.name)
     if libraries is None:
         shutil.rmtree(conn.path)
-        print(" Done!")
+        logger.info("Done!")
     else:
         for lib in libraries:
-            print()
-            shutil.rmtree(os.path.join(conn.path, lib))
-            print(f" - deleted: {lib}")
-        print("Done!")
+            shutil.rmtree(conn.path / lib)
+            logger.info(" - deleted: %s", lib)
+        logger.info("Done!")
 
 
 def delete_pastastore(pstore, libraries: Optional[List[str]] = None) -> None:
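Because the deletion helpers above now report through the module logger instead of print, their progress messages are silent under Python's default logging configuration (only WARNING and above reach the last-resort handler). A sketch of how a caller can surface them again:

    import logging

    # show INFO-level messages from pastastore.util
    logging.basicConfig(level=logging.INFO)

    # or use the package's own helper added in this release:
    # from pastastore.util import get_color_logger
    # get_color_logger(level="INFO")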
@@ -195,7 +452,13 @@ def validate_names(
         raise ValueError("Provide one of 's' (string) or 'd' (dict)!")
 
 
-def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
+def compare_models(
+    ml1: Model,
+    ml2: Model,
+    stats: List[str] = None,
+    detailed_comparison: bool = False,
+    style_output: bool = False,
+) -> pd.DataFrame:
     """Compare two Pastas models.
 
     Parameters
@@ -210,6 +473,9 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
         if True return DataFrame containing comparison details,
         by default False which returns True if models are equivalent
         or False if they are not
+    style_output : bool, optional
+        if True and detailed_comparison is True, return styled DataFrame
+        with colored output, by default False
 
     Returns
     -------
@@ -219,7 +485,6 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
     """
     df = pd.DataFrame(columns=["model 0", "model 1"])
     so1 = []  # for storing series_original
-    sv1 = []  # for storing series_validated
     ss1 = []  # for storing series
 
     for i, ml in enumerate([ml1, ml2]):
@@ -230,43 +495,20 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
             df.loc[f"- settings: {k}", f"model {i}"] = ml.settings.get(k)
 
         if i == 0:
-            oso = (
-                ml.oseries.series_original
-                if PASTAS_LEQ_022
-                else ml.oseries._series_original
-            )
+            oso = ml.oseries._series_original  # noqa: SLF001
             df.loc["oseries: series_original", f"model {i}"] = True
 
-            if PASTAS_LEQ_022:
-                osv = ml.oseries.series_validated
-                df.loc["oseries: series_validated", f"model {i}"] = True
-
             oss = ml.oseries.series
-            df.loc["oseries: series_series", f"model {i}"] = True
+            df.loc["oseries: series", f"model {i}"] = True
 
         elif i == 1:
             try:
-                assert_series_equal(
-                    oso,
-                    (
-                        ml.oseries.series_original
-                        if PASTAS_LEQ_022
-                        else ml.oseries._series_original
-                    ),
-                )
+                assert_series_equal(oso, ml.oseries._series_original)  # noqa: SLF001
                 compare_oso = True
             except (ValueError, AssertionError):
                 # series are not identical in length or index does not match
                 compare_oso = False
 
-            if PASTAS_LEQ_022:
-                try:
-                    assert_series_equal(osv, ml.oseries.series_validated)
-                    compare_osv = True
-                except (ValueError, AssertionError):
-                    # series are not identical in length or index does not match
-                    compare_osv = False
-
             try:
                 assert_series_equal(oss, ml.oseries.series)
                 compare_oss = True
@@ -275,17 +517,15 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                 compare_oss = False
 
             df.loc["oseries: series_original", f"model {i}"] = compare_oso
-
-            if PASTAS_LEQ_022:
-                df.loc["oseries: series_validated", f"model {i}"] = compare_osv
-
-            df.loc["oseries: series_series", f"model {i}"] = compare_oss
+            df.loc["oseries: series", f"model {i}"] = compare_oss
 
         for sm_name, sm in ml.stressmodels.items():
             df.loc[f"stressmodel: '{sm_name}'"] = sm_name
-            df.loc["- rfunc"] = sm.rfunc._name if sm.rfunc is not None else "NA"
+            df.loc["- rfunc"] = (
+                type(sm.rfunc).__name__ if sm.rfunc is not None else "NA"
+            )
 
-            if sm._name == "RechargeModel":
+            if type(sm).__name__ == "RechargeModel":
                 stresses = [sm.prec, sm.evap]
             else:
                 stresses = sm.stress
@@ -298,17 +538,9 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                 )
 
                 if i == 0:
-                    if PASTAS_LEQ_022:
-                        so1.append(ts.series_original.copy())
-                        sv1.append(ts.series_validated.copy())
-                    else:
-                        so1.append(ts._series_original.copy())
-
+                    so1.append(ts._series_original.copy())  # noqa: SLF001
                     ss1.append(ts.series.copy())
 
-                    if PASTAS_LEQ_022:
-                        df.loc[f" - {ts.name}: series_validated"] = True
-
                     df.loc[f" - {ts.name}: series_original"] = True
                     df.loc[f" - {ts.name}: series"] = True
 
@@ -318,31 +550,18 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                     try:
                         assert_series_equal(
                             so1[counter],
-                            (
-                                ts.series_original
-                                if PASTAS_LEQ_022
-                                else ts._series_original
-                            ),
+                            ts._series_original,  # noqa: SLF001
                         )
                         compare_so1 = True
                     except (ValueError, AssertionError):
                         compare_so1 = False
 
-                    if PASTAS_LEQ_022:
-                        try:
-                            assert_series_equal(sv1[counter], ts.series_validated)
-                            compare_sv1 = True
-                        except (ValueError, AssertionError):
-                            compare_sv1 = False
-
                     try:
                         assert_series_equal(ss1[counter], ts.series)
                         compare_ss1 = True
                     except (ValueError, AssertionError):
                         compare_ss1 = False
                     df.loc[f" - {ts.name}: series_original"] = compare_so1
-                    if PASTAS_LEQ_022:
-                        df.loc[f" - {ts.name}: series_validated"] = compare_sv1
                     df.loc[f" - {ts.name}: series"] = compare_ss1
 
                     counter += 1
@@ -380,7 +599,20 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
             df.loc[stats_idx, "comparison"] = b
 
     if detailed_comparison:
-        return df
+        if style_output:
+            return df.style.apply(
+                boolean_row_styler, column="comparison", axis=1
+            ).set_table_styles(
+                [
+                    {"selector": "th.col_heading", "props": [("text-align", "center")]},
+                    {
+                        "selector": "th.row_heading",
+                        "props": [("text-align", "left"), ("white-space", "pre")],
+                    },
+                ],
+            )
+        else:
+            return df
     else:
         return df["comparison"].iloc[1:].all()  # ignore name difference
 
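The new style_output flag feeds the detailed comparison through boolean_row_styler, coloring rows by the 'comparison' column. A minimal sketch of the extended call (the two solved pastas.Model objects are placeholders):

    from pastastore.util import compare_models

    # ml1, ml2: two solved pastas.Model instances
    styler = compare_models(ml1, ml2, detailed_comparison=True, style_output=True)
    # 'styler' is a pandas Styler: rows render green where the models agree
    # and red where they differ; use styler.to_html() outside a notebook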
@@ -533,7 +765,7 @@ def frontiers_checks(
     if modelnames is not None:
         models = modelnames
         if oseries is not None:
-            print(
+            logger.warning(
                 "Warning! Both 'modelnames' and 'oseries' provided,"
                 " only using 'modelnames'!"
             )
@@ -550,7 +782,9 @@ def frontiers_checks(
         ml = pstore.get_models(mlnam)
 
         if ml.parameters["optimal"].hasnans:
-            print(f"Warning! Skipping model '{mlnam}' because it is not solved!")
+            logger.warning(
+                "Warning! Skipping model '%s' because it is not solved!", mlnam
+            )
             continue
 
         checks = pd.DataFrame(columns=["stat", "threshold", "units", "check_passed"])
@@ -571,7 +805,7 @@ def frontiers_checks(
             noise = ml.noise()
             if noise is None:
                 noise = ml.residuals()
-                print(
+                logger.warning(
                     "Warning! Checking autocorrelation on the residuals not the noise!"
                 )
             if check2_test == "runs" or check2_test == "both":
@@ -605,7 +839,7 @@ def frontiers_checks(
         if check3_tmem:
             len_oseries_calib = (ml.settings["tmax"] - ml.settings["tmin"]).days
             for sm_name, sm in ml.stressmodels.items():
-                if sm._name == "WellModel":
+                if type(sm).__name__ == "WellModel":
                     nwells = sm.distances.index.size
                     for iw in range(nwells):
                         p = sm.get_parameters(model=ml, istress=iw)
@@ -638,7 +872,7 @@ def frontiers_checks(
         # Check 4 - Uncertainty Gain
         if check4_gain:
             for sm_name, sm in ml.stressmodels.items():
-                if sm._name == "WellModel":
+                if type(sm).__name__ == "WellModel":
                     for iw in range(sm.distances.index.size):
                         p = sm.get_parameters(model=ml, istress=iw)
                         gain = sm.rfunc.gain(p)
@@ -663,10 +897,10 @@ def frontiers_checks(
                             check_gain_passed,
                         )
                     continue
-                elif sm._name == "LinearTrend":
+                elif type(sm).__name__ == "LinearTrend":
                     gain = ml.parameters.loc[f"{sm_name}_a", "optimal"]
                     gain_std = ml.parameters.loc[f"{sm_name}_a", "stderr"]
-                elif sm._name == "StepModel":
+                elif type(sm).__name__ == "StepModel":
                     gain = ml.parameters.loc[f"{sm_name}_d", "optimal"]
                     gain_std = ml.parameters.loc[f"{sm_name}_d", "stderr"]
                 else:
@@ -689,7 +923,7 @@ def frontiers_checks(
 
         # Check 5 - Parameter Bounds
        if check5_parambounds:
-            upper, lower = ml._check_parameters_bounds()
+            upper, lower = ml._check_parameters_bounds()  # noqa: SLF001
             for param in ml.parameters.index:
                 bounds = (
                     ml.parameters.loc[param, "pmin"],
@@ -756,7 +990,7 @@ def frontiers_aic_select(
         for o in oseries:
             modelnames += pstore.oseries_models[o]
     elif oseries is not None:
-        print(
+        logger.warning(
             "Warning! Both 'modelnames' and 'oseries' provided, using only 'modelnames'"
         )