pastastore 1.10.2__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pastastore/typing.py ADDED
@@ -0,0 +1,12 @@
+ """Typing definitions for PastasStore."""
+
+ from typing import Literal, Union
+
+ import pandas as pd
+
+ FrameOrSeriesUnion = Union[pd.DataFrame, pd.Series]
+
+ # Literal types for library names
+ TimeSeriesLibs = Literal["oseries", "stresses"]
+ PastasLibs = Literal["oseries", "stresses", "models"]
+ AllLibs = Literal["oseries", "stresses", "models", "oseries_models", "stresses_models"]
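
These Literal aliases give a type checker a closed set of valid library names. A minimal usage sketch (the open_library function below is hypothetical, not part of pastastore):

    from typing import Literal

    TimeSeriesLibs = Literal["oseries", "stresses"]  # mirrors pastastore/typing.py

    def open_library(libname: TimeSeriesLibs) -> str:
        # mypy/pyright reject any string outside the Literal
        return f"opening library: {libname}"

    open_library("oseries")  # accepted
    open_library("models")   # flagged statically, though it still runs at runtime
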
pastastore/util.py CHANGED
@@ -1,27 +1,287 @@
  """Useful utilities for pastastore."""

+ import json
+ import logging
  import os
  import shutil
+ import sys
+ from pathlib import Path
  from typing import Dict, List, Optional, Union

  import numpy as np
  import pandas as pd
+ from colorama import Back, Fore, Style
  from numpy.lib._iotools import NameValidator
  from pandas.testing import assert_series_equal
+ from pastas import Model
+ from pastas.io.pas import PastasEncoder
  from pastas.stats.tests import runs_test, stoffer_toloi
  from tqdm.auto import tqdm

- from pastastore.version import PASTAS_LEQ_022
+ from pastastore.styling import boolean_row_styler
+ from pastastore.typing import TimeSeriesLibs
+
+ logger = logging.getLogger(__name__)


  def _custom_warning(message, category=UserWarning, filename="", lineno=-1, *args):
      print(f"{filename}:{lineno}: {category.__name__}: {message}")


+ class ZipUtils:
+     """Utility class for zip file operations."""
+
+     def __init__(self, pstore):
+         self.pstore = pstore
+
+     def _stored_series_to_json(
+         self,
+         libname: TimeSeriesLibs,
+         names: Optional[Union[list, str]] = None,
+         squeeze: bool = True,
+         progressbar: bool = False,
+     ):
+         """Write stored series to JSON.
+
+         Parameters
+         ----------
+         libname : str
+             library name
+         names : Optional[Union[list, str]], optional
+             names of series, by default None
+         squeeze : bool, optional
+             return single entry as json string instead
+             of list, by default True
+         progressbar : bool, optional
+             show progressbar, by default False
+
+         Returns
+         -------
+         files : list or str
+             list of series converted to JSON string or single string
+             if single entry is returned and squeeze is True
+         """
+         names = self.pstore.parse_names(names, libname=libname)
+         files = []
+         for n in tqdm(names, desc=libname) if progressbar else names:
+             s = self.pstore.conn._get_series(libname, n, progressbar=False)
+             if isinstance(s, pd.Series):
+                 s = s.to_frame()
+             try:
+                 sjson = s.to_json(orient="columns")
+             except ValueError as e:
+                 msg = (
+                     f"DatetimeIndex of '{n}' probably contains NaT "
+                     "or duplicate timestamps!"
+                 )
+                 raise ValueError(msg) from e
+             files.append(sjson)
+         if len(files) == 1 and squeeze:
+             return files[0]
+         else:
+             return files
+
+     def _stored_metadata_to_json(
+         self,
+         libname: TimeSeriesLibs,
+         names: Optional[Union[list, str]] = None,
+         squeeze: bool = True,
+         progressbar: bool = False,
+     ):
+         """Write metadata from stored series to JSON.
+
+         Parameters
+         ----------
+         libname : str
+             library containing series
+         names : Optional[Union[list, str]], optional
+             names to parse, by default None
+         squeeze : bool, optional
+             return single entry as json string instead of list, by default True
+         progressbar : bool, optional
+             show progressbar, by default False
+
+         Returns
+         -------
+         files : list or str
+             list of json string
+         """
+         names = self.pstore.parse_names(names, libname=libname)
+         files = []
+         for n in tqdm(names, desc=libname) if progressbar else names:
+             meta = self.pstore.get_metadata(libname, n, as_frame=False)
+             meta_json = json.dumps(meta, cls=PastasEncoder, indent=4)
+             files.append(meta_json)
+         if len(files) == 1 and squeeze:
+             return files[0]
+         else:
+             return files
+
+     def series_to_archive(
+         self,
+         archive,
+         libname: TimeSeriesLibs,
+         names: Optional[Union[list, str]] = None,
+         progressbar: bool = True,
+     ):
+         """Write DataFrame or Series to zipfile (internal method).
+
+         Parameters
+         ----------
+         archive : zipfile.ZipFile
+             reference to an archive to write data to
+         libname : str
+             name of the library to write to zipfile
+         names : str or list of str, optional
+             names of the time series to write to archive, by default None,
+             which writes all time series to archive
+         progressbar : bool, optional
+             show progressbar, by default True
+         """
+         names = self.pstore.parse_names(names, libname=libname)
+         for n in tqdm(names, desc=libname) if progressbar else names:
+             sjson = self._stored_series_to_json(
+                 libname, names=n, progressbar=False, squeeze=True
+             )
+             meta_json = self._stored_metadata_to_json(
+                 libname, names=n, progressbar=False, squeeze=True
+             )
+             archive.writestr(f"{libname}/{n}.pas", sjson)
+             archive.writestr(f"{libname}/{n}_meta.pas", meta_json)
+
+     def models_to_archive(self, archive, names=None, progressbar=True):
+         """Write pastas.Model to zipfile (internal method).
+
+         Parameters
+         ----------
+         archive : zipfile.ZipFile
+             reference to an archive to write data to
+         names : str or list of str, optional
+             names of the models to write to archive, by default None,
+             which writes all models to archive
+         progressbar : bool, optional
+             show progressbar, by default True
+         """
+         names = self.pstore.parse_names(names, libname="models")
+         for n in tqdm(names, desc="models") if progressbar else names:
+             m = self.pstore.get_models(n, return_dict=True)
+             jsondict = json.dumps(m, cls=PastasEncoder, indent=4)
+             archive.writestr(f"models/{n}.pas", jsondict)
+
+
+ class ColoredFormatter(logging.Formatter):
+     """Colored log formatter.
+
+     Taken from
+     https://gist.github.com/joshbode/58fac7ababc700f51e2a9ecdebe563ad
+     """
+
+     def __init__(
+         self, *args, colors: Optional[Dict[str, str]] = None, **kwargs
+     ) -> None:
+         """Initialize the formatter with specified format strings."""
+         super().__init__(*args, **kwargs)
+
+         self.colors = colors if colors else {}
+
+     def format(self, record) -> str:
+         """Format the specified record as text."""
+         record.color = self.colors.get(record.levelname, "")
+         record.reset = Style.RESET_ALL
+
+         return super().format(record)
+
+
+ def get_color_logger(level="INFO", logger_name=None):
+     """Get a logger with colored output.
+
+     Parameters
+     ----------
+     level : str, optional
+         The logging level to set for the logger. Default is "INFO".
+
+     Returns
+     -------
+     logger : logging.Logger
+         The configured logger object.
+     """
+     if level == "DEBUG":
+         FORMAT = "{color}{levelname}:{name}.{funcName}:{lineno}:{message}{reset}"
+     else:
+         FORMAT = "{color}{message}{reset}"
+     formatter = ColoredFormatter(
+         FORMAT,
+         style="{",
+         datefmt="%Y-%m-%d %H:%M:%S",
+         colors={
+             "DEBUG": Fore.CYAN,
+             "INFO": Fore.GREEN,
+             "WARNING": Fore.YELLOW,
+             "ERROR": Fore.RED,
+             "CRITICAL": Fore.RED + Back.WHITE + Style.BRIGHT,
+         },
+     )
+
+     handler = logging.StreamHandler(sys.stdout)
+     handler.setFormatter(formatter)
+
+     logger = logging.getLogger(logger_name)
+     logger.handlers[:] = []
+     logger.addHandler(handler)
+     logger.setLevel(getattr(logging, level))
+
+     logging.captureWarnings(True)
+     return logger
+
+
  class ItemInLibraryException(Exception):
      """Exception when item is already in library."""

-     pass
+
+ class SeriesUsedByModel(Exception):
+     """Exception raised when a series is used by a model."""
+
+
+ def series_from_json(fjson: str, squeeze: bool = True):
+     """Load time series from JSON.
+
+     Parameters
+     ----------
+     fjson : str
+         path to file
+     squeeze : bool, optional
+         squeeze time series object to obtain pandas Series
+
+     Returns
+     -------
+     s : pd.DataFrame
+         DataFrame containing time series
+     """
+     s = pd.read_json(fjson, orient="columns", precise_float=True, dtype=False)
+     if not isinstance(s.index, pd.DatetimeIndex):
+         s.index = pd.to_datetime(s.index, unit="ms")
+     s = s.sort_index()  # needed for some reason ...
+     if squeeze:
+         return s.squeeze(axis="columns")
+     return s
+
+
+ def metadata_from_json(fjson: str):
+     """Load metadata dictionary from JSON.
+
+     Parameters
+     ----------
+     fjson : str
+         path to file
+
+     Returns
+     -------
+     meta : dict
+         dictionary containing metadata
+     """
+     with open(fjson, "r", encoding="utf-8") as f:
+         meta = json.load(f)
+     return meta


  def delete_arcticdb_connector(
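
The hunk above moves the zip import/export helpers into a ZipUtils class, adds colored logging, and introduces plain-JSON loaders. A round-trip sketch (not from the package; the file name is made up) showing that a series serialized the way _stored_series_to_json does it can be read back with series_from_json:

    import pandas as pd

    # serialize to the same orient="columns" JSON the ZipUtils helpers
    # write into .pas archive entries
    s = pd.Series(
        [1.0, 2.0, 3.0],
        index=pd.date_range("2024-01-01", periods=3),
        name="well1",
    )
    with open("well1.pas", "w", encoding="utf-8") as f:
        f.write(s.to_frame().to_json(orient="columns"))

    s2 = series_from_json("well1.pas")  # squeezed back to a pd.Series
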
@@ -54,7 +314,7 @@ def delete_arcticdb_connector(

      arc = arcticdb.Arctic(uri)

-     print(f"Deleting ArcticDBConnector database: '{name}' ... ", end="")
+     logger.info("Deleting ArcticDBConnector database: '%s' ... ", name)
      # get library names
      if libraries is None:
          libs = []
@@ -68,53 +328,49 @@ def delete_arcticdb_connector(

      for lib in libs:
          arc.delete_library(lib)
-         # shutil.rmtree(os.path.join(conn.uri.split("//")[-1], lib))

          if libraries is not None:
-             print()
-             print(f" - deleted: {lib}")
+             logger.info(" - deleted: %s", lib)

      # delete .pastastore file if entire pastastore is deleted
      remaining_libs = [
          ilib for ilib in arc.list_libraries() if ilib.split(".")[0] == name
      ]
      if remaining_libs == 0:
-         os.unlink(os.path.join(uri.split("//")[-1], f"{name}.pastastore"))
+         os.unlink(uri.split("//")[-1] / f"{name}.pastastore")

      # check if any remaining libraries in lmdb dir, if none, delete entire folder
      remaining = arc.list_libraries()
      if len(remaining) == 0:
-         shutil.rmtree(os.path.join(conn.uri.split("//")[-1]))
+         shutil.rmtree(Path(conn.uri.split("://")[-1]))

-     print("Done!")
+     logger.info("Done!")


  def delete_dict_connector(conn, libraries: Optional[List[str]] = None) -> None:
      """Delete DictConnector object."""
-     print(f"Deleting DictConnector: '{conn.name}' ... ", end="")
+     logger.info("Deleting DictConnector: '%s' ... ", conn.name)
      if libraries is None:
          del conn
-         print(" Done!")
+         logger.info("Done!")
      else:
          for lib in libraries:
-             print()
              delattr(conn, f"lib_{conn.libname[lib]}")
-             print(f" - deleted: {lib}")
-         print("Done!")
+             logger.info(" - deleted: %s", lib)
+         logger.info("Done!")


  def delete_pas_connector(conn, libraries: Optional[List[str]] = None) -> None:
      """Delete PasConnector object."""
-     print(f"Deleting PasConnector database: '{conn.name}' ... ", end="")
+     logger.info("Deleting PasConnector database: '%s' ... ", conn.name)
      if libraries is None:
          shutil.rmtree(conn.path)
-         print(" Done!")
+         logger.info("Done!")
      else:
          for lib in libraries:
-             print()
-             shutil.rmtree(os.path.join(conn.path, lib))
-             print(f" - deleted: {lib}")
-         print("Done!")
+             shutil.rmtree(conn.path / lib)
+             logger.info(" - deleted: %s", lib)
+         logger.info("Done!")


  def delete_pastastore(pstore, libraries: Optional[List[str]] = None) -> None:
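
Because the delete helpers above now report through the module logger instead of print, their messages stay silent until a handler is attached. A minimal sketch using only the standard library (get_color_logger from the first hunk is an alternative):

    import logging

    # route pastastore log records (INFO and up) to the root handler
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("pastastore").setLevel(logging.INFO)
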
@@ -195,7 +451,13 @@ def validate_names(
          raise ValueError("Provide one of 's' (string) or 'd' (dict)!")


- def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
+ def compare_models(
+     ml1: Model,
+     ml2: Model,
+     stats: List[str] = None,
+     detailed_comparison: bool = False,
+     style_output: bool = False,
+ ) -> pd.DataFrame:
      """Compare two Pastas models.

      Parameters
@@ -210,6 +472,9 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
          if True return DataFrame containing comparison details,
          by default False which returns True if models are equivalent
          or False if they are not
+     style_output : bool, optional
+         if True and detailed_comparison is True, return styled DataFrame
+         with colored output, by default False

      Returns
      -------
@@ -219,7 +484,6 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
      """
      df = pd.DataFrame(columns=["model 0", "model 1"])
      so1 = []  # for storing series_original
-     sv1 = []  # for storing series_validated
      ss1 = []  # for storing series

      for i, ml in enumerate([ml1, ml2]):
@@ -230,43 +494,20 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
              df.loc[f"- settings: {k}", f"model {i}"] = ml.settings.get(k)

          if i == 0:
-             oso = (
-                 ml.oseries.series_original
-                 if PASTAS_LEQ_022
-                 else ml.oseries._series_original
-             )
+             oso = ml.oseries._series_original  # noqa: SLF001
              df.loc["oseries: series_original", f"model {i}"] = True

-             if PASTAS_LEQ_022:
-                 osv = ml.oseries.series_validated
-                 df.loc["oseries: series_validated", f"model {i}"] = True
-
              oss = ml.oseries.series
-             df.loc["oseries: series_series", f"model {i}"] = True
+             df.loc["oseries: series", f"model {i}"] = True

          elif i == 1:
              try:
-                 assert_series_equal(
-                     oso,
-                     (
-                         ml.oseries.series_original
-                         if PASTAS_LEQ_022
-                         else ml.oseries._series_original
-                     ),
-                 )
+                 assert_series_equal(oso, ml.oseries._series_original)  # noqa: SLF001
                  compare_oso = True
              except (ValueError, AssertionError):
                  # series are not identical in length or index does not match
                  compare_oso = False

-             if PASTAS_LEQ_022:
-                 try:
-                     assert_series_equal(osv, ml.oseries.series_validated)
-                     compare_osv = True
-                 except (ValueError, AssertionError):
-                     # series are not identical in length or index does not match
-                     compare_osv = False
-
              try:
                  assert_series_equal(oss, ml.oseries.series)
                  compare_oss = True
@@ -275,17 +516,15 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                  compare_oss = False

          df.loc["oseries: series_original", f"model {i}"] = compare_oso
-
-         if PASTAS_LEQ_022:
-             df.loc["oseries: series_validated", f"model {i}"] = compare_osv
-
-         df.loc["oseries: series_series", f"model {i}"] = compare_oss
+         df.loc["oseries: series", f"model {i}"] = compare_oss

          for sm_name, sm in ml.stressmodels.items():
              df.loc[f"stressmodel: '{sm_name}'"] = sm_name
-             df.loc["- rfunc"] = sm.rfunc._name if sm.rfunc is not None else "NA"
+             df.loc["- rfunc"] = (
+                 type(sm.rfunc).__name__ if sm.rfunc is not None else "NA"
+             )

-             if sm._name == "RechargeModel":
+             if type(sm).__name__ == "RechargeModel":
                  stresses = [sm.prec, sm.evap]
              else:
                  stresses = sm.stress
@@ -298,17 +537,9 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                  )

                  if i == 0:
-                     if PASTAS_LEQ_022:
-                         so1.append(ts.series_original.copy())
-                         sv1.append(ts.series_validated.copy())
-                     else:
-                         so1.append(ts._series_original.copy())
-
+                     so1.append(ts._series_original.copy())  # noqa: SLF001
                      ss1.append(ts.series.copy())

-                     if PASTAS_LEQ_022:
-                         df.loc[f" - {ts.name}: series_validated"] = True
-
                      df.loc[f" - {ts.name}: series_original"] = True
                      df.loc[f" - {ts.name}: series"] = True

@@ -318,31 +549,18 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
                      try:
                          assert_series_equal(
                              so1[counter],
-                             (
-                                 ts.series_original
-                                 if PASTAS_LEQ_022
-                                 else ts._series_original
-                             ),
+                             ts._series_original,  # noqa: SLF001
                          )
                          compare_so1 = True
                      except (ValueError, AssertionError):
                          compare_so1 = False

-                     if PASTAS_LEQ_022:
-                         try:
-                             assert_series_equal(sv1[counter], ts.series_validated)
-                             compare_sv1 = True
-                         except (ValueError, AssertionError):
-                             compare_sv1 = False
-
                      try:
                          assert_series_equal(ss1[counter], ts.series)
                          compare_ss1 = True
                      except (ValueError, AssertionError):
                          compare_ss1 = False
                      df.loc[f" - {ts.name}: series_original"] = compare_so1
-                     if PASTAS_LEQ_022:
-                         df.loc[f" - {ts.name}: series_validated"] = compare_sv1
                      df.loc[f" - {ts.name}: series"] = compare_ss1

                      counter += 1
@@ -380,7 +598,20 @@ def compare_models(ml1, ml2, stats=None, detailed_comparison=False):
          df.loc[stats_idx, "comparison"] = b

      if detailed_comparison:
-         return df
+         if style_output:
+             return df.style.apply(
+                 boolean_row_styler, column="comparison", axis=1
+             ).set_table_styles(
+                 [
+                     {"selector": "th.col_heading", "props": [("text-align", "center")]},
+                     {
+                         "selector": "th.row_heading",
+                         "props": [("text-align", "left"), ("white-space", "pre")],
+                     },
+                 ],
+             )
+         else:
+             return df
      else:
          return df["comparison"].iloc[1:].all()  # ignore name difference
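
A usage sketch for the new style_output flag (the store and model names are hypothetical): with detailed_comparison=True it returns a pandas Styler that color-codes the comparison column.

    ml1 = pstore.get_models("model_A")  # hypothetical model names
    ml2 = pstore.get_models("model_B")
    styled = compare_models(ml1, ml2, detailed_comparison=True, style_output=True)
    styled.to_html("comparison.html")  # or display the Styler directly in a notebook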
 
@@ -533,7 +764,7 @@ def frontiers_checks(
      if modelnames is not None:
          models = modelnames
          if oseries is not None:
-             print(
+             logger.warning(
                  "Warning! Both 'modelnames' and 'oseries' provided,"
                  " only using 'modelnames'!"
              )
@@ -550,7 +781,9 @@ def frontiers_checks(
          ml = pstore.get_models(mlnam)

          if ml.parameters["optimal"].hasnans:
-             print(f"Warning! Skipping model '{mlnam}' because it is not solved!")
+             logger.warning(
+                 "Warning! Skipping model '%s' because it is not solved!", mlnam
+             )
              continue

          checks = pd.DataFrame(columns=["stat", "threshold", "units", "check_passed"])
@@ -571,7 +804,7 @@ def frontiers_checks(
              noise = ml.noise()
              if noise is None:
                  noise = ml.residuals()
-                 print(
+                 logger.warning(
                      "Warning! Checking autocorrelation on the residuals not the noise!"
                  )
              if check2_test == "runs" or check2_test == "both":
@@ -605,7 +838,7 @@ def frontiers_checks(
          if check3_tmem:
              len_oseries_calib = (ml.settings["tmax"] - ml.settings["tmin"]).days
              for sm_name, sm in ml.stressmodels.items():
-                 if sm._name == "WellModel":
+                 if type(sm).__name__ == "WellModel":
                      nwells = sm.distances.index.size
                      for iw in range(nwells):
                          p = sm.get_parameters(model=ml, istress=iw)
@@ -638,7 +871,7 @@ def frontiers_checks(
          # Check 4 - Uncertainty Gain
          if check4_gain:
              for sm_name, sm in ml.stressmodels.items():
-                 if sm._name == "WellModel":
+                 if type(sm).__name__ == "WellModel":
                      for iw in range(sm.distances.index.size):
                          p = sm.get_parameters(model=ml, istress=iw)
                          gain = sm.rfunc.gain(p)
@@ -663,10 +896,10 @@ def frontiers_checks(
                              check_gain_passed,
                          )
                          continue
-                 elif sm._name == "LinearTrend":
+                 elif type(sm).__name__ == "LinearTrend":
                      gain = ml.parameters.loc[f"{sm_name}_a", "optimal"]
                      gain_std = ml.parameters.loc[f"{sm_name}_a", "stderr"]
-                 elif sm._name == "StepModel":
+                 elif type(sm).__name__ == "StepModel":
                      gain = ml.parameters.loc[f"{sm_name}_d", "optimal"]
                      gain_std = ml.parameters.loc[f"{sm_name}_d", "stderr"]
                  else:
@@ -689,7 +922,7 @@ def frontiers_checks(

          # Check 5 - Parameter Bounds
          if check5_parambounds:
-             upper, lower = ml._check_parameters_bounds()
+             upper, lower = ml._check_parameters_bounds()  # noqa: SLF001
              for param in ml.parameters.index:
                  bounds = (
                      ml.parameters.loc[param, "pmin"],
@@ -756,7 +989,7 @@ def frontiers_aic_select(
          for o in oseries:
              modelnames += pstore.oseries_models[o]
      elif oseries is not None:
-         print(
+         logger.warning(
              "Warning! Both 'modelnames' and 'oseries' provided, using only 'modelnames'"
          )
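
Several hunks above swap the private sm._name attribute for type(sm).__name__. A standalone sketch of the pattern (plain Python, no pastas required):

    class WellModel:  # stand-in for pastas.stressmodels.WellModel
        pass

    sm = WellModel()
    # dispatch on the class name without reading a private attribute
    if type(sm).__name__ == "WellModel":
        print("apply WellModel-specific gain checks")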