sierra-research 1.3.5__py3-none-any.whl → 1.3.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. sierra/core/config.py +1 -1
  2. sierra/core/generators/exp_creator.py +1 -1
  3. sierra/core/graphs/heatmap.py +1 -1
  4. sierra/core/graphs/stacked_line_graph.py +3 -1
  5. sierra/core/graphs/summary_line_graph.py +3 -1
  6. sierra/core/pipeline/stage3/statistics_calculator.py +8 -8
  7. sierra/core/pipeline/stage4/graph_collator.py +5 -4
  8. sierra/core/startup.py +2 -2
  9. sierra/core/utils.py +1 -1
  10. sierra/version.py +1 -1
  11. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man1/sierra-cli.1 +134 -134
  12. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra-examples.7 +54 -34
  13. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra-exec-envs.7 +12 -12
  14. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra-glossary.7 +12 -12
  15. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra-platforms.7 +31 -31
  16. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra-usage.7 +85 -81
  17. {sierra_research-1.3.5.data → sierra_research-1.3.11.data}/data/share/man/man7/sierra.7 +5 -5
  18. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/METADATA +19 -27
  19. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/RECORD +23 -23
  20. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/WHEEL +1 -1
  21. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/LICENSE +0 -0
  22. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/entry_points.txt +0 -0
  23. {sierra_research-1.3.5.dist-info → sierra_research-1.3.11.dist-info}/top_level.txt +0 -0
sierra/core/config.py CHANGED
@@ -45,7 +45,7 @@ def mpl_init():
     import matplotlib.pyplot as plt
 
     # Set MPL style
-    plt.style.use('seaborn-colorblind')
+    plt.style.use('seaborn-v0_8-colorblind')
 
 
     # Actually initialize matplotlib
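The style rename tracks matplotlib 3.6, which deprecated the bare 'seaborn-*' style names in favour of the 'seaborn-v0_8-*' aliases (later releases dropped the old names entirely). As a rough, illustrative sketch only (not SIERRA code), a version-tolerant caller could pick whichever alias the installed matplotlib provides:

# Illustrative sketch only: fall back gracefully if the installed matplotlib
# still ships the pre-3.6 style name.
import matplotlib.pyplot as plt

def use_colorblind_style() -> None:
    # Newer matplotlib exposes 'seaborn-v0_8-colorblind'; older releases
    # only know 'seaborn-colorblind'.
    for name in ('seaborn-v0_8-colorblind', 'seaborn-colorblind'):
        if name in plt.style.available:
            plt.style.use(name)
            return
    plt.style.use('default')  # last resort: matplotlib's built-in default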
sierra/core/generators/exp_creator.py CHANGED
@@ -148,7 +148,7 @@ class ExpCreator:
                      run_exp_def: definition.XMLExpDef,
                      cmds_generator,
                      run_num: int) -> None:
-        run_output_dir = "{0}_{1}_output".format(self.template_stem, run_num)
+        run_output_dir = "{0}_run{1}_output".format(self.template_stem, run_num)
 
         # If the project defined per-run configuration, apply
         # it. Otherwise, just apply the configuration in the SIERRA core.
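The new "run" infix changes the per-run output directory names, and the gathering regex in statistics_calculator.py (further down in this diff) is updated to match. An illustrative sketch of how the two line up; "template" is a placeholder stem, not a real SIERRA input file:

# Illustrative sketch only: the new directory names vs. the updated regex.
import re

template_stem = "template"
run_output_dir = "{0}_run{1}_output".format(template_stem, 3)  # template_run3_output

pattern = "{}_run{}_output".format(re.escape(template_stem), r'\d+')
assert re.match(pattern, run_output_dir) is not None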
sierra/core/graphs/heatmap.py CHANGED
@@ -211,7 +211,7 @@ class DualHeatmap:
         # Scaffold graph. We can use either dataframe for setting the graph
         # size; we assume they have the same dimensions.
         #
-        fig, axes = plt.subplots(ncols=2)
+        fig, axes = plt.subplots(nrows=1, ncols=2)
         Heatmap.set_graph_size(dfs[0], fig)
 
         y = np.arange(len(dfs[0].columns))
sierra/core/graphs/stacked_line_graph.py CHANGED
@@ -222,7 +222,9 @@ class StackedLineGraph:
 
         return dfs
 
-    def _read_models(self) -> tp.Tuple[pd.DataFrame, tp.List[str]]:
+    # 2024/09/13 [JRH]: The union is for compatability with type checkers in
+    # python {3.8,3.11}.
+    def _read_models(self) -> tp.Tuple[pd.DataFrame, tp.Union[tp.List[str], tp.List[bytes]]]:
         if self.model_root is not None:
             model_fpath = self.model_root / \
                 (self.input_stem + config.kModelsExt['model'])
sierra/core/graphs/summary_line_graph.py CHANGED
@@ -332,7 +332,9 @@ class SummaryLineGraph:
 
         return dfs
 
-    def _read_models(self) -> tp.Tuple[pd.DataFrame, tp.List[str]]:
+    # 2024/09/13 [JRH]: The union is for compatability with type checkers in
+    # python {3.8,3.11}.
+    def _read_models(self) -> tp.Tuple[pd.DataFrame, tp.Union[tp.List[str], tp.List[bytes]]]:
         if self.model_root is None:
             return (None, [])
 
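Both graph classes widen the legend part of the `_read_models()` return type to `tp.Union[tp.List[str], tp.List[bytes]]`; per the in-code comment this keeps type checkers happy across Python 3.8 and 3.11. A hedged sketch of what consuming the widened type can look like; `plot_legend` and its decode step are illustrative, not SIERRA code:

# Illustrative sketch only: handling a legend that may arrive as str or bytes.
import typing as tp

def plot_legend(labels: tp.Union[tp.List[str], tp.List[bytes]]) -> tp.List[str]:
    # Normalize to str so downstream matplotlib legend calls accept the values.
    return [l.decode() if isinstance(l, bytes) else l for l in labels]

assert plot_legend([b"model1", b"model2"]) == ["model1", "model2"]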
sierra/core/pipeline/stage3/statistics_calculator.py CHANGED
@@ -229,11 +229,11 @@ class ExpCSVGatherer:
 
         self.logger.info('Processing .csvs: %s...', exp_output_root.name)
 
-        pattern = "{}_{}_output".format(re.escape(self.gather_opts['template_input_leaf']),
-                                        r'\d+')
+        pattern = "{}_run{}_output".format(re.escape(self.gather_opts['template_input_leaf']),
+                                           r'\d+')
 
         runs = list(exp_output_root.iterdir())
-        assert(all(re.match(pattern, r.name) for r in runs)),\
+        assert (all(re.match(pattern, r.name) for r in runs)), \
            f"Extra files/not all dirs in '{exp_output_root}' are exp runs"
 
         # Maps (unique .csv stem, optional parent dir) to the averaged dataframe
@@ -299,7 +299,7 @@ class ExpCSVGatherer:
         reader = storage.DataFrameReader(self.gather_opts['storage_medium'])
         df = reader(item_path, index_col=False)
 
-        if df.dtypes[0] == 'object':
+        if df.dtypes.iloc[0] == 'object':
             df[df.columns[0]] = df[df.columns[0]].apply(lambda x: float(x))
 
         if item not in gathered:
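`df.dtypes` is a pandas Series, and looking it up with a plain integer subscript (positional intent through `Series.__getitem__`) is deprecated in pandas 2.x; `.iloc[0]` makes the positional access explicit. An illustrative sketch of the distinction on a throwaway frame (not SIERRA data):

# Illustrative sketch only: positional vs. label-based access on df.dtypes.
import pandas as pd

df = pd.DataFrame({"t": ["0.0", "1.0"], "value": [1, 2]})

first_dtype = df.dtypes.iloc[0]        # positional: dtype of the first column
same_dtype = df.dtypes[df.columns[0]]  # label-based equivalent
assert first_dtype == same_dtype == object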
@@ -373,7 +373,7 @@ class ExpCSVGatherer:
                                     str(path1))
                 continue
 
-            assert (utils.path_exists(path1) and utils.path_exists(path2)),\
+            assert (utils.path_exists(path1) and utils.path_exists(path2)), \
                 f"Either {path1} or {path2} does not exist"
 
             # Verify both dataframes have same # columns, and that
@@ -385,15 +385,15 @@ class ExpCSVGatherer:
             assert (len(df1.columns) == len(df2.columns)), \
                 (f"Dataframes from {path1} and {path2} do not have "
                  "the same # columns")
-            assert(sorted(df1.columns) == sorted(df2.columns)),\
+            assert (sorted(df1.columns) == sorted(df2.columns)), \
                 f"Columns from {path1} and {path2} not identical"
 
             # Verify the length of all columns in both dataframes is the same
             for c1 in df1.columns:
-                assert(all(len(df1[c1]) == len(df1[c2]) for c2 in df1.columns)),\
+                assert (all(len(df1[c1]) == len(df1[c2]) for c2 in df1.columns)), \
                     f"Not all columns from {path1} have same length"
 
-                assert(all(len(df1[c1]) == len(df2[c2]) for c2 in df1.columns)),\
+                assert (all(len(df1[c1]) == len(df2[c2]) for c2 in df1.columns)), \
                     (f"Not all columns from {path1} and {path2} have "
                      "the same length")
 
sierra/core/pipeline/stage4/graph_collator.py CHANGED
@@ -124,7 +124,7 @@ class UnivarGraphCollator:
 
         data_df = storage.DataFrameReader('storage.csv')(csv_ipath)
 
-        assert target['col'] in data_df.columns.values,\
+        assert target['col'] in data_df.columns.values, \
             "{0} not in columns of {1}".format(target['col'],
                                                target['src_stem'] + stat.df_ext)
 
@@ -221,7 +221,7 @@ class BivarGraphCollator:
 
         data_df = storage.DataFrameReader('storage.csv')(csv_ipath)
 
-        assert target['col'] in data_df.columns.values,\
+        assert target['col'] in data_df.columns.values, \
             "{0} not in columns of {1}, which has {2}".format(target['col'],
                                                               csv_ipath,
                                                               data_df.columns)
@@ -238,10 +238,11 @@ class BivarGraphCollator:
         # in sequence, to generate a SEQUENCE of 2D dataframes.
         for row in data_df[target['col']].index:
             if row in stat.df_seq.keys():
-                stat.df_seq[row].loc[xlabel][ylabel] = data_df[target['col']][row]
+                stat.df_seq[row].loc[xlabel][ylabel] = data_df.loc[row,
+                                                                   target['col']]
             else:
                 df = pd.DataFrame(columns=stat.ylabels, index=stat.xlabels)
-                df.loc[xlabel][ylabel] = data_df[target['col']][row]
+                df.loc[xlabel][ylabel] = data_df.loc[row, target['col']]
                 stat.df_seq[row] = df
 
 
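The right-hand side now reads the scalar with a single `.loc[row, col]` lookup instead of the chained `data_df[target['col']][row]`, avoiding double indexing (and the usual pandas chained-indexing caveats) when pulling one cell. An illustrative sketch of the two access styles on a throwaway frame (not SIERRA data):

# Illustrative sketch only: single .loc lookup vs. chained indexing for one cell.
import pandas as pd

data_df = pd.DataFrame({"col": [10, 20, 30]})
row = 1

chained = data_df["col"][row]     # two lookups: Series first, then element
direct = data_df.loc[row, "col"]  # one label-based lookup
assert chained == direct == 20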
sierra/core/startup.py CHANGED
@@ -74,8 +74,8 @@ def _linux_pkg_checks() -> None:
 
     dist = distro.id()
     os_info = distro.os_release_info()
-    if any(candidate in os_info['id_like'] for candidate in ['debian',
-                                                             'ubuntu']):
+    if any(candidate in os_info['id'] for candidate in ['debian',
+                                                        'ubuntu']):
         _apt_pkg_checks(dist)
     else:
         logging.warning(("Unknown Linux distro '%s' detected: skipping package "
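The check now keys on `os_info['id']` (the distro's own ID) rather than `os_info['id_like']`. On Ubuntu, /etc/os-release typically has ID=ubuntu and ID_LIKE=debian, while on Debian itself ID_LIKE is usually absent, so ID is the more reliable key of the two. An illustrative sketch; the dict literals approximate what `distro.os_release_info()` might return and the `.get` fallback is a defensive variation, not the changed code itself:

# Illustrative sketch only: why keying on 'id' is more robust than 'id_like'.
ubuntu_info = {"id": "ubuntu", "id_like": "debian"}
debian_info = {"id": "debian"}  # ID_LIKE is commonly absent on Debian

def is_debian_family(os_info: dict) -> bool:
    return any(candidate in os_info.get("id", "")
               for candidate in ("debian", "ubuntu"))

assert is_debian_family(ubuntu_info)
assert is_debian_family(debian_info)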
sierra/core/utils.py CHANGED
@@ -365,7 +365,7 @@ def df_fill(df: pd.DataFrame, policy: str) -> pd.DataFrame:
 
 
 @retry(OSError, tries=10, delay=0.100, backoff=1.1)  # type:ignore
-def pickle_dump(obj: object, f: tp.IO) -> None:
+def pickle_dump(obj: object, f) -> None:
     pickle.dump(obj, f)
 
 
sierra/version.py CHANGED
@@ -9,4 +9,4 @@
 
 # Project packages
 
-__version__ = "1.3.5"
+__version__ = "1.3.11"