halib 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -42,8 +42,10 @@ class AutoNamedCfg(YAMLWizard, NamedCfg):
  # or handled by the loader.
  pass

+
  T = TypeVar("T", bound=AutoNamedCfg)

+
  class BaseSelectorCfg(Generic[T]):
  """
  Base class to handle the logic of selecting an item from a list by name.
@@ -72,6 +74,8 @@ class ExpBaseCfg(ABC, YAMLWizard):
  3 - a method cfg
  """

+ cfg_name: Optional[str] = None
+
  # Save to yaml fil
  def save_to_outdir(
  self, filename: str = "__config.yaml", outdir=None, override: bool = False
@@ -103,13 +107,24 @@ class ExpBaseCfg(ABC, YAMLWizard):
  """Load a configuration from a custom YAML file."""
  pass

- @abstractmethod
- def get_cfg_name(self):
- """
- Get the name of the configuration.
- This method should be implemented in subclasses.
- """
- pass
+ def get_cfg_name(self, sep: str = "__", *args, **kwargs) -> str:
+ # auto get the config name from dataset, method, metric
+ # 2. Generate the canonical Config Name
+ name_parts = []
+ general_info = self.get_general_cfg().get_name()
+ dataset_info = self.get_dataset_cfg().get_name()
+ method_info = self.get_method_cfg().get_name()
+ name_parts = [
+ general_info,
+ f"ds_{dataset_info}",
+ f"mt_{method_info}",
+ ]
+ if "extra" in kwargs:
+ extra_info = kwargs["extra"]
+ assert isinstance(extra_info, str), "'extra' kwarg must be a string."
+ name_parts.append(extra_info)
+ self.cfg_name = sep.join(name_parts)
+ return self.cfg_name

  @abstractmethod
  def get_outdir(self):
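The new `get_cfg_name` builds a canonical experiment name from the general, dataset, and method config names. A minimal sketch of the resulting string format, using invented component names, joined with the default `sep="__"` plus the `ds_`/`mt_` prefixes and an optional `extra` suffix:

```python
# Invented component names; only the joining scheme mirrors the new get_cfg_name().
sep = "__"
general_info, dataset_info, method_info = "baseline", "cifar10", "resnet18"
name_parts = [general_info, f"ds_{dataset_info}", f"mt_{method_info}"]
name_parts.append("run1")  # corresponds to the optional 'extra' kwarg
print(sep.join(name_parts))  # -> baseline__ds_cifar10__mt_resnet18__run1
```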
@@ -120,7 +135,7 @@ class ExpBaseCfg(ABC, YAMLWizard):
  return None

  @abstractmethod
- def get_general_cfg(self):
+ def get_general_cfg(self) -> NamedCfg:
  """
  Get the general configuration like output directory, log settings, SEED, etc.
  This method should be implemented in subclasses.
@@ -135,6 +150,14 @@ class ExpBaseCfg(ABC, YAMLWizard):
  """
  pass

+ @abstractmethod
+ def get_method_cfg(self) -> NamedCfg:
+ """
+ Get the method configuration.
+ This method should be implemented in subclasses.
+ """
+ pass
+
  @abstractmethod
  def get_metric_cfg(self) -> NamedCfg:
  """
@@ -1,147 +1,168 @@
+ import os
+ from rich.pretty import pprint
  from abc import ABC, abstractmethod
- from typing import Tuple, Any, Optional
- from .base_config import ExpBaseCfg
- from ..perf.perfcalc import PerfCalc
- from ..perf.perfmetrics import MetricsBackend
+ from typing import List, Optional, TypeVar, Generic

-
- class ExpHook:
- """Base interface for all experiment hooks."""
- def on_before_run(self, exp): pass
- def on_after_run(self, exp, results): pass
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from dataclass_wizard import YAMLWizard


- # ! SEE https://github.com/hahv/base_exp for sample usage
- class BaseExp(PerfCalc, ABC):
+ class NamedCfg(ABC):
  """
- Base class for experiments.
- Orchestrates the experiment pipeline using a pluggable metrics backend.
+ Base class for named configurations.
+ All configurations should have a name.
  """

- def __init__(self, config: ExpBaseCfg):
- self.config = config
- self.metric_backend = None
- # Flag to track if init_general/prepare_dataset has run
- self._is_env_ready = False
- self.hooks = []
-
- def register_hook(self, hook: ExpHook):
- self.hooks.append(hook)
-
- def _trigger_hooks(self, method_name: str, *args, **kwargs):
- for hook in self.hooks:
- method = getattr(hook, method_name, None)
- if callable(method):
- method(*args, **kwargs)
-
- # -----------------------
- # PerfCalc Required Methods
- # -----------------------
- def get_dataset_name(self):
- return self.config.get_dataset_cfg().get_name()
-
- def get_experiment_name(self):
- return self.config.get_cfg_name()
-
- def get_metric_backend(self):
- if not self.metric_backend:
- self.metric_backend = self.prepare_metrics(self.config.get_metric_cfg())
- return self.metric_backend
-
- # -----------------------
- # Abstract Experiment Steps
- # -----------------------
  @abstractmethod
- def init_general(self, general_cfg):
- """Setup general settings like SEED, logging, env variables."""
+ def get_name(self):
+ """
+ Get the name of the configuration.
+ This method should be implemented in subclasses.
+ """
  pass

+
+ @dataclass
+ class AutoNamedCfg(YAMLWizard, NamedCfg):
+ """
+ Mixin that automatically implements get_name() by returning self.name.
+ Classes using this MUST have a 'name' field.
+ """
+
+ name: Optional[str] = None
+
+ def get_name(self):
+ return self.name
+
+ def __post_init__(self):
+ # Enforce the "MUST" rule here
+ if self.name is None:
+ # We allow None during initial load, but it must be set before usage
+ # or handled by the loader.
+ pass
+
+
+ T = TypeVar("T", bound=AutoNamedCfg)
+
+
+ class BaseSelectorCfg(Generic[T]):
+ """
+ Base class to handle the logic of selecting an item from a list by name.
+ """
+
+ def _resolve_selection(self, items: List[T], selected_name: str, context: str) -> T:
+ if selected_name is None:
+ raise ValueError(f"No {context} selected in the configuration.")
+
+ # Create a lookup dict for O(1) access, or just iterate if list is short
+ for item in items:
+ if item.name == selected_name:
+ return item
+
+ raise ValueError(
+ f"{context.capitalize()} '{selected_name}' not found in the configuration list."
+ )
+
+
+ class ExpBaseCfg(ABC, YAMLWizard):
+ """
+ Base class for configuration objects.
+ What a cfg class must have:
+ 1 - a dataset cfg
+ 2 - a metric cfg
+ 3 - a method cfg
+ """
+
+ cfg_name: Optional[str] = None
+
+ # Save to yaml fil
+ def save_to_outdir(
+ self, filename: str = "__config.yaml", outdir=None, override: bool = False
+ ) -> None:
+ """
+ Save the configuration to the output directory.
+ """
+ if outdir is not None:
+ output_dir = outdir
+ else:
+ output_dir = self.get_outdir()
+ os.makedirs(output_dir, exist_ok=True)
+ assert (output_dir is not None) and (
+ os.path.isdir(output_dir)
+ ), f"Output directory '{output_dir}' does not exist or is not a directory."
+ file_path = os.path.join(output_dir, filename)
+ if os.path.exists(file_path) and not override:
+ pprint(
+ f"File '{file_path}' already exists. Use 'override=True' to overwrite."
+ )
+ else:
+ # method of YAMLWizard to_yaml_file
+ self.to_yaml_file(file_path)
+
+ @classmethod
  @abstractmethod
- def prepare_dataset(self, dataset_cfg):
- """Load/prepare dataset."""
+ # load from a custom YAML file
+ def from_custom_yaml_file(cls, yaml_file: str):
+ """Load a configuration from a custom YAML file."""
  pass

+ def get_cfg_name(self, sep: str = "__", *args, **kwargs) -> str:
+ if self.cfg_name is None:
+ # auto get the config name from dataset, method, metric
+ # 2. Generate the canonical Config Name
+ name_parts = []
+ general_info = self.get_general_cfg().get_name()
+ dataset_info = self.get_dataset_cfg().get_name()
+ method_info = self.get_method_cfg().get_name()
+ name_parts = [
+ general_info,
+ f"ds_{dataset_info}",
+ f"mt_{method_info}",
+ ]
+ if "extra" in kwargs:
+ extra_info = kwargs["extra"]
+ assert isinstance(extra_info, str), "'extra' kwarg must be a string."
+ name_parts.append(extra_info)
+ self.cfg_name = sep.join(name_parts)
+ return self.cfg_name
+
  @abstractmethod
- def prepare_metrics(self, metric_cfg) -> MetricsBackend:
+ def get_outdir(self):
  """
- Prepare the metrics for the experiment.
+ Get the output directory for the configuration.
  This method should be implemented in subclasses.
  """
- pass
+ return None

  @abstractmethod
- def exec_exp(self, *args, **kwargs) -> Optional[Tuple[Any, Any]]:
- """Run experiment process, e.g.: training/evaluation loop.
- Return: either `None` or a tuple of (raw_metrics_data, extra_data) for calc_and_save_exp_perfs
+ def get_general_cfg(self) -> NamedCfg:
+ """
+ Get the general configuration like output directory, log settings, SEED, etc.
+ This method should be implemented in subclasses.
  """
  pass

- # -----------------------
- # Internal Helpers
- # -----------------------
- def _validate_and_unpack(self, results):
- if results is None:
- return None
- if not isinstance(results, (tuple, list)) or len(results) != 2:
- raise ValueError("exec must return (metrics_data, extra_data)")
- return results[0], results[1]
-
- def _prepare_environment(self, force_reload: bool = False):
+ @abstractmethod
+ def get_dataset_cfg(self) -> NamedCfg:
  """
- Common setup. Skips if already initialized, unless force_reload is True.
+ Get the dataset configuration.
+ This method should be implemented in subclasses.
  """
- if self._is_env_ready and not force_reload:
- # Environment is already prepared, skipping setup.
- return
-
- # 1. Run Setup
- self.init_general(self.config.get_general_cfg())
- self.prepare_dataset(self.config.get_dataset_cfg())
-
- # 2. Update metric backend (refresh if needed)
- self.metric_backend = self.prepare_metrics(self.config.get_metric_cfg())
-
- # 3. Mark as ready
- self._is_env_ready = True
+ pass

- # -----------------------
- # Main Experiment Runner
- # -----------------------
- def run_exp(self, should_calc_metrics=True, reload_env=False, *args, **kwargs):
+ @abstractmethod
+ def get_method_cfg(self) -> NamedCfg:
  """
- Run the whole experiment pipeline.
- :param reload_env: If True, forces dataset/general init to run again.
- :param should_calc_metrics: Whether to calculate and save metrics after execution.
- :kwargs Params:
- + 'outfile' to save csv file results,
- + 'outdir' to set output directory for experiment results.
- + 'return_df' to return a DataFrame of results instead of a dictionary.
-
- Full pipeline:
- 1. Init
- 2. Prepare Environment (General + Dataset + Metrics)
- 3. Save Config
- 4. Execute
- 5. Calculate & Save Metrics
+ Get the method configuration.
+ This method should be implemented in subclasses.
  """
- self._prepare_environment(force_reload=reload_env)
-
- self._trigger_hooks("before_run", self)
-
- # Save config before running
- self.config.save_to_outdir()
-
- # Execute experiment
- results = self.exec_exp(*args, **kwargs)
+ pass

- if should_calc_metrics and results is not None:
- metrics_data, extra_data = self._validate_and_unpack(results)
- # Calculate & Save metrics
- perf_results = self.calc_perfs(
- raw_metrics_data=metrics_data, extra_data=extra_data, *args, **kwargs
- )
- self._trigger_hooks("after_run", self, perf_results)
- return perf_results
- else:
- self._trigger_hooks("after_run", self, results)
- return results
+ @abstractmethod
+ def get_metric_cfg(self) -> NamedCfg:
+ """
+ Get the metric configuration.
+ This method should be implemented in subclasses.
+ """
+ pass
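For context, `BaseSelectorCfg._resolve_selection` above is a plain name lookup over a list of `AutoNamedCfg` items. A small sketch of how a selector-style config might use it; the item and selector classes are invented, and the import path is assumed from the RECORD listing:

```python
from dataclasses import dataclass, field
from typing import List, Optional

from halib.exp.core.base_config import AutoNamedCfg, BaseSelectorCfg  # assumed import path

@dataclass
class OptimizerCfg(AutoNamedCfg):                # hypothetical item type
    lr: float = 1e-3

@dataclass
class OptimizerSelectorCfg(BaseSelectorCfg[OptimizerCfg]):   # hypothetical selector
    selected: Optional[str] = None
    options: List[OptimizerCfg] = field(default_factory=list)

    def get_selected(self) -> OptimizerCfg:
        # Raises ValueError if nothing is selected or the name is not in 'options'
        return self._resolve_selection(self.options, self.selected, context="optimizer")

sel = OptimizerSelectorCfg(
    selected="adam",
    options=[OptimizerCfg(name="sgd", lr=0.1), OptimizerCfg(name="adam", lr=1e-3)],
)
print(sel.get_selected().lr)  # 1e-3
```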
@@ -74,114 +74,191 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
  "All dictionaries in raw_data_for_metrics must have the same keys as metric_names"
  )

- # ! only need to override this method if torchmetrics are not used
- def calc_exp_perf_metrics(
- self, metric_names, raw_metrics_data, extra_data=None, *args, **kwargs
- ):
- assert isinstance(raw_metrics_data, dict) or isinstance(
- raw_metrics_data, list
- ), "raw_data_for_metrics must be a dictionary or a list"
-
- if extra_data is not None:
- assert isinstance(
- extra_data, type(raw_metrics_data)
- ), "extra_data must be of the same type as raw_data_for_metrics (dict or list)"
- # prepare raw_metric data for processing
- proc_metric_raw_data_ls = (
- raw_metrics_data
- if isinstance(raw_metrics_data, list)
- else [raw_metrics_data.copy()]
- )
- self.valid_proc_metric_raw_data(metric_names, proc_metric_raw_data_ls)
- # prepare extra data for processing
- proc_extra_data_ls = []
- if extra_data is not None:
- proc_extra_data_ls = (
- extra_data if isinstance(extra_data, list) else [extra_data.copy()]
- )
- assert len(proc_extra_data_ls) == len(
- proc_metric_raw_data_ls
- ), "extra_data must have the same length as raw_data_for_metrics if it is a list"
- # validate the extra_data
- self.valid_proc_extra_data(proc_extra_data_ls)
+ # =========================================================================
+ # 1. Formatting Logic (Decoupled)
+ # =========================================================================
+ def package_metrics(
+ self,
+ metric_results_list: List[dict],
+ extra_data_list: Optional[List[dict]] = None,
+ ) -> List[OrderedDict]:
+ """
+ Pure formatting function.
+ Takes ALREADY CALCULATED metrics and formats them
+ (adds metadata, prefixes keys, ensures column order).
+ """
+ # Normalize extra_data to a list if provided
+ if extra_data_list is None:
+ extra_data_list = [{} for _ in range(len(metric_results_list))]
+ elif isinstance(extra_data_list, dict):
+ extra_data_list = [extra_data_list]
+
+ assert len(extra_data_list) == len(
+ metric_results_list
+ ), "Length mismatch: metrics vs extra_data"

- # calculate the metrics output results
- metrics_backend = self.get_metric_backend()
  proc_outdict_list = []
- for idx, raw_metrics_data in enumerate(proc_metric_raw_data_ls):
+
+ for metric_res, extra_item in zip(metric_results_list, extra_data_list):
+ # A. Base Metadata
  out_dict = {
  "dataset": self.get_dataset_name(),
  "experiment": self.get_experiment_name(),
  }
- custom_fields = []
- if len(proc_extra_data_ls) > 0:
- # add extra data to the output dictionary
- extra_data_item = proc_extra_data_ls[idx]
- out_dict.update(extra_data_item)
- custom_fields = list(extra_data_item.keys())
- metric_results = metrics_backend.calc_metrics(
- metrics_data_dict=raw_metrics_data, *args, **kwargs
- )
- metric_results_prefix = {
- f"metric_{k}": v for k, v in metric_results.items()
- }
- out_dict.update(metric_results_prefix)
- ordered_cols = (
- REQUIRED_COLS + custom_fields + list(metric_results_prefix.keys())
+
+ # B. Attach Extra Data
+ out_dict.update(extra_item)
+ custom_fields = list(extra_item.keys())
+
+ # C. Prefix Metric Keys (e.g., 'acc' -> 'metric_acc')
+ metric_results_prefixed = {f"metric_{k}": v for k, v in metric_res.items()}
+ out_dict.update(metric_results_prefixed)
+
+ # D. Order Columns
+ all_cols = (
+ REQUIRED_COLS + custom_fields + list(metric_results_prefixed.keys())
  )
- out_dict = OrderedDict(
- (col, out_dict[col]) for col in ordered_cols if col in out_dict
+ ordered_out = OrderedDict(
+ (col, out_dict[col]) for col in all_cols if col in out_dict
  )
- proc_outdict_list.append(out_dict)
+ proc_outdict_list.append(ordered_out)

  return proc_outdict_list

- #! custom kwargs:
- #! outfile - if provided, will save the output to a CSV file with the given path
- #! outdir - if provided, will save the output to a CSV file in the given directory with a generated filename
- #! return_df - if True, will return a DataFrame instead of a dictionary
- def calc_perfs(
+ # =========================================================================
+ # 2. Calculation Logic (The Coordinator)
+ # =========================================================================
+ def calc_exp_perf_metrics(
  self,
+ metric_names: List[str],
  raw_metrics_data: Union[List[dict], dict],
  extra_data: Optional[Union[List[dict], dict]] = None,
  *args,
  **kwargs,
- ) -> Tuple[Union[List[OrderedDict], pd.DataFrame], Optional[str]]:
+ ) -> List[OrderedDict]:
  """
- Calculate the metrics.
- This function should be overridden by the subclass if needed.
- Must return a dictionary with keys as metric names and values as the calculated metrics.
+ Full workflow: Validates raw data -> Calculates via Backend -> Packages results.
  """
- metric_names = self.get_metric_backend().metric_names
- out_dict_list = self.calc_exp_perf_metrics(
- metric_names=metric_names,
- raw_metrics_data=raw_metrics_data,
- extra_data=extra_data,
- *args,
- **kwargs,
+ # Prepare Raw Data
+ raw_data_ls = (
+ raw_metrics_data
+ if isinstance(raw_metrics_data, list)
+ else [raw_metrics_data]
  )
+ self.valid_proc_metric_raw_data(metric_names, raw_data_ls)
+
+ # Prepare Extra Data (Validation only)
+ extra_data_ls = None
+ if extra_data:
+ extra_data_ls = extra_data if isinstance(extra_data, list) else [extra_data]
+ self.valid_proc_extra_data(extra_data_ls)
+
+ # Calculate Metrics via Backend
+ metrics_backend = self.get_metric_backend()
+ calculated_results = []
+
+ for data_item in raw_data_ls:
+ res = metrics_backend.calc_metrics(
+ metrics_data_dict=data_item, *args, **kwargs
+ )
+ calculated_results.append(res)
+
+ # Delegate to Formatting
+ return self.package_metrics(calculated_results, extra_data_ls)
+
+ # =========================================================================
+ # 3. File Saving Logic (Decoupled)
+ # =========================================================================
+ def save_results_to_csv(
+ self, out_dict_list: List[OrderedDict], **kwargs
+ ) -> Tuple[pd.DataFrame, Optional[str]]:
+ """
+ Helper function to convert results to DataFrame and save to CSV.
+ """
  csv_outfile = kwargs.get("outfile", None)
+
+ # Determine Output Path
  if csv_outfile is not None:
  filePathNoExt, _ = os.path.splitext(csv_outfile)
- # pprint(f"CSV Outfile Path (No Ext): {filePathNoExt}")
  csv_outfile = f"{filePathNoExt}{CSV_FILE_POSTFIX}.csv"
  elif "outdir" in kwargs:
  csvoutdir = kwargs["outdir"]
  csvfilename = f"{now_str()}_{self.get_dataset_name()}_{self.get_experiment_name()}_{CSV_FILE_POSTFIX}.csv"
  csv_outfile = os.path.join(csvoutdir, csvfilename)

- # convert out_dict to a DataFrame
+ # Convert to DataFrame
  df = pd.DataFrame(out_dict_list)
- # get the orders of the columns as the orders or the keys in out_dict
- ordered_cols = list(out_dict_list[0].keys())
- df = df[ordered_cols] # reorder columns
+ if out_dict_list:
+ ordered_cols = list(out_dict_list[0].keys())
+ df = df[ordered_cols]
+
+ # Save to File
  if csv_outfile:
  df.to_csv(csv_outfile, index=False, sep=";", encoding="utf-8")
- return_df = kwargs.get("return_df", False)
- if return_df: # return DataFrame instead of dict if requested
- return df, csv_outfile
- else:
- return out_dict_list, csv_outfile
+
+ return df, csv_outfile
+
+ # =========================================================================
+ # 4. Public API: Standard Calculation
+ # raw_metrics_data: example: [{"preds": ..., "target": ...}, ...]
+ # =========================================================================
+ def calc_perfs(
+ self,
+ raw_metrics_data: Union[List[dict], dict],
+ extra_data: Optional[Union[List[dict], dict]] = None,
+ *args,
+ **kwargs,
+ ) -> Tuple[Union[List[OrderedDict], pd.DataFrame], Optional[str]]:
+ """
+ Standard use case: Calculate metrics AND save to CSV.
+ """
+ metric_names = self.get_metric_backend().metric_names
+
+ # 1. Calculate & Package
+ out_dict_list = self.calc_exp_perf_metrics(
+ metric_names=metric_names,
+ raw_metrics_data=raw_metrics_data,
+ extra_data=extra_data,
+ *args,
+ **kwargs,
+ )
+
+ # 2. Save
+ df, csv_outfile = self.save_results_to_csv(out_dict_list, **kwargs)
+
+ return (
+ (df, csv_outfile)
+ if kwargs.get("return_df", False)
+ else (out_dict_list, csv_outfile)
+ )
+
+ # =========================================================================
+ # 5. Public API: Manual / External Metrics (The Shortcut)
+ # =========================================================================
+ def save_computed_perfs(
+ self,
+ metrics_data: Union[List[dict], dict],
+ extra_data: Optional[Union[List[dict], dict]] = None,
+ **kwargs,
+ ) -> Tuple[Union[List[OrderedDict], pd.DataFrame], Optional[str]]:
+
+ # Ensure list format
+ if isinstance(metrics_data, dict):
+ metrics_data = [metrics_data]
+ if isinstance(extra_data, dict):
+ extra_data = [extra_data]
+
+ # 1. Package (Format)
+ formatted_list = self.package_metrics(metrics_data, extra_data)
+
+ # 2. Save
+ df, csv_outfile = self.save_results_to_csv(formatted_list, **kwargs)
+
+ return (
+ (df, csv_outfile)
+ if kwargs.get("return_df", False)
+ else (formatted_list, csv_outfile)
+ )

  @staticmethod
  def default_exp_csv_filter_fn(exp_file_name: str) -> bool:
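The refactor above splits `PerfCalc` into calculation (`calc_exp_perf_metrics`), formatting (`package_metrics`), and saving (`save_results_to_csv`). A hand-worked sketch of the row format `package_metrics` builds for one result; the values are invented, and treating `dataset`/`experiment` as the `REQUIRED_COLS` is an assumption based on the code above:

```python
from collections import OrderedDict

# Invented values; mirrors the per-row formatting in package_metrics().
metric_res = {"acc": 0.91, "f1": 0.88}   # already-calculated metrics
extra_item = {"fold": 0}                  # optional extra data

row = {"dataset": "cifar10", "experiment": "baseline__ds_cifar10__mt_resnet18"}
row.update(extra_item)                                         # B. attach extra data
row.update({f"metric_{k}": v for k, v in metric_res.items()})  # C. prefix metric keys
ordered = OrderedDict(row)                                     # D. required cols, extra, metrics
print(list(ordered.keys()))
# ['dataset', 'experiment', 'fold', 'metric_acc', 'metric_f1']
```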
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: halib
- Version: 0.2.6
+ Version: 0.2.7
  Summary: Small library for common tasks
  Author: Hoang Van Ha
  Author-email: hoangvanhauit@gmail.com
@@ -53,8 +53,9 @@ Dynamic: summary

  # Helper package for coding and automation

- **Version 0.2.6**
+ **Version 0.2.7**
  + reorganize packages with most changes in `research` package; also rename `research` to `exp` (package for experiment management and utilities)
+ + update `exp/perfcalc.py` to allow save computed performance to csv file (without explicit calling method `calc_perfs`)

  **Version 0.2.1**
  + `research/base_exp`: add `eval_exp` method to evaluate experiment (e.g., model evaluation on test set) after experiment running is done.
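A hedged sketch of the `save_computed_perfs` shortcut mentioned in the 0.2.7 changelog entry above: metrics computed outside the metrics backend are only formatted and written to CSV. The subclass, names, and paths are hypothetical, and the real `PerfCalc` abstract surface may require more overrides than shown here:

```python
import os

from halib.exp.perf.perfcalc import PerfCalc  # assumed import path

class MyExpPerf(PerfCalc):                     # hypothetical subclass
    def get_dataset_name(self):
        return "cifar10"

    def get_experiment_name(self):
        return "baseline__ds_cifar10__mt_resnet18"

    def get_metric_backend(self):
        return None  # not needed here: the metrics below are already computed

os.makedirs("./results", exist_ok=True)        # save_results_to_csv does not create the dir
exp = MyExpPerf()
rows, csv_path = exp.save_computed_perfs(
    metrics_data={"acc": 0.91, "f1": 0.88},    # externally computed values
    extra_data={"fold": 0},
    outdir="./results",                        # triggers CSV export with a generated filename
)
print(csv_path)
```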
@@ -21,8 +21,8 @@ halib/common/common.py,sha256=olkeXdFdojOkySP5aurzxKlehngRwBHdNBw5JfE4_fQ,5038
  halib/common/rich_color.py,sha256=tyK5fl3Dtv1tKsfFzt_5Rco4Fj72QliA-w5aGXaVuqQ,6392
  halib/exp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  halib/exp/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- halib/exp/core/base_config.py,sha256=MtkbToF078imMcsna5Dlv9q4ORKRKkOUCERX7JbVdzM,4180
- halib/exp/core/base_exp.py,sha256=fknJVmW6ubbapOggbkrbNWgc1ZXcUz_FE3wMyuIGX7M,5180
+ halib/exp/core/base_config.py,sha256=Js2oVDt7qwT7eV_sOUWw6XXl569G1bX6ls-VYAx2gWY,5032
+ halib/exp/core/base_exp.py,sha256=XjRHXbUHE-DCZLRDTteDF5gsxKN3mhGEe2zWL24JP80,5131
  halib/exp/core/param_gen.py,sha256=I9JHrDCaep4CjvApDoX0QzFuw38zMC2PsDFueuA7pjM,4271
  halib/exp/core/wandb_op.py,sha256=powL2QyLBqF-6PUGAOqd60s1npHLLKJxPns3S4hKeNo,4160
  halib/exp/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -32,7 +32,7 @@ halib/exp/data/torchloader.py,sha256=oWUplXlGd1IB6CqdRd-mGe-DfMjjZxz9hQ7SWONb-0s
  halib/exp/perf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  halib/exp/perf/flop_calc.py,sha256=Kb3Gwqc7QtGALZzfyYXBA_9SioReJpTJdUX84kqj-Aw,6907
  halib/exp/perf/gpu_mon.py,sha256=vD41_ZnmPLKguuq9X44SB_vwd9JrblO4BDzHLXZhhFY,2233
- halib/exp/perf/perfcalc.py,sha256=FSWDStz9f94dObyAHYHRtq4fuo0dIw7l9JH_x5Wd7cQ,16225
+ halib/exp/perf/perfcalc.py,sha256=p7rhVShiie7DT_s50lbvbGftVCkrWE0tQGFLUEmTXi0,18326
  halib/exp/perf/perfmetrics.py,sha256=qRiNiCKGUSTLY7gPMVMuVHGAAyeosfGWup2eM4490aw,5485
  halib/exp/perf/perftb.py,sha256=IWElg3OB5dmhfxnY8pMZvkL2y_EnvLmEx3gJlpUR1Fs,31066
  halib/exp/perf/profiler.py,sha256=5ZjES8kAqEsSV1mC3Yr_1ivFLwQDc_yv4HY7dKt_AS0,11782
@@ -101,8 +101,8 @@ halib/utils/list.py,sha256=BM-8sRhYyqF7bh4p7TQtV7P_gnFruUCA6DTUOombaZg,337
  halib/utils/listop.py,sha256=Vpa8_2fI0wySpB2-8sfTBkyi_A4FhoFVVvFiuvW8N64,339
  halib/utils/tele_noti.py,sha256=-4WXZelCA4W9BroapkRyIdUu9cUVrcJJhegnMs_WpGU,5928
  halib/utils/video.py,sha256=zLoj5EHk4SmP9OnoHjO8mLbzPdtq6gQPzTQisOEDdO8,3261
- halib-0.2.6.dist-info/licenses/LICENSE.txt,sha256=qZssdna4aETiR8znYsShUjidu-U4jUT9Q-EWNlZ9yBQ,1100
- halib-0.2.6.dist-info/METADATA,sha256=vOwV5U8ejV_z6eL1YGbkaY5EnMCDQR54Tab1NJsSbIU,6714
- halib-0.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- halib-0.2.6.dist-info/top_level.txt,sha256=7AD6PLaQTreE0Fn44mdZsoHBe_Zdd7GUmjsWPyQ7I-k,6
- halib-0.2.6.dist-info/RECORD,,
+ halib-0.2.7.dist-info/licenses/LICENSE.txt,sha256=qZssdna4aETiR8znYsShUjidu-U4jUT9Q-EWNlZ9yBQ,1100
+ halib-0.2.7.dist-info/METADATA,sha256=jSjp5DPZ8A8ohlO-QQ__7mE0Z-fO7sdkZ5Bz6ssKnhU,6836
+ halib-0.2.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ halib-0.2.7.dist-info/top_level.txt,sha256=7AD6PLaQTreE0Fn44mdZsoHBe_Zdd7GUmjsWPyQ7I-k,6
+ halib-0.2.7.dist-info/RECORD,,
File without changes