halib 0.1.99__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
  from abc import ABC, abstractmethod
-
+ from typing import Tuple, Any, Optional
  from ..research.base_config import ExpBaseConfig
  from ..research.perfcalc import PerfCalc
  from ..research.metrics import MetricsBackend
@@ -14,6 +14,8 @@ class BaseExperiment(PerfCalc, ABC):
      def __init__(self, config: ExpBaseConfig):
          self.config = config
          self.metric_backend = None
+         # Flag to track if init_general/prepare_dataset has run
+         self._is_env_ready = False

      # -----------------------
      # PerfCalc Required Methods
@@ -51,50 +53,105 @@ class BaseExperiment(PerfCalc, ABC):
          pass

      @abstractmethod
-     def exec_exp(self, *args, **kwargs):
+     def before_exec_exp_once(self, *args, **kwargs):
+         """Optional: any setup before exec_exp. Note this is called once per run_exp."""
+         pass
+
+     @abstractmethod
+     def exec_exp(self, *args, **kwargs) -> Optional[Tuple[Any, Any]]:
          """Run experiment process, e.g.: training/evaluation loop.
-         Return: raw_metrics_data, and extra_data as input for calc_and_save_exp_perfs
+         Return: either `None` or a tuple of (raw_metrics_data, extra_data) for calc_and_save_exp_perfs
          """
          pass

-     def eval_exp(self):
-         """Optional: re-run evaluation from saved results."""
+     @abstractmethod
+     def exec_eval(self, *args, **kwargs) -> Optional[Tuple[Any, Any]]:
+         """Run evaluation process.
+         Return: either `None` or a tuple of (raw_metrics_data, extra_data) for calc_and_save_exp_perfs
+         """
          pass

+     # -----------------------
+     # Internal Helpers
+     # -----------------------
+     def _validate_and_unpack(self, results):
+         if results is None:
+             return None
+         if not isinstance(results, (tuple, list)) or len(results) != 2:
+             raise ValueError("exec must return (metrics_data, extra_data)")
+         return results[0], results[1]
+
+     def _prepare_environment(self, force_reload: bool = False):
+         """
+         Common setup. Skips if already initialized, unless force_reload is True.
+         """
+         if self._is_env_ready and not force_reload:
+             # Environment is already prepared, skipping setup.
+             return
+
+         # 1. Run Setup
+         self.init_general(self.config.get_general_cfg())
+         self.prepare_dataset(self.config.get_dataset_cfg())
+
+         # 2. Update metric backend (refresh if needed)
+         self.metric_backend = self.prepare_metrics(self.config.get_metric_cfg())
+
+         # 3. Mark as ready
+         self._is_env_ready = True
+
      # -----------------------
      # Main Experiment Runner
      # -----------------------
-     def run_exp(self, do_calc_metrics=True, *args, **kwargs):
+     def run_exp(self, should_calc_metrics=True, reload_env=False, *args, **kwargs):
          """
          Run the whole experiment pipeline.
-         Params:
+         :param reload_env: If True, forces dataset/general init to run again.
+         :param should_calc_metrics: Whether to calculate and save metrics after execution.
+         :kwargs Params:
          + 'outfile' to save csv file results,
          + 'outdir' to set output directory for experiment results.
          + 'return_df' to return a DataFrame of results instead of a dictionary.

          Full pipeline:
          1. Init
-         2. Dataset
-         3. Metrics Preparation
-         4. Save Config
-         5. Execute
-         6. Calculate & Save Metrics
+         2. Prepare Environment (General + Dataset + Metrics)
+         3. Save Config
+         4. Execute
+         5. Calculate & Save Metrics
          """
-         self.init_general(self.config.get_general_cfg())
-         self.prepare_dataset(self.config.get_dataset_cfg())
-         self.prepare_metrics(self.config.get_metric_cfg())
+         self._prepare_environment(force_reload=reload_env)

+         # Any pre-exec setup (loading models, etc)
+         self.before_exec_exp_once(*args, **kwargs)
          # Save config before running
          self.config.save_to_outdir()

          # Execute experiment
          results = self.exec_exp(*args, **kwargs)
-         if do_calc_metrics:
-             metrics_data, extra_data = results
+
+         if should_calc_metrics and results is not None:
+             metrics_data, extra_data = self._validate_and_unpack(results)
              # Calculate & Save metrics
-             perf_results = self.calc_and_save_exp_perfs(
+             perf_results = self.calc_perfs(
                  raw_metrics_data=metrics_data, extra_data=extra_data, *args, **kwargs
              )
              return perf_results
          else:
              return results
+
+     # -----------------------
+     # Main Experiment Evaluator
+     # -----------------------
+     def eval_exp(self, reload_env=False, *args, **kwargs):
+         """
+         Run evaluation only.
+         :param reload_env: If True, forces dataset/general init to run again.
+         """
+         self._prepare_environment(force_reload=reload_env)
+         results = self.exec_eval(*args, **kwargs)
+         if results is not None:
+             metrics_data, extra_data = self._validate_and_unpack(results)
+             return self.calc_perfs(
+                 raw_metrics_data=metrics_data, extra_data=extra_data, *args, **kwargs
+             )
+         return None
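
For orientation, a minimal sketch of how a subclass might plug into the reworked `run_exp`/`eval_exp` lifecycle above. `MyExperiment`, the metric payloads, and the import path are assumptions based on this diff and the RECORD entries, and the remaining abstract hooks (`init_general`, `prepare_dataset`, `prepare_metrics`, plus the `PerfCalc` requirements such as `get_experiment_name`) still have to be provided:

```python
# Sketch only (not from the package): how a subclass might use the new lifecycle.
from halib.research.base_exp import BaseExperiment


class MyExperiment(BaseExperiment):
    def before_exec_exp_once(self, *args, **kwargs):
        # One-time setup per run_exp() call, e.g. building/loading a model.
        self.model = None  # placeholder

    def exec_exp(self, *args, **kwargs):
        # Training loop goes here; return (raw_metrics_data, extra_data),
        # or None to skip metric calculation. The exact per-metric value
        # format depends on the configured MetricsBackend.
        raw_metrics_data = {"accuracy": {"preds": [0, 1, 1], "target": [0, 1, 0]}}
        extra_data = {"epochs": 50}
        return raw_metrics_data, extra_data

    def exec_eval(self, *args, **kwargs):
        # Evaluation-only pass; same return contract as exec_exp.
        return {"accuracy": {"preds": [0, 1, 1], "target": [0, 1, 0]}}, {"split": "test"}

    # ... remaining abstract methods omitted ...


# exp = MyExperiment(config=my_cfg)          # my_cfg: some ExpBaseConfig subclass
# run_df = exp.run_exp(outdir="results", return_df=True)   # full pipeline
# eval_df = exp.eval_exp()                   # evaluation only; env prepared once
```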
halib/research/dataset.py CHANGED
@@ -13,8 +13,8 @@ from rich.pretty import pprint
  from torchvision.datasets import ImageFolder
  from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit

- from ..system import filesys as fs
  from ..common import console, seed_everything, ConsoleLog
+ from ..system import filesys as fs

  def parse_args():
      parser = ArgumentParser(description="desc text")
halib/research/metrics.py CHANGED
@@ -11,6 +11,10 @@ class MetricsBackend(ABC):
      def __init__(self, metrics_info: Union[List[str], Dict[str, Any]]):
          """
          Initialize the backend with optional metrics_info.
+         `metrics_info` can be either:
+         - A list of metric names (strings), e.g., ["accuracy", "precision"]
+         - A dict mapping metric names to objects that define how to compute them, e.g., {"accuracy": torchmetrics.Accuracy(), "precision": torchmetrics.Precision()}
+
          """
          self.metric_info = metrics_info
          self.validate_metrics_info(self.metric_info)
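
To make the two accepted `metrics_info` shapes concrete, a small illustration; `torchmetrics` is only one possible source of metric objects (it is the one named in the docstring), and the concrete `MetricsBackend` subclass that would consume these values is hypothetical:

```python
# Two valid shapes for metrics_info, per the docstring above.
import torchmetrics

metrics_as_names = ["accuracy", "precision"]          # list form: names only

metrics_as_objects = {                                # dict form: name -> metric object
    "accuracy": torchmetrics.Accuracy(task="binary"),
    "precision": torchmetrics.Precision(task="binary"),
}

# Either value would then be handed to a concrete MetricsBackend subclass, e.g.
#     backend = MyBackend(metrics_info=metrics_as_objects)   # hypothetical subclass
```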
halib/research/mics.py CHANGED
@@ -9,7 +9,7 @@ PC_NAME_TO_ABBR = {
      "DESKTOP-5IRHU87": "MSI_Laptop",
      "DESKTOP-96HQCNO": "4090_SV",
      "DESKTOP-Q2IKLC0": "4GPU_SV",
-     "DESKTOP-QNS3DNF": "1GPU_SV"
+     "DESKTOP-QNS3DNF": "1GPU_SV",
  }

  DEFAULT_ABBR_WORKING_DISK = {
@@ -19,19 +19,25 @@ DEFAULT_ABBR_WORKING_DISK = {
      "4GPU_SV": "D:",
  }

+
  def list_PCs(show=True):
-     df = pd.DataFrame(list(PC_NAME_TO_ABBR.items()), columns=["PC Name", "Abbreviation"])
+     df = pd.DataFrame(
+         list(PC_NAME_TO_ABBR.items()), columns=["PC Name", "Abbreviation"]
+     )
      if show:
          csvfile.fn_display_df(df)
      return df

+
  def get_PC_name():
      return platform.node()

+
  def get_PC_abbr_name():
      pc_name = get_PC_name()
      return PC_NAME_TO_ABBR.get(pc_name, "Unknown")

+
  # ! This function searches for full paths in the obj and normalizes them according to the current platform and working disk
  # ! E.g: "E:/zdataset/DFire", but working_disk: "D:", current_platform: "windows" => "D:/zdataset/DFire"
  # ! E.g: "E:/zdataset/DFire", but working_disk: "D:", current_platform: "linux" => "/mnt/d/zdataset/DFire"
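
The drive-swap rule spelled out in these comments can be sketched as a standalone helper; `normalize_path` is a hypothetical name used only for illustration, since the halib function itself is not shown in this hunk:

```python
# Standalone sketch of the rule in the comments above; names are hypothetical.
import re


def normalize_path(path: str, working_disk: str = "D:", platform_name: str = "windows") -> str:
    """Re-root a Windows-style absolute path onto `working_disk`,
    using a /mnt/<drive> mount point when the platform is linux."""
    rest = re.sub(r"^[A-Za-z]:[/\\]+", "", path)  # drop the original drive prefix
    if platform_name == "windows":
        return f"{working_disk}/{rest}"
    return f"/mnt/{working_disk.rstrip(':').lower()}/{rest}"


assert normalize_path("E:/zdataset/DFire", "D:", "windows") == "D:/zdataset/DFire"
assert normalize_path("E:/zdataset/DFire", "D:", "linux") == "/mnt/d/zdataset/DFire"
```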
@@ -3,12 +3,9 @@ import glob
  from typing import Optional, Tuple
  import pandas as pd

- from rich.pretty import pprint
-
  from abc import ABC, abstractmethod
  from collections import OrderedDict

- from ..filetype import csvfile
  from ..system import filesys as fs
  from ..common import now_str
  from ..research.perftb import PerfTB
@@ -19,6 +16,7 @@ REQUIRED_COLS = ["experiment", "dataset"]
  CSV_FILE_POSTFIX = "__perf"
  METRIC_PREFIX = "metric_"

+
  class PerfCalc(ABC): # Abstract base class for performance calculation
      @abstractmethod
      def get_experiment_name(self) -> str:
@@ -44,29 +42,32 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
          """
          pass

-     def valid_proc_extra_data(
-         self, proc_extra_data
-     ):
+     def valid_proc_extra_data(self, proc_extra_data):
          # make sure that all items in proc_extra_data are dictionaries, with same keys
          if proc_extra_data is None or len(proc_extra_data) == 0:
              return
          if not all(isinstance(item, dict) for item in proc_extra_data):
              raise TypeError("All items in proc_extra_data must be dictionaries")

-         if not all(item.keys() == proc_extra_data[0].keys() for item in proc_extra_data):
-             raise ValueError("All dictionaries in proc_extra_data must have the same keys")
+         if not all(
+             item.keys() == proc_extra_data[0].keys() for item in proc_extra_data
+         ):
+             raise ValueError(
+                 "All dictionaries in proc_extra_data must have the same keys"
+             )

-     def valid_proc_metric_raw_data(
-         self, metric_names, proc_metric_raw_data
-     ):
+     def valid_proc_metric_raw_data(self, metric_names, proc_metric_raw_data):
          # make sure that all items in proc_metric_raw_data are dictionaries, with same keys as metric_names
-         assert isinstance(proc_metric_raw_data, list) and len(proc_metric_raw_data) > 0, \
-             "raw_data_for_metrics must be a non-empty list of dictionaries"
+         assert (
+             isinstance(proc_metric_raw_data, list) and len(proc_metric_raw_data) > 0
+         ), "raw_data_for_metrics must be a non-empty list of dictionaries"

          # make sure that all items in proc_metric_raw_data are dictionaries with keys as metric_names
          if not all(isinstance(item, dict) for item in proc_metric_raw_data):
              raise TypeError("All items in raw_data_for_metrics must be dictionaries")
-         if not all( set(item.keys()) == set(metric_names) for item in proc_metric_raw_data):
+         if not all(
+             set(item.keys()) == set(metric_names) for item in proc_metric_raw_data
+         ):
              raise ValueError(
                  "All dictionaries in raw_data_for_metrics must have the same keys as metric_names"
              )
@@ -75,21 +76,30 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
      def calc_exp_perf_metrics(
          self, metric_names, raw_metrics_data, extra_data=None, *args, **kwargs
      ):
-         assert isinstance(raw_metrics_data, dict) or isinstance(raw_metrics_data, list), \
-             "raw_data_for_metrics must be a dictionary or a list"
+         assert isinstance(raw_metrics_data, dict) or isinstance(
+             raw_metrics_data, list
+         ), "raw_data_for_metrics must be a dictionary or a list"

          if extra_data is not None:
-             assert isinstance(extra_data, type(raw_metrics_data)), \
-                 "extra_data must be of the same type as raw_data_for_metrics (dict or list)"
+             assert isinstance(
+                 extra_data, type(raw_metrics_data)
+             ), "extra_data must be of the same type as raw_data_for_metrics (dict or list)"
          # prepare raw_metric data for processing
-         proc_metric_raw_data_ls = raw_metrics_data if isinstance(raw_metrics_data, list) else [raw_metrics_data.copy()]
+         proc_metric_raw_data_ls = (
+             raw_metrics_data
+             if isinstance(raw_metrics_data, list)
+             else [raw_metrics_data.copy()]
+         )
          self.valid_proc_metric_raw_data(metric_names, proc_metric_raw_data_ls)
          # prepare extra data for processing
          proc_extra_data_ls = []
          if extra_data is not None:
-             proc_extra_data_ls = extra_data if isinstance(extra_data, list) else [extra_data.copy()]
-             assert len(proc_extra_data_ls) == len(proc_metric_raw_data_ls), \
-                 "extra_data must have the same length as raw_data_for_metrics if it is a list"
+             proc_extra_data_ls = (
+                 extra_data if isinstance(extra_data, list) else [extra_data.copy()]
+             )
+             assert len(proc_extra_data_ls) == len(
+                 proc_metric_raw_data_ls
+             ), "extra_data must have the same length as raw_data_for_metrics if it is a list"
          # validate the extra_data
          self.valid_proc_extra_data(proc_extra_data_ls)

@@ -102,7 +112,7 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
                  "experiment": self.get_experiment_name(),
              }
              custom_fields = []
-             if len(proc_extra_data_ls)> 0:
+             if len(proc_extra_data_ls) > 0:
                  # add extra data to the output dictionary
                  extra_data_item = proc_extra_data_ls[idx]
                  out_dict.update(extra_data_item)
@@ -110,7 +120,9 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
              metric_results = metrics_backend.calc_metrics(
                  metrics_data_dict=raw_metrics_data, *args, **kwargs
              )
-             metric_results_prefix = {f"metric_{k}": v for k, v in metric_results.items()}
+             metric_results_prefix = {
+                 f"metric_{k}": v for k, v in metric_results.items()
+             }
              out_dict.update(metric_results_prefix)
              ordered_cols = (
                  REQUIRED_COLS + custom_fields + list(metric_results_prefix.keys())
@@ -126,7 +138,7 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
      #! outfile - if provided, will save the output to a CSV file with the given path
      #! outdir - if provided, will save the output to a CSV file in the given directory with a generated filename
      #! return_df - if True, will return a DataFrame instead of a dictionary
-     def calc_and_save_exp_perfs(
+     def calc_perfs(
          self,
          raw_metrics_data: Union[List[dict], dict],
          extra_data: Optional[Union[List[dict], dict]] = None,
@@ -140,9 +152,11 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
          """
          metric_names = self.get_metric_backend().metric_names
          out_dict_list = self.calc_exp_perf_metrics(
-             metric_names=metric_names, raw_metrics_data=raw_metrics_data,
+             metric_names=metric_names,
+             raw_metrics_data=raw_metrics_data,
              extra_data=extra_data,
-             *args, **kwargs
+             *args,
+             **kwargs,
          )
          csv_outfile = kwargs.get("outfile", None)
          if csv_outfile is not None:
@@ -176,13 +190,18 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
          return "__perf.csv" in exp_file_name

      @classmethod
-     def gen_perf_report_for_multip_exps(
-         cls, indir: str, exp_csv_filter_fn=default_exp_csv_filter_fn, include_file_name=False, csv_sep=";"
+     def get_perftb_for_multi_exps(
+         cls,
+         indir: str,
+         exp_csv_filter_fn=default_exp_csv_filter_fn,
+         include_file_name=False,
+         csv_sep=";",
      ) -> PerfTB:
          """
          Generate a performance report by scanning experiment subdirectories.
          Must return a dictionary with keys as metric names and values as performance tables.
          """
+
          def get_df_for_all_exp_perf(csv_perf_files, csv_sep=";"):
              """
              Create a single DataFrame from all CSV files.
@@ -194,7 +213,9 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
              for csv_file in csv_perf_files:
                  temp_df = pd.read_csv(csv_file, sep=csv_sep)
                  if FILE_NAME_COL:
-                     temp_df[FILE_NAME_COL] = fs.get_file_name(csv_file, split_file_ext=False)
+                     temp_df[FILE_NAME_COL] = fs.get_file_name(
+                         csv_file, split_file_ext=False
+                     )
                  # csvfile.fn_display_df(temp_df)
                  temp_df_cols = temp_df.columns.tolist()
                  for col in temp_df_cols:
@@ -205,7 +226,9 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
              for csv_file in csv_perf_files:
                  temp_df = pd.read_csv(csv_file, sep=csv_sep)
                  if FILE_NAME_COL:
-                     temp_df[FILE_NAME_COL] = fs.get_file_name(csv_file, split_file_ext=False)
+                     temp_df[FILE_NAME_COL] = fs.get_file_name(
+                         csv_file, split_file_ext=False
+                     )
                  # Drop all-NA columns to avoid dtype inconsistency
                  temp_df = temp_df.dropna(axis=1, how="all")
                  # ensure all columns are present in the final DataFrame
@@ -215,7 +238,9 @@ class PerfCalc(ABC): # Abstract base class for performance calculation
                  df = pd.concat([df, temp_df], ignore_index=True)
              # assert that REQUIRED_COLS are present in the DataFrame
              # pprint(df.columns.tolist())
-             sticky_cols = REQUIRED_COLS + ([FILE_NAME_COL] if include_file_name else []) # columns that must always be present
+             sticky_cols = REQUIRED_COLS + (
+                 [FILE_NAME_COL] if include_file_name else []
+             ) # columns that must always be present
              for col in sticky_cols:
                  if col not in df.columns:
                      raise ValueError(
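
Putting the renamed entry points together (`calc_perfs`, formerly `calc_and_save_exp_perfs`, and `get_perftb_for_multi_exps`, formerly `gen_perf_report_for_multip_exps`), a hedged usage sketch; `exp` stands for an instance of some concrete subclass, and the inner structure of each metric's raw data depends on the configured backend:

```python
def save_run_perfs(exp):
    """Sketch only: exp is assumed to be a concrete PerfCalc/BaseExperiment subclass instance."""
    raw_metrics_data = {"accuracy": {"preds": [0, 1, 1], "target": [0, 1, 0]}}
    extra_data = {"fold": 1, "epochs": 50}
    return exp.calc_perfs(
        raw_metrics_data=raw_metrics_data,   # dict, or list of dicts keyed by metric name
        extra_data=extra_data,               # same type (and length) as raw_metrics_data
        outdir="results",                    # write a *__perf.csv into this directory
        return_df=True,                      # return a DataFrame instead of a dict
    )


# Later, every *__perf.csv under a directory can be aggregated into one PerfTB:
# perftb = SomeExperiment.get_perftb_for_multi_exps(indir="results")   # hypothetical subclass
```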
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: halib
- Version: 0.1.99
+ Version: 0.2.1
  Summary: Small library for common tasks
  Author: Hoang Van Ha
  Author-email: hoangvanhauit@gmail.com
@@ -53,6 +53,9 @@ Dynamic: summary

  # Helper package for coding and automation

+ **Version 0.2.1**
+ + `research/base_exp`: add `eval_exp` method to evaluate an experiment (e.g., model evaluation on the test set) after the experiment run is done.
+
  **Version 0.1.99**
  + `filetype/ipynb`: add `gen_ipynb_name` generator to create file name based on current notebook name as prefix (with optional timestamp)
@@ -30,15 +30,15 @@ halib/online/gdrive_test.py,sha256=hMWzz4RqZwETHp4GG4WwVNFfYvFQhp2Boz5t-DqwMo0,1
  halib/online/projectmake.py,sha256=Zrs96WgXvO4nIrwxnCOletL4aTBge-EoF0r7hpKO1w8,4034
  halib/research/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  halib/research/base_config.py,sha256=AqZHZ0NNQ3WmUOfRzs36lf3o0FrehSdVLbdmgNpbV7A,2833
- halib/research/base_exp.py,sha256=hiO2flt_I0iJJ4bWcQwyh2ISezoC8t2k3PtxHeVr0eI,3278
+ halib/research/base_exp.py,sha256=mC2bcG3M-dIvAkLw3d4O4Nu0HyvfrmvHvHX4iW8qurY,5749
  halib/research/benchquery.py,sha256=FuKnbWQtCEoRRtJAfN-zaN-jPiO_EzsakmTOMiqi7GQ,4626
- halib/research/dataset.py,sha256=avMRM9Jk3tq0P7iX1Jq-kq5d_3en3lcUlAy1dAtGSEU,6734
+ halib/research/dataset.py,sha256=X6lob-pcndC2aOP0fQ3z8aUnNVVR95ia4tiuxIAXppk,6734
  halib/research/flop_csv.py,sha256=JeIUWgPFmhkPqvmhe-MLwwvAu9yR5F2k3qaViJCJJD4,1148
  halib/research/flops.py,sha256=Us0VudX8QMOm7YenICGf-Tq57C_l9x9hj-MUGA8_hCg,5773
- halib/research/metrics.py,sha256=PXPCy8r1_0lpMKfjc5SjIpRHnX80gHmeZ1C4eVj9U_s,5200
- halib/research/mics.py,sha256=nZyric8d0yKP5HrwwLsN4AjszrdxAhpJCRo1oy-EKJI,2612
+ halib/research/metrics.py,sha256=qRiNiCKGUSTLY7gPMVMuVHGAAyeosfGWup2eM4490aw,5485
+ halib/research/mics.py,sha256=kFl7IsZP1N-qkogvuFojkDNr1Qf0kce9h--qOnJ_3gk,2637
  halib/research/params_gen.py,sha256=GcTMlniL0iE3HalJY-gVRiYa8Qy8u6nX4LkKZeMkct8,4262
- halib/research/perfcalc.py,sha256=G8WpGB95AY5KQCt0__bPK1yUa2M1onNhXLM7twkElxg,15904
+ halib/research/perfcalc.py,sha256=gjQh1D7CyEjocX5rusAzygyGFrb_s9Ob3Lqs4lIDGxg,16228
  halib/research/perftb.py,sha256=YlBXMeWn8S0LhsgxONEQZrKomRTju2T8QGGspUOy_6Y,31100
  halib/research/plot.py,sha256=GBCXP1QnzRlNqjAl9UvGvW3I9II61DBStJNQThrLy38,28578
  halib/research/profiler.py,sha256=GRAewTo0jGkOputjmRwtYVfJYBze_ivsOnrW9exWkPQ,11772
@@ -57,8 +57,8 @@ halib/utils/gpu_mon.py,sha256=vD41_ZnmPLKguuq9X44SB_vwd9JrblO4BDzHLXZhhFY,2233
  halib/utils/listop.py,sha256=Vpa8_2fI0wySpB2-8sfTBkyi_A4FhoFVVvFiuvW8N64,339
  halib/utils/tele_noti.py,sha256=-4WXZelCA4W9BroapkRyIdUu9cUVrcJJhegnMs_WpGU,5928
  halib/utils/video.py,sha256=zLoj5EHk4SmP9OnoHjO8mLbzPdtq6gQPzTQisOEDdO8,3261
- halib-0.1.99.dist-info/licenses/LICENSE.txt,sha256=qZssdna4aETiR8znYsShUjidu-U4jUT9Q-EWNlZ9yBQ,1100
- halib-0.1.99.dist-info/METADATA,sha256=1v3WnxBK6Aj36jA7tIycuFGQwJs19uLMi_EhSMpvmaU,6387
- halib-0.1.99.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- halib-0.1.99.dist-info/top_level.txt,sha256=7AD6PLaQTreE0Fn44mdZsoHBe_Zdd7GUmjsWPyQ7I-k,6
- halib-0.1.99.dist-info/RECORD,,
+ halib-0.2.1.dist-info/licenses/LICENSE.txt,sha256=qZssdna4aETiR8znYsShUjidu-U4jUT9Q-EWNlZ9yBQ,1100
+ halib-0.2.1.dist-info/METADATA,sha256=VZ-msDI3P56gyQR12cQm1Ea6HZ7A6c1tXAHtuSzadJU,6548
+ halib-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ halib-0.2.1.dist-info/top_level.txt,sha256=7AD6PLaQTreE0Fn44mdZsoHBe_Zdd7GUmjsWPyQ7I-k,6
+ halib-0.2.1.dist-info/RECORD,,