halib 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. halib/__init__.py +3 -3
  2. halib/common/__init__.py +0 -0
  3. halib/common/common.py +178 -0
  4. halib/common/rich_color.py +285 -0
  5. halib/filetype/csvfile.py +3 -9
  6. halib/filetype/ipynb.py +3 -5
  7. halib/filetype/jsonfile.py +0 -3
  8. halib/filetype/textfile.py +0 -1
  9. halib/filetype/videofile.py +91 -2
  10. halib/filetype/yamlfile.py +3 -3
  11. halib/online/projectmake.py +7 -6
  12. halib/online/tele_noti.py +165 -0
  13. halib/research/core/__init__.py +0 -0
  14. halib/research/core/base_config.py +144 -0
  15. halib/research/core/base_exp.py +157 -0
  16. halib/research/core/param_gen.py +108 -0
  17. halib/research/core/wandb_op.py +117 -0
  18. halib/research/data/__init__.py +0 -0
  19. halib/research/data/dataclass_util.py +41 -0
  20. halib/research/data/dataset.py +208 -0
  21. halib/research/data/torchloader.py +165 -0
  22. halib/research/perf/__init__.py +0 -0
  23. halib/research/perf/flop_calc.py +190 -0
  24. halib/research/perf/gpu_mon.py +58 -0
  25. halib/research/perf/perfcalc.py +363 -0
  26. halib/research/perf/perfmetrics.py +137 -0
  27. halib/research/perf/perftb.py +778 -0
  28. halib/research/perf/profiler.py +301 -0
  29. halib/research/viz/__init__.py +0 -0
  30. halib/research/viz/plot.py +754 -0
  31. halib/system/filesys.py +60 -20
  32. halib/system/path.py +73 -0
  33. halib/utils/dict.py +9 -0
  34. halib/utils/list.py +12 -0
  35. {halib-0.2.1.dist-info → halib-0.2.2.dist-info}/METADATA +4 -1
  36. {halib-0.2.1.dist-info → halib-0.2.2.dist-info}/RECORD +39 -14
  37. {halib-0.2.1.dist-info → halib-0.2.2.dist-info}/WHEEL +0 -0
  38. {halib-0.2.1.dist-info → halib-0.2.2.dist-info}/licenses/LICENSE.txt +0 -0
  39. {halib-0.2.1.dist-info → halib-0.2.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,41 @@
+ import yaml
+ from typing import Any
+
+ from rich.pretty import pprint
+ from dataclasses import make_dataclass
+
+ from ...filetype import yamlfile
+
+ def dict_to_dataclass(name: str, data: dict):
+     fields = []
+     values = {}
+
+     for key, value in data.items():
+         if isinstance(value, dict):
+             sub_dc = dict_to_dataclass(key.capitalize(), value)
+             fields.append((key, type(sub_dc)))
+             values[key] = sub_dc
+         else:
+             field_type = type(value) if value is not None else Any
+             fields.append((key, field_type))
+             values[key] = value
+
+     DC = make_dataclass(name.capitalize(), fields)
+     return DC(**values)
+
+ def yaml_to_dataclass(name: str, yaml_str: str):
+     data = yaml.safe_load(yaml_str)
+     return dict_to_dataclass(name, data)
+
+
+ def yamlfile_to_dataclass(name: str, file_path: str):
+     data_dict = yamlfile.load_yaml(file_path, to_dict=True)
+     if "__base__" in data_dict:
+         del data_dict["__base__"]
+     return dict_to_dataclass(name, data_dict)
+
+ if __name__ == "__main__":
+     cfg = yamlfile_to_dataclass("Config", "test/dataclass_util_test_cfg.yaml")
+
+     # ! NOTICE: after printing this dataclass, we can copy the output and paste it into ChatGPT to generate the needed dataclass definitions using `from dataclass_wizard import YAMLWizard`
+     pprint(cfg)
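For reference, a minimal usage sketch of the converter above (the YAML content and field names are made up for illustration): nested mappings become nested, dynamically generated dataclasses, so YAML keys turn into attributes.

from rich.pretty import pprint

yaml_str = """
model:
  name: resnet18
  lr: 0.001
epochs: 10
"""
cfg = yaml_to_dataclass("Config", yaml_str)
pprint(cfg)            # roughly: Config(model=Model(name='resnet18', lr=0.001), epochs=10)
print(cfg.model.name)  # nested dicts are reachable as attributes -> 'resnet18'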
@@ -0,0 +1,208 @@
+ # This script creates a test version
+ # of the watcam (wc) dataset
+ # for testing the tflite model
+
+ from argparse import ArgumentParser
+
+ import os
+ import click
+ import shutil
+ from tqdm import tqdm
+ from rich import inspect
+ from rich.pretty import pprint
+ from torchvision.datasets import ImageFolder
+ from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
+
+ from ...common.common import console, seed_everything, ConsoleLog
+ from ...system import filesys as fs
+
+ def parse_args():
+     parser = ArgumentParser(description="desc text")
+     parser.add_argument(
+         "-indir",
+         "--indir",
+         type=str,
+         help="original dataset path",
+     )
+     parser.add_argument(
+         "-outdir",
+         "--outdir",
+         type=str,
+         help="dataset out path",
+         default=".",  # default to current dir
+     )
+     parser.add_argument(
+         "-val_size",
+         "--val_size",
+         type=float,
+         help="validation size",
+         default=0.2,
+     )
+     parser.add_argument(
+         "-seed",
+         "--seed",
+         type=int,
+         help="random seed",
+         default=42,
+     )
+     parser.add_argument(
+         "-inplace",
+         "--inplace",
+         action="store_true",
+         help="inplace operation, will overwrite the outdir if it exists",
+     )
+     # choose between StratifiedShuffleSplit and ShuffleSplit
+     parser.add_argument(
+         "-stratified",
+         "--stratified",
+         action="store_true",
+         help="use StratifiedShuffleSplit instead of ShuffleSplit",
+     )
+     parser.add_argument(
+         "-no_train",
+         "--no_train",
+         action="store_true",
+         help="only create test set, no train set",
+     )
+     parser.add_argument(
+         "-reverse",
+         "--reverse",
+         action="store_true",
+         help="combine train and val sets back into the original dataset",
+     )
+     return parser.parse_args()
+
+
+ def move_images(image_paths, target_set_dir):
+     for img_path in tqdm(image_paths):
+         # get folder name of the image
+         img_dir = os.path.dirname(img_path)
+         out_cls_dir = os.path.join(target_set_dir, os.path.basename(img_dir))
+         if not os.path.exists(out_cls_dir):
+             os.makedirs(out_cls_dir)
+         # move the image to the class folder
+         shutil.move(img_path, out_cls_dir)
+
+
+ def split_dataset_cls(
+     indir, outdir, val_size, seed, inplace, stratified_split, no_train
+ ):
+     seed_everything(seed)
+     console.rule("Config confirm?")
+     pprint(locals())
+     click.confirm("Continue?", abort=True)
+     assert os.path.exists(indir), f"{indir} does not exist"
+
+     if not inplace:
+         assert not os.path.exists(outdir), f"{outdir} already exists; SKIP ...."
+
+     if inplace:
+         outdir = indir
+     if not os.path.exists(outdir):
+         os.makedirs(outdir)
+
+     console.rule("Creating train/val dataset")
+
+     sss = (
+         ShuffleSplit(n_splits=1, test_size=val_size)
+         if not stratified_split
+         else StratifiedShuffleSplit(n_splits=1, test_size=val_size)
+     )
+
+     pprint({"split strategy": sss, "indir": indir, "outdir": outdir})
+     dataset = ImageFolder(
+         root=indir,
+         transform=None,
+     )
+     train_dataset_indices = None
+     val_dataset_indices = None  # val here means test
+     for train_indices, val_indices in sss.split(dataset.samples, dataset.targets):
+         train_dataset_indices = train_indices
+         val_dataset_indices = val_indices
+
+     # get image paths for train/val split dataset
+     train_image_paths = [dataset.imgs[i][0] for i in train_dataset_indices]
+     val_image_paths = [dataset.imgs[i][0] for i in val_dataset_indices]
+
+     # start creating train/val folders then move images
+     out_train_dir = os.path.join(outdir, "train")
+     out_val_dir = os.path.join(outdir, "val")
+     if inplace:
+         assert not os.path.exists(out_train_dir), f"{out_train_dir} already exists"
+         assert not os.path.exists(out_val_dir), f"{out_val_dir} already exists"
+
+     os.makedirs(out_train_dir)
+     os.makedirs(out_val_dir)
+
+     if not no_train:
+         with ConsoleLog(f"Moving train images to {out_train_dir} "):
+             move_images(train_image_paths, out_train_dir)
+     else:
+         pprint("test only, skip moving train images")
+         # remove out_train_dir
+         shutil.rmtree(out_train_dir)
+
+     with ConsoleLog(f"Moving val images to {out_val_dir} "):
+         move_images(val_image_paths, out_val_dir)
+
+     if inplace:
+         pprint("removing all folders except train and val")
+         for cls_dir in os.listdir(outdir):
+             if cls_dir not in ["train", "val"]:
+                 shutil.rmtree(os.path.join(outdir, cls_dir))
+
+
+ def reverse_split_ds(indir):
+     console.rule(f"Reversing split dataset <{indir}>...")
+     ls_dirs = os.listdir(indir)
+     # make sure there are only two dirs: 'train' and 'val'
+     assert len(ls_dirs) == 2, f"Expected exactly 2 dirs, found {len(ls_dirs)}"
+     assert "train" in ls_dirs, f"train dir not found in {indir}"
+     assert "val" in ls_dirs, f"val dir not found in {indir}"
+     train_dir = os.path.join(indir, "train")
+     val_dir = os.path.join(indir, "val")
+     all_train_files = fs.filter_files_by_extension(
+         train_dir, ["jpg", "jpeg", "png", "bmp", "gif", "tiff"]
+     )
+     all_val_files = fs.filter_files_by_extension(
+         val_dir, ["jpg", "jpeg", "png", "bmp", "gif", "tiff"]
+     )
+     # move all files from train to indir
+     with ConsoleLog(f"Moving train images to {indir} "):
+         move_images(all_train_files, indir)
+     with ConsoleLog(f"Moving val images to {indir} "):
+         move_images(all_val_files, indir)
+     with ConsoleLog("Removing train and val dirs"):
+         # remove train and val dirs
+         shutil.rmtree(train_dir)
+         shutil.rmtree(val_dir)
+
+
+ def main():
+     args = parse_args()
+     indir = args.indir
+     outdir = args.outdir
+     if outdir == ".":
+         # derive outdir from the parent folder of indir
+         indir_parent_dir = os.path.dirname(os.path.normpath(indir))
+         indir_name = os.path.basename(indir)
+         outdir = os.path.join(indir_parent_dir, f"{indir_name}_split")
+     val_size = args.val_size
+     seed = args.seed
+     inplace = args.inplace
+     stratified_split = args.stratified
+     no_train = args.no_train
+     reverse = args.reverse
+     if not reverse:
+         split_dataset_cls(
+             indir, outdir, val_size, seed, inplace, stratified_split, no_train
+         )
+     else:
+         reverse_split_ds(indir)
+
+
+ if __name__ == "__main__":
+     main()
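As a quick orientation, a hedged sketch of invoking the splitter directly (paths are hypothetical, and the function prompts for confirmation via click before touching the filesystem):

# Equivalent CLI call: --indir data/watcam --stratified (outdir derived as data/watcam_split)
split_dataset_cls(
    indir="data/watcam",          # hypothetical ImageFolder-style root: one subfolder per class
    outdir="data/watcam_split",   # receives train/ and val/ subfolders
    val_size=0.2,
    seed=42,
    inplace=False,                # keep the original dataset untouched
    stratified_split=True,        # StratifiedShuffleSplit preserves class ratios
    no_train=False,               # also materialize the train split
)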
@@ -0,0 +1,165 @@
+ """
+ * @author Hoang Van-Ha
+ * @email hoangvanhauit@gmail.com
+ * @create date 2024-03-27 15:40:22
+ * @modify date 2024-03-27 15:40:22
+ * @desc this module works as a utility tool for finding the best dataloader configuration (num_workers, batch_size, pin_memory, etc.) that fits your hardware.
+ """
+ from argparse import ArgumentParser
+
+ import os
+ import time
+ import traceback
+
+ from tqdm import tqdm
+ from rich import inspect
+ from typing import Union
+ import itertools as it  # for cartesian product
+
+ from torch.utils.data import DataLoader
+ from torchvision import datasets, transforms
+
+ from ...common.common import *
+ from ...filetype import csvfile
+ from ...filetype.yamlfile import load_yaml
+
+ def parse_args():
+     parser = ArgumentParser(description="desc text")
+     parser.add_argument("-cfg", "--cfg", type=str, help="cfg file for searching")
+     return parser.parse_args()
+
+
+ def get_test_range(cfg: dict, search_item="num_workers"):
+     item_search_cfg = cfg["search_space"].get(search_item, None)
+     if item_search_cfg is None:
+         raise ValueError(f"search_item: {search_item} not found in cfg")
+     if isinstance(item_search_cfg, list):
+         return item_search_cfg
+     elif isinstance(item_search_cfg, dict):
+         if "mode" in item_search_cfg:
+             mode = item_search_cfg["mode"]
+             assert mode in ["range", "list"], f"mode: {mode} not supported"
+             value_in_mode = item_search_cfg.get(mode, None)
+             if value_in_mode is None:
+                 raise ValueError(f"mode<{mode}>: data not found in <{search_item}>")
+             if mode == "range":
+                 assert len(value_in_mode) == 3, "range must have 3 values: start, stop, step"
+                 start, stop, step = value_in_mode
+                 return list(range(start, stop, step))
+             elif mode == "list":
+                 return item_search_cfg["list"]
+         else:
+             raise ValueError(f"dict config for <{search_item}> must contain a 'mode' key")
+     else:
+         return [item_search_cfg]  # for int, float, str, bool, etc.
+
+
+ def load_one_batch(loader_iter):
+     start = time.time()
+     next(loader_iter)
+     end = time.time()
+     return end - start
+
+
+ def test_dataloader_with_cfg(origin_dataloader: DataLoader, cfg: Union[dict, str]):
+     try:
+         if isinstance(cfg, str):
+             cfg = load_yaml(cfg, to_dict=True)
+         dfmk = csvfile.DFCreator()
+         search_items = ["batch_size", "num_workers", "persistent_workers", "pin_memory"]
+         batch_limit = cfg["general"]["batch_limit"]
+         csv_cfg = cfg["general"]["to_csv"]
+         log_batch_info = cfg["general"]["log_batch_info"]
+
+         save_to_csv = csv_cfg["enabled"]
+         log_dir = csv_cfg["log_dir"]
+         filename = csv_cfg["filename"]
+         filename = f"{now_str()}_{filename}.csv"
+         outfile = os.path.join(log_dir, filename)
+
+         dfmk.create_table(
+             "cfg_search",
+             (search_items + ["avg_time_taken"]),
+         )
+         ls_range_test = []
+         for item in search_items:
+             range_test = get_test_range(cfg, search_item=item)
+             range_test = [(item, i) for i in range_test]
+             ls_range_test.append(range_test)
+
+         all_combinations = list(it.product(*ls_range_test))
+
+         rows = []
+         for cfg_idx, combine in enumerate(all_combinations):
+             console.rule(f"Testing cfg {cfg_idx+1}/{len(all_combinations)}")
+             inspect(combine)
+             batch_size = combine[search_items.index("batch_size")][1]
+             num_workers = combine[search_items.index("num_workers")][1]
+             persistent_workers = combine[search_items.index("persistent_workers")][1]
+             pin_memory = combine[search_items.index("pin_memory")][1]
+
+             test_dataloader = DataLoader(
+                 origin_dataloader.dataset,
+                 batch_size=batch_size,
+                 num_workers=num_workers,
+                 persistent_workers=persistent_workers,
+                 pin_memory=pin_memory,
+                 shuffle=True,
+             )
+             row = [
+                 batch_size,
+                 num_workers,
+                 persistent_workers,
+                 pin_memory,
+                 0.0,
+             ]
+
+             # calculate the avg time taken to load <batch_limit> batches
+             trainiter = iter(test_dataloader)
+             time_elapsed = 0
+             pprint("Start testing...")
+             for i in tqdm(range(batch_limit)):
+                 single_batch_time = load_one_batch(trainiter)
+                 if log_batch_info:
+                     pprint(f"Batch {i+1} took {single_batch_time:.4f} seconds to load")
+                 time_elapsed += single_batch_time
+             row[-1] = time_elapsed / batch_limit
+             rows.append(row)
+         dfmk.insert_rows("cfg_search", rows)
+         dfmk.fill_table_from_row_pool("cfg_search")
+         with ConsoleLog("results"):
+             csvfile.fn_display_df(dfmk["cfg_search"])
+         if save_to_csv:
+             dfmk["cfg_search"].to_csv(outfile, index=False)
+             console.print(f"[red] Data saved to <{outfile}> [/red]")
+
+     except Exception as e:
+         traceback.print_exc()
+         print(e)
+         # get the directory of this python file
+         current_dir = os.path.dirname(os.path.realpath(__file__))
+         standard_cfg_path = os.path.join(current_dir, "torchloader_search.yaml")
+         pprint(
+             f"Make sure you get the right <cfg.yaml> file. An example of <cfg.yaml> file can be found at this path: {standard_cfg_path}"
+         )
+         return
+
+ def main():
+     args = parse_args()
+     cfg_yaml = args.cfg
+     cfg_dict = load_yaml(cfg_yaml, to_dict=True)
+
+     # Define transforms for data augmentation and normalization
+     transform = transforms.Compose(
+         [
+             transforms.RandomHorizontalFlip(),  # Randomly flip images horizontally
+             transforms.RandomRotation(10),  # Randomly rotate images by 10 degrees
+             transforms.ToTensor(),  # Convert images to PyTorch tensors
+             transforms.Normalize(
+                 (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
+             ),  # Normalize pixel values to [-1, 1]
+         ]
+     )
+     test_dataset = datasets.CIFAR10(
+         root="./data", train=False, download=True, transform=transform
+     )
+     batch_size = 64
+     train_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
+     test_dataloader_with_cfg(train_loader, cfg_dict)
+
+
+ if __name__ == "__main__":
+     main()
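To make the expected config concrete, here is a sketch of the dict that test_dataloader_with_cfg reads (equivalent to the YAML passed via -cfg). The key names are taken from the code above; the values are illustrative.

cfg = {
    "general": {
        "batch_limit": 20,  # number of batches timed per combination
        "log_batch_info": False,
        "to_csv": {"enabled": True, "log_dir": "zout", "filename": "loader_search"},
    },
    "search_space": {
        # a dict with mode=range expands to list(range(start, stop, step))
        "num_workers": {"mode": "range", "range": [0, 9, 2]},
        # a dict with mode=list, or a bare list, is used as-is
        "batch_size": {"mode": "list", "list": [32, 64, 128]},
        "persistent_workers": [True, False],
        # a scalar is wrapped into a one-element list
        "pin_memory": True,
    },
}
# test_dataloader_with_cfg(train_loader, cfg)  # train_loader as built in main()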
File without changes
@@ -0,0 +1,190 @@
+ import os
+ import sys
+ import torch
+ import timm
+ from argparse import ArgumentParser
+ from fvcore.nn import FlopCountAnalysis
+ from halib import *
+ from halib.filetype import csvfile
+ from curriculum.utils.config import *
+ from curriculum.utils.model_helper import *
+
+
+ # ---------------------------------------------------------------------
+ # Argument Parser
+ # ---------------------------------------------------------------------
+ def parse_args():
+     parser = ArgumentParser(description="Calculate FLOPs for TIMM or trained models")
+
+     # Option 1: Direct TIMM model
+     parser.add_argument(
+         "--model_name", type=str, help="TIMM model name (e.g., efficientnet_b0)"
+     )
+     parser.add_argument(
+         "--num_classes", type=int, default=1000, help="Number of output classes"
+     )
+
+     # Option 2: Experiment directory
+     parser.add_argument(
+         "--indir",
+         type=str,
+         default=None,
+         help="Directory containing trained experiment (with .yaml and .pth)",
+     )
+     parser.add_argument(
+         "-o", "--o", action="store_true", help="Open output CSV after saving"
+     )
+     return parser.parse_args()
+
+
+ # ---------------------------------------------------------------------
+ # Helper Functions
+ # ---------------------------------------------------------------------
+ def _get_list_of_proc_dirs(indir):
+     assert os.path.exists(indir), f"Input directory {indir} does not exist."
+     pth_files = [f for f in os.listdir(indir) if f.endswith(".pth")]
+     if len(pth_files) > 0:
+         return [indir]
+     return [
+         os.path.join(indir, f)
+         for f in os.listdir(indir)
+         if os.path.isdir(os.path.join(indir, f))
+     ]
+
+
+ def _calculate_flops_for_model(model_name, num_classes):
+     """Calculate FLOPs for a plain TIMM model."""
+     try:
+         model = timm.create_model(model_name, pretrained=False, num_classes=num_classes)
+         input_size = timm.data.resolve_data_config(model.default_cfg)["input_size"]
+         dummy_input = torch.randn(1, *input_size)
+         model.eval()  # ! set to eval mode to avoid some warnings or errors
+         flops = FlopCountAnalysis(model, dummy_input)
+         gflops = flops.total() / 1e9
+         mflops = flops.total() / 1e6
+         print(f"\nModel: **{model_name}**, Classes: {num_classes}")
+         print(f"Input size: {input_size}, FLOPs: **{gflops:.3f} GFLOPs**, **{mflops:.3f} MFLOPs**\n")
+         return model_name, gflops, mflops
+     except Exception as e:
+         print(f"[Error] Could not calculate FLOPs for {model_name}: {e}")
+         return model_name, -1, -1
+
+
+ def _calculate_flops_for_experiment(exp_dir):
+     """Calculate FLOPs for a trained experiment directory."""
+     yaml_files = [f for f in os.listdir(exp_dir) if f.endswith(".yaml")]
+     pth_files = [f for f in os.listdir(exp_dir) if f.endswith(".pth")]
+
+     assert (
+         len(yaml_files) == 1
+     ), f"Expected 1 YAML file in {exp_dir}, found {len(yaml_files)}"
+     assert (
+         len(pth_files) == 1
+     ), f"Expected 1 PTH file in {exp_dir}, found {len(pth_files)}"
+
+     exp_cfg_yaml = os.path.join(exp_dir, yaml_files[0])
+     cfg = ExpConfig.from_yaml(exp_cfg_yaml)
+     ds_label_list = cfg.dataset.get_label_list()
+
+     try:
+         model = build_model(
+             cfg.model.name, num_classes=len(ds_label_list), pretrained=True
+         )
+         model_weights_path = os.path.join(exp_dir, pth_files[0])
+         model.load_state_dict(torch.load(model_weights_path, map_location="cpu"))
+         model.eval()
+
+         input_size = timm.data.resolve_data_config(model.default_cfg)["input_size"]
+         dummy_input = torch.randn(1, *input_size)
+         flops = FlopCountAnalysis(model, dummy_input)
+         gflops = flops.total() / 1e9
+         mflops = flops.total() / 1e6
+
+         return str(cfg), cfg.model.name, gflops, mflops
+     except Exception as e:
+         console.print(f"[red] Error processing {exp_dir}: {e}[/red]")
+         return str(cfg), cfg.model.name, -1, -1
+
+
+ # ---------------------------------------------------------------------
+ # Main Entry
+ # ---------------------------------------------------------------------
+ def main():
+     args = parse_args()
+
+     # Case 1: Direct TIMM model input
+     if args.model_name:
+         _calculate_flops_for_model(args.model_name, args.num_classes)
+         return
+
+     # Case 2: Experiment directory input
+     if args.indir is None:
+         print("[Error] Either --model_name or --indir must be specified.")
+         return
+
+     proc_dirs = _get_list_of_proc_dirs(args.indir)
+     pprint(proc_dirs)
+
+     dfmk = csvfile.DFCreator()
+     TABLE_NAME = "model_flops_results"
+     dfmk.create_table(TABLE_NAME, ["exp_name", "model_name", "gflops", "mflops"])
+
+     console.rule(f"Calculating FLOPs for models in {len(proc_dirs)} dir(s)...")
+     rows = []
+     for exp_dir in tqdm(proc_dirs):
+         dir_name = os.path.basename(exp_dir)
+         console.rule(dir_name)
+         exp_name, model_name, gflops, mflops = _calculate_flops_for_experiment(exp_dir)
+         rows.append([exp_name, model_name, gflops, mflops])
+
+     dfmk.insert_rows(TABLE_NAME, rows)
+     dfmk.fill_table_from_row_pool(TABLE_NAME)
+
+     outfile = f"zout/zreport/{now_str()}_model_flops_results.csv"
+     dfmk[TABLE_NAME].to_csv(outfile, sep=";", index=False)
+     csvfile.fn_display_df(dfmk[TABLE_NAME])
+
+     if args.o:
+         os.system(f"start {outfile}")
+
+
+ # ---------------------------------------------------------------------
+ # Script Entry
+ # ---------------------------------------------------------------------
+ # flop_csv.py
+ # if __name__ == "__main__":
+ #     sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+ #     main()
+
+
+ # def main():
+ #     csv_file = "./results-imagenet.csv"
+ #     df = pd.read_csv(csv_file)
+ #     # make param_count column a float
+ #     # df['param_count'] = df['param_count'].astype(float)
+ #     df["param_count"] = (
+ #         pd.to_numeric(df["param_count"], errors="coerce").fillna(99999).astype(float)
+ #     )
+ #     df = df[df["param_count"] < 5.0]  # filter models with param_count < 5M
+
+ #     dict_ls = []
+
+ #     for index, row in tqdm(df.iterrows()):
+ #         console.rule(f"Row {index+1}/{len(df)}")
+ #         model = row["model"]
+ #         num_class = 2
+ #         _, _, mflops = _calculate_flops_for_model(model, num_class)
+ #         dict_ls.append(
+ #             {"model": model, "param_count": row["param_count"], "mflops": mflops}
+ #         )
+
+ #     # Create a DataFrame from the list of dictionaries
+ #     result_df = pd.DataFrame(dict_ls)
+
+ #     final_df = pd.merge(df, result_df, on=["model", "param_count"])
+ #     final_df.sort_values(by="mflops", inplace=True, ascending=True)
+ #     csvfile.fn_display_df(final_df)
+
+
+ # if __name__ == "__main__":
+ #     main()
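For the direct TIMM path (Option 1), the core measurement reduces to a few lines. A minimal standalone sketch, assuming timm and fvcore are installed (the model name is an example):

import timm
import torch
from fvcore.nn import FlopCountAnalysis

model = timm.create_model("efficientnet_b0", pretrained=False, num_classes=1000)
model.eval()  # avoid training-mode warnings during tracing
input_size = timm.data.resolve_data_config(model.default_cfg)["input_size"]
flops = FlopCountAnalysis(model, torch.randn(1, *input_size))
print(f"{flops.total() / 1e9:.3f} GFLOPs")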
@@ -0,0 +1,58 @@
+ # install the `pynvml_utils` package first
+ # see this repo: https://github.com/gpuopenanalytics/pynvml
+ from pynvml_utils import nvidia_smi
+ import time
+ import threading
+ from rich.pretty import pprint
+
+ class GPUMonitor:
+     def __init__(self, gpu_index=0, interval=0.01):
+         self.nvsmi = nvidia_smi.getInstance()
+         self.gpu_index = gpu_index
+         self.interval = interval
+         self.gpu_stats = []
+         self._running = False
+         self._thread = None
+
+     def _monitor(self):
+         while self._running:
+             stats = self.nvsmi.DeviceQuery("power.draw, memory.used")["gpu"][
+                 self.gpu_index
+             ]
+             # pprint(stats)
+             self.gpu_stats.append(
+                 {
+                     "power": stats["power_readings"]["power_draw"],
+                     "power_unit": stats["power_readings"]["unit"],
+                     "memory": stats["fb_memory_usage"]["used"],
+                     "memory_unit": stats["fb_memory_usage"]["unit"],
+                 }
+             )
+             time.sleep(self.interval)
+
+     def start(self):
+         if not self._running:
+             self._running = True
+             # clear previous stats
+             self.gpu_stats.clear()
+             self._thread = threading.Thread(target=self._monitor)
+             self._thread.start()
+
+     def stop(self):
+         if self._running:
+             self._running = False
+             self._thread.join()
+             # clear the thread reference
+             self._thread = None
+
+     def get_stats(self):
+         assert self._running is False, "GPU monitor is still running. Stop it first."
+
+         powers = [s["power"] for s in self.gpu_stats if s["power"] is not None]
+         memories = [s["memory"] for s in self.gpu_stats if s["memory"] is not None]
+         avg_power = sum(powers) / len(powers) if powers else 0
+         max_memory = max(memories) if memories else 0
+         # power_unit = self.gpu_stats[0]["power_unit"] if self.gpu_stats else "W"
+         # memory_unit = self.gpu_stats[0]["memory_unit"] if self.gpu_stats else "MiB"
+         return {"gpu_avg_power": avg_power, "gpu_avg_max_memory": max_memory}