halib 0.2.1__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {halib-0.2.1 → halib-0.2.2}/.gitignore +0 -1
  2. {halib-0.2.1 → halib-0.2.2}/PKG-INFO +4 -1
  3. {halib-0.2.1 → halib-0.2.2}/README.md +3 -0
  4. {halib-0.2.1 → halib-0.2.2}/halib/__init__.py +3 -3
  5. {halib-0.2.1/halib → halib-0.2.2/halib/common}/common.py +32 -5
  6. {halib-0.2.1 → halib-0.2.2}/halib/filetype/csvfile.py +3 -9
  7. {halib-0.2.1 → halib-0.2.2}/halib/filetype/ipynb.py +3 -5
  8. {halib-0.2.1 → halib-0.2.2}/halib/filetype/jsonfile.py +0 -3
  9. {halib-0.2.1 → halib-0.2.2}/halib/filetype/textfile.py +0 -1
  10. {halib-0.2.1 → halib-0.2.2}/halib/filetype/videofile.py +91 -2
  11. {halib-0.2.1 → halib-0.2.2}/halib/filetype/yamlfile.py +3 -3
  12. {halib-0.2.1 → halib-0.2.2}/halib/online/projectmake.py +7 -6
  13. {halib-0.2.1/halib/utils → halib-0.2.2/halib/online}/tele_noti.py +1 -2
  14. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/core}/base_config.py +44 -0
  15. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/core}/base_exp.py +3 -3
  16. halib-0.2.1/halib/research/params_gen.py → halib-0.2.2/halib/research/core/param_gen.py +6 -6
  17. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/core}/wandb_op.py +5 -4
  18. halib-0.2.2/halib/research/data/__init__.py +0 -0
  19. {halib-0.2.1/halib/utils → halib-0.2.2/halib/research/data}/dataclass_util.py +3 -2
  20. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/data}/dataset.py +2 -2
  21. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/data}/torchloader.py +12 -9
  22. halib-0.2.2/halib/research/perf/__init__.py +0 -0
  23. halib-0.2.1/halib/research/flops.py → halib-0.2.2/halib/research/perf/flop_calc.py +37 -3
  24. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/perfcalc.py +6 -4
  25. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/perftb.py +4 -6
  26. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/profiler.py +3 -2
  27. halib-0.2.2/halib/research/viz/__init__.py +0 -0
  28. {halib-0.2.1/halib/research → halib-0.2.2/halib/research/viz}/plot.py +3 -7
  29. halib-0.2.2/halib/system/__init__.py +0 -0
  30. halib-0.2.2/halib/system/filesys.py +164 -0
  31. halib-0.2.1/halib/research/mics.py → halib-0.2.2/halib/system/path.py +1 -2
  32. halib-0.2.2/halib/utils/__init__.py +0 -0
  33. halib-0.2.1/halib/utils/listop.py → halib-0.2.2/halib/utils/list.py +0 -1
  34. {halib-0.2.1 → halib-0.2.2}/halib.egg-info/PKG-INFO +4 -1
  35. halib-0.2.2/halib.egg-info/SOURCES.txt +53 -0
  36. {halib-0.2.1 → halib-0.2.2}/setup.py +1 -1
  37. halib-0.2.1/halib/cuda.py +0 -39
  38. halib-0.2.1/halib/online/gdrive_test.py +0 -50
  39. halib-0.2.1/halib/research/flop_csv.py +0 -34
  40. halib-0.2.1/halib/system/filesys.py +0 -124
  41. halib-0.2.1/halib/utils/video.py +0 -82
  42. halib-0.2.1/halib.egg-info/SOURCES.txt +0 -52
  43. {halib-0.2.1 → halib-0.2.2}/GDriveFolder.txt +0 -0
  44. {halib-0.2.1 → halib-0.2.2}/LICENSE.txt +0 -0
  45. {halib-0.2.1 → halib-0.2.2}/MANIFEST.in +0 -0
  46. {halib-0.2.1/halib/filetype → halib-0.2.2/halib/common}/__init__.py +0 -0
  47. {halib-0.2.1/halib → halib-0.2.2/halib/common}/rich_color.py +0 -0
  48. {halib-0.2.1/halib/online → halib-0.2.2/halib/filetype}/__init__.py +0 -0
  49. {halib-0.2.1/halib/research → halib-0.2.2/halib/online}/__init__.py +0 -0
  50. {halib-0.2.1 → halib-0.2.2}/halib/online/gdrive.py +0 -0
  51. {halib-0.2.1 → halib-0.2.2}/halib/online/gdrive_mkdir.py +0 -0
  52. {halib-0.2.1/halib/system → halib-0.2.2/halib/research}/__init__.py +0 -0
  53. {halib-0.2.1/halib/utils → halib-0.2.2/halib/research/core}/__init__.py +0 -0
  54. {halib-0.2.1/halib/utils → halib-0.2.2/halib/research/perf}/gpu_mon.py +0 -0
  55. halib-0.2.1/halib/research/metrics.py → halib-0.2.2/halib/research/perf/perfmetrics.py +0 -0
  56. {halib-0.2.1 → halib-0.2.2}/halib/system/cmd.py +0 -0
  57. halib-0.2.1/halib/utils/dict_op.py → halib-0.2.2/halib/utils/dict.py +0 -0
  58. {halib-0.2.1 → halib-0.2.2}/halib.egg-info/dependency_links.txt +0 -0
  59. {halib-0.2.1 → halib-0.2.2}/halib.egg-info/requires.txt +0 -0
  60. {halib-0.2.1 → halib-0.2.2}/halib.egg-info/top_level.txt +0 -0
  61. {halib-0.2.1 → halib-0.2.2}/setup.cfg +0 -0
{halib-0.2.1 → halib-0.2.2}/.gitignore
@@ -50,7 +50,6 @@ Thumbs.db
 
 build
 dist
-data
 
 venv*/
 
{halib-0.2.1 → halib-0.2.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: halib
-Version: 0.2.1
+Version: 0.2.2
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
@@ -53,6 +53,9 @@ Dynamic: summary
 
 # Helper package for coding and automation
 
+**Version 0.2.2**
++ reorganize packages with most changes in `research` package
+
 **Version 0.2.01**
 + `research/base_exp`: add `eval_exp` method to evaluate experiment (e.g., model evaluation on test set) after experiment running is done.
 
{halib-0.2.1 → halib-0.2.2}/README.md
@@ -1,5 +1,8 @@
 # Helper package for coding and automation
 
+**Version 0.2.2**
++ reorganize packages with most changes in `research` package
+
 **Version 0.2.01**
 + `research/base_exp`: add `eval_exp` method to evaluate experiment (e.g., model evaluation on test set) after experiment running is done.
 
{halib-0.2.1 → halib-0.2.2}/halib/__init__.py
@@ -56,8 +56,7 @@ from .filetype.yamlfile import load_yaml
 from .system import cmd
 from .system import filesys as fs
 from .filetype import csvfile
-from .cuda import tcuda
-from .common import (
+from .common.common import (
     console,
     console_log,
     ConsoleLog,
@@ -65,6 +64,7 @@ from .common import (
     norm_str,
     pprint_box,
     pprint_local_path,
+    tcuda
 )
 
 # for log
@@ -76,7 +76,7 @@ from timebudget import timebudget
 import omegaconf
 from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
-from .rich_color import rcolor_str, rcolor_palette, rcolor_palette_all, rcolor_all_str
+from .common.rich_color import rcolor_str, rcolor_palette, rcolor_palette_all, rcolor_all_str
 
 # for visualization
 import seaborn as sns
{halib-0.2.1/halib → halib-0.2.2/halib/common}/common.py
@@ -1,20 +1,20 @@
 import os
 import re
-import rich
 import arrow
-import pathlib
-from pathlib import Path
-import urllib.parse
+import importlib
 
+import rich
 from rich import print
 from rich.panel import Panel
 from rich.console import Console
 from rich.pretty import pprint, Pretty
-from pathlib import PureWindowsPath
+
+from pathlib import Path, PureWindowsPath
 
 
 console = Console()
 
+
 def seed_everything(seed=42):
     import random
     import numpy as np
@@ -61,6 +61,7 @@ def pprint_box(obj, title="", border_style="green"):
         Panel(Pretty(obj, expand_all=True), title=title, border_style=border_style)
     )
 
+
 def console_rule(msg, do_norm_msg=True, is_end_tag=False):
     msg = norm_str(msg) if do_norm_msg else msg
     if is_end_tag:
@@ -149,3 +150,29 @@ def pprint_local_path(
     console.print(content_str)
 
     return file_uri
+
+
+def tcuda():
+    NOT_INSTALLED = "Not Installed"
+    GPU_AVAILABLE = "GPU(s) Available"
+    ls_lib = ["torch", "tensorflow"]
+    lib_stats = {lib: NOT_INSTALLED for lib in ls_lib}
+    for lib in ls_lib:
+        spec = importlib.util.find_spec(lib)
+        if spec:
+            if lib == "torch":
+                import torch
+
+                lib_stats[lib] = str(torch.cuda.device_count()) + " " + GPU_AVAILABLE
+            elif lib == "tensorflow":
+                import tensorflow as tf
+
+                lib_stats[lib] = (
+                    str(len(tf.config.list_physical_devices("GPU")))
+                    + " "
+                    + GPU_AVAILABLE
+                )
+    console.rule("<CUDA Library Stats>")
+    pprint(lib_stats)
+    console.rule("</CUDA Library Stats>")
+    return lib_stats
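
Note: `tcuda` replaces the deleted `halib/cuda.py` module; it now lives in `common.common` and is re-exported at the package root (see the `__init__.py` hunk above). A minimal usage sketch, assuming halib 0.2.2 plus at least one of `torch`/`tensorflow` is installed:

```python
# Sketch: query GPU availability through the relocated helper.
# tcuda() prints a rich-formatted panel and returns a dict keyed by library.
from halib import tcuda

stats = tcuda()
print(stats["torch"])  # e.g. "1 GPU(s) Available" or "Not Installed"
```
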
{halib-0.2.1 → halib-0.2.2}/halib/filetype/csvfile.py
@@ -1,19 +1,13 @@
+import csv
+import textwrap
 import pandas as pd
+import pygwalker as pyg
 from tabulate import tabulate
 from rich.console import Console
-from rich import print as rprint
-from rich import inspect
-from rich.pretty import pprint
-from tqdm import tqdm
-from loguru import logger
 from itables import init_notebook_mode, show
-import pygwalker as pyg
-import textwrap
-import csv
 
 console = Console()
 
-
 def read(file, separator=","):
     df = pd.read_csv(file, separator)
     return df
{halib-0.2.1 → halib-0.2.2}/halib/filetype/ipynb.py
@@ -1,10 +1,8 @@
-from contextlib import contextmanager
-from pathlib import Path
-
 import ipynbname
+from pathlib import Path
+from contextlib import contextmanager
 
-from ..common import console, now_str
-
+from ..common.common import now_str
 
 @contextmanager
 def gen_ipynb_name(
{halib-0.2.1 → halib-0.2.2}/halib/filetype/jsonfile.py
@@ -1,17 +1,14 @@
 import json
 
-
 def read(file):
     with open(file) as f:
         data = json.load(f)
     return data
 
-
 def write(data_dict, outfile):
     with open(outfile, "w") as json_file:
         json.dump(data_dict, json_file)
 
-
 def beautify(json_str):
     formatted_json = json_str
     try:
{halib-0.2.1 → halib-0.2.2}/halib/filetype/textfile.py
@@ -4,7 +4,6 @@ def read_line_by_line(file_path):
     lines = [line.rstrip() for line in lines]
     return lines
 
-
 def write(lines, outfile, append=False):
     mode = "a" if append else "w"
     with open(outfile, mode, encoding="utf-8") as f:
{halib-0.2.1 → halib-0.2.2}/halib/filetype/videofile.py
@@ -1,11 +1,100 @@
+import os
 import cv2
-import textfile
 import enlighten
+
 from enum import Enum
-from ..system import filesys
 from tube_dl import Youtube, Playlist
 from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
 
+from . import textfile
+from . import csvfile
+from ..system import filesys
+
+class VideoUtils:
+    @staticmethod
+    def _default_meta_extractor(video_path):
+        """Default video metadata extractor function."""
+        # Open the video file
+        cap = cv2.VideoCapture(video_path)
+
+        # Check if the video was opened successfully
+        if not cap.isOpened():
+            print(f"Error: Could not open video file {video_path}")
+            return None
+
+        # Get the frame count
+        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+        # Get the FPS
+        fps = cap.get(cv2.CAP_PROP_FPS)
+
+        # get frame size
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        # Release the video capture object
+        cap.release()
+
+        meta_dict = {
+            "video_path": video_path,
+            "width": width,
+            "height": height,
+            "frame_count": frame_count,
+            "fps": fps,
+        }
+        return meta_dict
+
+    @staticmethod
+    def get_video_meta_dict(video_path, meta_dict_extractor_func=None):
+        assert os.path.exists(video_path), f"Video file {video_path} does not exist"
+        if meta_dict_extractor_func and callable(meta_dict_extractor_func):
+            assert (
+                meta_dict_extractor_func.__code__.co_argcount == 1
+            ), "meta_dict_extractor_func must take exactly one argument (video_path)"
+            meta_dict = meta_dict_extractor_func(video_path)
+            assert isinstance(
+                meta_dict, dict
+            ), "meta_dict_extractor_func must return a dictionary"
+            assert "video_path" in meta_dict, "meta_dict must contain 'video_path'"
+        else:
+            meta_dict = VideoUtils._default_meta_extractor(video_path=video_path)
+        return meta_dict
+
+    @staticmethod
+    def get_video_dir_meta_df(
+        video_dir,
+        video_exts=[".mp4", ".avi", ".mov", ".mkv"],
+        search_recursive=False,
+        csv_outfile=None,
+    ):
+        assert os.path.exists(video_dir), f"Video directory {video_dir} does not exist"
+        video_files = filesys.filter_files_by_extension(
+            video_dir, video_exts, recursive=search_recursive
+        )
+        assert (
+            len(video_files) > 0
+        ), f"No video files found in {video_dir} with extensions {video_exts}"
+        video_meta_list = []
+        for vfile in video_files:
+            meta_dict = VideoUtils.get_video_meta_dict(vfile)
+            if meta_dict:
+                video_meta_list.append(meta_dict)
+        dfmk = csvfile.DFCreator()
+        columns = list(video_meta_list[0].keys())
+        assert len(columns) > 0, "No video metadata found"
+        assert "video_path" in columns, "video_path column not found in video metadata"
+        # move video_path to the first column
+        columns.remove("video_path")
+        columns.insert(0, "video_path")
+        dfmk.create_table("video_meta", columns)
+        rows = [[meta[col] for col in columns] for meta in video_meta_list]
+        dfmk.insert_rows("video_meta", rows)
+        dfmk.fill_table_from_row_pool("video_meta")
+
+        if csv_outfile:
+            dfmk["video_meta"].to_csv(csv_outfile, index=False, sep=";")
+        return dfmk["video_meta"].copy()
+
 
 class VideoResolution(Enum):
     VR480p = "720x480"
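
Note: the new `VideoUtils` class absorbs the deleted `halib/utils/video.py` and returns one metadata row per video (width, height, frame_count, fps). A hedged usage sketch; the `./clips` and `meta.csv` paths are placeholders, and the CSV is `;`-separated per the `to_csv(..., sep=";")` call above:

```python
# Sketch: collect per-video metadata for a folder of clips into a DataFrame.
from halib.filetype.videofile import VideoUtils

df = VideoUtils.get_video_dir_meta_df(
    "./clips",                      # must exist, or the assert fires
    video_exts=[".mp4", ".mkv"],
    search_recursive=True,
    csv_outfile="meta.csv",         # optional; written with sep=";"
)
print(df.head())
```
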
{halib-0.2.1 → halib-0.2.2}/halib/filetype/yamlfile.py
@@ -2,15 +2,15 @@ import time
 import networkx as nx
 from rich import inspect
 from rich.pretty import pprint
-from omegaconf import OmegaConf
 from rich.console import Console
+
+from omegaconf import OmegaConf
 from argparse import ArgumentParser
 
-from ..research.mics import *
+from ..system.path import *
 
 console = Console()
 
-
 def _load_yaml_recursively(
     yaml_file, yaml_files=[], share_nx_graph=nx.DiGraph(), log_info=False
 ):
{halib-0.2.1 → halib-0.2.2}/halib/online/projectmake.py
@@ -1,17 +1,18 @@
 # coding=utf-8
-import json
+
 import os
+import json
+import pycurl
 import shutil
-from argparse import ArgumentParser
-from io import BytesIO
+import certifi
 import subprocess
+from io import BytesIO
+
+from argparse import ArgumentParser
 
-import certifi
-import pycurl
 from ..filetype import jsonfile
 from ..system import filesys
 
-
 def get_curl(url, user_and_pass, verbose=True):
     c = pycurl.Curl()
     c.setopt(pycurl.VERBOSE, verbose)
{halib-0.2.1/halib/utils → halib-0.2.2/halib/online}/tele_noti.py
@@ -25,12 +25,11 @@ def parse_args():
         "--cfg",
         type=str,
         help="yaml file for tele",
-        default=r"E:\Dev\halib\cfg_tele_noti.yaml",
+        default=r"E:\Dev\__halib\halib\online\tele_noti_cfg.yaml",
     )
 
     return parser.parse_args()
 
-
 def get_watcher_message_df(target_file, num_last_lines):
     file_ext = fs.get_file_name(target_file, split_file_ext=True)[1]
     supported_ext = [".txt", ".log", ".csv"]
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/core}/base_config.py
@@ -1,6 +1,10 @@
 import os
 from rich.pretty import pprint
 from abc import ABC, abstractmethod
+from typing import List, Optional, TypeVar, Generic
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
 from dataclass_wizard import YAMLWizard
 
 
@@ -19,6 +23,46 @@ class NamedConfig(ABC):
     pass
 
 
+@dataclass
+class AutoNamedConfig(YAMLWizard, NamedConfig):
+    """
+    Mixin that automatically implements get_name() by returning self.name.
+    Classes using this MUST have a 'name' field.
+    """
+
+    name: Optional[str] = None
+
+    def get_name(self):
+        return self.name
+
+    def __post_init__(self):
+        # Enforce the "MUST" rule here
+        if self.name is None:
+            # We allow None during initial load, but it must be set before usage
+            # or handled by the loader.
+            pass
+
+T = TypeVar("T", bound=AutoNamedConfig)
+
+class BaseSelectorConfig(Generic[T]):
+    """
+    Base class to handle the logic of selecting an item from a list by name.
+    """
+
+    def _resolve_selection(self, items: List[T], selected_name: str, context: str) -> T:
+        if selected_name is None:
+            raise ValueError(f"No {context} selected in the configuration.")
+
+        # Create a lookup dict for O(1) access, or just iterate if list is short
+        for item in items:
+            if item.name == selected_name:
+                return item
+
+        raise ValueError(
+            f"{context.capitalize()} '{selected_name}' not found in the configuration list."
+        )
+
+
 class ExpBaseConfig(ABC, YAMLWizard):
     """
     Base class for configuration objects.
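
Note: the new `AutoNamedConfig`/`BaseSelectorConfig` pair supports the common select-one-entry-by-name config pattern. A hedged sketch of how they compose; `ModelConfig` and `ModelSelector` are illustrative names, not halib APIs, and `dataclass_wizard` must be installed:

```python
# Sketch: resolving one named entry from a config list via _resolve_selection.
from dataclasses import dataclass, field
from typing import List, Optional

from halib.research.core.base_config import AutoNamedConfig, BaseSelectorConfig

@dataclass
class ModelConfig(AutoNamedConfig):   # inherits the 'name' field and get_name()
    lr: float = 1e-3

@dataclass
class ModelSelector(BaseSelectorConfig[ModelConfig]):
    models: List[ModelConfig] = field(default_factory=list)
    selected: Optional[str] = None

    def active_model(self) -> ModelConfig:
        # raises ValueError if 'selected' is None or names no known entry
        return self._resolve_selection(self.models, self.selected, "model")

sel = ModelSelector(models=[ModelConfig(name="resnet", lr=0.01)], selected="resnet")
print(sel.active_model().lr)  # 0.01
```
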
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/core}/base_exp.py
@@ -1,8 +1,8 @@
 from abc import ABC, abstractmethod
 from typing import Tuple, Any, Optional
-from ..research.base_config import ExpBaseConfig
-from ..research.perfcalc import PerfCalc
-from ..research.metrics import MetricsBackend
+from base_config import ExpBaseConfig
+from ..perf.perfcalc import PerfCalc
+from ..perf.perfmetrics import MetricsBackend
 
 # ! SEE https://github.com/hahv/base_exp for sample usage
 class BaseExperiment(PerfCalc, ABC):
@@ -1,12 +1,12 @@
1
- from typing import Dict, Any, List
2
- import numpy as np
3
- from ..common import *
4
- from ..filetype import yamlfile
5
- import yaml
6
1
  import os
2
+ import yaml
3
+ import numpy as np
4
+ from typing import Dict, Any, List
7
5
 
8
- class ParamGen:
6
+ from ...common.common import *
7
+ from ...filetype import yamlfile
9
8
 
9
+ class ParamGen:
10
10
  @staticmethod
11
11
  def build_from_file(params_file):
12
12
  builder = ParamGen(params_file)
@@ -1,11 +1,12 @@
1
- import glob
2
- from rich.pretty import pprint
3
1
  import os
4
- import subprocess
5
- import argparse
2
+ import glob
6
3
  import wandb
4
+ import argparse
5
+ import subprocess
6
+
7
7
  from tqdm import tqdm
8
8
  from rich.console import Console
9
+
9
10
  console = Console()
10
11
 
11
12
  def sync_runs(outdir):
halib-0.2.2/halib/research/data/__init__.py
File without changes
{halib-0.2.1/halib/utils → halib-0.2.2/halib/research/data}/dataclass_util.py
@@ -1,10 +1,11 @@
 import yaml
 from typing import Any
+
 from rich.pretty import pprint
-from ..filetype import yamlfile
-# from halib.filetype import yamlfile
 from dataclasses import make_dataclass
 
+from ...filetype import yamlfile
+
 def dict_to_dataclass(name: str, data: dict):
     fields = []
     values = {}
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/data}/dataset.py
@@ -13,8 +13,8 @@ from rich.pretty import pprint
 from torchvision.datasets import ImageFolder
 from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
 
-from ..common import console, seed_everything, ConsoleLog
-from ..system import filesys as fs
+from ...common.common import console, seed_everything, ConsoleLog
+from ...system import filesys as fs
 
 def parse_args():
     parser = ArgumentParser(description="desc text")
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/data}/torchloader.py
@@ -6,19 +6,22 @@
  * @desc this module works as a utility tools for finding the best configuration for dataloader (num_workers, batch_size, pin_menory, etc.) that fits your hardware.
 """
 from argparse import ArgumentParser
-from ..common import *
-from ..filetype import csvfile
-from ..filetype.yamlfile import load_yaml
-from rich import inspect
-from torch.utils.data import DataLoader
-from torchvision import datasets, transforms
-from tqdm import tqdm
-from typing import Union
-import itertools as it  # for cartesian product
+
 import os
 import time
 import traceback
 
+from tqdm import tqdm
+from rich import inspect
+from typing import Union
+import itertools as it  # for cartesian product
+
+from torch.utils.data import DataLoader
+from torchvision import datasets, transforms
+
+from ...common.common import *
+from ...filetype import csvfile
+from ...filetype.yamlfile import load_yaml
 
 def parse_args():
     parser = ArgumentParser(description="desc text")
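
Note: per its docstring, `torchloader` sweeps DataLoader settings (num_workers, batch_size, pin_memory) to find what fits the hardware. A minimal sketch of the same cartesian-product idea in plain PyTorch, independent of halib's actual CLI; the dataset is a stand-in:

```python
# Sketch: time a full pass over each DataLoader configuration and keep the fastest.
import time
import itertools as it

import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(4096, 3, 32, 32), torch.zeros(4096, dtype=torch.long))

results = []
for workers, bs, pin in it.product([0, 2, 4], [32, 128], [False, True]):
    loader = DataLoader(ds, batch_size=bs, num_workers=workers, pin_memory=pin)
    start = time.perf_counter()
    for _ in loader:  # one full epoch as the timing probe
        pass
    results.append(((workers, bs, pin), time.perf_counter() - start))

best, elapsed = min(results, key=lambda r: r[1])
print(f"best (num_workers, batch_size, pin_memory): {best} in {elapsed:.2f}s")
```
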
halib-0.2.2/halib/research/perf/__init__.py
File without changes
halib-0.2.1/halib/research/flops.py → halib-0.2.2/halib/research/perf/flop_calc.py
@@ -151,6 +151,40 @@ def main():
 # ---------------------------------------------------------------------
 # Script Entry
 # ---------------------------------------------------------------------
-if __name__ == "__main__":
-    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
-    main()
+# flop_csv.py
+# if __name__ == "__main__":
+#     sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+#     main()
+
+
+# def main():
+#     csv_file = "./results-imagenet.csv"
+#     df = pd.read_csv(csv_file)
+#     # make param_count column as float
+#     # df['param_count'] = df['param_count'].astype(float)
+#     df["param_count"] = (
+#         pd.to_numeric(df["param_count"], errors="coerce").fillna(99999).astype(float)
+#     )
+#     df = df[df["param_count"] < 5.0]  # filter models with param_count < 20M
+
+#     dict_ls = []
+
+#     for index, row in tqdm(df.iterrows()):
+#         console.rule(f"Row {index+1}/{len(df)}")
+#         model = row["model"]
+#         num_class = 2
+#         _, _, mflops = _calculate_flops_for_model(model, num_class)
+#         dict_ls.append(
+#             {"model": model, "param_count": row["param_count"], "mflops": mflops}
+#         )
+
+#     # Create a DataFrame from the list of dictionaries
+#     result_df = pd.DataFrame(dict_ls)
+
+#     final_df = pd.merge(df, result_df, on=["model", "param_count"])
+#     final_df.sort_values(by="mflops", inplace=True, ascending=True)
+#     csvfile.fn_display_df(final_df)
+
+
+# if __name__ == "__main__":
+#     main()
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/perfcalc.py
@@ -6,10 +6,12 @@ import pandas as pd
 from abc import ABC, abstractmethod
 from collections import OrderedDict
 
-from ..system import filesys as fs
-from ..common import now_str
-from ..research.perftb import PerfTB
-from ..research.metrics import *
+
+from ...common.common import now_str
+from ...system import filesys as fs
+
+from .perftb import PerfTB
+from .perfmetrics import *
 
 
 REQUIRED_COLS = ["experiment", "dataset"]
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/perftb.py
@@ -1,5 +1,6 @@
 import warnings
 warnings.filterwarnings("ignore", category=DeprecationWarning)
+
 import os
 import random
 import itertools
@@ -9,14 +10,11 @@ from collections import defaultdict
 from plotly.subplots import make_subplots
 from typing import Dict, List, Union, Optional
 
-from rich.pretty import pprint
 import pandas as pd
+from rich.pretty import pprint
 
-# from halib import *
-# internal imports
-from ..filetype import csvfile
-from ..common import ConsoleLog
-
+from ...filetype import csvfile
+from ...common.common import ConsoleLog
 
 class DatasetMetrics:
     """Class to store metrics definitions for a specific dataset."""
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/perf}/profiler.py
@@ -5,13 +5,14 @@ import json
 from pathlib import Path
 from pprint import pprint
 from threading import Lock
+from loguru import logger
 
 from plotly.subplots import make_subplots
 import plotly.graph_objects as go
 import plotly.express as px  # for dynamic color scales
-from ..common import ConsoleLog
 
-from loguru import logger
+from ...common.common import ConsoleLog
+
 
 class zProfiler:
     """A singleton profiler to measure execution time of contexts and steps.
halib-0.2.2/halib/research/viz/__init__.py
File without changes
{halib-0.2.1/halib/research → halib-0.2.2/halib/research/viz}/plot.py
@@ -5,24 +5,20 @@ import time
 import click
 import base64
 import pandas as pd
-
 from PIL import Image
 from io import BytesIO
-
 import plotly.express as px
-from ..common import now_str
-from ..filetype import csvfile
 import plotly.graph_objects as go
-from ..system import filesys as fs
-
 from rich.console import Console
 from typing import Callable, Optional, Tuple, List, Union
 
+from ...common.common import now_str
+from ...filetype import csvfile
+from ...system import filesys as fs
 
 console = Console()
 desktop_path = os.path.expanduser("~/Desktop")
 
-
 class PlotHelper:
     def _verify_csv(self, csv_file):
         """Read a CSV and normalize column names (lowercase)."""
halib-0.2.2/halib/system/__init__.py
File without changes