halib 0.1.47__tar.gz → 0.1.49__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {halib-0.1.47/halib.egg-info → halib-0.1.49}/PKG-INFO +35 -5
- {halib-0.1.47 → halib-0.1.49}/README.md +5 -0
- halib-0.1.49/halib/research/benchquery.py +131 -0
- {halib-0.1.47/halib → halib-0.1.49/halib/research}/plot.py +3 -3
- {halib-0.1.47/halib → halib-0.1.49/halib/research}/torchloader.py +3 -3
- halib-0.1.49/halib/research/wandb_op.py +116 -0
- halib-0.1.49/halib/system/__init__.py +0 -0
- halib-0.1.49/halib/utils/__init__.py +0 -0
- {halib-0.1.47/halib → halib-0.1.49/halib/utils}/tele_noti.py +2 -2
- {halib-0.1.47 → halib-0.1.49/halib.egg-info}/PKG-INFO +35 -5
- {halib-0.1.47 → halib-0.1.49}/halib.egg-info/SOURCES.txt +9 -5
- {halib-0.1.47 → halib-0.1.49}/halib.egg-info/requires.txt +1 -0
- {halib-0.1.47 → halib-0.1.49}/setup.py +1 -1
- {halib-0.1.47 → halib-0.1.49}/.gitignore +0 -0
- {halib-0.1.47 → halib-0.1.49}/GDriveFolder.txt +0 -0
- {halib-0.1.47 → halib-0.1.49}/LICENSE.txt +0 -0
- {halib-0.1.47 → halib-0.1.49}/guide_publish_pip.pdf +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/__init__.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/common.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/cuda.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/__init__.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/csvfile.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/jsonfile.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/textfile.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/videofile.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/filetype/yamlfile.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/online/__init__.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/online/gdrive.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/online/gdrive_mkdir.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/online/gdrive_test.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/online/projectmake.py +0 -0
- {halib-0.1.47/halib/system → halib-0.1.49/halib/research}/__init__.py +0 -0
- {halib-0.1.47/halib → halib-0.1.49/halib/research}/dataset.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/rich_color.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/system/cmd.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib/system/filesys.py +0 -0
- {halib-0.1.47/halib → halib-0.1.49/halib/utils}/listop.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib.egg-info/dependency_links.txt +0 -0
- {halib-0.1.47 → halib-0.1.49}/halib.egg-info/top_level.txt +0 -0
- {halib-0.1.47 → halib-0.1.49}/setup.cfg +0 -0
- {halib-0.1.47 → halib-0.1.49}/test/test15.py +0 -0
- {halib-0.1.47 → halib-0.1.49}/test/test_df_creator.py +0 -0
{halib-0.1.47/halib.egg-info → halib-0.1.49}/PKG-INFO

@@ -1,20 +1,52 @@
 Metadata-Version: 2.1
 Name: halib
-Version: 0.1.47
+Version: 0.1.49
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
-License: UNKNOWN
-Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE.txt
+Requires-Dist: arrow
+Requires-Dist: click
+Requires-Dist: enlighten
+Requires-Dist: kaleido==0.1.*
+Requires-Dist: loguru
+Requires-Dist: more-itertools
+Requires-Dist: moviepy
+Requires-Dist: networkx
+Requires-Dist: numpy
+Requires-Dist: omegaconf
+Requires-Dist: opencv-python
+Requires-Dist: pandas
+Requires-Dist: Pillow
+Requires-Dist: Pyarrow
+Requires-Dist: pycurl
+Requires-Dist: python-telegram-bot
+Requires-Dist: requests
+Requires-Dist: rich
+Requires-Dist: scikit-learn
+Requires-Dist: matplotlib
+Requires-Dist: seaborn
+Requires-Dist: plotly
+Requires-Dist: pygwalker
+Requires-Dist: tabulate
+Requires-Dist: itables
+Requires-Dist: timebudget
+Requires-Dist: tqdm
+Requires-Dist: tube_dl
+Requires-Dist: wandb
 
 Helper package for coding and automation
 
+**Version 0.1.49**
+
++ add `research` package to help with research tasks, including `benchquery` for benchmarking queries from dataframe
++ add `wandb` module to allow easy sync offline data to Weights & Biases (wandb) and batch clear wandb runs.
+
 **Version 0.1.47**
 + add `pprint_box` to print object/string in a box frame (like in `inspect`)
 
@@ -122,5 +154,3 @@ New Features
 New Features
 
 + add support to upload local to google drive.
-
-
{halib-0.1.47 → halib-0.1.49}/README.md

@@ -1,5 +1,10 @@
 Helper package for coding and automation
 
+**Version 0.1.49**
+
++ add `research` package to help with research tasks, including `benchquery` for benchmarking queries from dataframe
++ add `wandb` module to allow easy sync offline data to Weights & Biases (wandb) and batch clear wandb runs.
+
 **Version 0.1.47**
 + add `pprint_box` to print object/string in a box frame (like in `inspect`)
 
halib-0.1.49/halib/research/benchquery.py (new file)

@@ -0,0 +1,131 @@
+import pandas as pd
+from rich.pretty import pprint
+from argparse import ArgumentParser
+
+def cols_to_col_groups(df):
+    columns = list(df.columns)
+    # pprint(columns)
+
+    col_groups = []
+    current_group = []
+
+    def have_unnamed(col_group):
+        return any("unnamed" in col.lower() for col in col_group)
+
+    for i, col in enumerate(columns):
+        # Add the first column to the current group
+        if not current_group:
+            current_group.append(col)
+            continue
+
+        prev_col = columns[i - 1]
+        # Check if current column is "unnamed" or shares base name with previous
+        # Assuming "equal" means same base name (before any suffix like '_1')
+        base_prev = (
+            prev_col.split("_")[0].lower() if "_" in prev_col else prev_col.lower()
+        )
+        base_col = col.split("_")[0].lower() if "_" in col else col.lower()
+        is_unnamed = "unnamed" in col.lower()
+        is_equal = base_col == base_prev
+
+        if is_unnamed or is_equal:
+            # Add to current group
+            current_group.append(col)
+        else:
+            # Start a new group
+            col_groups.append(current_group)
+            current_group = [col]
+    # Append the last group
+    if current_group:
+        col_groups.append(current_group)
+    meta_dict = {"common_cols": [], "db_cols": []}
+    for group in col_groups:
+        if not have_unnamed(group):
+            meta_dict["common_cols"].extend(group)
+        else:
+            # find the first unnamed column
+            named_col = next(
+                (col for col in group if "unnamed" not in col.lower()), None
+            )
+            group_cols = [f"{named_col}_{i}" for i in range(len(group))]
+            meta_dict["db_cols"].extend(group_cols)
+    return meta_dict
+
+# def bech_by_db_name(df, db_list="db1, db2", key_metrics="p, r, f1, acc"):
+
+
+def str_2_list(input_str, sep=","):
+    out_ls = []
+    if len(input_str.strip()) == 0:
+        return out_ls
+    if sep not in input_str:
+        out_ls.append(input_str.strip())
+        return out_ls
+    else:
+        out_ls = [item.strip() for item in input_str.split(sep) if item.strip()]
+        return out_ls
+
+
+def filter_bech_df_by_db_and_metrics(df, db_list="", key_metrics=""):
+    meta_cols_dict = cols_to_col_groups(df)
+    op_df = df.copy()
+    op_df.columns = (
+        meta_cols_dict["common_cols"].copy() + meta_cols_dict["db_cols"].copy()
+    )
+    filterd_cols = []
+    filterd_cols.extend(meta_cols_dict["common_cols"])
+
+    selected_db_list = str_2_list(db_list)
+    db_filted_cols = []
+    if len(selected_db_list) > 0:
+        for db_name in db_list.split(","):
+            db_name = db_name.strip()
+            for col_name in meta_cols_dict["db_cols"]:
+                if db_name.lower() in col_name.lower():
+                    db_filted_cols.append(col_name)
+    else:
+        db_filted_cols = meta_cols_dict["db_cols"]
+
+    filterd_cols.extend(db_filted_cols)
+    df_filtered = op_df[filterd_cols].copy()
+    df_filtered
+
+    selected_metrics_ls = str_2_list(key_metrics)
+    if len(selected_metrics_ls) > 0:
+        # get the second row as metrics row (header)
+        metrics_row = df_filtered.iloc[0].copy()
+        # only get the values in columns in (db_filterd_cols)
+        metrics_values = metrics_row[db_filted_cols].values
+        keep_metrics_cols = []
+        # create a zip of db_filted_cols and metrics_values (in that metrics_row)
+        metrics_list = list(zip(metrics_values, db_filted_cols))
+        selected_metrics_ls = [metric.strip().lower() for metric in selected_metrics_ls]
+        for metric, col_name in metrics_list:
+            if metric.lower() in selected_metrics_ls:
+                keep_metrics_cols.append(col_name)
+
+    else:
+        pprint("No metrics selected, keeping all db columns")
+        keep_metrics_cols = db_filted_cols
+
+    final_filterd_cols = meta_cols_dict["common_cols"].copy() + keep_metrics_cols
+    df_final = df_filtered[final_filterd_cols].copy()
+    return df_final
+
+
+def parse_args():
+    parser = ArgumentParser(
+        description="desc text")
+    parser.add_argument('-csv', '--csv', type=str, help='CSV file path', default=r"E:\Dev\__halib\test\bench.csv")
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    csv_file = args.csv
+    df = pd.read_csv(csv_file, sep=";", encoding="utf-8")
+    filtered_df = filter_bech_df_by_db_and_metrics(df, "bowfire", "acc")
+    print(filtered_df)
+
+if __name__ == "__main__":
+    main()
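For context, a minimal usage sketch of the new `benchquery` helpers (not part of the package diff). It assumes the benchmark-table convention the code above implies: a first header row with common columns plus dataset names followed by blank (`Unnamed`) cells, and a first data row holding the metric names under each dataset block. The toy dataframe, model names, and scores below are made up for illustration.

```python
import pandas as pd
from halib.research import benchquery

# Toy benchmark table: "method" is a common column; "bowfire" and "dfire" are
# dataset blocks whose extra columns arrive as pandas "Unnamed: N" headers.
# Row 0 carries the metric names, later rows carry the per-model scores.
df = pd.DataFrame({
    "method":     ["metric", "ResNet18", "MobileNetV2"],
    "bowfire":    ["p",      0.91,       0.88],
    "Unnamed: 2": ["r",      0.90,       0.86],
    "Unnamed: 3": ["acc",    0.93,       0.90],
    "dfire":      ["p",      0.89,       0.85],
    "Unnamed: 5": ["acc",    0.90,       0.87],
})

# Keep only the "bowfire" dataset block and, within it, the "acc" metric column.
out = benchquery.filter_bech_df_by_db_and_metrics(df, db_list="bowfire", key_metrics="acc")
print(out)  # columns: "method" and "bowfire_2" (the acc column); row 0 is still the metric-name row
```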
{halib-0.1.47/halib → halib-0.1.49/halib/research}/plot.py

@@ -1,6 +1,6 @@
-from …
-from …
-from …
+from ..common import now_str, norm_str, ConsoleLog
+from ..filetype import csvfile
+from ..system import filesys as fs
 from functools import partial
 from rich.console import Console
 from rich.pretty import pprint
{halib-0.1.47/halib → halib-0.1.49/halib/research}/torchloader.py

@@ -6,9 +6,9 @@
 * @desc this module works as a utility tools for finding the best configuration for dataloader (num_workers, batch_size, pin_menory, etc.) that fits your hardware.
 """
 from argparse import ArgumentParser
-from …
-from …
-from …
+from ..common import *
+from ..filetype import csvfile
+from ..filetype.yamlfile import load_yaml
 from rich import inspect
 from torch.utils.data import DataLoader
 from torchvision import datasets, transforms
halib-0.1.49/halib/research/wandb_op.py (new file)

@@ -0,0 +1,116 @@
+import glob
+from rich.pretty import pprint
+import os
+import subprocess
+import argparse
+import wandb
+from tqdm import tqdm
+from rich.console import Console
+console = Console()
+
+def sync_runs(outdir):
+    outdir = os.path.abspath(outdir)
+    assert os.path.exists(outdir), f"Output directory {outdir} does not exist."
+    sub_dirs = [name for name in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, name))]
+    assert len(sub_dirs) > 0, f"No subdirectories found in {outdir}."
+    console.rule("Parent Directory")
+    console.print(f"[yellow]{outdir}[/yellow]")
+
+    exp_dirs = [os.path.join(outdir, sub_dir) for sub_dir in sub_dirs]
+    wandb_dirs = []
+    for exp_dir in exp_dirs:
+        wandb_dirs.extend(glob.glob(f"{exp_dir}/wandb/*run-*"))
+    if len(wandb_dirs) == 0:
+        console.print(f"No wandb runs found in {outdir}.")
+        return
+    else:
+        console.print(f"Found [bold]{len(wandb_dirs)}[/bold] wandb runs in {outdir}.")
+    for i, wandb_dir in enumerate(wandb_dirs):
+        console.rule(f"Syncing wandb run {i + 1}/{len(wandb_dirs)}")
+        console.print(f"Syncing: {wandb_dir}")
+        process = subprocess.Popen(
+            ["wandb", "sync", wandb_dir],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True,
+        )
+
+        for line in process.stdout:
+            console.print(line.strip())
+            if " ERROR Error while calling W&B API" in line:
+                break
+        process.stdout.close()
+        process.wait()
+        if process.returncode != 0:
+            console.print(f"[red]Error syncing {wandb_dir}. Return code: {process.returncode}[/red]")
+        else:
+            console.print(f"Successfully synced {wandb_dir}.")
+
+def delete_runs(project, pattern=None):
+    console.rule("Delete W&B Runs")
+    confirm_msg = f"Are you sure you want to delete all runs in"
+    confirm_msg += f" \n\tproject: [red]{project}[/red]"
+    if pattern:
+        confirm_msg += f"\n\tpattern: [blue]{pattern}[/blue]"
+
+    console.print(confirm_msg)
+    confirmation = input(f"This action cannot be undone. [y/N]: ").strip().lower()
+    if confirmation != "y":
+        print("Cancelled.")
+        return
+
+    print("Confirmed. Proceeding...")
+    api = wandb.Api()
+    runs = api.runs(project)
+
+    deleted = 0
+    console.rule("Deleting W&B Runs")
+    if len(runs) == 0:
+        print("No runs found in the project.")
+        return
+    for run in tqdm(runs):
+        if pattern is None or pattern in run.name:
+            run.delete()
+            console.print(f"Deleted run: [red]{run.name}[/red]")
+            deleted += 1
+
+    console.print(f"Total runs deleted: {deleted}")
+
+
+def valid_argument(args):
+    if args.op == "sync":
+        assert os.path.exists(args.outdir), f"Output directory {args.outdir} does not exist."
+    elif args.op == "delete":
+        assert isinstance(args.project, str) and len(args.project.strip()) > 0, "Project name must be a non-empty string."
+    else:
+        raise ValueError(f"Unknown operation: {args.op}")
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Operations on W&B runs")
+    parser.add_argument("-op", "--op", type=str, help="Operation to perform", default="sync", choices=["delete", "sync"])
+    parser.add_argument("-prj", "--project", type=str, default="fire-paper2-2025", help="W&B project name")
+    parser.add_argument("-outdir", "--outdir", type=str, help="arg1 description", default="./zout/train")
+    parser.add_argument("-pt", "--pattern",
+        type=str,
+        default=None,
+        help="Run name pattern to match for deletion",
+    )
+
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    # Validate arguments, stop if invalid
+    valid_argument(args)
+
+    op = args.op
+    if op == "sync":
+        sync_runs(args.outdir)
+    elif op == "delete":
+        delete_runs(args.project, args.pattern)
+    else:
+        raise ValueError(f"Unknown operation: {op}")
+
+if __name__ == "__main__":
+    main()
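For context, a hedged sketch of how the new `wandb_op` helpers might be called (not part of the package diff). The output directory and the `my-entity/my-project` path are placeholders, and syncing assumes the `wandb` CLI is installed and logged in.

```python
from halib.research import wandb_op

# Sync every offline run found under <outdir>/<experiment>/wandb/*run-* by
# shelling out to `wandb sync` for each run directory.
wandb_op.sync_runs("./zout/train")

# Batch-delete runs in a project after an interactive y/N confirmation.
# With a pattern, only runs whose name contains that substring are deleted.
wandb_op.delete_runs("my-entity/my-project", pattern="debug")

# The module is also a small argparse CLI, e.g.:
#   python -m halib.research.wandb_op -op sync -outdir ./zout/train
#   python -m halib.research.wandb_op -op delete -prj my-entity/my-project -pt debug
```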
{halib-0.1.47/halib → halib-0.1.49/halib/utils}/tele_noti.py

@@ -10,8 +10,8 @@ from rich.pretty import pprint
 from rich.console import Console
 import plotly.graph_objects as go
 
-from …
-from …
+from ..system import filesys as fs
+from ..filetype import textfile, csvfile
 
 from argparse import ArgumentParser
 
{halib-0.1.47 → halib-0.1.49/halib.egg-info}/PKG-INFO

@@ -1,20 +1,52 @@
 Metadata-Version: 2.1
 Name: halib
-Version: 0.1.47
+Version: 0.1.49
 Summary: Small library for common tasks
 Author: Hoang Van Ha
 Author-email: hoangvanhauit@gmail.com
-License: UNKNOWN
-Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE.txt
+Requires-Dist: arrow
+Requires-Dist: click
+Requires-Dist: enlighten
+Requires-Dist: kaleido==0.1.*
+Requires-Dist: loguru
+Requires-Dist: more-itertools
+Requires-Dist: moviepy
+Requires-Dist: networkx
+Requires-Dist: numpy
+Requires-Dist: omegaconf
+Requires-Dist: opencv-python
+Requires-Dist: pandas
+Requires-Dist: Pillow
+Requires-Dist: Pyarrow
+Requires-Dist: pycurl
+Requires-Dist: python-telegram-bot
+Requires-Dist: requests
+Requires-Dist: rich
+Requires-Dist: scikit-learn
+Requires-Dist: matplotlib
+Requires-Dist: seaborn
+Requires-Dist: plotly
+Requires-Dist: pygwalker
+Requires-Dist: tabulate
+Requires-Dist: itables
+Requires-Dist: timebudget
+Requires-Dist: tqdm
+Requires-Dist: tube_dl
+Requires-Dist: wandb
 
 Helper package for coding and automation
 
+**Version 0.1.49**
+
++ add `research` package to help with research tasks, including `benchquery` for benchmarking queries from dataframe
++ add `wandb` module to allow easy sync offline data to Weights & Biases (wandb) and batch clear wandb runs.
+
 **Version 0.1.47**
 + add `pprint_box` to print object/string in a box frame (like in `inspect`)
 
@@ -122,5 +154,3 @@ New Features
 New Features
 
 + add support to upload local to google drive.
-
-
{halib-0.1.47 → halib-0.1.49}/halib.egg-info/SOURCES.txt

@@ -7,12 +7,7 @@ setup.py
 halib/__init__.py
 halib/common.py
 halib/cuda.py
-halib/dataset.py
-halib/listop.py
-halib/plot.py
 halib/rich_color.py
-halib/tele_noti.py
-halib/torchloader.py
 halib.egg-info/PKG-INFO
 halib.egg-info/SOURCES.txt
 halib.egg-info/dependency_links.txt
@@ -29,8 +24,17 @@ halib/online/gdrive.py
 halib/online/gdrive_mkdir.py
 halib/online/gdrive_test.py
 halib/online/projectmake.py
+halib/research/__init__.py
+halib/research/benchquery.py
+halib/research/dataset.py
+halib/research/plot.py
+halib/research/torchloader.py
+halib/research/wandb_op.py
 halib/system/__init__.py
 halib/system/cmd.py
 halib/system/filesys.py
+halib/utils/__init__.py
+halib/utils/listop.py
+halib/utils/tele_noti.py
 test/test15.py
 test/test_df_creator.py
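The SOURCES.txt changes above summarize the 0.1.49 reorganisation: the old top-level `dataset`, `listop`, `plot`, `tele_noti`, and `torchloader` modules now live under the `halib.research` and `halib.utils` sub-packages. A minimal sketch of the updated import paths, assuming the relevant third-party dependencies are installed:

```python
# New sub-package layout in halib 0.1.49; module names are unchanged,
# only their package paths moved (the old flat halib.plot-style paths are gone).
from halib.research import benchquery, dataset, plot, torchloader, wandb_op
from halib.utils import listop, tele_noti
```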