kevin-toolbox-dev 1.4.4__py3-none-any.whl → 1.4.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. kevin_toolbox/__init__.py +2 -2
  2. kevin_toolbox/computer_science/algorithm/cache_manager/cache_manager.py +6 -0
  3. kevin_toolbox/data_flow/file/excel/__init__.py +1 -0
  4. kevin_toolbox/data_flow/file/excel/write_excel_with_matrix.py +105 -0
  5. kevin_toolbox/data_flow/file/json_/read_json.py +1 -0
  6. kevin_toolbox/data_flow/file/json_/write_json.py +2 -2
  7. kevin_toolbox/env_info/__init__.py +2 -1
  8. kevin_toolbox/env_info/check_validity_and_uninstall.py +41 -21
  9. kevin_toolbox/env_info/check_version_and_update.py +70 -49
  10. kevin_toolbox/env_info/test/test_check_.py +52 -0
  11. kevin_toolbox/env_info/test/test_variable_.py +50 -0
  12. kevin_toolbox/env_info/variable_/__init__.py +2 -0
  13. kevin_toolbox/env_info/variable_/env_vars_parser.py +88 -0
  14. kevin_toolbox/nested_dict_list/get_value.py +12 -2
  15. kevin_toolbox/nested_dict_list/serializer/read.py +6 -5
  16. kevin_toolbox/nested_dict_list/serializer/write.py +55 -28
  17. kevin_toolbox/nested_dict_list/set_value.py +16 -6
  18. kevin_toolbox/patches/for_os/find_files_in_dir.py +22 -17
  19. kevin_toolbox/patches/for_os/organize/__init__.py +1 -0
  20. kevin_toolbox/patches/for_os/organize/group_files_by_timestamp.py +90 -0
  21. kevin_toolbox/utils/__init__.py +0 -0
  22. kevin_toolbox/utils/variable.py +6 -0
  23. kevin_toolbox_dev-1.4.6.dist-info/METADATA +76 -0
  24. {kevin_toolbox_dev-1.4.4.dist-info → kevin_toolbox_dev-1.4.6.dist-info}/RECORD +26 -16
  25. kevin_toolbox_dev-1.4.4.dist-info/METADATA +0 -58
  26. {kevin_toolbox_dev-1.4.4.dist-info → kevin_toolbox_dev-1.4.6.dist-info}/WHEEL +0 -0
  27. {kevin_toolbox_dev-1.4.4.dist-info → kevin_toolbox_dev-1.4.6.dist-info}/top_level.txt +0 -0
kevin_toolbox/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "1.4.4"
1
+ __version__ = "1.4.6"
2
2
 
3
3
 
4
4
  import os
@@ -12,5 +12,5 @@ os.system(
12
12
  os.system(
13
13
  f'python {os.path.split(__file__)[0]}/env_info/check_validity_and_uninstall.py '
14
14
  f'--package_name kevin-toolbox-dev '
15
- f'--expiration_timestamp 1749817728 --verbose 0'
15
+ f'--expiration_timestamp 1753280251 --verbose 0'
16
16
  )
@@ -44,6 +44,12 @@ class Cache_Manager:
44
44
  当设置为 float 时表示占 upper_bound 的比例
45
45
  默认为 0.5
46
46
  strategy: <str/dict/Strategy_Base> 管理策略
47
+ 目前支持以下策略:
48
+ - ":by_initial_time:FIFO" 删除最后一次访问时间最久远的部分
49
+ - ":by_counts:LFU" 删除访问频率最低的部分
50
+ - ":by_last_time:LRU" 删除最后一次访问时间最久远的部分
51
+ - ":by_survival_time:LST" 删除访问频率最低的部分
52
+ 默认使用 LRU 策略
47
53
  cache: <str/dict/Cache_Base> 缓存种类
48
54
  """
49
55
  # 默认参数
@@ -0,0 +1 @@
1
+ from .write_excel_with_matrix import write_excel_with_matrix as write_with_matrix
@@ -0,0 +1,105 @@
1
+ import os
2
+ import openpyxl
3
+ from openpyxl.styles import Alignment, Font, PatternFill
4
+ from kevin_toolbox.patches.for_os import remove
5
+
6
+
7
+ # excel
8
+ def write_excel_with_matrix(matrix, file_path=None, file_obj=None, sheet_name="matrix",
9
+ column_label_ls=None, row_label_ls=None, column_title="", row_title="", main_title=""):
10
+ """
11
+ 将矩阵写入到 excel 文件中
12
+
13
+ 参数:
14
+ file_path: 要写入到哪个文件
15
+ file_obj: <openpyxl.Workbook> 文件对象
16
+ 注意!!以上两个参数指定其一即可,同时指定时候,以后者为准。
17
+ sheet_name: 要写入到哪个sheet页面
18
+ matrix: <np.array or np.matrix> 矩阵
19
+ column_label_ls, row_label_ls: 行列标签
20
+ column_title, row_title: 行列标题
21
+ main_title: 总标题
22
+ """
23
+ assert file_path is not None or file_obj is not None
24
+
25
+ if file_obj is None:
26
+ file_path = os.path.abspath(os.path.expanduser(file_path))
27
+ os.makedirs(os.path.dirname(file_path), exist_ok=True)
28
+ # 判断文件是否存在,不存在则新建,否则读取文件
29
+ if not os.path.isfile(file_path):
30
+ wb = openpyxl.Workbook() # 创建文件对象
31
+ # wb对象创建后,默认含有一个默认的名为 Sheet 的 页面,将其删除
32
+ ws_ = wb.active
33
+ wb.remove(ws_)
34
+ else:
35
+ wb = openpyxl.load_workbook(file_path)
36
+ else:
37
+ assert isinstance(file_obj, openpyxl.Workbook)
38
+ wb = file_obj
39
+ # 判断sheet是否存在,不存在则建立,否则先删除再建立
40
+ if sheet_name in wb.sheetnames:
41
+ ws = wb[sheet_name]
42
+ wb.remove(ws)
43
+ ws = wb.create_sheet(sheet_name)
44
+
45
+ # 开始写
46
+ matrix_r_offset, matrix_c_offset = 1, 1 # 矩阵的起始位置
47
+ r_offset, c_offset = 1, 1 # 目前的写入位置
48
+ for i in [main_title, column_title, column_label_ls]:
49
+ if i:
50
+ matrix_r_offset += 1
51
+ for j in [row_title, row_label_ls]:
52
+ if j:
53
+ matrix_c_offset += 1
54
+ # print(matrix)
55
+ matrix_row_num, matrix_column_num = matrix.shape[0], matrix.shape[1]
56
+ # 标题
57
+ alignment = Alignment(horizontal="center", vertical="center", wrap_text=True)
58
+ if main_title:
59
+ ws.merge_cells(start_row=r_offset, start_column=1, end_row=r_offset,
60
+ end_column=matrix_c_offset + matrix_column_num - 1)
61
+ ws.cell(row=r_offset, column=1).value = main_title
62
+ ws.cell(row=r_offset, column=1).alignment = alignment
63
+ ws.cell(row=r_offset, column=1).font = Font(size=10, bold=True, name='微软雅黑', color="000000")
64
+ r_offset += 1
65
+ if column_title:
66
+ ws.merge_cells(start_row=r_offset, start_column=matrix_c_offset, end_row=r_offset,
67
+ end_column=matrix_c_offset + matrix_column_num - 1)
68
+ ws.cell(row=r_offset, column=matrix_c_offset).value = column_title
69
+ ws.cell(row=r_offset, column=matrix_c_offset).alignment = alignment
70
+ ws.cell(row=r_offset, column=matrix_c_offset).font = Font(size=10, bold=True, name='微软雅黑', color="000000")
71
+ r_offset += 1
72
+ if row_title:
73
+ ws.merge_cells(start_row=matrix_r_offset, start_column=1, end_row=matrix_r_offset + matrix_row_num - 1,
74
+ end_column=1)
75
+ ws.cell(row=matrix_r_offset, column=1).value = row_title
76
+ ws.cell(row=matrix_r_offset, column=1).alignment = alignment
77
+ ws.cell(row=matrix_r_offset, column=1).font = Font(size=10, bold=True, name='微软雅黑', color="000000")
78
+ c_offset += 1
79
+ # 标签
80
+ if column_label_ls:
81
+ for i in range(matrix_column_num):
82
+ ws.cell(row=r_offset, column=matrix_c_offset + i).value = column_label_ls[i]
83
+ ws.cell(row=r_offset, column=matrix_c_offset + i).alignment = alignment
84
+ ws.cell(row=r_offset, column=matrix_c_offset + i).fill = PatternFill(patternType="solid",
85
+ start_color="33CCFF")
86
+ r_offset += 1
87
+ if row_label_ls:
88
+ for i in range(matrix_row_num):
89
+ ws.cell(row=matrix_r_offset + i, column=c_offset).value = row_label_ls[i]
90
+ ws.cell(row=matrix_r_offset + i, column=c_offset).alignment = alignment
91
+ ws.cell(row=matrix_r_offset + i, column=c_offset).fill = PatternFill(patternType="solid",
92
+ start_color="33CCFF")
93
+ c_offset += 1
94
+ # 校验,可省略
95
+ if not (c_offset == matrix_c_offset and r_offset == matrix_r_offset):
96
+ print("wrong here")
97
+ for r_ in range(matrix_row_num):
98
+ for c_ in range(matrix_column_num):
99
+ ws.cell(row=matrix_r_offset + r_, column=matrix_c_offset + c_).value = matrix[r_][c_]
100
+
101
+ if file_path is not None:
102
+ remove(file_path, ignore_errors=True)
103
+ wb.save(file_path)
104
+
105
+ return wb
@@ -23,6 +23,7 @@ def read_json(file_path=None, file_obj=None, converters=None, b_use_suggested_co
23
23
  """
24
24
  assert file_path is not None or file_obj is not None
25
25
  if file_path is not None:
26
+ file_path = os.path.abspath(os.path.expanduser(file_path))
26
27
  assert os.path.isfile(file_path), f'file {file_path} not found'
27
28
  file_obj = open(file_path, 'r')
28
29
  elif isinstance(file_obj, (BytesIO,)):
@@ -38,9 +38,9 @@ def write_json(content, file_path, sort_keys=False, converters=None, b_use_sugge
38
38
  content = json.dumps(content, indent=4, ensure_ascii=False, sort_keys=sort_keys)
39
39
 
40
40
  if file_path is not None:
41
- file_path = os.path.abspath(file_path)
41
+ file_path = os.path.abspath(os.path.expanduser(file_path))
42
42
  os.makedirs(os.path.dirname(file_path), exist_ok=True)
43
- with open(file_path, 'w') as f:
43
+ with open(file_path, 'w', encoding="utf-8") as f:
44
44
  f.write(content)
45
45
  else:
46
46
  return content
@@ -1 +1,2 @@
1
-
1
+ from .check_version_and_update import check_version_and_update
2
+ from .check_validity_and_uninstall import check_validity_and_uninstall
@@ -1,6 +1,4 @@
1
- import os
2
1
  import subprocess
3
- import argparse
4
2
  import time
5
3
 
6
4
  """
@@ -8,22 +6,44 @@ import time
8
6
  - 若超过指定的有效期,则卸载。
9
7
  """
10
8
 
11
- out_parser = argparse.ArgumentParser(description='check_validity_and_uninstall')
12
- out_parser.add_argument('--package_name', type=str, required=True)
13
- out_parser.add_argument('--expiration_timestamp', type=int, required=False, default=1e10)
14
- out_parser.add_argument('--verbose', type=int, required=False, default=1)
15
- args = out_parser.parse_args().__dict__
16
-
17
- cur_timestamp = time.time()
18
-
19
- if cur_timestamp > args["expiration_timestamp"]:
20
- ex = subprocess.Popen(f'pip uninstall {args["package_name"]} --yes', shell=True, stdout=subprocess.PIPE,
21
- stderr=subprocess.STDOUT)
22
- out, _ = ex.communicate()
23
- res = out.decode().strip()
24
- else:
25
- res = "still within the validity period"
26
-
27
- if args["verbose"]:
28
- # print(args)
29
- print(res)
9
+ DEFAULT_EXPIRATION_TIMESTAMP = 1e10
10
+
11
+
12
+ def check_validity_and_uninstall(package_name, expiration_timestamp=DEFAULT_EXPIRATION_TIMESTAMP):
13
+ """
14
+ 检查当前机器时间是否超过 expiration_timestamp 指定的有效期,若超过则卸载 package_name 对应的库
15
+ """
16
+ cur_timestamp = time.time()
17
+
18
+ b_success_uninstalled = False
19
+ if cur_timestamp > expiration_timestamp:
20
+ ex = subprocess.Popen(f'pip uninstall {package_name} --yes', shell=True, stdout=subprocess.PIPE,
21
+ stderr=subprocess.STDOUT)
22
+ out, _ = ex.communicate()
23
+ msg = out.decode().strip()
24
+ if ex.returncode == 0:
25
+ b_success_uninstalled = True
26
+ else:
27
+ msg = "still within the validity period"
28
+
29
+ res_s = dict(cur_timestamp=cur_timestamp, expiration_timestamp=expiration_timestamp,
30
+ b_success_uninstalled=b_success_uninstalled, msg=msg)
31
+ return res_s
32
+
33
+
34
+ if __name__ == '__main__':
35
+ import argparse
36
+
37
+ out_parser = argparse.ArgumentParser(description='check_validity_and_uninstall')
38
+ out_parser.add_argument('--package_name', type=str, required=True)
39
+ out_parser.add_argument('--expiration_timestamp', type=float, required=False, default=DEFAULT_EXPIRATION_TIMESTAMP)
40
+ out_parser.add_argument('--verbose', type=lambda x: bool(eval(x)), required=False, default=True)
41
+ args = out_parser.parse_args().__dict__
42
+
43
+ b_version = args.pop("verbose")
44
+
45
+ res_s_ = check_validity_and_uninstall(**args)
46
+
47
+ if b_version:
48
+ for k, v in res_s_.items():
49
+ print(f"{k}: {v}")
@@ -1,51 +1,72 @@
1
- import os
2
1
  import subprocess
3
- import argparse
4
- import version
5
-
6
- """
7
- 检查当前版本
8
- - 若在可用版本中,有比当前版本更高的版本,则更新到可以获取到的最新版本。
9
- """
10
-
11
- out_parser = argparse.ArgumentParser(description='check_version_and_update')
12
- out_parser.add_argument('--package_name', type=str, required=True)
13
- out_parser.add_argument('--cur_version', type=str, required=False)
14
- out_parser.add_argument('--available_versions', nargs='+', type=str, required=False)
15
- out_parser.add_argument('--verbose', type=int, required=False, default=1)
16
- args = out_parser.parse_args().__dict__
17
-
18
- # try to read cur_version
19
- if args["cur_version"] is None:
20
- ex = subprocess.Popen(f'pip list | grep "{args["package_name"]} "', shell=True, stdout=subprocess.PIPE)
21
- out, _ = ex.communicate()
22
- out = out.decode().strip()
23
- # breakpoint()
24
- args["cur_version"] = out.split(args["package_name"])[-1].strip()
25
-
26
- # try to read available versions
27
- if args["available_versions"] is None:
28
- ex = subprocess.Popen(f'pip install {args["package_name"]}==?', shell=True, stdout=subprocess.PIPE,
29
- stderr=subprocess.STDOUT)
30
- out, _ = ex.communicate()
31
- out = out.decode().strip()
32
- if "(from versions:" in out:
33
- v_ls = out.split("(from versions:")[-1].rsplit(")", 1)[0].split(",", -1)
34
- v_ls = [i.strip() for i in v_ls]
2
+
3
+ try:
4
+ from kevin_toolbox.env_info import version
5
+ except:
6
+ import version
7
+
8
+
9
+ def check_version_and_update(package_name, cur_version=None, available_versions=None):
10
+ """
11
+ 检查当前版本,并尝试更新
12
+ - 若在 pip 的可用版本中,有比当前版本更高的版本,则更新到可以获取到的最新版本。
13
+ """
14
+ # try to read cur_version
15
+ if cur_version is None:
16
+ ex = subprocess.Popen(f'pip list | grep "{package_name} "', shell=True, stdout=subprocess.PIPE)
17
+ out, _ = ex.communicate()
18
+ out = out.decode().strip()
19
+ # breakpoint()
20
+ cur_version = out.split(package_name)[-1].strip()
21
+
22
+ # try to read available versions
23
+ if available_versions is None:
24
+ ex = subprocess.Popen(f'pip install {package_name}==?', shell=True, stdout=subprocess.PIPE,
25
+ stderr=subprocess.STDOUT)
26
+ out, _ = ex.communicate()
27
+ out = out.decode().strip()
28
+ if "(from versions:" in out:
29
+ v_ls = out.split("(from versions:")[-1].rsplit(")", 1)[0].split(",", -1)
30
+ v_ls = [i.strip() for i in v_ls]
31
+ else:
32
+ v_ls = ["none"]
33
+ available_versions = version.sort_ls(version_ls=v_ls, reverse=True)
34
+
35
+ b_success_updated = False
36
+ new_version = None
37
+ if len(available_versions) > 0 and version.compare(available_versions[0], ">", cur_version):
38
+ ex = subprocess.Popen(
39
+ f'pip install {package_name}=={available_versions[0]} --no-dependencies',
40
+ shell=True, stdout=subprocess.PIPE
41
+ )
42
+ out, _ = ex.communicate()
43
+ msg = out.decode().strip()
44
+ if ex.returncode == 0:
45
+ b_success_updated = True
46
+ new_version = available_versions[0]
35
47
  else:
36
- v_ls = ["none"]
37
- args["available_versions"] = version.sort_ls(version_ls=v_ls, reverse=True)
38
-
39
- if len(args["available_versions"]) > 0 and version.compare(args["available_versions"][0], ">", args["cur_version"]):
40
- ex = subprocess.Popen(
41
- f'pip install {args["package_name"]}=={args["available_versions"][0]} --no-dependencies',
42
- shell=True, stdout=subprocess.PIPE
43
- )
44
- out, _ = ex.communicate()
45
- res = out.decode().strip()
46
- else:
47
- res = "Already the latest version, no need to update"
48
-
49
- if args["verbose"]:
50
- # print(args)
51
- print(res)
48
+ msg = "Already the latest version, no need to update"
49
+
50
+ res_s = dict(version_before_updated=cur_version, version_after_updated=new_version,
51
+ available_versions=available_versions, b_success_updated=b_success_updated, msg=msg)
52
+
53
+ return res_s
54
+
55
+
56
+ if __name__ == '__main__':
57
+ import argparse
58
+
59
+ out_parser = argparse.ArgumentParser(description='check_version_and_update')
60
+ out_parser.add_argument('--package_name', type=str, required=True)
61
+ out_parser.add_argument('--cur_version', type=str, required=False)
62
+ out_parser.add_argument('--available_versions', nargs='+', type=str, required=False)
63
+ out_parser.add_argument('--verbose', type=lambda x: bool(eval(x)), required=False, default=True)
64
+ args = out_parser.parse_args().__dict__
65
+
66
+ b_version = args.pop("verbose")
67
+
68
+ res_s_ = check_version_and_update(**args)
69
+
70
+ if b_version:
71
+ for k, v in res_s_.items():
72
+ print(f"{k}: {v}")
@@ -0,0 +1,52 @@
1
+ import os
2
+ import pytest
3
+ from kevin_toolbox.env_info import check_validity_and_uninstall, check_version_and_update
4
+ from kevin_toolbox.patches.for_test import check_consistency
5
+
6
+
7
+ @pytest.mark.parametrize(
8
+ "package_name, expiration_timestamp, expected_s",
9
+ [
10
+ ("tqdm", 1, {"b_success_uninstalled": True}),
11
+ ("tqdm", 1e10, {"b_success_uninstalled": False}),
12
+ ]
13
+ )
14
+ def test_check_validity_and_uninstall(package_name, expiration_timestamp, expected_s):
15
+ # call by func
16
+ res_s = check_validity_and_uninstall(package_name=package_name, expiration_timestamp=expiration_timestamp)
17
+ print(res_s)
18
+ for k, v in expected_s.items():
19
+ check_consistency(res_s[k], v)
20
+
21
+ # call by script
22
+ os.system(
23
+ f'python {os.path.dirname(os.path.split(__file__)[0])}/check_validity_and_uninstall.py ' +
24
+ (f'--package_name {package_name} ' if package_name is not None else '') +
25
+ (f'--expiration_timestamp {expiration_timestamp} ' if expiration_timestamp is not None else '') +
26
+ f'--verbose 1'
27
+ )
28
+
29
+
30
+ @pytest.mark.parametrize(
31
+ "package_name, cur_version, available_versions, expected_s",
32
+ [
33
+ ("tqdm", None, None, {}),
34
+ ("tqdm", "1.3.1", ["1.1.2", "1.1.3"], {"b_success_updated": False}),
35
+ ]
36
+ )
37
+ def test_check_version_and_update(package_name, cur_version, available_versions, expected_s):
38
+ # call by func
39
+ res_s = check_version_and_update(package_name=package_name, cur_version=cur_version,
40
+ available_versions=available_versions)
41
+ print(res_s)
42
+ for k, v in expected_s.items():
43
+ check_consistency(res_s[k], v)
44
+
45
+ # call by script
46
+ os.system(
47
+ f'python {os.path.dirname(os.path.split(__file__)[0])}/check_version_and_update.py ' +
48
+ (f'--package_name {package_name} ' if package_name is not None else '') +
49
+ (f'--cur_version {cur_version} ' if cur_version is not None else '') +
50
+ (f'--available_versions {available_versions} ' if available_versions is not None else '') +
51
+ f'--verbose 1'
52
+ )
@@ -0,0 +1,50 @@
1
+ import os
2
+ import pytest
3
+ from kevin_toolbox.env_info.variable_ import Env_Vars_Parser
4
+ from kevin_toolbox.patches.for_test import check_consistency
5
+
6
+
7
+ @pytest.mark.parametrize(
8
+ "input_text, expected",
9
+ [
10
+ # 验证 ${<cfg_name>:<var_name>} 的情况
11
+ (
12
+ "666/123${SYS:HOME}/afasf/${/xxx.../xxx.json:111:222}336",
13
+ ["666/123", ("SYS", [':'], ['HOME']), "/afasf/", ("/xxx.../xxx.json", [':', ':'], ['111', '222']),
14
+ "336"]
15
+ ),
16
+ # 验证 ${<cfg_name>} 和 ${:<var_name>} 的混合情况
17
+ (
18
+ "start${CFG}middle${:VAR}end",
19
+ ["start", ("CFG", [], []), "middle", ('', [':'], ['VAR']), "end"]
20
+ ),
21
+ (
22
+ "${:VAR}",
23
+ [('', [':'], ['VAR'])]
24
+ ),
25
+ (
26
+ "${CFG}",
27
+ [("CFG", [], [])]
28
+ ),
29
+ (
30
+ "{:VAR}",
31
+ ["{:VAR}"]
32
+ ),
33
+ ]
34
+ )
35
+ def test_split_string_in_env_vars_parser(input_text, expected):
36
+ result = Env_Vars_Parser.split_string(input_text)
37
+ check_consistency(result, expected)
38
+
39
+
40
+ def test_env_vars_parser_0():
41
+ env_cfg_file = os.path.expanduser("~/.kvt_cfg/.temp.json")
42
+ from kevin_toolbox.data_flow.file import json_
43
+ json_.write(content={"dataset_dir": ["~/data", "~/dataset"], "version": "001"}, file_path=env_cfg_file)
44
+ input_text = "/root/${KVT_TEMP:dataset_dir@1}/${KVT_TEMP:version}/${HOME}/${SYS:HOME}"
45
+ expected = "/".join(["/root/~/dataset/001", ] + [os.path.expanduser("~")] * 2)
46
+
47
+ #
48
+ parser = Env_Vars_Parser()
49
+ result = parser(input_text)
50
+ check_consistency(expected, result)
@@ -0,0 +1,2 @@
1
+ from .env_vars_parser import Env_Vars_Parser
2
+ env_vars_parser = Env_Vars_Parser()
@@ -0,0 +1,88 @@
1
+ import os
2
+ import re
3
+ from kevin_toolbox.data_flow.file import json_
4
+
5
+
6
+ class Env_Vars_Parser:
7
+ """
8
+ 解释并替换字符串中${}形式指定的环境变量
9
+ 支持以下几种方式:
10
+ - "${HOME}" 家目录
11
+ - "${SYS:<var_name>}" 其他系统环境变量
12
+ 在 linux 系统可以通过 env 命令来打印当前的环境变量,比如家目录也可以使用 ${SYS:HOME} 来表示
13
+ - "${KVT_XXX<ndl_name>}" 读取配置文件 ~/.kvt_cfg/.xxx.json 中的变量(xxx将被自动转为小写)
14
+ 配置文件要求是 ndl 结构,比如当配置文件 ~/.kvt_cfg/.ndl.json 中保存的值为:
15
+ {"dataset_dir":["~/data", "~/dataset"], ...}
16
+ 时,如果要指定使用 dataset_dir 下第二个路径,那么可以使用 ${KVT_NDL:dataset_dir@1} 来表示
17
+ - "${/xxx.../xxx.json<ndl_name>}" 读取指定路径下的配置文件 /xxx.../xxx.json 中的变量
18
+ """
19
+
20
+ def __init__(self, home_dir=None):
21
+ self.cfg_s = dict(
22
+ SYS=dict(os.environ),
23
+ HOME=home_dir if home_dir is not None else os.path.expanduser("~")
24
+ )
25
+
26
+ def __call__(self, *args, **kwargs):
27
+ return self.parse(*args, **kwargs)
28
+
29
+ def parse(self, text):
30
+ """
31
+ 解释并替换
32
+ """
33
+ temp_ls = []
34
+ for it in self.split_string(text=text):
35
+ if isinstance(it, str):
36
+ temp_ls.append(it)
37
+ continue
38
+ root_node, method_ls, node_ls = it
39
+ if root_node not in self.cfg_s:
40
+ try:
41
+ if root_node.startswith("KVT_"):
42
+ t0, t1 = root_node.lower().split("_", 1)
43
+ root_node = os.path.expanduser(f'~/.{t0}_cfg/.{t1}.json')
44
+ assert os.path.isfile(root_node), f'file not exist: {root_node}'
45
+ cfg = json_.read(file_path=root_node, b_use_suggested_converter=True)
46
+ self.cfg_s[root_node] = cfg
47
+ except Exception as e:
48
+ raise ValueError(f"invalid cfg_name: {root_node}, because: {e}")
49
+ cfg = self.cfg_s.get(root_node, None)
50
+ if cfg is None:
51
+ raise ValueError(f"invalid cfg_name: {root_node}")
52
+ #
53
+ from kevin_toolbox.nested_dict_list import get_value
54
+ temp_ls.append(get_value(var=cfg, name=it))
55
+
56
+ return "".join([f'{i}' for i in temp_ls])
57
+
58
+ @staticmethod
59
+ def split_string(text):
60
+ """
61
+ 将字符串中 ${<cfg_name>} 部分的内容分割出来
62
+ 比如对于 "666/123${SYS:HOME}/afasf/${/xxx.../xxx.json:111:222}336"
63
+ 应该分割为 ["666/123", ("SYS:HOME", ), "/afasf/", ("/xxx.../xxx.json:111:222", ), "336"]
64
+ 然后再对其中 tuple 部分使用 ndl.name_handler.parse_name 进行解释
65
+ """
66
+ from kevin_toolbox.nested_dict_list.name_handler import parse_name
67
+ pattern = r'\$\{([^}]+)\}'
68
+ matches = re.finditer(pattern, text)
69
+
70
+ result = []
71
+ last_end = 0
72
+
73
+ for match in matches:
74
+ start = match.start()
75
+ if start > last_end:
76
+ result.append(text[last_end:start])
77
+ result.append(parse_name(name=match.group(1)))
78
+ last_end = match.end()
79
+
80
+ if last_end < len(text):
81
+ result.append(text[last_end:])
82
+
83
+ return result
84
+
85
+
86
+ if __name__ == '__main__':
87
+ env_vars_parser = Env_Vars_Parser()
88
+ print(env_vars_parser.split_string("666/123${:VAR}/afasf/${/xxx.../xxx.json:111:222}336"))
@@ -7,7 +7,7 @@ def get_value(var, name, b_pop=False, **kwargs):
7
7
 
8
8
  参数:
9
9
  var: 任意支持索引取值的变量
10
- name: <str> 名字
10
+ name: <str/parsed_name> 名字
11
11
  名字 name 的具体介绍参见函数 name_handler.parse_name()
12
12
  假设 var=dict(acc=[0.66,0.78,0.99]),如果你想读取 var["acc"][1] => 0.78,那么可以将 name 写成:
13
13
  ":acc@1" 或者 "|acc|1" 等。
@@ -19,7 +19,11 @@ def get_value(var, name, b_pop=False, **kwargs):
19
19
  - 不设置(默认)。当取值失败时将报错。
20
20
  - 设置为任意值。取值失败时将返回该值。
21
21
  """
22
- _, method_ls, node_ls = parse_name(name=name, b_de_escape_node=True)
22
+ if isinstance(name, (tuple, list,)):
23
+ assert len(name) == 3, f'invalid parsed name {name}'
24
+ _, method_ls, node_ls = name
25
+ else:
26
+ _, method_ls, node_ls = parse_name(name=name, b_de_escape_node=True)
23
27
 
24
28
  try:
25
29
  pre, cur = None, var
@@ -46,3 +50,9 @@ def get_value(var, name, b_pop=False, **kwargs):
46
50
  raise IndexError(f'invalid name {name}')
47
51
 
48
52
  return cur
53
+
54
+
55
+ if __name__ == "__main__":
56
+ var_ = dict(acc=[0.66, 0.78, 0.99])
57
+ print(get_value(var_, ''))
58
+ print(get_value(var_, ['', [], []]))
@@ -1,9 +1,9 @@
1
1
  import os
2
- import time
2
+ import tempfile
3
3
  from kevin_toolbox.patches import for_os
4
4
  from kevin_toolbox.data_flow.file import json_
5
5
  import kevin_toolbox.nested_dict_list as ndl
6
- import tempfile
6
+ from kevin_toolbox.env_info.variable_ import env_vars_parser
7
7
 
8
8
 
9
9
  def read(input_path, **kwargs):
@@ -13,8 +13,6 @@ def read(input_path, **kwargs):
13
13
  参数:
14
14
  input_path: <path> 文件夹或者 .tar 文件,具体结构参考 write()
15
15
  """
16
- from kevin_toolbox.nested_dict_list.serializer.variable import SERIALIZER_BACKEND
17
-
18
16
  assert os.path.exists(input_path)
19
17
 
20
18
  with tempfile.TemporaryDirectory(dir=os.path.dirname(input_path)) as temp_dir:
@@ -63,7 +61,10 @@ def _read_unpacked_ndl(input_path, **kwargs):
63
61
  for name in processed_nodes:
64
62
  value = ndl.get_value(var=var, name=name)
65
63
  if isinstance(value, (dict,)) and "backend" in value and "name" in value:
66
- bk = SERIALIZER_BACKEND.get(name=value.pop("backend"))(folder=os.path.join(input_path, "nodes"))
64
+ nodes_dir = env_vars_parser(value.pop("nodes_dir")) if "nodes_dir" in value else os.path.join(input_path,
65
+ "nodes")
66
+ assert os.path.exists(nodes_dir), f"nodes_dir {nodes_dir} does not exist"
67
+ bk = SERIALIZER_BACKEND.get(name=value.pop("backend"))(folder=nodes_dir)
67
68
  ndl.set_value(var=var, name=name, value=bk.read(**value))
68
69
 
69
70
  #
@@ -7,6 +7,7 @@ from kevin_toolbox.data_flow.file import json_
7
7
  from kevin_toolbox.patches import for_os
8
8
  import kevin_toolbox.nested_dict_list as ndl
9
9
  from kevin_toolbox.nested_dict_list.traverse import Traversal_Mode
10
+ from kevin_toolbox.env_info.variable_ import env_vars_parser
10
11
  from .enum_variable import Strictness_Level
11
12
  from .saved_node_name_builder import Saved_Node_Name_Builder
12
13
 
@@ -32,7 +33,17 @@ def write(var, output_dir, settings=None, traversal_mode=Traversal_Mode.BFS, b_p
32
33
  var: <nested dict list>
33
34
  settings: <list of dict> 指定对于不同节点or部分的处理模式
34
35
  其结构为:
35
- [{"match_cond": <匹配模式>, "backend": <序列化方式>, "traversal_mode": <遍历方式>}, ...]
36
+ [
37
+ {
38
+ "match_cond": <匹配模式>,
39
+ "backend": <序列化方式>,
40
+ "traversal_mode": <遍历方式>,
41
+ ("nodes_dir": <节点保存目录>,
42
+ "saved_node_name_format": <nodes目录下节点文件/文件夹的命名方式>)
43
+ },
44
+ ...
45
+ ]
46
+ 允许专门指定某个处理模式下所使用的 nodes_dir 和 saved_node_name_format,若不指定,则使用后面的默认值。
36
47
  <匹配模式>支持以下4种:
37
48
  - "<level>..." 匹配指定层的节点,比如"<level>0"表示根节点,"<level>-1"表示所有叶节点
38
49
  - "<node>name" 匹配指定name的节点
@@ -85,7 +96,9 @@ def write(var, output_dir, settings=None, traversal_mode=Traversal_Mode.BFS, b_p
85
96
  - "low" / Strictness_Level.IGNORE_FAILURE 匹配不完整,或者某些节点尝试过所有匹配到
86
97
  的 backend 之后仍然无法写入
87
98
  默认是 "normal"
88
- saved_node_name_format: <str> nodes/目录下节点文件/文件夹的命名方式。
99
+ nodes_dir: <path> 节点内容保存目录
100
+ 默认为保存在 <output_dir>/nodes 下
101
+ saved_node_name_format: <str> nodes目录下节点文件/文件夹的命名方式。
89
102
  基本结构为: '{<part_0>}...{<part_1>}...'
90
103
  其中 {} 内将根据 part 指定的类型进行自动填充。目前支持以下几种选项:
91
104
  - "raw_name" 该节点对应位置的 name。
@@ -132,7 +145,8 @@ def write(var, output_dir, settings=None, traversal_mode=Traversal_Mode.BFS, b_p
132
145
  var = value_parser.replace_identical_with_reference(var=var, flag="same", b_reverse=False)
133
146
  if settings is None:
134
147
  settings = [{"match_cond": "<level>-1", "backend": (":skip:simple", ":numpy:npy", ":torch:tensor", ":pickle")}]
135
- snn_builder = Saved_Node_Name_Builder(format_=saved_node_name_format)
148
+ default_snn_builder = Saved_Node_Name_Builder(format_=saved_node_name_format)
149
+ default_nodes_dir = os.path.join(temp_output_dir, "nodes")
136
150
 
137
151
  # 构建 processed_s
138
152
  # 为了避免重复处理节点/结构,首先构建与 var 具有相似结构的 processed_s 来记录处理处理进度。
@@ -154,13 +168,20 @@ def write(var, output_dir, settings=None, traversal_mode=Traversal_Mode.BFS, b_p
154
168
  if isinstance(setting["match_cond"], str) and setting["match_cond"].startswith("<eval>"):
155
169
  setting["match_cond"] = eval(setting["match_cond"][6:])
156
170
  assert callable(setting["match_cond"]) or isinstance(setting["match_cond"], str)
171
+ # nodes_dir = env_vars_parser(value.pop("nodes_dir")) if "nodes_dir" in value else os.path.join(input_path,
172
+ # "nodes")
173
+ # assert os.path.exists(nodes_dir), f"nodes_dir {nodes_dir} does not exist"
157
174
  # backend
158
175
  backend_name_ls = setting["backend"] if isinstance(setting["backend"], (list, tuple)) else [setting["backend"]]
176
+ nodes_dir = env_vars_parser(setting["nodes_dir"]) if "nodes_dir" in setting else default_nodes_dir
159
177
  for i in backend_name_ls:
160
178
  if i not in backend_s:
161
- backend_s[i] = SERIALIZER_BACKEND.get(name=i)(folder=os.path.join(temp_output_dir, "nodes"))
179
+ backend_s[i] = SERIALIZER_BACKEND.get(name=i)(folder=nodes_dir)
162
180
  #
163
181
  t_mode = Traversal_Mode(setting.get("traversal_mode", traversal_mode))
182
+ # snn_builder
183
+ snn_builder = Saved_Node_Name_Builder(
184
+ format_=setting["saved_node_name_format"]) if "saved_node_name_format" in setting else default_snn_builder
164
185
  # _process and paras
165
186
  if callable(setting["match_cond"]):
166
187
  if t_mode in (Traversal_Mode.DFS_PRE_ORDER, Traversal_Mode.BFS):
@@ -181,7 +202,7 @@ def write(var, output_dir, settings=None, traversal_mode=Traversal_Mode.BFS, b_p
181
202
  # print(processed_s)
182
203
  # print(f'backend: {i}')
183
204
  _process(backend=backend_s[i], strictness_level=strictness_level, processed_s=processed_s,
184
- snn_builder=snn_builder, **paras)
205
+ snn_builder=snn_builder, b_record_nodes_dir=nodes_dir != default_nodes_dir, **paras)
185
206
  if "_hook_for_debug" in kwargs:
186
207
  kwargs["_hook_for_debug"]["processed"].append([i, ndl.copy_(var=processed_s, b_deepcopy=True)])
187
208
 
@@ -246,13 +267,29 @@ def _judge_processed_or_not(processed_s, name):
246
267
  return b_processed
247
268
 
248
269
 
249
- def _process_for_level(var, processed_s, processed_s_bak, level, backend, strictness_level, snn_builder):
270
+ def _process_for_level(var, processed_s, processed_s_bak, level, backend, strictness_level, snn_builder,
271
+ b_record_nodes_dir):
250
272
  for name, _ in ndl.get_nodes(var=processed_s_bak, level=level, b_strict=True):
251
273
  _process_for_name(var=var, processed_s=processed_s, name=name, backend=backend,
252
- strictness_level=strictness_level, snn_builder=snn_builder)
274
+ strictness_level=strictness_level, snn_builder=snn_builder,
275
+ b_record_nodes_dir=b_record_nodes_dir)
253
276
 
254
277
 
255
- def _process_for_name(var, processed_s, name, backend, strictness_level, snn_builder):
278
+ def _write_by_backend(backend, snn_builder, raw_name, value, strictness_level, b_record_nodes_dir):
279
+ snn_name = snn_builder(name=raw_name, value=value)
280
+ try:
281
+ res = backend.write(name=snn_name, var=value)
282
+ except:
283
+ assert strictness_level in (Strictness_Level.IGNORE_FAILURE, Strictness_Level.COMPATIBLE), \
284
+ f'An error occurred when node {snn_name} was saved using the first matched backend {backend}'
285
+ return False, None # b_success, res
286
+ if b_record_nodes_dir and isinstance(res, (dict,)) and res is not value:
287
+ # 记录节点位置
288
+ res["nodes_dir"] = backend.paras["folder"]
289
+ return True, res
290
+
291
+
292
+ def _process_for_name(var, processed_s, name, backend, strictness_level, snn_builder, b_record_nodes_dir):
256
293
  if _judge_processed_or_not(processed_s=processed_s, name=name) is True:
257
294
  # has been processed
258
295
  return
@@ -262,18 +299,15 @@ def _process_for_name(var, processed_s, name, backend, strictness_level, snn_bui
262
299
  return
263
300
 
264
301
  # write by backend
265
- snn_name = snn_builder(name=name, value=value)
266
- try:
267
- res = backend.write(name=snn_name, var=value)
268
- except:
269
- assert strictness_level in (Strictness_Level.IGNORE_FAILURE, Strictness_Level.COMPATIBLE), \
270
- f'An error occurred when node {name} was saved using the first matched backend {backend}'
302
+ b_success, res = _write_by_backend(backend, snn_builder, name, value, strictness_level, b_record_nodes_dir)
303
+ if not b_success:
271
304
  return
272
305
  ndl.set_value(var=processed_s, name=name, value=True, b_force=False)
273
306
  ndl.set_value(var=var, name=name, value=res, b_force=False)
274
307
 
275
308
 
276
- def _process_from_top_to_down(var, processed_s, match_cond, backend, traversal_mode, strictness_level, snn_builder):
309
+ def _process_from_top_to_down(var, processed_s, match_cond, backend, traversal_mode, strictness_level, snn_builder,
310
+ b_record_nodes_dir):
277
311
  def match_cond_(parent_type, idx, value):
278
312
  nonlocal match_cond, processed_s
279
313
 
@@ -288,12 +322,8 @@ def _process_from_top_to_down(var, processed_s, match_cond, backend, traversal_m
288
322
  nonlocal processed_s, backend, strictness_level
289
323
 
290
324
  # write by backend
291
- snn_name = snn_builder(name=idx, value=value)
292
- try:
293
- res = backend.write(name=snn_name, var=value)
294
- except:
295
- assert strictness_level in (Strictness_Level.IGNORE_FAILURE, Strictness_Level.COMPATIBLE), \
296
- f'An error occurred when node {name} was saved using the first matched backend {backend}'
325
+ b_success, res = _write_by_backend(backend, snn_builder, idx, value, strictness_level, b_record_nodes_dir)
326
+ if not b_success:
297
327
  return value
298
328
  ndl.set_value(var=processed_s, name=idx, value=True, b_force=True)
299
329
  return res
@@ -302,7 +332,8 @@ def _process_from_top_to_down(var, processed_s, match_cond, backend, traversal_m
302
332
  b_use_name_as_idx=True, traversal_mode=traversal_mode, b_traverse_matched_element=False)
303
333
 
304
334
 
305
- def _process_from_down_to_top(var, processed_s, match_cond, backend, traversal_mode, strictness_level, snn_builder):
335
+ def _process_from_down_to_top(var, processed_s, match_cond, backend, traversal_mode, strictness_level, snn_builder,
336
+ b_record_nodes_dir):
306
337
  processed_s_raw, processed_s = processed_s, ndl.copy_(var=processed_s, b_deepcopy=True)
307
338
 
308
339
  def match_cond_(parent_type, idx, value):
@@ -320,12 +351,8 @@ def _process_from_down_to_top(var, processed_s, match_cond, backend, traversal_m
320
351
  nonlocal processed_s, backend, processed_s_raw, strictness_level
321
352
 
322
353
  # write by backend
323
- snn_name = snn_builder(name=idx, value=value)
324
- try:
325
- res = backend.write(name=snn_name, var=value)
326
- except:
327
- assert strictness_level in (Strictness_Level.IGNORE_FAILURE, Strictness_Level.COMPATIBLE), \
328
- f'An error occurred when node {name} was saved using the first matched backend {backend}'
354
+ b_success, res = _write_by_backend(backend, snn_builder, idx, value, strictness_level, b_record_nodes_dir)
355
+ if not b_success:
329
356
  return value
330
357
  ndl.set_value(var=processed_s, name=idx, value=True, b_force=True)
331
358
  ndl.set_value(var=processed_s_raw, name=idx, value=True, b_force=True)
@@ -1,5 +1,5 @@
1
1
  from kevin_toolbox.nested_dict_list import get_value
2
- from kevin_toolbox.nested_dict_list.name_handler import parse_name, escape_node
2
+ from kevin_toolbox.nested_dict_list.name_handler import parse_name
3
3
 
4
4
 
5
5
  def set_value(var, name, value, b_force=False):
@@ -8,7 +8,7 @@ def set_value(var, name, value, b_force=False):
8
8
 
9
9
  参数:
10
10
  var: 任意支持索引赋值的变量
11
- name: <string> 名字
11
+ name: <string/parsed_name> 名字
12
12
  名字 name 的具体介绍参见函数 name_handler.parse_name()
13
13
  假设 var=dict(acc=[0.66,0.78,0.99]),如果你想将 var["acc"][1] 设置为 100,那么可以将 name 写成:
14
14
  ":acc@1" 或者 "|acc|1" 等。
@@ -25,14 +25,18 @@ def set_value(var, name, value, b_force=False):
25
25
  若 b_force 为 True 有可能不会在 var 的基础上进行改变,而是返回一个新的ndl结构,
26
26
  因此建议使用赋值 var = ndl.set_value(var) 来避免可能的错误。
27
27
  """
28
- _, method_ls, node_ls = parse_name(name=name, b_de_escape_node=False)
28
+ if isinstance(name, (tuple, list,)):
29
+ assert len(name) == 3, f'invalid parsed name {name}'
30
+ _, method_ls, node_ls = name
31
+ else:
32
+ _, method_ls, node_ls = parse_name(name=name, b_de_escape_node=True)
29
33
  if len(node_ls) == 0:
30
34
  return value
31
35
 
32
- raw_key = escape_node(node=node_ls[-1], b_reversed=True, times=1)
36
+ raw_key = node_ls[-1]
33
37
 
34
38
  try:
35
- item = get_value(var=var, name=name[:-1 - len(node_ls[-1])])
39
+ item = get_value(var=var, name=('', method_ls[:-1], node_ls[:-1]))
36
40
  if method_ls[-1] == "@":
37
41
  key = eval(raw_key)
38
42
  elif method_ls[-1] == "|":
@@ -63,6 +67,12 @@ def set_value(var, name, value, b_force=False):
63
67
  else:
64
68
  # 其他,比如当 key 为元组、浮点数等等时,则使用 dict 构建
65
69
  value = {key: value}
66
- var = set_value(var=var, name=name[:-1 - len(node_ls[-1])], value=value, b_force=b_force)
70
+ var = set_value(var=var, name=('', method_ls[:-1], node_ls[:-1]), value=value, b_force=b_force)
67
71
 
68
72
  return var
73
+
74
+
75
+ if __name__ == "__main__":
76
+ var_ = []
77
+ set_value(var=var_, name="@2:data", value=1, b_force=True)
78
+ print(var_)
@@ -2,7 +2,7 @@ import os
2
2
  from kevin_toolbox.patches.for_os import walk
3
3
 
4
4
 
5
def find_files_in_dir(input_dir, suffix_ls=None, b_relative_path=True, b_ignore_case=True):
    """
    Generator yielding all files under a directory, optionally filtered by suffix.
    Implemented on top of the filtering rules of for_os.walk.

    Args:
        input_dir:          directory to search.
        suffix_ls:          <list of str / None> suffixes to keep.
                                None (default) means no suffix filtering at all.
        b_relative_path:    <bool> yield paths relative to input_dir.
        b_ignore_case:      <bool> ignore case when matching suffixes.
    """

    def _emit(walk_iter):
        # shared emission logic: turn walk() results into (optionally relative) paths
        for root, dirs, files in walk_iter:
            for file in files:
                file_path = os.path.join(root, file)
                if b_relative_path:
                    file_path = os.path.relpath(file_path, start=input_dir)
                yield file_path

    if suffix_ls is not None:
        suffix_tuple = tuple(set(suffix_ls))
        if b_ignore_case:
            suffix_tuple = tuple(s.lower() for s in suffix_tuple)
        # NOTE(review): this branch also skips symlinked files, while the
        # suffix_ls=None branch does not — behavior preserved from the original.
        ignore_s = [{
            "func": lambda _, b_is_symlink, path: b_is_symlink or not (
                path.lower() if b_ignore_case else path).endswith(suffix_tuple),
            "scope": ["files", ]
        }]
        yield from _emit(walk(top=input_dir, topdown=True, ignore_s=ignore_s))
    else:
        yield from _emit(walk(top=input_dir, topdown=True))
@@ -0,0 +1 @@
1
+ from .group_files_by_timestamp import group_files_by_timestamp
@@ -0,0 +1,90 @@
1
+ import os
2
+ from collections import defaultdict
3
+ import warnings
4
+ import time
5
+ from kevin_toolbox.patches.for_os import find_files_in_dir, copy, remove
6
+
7
# Per-timestamp-type getters returning a file's time as time.struct_time:
# "c" -> os.path.getctime, "a" -> last access, "m" -> last modification.
# Raw values are clamped to >= 0 before conversion (presumably to guard
# against negative timestamps — TODO confirm).
get_timestamp_method_s = {
    "c": lambda file_path: time.localtime(max(os.path.getctime(file_path), 0)),
    "a": lambda file_path: time.localtime(max(os.path.getatime(file_path), 0)),
    "m": lambda file_path: time.localtime(max(os.path.getmtime(file_path), 0)),
}
13
+
14
+
15
def group_files_by_timestamp(input_dir, output_dir=None, suffix_ls=None, b_ignore_case=True,
                             grouping_rule=("%Y", "%m_%d"), timestamp_type="m", b_keep_source=True, b_verbose=False):
    """
    Group the files in input_dir by their timestamp and copy them into output_dir.

    Args:
        input_dir:      input directory.
        output_dir:     output directory.
                            When None, perform a dry run: files are grouped and
                            reported but nothing is copied.
        suffix_ls:      <list of str / None> only process files with these suffixes.
                            None (default) means process every file.
        b_ignore_case:  <bool> ignore case when matching suffixes. Defaults to True.
        grouping_rule:  <str / list of str> grouping rule built from time.strftime
                            patterns. Defaults to ("%Y", "%m_%d"), i.e. group by
                            year then month_day: a file stamped 2016-03-20 11:45:39
                            goes to <output_dir>/2016/03_20.
                            Other examples:
                                - "%Y_%m"                  down to the month
                                - ("%Y", "%m_%d", "%H-%M-%S")  down to the second
        timestamp_type: which timestamp to use:
                            - "m" modification time
                            - "a" last access time
                            - "c" os.path.getctime time
                            Defaults to "m".
        b_keep_source:  <bool> keep the original files in input_dir.
                            When False, a source file is removed after it has been
                            copied successfully (move semantics).
        b_verbose:      <bool> print each copy as it happens.

    Returns:
        <dict> group_name -> dict(src_ls=..., dst_ls=..., b_success_ls=...)
    """
    if isinstance(grouping_rule, str):
        grouping_rule = [grouping_rule]
    assert timestamp_type in ('m', 'a', 'c')
    b_dry_run = output_dir is None
    if not b_dry_run:
        # fix: the original called os.makedirs(output_dir) unconditionally,
        # crashing for the documented output_dir=None dry-run mode
        os.makedirs(output_dir, exist_ok=True)

    get_timestamp = get_timestamp_method_s[timestamp_type]
    res_s = defaultdict(lambda: dict(src_ls=[], dst_ls=[], b_success_ls=[]))
    for file in find_files_in_dir(input_dir=input_dir, suffix_ls=suffix_ls, b_ignore_case=b_ignore_case,
                                  b_relative_path=True):
        src = os.path.join(input_dir, file)
        timestamp = get_timestamp(src)
        group_name = tuple(f'{time.strftime(i, timestamp)}' for i in grouping_rule)
        # destination path relative to output_dir (matches the original's
        # os.path.relpath(dst, start=output_dir) bookkeeping)
        rel_dst = os.path.join(*group_name, os.path.basename(file))
        if b_dry_run:
            # dry run: report the planned grouping only, touch nothing
            b_success = True
        else:
            out_folder = os.path.join(output_dir, *group_name)
            dst = os.path.join(out_folder, os.path.basename(file))
            os.makedirs(out_folder, exist_ok=True)
            b_success = False
            try:
                copy(src=src, dst=dst)
                if b_verbose:
                    print(f'{file} -> {out_folder}')
                b_success = True
            except Exception:
                warnings.warn(f'failed to copy file {file} to {out_folder}')
            if not b_keep_source and b_success:
                # fix: the original removed ``dst`` — deleting the file it had
                # just copied. Move semantics requires removing the *source*,
                # and only after a successful copy.
                remove(path=src, ignore_errors=True)
        res_s[group_name]['b_success_ls'].append(b_success)
        res_s[group_name]['src_ls'].append(file)
        res_s[group_name]['dst_ls'].append(rel_dst)
    return res_s
73
+
74
+
75
if __name__ == "__main__":
    # Ad-hoc manual run with hard-coded local paths; not part of the public API.
    res = group_files_by_timestamp(suffix_ls=['.jpg', '.mp4', '.png', '.jpeg', '.mov', '.cr2', ".bmp"],
                                   grouping_rule=("%Y-%m", "%Y_%m_%d"),
                                   # "%Y-%m-%d %H:%M:%S" 2016-03-20 11:45:39 #"%a %b" Sat Mar
                                   input_dir="/home/SENSETIME/xukaiming/Desktop/my_repos/python_projects/kevin_toolbox/kevin_toolbox/developing/photo_organization/test/test_data",
                                   output_dir="/home/SENSETIME/xukaiming/Desktop/my_repos/python_projects/kevin_toolbox/kevin_toolbox/developing/photo_organization/test/test_data1",
                                   timestamp_type="m")
    print(res)
File without changes
@@ -0,0 +1,6 @@
1
import os

# Directory three levels above this file, i.e. the parent of the kevin_toolbox
# package (this module lives at kevin_toolbox/utils/variable.py).
# Idiom fix: os.path.dirname(p) is the standard spelling of os.path.split(p)[0].
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

if __name__ == '__main__':
    print(root_dir)
@@ -0,0 +1,76 @@
1
+ Metadata-Version: 2.1
2
+ Name: kevin-toolbox-dev
3
+ Version: 1.4.6
4
+ Summary: 一个常用的工具代码包集合
5
+ Home-page: https://github.com/cantbeblank96/kevin_toolbox
6
+ Download-URL: https://github.com/cantbeblank96/kevin_toolbox/archive/refs/tags/v1.4.6.tar.gz
7
+ Author: kevin hsu
8
+ Author-email: xukaiming1996@163.com
9
+ License: MIT
10
+ Keywords: mathematics,pytorch,numpy,machine-learning,algorithm
11
+ Platform: UNKNOWN
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Programming Language :: Python :: 3
15
+ Requires-Python: >=3.6
16
+ Description-Content-Type: text/markdown
17
+ Requires-Dist: torch (>=1.2.0)
18
+ Requires-Dist: numpy (>=1.19.0)
19
+ Provides-Extra: plot
20
+ Requires-Dist: matplotlib (>=3.0) ; extra == 'plot'
21
+ Provides-Extra: rest
22
+ Requires-Dist: pytest (>=6.2.5) ; extra == 'rest'
23
+ Requires-Dist: line-profiler (>=3.5) ; extra == 'rest'
24
+
25
+ # kevin_toolbox
26
+
27
+ 一个通用的工具代码包集合
28
+
29
+
30
+
31
+ 环境要求
32
+
33
+ ```shell
34
+ numpy>=1.19
35
+ pytorch>=1.2
36
+ ```
37
+
38
+ 安装方法:
39
+
40
+ ```shell
41
+ pip install kevin-toolbox --no-dependencies
42
+ ```
43
+
44
+
45
+
46
+ [项目地址 Repo](https://github.com/cantbeblank96/kevin_toolbox)
47
+
48
+ [使用指南 User_Guide](./notes/User_Guide.md)
49
+
50
+ [免责声明 Disclaimer](./notes/Disclaimer.md)
51
+
52
+ [版本更新记录](./notes/Release_Record.md):
53
+
54
+ - v 1.4.6 (2025-01-24)【new feature】
55
+
56
+ - data_flow.file
57
+ - modify json_.read(),支持输入路径使用 ~ 表示家目录。
58
+ - 【new feature】add excel,该模块用于 excel 表格处理。
59
+ - write_excel_with_matrix():将矩阵写入到 excel 文件中
60
+
61
+ - patches.for_os
62
+ - modify find_files_in_dir(),支持 suffix_ls 设定 None 以表示不进行任何过滤。
63
+ - 【new feature】add organize,该模块用于文件整理。
64
+ - group_files_by_timestamp():将 input_dir 中的文件按照时间戳信息进行分组,输出到 output_dir 中。
65
+ - 添加了对应的测试用例。
66
+ - env_info
67
+ - 【new feature】add check_validity_and_uninstall(),检查当前机器时间是否超过 expiration_timestamp 指定的有效期,若超过则卸载 package_name 对应的库。
68
+ - 【new feature】add check_version_and_update(),检查当前库的版本,并尝试更新。
69
+ - 以上函数均系从同名脚本中抽取出来。
70
+ - 以上修改,均已添加了对应的测试用例。
71
+ - developing
72
+ - 【new feature】add photo_album_organization,该模块包含一系列整理相册相关的脚本。
73
+ - 0_group_by_timestamp.py :按照时间戳分组
74
+ - 1_merge_folders.py :将文件数量较少的目录合并
75
+
76
+
@@ -1,8 +1,8 @@
1
- kevin_toolbox/__init__.py,sha256=xs_PW9P4AhMhyKcprj8m2mOBXfhKslv-B4OCvUYBmmw,410
1
+ kevin_toolbox/__init__.py,sha256=J3exMbMRFMTDP3RxpdeO_mXW0RvxbLdqQ_D0yxWUDmE,410
2
2
  kevin_toolbox/computer_science/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  kevin_toolbox/computer_science/algorithm/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
4
4
  kevin_toolbox/computer_science/algorithm/cache_manager/__init__.py,sha256=p2hddkZ1HfYF9-m2Hx-o9IotwQHd4QwDCePy2ADpTDA,41
5
- kevin_toolbox/computer_science/algorithm/cache_manager/cache_manager.py,sha256=e_mesTHjYlSV2VjXfmrA_osJRZhGe3nyS5SX1R5rfOQ,9151
5
+ kevin_toolbox/computer_science/algorithm/cache_manager/cache_manager.py,sha256=yQ6bqOSPcTrq4FLyuxDERz7Yv9ZWsenjb48q6KRStj4,9771
6
6
  kevin_toolbox/computer_science/algorithm/cache_manager/variable.py,sha256=j6yLezcZxf7gNdxwUxP0PPy2zauVpTr6KFMCg66m9lo,835
7
7
  kevin_toolbox/computer_science/algorithm/cache_manager/cache/__init__.py,sha256=Tt8XgrZsHP3J3f6UucGNhGAN8L7HiVjLLO1JzICR2Mc,70
8
8
  kevin_toolbox/computer_science/algorithm/cache_manager/cache/cache_base.py,sha256=Jln4Ey0sUGrzUqpkHaMP0S_z6NUw43iLnwMbmm-8msg,2229
@@ -72,9 +72,11 @@ kevin_toolbox/data_flow/core/reader/file_iterative_reader.py,sha256=l6UMYnvWwqQm
72
72
  kevin_toolbox/data_flow/core/reader/unified_reader.py,sha256=l6JxPoDUOdx2ZIPX2WLXbGU3VZtTd1AeHn5q6L8GWAI,2453
73
73
  kevin_toolbox/data_flow/core/reader/unified_reader_base.py,sha256=4gIADdV8UKpt2yD8dZjQsXFcF75nJ83ooIae3D7bw2s,11783
74
74
  kevin_toolbox/data_flow/file/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
75
+ kevin_toolbox/data_flow/file/excel/__init__.py,sha256=5m_rmklI6n6yk4rSAEW39pxzYTiAPjC4q69v-oz7Zvs,82
76
+ kevin_toolbox/data_flow/file/excel/write_excel_with_matrix.py,sha256=zrY_l0xCBpjqxm_9MoGpEXaZ4V_UwRMRgShessJ1sxA,5121
75
77
  kevin_toolbox/data_flow/file/json_/__init__.py,sha256=VAt8COS2tO3PJRuhSc43i35fEOlArFM_YahdTmEBaHE,85
76
- kevin_toolbox/data_flow/file/json_/read_json.py,sha256=BhAUCcagwPsSMaMeCJyyxDW3h9SGf1Dfvb0nXi6B_T8,2084
77
- kevin_toolbox/data_flow/file/json_/write_json.py,sha256=mWaxePr_QzfyeCb0hAy4xTKOGX7q0eFjep0jDqOqIgw,2379
78
+ kevin_toolbox/data_flow/file/json_/read_json.py,sha256=RyCeNONMmvVOeX_F3kSSmED_nx4opipLe8OHJzXKZvQ,2151
79
+ kevin_toolbox/data_flow/file/json_/write_json.py,sha256=uG6UnQ9KVhL_UWndGjvLLHF_UoGtOwVn4ADi1Gb1nRU,2417
78
80
  kevin_toolbox/data_flow/file/json_/converter/__init__.py,sha256=oQMgAgzELLq_f4LIIfz5E6l_E7g4lFsXqfmnJ3tPZTY,401
79
81
  kevin_toolbox/data_flow/file/json_/converter/convert_dict_key_to_number.py,sha256=SuSZj_HCqKZutHAJ5AttABnGBRZplPGQhMxJBt2Wlgc,559
80
82
  kevin_toolbox/data_flow/file/json_/converter/convert_ndarray_to_list.py,sha256=GALpC1MFJ4aMzs0FZIfJScYznfCP-gmhPeM8sWXGSWg,391
@@ -136,11 +138,15 @@ kevin_toolbox/developing/temperate/iterator_base.py,sha256=FYMJ49ltbBcV92tUThx8Z
136
138
  kevin_toolbox/developing/temperate/my_iterator.py,sha256=dpFq_3wa4W2Y4SZfVl93T8oAEoy3gDOkQUdXB4p-cBQ,1663
137
139
  kevin_toolbox/developing/temperate/my_iterator_base.py,sha256=gLv9zdM987BHRghTfADUTwwQcMOV4WZWx-8-QvlV2uY,1214
138
140
  kevin_toolbox/developing/temperate/sequence_map_base.py,sha256=ha1EIMhn9lBF05s9niHLTuxhRslOx5faOk8UIjhhxUk,217
139
- kevin_toolbox/env_info/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
140
- kevin_toolbox/env_info/check_validity_and_uninstall.py,sha256=GskNfWwj2ak2AszV_0MMs2hBZb2tmRD0POO11UiouLM,879
141
- kevin_toolbox/env_info/check_version_and_update.py,sha256=og9ngoO6VhnlmUIkL0IZCyGXrI8rpZWRoN4t9FYGIkw,1953
141
+ kevin_toolbox/env_info/__init__.py,sha256=8Io5RN5RcbEoMLHY4wfMa4pJxa1w0SMaXBN4v6k5CrM,134
142
+ kevin_toolbox/env_info/check_validity_and_uninstall.py,sha256=FOLeVKRqqiFnAQpx_AmIc8D3UcJigzIx58XDTJgv_qw,1676
143
+ kevin_toolbox/env_info/check_version_and_update.py,sha256=GsL5tqodh7bbcXmWRT1vWy9GFRhqHJmTgEgvJg4GT5E,2739
142
144
  kevin_toolbox/env_info/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
145
+ kevin_toolbox/env_info/test/test_check_.py,sha256=wiIM_UVy_ksdq3la-pbF605Lhl5iW3a-0S_B74QzPT8,2054
146
+ kevin_toolbox/env_info/test/test_variable_.py,sha256=n9To8UNfBSNey8Xy7relXcbrf0yX8ZoZzfJctd2fHBs,1657
143
147
  kevin_toolbox/env_info/test/test_version.py,sha256=xnF7RAcLSN3gpjIbVxFUV2-lmv0w7gOhdRa4XN0z0Q0,1728
148
+ kevin_toolbox/env_info/variable_/__init__.py,sha256=qFs8ZZVBjAfj6IuUujYxaEnmXk6HEbtN6GXaIkuQhoM,81
149
+ kevin_toolbox/env_info/variable_/env_vars_parser.py,sha256=8k6nBCD9oFsFu_Bq6usz3BF5NHr03c7SxFgbDj9EpP8,3781
144
150
  kevin_toolbox/env_info/version/__init__.py,sha256=PSrrvrYccfcE36IkOWG5kLQlKopfenQJ-4xilCdRULY,187
145
151
  kevin_toolbox/env_info/version/compare_version.py,sha256=rAksAR1OuOE5TrfJx3h5w7w5vftpcv_oJPHWGwuX7TI,2489
146
152
  kevin_toolbox/env_info/version/parse_version.py,sha256=QhYVO9hLZ8o4wdXWg5PBr0WIu5VayR-QFKQ_KyLDLgI,860
@@ -235,9 +241,9 @@ kevin_toolbox/nested_dict_list/copy_.py,sha256=MvzNRKm8htYpMe7Td1ao2-ZoaYVC_iNTG
235
241
  kevin_toolbox/nested_dict_list/count_leaf_node_nums.py,sha256=l67u47EvO1inoGinUqH6RZ7cHXwN0VcBQPUvSheqAvA,614
236
242
  kevin_toolbox/nested_dict_list/get_hash.py,sha256=Ygadnn5dnvIeE-9t39p2EwNKNRLzomL37ZsRD5daXxo,1286
237
243
  kevin_toolbox/nested_dict_list/get_nodes.py,sha256=doEcLPYOig4gGloGXEPlroXFcRWe5ovuH0RozsxYZ0U,3748
238
- kevin_toolbox/nested_dict_list/get_value.py,sha256=isvUhqSQyUNHBXgNuZX6_o2c84UV_SpjNjAYm2M3gd4,2083
244
+ kevin_toolbox/nested_dict_list/get_value.py,sha256=IiAqQCphyv-pAZWuQRWm0anEwxYQOkC9CttY5ZlUbSs,2389
239
245
  kevin_toolbox/nested_dict_list/set_default.py,sha256=laSgGP1CbApNgFB9HZGCtxCG9fe7u1C-YOx9ZCoHJms,3460
240
- kevin_toolbox/nested_dict_list/set_value.py,sha256=pmSWzC0y0jBxk7yritsjKU2Q-PPMar0X3A9bF6uWvoQ,3470
246
+ kevin_toolbox/nested_dict_list/set_value.py,sha256=AQ4foDtKo4JxyR---of-VSxjhRWfqkv6TrnQ4EoRo3M,3711
241
247
  kevin_toolbox/nested_dict_list/traverse.py,sha256=5_EirnYVy34JLfXxuTvb-mMjDeO1veyfLOcaVYcuGF8,6846
242
248
  kevin_toolbox/nested_dict_list/name_handler/__init__.py,sha256=P_pWq78oN6NdvWg2h6AduW_sUqbeaaVyoWWbW9kbgmU,107
243
249
  kevin_toolbox/nested_dict_list/name_handler/build_name.py,sha256=VPWyjE8i8l-4Zm4tkD06Ie4J2NCsmI32ecOxZQqqmok,989
@@ -245,10 +251,10 @@ kevin_toolbox/nested_dict_list/name_handler/escape_node.py,sha256=niT9MxmsyrSZYh
245
251
  kevin_toolbox/nested_dict_list/name_handler/parse_name.py,sha256=vUlAXPocpVSxtb3EnRi7U5K40Tz9plFG-_sbwLfYiy4,2280
246
252
  kevin_toolbox/nested_dict_list/serializer/__init__.py,sha256=79dd9l-mNz0bycFKjNm7YsfWPR-JsVx9NoG_Ofqy-HQ,153
247
253
  kevin_toolbox/nested_dict_list/serializer/enum_variable.py,sha256=RWPydtXI4adOJYGo_k5CWHSL0Odzj_bsahb24p1ranY,847
248
- kevin_toolbox/nested_dict_list/serializer/read.py,sha256=BjsEWYoyvEHgRKKVKw0suf1ukug2tAFLMCAmEnndqgg,2945
254
+ kevin_toolbox/nested_dict_list/serializer/read.py,sha256=HaEJJw7hBVNmsIs348kaIyatHP77Kr-JHEwYqRwLrso,3202
249
255
  kevin_toolbox/nested_dict_list/serializer/saved_node_name_builder.py,sha256=qsD-rmDmVaKZP4owN3Wm3QY2Ksi71XlYETqw4VmIsSU,1011
250
256
  kevin_toolbox/nested_dict_list/serializer/variable.py,sha256=ZywG6obipRBCGY1cY42gdvsuWk8GLZXr6eCYcW7ZJ9c,392
251
- kevin_toolbox/nested_dict_list/serializer/write.py,sha256=MCGdAMxZJuQc5OD_wD50zs2AKurtA5-d3l0fOi_ikHw,22169
257
+ kevin_toolbox/nested_dict_list/serializer/write.py,sha256=ZUYJlBXQbCkMW2UN3d29obskGGbTA-gm3dmuLLltxLI,24101
252
258
  kevin_toolbox/nested_dict_list/serializer/backends/__init__.py,sha256=8g7y-L3cmctxao616dVkGiot00FJzKNmNl_69V2bSmE,39
253
259
  kevin_toolbox/nested_dict_list/serializer/backends/_json_.py,sha256=oJXIc28yjxsD9ZJuw120pVHTVsTzCdaXEhVUSQeydq4,2145
254
260
  kevin_toolbox/nested_dict_list/serializer/backends/_ndl.py,sha256=3YkAq_Bqzehnw0kGxqxwtF6uUz0EV37tLI-1ROHjixY,1794
@@ -314,11 +320,13 @@ kevin_toolbox/patches/for_optuna/serialize/for_trial/dump.py,sha256=FT-Z1rzCNUYz
314
320
  kevin_toolbox/patches/for_optuna/serialize/for_trial/load.py,sha256=2fpeeHPKA9bT7CjQ6DVRXOarF6IAA6_f2pXbB1rXcvE,796
315
321
  kevin_toolbox/patches/for_os/__init__.py,sha256=OhGxHkzI-oBek6M07kkrRgTQfY42l1Y2nOIR95JYD-g,219
316
322
  kevin_toolbox/patches/for_os/copy.py,sha256=PWFLu15DpIA4JZxatvphHANNn2H3nC93qTbLLxDl5NU,1509
317
- kevin_toolbox/patches/for_os/find_files_in_dir.py,sha256=KtjUJTMTNivAoWXGYgVkuaXukU1ktEAA-u8peusznmU,1266
323
+ kevin_toolbox/patches/for_os/find_files_in_dir.py,sha256=89bV4g-1-GBHwRr1K1KjJk7UvYM4LCvQmPVCVTZkX-g,1656
318
324
  kevin_toolbox/patches/for_os/pack.py,sha256=A6u4g3dfwXPtlU4gBcThNrktz6dO4DVi2wmQXytqfDI,656
319
325
  kevin_toolbox/patches/for_os/remove.py,sha256=PmwqzVJbyfdqwXn_T1F9d4Oar8CwQ2YFaqcZQkfnrnI,750
320
326
  kevin_toolbox/patches/for_os/unpack.py,sha256=d_fO7nPmExy1VIg7ADIMayCzjBBeFxvJLhIsulIRlzI,1047
321
327
  kevin_toolbox/patches/for_os/walk.py,sha256=LrtEeRUDwzZgu_zGZ-kPsFJd4D-8R8ECHW6WNdEsDSw,8376
328
+ kevin_toolbox/patches/for_os/organize/__init__.py,sha256=GGSUvdHzYK9prCwz4XCm1lp4g4A_o9tuU_l3bNi5MMs,63
329
+ kevin_toolbox/patches/for_os/organize/group_files_by_timestamp.py,sha256=CpGKMJ7GIywTgumo3TyM_SXgaeh0Gtl9xIkm6-YKJE4,5222
322
330
  kevin_toolbox/patches/for_os/path/__init__.py,sha256=M4XaYawTDj-SjwZ_bWS5D38lqzPujxvAtVEvzRLDhtU,108
323
331
  kevin_toolbox/patches/for_os/path/find_illegal_chars.py,sha256=QmqzeaeBY50of28qtvfEmnDW9xeVIfCXi6QVzLzngks,1416
324
332
  kevin_toolbox/patches/for_os/path/replace_illegal_chars.py,sha256=OhxndHEJ8xK-ip-sWYQehTNSho8eNFeKj2iwPHR02os,1672
@@ -344,7 +352,9 @@ kevin_toolbox/patches/for_torch/math/get_y_at_x.py,sha256=bfoVcasZ_tMdhR_1Me0Jli
344
352
  kevin_toolbox/patches/for_torch/math/my_around.py,sha256=ptpU3ids50gwf663EpHbw7raj9tNrDGBFZ5t_uMNH14,1378
345
353
  kevin_toolbox/patches/for_torch/nn/__init__.py,sha256=aJs3RMqRzQmd8KKDmQW9FxwCqS5yfPqEdg-m0PwlQro,39
346
354
  kevin_toolbox/patches/for_torch/nn/lambda_layer.py,sha256=KUuLiX_Dr4bvRmpAaCW5QTDWDcnMPRnw0jg4NNXTFhM,223
347
- kevin_toolbox_dev-1.4.4.dist-info/METADATA,sha256=klDj2qFLTgPXKcBByDV1Wb5OJbJuj5H995C_3LbrlSk,1504
348
- kevin_toolbox_dev-1.4.4.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
349
- kevin_toolbox_dev-1.4.4.dist-info/top_level.txt,sha256=S5TeRGF-PwlhsaUEPTI-f2vWrpLmh3axpyI6v-Fi75o,14
350
- kevin_toolbox_dev-1.4.4.dist-info/RECORD,,
355
+ kevin_toolbox/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
356
+ kevin_toolbox/utils/variable.py,sha256=PxUmp9w4CKKcKHjgdVNF_Iaw5gwPPOd4aY_Oe5F9U1M,133
357
+ kevin_toolbox_dev-1.4.6.dist-info/METADATA,sha256=G-yjDl5MP56GAhXAoPnGcqJPGqpuUiSt4X4B90dsP7c,2578
358
+ kevin_toolbox_dev-1.4.6.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
359
+ kevin_toolbox_dev-1.4.6.dist-info/top_level.txt,sha256=S5TeRGF-PwlhsaUEPTI-f2vWrpLmh3axpyI6v-Fi75o,14
360
+ kevin_toolbox_dev-1.4.6.dist-info/RECORD,,
@@ -1,58 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: kevin-toolbox-dev
3
- Version: 1.4.4
4
- Summary: 一个常用的工具代码包集合
5
- Home-page: https://github.com/cantbeblank96/kevin_toolbox
6
- Download-URL: https://github.com/username/your-package/archive/refs/tags/v1.0.0.tar.gz
7
- Author: kevin hsu
8
- Author-email: xukaiming1996@163.com
9
- License: MIT
10
- Keywords: mathematics,pytorch,numpy,machine-learning,algorithm
11
- Platform: UNKNOWN
12
- Classifier: License :: OSI Approved :: MIT License
13
- Classifier: Programming Language :: Python
14
- Classifier: Programming Language :: Python :: 3
15
- Requires-Python: >=3.6
16
- Description-Content-Type: text/markdown
17
- Requires-Dist: torch (>=1.2.0)
18
- Requires-Dist: numpy (>=1.19.0)
19
- Provides-Extra: plot
20
- Requires-Dist: matplotlib (>=3.0) ; extra == 'plot'
21
- Provides-Extra: rest
22
- Requires-Dist: pytest (>=6.2.5) ; extra == 'rest'
23
- Requires-Dist: line-profiler (>=3.5) ; extra == 'rest'
24
-
25
- # kevin_toolbox
26
-
27
- 一个通用的工具代码包集合
28
-
29
-
30
-
31
- 环境要求
32
-
33
- ```shell
34
- numpy>=1.19
35
- pytorch>=1.2
36
- ```
37
-
38
- 安装方法:
39
-
40
- ```shell
41
- pip install kevin-toolbox --no-dependencies
42
- ```
43
-
44
-
45
-
46
- [项目地址 Repo](https://github.com/cantbeblank96/kevin_toolbox)
47
-
48
- [使用指南 User_Guide](./notes/User_Guide.md)
49
-
50
- [免责声明 Disclaimer](./notes/Disclaimer.md)
51
-
52
- [版本更新记录](./notes/Release_Record.md):
53
-
54
- - v 1.4.4 (2024-12-15)【bug fix】
55
- - nested_dict_list.serializer
56
- - fix bug in write() line 229,将判断目标是否存在时使用的 os.path.isfile 改为 os.path.exists 以支持目标是文件夹的情况。
57
-
58
-