addftool-0.2.8-py3-none-any.whl → addftool-0.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
addftool/blob.py CHANGED
@@ -76,6 +76,14 @@ def install_main(args):
     # if has root permission, run install script
     # else, print install script
 
+    # make sure wget is installed
+    if not check_package_installed("wget"):
+        print("wget is not installed, installing wget")
+        command = "apt-get install wget -y"
+        if args.sudo:
+            command = "sudo " + command
+        execute_command(command, script_writer)
+
     print("Get ubuntu version: ", ubuntu_version)
     command = f"wget https://packages.microsoft.com/config/ubuntu/{ubuntu_version}/packages-microsoft-prod.deb -O /tmp/packages-microsoft-prod.deb"
     print("Install packages-microsoft-prod.deb")
addftool/sleep.py CHANGED
@@ -2,6 +2,7 @@ import time
 import subprocess
 import sys
 import multiprocessing as mp
+import re
 
 try:
     import torch
@@ -9,9 +10,61 @@ except ImportError:
     print("PyTorch is not installed. Please install it to run this script.")
     sys.exit(1)
 
+try:
+    import triton
+    import triton.runtime.driver
+except ImportError:
+    print("Triton is not installed. Will try to detect GPU type using command line tools.")
+    triton = None
+
+def is_cuda():
+    """Use triton to detect whether this is a CUDA environment"""
+    try:
+        if triton is None:
+            return None
+        return triton.runtime.driver.active.get_current_target().backend == "cuda"
+    except:
+        return None
+
+def get_gpu_type():
+    """Detect the GPU type (NVIDIA/CUDA or AMD/ROCm)"""
+    # First, try detection via triton
+    cuda_detected = is_cuda()
+    if cuda_detected is True:
+        return "nvidia"
+    elif cuda_detected is False:
+        return "amd"
+
+    # If triton detection fails, fall back to command-line detection
+    try:
+        # Try to detect an NVIDIA GPU
+        result = subprocess.run("nvidia-smi", shell=True, capture_output=True, text=True)
+        if result.returncode == 0:
+            return "nvidia"
+
+        # Try to detect an AMD GPU
+        result = subprocess.run("rocm-smi", shell=True, capture_output=True, text=True)
+        if result.returncode == 0:
+            return "amd"
+
+        return None
+    except:
+        return None
 
 def get_gpu_stats(device_id):
-    """Get the utilization and memory usage of the specified GPU"""
+    """Get the utilization and memory usage of the specified GPU (supports NVIDIA and AMD)"""
+    gpu_type = get_gpu_type()
+
+    if gpu_type == "nvidia":
+        return get_nvidia_gpu_stats(device_id)
+    elif gpu_type == "amd":
+        return get_amd_gpu_stats(device_id)
+    else:
+        print("No supported GPU found (neither NVIDIA nor AMD)")
+        return None, None
+
+def get_nvidia_gpu_stats(device_id):
+    """Get statistics for an NVIDIA GPU"""
     try:
         cmd = f"nvidia-smi --id={device_id} --query-gpu=utilization.gpu,memory.used --format=csv,noheader,nounits"
         result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
@@ -31,9 +84,75 @@ def get_gpu_stats(device_id):
 
         return None, None
     except Exception as e:
-        print(f"Error getting GPU stats for device {device_id}: {e}")
+        print(f"Error getting NVIDIA GPU stats for device {device_id}: {e}")
         return None, None
 
+def get_amd_gpu_stats(device_id):
+    """Get statistics for an AMD GPU"""
+    try:
+        # Get GPU utilization and memory usage
+        cmd = f"rocm-smi -d {device_id} --showuse --showmemuse"
+        result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+
+        if result.returncode != 0:
+            # Try a fallback command
+            cmd = f"rocm-smi -d {device_id}"
+            result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+            if result.returncode != 0:
+                print(f"Error running rocm-smi for GPU {device_id}")
+                return None, None
+
+        gpu_util = None
+        memory_used = None
+
+        # Parse the output
+        output = result.stdout
+        lines = output.split('\n')
+
+        for line in lines:
+            # Look for the GPU utilization
+            if 'GPU use' in line or '%' in line:
+                # Match a percentage
+                match = re.search(r'(\d+)%', line)
+                if match:
+                    gpu_util = int(match.group(1))
+
+            # Look for memory usage (MB)
+            if 'vram' in line.lower() or 'memory' in line.lower() or 'MB' in line:
+                # Match an MB value; the format may be "1024 MB" or "1024MB"
+                match = re.search(r'(\d+)\s*MB', line, re.IGNORECASE)
+                if match:
+                    memory_used = int(match.group(1))
+
+        # If the utilization still could not be determined, set it to 0 (assume idle)
+        if gpu_util is None:
+            gpu_util = 0
+
+        # If the memory usage still could not be determined, set it to 0
+        if memory_used is None:
+            memory_used = 0
+
+        return gpu_util, memory_used
+
+    except Exception as e:
+        print(f"Error getting AMD GPU stats for device {device_id}: {e}")
+        return None, None
+
+# Detect the GPU type at program startup
+try:
+    GPU_TYPE = get_gpu_type()
+    if GPU_TYPE:
+        cuda_status = is_cuda()
+        if cuda_status is not None:
+            print(f"Detected {GPU_TYPE.upper()} GPU environment (triton backend: {'cuda' if cuda_status else 'hip'})")
+        else:
+            print(f"Detected {GPU_TYPE.upper()} GPU environment")
+    else:
+        print("No supported GPU environment detected")
+except:
+    GPU_TYPE = None
+    print("Failed to detect GPU environment")
+
 def check_gpu_occupied(device_id, util_threshold=20, memory_threshold=2048):
     """Check whether the GPU is occupied by other processes
 
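With these changes, sleep.py picks a stats backend by first asking triton for its active driver backend and then falling back to probing the nvidia-smi / rocm-smi return codes. A short usage sketch of the new entry points, assuming the module imports cleanly as addftool.sleep (which requires PyTorch, since sleep.py exits at import time without it):

    from addftool.sleep import get_gpu_type, get_gpu_stats

    gpu_type = get_gpu_type()            # "nvidia", "amd", or None
    if gpu_type is not None:
        util, mem_mb = get_gpu_stats(0)  # utilization in %, memory used in MB
        print(f"GPU 0 ({gpu_type}): util={util}%, mem={mem_mb} MB")
    else:
        print("No supported GPU detected")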
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: addftool
-Version: 0.2.8
+Version: 0.2.9
 Requires-Dist: cryptography
 Requires-Dist: requests
 Requires-Dist: PyYAML
@@ -1,8 +1,8 @@
 addftool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 addftool/addf_portal.py,sha256=vc8opPzValNFPwJne5C5LbZvgcJ0eMBJSWDSiM23OPM,1274
-addftool/blob.py,sha256=y1HZaDBUNeXicVytvwpRXwufvvrgxR33ruBlYpxnSa4,9453
+addftool/blob.py,sha256=vyjJHlQZuGrpEiaRF-Bdmow_TMnhXtXEGL31qA5Hb-g,9742
 addftool/broadcast_folder.py,sha256=GQBuSL8Ch537V_fSBHesWyqT3KRYry68pbYOKy2bDj4,19619
-addftool/sleep.py,sha256=FA1fTUI47eQq-9nBtXElkS7SZMunP_5tLiIBuFNSM6w,7823
+addftool/sleep.py,sha256=Y6gAJb2Ho4qvbd52-UBmwt7Rgv2HpJAcBnWO23asaY8,11787
 addftool/sync.py,sha256=ZpYxbM8uiPFrV7ODmOaM7asVPCWaxBixA-arVc-1kfs,14045
 addftool/tool.py,sha256=FmxRY3-pP0_Z0zCUAngjmEMmPUruMftg_iUlB1t2TnQ,2001
 addftool/util.py,sha256=zlNLu8Be8cGIpNRqBw8_0q7nFxWlsJ9cToN62ohjdXE,2335
@@ -13,8 +13,8 @@ addftool/deploy/vscode_server.py,sha256=tLtSvlcK2fEOaw6udWt8dNELVhwv9F59hF5DJJ-1
 addftool/process/__init__.py,sha256=Dze8OrcyjQlAbPrjE_h8bMi8W4b3OJyZOjTucPrkJvM,3721
 addftool/process/utils.py,sha256=JldxnwanLJOgxaPgmCJh7SeBRaaj5rFxWWxh1hpsvbA,2609
 addftool/ssh/__init__.py,sha256=h5_rCO0A6q2Yw9vFguQZZp_ApAJsT1dcnKnbKKZ0cDM,4409
-addftool-0.2.8.dist-info/METADATA,sha256=rxu5Oy4lH7lQF99Z8gzz5QuoGxnZ739h0OBNhr_0NA0,221
-addftool-0.2.8.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
-addftool-0.2.8.dist-info/entry_points.txt,sha256=9lkmuWMInwUAtev8w8poNkNd7iML9Bjd5CBCFVxg2b8,111
-addftool-0.2.8.dist-info/top_level.txt,sha256=jqj56-plrBbyzY0tIxB6wPzjAA8kte4hUlajyyQygN4,9
-addftool-0.2.8.dist-info/RECORD,,
+addftool-0.2.9.dist-info/METADATA,sha256=lWVPrPjbHKv10pKm1w4HPdccUy_122llzDR1WQ6c2mg,221
+addftool-0.2.9.dist-info/WHEEL,sha256=iAkIy5fosb7FzIOwONchHf19Qu7_1wCWyFNR5gu9nU0,91
+addftool-0.2.9.dist-info/entry_points.txt,sha256=9lkmuWMInwUAtev8w8poNkNd7iML9Bjd5CBCFVxg2b8,111
+addftool-0.2.9.dist-info/top_level.txt,sha256=jqj56-plrBbyzY0tIxB6wPzjAA8kte4hUlajyyQygN4,9
+addftool-0.2.9.dist-info/RECORD,,