addftool 0.1.9-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- addftool/addf_portal.py +0 -8
- addftool/broadcast_folder.py +283 -0
- addftool/deploy/__init__.py +6 -0
- addftool/deploy/vscode_server.py +74 -0
- addftool/util.py +5 -68
- {addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/METADATA +1 -1
- {addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/RECORD +10 -8
- {addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/WHEEL +0 -0
- {addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/entry_points.txt +0 -0
- {addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/top_level.txt +0 -0
addftool/addf_portal.py
CHANGED
@@ -1,7 +1,5 @@
 import argparse
 from addftool.process import add_killer_args, killer_main
-from addftool.sync import add_sync_args, sync_main
-from addftool.deploy import add_deploy_args, deploy_main
 
 
 def get_args():
@@ -9,8 +7,6 @@ def get_args():
 
     subparsers = parser.add_subparsers(dest='command', help='Sub-command help')
     add_killer_args(subparsers)
-    add_sync_args(subparsers)
-    add_deploy_args(subparsers)
 
     return parser.parse_args()
 
@@ -19,10 +15,6 @@ def main():
     args = get_args()
     if args.command == "kill":
         killer_main(args)
-    elif args.command == "sync":
-        sync_main(args)
-    elif args.command == "deploy":
-        deploy_main(args)
     else:
         print("Unknown command: ", args.command)
 
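With the sync and deploy imports removed, only the kill subcommand remains wired into this entry point. Below is a minimal, standalone sketch of the resulting dispatch; add_killer_args and killer_main are hypothetical stand-ins here, since the real ones live in addftool.process and are not shown in this diff.

```python
import argparse

# Hypothetical stand-ins for addftool.process.add_killer_args / killer_main,
# which are not part of this diff.
def add_killer_args(subparsers):
    kill_parser = subparsers.add_parser("kill", help="kill processes")
    kill_parser.add_argument("--name", default="", help="illustrative filter option")

def killer_main(args):
    print(f"would kill processes matching: {args.name!r}")

parser = argparse.ArgumentParser(description="Addf's tool")
subparsers = parser.add_subparsers(dest="command", help="Sub-command help")
add_killer_args(subparsers)

args = parser.parse_args(["kill", "--name", "python"])
if args.command == "kill":
    killer_main(args)
else:
    print("Unknown command: ", args.command)
```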
addftool/broadcast_folder.py
ADDED
@@ -0,0 +1,283 @@
+import os
+import time
+import subprocess
+import hashlib
+
+from fabric import Connection, ThreadingGroup
+
+try:
+    import torch
+    import torch.distributed as dist
+    from torch.distributed import init_process_group, destroy_process_group
+    _torch_is_available = True
+except ImportError:
+    _torch_is_available = False
+
+
+def add_broadcast_folder_args(subparsers):
+    deploy_parser = subparsers.add_parser('broadcast-folder', help='broadcast folder from master node to other nodes')
+    add_args(deploy_parser)
+
+
+def add_args(parser):
+    parser.add_argument("--tool", help="tool name", type=str, default="torch_nccl", choices=["torch_nccl"])
+    parser.add_argument("--hostfile", help="host file, broadcast file from node-0 to others", type=str, default="")
+
+    parser.add_argument("--download_timeout", help="download timeout, default is 30 min", type=int, default=60 * 30)
+
+    parser.add_argument("--md5_verify", action='store_true', default=False,
+                        help="whether to verify the md5 of the file after broadcast, default is False.")
+    parser.add_argument("--port", help="the port for torchrun, default is 29501", type=int, default=29501)
+    parser.add_argument("--torchrun_alias", type=str, default="torchrun",
+                        help="the alias of torchrun, default is torchrun. If you use torchrun, please set it to torchrun.")
+    parser.add_argument("--transfer_ranks_per_node", type=int, default=8,
+                        help="the number of ranks per node to transfer the files, default is 8.")
+
+    parser.add_argument("--from_blob_url", type=str, default="",
+                        help="the blob url to download from, default is empty. " \
+                             "Only node-0 will download the files from the blob url, " \
+                             "If empty, will transfer the files from the node-0's local folder.")
+
+    # distributed downloader from blob
+    parser.add_argument("folder", help="the folder need to broadcast", type=str)
+
+
+class ConnectionWithCommand(Connection):
+    def __init__(self, host, temp_config_dir, puts, command):
+        super().__init__(host)
+        self.command = command
+        self.puts = puts
+        self.temp_config_dir = temp_config_dir
+
+    def run(self, command, **kwargs):
+        super().run(f"mkdir -p {self.temp_config_dir}", **kwargs)
+        for src, dest in self.puts:
+            self.put(src, remote=dest)
+        print(f"Running command on {self.original_host}: {self.command}")
+        super().run(self.command, **kwargs)
+        if command:
+            super().run(command, **kwargs)
+
+
+def get_ip_via_ssh(hostname):
+    if hostname == "localhost":
+        return "127.0.0.1"
+    try:
+        cmd = ["ssh", hostname, "hostname -I | awk '{print $1}'"]
+        result = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
+
+        if result.returncode == 0:
+            ip = result.stdout.strip()
+            return ip
+        else:
+            print(f"SSH {hostname} failed: {result.stderr}")
+            return None
+    except Exception as e:
+        print(f"Error executing SSH command on {hostname}: {e}")
+        return None
+
+
+def broadcast_folder_main(args):
+    with open(args.hostfile, "r") as f:
+        host_list = []
+        for line in f:
+            line = line.strip()
+            if line and not line.startswith("#"):
+                host_list.append(line)
+
+    print(f"Find {len(host_list)} hosts in hostfile: {args.hostfile}")
+    connection_list = []
+
+    remote_temp_config_dir = "/tmp/broadcast_temp_config_dir"
+
+    master_addr = get_ip_via_ssh(host_list[0])
+
+    for i, host in enumerate(host_list):
+        put_commands = []
+        put_commands.append((__file__, os.path.join(remote_temp_config_dir, "broadcast.py")))
+        commnads = "NCCL_IB_DISABLE=0 OPENBLAS_NUM_THREADS=1 MKL_NUM_THREADS=1 "
+        if os.environ.get("SAS_TOKEN") is not None and i == 0:
+            commnads += f"SAS_TOKEN=\"{os.environ['SAS_TOKEN']}\" "
+        commnads += f"{args.torchrun_alias} --nproc_per_node={args.transfer_ranks_per_node} --nnodes={len(host_list)} --node_rank={i} --master_addr={master_addr} --master_port={args.port}"
+        commnads += f" {remote_temp_config_dir}/broadcast.py {args.folder} --tool {args.tool} --transfer_ranks_per_node {args.transfer_ranks_per_node} "
+        if args.from_blob_url and i == 0:
+            commnads += f" --from_blob_url {args.from_blob_url}"
+        if args.md5_verify:
+            commnads += " --md5_verify"
+
+        connection_list.append(ConnectionWithCommand(host, remote_temp_config_dir, put_commands, commnads))
+
+    group = ThreadingGroup.from_connections(connection_list)
+    group.run('echo "Hello"', hide=False)
+
+
+def download_files_from_blob(queue, blob_url, sas_token, folder, download_files, node_rank):
+    if not blob_url.endswith("/"):
+        blob_url += "/"
+    print(f"Node-{node_rank} start downloading {len(download_files)} files from {blob_url} to {folder}")
+    for file_name in download_files:
+        file_path = os.path.join(folder, file_name)
+        file_dir = os.path.dirname(file_path)
+        if not os.path.exists(file_dir):
+            os.makedirs(file_dir, exist_ok=True)
+        for try_count in range(3):
+            try:
+                download_status = subprocess.run(
+                    ["azcopy", "copy", blob_url + file_name + sas_token, file_path],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    text=True
+                )
+                if download_status.returncode != 0:
+                    raise RuntimeError(f"Failed to download {file_name}: {download_status.stderr}")
+                print(f"Rank {node_rank}: Downloaded {file_name} successfully, from {blob_url} to {file_path}")
+                queue.put(file_path)
+                break
+            except Exception as e:
+                print(f"Rank {node_rank}: Download failed: {e}")
+
+
+def broadcast_file_from_rank(rank, file_path, from_rank, device, file_size, max_chunk_size=250 * 1024 * 1024, md5_verify=False, group=None):
+    if file_size == 0:
+        if rank != from_rank:
+            with open(file_path, "wb") as f:
+                f.write(b"")
+        return
+
+    if group is None:
+        group = dist.group.WORLD
+    pinned_cpu_tensor = torch.empty(min(max_chunk_size, file_size), dtype=torch.uint8).pin_memory()
+
+    for offset in range(0, file_size, max_chunk_size):
+        read_size = min(max_chunk_size, file_size - offset)
+        if rank == from_rank:
+            with open(file_path, "rb") as f:
+                f.seek(offset)
+                data = f.read(read_size)
+            tensor = torch.frombuffer(data, dtype=torch.uint8).contiguous().pin_memory()
+            tensor = tensor.to(device)
+            torch.cuda.synchronize()
+            dist.broadcast(tensor, src=from_rank, group=group, async_op=True)
+        else:
+            tensor = torch.empty(read_size, dtype=torch.uint8, device=device)
+            dist.broadcast(tensor, src=from_rank, group=group)
+            torch.cuda.synchronize()
+
+            file_dir = os.path.dirname(file_path)
+            if not os.path.exists(file_dir):
+                os.makedirs(file_dir, exist_ok=True)
+
+            with open(file_path, "ab" if offset > 0 else "wb") as f:
+                pinned_cpu_tensor[:read_size].copy_(tensor)
+                np_array = pinned_cpu_tensor.numpy()[:read_size]
+                np_array.tofile(f)
+
+    if md5_verify:
+        if rank == from_rank:
+            with open(file_path, "rb") as f:
+                file_md5 = hashlib.md5(f.read()).hexdigest()
+            md5_tensor = torch.frombuffer(file_md5.encode('utf-8'), dtype=torch.uint8).to(device)
+            dist.broadcast(md5_tensor, src=from_rank, group=group)
+        else:
+            with open(file_path, "rb") as f:
+                file_md5 = hashlib.md5(f.read()).hexdigest()
+            src_md5_tensor = torch.empty(32, dtype=torch.uint8, device=device)
+            dist.broadcast(src_md5_tensor, src=from_rank, group=group)
+
+            src_md5_str = src_md5_tensor.cpu().numpy().tobytes().decode('utf-8')
+            if file_md5 != src_md5_str:
+                raise ValueError(f"MD5 verification failed for {file_path}: {file_md5} != {src_md5_str}")
+
+
+def broadcast_folder_worker(args):
+    assert args.tool in ["torch_nccl"], f"tool {args.tool} is not supported"
+    if not _torch_is_available:
+        raise ImportError("Torch is not available. Please install torch to use this feature.")
+    start_time = time.time()
+
+    init_process_group(backend='nccl')
+    global_rank = int(os.environ['RANK'])
+    local_rank = int(os.environ['LOCAL_RANK'])
+    world_size = int(os.environ['WORLD_SIZE'])
+    num_nodes = world_size // args.transfer_ranks_per_node
+    worker_rank = local_rank
+
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(worker_rank % num_gpus)
+
+    device = torch.device(f"cuda:{worker_rank % num_gpus}")
+
+    workers_groups = []
+    for i in range(args.transfer_ranks_per_node):
+        worker_ranks = []
+        for j in range(num_nodes):
+            worker_ranks.append(j * args.transfer_ranks_per_node + i)
+
+        if global_rank == 0:
+            print(f"worker group {i} ranks: {worker_ranks}")
+
+        workers_groups.append(dist.new_group(worker_ranks))
+
+    if global_rank == 0:
+        print(f"Init {len(workers_groups)} worker groups")
+
+    print(f"rank {global_rank} start broadcast worker, args = {args}, nccl init time: {time.time() - start_time:.2f}s")
+
+    file_size_dict = {}
+
+    if global_rank == 0:
+        if args.from_blob_url:
+            raise NotImplementedError("Downloading files from blob storage is not implemented yet.")
+
+        if not os.path.exists(args.folder):
+            raise ValueError(f"Folder {args.folder} does not exist.")
+        for root, dirs, files in os.walk(args.folder):
+            for file in files:
+                file_path = os.path.join(root, file)
+                file_size = os.path.getsize(file_path)
+                file_size_dict[file_path] = file_size
+
+        # sort the file list by size
+        file_list = sorted(file_size_dict.keys(), key=lambda x: file_size_dict[x], reverse=True)
+        file_size_list = [file_size_dict[file] for file in file_list]
+        obj_list = [file_list, file_size_list]
+        dist.broadcast_object_list(obj_list, src=0)
+    else:
+        obj_list = [None, None]
+        dist.broadcast_object_list(obj_list, src=0)
+        file_list, file_size_list = obj_list
+
+    print(f"Rank {global_rank}: {len(file_list)} files, total size: {sum(file_size_list) / (1024 * 1024):.2f} MB, time taken: {time.time() - start_time:.2f}s")
+
+    worker_g = workers_groups[worker_rank]
+    from_rank = global_rank % args.transfer_ranks_per_node
+    for i in range(len(file_list)):
+        if i % args.transfer_ranks_per_node == worker_rank:
+            file_path = file_list[i]
+            file_size = file_size_list[i]
+            broadcast_file_from_rank(
+                global_rank, file_path, from_rank, device,
+                file_size, md5_verify=args.md5_verify, group=worker_g,
+            )
+            if global_rank == from_rank:
+                print(f"Group {global_rank} finished broadcasting {file_path}, size: {file_size / (1024 * 1024):.2f} MB, time taken: {time.time() - start_time:.2f}s")
+
+    dist.barrier()
+    for i in range(len(workers_groups)):
+        if i != worker_rank:
+            dist.destroy_process_group(workers_groups[i])
+    destroy_process_group()
+
+    print(f"Rank {global_rank} finished broadcasting all files, time taken: {time.time() - start_time:.2f}s")
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="Addf's tool")
+    add_args(parser)
+    args = parser.parse_args()
+    if args.hostfile:
+        broadcast_folder_main(args)
+    else:
+        broadcast_folder_worker(args)
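broadcast_folder.py works in two layers: broadcast_folder_main uses fabric to copy this script to every host and launch it under torchrun, and broadcast_folder_worker then broadcasts each file in max_chunk_size pieces over NCCL via broadcast_file_from_rank. The sketch below is a simplified, CPU-only illustration of that chunked file-broadcast pattern, using the gloo backend so it can run as a single process without GPUs; the chunk size, port, and temporary file are illustrative only and this is not the packaged implementation.

```python
import os
import tempfile

import torch
import torch.distributed as dist


def broadcast_file(path, rank, src=0, chunk_size=4 * 1024 * 1024, group=None):
    """Chunked file broadcast: the source rank reads and broadcasts each chunk,
    every other rank appends the received bytes to its local copy of the file."""
    size = os.path.getsize(path) if rank == src else 0
    size_t = torch.tensor([size], dtype=torch.int64)
    dist.broadcast(size_t, src=src, group=group)  # share the file size first
    size = int(size_t.item())

    for offset in range(0, size, chunk_size):
        n = min(chunk_size, size - offset)
        if rank == src:
            with open(path, "rb") as f:
                f.seek(offset)
                chunk = torch.frombuffer(bytearray(f.read(n)), dtype=torch.uint8)
        else:
            chunk = torch.empty(n, dtype=torch.uint8)
        dist.broadcast(chunk, src=src, group=group)
        if rank != src:
            with open(path, "wb" if offset == 0 else "ab") as f:
                chunk.numpy().tofile(f)


if __name__ == "__main__":
    # Single-process demo; under torchrun, RANK/WORLD_SIZE would come from the environment.
    dist.init_process_group(backend="gloo", init_method="tcp://127.0.0.1:29501",
                            rank=0, world_size=1)
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(os.urandom(8 * 1024 * 1024))  # 8 MB of dummy data
    broadcast_file(tmp.name, rank=dist.get_rank())
    dist.destroy_process_group()
```

In the packaged code the same idea runs per worker group: ranks with the same local index across nodes form a group, the file list is sorted by size on node-0 and shared with broadcast_object_list, and each group handles its stripe of the list.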
addftool/deploy/__init__.py
CHANGED
@@ -2,6 +2,7 @@ import argparse
 
 from .ssh_server import add_deploy_ssh_server_args, deploy_ssh_server_main
 from .azure import add_deploy_azure_args, deploy_azure_main
+from .vscode_server import add_deploy_vscode_server_args, deploy_vscode_server_main
 
 
 def add_deploy_args(subparsers: argparse._SubParsersAction):
@@ -19,6 +20,9 @@ def add_deploy_args(subparsers: argparse._SubParsersAction):
     azure_parser = deploy_subparsers.add_parser('azure', help='Azure options')
     add_deploy_azure_args(azure_parser)
 
+    vscode_server_parser = deploy_subparsers.add_parser('vscode-server', help='VS Code options')
+    add_deploy_vscode_server_args(vscode_server_parser)
+
 
 def deploy_main(args):
     """
@@ -30,6 +34,8 @@ def deploy_main(args):
         deploy_ssh_server_main(args)
     elif args.deploy_type == 'azure':
         deploy_azure_main(args)
+    elif args.deploy_type == 'vscode-server':
+        deploy_vscode_server_main(args)
     else:
        print("Unknown deploy type")
        print("Deploy type: ", args.deploy_type)
addftool/deploy/vscode_server.py
ADDED
@@ -0,0 +1,74 @@
+from addftool.util import need_sudo, execute_command, install_packages
+
+
+def deploy_vscode_server(install_tunnel=True):
+    """
+    Deploy Visual Studio Code and optionally start VS Code Tunnel.
+
+    Args:
+        install_tunnel (bool): Whether to start VS Code Tunnel after installation
+    """
+    command_prefix = "sudo " if need_sudo() else ""
+
+    # Install prerequisites
+    print("Installing prerequisites...")
+    execute_command(command_prefix + "apt-get install -y wget gpg")
+
+    # Add Microsoft's GPG key
+    print("Adding Microsoft's GPG key...")
+    execute_command("wget -qO- https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > packages.microsoft.gpg")
+    execute_command(command_prefix + "install -D -o root -g root -m 644 packages.microsoft.gpg /etc/apt/keyrings/packages.microsoft.gpg")
+
+    # Add VS Code repository
+    print("Adding VS Code repository...")
+    execute_command(command_prefix + "sh -c 'echo \"deb [arch=amd64,arm64,armhf signed-by=/etc/apt/keyrings/packages.microsoft.gpg] "
+                    "https://packages.microsoft.com/repos/code stable main\" > /etc/apt/sources.list.d/vscode.list'")
+
+    # Clean up
+    execute_command("rm -f packages.microsoft.gpg")
+
+    # Install VS Code
+    print("Installing VS Code...")
+    execute_command(command_prefix + "apt install -y apt-transport-https")
+    execute_command(command_prefix + "apt update")
+    execute_command(command_prefix + "apt install -y code")
+
+    # Start VS Code Tunnel if requested
+    if install_tunnel:
+        print("Starting VS Code Tunnel...")
+        execute_command("code tunnel --accept-server-license-terms")
+
+    print("VS Code installation completed successfully!")
+
+
+def add_deploy_vscode_server_args(parser):
+    """
+    Add VS Code deployment arguments to the parser.
+
+    Args:
+        parser (argparse.ArgumentParser): The argument parser to add the arguments to.
+    """
+    parser.add_argument("--no-tunnel", dest="install_tunnel", action="store_false",
+                        help="Skip starting VS Code Tunnel after installation")
+    parser.set_defaults(install_tunnel=True)
+
+
+def deploy_vscode_server_main(args):
+    """
+    Main function for VS Code deployment.
+
+    Args:
+        args: Parsed command-line arguments
+    """
+    deploy_vscode_server(install_tunnel=args.install_tunnel)
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="VS Code deployment arguments")
+    add_deploy_vscode_server_args(parser)
+
+    args = parser.parse_args()
+
+    deploy_vscode_server_main(args)
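A small sketch of how the --no-tunnel flag shown above maps onto install_tunnel, using only the argparse wiring from this file (the actual apt install is not invoked here):

```python
import argparse

def add_deploy_vscode_server_args(parser):
    # Same wiring as addftool/deploy/vscode_server.py above.
    parser.add_argument("--no-tunnel", dest="install_tunnel", action="store_false",
                        help="Skip starting VS Code Tunnel after installation")
    parser.set_defaults(install_tunnel=True)

parser = argparse.ArgumentParser(description="VS Code deployment arguments")
add_deploy_vscode_server_args(parser)

print(parser.parse_args([]).install_tunnel)               # True  -> install, then start the tunnel
print(parser.parse_args(["--no-tunnel"]).install_tunnel)  # False -> install only
```

Through the new deploy subparser this presumably surfaces as something like `deploy vscode-server [--no-tunnel]`; the exact console-script name comes from entry_points.txt, which is unchanged and not shown here.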
addftool/util.py
CHANGED
@@ -1,75 +1,12 @@
-import os
 import subprocess
 
 
-def execute_command(command, to_file=None
+def execute_command(command, to_file=None):
     if to_file is not None:
         to_file.write(command + "\n")
         return None
     else:
-
-
-
-        result
-        print(f"Return code: {result.returncode}")
-        if result.stdout is not None:
-            stdout = result.stdout.read().decode()
-            print(f"Stdout: {stdout}")
-        else:
-            stdout = None
-        if only_stdout:
-            if not hide and stdout is not None:
-                print(stdout)
-            return stdout
-        if result.stderr is not None:
-            stderr = result.stderr.read().decode()
-            print(f"Stderr: {stderr}")
-        else:
-            stderr = None
-
-        return {'stdout': stdout, 'stderr': stderr, 'returncode': result.returncode}
-
-
-def need_sudo():
-    return os.name == 'posix' and os.getuid() != 0
-
-
-def is_running_in_docker():
-    return os.path.exists('/.dockerenv') or \
-        any('docker' in line for line in open('/proc/self/cgroup', 'r')) if os.path.exists('/proc/self/cgroup') else False or \
-        os.environ.get('container') == 'docker' or \
-        os.environ.get('DOCKER') == 'true' or \
-        os.environ.get('DOCKER_CONTAINER') == 'yes'
-
-
-def get_ubuntu_version():
-    with open("/etc/os-release") as f:
-        for line in f:
-            if line.startswith("VERSION_ID="):
-                version = line.split("=")[1].strip().strip('"')
-                return version
-    return "22.04"
-
-
-def check_package_installed(package):
-    command = f"dpkg -l | grep {package}"
-    result = execute_command(command)
-    if result is not None and package in result:
-        return True
-    return False
-
-
-def install_packages(package_list):
-    to_install = []
-    for package in package_list:
-        if check_package_installed(package):
-            print(f"{package} is already installed")
-            continue
-        to_install.append(package)
-
-    if len(to_install) > 0:
-        packages = " ".join(to_install)
-        command = f"apt-get install -y {packages}"
-        if need_sudo():
-            command = "sudo " + command
-        execute_command(command)
+        print("Execute command: ", command)
+        result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode()
+        print("Result: ", result)
+        return result
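The rewritten execute_command is far smaller than the removed version: it either appends the command to a file handle or runs it through a shell and returns decoded stdout, with stderr and the return code no longer captured. A self-contained sketch of that new behavior (mirroring the code above rather than importing addftool):

```python
import subprocess

def execute_command(command, to_file=None):
    # Mirrors the new addftool/util.py: optionally log the command instead of running it,
    # otherwise run it via a shell and return decoded stdout only.
    if to_file is not None:
        to_file.write(command + "\n")
        return None
    print("Execute command: ", command)
    result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read().decode()
    print("Result: ", result)
    return result

print(execute_command("echo hello"))  # -> "hello\n"

with open("commands.log", "w") as log:
    execute_command("echo deferred", to_file=log)  # written to the log, not executed
```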
{addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/RECORD
RENAMED
@@ -1,17 +1,19 @@
 addftool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-addftool/addf_portal.py,sha256=
+addftool/addf_portal.py,sha256=U52TdNcwWOEvv_C5r-guWYxn3ntzwYI2eBzJIE7IdcY,493
 addftool/blob.py,sha256=NZOItDyFUIdV1tfhJZJJBEzGy296CE5NCictTzP4OPc,8282
+addftool/broadcast_folder.py,sha256=bEOr-8Q14DhZFS658hab4U-9HvAW3EGluEriilIdEXQ,11976
 addftool/sync.py,sha256=ZpYxbM8uiPFrV7ODmOaM7asVPCWaxBixA-arVc-1kfs,14045
 addftool/tool.py,sha256=EuKQ2t2InN7yB-_oYLcdsA7vRqzRGTunwIxplUSqEG0,2054
-addftool/util.py,sha256=
-addftool/deploy/__init__.py,sha256=
+addftool/util.py,sha256=Q3A68vJDxgfeNiEFmk54HuMuworVndocXpSbVpvGMfc,362
+addftool/deploy/__init__.py,sha256=UL8b-Idt7lStlMiOm8oTZ65fdzYz99Fgzq2Gaw8WsZc,1544
 addftool/deploy/azure.py,sha256=_o_9Eh8cVwLDAqvfyRYBtQRHs_Gul-nCs2ZXttwO1bk,1301
 addftool/deploy/ssh_server.py,sha256=f2T8fgwACVljPfdcimMywUjsFnLCWRde7iWPAILpRz8,5463
+addftool/deploy/vscode_server.py,sha256=9KoWQdK3ESgKtdACEHTng2OuQKlIZjh_R_qJNT-XOv8,2609
 addftool/process/__init__.py,sha256=Dze8OrcyjQlAbPrjE_h8bMi8W4b3OJyZOjTucPrkJvM,3721
 addftool/process/utils.py,sha256=JldxnwanLJOgxaPgmCJh7SeBRaaj5rFxWWxh1hpsvbA,2609
 addftool/ssh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-addftool-0.
-addftool-0.
-addftool-0.
-addftool-0.
-addftool-0.
+addftool-0.2.0.dist-info/METADATA,sha256=Ja75t6IT9XdIMjCUsZKLsx-QJJ9Yo2UfYtpVUC1AYPw,170
+addftool-0.2.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+addftool-0.2.0.dist-info/entry_points.txt,sha256=9lkmuWMInwUAtev8w8poNkNd7iML9Bjd5CBCFVxg2b8,111
+addftool-0.2.0.dist-info/top_level.txt,sha256=jqj56-plrBbyzY0tIxB6wPzjAA8kte4hUlajyyQygN4,9
+addftool-0.2.0.dist-info/RECORD,,
{addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/WHEEL
File without changes

{addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/entry_points.txt
File without changes

{addftool-0.1.9.dist-info → addftool-0.2.0.dist-info}/top_level.txt
File without changes