scientiflow_cli-0.4.12-py3-none-any.whl → scientiflow_cli-0.4.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scientiflow_cli/pipeline/decode_and_execute.py +54 -84
- scientiflow_cli/services/executor.py +3 -5
- scientiflow_cli/services/request_handler.py +3 -0
- scientiflow_cli/utils/file_manager.py +2 -5
- {scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/METADATA +1 -1
- {scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/RECORD +9 -9
- {scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/LICENSE.md +0 -0
- {scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/WHEEL +0 -0
- {scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/entry_points.txt +0 -0
**scientiflow_cli/pipeline/decode_and_execute.py**

```diff
@@ -12,85 +12,6 @@ from scientiflow_cli.services.rich_printer import RichPrinter
 
 printer = RichPrinter()
 
-# Global background job tracker
-class GlobalBackgroundJobTracker:
-    _instance = None
-    _lock = threading.Lock()
-
-    def __new__(cls):
-        if cls._instance is None:
-            with cls._lock:
-                if cls._instance is None:
-                    cls._instance = super().__new__(cls)
-                    cls._instance._initialize()
-        return cls._instance
-
-    def _initialize(self):
-        self.background_executors = []
-        self.background_jobs_count = 0
-        self.background_jobs_completed = 0
-        self.background_jobs_lock = threading.Lock()
-
-    def register_background_job(self, executor, futures, node_label, log_file_path):
-        """Register a background job for global tracking."""
-        with self.background_jobs_lock:
-            self.background_jobs_count += 1
-            self.background_executors.append(executor)
-
-        # Start monitoring in a separate thread
-        monitor_thread = threading.Thread(
-            target=self._monitor_job,
-            args=(futures, node_label, executor, log_file_path),
-            daemon=True
-        )
-        monitor_thread.start()
-
-    def _monitor_job(self, futures, node_label, executor, log_file_path):
-        """Monitor background job completion."""
-        all_successful = True
-        for future in as_completed(futures):
-            success = future.result()
-            if not success:
-                all_successful = False
-
-        if not all_successful:
-            with open(log_file_path, 'a') as f:
-                f.write(f"[ERROR] Background job {node_label} failed\n")
-            printer.print_message(f"[BACKGROUND JOB] {node_label} Failed - some commands in background job failed", style="bold red")
-        else:
-            printer.print_message(f"[BACKGROUND JOB] {node_label} Execution completed in the background", style="bold green")
-
-        # Clean up executor
-        executor.shutdown(wait=False)
-        with self.background_jobs_lock:
-            if executor in self.background_executors:
-                self.background_executors.remove(executor)
-            self.background_jobs_completed += 1
-
-    def wait_for_all_jobs(self):
-        """Wait for all background jobs to complete."""
-        import time
-        if self.background_jobs_count > 0:
-            printer.print_message(f"[INFO] Waiting for {self.background_jobs_count} background job(s) to complete...", style="bold yellow")
-
-            while True:
-                with self.background_jobs_lock:
-                    if self.background_jobs_completed >= self.background_jobs_count:
-                        break
-                time.sleep(0.5) # Check every 500ms
-
-            printer.print_message("[INFO] All background jobs completed.", style="bold green")
-
-    def reset(self):
-        """Reset the tracker for a new execution cycle."""
-        with self.background_jobs_lock:
-            self.background_executors = []
-            self.background_jobs_count = 0
-            self.background_jobs_completed = 0
-
-# Global tracker instance
-global_bg_tracker = GlobalBackgroundJobTracker()
-
 def execute_background_command_standalone(command: str, log_file_path: str):
     """Execute a command in background without real-time output display - standalone function for multiprocessing."""
     try:
```
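The whole singleton is gone in 0.4.15; its counters and wait loop reappear as per-instance state on `PipelineExecutor` in the hunks below. For reference, the deleted class used double-checked locking for thread-safe lazy construction. A minimal runnable sketch of just that idiom (class name mine, not the package's):

```python
import threading

class Singleton:
    """Thread-safe lazy singleton via double-checked locking."""
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        if cls._instance is None:           # fast path: skip the lock once created
            with cls._lock:
                if cls._instance is None:   # re-check: another thread may have won the race
                    cls._instance = super().__new__(cls)
        return cls._instance

assert Singleton() is Singleton()  # every caller shares one instance
```

A process-wide tracker like this is shared by every pipeline run in the process, which is presumably why the release moves the same state onto each executor instance instead.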
```diff
@@ -130,6 +51,10 @@ class PipelineExecutor:
         self.current_node = None
         self.job_status = job_status
         self.current_node_from_config = current_node_from_config
+        self.background_executors = [] # Keep track of background executors
+        self.background_jobs_count = 0 # Track number of active background jobs
+        self.background_jobs_completed = 0 # Track completed background jobs
+        self.background_jobs_lock = threading.Lock() # Thread-safe counter updates
 
         # For resuming: flag to track if we've reached the resume point
         self.resume_mode = (job_status == "running" and current_node_from_config is not None)
```
```diff
@@ -240,7 +165,47 @@ class PipelineExecutor:
             self.update_terminal_output()
             raise SystemExit("[ERROR] Pipeline execution terminated due to an unexpected error.")
 
+    def monitor_background_job(self, futures, node_label, executor):
+        """Monitor background job completion in a separate thread."""
+        def monitor():
+            all_successful = True
+            for future in as_completed(futures):
+                success = future.result()
+                if not success:
+                    all_successful = False
+
+            if not all_successful:
+                self.log_output(f"[ERROR] Background job {node_label} failed")
+                printer.print_message(f"[BACKGROUND JOB] {node_label} Failed - some commands in background job failed", style="bold red")
+            else:
+                printer.print_message(f"[BACKGROUND JOB] {node_label} Execution completed in the background", style="bold green")
+
+            # Clean up executor
+            executor.shutdown(wait=False)
+            if executor in self.background_executors:
+                self.background_executors.remove(executor)
+
+            # Update background job completion count
+            with self.background_jobs_lock:
+                self.background_jobs_completed += 1
+
+        # Start monitoring thread
+        monitor_thread = threading.Thread(target=monitor, daemon=True)
+        monitor_thread.start()
 
+    def wait_for_background_jobs(self):
+        """Wait for all background jobs to complete."""
+        import time
+        if self.background_jobs_count > 0:
+            printer.print_message(f"[INFO] Waiting for {self.background_jobs_count} background job(s) to complete...", style="bold yellow")
+
+            while True:
+                with self.background_jobs_lock:
+                    if self.background_jobs_completed >= self.background_jobs_count:
+                        break
+                time.sleep(0.5) # Check every 500ms
+
+            printer.print_message("[INFO] All background jobs completed.", style="bold green")
 
     def dfs(self, node: str):
         """Perform Depth-First Search (DFS) for executing pipeline nodes."""
```
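Together these two methods reinstate the deleted tracker's behaviour per instance: a daemon thread drains `as_completed(futures)` and bumps a lock-guarded counter, while `wait_for_background_jobs` polls that counter every 500 ms. A condensed, runnable sketch of the coordination pattern, with a `ThreadPoolExecutor` standing in for the package's `ProcessPoolExecutor` and illustrative names throughout:

```python
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

jobs_started = 0
jobs_completed = 0
counter_lock = threading.Lock()

def launch_background_job(pool, tasks):
    """Submit tasks, then watch them from a daemon thread (non-blocking)."""
    global jobs_started
    with counter_lock:
        jobs_started += 1
    futures = [pool.submit(task) for task in tasks]

    def monitor():
        global jobs_completed
        ok = all(f.result() for f in as_completed(futures))
        print("background job", "succeeded" if ok else "failed")
        with counter_lock:
            jobs_completed += 1  # signal the waiter

    threading.Thread(target=monitor, daemon=True).start()

def wait_for_background_jobs():
    """Poll until every launched job has been counted as complete."""
    while True:
        with counter_lock:
            if jobs_completed >= jobs_started:
                return
        time.sleep(0.5)

with ThreadPoolExecutor(max_workers=2) as pool:
    launch_background_job(pool, [lambda: True, lambda: True])
    wait_for_background_jobs()
```

The polling loop mirrors the new code exactly; a `threading.Condition` notified from the monitor would wake the waiter immediately rather than on the next 500 ms tick, if that latency ever mattered.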
```diff
@@ -319,14 +284,19 @@ class PipelineExecutor:
 
         # Execute commands in background using ProcessPoolExecutor (non-blocking)
         if command_list:
+            # Increment background jobs counter
+            with self.background_jobs_lock:
+                self.background_jobs_count += 1
+
             executor = ProcessPoolExecutor(max_workers=numberOfThreads)
+            self.background_executors.append(executor) # Keep reference to prevent garbage collection
             futures = []
             for cmd in command_list:
                 future = executor.submit(execute_background_command_standalone, cmd, self.log_file_path)
                 futures.append(future)
 
-            #
-
+            # Start monitoring in a separate thread (non-blocking)
+            self.monitor_background_job(futures, node_label, executor)
 
             # Don't wait for completion, immediately continue to next node
         else:
```
```diff
@@ -374,9 +344,9 @@ class PipelineExecutor:
         if starting_node:
             self.dfs(starting_node)
 
-        #
-
-
+        # Wait for all background jobs to complete before marking pipeline as completed
+        self.wait_for_background_jobs()
+
         update_job_status(self.project_job_id, "completed")
         update_stopped_at_node(self.project_id, self.project_job_id, self.current_node)
 
```
**scientiflow_cli/services/executor.py**

```diff
@@ -1,7 +1,7 @@
 from concurrent.futures import ThreadPoolExecutor
 import asyncio
 from scientiflow_cli.pipeline.get_jobs import get_jobs
-from scientiflow_cli.pipeline.decode_and_execute import decode_and_execute_pipeline
+from scientiflow_cli.pipeline.decode_and_execute import decode_and_execute_pipeline
 from scientiflow_cli.pipeline.container_manager import get_job_containers
 from scientiflow_cli.utils.file_manager import create_job_dirs, get_job_files
 from scientiflow_cli.services.rich_printer import RichPrinter
```
```diff
@@ -37,6 +37,7 @@ def execute_jobs(job_ids: list[int] = None, parallel: bool = False, is_cloud: bo
     if matching_jobs:
         if is_cloud:
             for job in matching_jobs:
+                job['project_title'] = str(job['project']['id']) + '_' + job['project']['project_title']
                 if 'server' not in job or job['server'] is None:
                     job['server'] = {'base_directory':None}
                 job['server']['base_directory'] = get_base_directory()
```
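The single added line prefixes the project title with the project id for cloud runs, presumably so directories derived from the title are unique per project. A worked example with hypothetical values:

```python
job = {'project': {'id': 42, 'project_title': 'protein_folding'}}

# The added line from the diff, applied to the sample job dict
job['project_title'] = str(job['project']['id']) + '_' + job['project']['project_title']
print(job['project_title'])  # -> 42_protein_folding
```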
```diff
@@ -116,9 +117,6 @@ async def execute_async(jobs: list[dict]) -> None:
 
     await asyncio.gather(*running_jobs) # Wait for all jobs to complete
     printer.print_success("[ASYNC COMPLETE] All jobs finished!")
-
-    # Wait for all background jobs from all executed jobs to complete
-    global_bg_tracker.wait_for_all_jobs()
 
 
 def execute_single_job(job: dict, is_cloud: bool = False) -> None:
```
```diff
@@ -158,7 +156,7 @@ def execute_single_job(job: dict, is_cloud: bool = False) -> None:
     if job_status != "running":
         if job["new_job"] == 1:
             # Initialize folders for the project / project_job
-            create_job_dirs(job
+            create_job_dirs(job)
 
             # Fetch the files and folder from the backend
             get_job_files(job)
```
**scientiflow_cli/services/request_handler.py**

```diff
@@ -21,6 +21,7 @@ def make_auth_request(endpoint, method, data=None, params=None, error_message=No
         return handle_response(response, error_message)
 
     except requests.RequestException as e:
+        print(e)
         return "Request failed"
 
 
```
```diff
@@ -32,10 +33,12 @@ def make_no_auth_request(endpoint, method, data=None, error_message=None):
             response = requests.get(base_url + endpoint)
         elif method == 'POST':
             response = requests.post(base_url + endpoint, json=data)
+            print(response)
         else:
             raise ValueError("Unsupported HTTP method")
 
         return handle_response(response, error_message)
 
     except requests.RequestException as e:
+        print(e)
         return "Request failed"
```
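Both request-handler hunks add bare `print` calls, so failures and raw responses now land on stdout next to the CLI's formatted output. Purely as an illustration of an alternative (this is not the package's code), the stdlib `logging` module records the same information with a traceback and an adjustable level:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("scientiflow_cli.services.request_handler")

def make_request():
    # Hypothetical stand-in for the requests call inside make_auth_request()
    raise ConnectionError("simulated network failure")

try:
    make_request()
except ConnectionError:
    logger.exception("Request failed")  # logs the message plus the full traceback
```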
**scientiflow_cli/utils/file_manager.py**

```diff
@@ -37,12 +37,9 @@ def get_job_files(job: dict) -> None:
 
     printer.print_success(f"[+] Files extracted to {str(project_dir_name).strip()}")
 
-def create_job_dirs(job: dict
+def create_job_dirs(job: dict) -> None:
     base_dir = Path(job['server']['base_directory'])
-
-        project_dir = base_dir / (str(job['project']['id'])+"_"+job['project']['project_title'])
-    else:
-        project_dir = base_dir / job['project']['project_title']
+    project_dir = base_dir / job['project']['project_title']
     job_dir = project_dir / job['project_job']['job_directory']
     job_dir.mkdir(parents=True, exist_ok=True)
 
```
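With the broken conditional removed, `create_job_dirs` always builds `base_directory/<project_title>/<job_directory>`. A quick usage sketch with hypothetical values:

```python
from pathlib import Path
import tempfile

job = {
    'server': {'base_directory': tempfile.mkdtemp()},  # throwaway base dir for the demo
    'project': {'project_title': '42_protein_folding'},
    'project_job': {'job_directory': 'run_001'},
}

# Mirrors the fixed create_job_dirs() body
base_dir = Path(job['server']['base_directory'])
job_dir = base_dir / job['project']['project_title'] / job['project_job']['job_directory']
job_dir.mkdir(parents=True, exist_ok=True)
print(job_dir)  # e.g. /tmp/tmpabc123/42_protein_folding/run_001
```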
**{scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/RECORD**

```diff
@@ -7,25 +7,25 @@ scientiflow_cli/cli/logout.py,sha256=EzpFPA1ENoXqLvduo6rxaVF09GqgO5GCRvnGMDr5BEw
 scientiflow_cli/main.py,sha256=AliEFU3bebJV2tbvxQYbiMY1bqVGqq3-a6dzkVUXong,6982
 scientiflow_cli/pipeline/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scientiflow_cli/pipeline/container_manager.py,sha256=KAnE5AvsSg4cewLc_v4gWCqhKGoc7ycHOtl_HYxUK7E,8444
-scientiflow_cli/pipeline/decode_and_execute.py,sha256=
+scientiflow_cli/pipeline/decode_and_execute.py,sha256=UhmIQrFxhJvtwqcbFIeg1wiDt2DAhVowASKiYSl5mDk,18257
 scientiflow_cli/pipeline/get_jobs.py,sha256=69jOIVwXd8j2lAYy28r2QcsjFT4yRpXNOqsfopiZhFs,1498
 scientiflow_cli/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scientiflow_cli/services/auth_service.py,sha256=lknARfVTjRCH-4yWVPKiS330NIVHxFw3HlQdN2Lw3Og,3343
 scientiflow_cli/services/base_directory.py,sha256=2dUvGYk4enLe3Cagcs_bfv2kNuHg1ws-dtMDEW_tccI,2726
-scientiflow_cli/services/executor.py,sha256=
+scientiflow_cli/services/executor.py,sha256=1TAST7zokikecTJZ5Pdd9hrlNncAzcuDeJnlGJhGGk0,11813
 scientiflow_cli/services/modes.py,sha256=-Bk1CJO0vgc8v_rXktfKAyHSF6cr5bGbufSGa_DtvY4,1241
-scientiflow_cli/services/request_handler.py,sha256=
+scientiflow_cli/services/request_handler.py,sha256=5Y1B84kpHP-EDAI9d6fjaPtQE1-dMKCtoCaqvixifSU,1397
 scientiflow_cli/services/rich_printer.py,sha256=5ORAaZOa_84m6vP-novpPOI70UPxt0pEvmRq9999Ifg,2129
 scientiflow_cli/services/status_updater.py,sha256=VjC2V6lWzjwBN7ZhCQzW_h_sMG25Poeh35MNTeVdt5E,2910
 scientiflow_cli/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scientiflow_cli/utils/config.py,sha256=iq9aEVwlJA-KLPWX6Vtx6SaEgH3UAuKjNIODw2mGYxY,256
 scientiflow_cli/utils/encryption.py,sha256=iQ-b40i75JvoB1cGANDqzZXLztD54toO_6loX1m5W9Q,1107
-scientiflow_cli/utils/file_manager.py,sha256=
+scientiflow_cli/utils/file_manager.py,sha256=KLdJlIzFng_BfKHHZzQNp35hXsFMWfgy4OUqodadzss,1923
 scientiflow_cli/utils/logger.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 scientiflow_cli/utils/mock.py,sha256=UZ9cN2Qx3EAXcxnLQY5z4DQWy4jflnWFnfNTsuvnFH8,11237
 scientiflow_cli/utils/singularity.py,sha256=jy8ep7Xa1Eg4fptNjyXLPuVN2KA8l4dFil-w-KaVNkw,4956
-scientiflow_cli-0.4.
-scientiflow_cli-0.4.
-scientiflow_cli-0.4.
-scientiflow_cli-0.4.
-scientiflow_cli-0.4.
+scientiflow_cli-0.4.15.dist-info/LICENSE.md,sha256=nb6GGGYuS_KXe33mSNwcEW-QzvwM475NQ4cNE7KBb34,425
+scientiflow_cli-0.4.15.dist-info/METADATA,sha256=LBG22lfG_UFtnbrC64_MXN8FK86HgPpPehq4hMFmrbM,2492
+scientiflow_cli-0.4.15.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+scientiflow_cli-0.4.15.dist-info/entry_points.txt,sha256=0lq2mjcG5hGfODrQodeMSAy9RfE2EX1MZSHRpfSncxc,61
+scientiflow_cli-0.4.15.dist-info/RECORD,,
```

**{scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/LICENSE.md** (file without changes)

**{scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/WHEEL** (file without changes)

**{scientiflow_cli-0.4.12.dist-info → scientiflow_cli-0.4.15.dist-info}/entry_points.txt** (file without changes)