psr-factory 5.0.0b21__py3-none-win_amd64.whl → 5.0.0b67__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- psr/execqueue/client.py +31 -10
- psr/execqueue/config.py +8 -0
- psr/execqueue/db.py +71 -9
- psr/execqueue/server.py +317 -41
- psr/execqueue/watcher.py +1 -1
- psr/factory/__init__.py +1 -1
- psr/factory/api.py +378 -130
- psr/factory/factory.dll +0 -0
- psr/factory/factory.pmd +118 -52
- psr/factory/factory.pmk +3044 -2223
- psr/factory/factorylib.py +30 -2
- psr/factory/samples/sddp_case01.py +3 -2
- psr/factory/samples/sddp_case21.py +3 -3
- psr/{cloud/version.py → outputs/__init__.py} +2 -2
- psr/outputs/outputs.py +179 -0
- psr/outputs/resample.py +289 -0
- psr/psrfcommon/psrfcommon.py +4 -1
- psr/runner/runner.py +67 -22
- {psr_factory-5.0.0b21.dist-info → psr_factory-5.0.0b67.dist-info}/METADATA +5 -15
- psr_factory-5.0.0b67.dist-info/RECORD +32 -0
- psr/cloud/__init__.py +0 -7
- psr/cloud/aws.py +0 -256
- psr/cloud/cloud.py +0 -1444
- psr/cloud/data.py +0 -127
- psr/cloud/desktop.py +0 -82
- psr/cloud/log.py +0 -40
- psr/cloud/status.py +0 -81
- psr/cloud/tempfile.py +0 -117
- psr/cloud/xml.py +0 -57
- psr/factory/libcurl-x64.dll +0 -0
- psr_factory-5.0.0b21.dist-info/RECORD +0 -40
- {psr_factory-5.0.0b21.dist-info → psr_factory-5.0.0b67.dist-info}/WHEEL +0 -0
- {psr_factory-5.0.0b21.dist-info → psr_factory-5.0.0b67.dist-info}/licenses/LICENSE.txt +0 -0
- {psr_factory-5.0.0b21.dist-info → psr_factory-5.0.0b67.dist-info}/top_level.txt +0 -0
psr/execqueue/server.py
CHANGED
```diff
@@ -1,10 +1,14 @@
 import hashlib
+import os
 import queue
 import shutil
 import sys
 import threading
 import time
 import zipfile
+import subprocess
+import shlex
+import logging
 from dotenv import load_dotenv
 from flask import (
     Flask,
```
```diff
@@ -33,16 +37,56 @@ os.makedirs(TEMPORARY_UPLOAD_FOLDER, exist_ok=True)
 
 load_dotenv()
 
+# Configure logging: write to server.log inside STORAGE_PATH and to stdout
+log_file = os.path.join(STORAGE_PATH, 'server.log')
+os.makedirs(os.path.dirname(log_file), exist_ok=True)
+fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+# Create handlers
+fh = logging.FileHandler(log_file, mode='a', encoding='utf-8')
+fh.setLevel(logging.INFO)
+fh.setFormatter(fmt)
+sh = logging.StreamHandler(sys.stdout)
+sh.setLevel(logging.INFO)
+sh.setFormatter(fmt)
+
+# Configure the root logger with our handlers so all loggers propagate here
+root = logging.getLogger()
+root.setLevel(logging.INFO)
+root.handlers = []
+root.addHandler(fh)
+root.addHandler(sh)
+
+# Create module logger that will propagate to root (do not add handlers here)
+logger = logging.getLogger('execqueue')
+logger.setLevel(logging.INFO)
+logger.propagate = True
+
+# Let Flask/Werkzeug propagate to root logger (so their messages go to server.log)
+logging.getLogger('werkzeug').setLevel(logging.INFO)
+logging.getLogger('werkzeug').propagate = True
+logging.getLogger('flask.app').setLevel(logging.INFO)
+logging.getLogger('flask.app').propagate = True
+
+
 
 try:
     client = psr.cloud.Client(cluster=psrcloud_cluster, verbose=True)
 except psr.cloud.CloudInputError as e:
-
+    logger.exception(f"Error connecting to PSR Cloud. Check user credentials: {e}")
     exit(1)
 
 _cloud_execution_case_map = {}
 
 app = Flask(__name__, root_path=os.getcwd())
+# Ensure Flask's app logger writes to the same handlers as our logger/root
+try:
+    app.logger.handlers = logger.handlers
+    app.logger.setLevel(logging.INFO)
+    app.logger.propagate = True
+except Exception:
+    # In case logger isn't ready for some reason, ignore and continue
+    pass
+
 session = None
 
 
```
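The block above routes everything through the root logger: the file and stream handlers are attached only to root, while `execqueue`, `werkzeug`, and `flask.app` merely propagate to it. One side note: since the `execqueue` logger gets no handlers of its own, the later `app.logger.handlers = logger.handlers` assignment appears to be a no-op, and Flask's messages reach `server.log` through propagation alone. A minimal standalone sketch of the same pattern (file name and logger names here are illustrative):

```python
import logging
import sys

fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

# One file handler, one stdout handler, both at INFO.
fh = logging.FileHandler('server.log', mode='a', encoding='utf-8')
sh = logging.StreamHandler(sys.stdout)
for handler in (fh, sh):
    handler.setLevel(logging.INFO)
    handler.setFormatter(fmt)

# Attach handlers to the root logger only; replace rather than append
# so repeated initialization doesn't duplicate output.
root = logging.getLogger()
root.setLevel(logging.INFO)
root.handlers = [fh, sh]

# Named loggers need no handlers; propagation carries records to root.
logging.getLogger('execqueue').info("goes to server.log and stdout")
logging.getLogger('werkzeug').info("same destinations")
```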
```diff
@@ -58,12 +102,110 @@ def run_local_case(execution_id: str, case_path: str):
         psr.runner.run_sddp(case_path, sddp_path, parallel_run=False)
         success = True
     except RuntimeError as e:
-
+        logger.exception(f"Error running {execution_id}: {e}")
 
     status = db.LOCAL_EXECUTION_FINISHED if success else db.LOCAL_EXECUTION_ERROR
     db.update_local_execution_status(session, execution_id, status)
 
 
+def _ensure_case_workdir(case_id: str) -> str:
+    """Ensure a working directory exists at uploads/<case_id> with extracted contents.
+    If it does not exist or is empty, extract the uploaded zip there.
+    Returns the absolute path to the working directory.
+    """
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    zip_upload_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+    os.makedirs(workdir, exist_ok=True)
+
+    # If directory is empty or looks incomplete, (re)extract
+    try:
+        if not os.listdir(workdir):
+            with zipfile.ZipFile(zip_upload_path, 'r') as zip_ref:
+                zip_ref.extractall(workdir)
+    except FileNotFoundError:
+        # If there's no zip, still return folder (may be pre-populated)
+        pass
+    return workdir
+
+
+def run_local_module(execution_id: str, case_id: str, module_name: str) -> int:
+    """Run a configured module locally inside the case's upload workdir.
+    Returns process return code (0=success, non-zero=failure).
+    Updates LocalExecution status accordingly.
+    """
+    global session
+    # Fetch module configuration
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg or 'command' not in module_cfg:
+        logger.error(f"Module '{module_name}' not configured.")
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+        return 1
+
+    workdir = _ensure_case_workdir(case_id)
+
+    # Build command and log file path
+    cmd_tmpl = module_cfg.get('command')
+    # Allow placeholders
+    cmd = cmd_tmpl.format(case_path=workdir, case_id=case_id, module=module_name)
+    log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+    log_path = os.path.join(workdir, log_name)
+
+    logger.info(f"Running module '{module_name}' for case {case_id} in {workdir}")
+    logger.debug(f"Command: {cmd}")
+
+    rc = 1
+    try:
+        # Prefer to run without shell to avoid platform-specific exit code mappings
+        # If the command starts with 'python' or references .py, build argv accordingly
+        argv = None
+        # Heuristic: if command contains .py, run with current Python executable
+        if '.py' in cmd:
+            parts = shlex.split(cmd)
+            # If the command already starts with python, use as-is; else prepend sys.executable
+            if parts[0].endswith('python') or parts[0].endswith('python.exe'):
+                argv = parts
+            else:
+                argv = [sys.executable] + parts
+        else:
+            argv = shlex.split(cmd)
+
+        with open(log_path, 'a', encoding='utf-8', errors='ignore') as logf:
+            proc = subprocess.Popen(
+                argv,
+                cwd=workdir,
+                stdout=logf,
+                stderr=logf,
+            )
+            rc = proc.wait()
+
+        # Now rc follows the subprocess return code semantics: 0 success, non-zero failure
+        status = db.LOCAL_EXECUTION_FINISHED if rc == 0 else db.LOCAL_EXECUTION_ERROR
+        db.update_local_execution_status(session, execution_id, status)
+    except Exception as e:
+        logger.exception(f"Error running module {module_name} for case {case_id}: {e}")
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+        rc = 1
+    return rc
+
+
+def _copy_tree(src: str, dst: str):
+    os.makedirs(dst, exist_ok=True)
+    for root, dirs, files in os.walk(src):
+        rel = os.path.relpath(root, src)
+        target_root = os.path.join(dst, rel) if rel != '.' else dst
+        os.makedirs(target_root, exist_ok=True)
+        for d in dirs:
+            os.makedirs(os.path.join(target_root, d), exist_ok=True)
+        for f in files:
+            s = os.path.join(root, f)
+            t = os.path.join(target_root, f)
+            try:
+                shutil.copy2(s, t)
+            except Exception:
+                # Best-effort copy; skip problematic files
+                pass
+
+
 def initialize_db():
     session, engine = db.initialize()
     return session
```
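`run_local_module` reads each module's definition from `MODULES` via `module_cfg.get('command')` and `module_cfg.get('log_file')`, and resolves `{case_path}`, `{case_id}`, and `{module}` placeholders with `str.format`. The configuration itself lives in `psr/execqueue/config.py`, whose diff is not shown here, so the following entry is a hypothetical shape inferred from that usage ('cleaner' and its command line are made up):

```python
# Hypothetical MODULES entry, inferred from how run_local_module consumes it.
MODULES = {
    "cleaner": {
        # Placeholders are filled in by run_local_module before execution.
        "command": "cleaner.py --case {case_path} --id {case_id}",
        # Optional; falls back to module_<name>.log inside the case workdir.
        "log_file": "cleaner.log",
    },
}
```

Because this command references a `.py` file and does not start with `python`, the worker would prepend `sys.executable` and run it with the case workdir as `cwd`, appending stdout and stderr to `cleaner.log`. Separately, `_copy_tree` is close to `shutil.copytree(src, dst, dirs_exist_ok=True)` on Python 3.8+, but with best-effort per-file semantics: an unreadable file is skipped instead of aborting the whole copy.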
```diff
@@ -91,22 +233,59 @@ def run_cloud_case(execution_id: str, case_path: str):
 def process_local_execution_queue():
     global session
     while True:
-
+        item = _execution_queue.get()
         try:
-
-
-
-
-
-
-
-
-
-
-
+            # Detect item type (backward compatibility for tuple)
+            if isinstance(item, dict) and item.get('type') == 'module':
+                execution_id = item['execution_id']
+                case_id = item['case_id']
+                module_name = item['module']
+                logger.info(f"Processing module {module_name} for case {case_id} (exec {execution_id})...")
+                run_local_module(execution_id, case_id, module_name)
+            else:
+                if isinstance(item, (list, tuple)):
+                    execution_id, case_id = item
+                else:
+                    execution_id = item.get('execution_id')
+                    case_id = item.get('case_id')
+
+                logger.info(f"Processing case execution {execution_id} for case {case_id}...")
+
+                # Wait for running modules to finish; abort if any failed
+                wait_loops = 0
+                while db.any_running_modules_for_case(session, case_id):
+                    logger.info(f"Case {case_id} has running modules; waiting...")
+                    time.sleep(5)
+                    wait_loops += 1
+                    # Safety: avoid infinite wait in worker
+                    if wait_loops > 240:  # ~20 minutes
+                        break
+
+                # Check last execution per distinct module: if any last module execution failed, mark error
+                failing_modules = []
+                for mname in db.get_distinct_module_names_for_case(session, case_id):
+                    last = db.last_module_execution_for_case(session, case_id, mname)
+                    if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                        failing_modules.append(mname)
+
+                if failing_modules:
+                    logger.warning(f"Case {case_id} has failed modules {failing_modules}; marking local execution {execution_id} as error and skipping run")
+                    db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+                else:
+                    # Prepare a dedicated results folder copying the current working directory (with module changes)
+                    execution_extraction_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
+                    os.makedirs(execution_extraction_path, exist_ok=True)
+                    workdir = _ensure_case_workdir(case_id)
+                    _copy_tree(workdir, execution_extraction_path)
+                    # Run SDDP
+                    run_local_case(execution_id, execution_extraction_path)
 
         except Exception as e:
-
+            # Use safe logging in case execution_id isn't available
+            try:
+                logger.exception(f"Error processing {execution_id}: {e}")
+            except Exception:
+                logger.exception(f"Error processing item: {e}")
         finally:
             _execution_queue.task_done()
 
```
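The reworked worker drains a single queue that now carries three item shapes, kept for backward compatibility with older producers. A short sketch of what each shape triggers (IDs built the same way the endpoints build them; 'cleaner' is an illustrative module name):

```python
import queue
import ulid  # the server builds IDs with str(ulid.ULID())

_execution_queue = queue.Queue()
eid, cid = str(ulid.ULID()), str(ulid.ULID())

# New module-run shape: dispatched to run_local_module.
_execution_queue.put({'type': 'module', 'execution_id': eid,
                      'case_id': cid, 'module': 'cleaner'})

# Legacy tuple shape, still produced by /run: full case execution.
_execution_queue.put((eid, cid))

# A dict without type == 'module' also falls through to the case branch.
_execution_queue.put({'execution_id': eid, 'case_id': cid})
```

For case runs, the worker first waits (up to roughly 20 minutes) for any in-flight module runs on the same case, then refuses to start if the most recent execution of any module failed.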
```diff
@@ -120,14 +299,32 @@ def process_cloud_execution_queue():
     while True:
         cloud_upload_id, case_id = _cloud_upload_queue.get()
         try:
-
-            #
-
+            logger.info(f"Processing {cloud_upload_id}...")
+            # Wait for running modules to finish; abort if any failed
+            wait_loops = 0
+            while db.any_running_modules_for_case(session, case_id):
+                logger.info(f"Case {case_id} has running modules; waiting before cloud run...")
+                time.sleep(5)
+                wait_loops += 1
+                if wait_loops > 240:  # ~20 minutes
+                    break
+            # Block if the last execution of any distinct module failed
+            failing_modules = []
+            for mname in db.get_distinct_module_names_for_case(session, case_id):
+                last = db.last_module_execution_for_case(session, case_id, mname)
+                if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                    failing_modules.append(mname)
+            if failing_modules:
+                logger.warning(f"Case {case_id} has failing modules in last execution {failing_modules}; skipping cloud run for upload {cloud_upload_id}")
+                # Nothing else to do; do not run in the cloud
+                continue
+            # Prepare temp folder by copying current working directory (with module changes)
             tmp_extraction_path = os.path.join(TEMPORARY_UPLOAD_FOLDER, cloud_upload_id)
-
+            workdir = _ensure_case_workdir(case_id)
+            if os.path.isdir(tmp_extraction_path):
+                shutil.rmtree(tmp_extraction_path, ignore_errors=True)
             os.makedirs(tmp_extraction_path, exist_ok=True)
-
-            zip_ref.extractall(tmp_extraction_path)
+            _copy_tree(workdir, tmp_extraction_path)
 
             # Run SDDP
             repository_id = run_cloud_case(cloud_upload_id, tmp_extraction_path)
```
```diff
@@ -136,14 +333,15 @@ def process_cloud_execution_queue():
             shutil.rmtree(tmp_extraction_path)
 
             execution_extraction_path = os.path.join(CLOUD_RESULTS_FOLDER, repository_id)
+            if os.path.isdir(execution_extraction_path):
+                shutil.rmtree(execution_extraction_path, ignore_errors=True)
             os.makedirs(execution_extraction_path, exist_ok=True)
-
-            zip_ref.extractall(execution_extraction_path)
+            _copy_tree(workdir, execution_extraction_path)
 
             db.register_cloud_execution(session, repository_id, cloud_upload_id, case_id)
 
         except Exception as e:
-
+            logger.exception(f"Error processing {cloud_upload_id}: {e}")
         finally:
             _cloud_upload_queue.task_done()
 
```
```diff
@@ -164,19 +362,19 @@ def monitor_cloud_runs():
             #check running executions
             for cloud_execution in db.get_runing_cloud_executions(session):
                 case_id = cloud_execution.repository_id
-
+                logger.info(f"Checking status of {case_id}...")
                 status, status_msg = client.get_status(case_id)
                 if status in psr.cloud.FAULTY_TERMINATION_STATUS:
-
+                    logger.warning(f"Execution {case_id} finished with errors")
                     db.update_cloud_execution_status(session, case_id, db.CloudStatus.ERROR.value)
                 elif status == psr.cloud.ExecutionStatus.SUCCESS:
-
+                    logger.info(f"Execution {case_id} finished successfully")
                     db.update_cloud_execution_status(session, case_id, db.CloudStatus.FINISHED.value)
 
             #download finished executions
             for cloud_execution in db.get_cloud_finished_executions(session):
                 repository_id = cloud_execution.repository_id
-
+                logger.info(f"Downloading results for {repository_id}...")
                 result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
                 client.download_results(repository_id, result_path)
                 db.update_cloud_execution_status(session, repository_id, db.CloudStatus.RESULTS_AVAILABLE.value)
```
```diff
@@ -185,17 +383,17 @@ def monitor_cloud_runs():
             for cloud_execution in db.get_cloud_failed_executions(session):
                 try:
                     repository_id = cloud_execution.repository_id
-
+                    logger.info(f"Downloading results for {repository_id}...")
                     result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
                     client.download_results(repository_id, result_path, extensions=['log'])
                     db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
                 except Exception as e:
-
-
+                    logger.exception(f"Error downloading results for {repository_id}: {e}")
+                    logger.warning("Forcing execution to Failed downloaded execution")
                     db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
                     continue
         else:
-
+            logger.info("Database not initialized. Retrying in 30s...")
             time.sleep(30)
 
 threading.Thread(target=monitor_cloud_runs, daemon=True).start()
```
```diff
@@ -224,7 +422,7 @@ def upload_file():
 @app.route('/run', methods=['POST'])
 def run_endpoint():
     global session
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
     case_id = request.form.get('case_id')
 
     if not case_id:
```
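Because `request.values` is Flask's combined view of query-string and form data, callers can pass the flag either way, and only a case-insensitive 'true' enables cloud execution. A hypothetical client-side sketch (base URL and case ID are placeholders, not taken from the package):

```python
import requests

BASE = "http://127.0.0.1:5000"
case_id = "01JAR2V9EXAMPLECASEID00000"  # placeholder ULID

# Equivalent ways to request a cloud run ('TRUE'/'True' also work
# thanks to .lower(); anything else means a local run):
requests.post(f"{BASE}/run", data={"case_id": case_id, "cloud_execution": "true"})
requests.post(f"{BASE}/run", params={"cloud_execution": "true"}, data={"case_id": case_id})
```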
```diff
@@ -234,6 +432,15 @@ def run_endpoint():
     if not os.path.exists(zip_case_path):
         return jsonify({'error': 'Upload file for this case ID not found'}), 404
 
+    # Pre-check: for each distinct module, if the last execution failed, block the run
+    failing_modules = []
+    for mname in db.get_distinct_module_names_for_case(session, case_id):
+        last = db.last_module_execution_for_case(session, case_id, mname)
+        if last and last.status == db.LOCAL_EXECUTION_ERROR:
+            failing_modules.append(mname)
+    if failing_modules:
+        return jsonify({'error': 'Case has failed modules in last execution', 'modules': failing_modules}), 409
+
     if cloud_execution:
         cloud_upload_id = str(ulid.ULID())
         _cloud_upload_queue.put((cloud_upload_id, case_id))
```
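With this pre-check, a 409 response tells the caller exactly which modules blocked the run. A minimal sketch of handling it on the client side (hypothetical code, same placeholder conventions as above):

```python
import requests

resp = requests.post("http://127.0.0.1:5000/run",
                     data={"case_id": "01JAR2V9EXAMPLECASEID00000"})
if resp.status_code == 409:
    # Fix or re-run the listed modules, then retry the case.
    print("Blocked by failed modules:", resp.json()["modules"])
elif resp.ok:
    print("Queued as execution", resp.json()["execution_id"])
else:
    print("Error:", resp.json().get("error"))
```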
```diff
@@ -246,10 +453,40 @@ def run_endpoint():
         _execution_queue.put((execution_id, case_id))
 
         db.register_local_execution(session, case_id, execution_id)
+        # Mark as running explicitly
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
 
     return jsonify({'case_id': case_id, 'execution_id': execution_id}), 200
 
 
+@app.route('/run_module', methods=['POST'])
+def run_module_endpoint():
+    global session
+    case_id = request.form.get('case_id')
+    module_name = request.form.get('module') or request.form.get('module_name')
+
+    if not case_id or not module_name:
+        return jsonify({'error': 'case_id and module are required'}), 400
+
+    # Validate case zip exists
+    zip_case_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    if not os.path.exists(zip_case_path) and not os.path.isdir(workdir):
+        return jsonify({'error': 'Upload file or working directory for this case ID not found'}), 404
+
+    # Validate module exists in config
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg or 'command' not in module_cfg:
+        return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+
+    execution_id = str(ulid.ULID())
+    _execution_queue.put({'type': 'module', 'execution_id': execution_id, 'case_id': case_id, 'module': module_name})
+    db.register_local_execution(session, case_id, execution_id, is_module=1, module=module_name)
+    db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
+
+    return jsonify({'case_id': case_id, 'module': module_name, 'execution_id': execution_id}), 200
+
+
 @app.route('/upload_and_run', methods=['POST'])
 def upload_and_run_file():
     global session
```
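The new `/run_module` endpoint mirrors `/run` but enqueues the dict-shaped item shown earlier and records the execution with `is_module=1`. A hypothetical end-to-end sketch; note the diff does not show the route for `get_status`, so the polling URL below is an assumption:

```python
import requests

BASE = "http://127.0.0.1:5000"
case_id = "01JAR2V9EXAMPLECASEID00000"  # placeholder from a previous upload

# 'module' (or 'module_name') selects the configured MODULES entry.
resp = requests.post(f"{BASE}/run_module",
                     data={"case_id": case_id, "module": "cleaner"})
resp.raise_for_status()
execution_id = resp.json()["execution_id"]

# Assumed status route; poll until the module finishes.
print(requests.get(f"{BASE}/status/{execution_id}").json())
```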
```diff
@@ -260,7 +497,7 @@ def upload_and_run_file():
     if file.filename == '':
         return jsonify({'error': 'No selected file'}), 400
 
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
 
     case_id = str(ulid.ULID())
     zip_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
```
```diff
@@ -302,7 +539,7 @@ def get_status(execution_id):
     """
     global client
    global session
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
```
```diff
@@ -321,7 +558,7 @@ def get_status(execution_id):
             status_msg = 'Execution finished with errors and log files are avaialble to download'
         else:
             status_msg = 'Unknown status'
-
+        logger.info(f"Cloud execution status for {execution_id} ({repository_id}): {status_msg}")
         return jsonify({'status_id': status, 'status_msg': status_msg}), 200
     else:
         status = db.get_local_execution_status(session, execution_id)
```
```diff
@@ -338,7 +575,8 @@ def get_status(execution_id):
 def get_results(execution_id: str):
     global session
     global client
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+    logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
```
```diff
@@ -346,6 +584,7 @@ def get_results(execution_id: str):
             return jsonify({'error': 'Execution ID not found in Cloud'}),
         status = db.get_cloud_execution_status(session, execution_id)
 
+
         if status == db.CloudStatus.RUNNING:
             return jsonify({'error': f'{repository_id} execution not finished yet'}), 402
         elif status == db.CloudStatus.FINISHED:
```
```diff
@@ -371,11 +610,47 @@ def get_results(execution_id: str):
         return jsonify({'execution_id': execution_id, 'files': result_files}), 200
 
 
+@app.route('/module_log/<case_id>', methods=['GET'])
+def get_module_log(case_id: str):
+    """Return the content of the module's fixed log file for the last module run of the case,
+    or for a specific module if provided as query parameter ?module=<name>.
+    """
+    global session
+    module_name = request.args.get('module') or request.args.get('module_name')
+
+    # Determine module and log file name
+    if not module_name:
+        last = db.last_module_execution_for_case(session, case_id)
+        if not last or not last.module:
+            return jsonify({'error': 'No module execution found for this case'}), 404
+        module_name = last.module
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg:
+        return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+    log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    if not os.path.isdir(workdir):
+        # Ensure workdir is created (may extract zip if needed)
+        workdir = _ensure_case_workdir(case_id)
+    log_path = os.path.join(workdir, log_name)
+    if not os.path.exists(log_path):
+        return jsonify({'error': 'Log file not found', 'module': module_name, 'log': log_name}), 404
+
+    try:
+        with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
+            content = f.read()
+        return content, 200, {'Content-Type': 'text/plain; charset=utf-8'}
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+
 @app.route('/results/<execution_id>/<file>', methods=['GET'])
 def download_file(execution_id: str, file):
     global session
 
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+    logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
```
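Unlike the JSON endpoints, `/module_log/<case_id>` returns the raw log as `text/plain` on success and JSON only for errors, so a client should branch on the status code. A hypothetical sketch:

```python
import requests

BASE = "http://127.0.0.1:5000"
case_id = "01JAR2V9EXAMPLECASEID00000"  # placeholder

# Log of the last module run for the case; add params={"module": "cleaner"}
# to pick a specific configured module instead.
r = requests.get(f"{BASE}/module_log/{case_id}")
if r.status_code == 200:
    print(r.text)             # plain-text log body
else:
    print(r.json()["error"])  # 400/404/500 carry a JSON error payload
```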
```diff
@@ -400,14 +675,15 @@ def download_file(execution_id: str, file):
 
 
 if __name__ == '__main__':
-
+    logger.info("Starting server...")
     session = initialize_db()
+
     try:
-        app.run(debug=FLASK_DEBUG,
+        app.run(host=settings.get("host", DEFAULT_HOST), debug=FLASK_DEBUG,
                 port=settings.get("port", DEFAULT_PORT),
                 threaded=True,
                 use_reloader=False,)
     except Exception as e:
-
+        logger.exception(f"Error starting server: {e}")
         sys.exit(1)
 
```
psr/execqueue/watcher.py
CHANGED
```diff
@@ -12,7 +12,7 @@ SERVER_URL = os.getenv("SERVER_URL", "http://127.0.0.1:5000")
 WATCH_DIR = os.getenv("WATCH_DIR")
 PROCESSED_DIR = os.getenv("PROCESSED_DIR")
 RESULTS_DIR = os.getenv("RESULTS_DIR", "results")
-SLEEP_SECONDS = int(os.getenv("WATCHER_SLEEP", "
+SLEEP_SECONDS = int(os.getenv("WATCHER_SLEEP", "10"))
 DB_PATH = os.getenv("WATCHER_DB_PATH", "watcher.sqlite")
 
 
```
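The only watcher change gives `WATCHER_SLEEP` a concrete default of 10 seconds (the old default is cut off in this view). The parsing is plain `int(os.getenv(...))`, so the polling interval can still be overridden per environment; a quick standard-library check:

```python
import os

os.environ.pop("WATCHER_SLEEP", None)
print(int(os.getenv("WATCHER_SLEEP", "10")))   # -> 10 (new default)

os.environ["WATCHER_SLEEP"] = "30"
print(int(os.getenv("WATCHER_SLEEP", "10")))   # -> 30 (operator override)
```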