psr-factory 5.0.0b16__py3-none-win_amd64.whl → 5.0.0b67__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- psr/execqueue/client.py +35 -14
- psr/execqueue/config.py +8 -0
- psr/execqueue/db.py +77 -11
- psr/execqueue/server.py +363 -57
- psr/execqueue/watcher.py +25 -11
- psr/factory/__init__.py +1 -1
- psr/factory/api.py +814 -523
- psr/factory/factory.dll +0 -0
- psr/factory/factory.pmd +120 -51
- psr/factory/factory.pmk +3028 -2199
- psr/factory/factorylib.py +32 -2
- psr/factory/samples/sddp_case01.py +3 -2
- psr/factory/samples/sddp_case21.py +3 -3
- psr/{cloud/version.py → outputs/__init__.py} +2 -2
- psr/outputs/outputs.py +179 -0
- psr/outputs/resample.py +289 -0
- psr/psrfcommon/psrfcommon.py +4 -1
- psr/runner/runner.py +74 -23
- psr_factory-5.0.0b67.dist-info/METADATA +47 -0
- psr_factory-5.0.0b67.dist-info/RECORD +32 -0
- psr/cloud/__init__.py +0 -7
- psr/cloud/aws.py +0 -256
- psr/cloud/cloud.py +0 -1444
- psr/cloud/data.py +0 -127
- psr/cloud/desktop.py +0 -82
- psr/cloud/log.py +0 -40
- psr/cloud/status.py +0 -81
- psr/cloud/tempfile.py +0 -117
- psr/cloud/xml.py +0 -57
- psr/factory/libcurl-x64.dll +0 -0
- psr_factory-5.0.0b16.dist-info/METADATA +0 -123
- psr_factory-5.0.0b16.dist-info/RECORD +0 -40
- {psr_factory-5.0.0b16.dist-info → psr_factory-5.0.0b67.dist-info}/WHEEL +0 -0
- {psr_factory-5.0.0b16.dist-info → psr_factory-5.0.0b67.dist-info}/licenses/LICENSE.txt +0 -0
- {psr_factory-5.0.0b16.dist-info → psr_factory-5.0.0b67.dist-info}/top_level.txt +0 -0
psr/execqueue/server.py
CHANGED

@@ -1,10 +1,14 @@
 import hashlib
+import os
 import queue
 import shutil
 import sys
 import threading
 import time
 import zipfile
+import subprocess
+import shlex
+import logging
 from dotenv import load_dotenv
 from flask import (
     Flask,
@@ -33,16 +37,56 @@ os.makedirs(TEMPORARY_UPLOAD_FOLDER, exist_ok=True)
 
 load_dotenv()
 
+# Configure logging: write to server.log inside STORAGE_PATH and to stdout
+log_file = os.path.join(STORAGE_PATH, 'server.log')
+os.makedirs(os.path.dirname(log_file), exist_ok=True)
+fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+# Create handlers
+fh = logging.FileHandler(log_file, mode='a', encoding='utf-8')
+fh.setLevel(logging.INFO)
+fh.setFormatter(fmt)
+sh = logging.StreamHandler(sys.stdout)
+sh.setLevel(logging.INFO)
+sh.setFormatter(fmt)
+
+# Configure the root logger with our handlers so all loggers propagate here
+root = logging.getLogger()
+root.setLevel(logging.INFO)
+root.handlers = []
+root.addHandler(fh)
+root.addHandler(sh)
+
+# Create module logger that will propagate to root (do not add handlers here)
+logger = logging.getLogger('execqueue')
+logger.setLevel(logging.INFO)
+logger.propagate = True
+
+# Let Flask/Werkzeug propagate to root logger (so their messages go to server.log)
+logging.getLogger('werkzeug').setLevel(logging.INFO)
+logging.getLogger('werkzeug').propagate = True
+logging.getLogger('flask.app').setLevel(logging.INFO)
+logging.getLogger('flask.app').propagate = True
+
+
 
 try:
     client = psr.cloud.Client(cluster=psrcloud_cluster, verbose=True)
 except psr.cloud.CloudInputError as e:
-
+    logger.exception(f"Error connecting to PSR Cloud. Check user credentials: {e}")
     exit(1)
 
 _cloud_execution_case_map = {}
 
-app = Flask(__name__)
+app = Flask(__name__, root_path=os.getcwd())
+# Ensure Flask's app logger writes to the same handlers as our logger/root
+try:
+    app.logger.handlers = logger.handlers
+    app.logger.setLevel(logging.INFO)
+    app.logger.propagate = True
+except Exception:
+    # In case logger isn't ready for some reason, ignore and continue
+    pass
+
 session = None
 
 
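
The hunk above centralizes logging on the root logger, so any logger created anywhere in the process inherits the file and stream handlers. A minimal sketch of the effect (the child logger name here is illustrative, not taken from the package):

    import logging

    # After the setup above, a child logger needs no handlers of its own:
    log = logging.getLogger('execqueue.worker')  # hypothetical name
    log.info("reaches both server.log and stdout via the root logger")

Because the root logger owns the handlers, Werkzeug's request lines and Flask's app logger output land in the same server.log file.
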
@@ -58,12 +102,110 @@ def run_local_case(execution_id: str, case_path: str):
         psr.runner.run_sddp(case_path, sddp_path, parallel_run=False)
         success = True
     except RuntimeError as e:
-
+        logger.exception(f"Error running {execution_id}: {e}")
 
     status = db.LOCAL_EXECUTION_FINISHED if success else db.LOCAL_EXECUTION_ERROR
     db.update_local_execution_status(session, execution_id, status)
 
 
+def _ensure_case_workdir(case_id: str) -> str:
+    """Ensure a working directory exists at uploads/<case_id> with extracted contents.
+    If it does not exist or is empty, extract the uploaded zip there.
+    Returns the absolute path to the working directory.
+    """
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    zip_upload_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+    os.makedirs(workdir, exist_ok=True)
+
+    # If directory is empty or looks incomplete, (re)extract
+    try:
+        if not os.listdir(workdir):
+            with zipfile.ZipFile(zip_upload_path, 'r') as zip_ref:
+                zip_ref.extractall(workdir)
+    except FileNotFoundError:
+        # If there's no zip, still return folder (may be pre-populated)
+        pass
+    return workdir
+
+
+def run_local_module(execution_id: str, case_id: str, module_name: str) -> int:
+    """Run a configured module locally inside the case's upload workdir.
+    Returns process return code (0=success, non-zero=failure).
+    Updates LocalExecution status accordingly.
+    """
+    global session
+    # Fetch module configuration
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg or 'command' not in module_cfg:
+        logger.error(f"Module '{module_name}' not configured.")
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+        return 1
+
+    workdir = _ensure_case_workdir(case_id)
+
+    # Build command and log file path
+    cmd_tmpl = module_cfg.get('command')
+    # Allow placeholders
+    cmd = cmd_tmpl.format(case_path=workdir, case_id=case_id, module=module_name)
+    log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+    log_path = os.path.join(workdir, log_name)
+
+    logger.info(f"Running module '{module_name}' for case {case_id} in {workdir}")
+    logger.debug(f"Command: {cmd}")
+
+    rc = 1
+    try:
+        # Prefer to run without shell to avoid platform-specific exit code mappings
+        # If the command starts with 'python' or references .py, build argv accordingly
+        argv = None
+        # Heuristic: if command contains .py, run with current Python executable
+        if '.py' in cmd:
+            parts = shlex.split(cmd)
+            # If the command already starts with python, use as-is; else prepend sys.executable
+            if parts[0].endswith('python') or parts[0].endswith('python.exe'):
+                argv = parts
+            else:
+                argv = [sys.executable] + parts
+        else:
+            argv = shlex.split(cmd)
+
+        with open(log_path, 'a', encoding='utf-8', errors='ignore') as logf:
+            proc = subprocess.Popen(
+                argv,
+                cwd=workdir,
+                stdout=logf,
+                stderr=logf,
+            )
+            rc = proc.wait()
+
+        # Now rc follows the subprocess return code semantics: 0 success, non-zero failure
+        status = db.LOCAL_EXECUTION_FINISHED if rc == 0 else db.LOCAL_EXECUTION_ERROR
+        db.update_local_execution_status(session, execution_id, status)
+    except Exception as e:
+        logger.exception(f"Error running module {module_name} for case {case_id}: {e}")
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+        rc = 1
+    return rc
+
+
+def _copy_tree(src: str, dst: str):
+    os.makedirs(dst, exist_ok=True)
+    for root, dirs, files in os.walk(src):
+        rel = os.path.relpath(root, src)
+        target_root = os.path.join(dst, rel) if rel != '.' else dst
+        os.makedirs(target_root, exist_ok=True)
+        for d in dirs:
+            os.makedirs(os.path.join(target_root, d), exist_ok=True)
+        for f in files:
+            s = os.path.join(root, f)
+            t = os.path.join(target_root, f)
+            try:
+                shutil.copy2(s, t)
+            except Exception:
+                # Best-effort copy; skip problematic files
+                pass
+
+
 def initialize_db():
     session, engine = db.initialize()
     return session
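
run_local_module() reads its configuration from a MODULES mapping (defined in psr/execqueue/config.py, whose contents this diff does not show). A hypothetical entry, to illustrate the 'command' template and the optional 'log_file' key:

    # Hypothetical MODULES entry; the real dictionary lives in psr/execqueue/config.py.
    MODULES = {
        "preprocess": {
            # {case_path}, {case_id} and {module} are expanded with str.format()
            "command": "preprocess.py --case {case_path} --id {case_id}",
            "log_file": "preprocess.log",  # optional; defaults to module_<name>.log
        },
    }

    # For case_id "01ABC" the worker would build, roughly:
    #   argv = [sys.executable, "preprocess.py", "--case", "uploads/01ABC", "--id", "01ABC"]
    # because the command references a .py file and does not itself start with "python".

Note that _copy_tree() is essentially a fault-tolerant shutil.copytree(src, dst, dirs_exist_ok=True): it tolerates an existing destination and skips files that fail to copy instead of aborting.
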
@@ -76,7 +218,7 @@ def run_cloud_case(execution_id: str, case_path: str):
         name="LSEG Server "+ execution_id,
         data_path=case_path,
         program="SDDP",
-        program_version = "17.
+        program_version = "17.3.9",
         execution_type="Default",
         memory_per_process_ratio='2:1',
         price_optimized=False,
@@ -91,22 +233,59 @@
 def process_local_execution_queue():
     global session
     while True:
-
+        item = _execution_queue.get()
         try:
-
-
-
-
-
-
-
-
-
-
-
+            # Detect item type (backward compatibility for tuple)
+            if isinstance(item, dict) and item.get('type') == 'module':
+                execution_id = item['execution_id']
+                case_id = item['case_id']
+                module_name = item['module']
+                logger.info(f"Processing module {module_name} for case {case_id} (exec {execution_id})...")
+                run_local_module(execution_id, case_id, module_name)
+            else:
+                if isinstance(item, (list, tuple)):
+                    execution_id, case_id = item
+                else:
+                    execution_id = item.get('execution_id')
+                    case_id = item.get('case_id')
+
+                logger.info(f"Processing case execution {execution_id} for case {case_id}...")
+
+                # Wait for running modules to finish; abort if any failed
+                wait_loops = 0
+                while db.any_running_modules_for_case(session, case_id):
+                    logger.info(f"Case {case_id} has running modules; waiting...")
+                    time.sleep(5)
+                    wait_loops += 1
+                    # Safety: avoid infinite wait in worker
+                    if wait_loops > 240:  # ~20 minutes
+                        break
+
+                # Check last execution per distinct module: if any last module execution failed, mark error
+                failing_modules = []
+                for mname in db.get_distinct_module_names_for_case(session, case_id):
+                    last = db.last_module_execution_for_case(session, case_id, mname)
+                    if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                        failing_modules.append(mname)
+
+                if failing_modules:
+                    logger.warning(f"Case {case_id} has failed modules {failing_modules}; marking local execution {execution_id} as error and skipping run")
+                    db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+                else:
+                    # Prepare a dedicated results folder copying the current working directory (with module changes)
+                    execution_extraction_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
+                    os.makedirs(execution_extraction_path, exist_ok=True)
+                    workdir = _ensure_case_workdir(case_id)
+                    _copy_tree(workdir, execution_extraction_path)
+                    # Run SDDP
+                    run_local_case(execution_id, execution_extraction_path)
 
         except Exception as e:
-
+            # Use safe logging in case execution_id isn't available
+            try:
+                logger.exception(f"Error processing {execution_id}: {e}")
+            except Exception:
+                logger.exception(f"Error processing item: {e}")
         finally:
             _execution_queue.task_done()
 
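
The worker now accepts two item shapes on _execution_queue, matching the producers later in this diff (ULID strings shortened to placeholders):

    # Legacy shape, still produced by /run and /upload_and_run:
    _execution_queue.put(("01J...EXEC", "01J...CASE"))

    # New shape, produced by /run_module:
    _execution_queue.put({
        "type": "module",
        "execution_id": "01J...EXEC",
        "case_id": "01J...CASE",
        "module": "preprocess",  # hypothetical module name
    })
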
@@ -120,14 +299,32 @@ def process_cloud_execution_queue():
     while True:
         cloud_upload_id, case_id = _cloud_upload_queue.get()
         try:
-
-            #
-
+            logger.info(f"Processing {cloud_upload_id}...")
+            # Wait for running modules to finish; abort if any failed
+            wait_loops = 0
+            while db.any_running_modules_for_case(session, case_id):
+                logger.info(f"Case {case_id} has running modules; waiting before cloud run...")
+                time.sleep(5)
+                wait_loops += 1
+                if wait_loops > 240:  # ~20 minutes
+                    break
+            # Block if the last execution of any distinct module failed
+            failing_modules = []
+            for mname in db.get_distinct_module_names_for_case(session, case_id):
+                last = db.last_module_execution_for_case(session, case_id, mname)
+                if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                    failing_modules.append(mname)
+            if failing_modules:
+                logger.warning(f"Case {case_id} has failing modules in last execution {failing_modules}; skipping cloud run for upload {cloud_upload_id}")
+                # Nothing else to do; do not run in the cloud
+                continue
+            # Prepare temp folder by copying current working directory (with module changes)
             tmp_extraction_path = os.path.join(TEMPORARY_UPLOAD_FOLDER, cloud_upload_id)
-
+            workdir = _ensure_case_workdir(case_id)
+            if os.path.isdir(tmp_extraction_path):
+                shutil.rmtree(tmp_extraction_path, ignore_errors=True)
             os.makedirs(tmp_extraction_path, exist_ok=True)
-
-                zip_ref.extractall(tmp_extraction_path)
+            _copy_tree(workdir, tmp_extraction_path)
 
             # Run SDDP
             repository_id = run_cloud_case(cloud_upload_id, tmp_extraction_path)
@@ -136,14 +333,15 @@
             shutil.rmtree(tmp_extraction_path)
 
             execution_extraction_path = os.path.join(CLOUD_RESULTS_FOLDER, repository_id)
+            if os.path.isdir(execution_extraction_path):
+                shutil.rmtree(execution_extraction_path, ignore_errors=True)
             os.makedirs(execution_extraction_path, exist_ok=True)
-
-                zip_ref.extractall(execution_extraction_path)
+            _copy_tree(workdir, execution_extraction_path)
 
             db.register_cloud_execution(session, repository_id, cloud_upload_id, case_id)
 
         except Exception as e:
-
+            logger.exception(f"Error processing {cloud_upload_id}: {e}")
         finally:
             _cloud_upload_queue.task_done()
 
@@ -164,24 +362,38 @@ def monitor_cloud_runs():
             #check running executions
             for cloud_execution in db.get_runing_cloud_executions(session):
                 case_id = cloud_execution.repository_id
-
+                logger.info(f"Checking status of {case_id}...")
                 status, status_msg = client.get_status(case_id)
                 if status in psr.cloud.FAULTY_TERMINATION_STATUS:
-
+                    logger.warning(f"Execution {case_id} finished with errors")
                     db.update_cloud_execution_status(session, case_id, db.CloudStatus.ERROR.value)
                 elif status == psr.cloud.ExecutionStatus.SUCCESS:
-
+                    logger.info(f"Execution {case_id} finished successfully")
                    db.update_cloud_execution_status(session, case_id, db.CloudStatus.FINISHED.value)
 
             #download finished executions
             for cloud_execution in db.get_cloud_finished_executions(session):
                 repository_id = cloud_execution.repository_id
-
+                logger.info(f"Downloading results for {repository_id}...")
                 result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
                 client.download_results(repository_id, result_path)
                 db.update_cloud_execution_status(session, repository_id, db.CloudStatus.RESULTS_AVAILABLE.value)
+
+            #download failed executions
+            for cloud_execution in db.get_cloud_failed_executions(session):
+                try:
+                    repository_id = cloud_execution.repository_id
+                    logger.info(f"Downloading results for {repository_id}...")
+                    result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
+                    client.download_results(repository_id, result_path, extensions=['log'])
+                    db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
+                except Exception as e:
+                    logger.exception(f"Error downloading results for {repository_id}: {e}")
+                    logger.warning("Forcing execution to Failed downloaded execution")
+                    db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
+                    continue
         else:
-
+            logger.info("Database not initialized. Retrying in 30s...")
             time.sleep(30)
 
 threading.Thread(target=monitor_cloud_runs, daemon=True).start()
@@ -210,7 +422,7 @@ def upload_file():
 @app.route('/run', methods=['POST'])
 def run_endpoint():
     global session
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
     case_id = request.form.get('case_id')
 
     if not case_id:
@@ -220,6 +432,15 @@ def run_endpoint():
     if not os.path.exists(zip_case_path):
         return jsonify({'error': 'Upload file for this case ID not found'}), 404
 
+    # Pre-check: for each distinct module, if the last execution failed, block the run
+    failing_modules = []
+    for mname in db.get_distinct_module_names_for_case(session, case_id):
+        last = db.last_module_execution_for_case(session, case_id, mname)
+        if last and last.status == db.LOCAL_EXECUTION_ERROR:
+            failing_modules.append(mname)
+    if failing_modules:
+        return jsonify({'error': 'Case has failed modules in last execution', 'modules': failing_modules}), 409
+
     if cloud_execution:
         cloud_upload_id = str(ulid.ULID())
         _cloud_upload_queue.put((cloud_upload_id, case_id))
@@ -232,10 +453,40 @@ def run_endpoint():
         _execution_queue.put((execution_id, case_id))
 
         db.register_local_execution(session, case_id, execution_id)
+        # Mark as running explicitly
+        db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
 
     return jsonify({'case_id': case_id, 'execution_id': execution_id}), 200
 
 
+@app.route('/run_module', methods=['POST'])
+def run_module_endpoint():
+    global session
+    case_id = request.form.get('case_id')
+    module_name = request.form.get('module') or request.form.get('module_name')
+
+    if not case_id or not module_name:
+        return jsonify({'error': 'case_id and module are required'}), 400
+
+    # Validate case zip exists
+    zip_case_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    if not os.path.exists(zip_case_path) and not os.path.isdir(workdir):
+        return jsonify({'error': 'Upload file or working directory for this case ID not found'}), 404
+
+    # Validate module exists in config
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg or 'command' not in module_cfg:
+        return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+
+    execution_id = str(ulid.ULID())
+    _execution_queue.put({'type': 'module', 'execution_id': execution_id, 'case_id': case_id, 'module': module_name})
+    db.register_local_execution(session, case_id, execution_id, is_module=1, module=module_name)
+    db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
+
+    return jsonify({'case_id': case_id, 'module': module_name, 'execution_id': execution_id}), 200
+
+
 @app.route('/upload_and_run', methods=['POST'])
 def upload_and_run_file():
     global session
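
A sketch of driving the new endpoint from a client, assuming the server is reachable at localhost on its configured port (host, port and IDs below are placeholders, not taken from the package):

    import requests

    BASE = "http://localhost:5000"  # assumption: settings resolve to this host/port

    # Queue a module run for a case that was already uploaded
    r = requests.post(f"{BASE}/run_module",
                      data={"case_id": "01J...CASE", "module": "preprocess"})
    r.raise_for_status()
    execution_id = r.json()["execution_id"]

While the last execution of any module for the case is in the error state, both the local and cloud queues skip the case, and /run answers HTTP 409 with the failing module names.
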
@@ -246,7 +497,7 @@ def upload_and_run_file():
     if file.filename == '':
         return jsonify({'error': 'No selected file'}), 400
 
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
 
     case_id = str(ulid.ULID())
     zip_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
@@ -282,36 +533,50 @@ def get_status(execution_id):
       200:
         description: Execution status
         schema:
-          type:
+          type: object
       404:
         description: Execution ID not found
-    """
+    """
     global client
     global session
-    cloud_execution = request.
-    return_status_id = request.form.get('return_status_id', 'false').lower() == 'true'
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
         if repository_id is None:
             return jsonify({'error': 'Execution ID not found in Cloud'}), 404
         status = db.get_cloud_execution_status(session, repository_id)
-        if return_status_id:
-            return jsonify({'status_id': status}), 200
         if status == db.CloudStatus.ERROR.value:
-
+            status_msg = 'Execution finished with errors. Only log files will be downloaded'
         elif status == db.CloudStatus.RUNNING.value:
-
+            status_msg = 'Execution not finished yet'
         elif status == db.CloudStatus.FINISHED.value:
-
+            status_msg = 'Execution finished, but download not yet started from Cloud server'
         elif status == db.CloudStatus.RESULTS_AVAILABLE.value:
-
+            status_msg = 'Execution finished and results are available to download'
+        elif status == db.CloudStatus.LOGS_AVAILABLE_ERROR.value:
+            status_msg = 'Execution finished with errors and log files are avaialble to download'
+        else:
+            status_msg = 'Unknown status'
+        logger.info(f"Cloud execution status for {execution_id} ({repository_id}): {status_msg}")
+        return jsonify({'status_id': status, 'status_msg': status_msg}), 200
+    else:
+        status = db.get_local_execution_status(session, execution_id)
+        if status == db.LOCAL_EXECUTION_ERROR:
+            status_msg = 'Execution finished with errors'
+        elif status != db.LOCAL_EXECUTION_FINISHED:
+            status_msg = 'Execution not finished yet'
+        else:
+            status_msg = 'Execution finished'
+        return jsonify({'status_id': status, 'status_msg': status_msg}), 200
+
 
 @app.route('/results/<execution_id>', methods=['GET'])
 def get_results(execution_id: str):
     global session
     global client
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+    logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
@@ -319,18 +584,18 @@ def get_results(execution_id: str):
             return jsonify({'error': 'Execution ID not found in Cloud'}),
         status = db.get_cloud_execution_status(session, execution_id)
 
-
-
-
-            return jsonify({'error': 'Execution not finished yet'}), 402
+
+        if status == db.CloudStatus.RUNNING:
+            return jsonify({'error': f'{repository_id} execution not finished yet'}), 402
         elif status == db.CloudStatus.FINISHED:
-            return jsonify({'error': '
+            return jsonify({'error': f'{repository_id} results not available yet'}), 403
         else:
             # download the results folder
             result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
             if not os.path.exists(result_path):
-                return jsonify({'error': '
+                return jsonify({'error': f'{repository_id} execution result folder not found'}), 404
             result_files = os.listdir(result_path)
+            result_files = [f for f in result_files if os.path.isfile(os.path.join(result_path, f))]
             return jsonify({'execution_id': repository_id, 'files': result_files}), 200
     else:
         status = db.get_local_execution_status(session, execution_id)
@@ -345,11 +610,47 @@ def get_results(execution_id: str):
         return jsonify({'execution_id': execution_id, 'files': result_files}), 200
 
 
+@app.route('/module_log/<case_id>', methods=['GET'])
+def get_module_log(case_id: str):
+    """Return the content of the module's fixed log file for the last module run of the case,
+    or for a specific module if provided as query parameter ?module=<name>.
+    """
+    global session
+    module_name = request.args.get('module') or request.args.get('module_name')
+
+    # Determine module and log file name
+    if not module_name:
+        last = db.last_module_execution_for_case(session, case_id)
+        if not last or not last.module:
+            return jsonify({'error': 'No module execution found for this case'}), 404
+        module_name = last.module
+    module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+    if not module_cfg:
+        return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+    log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+
+    workdir = os.path.join(UPLOADS_FOLDER, case_id)
+    if not os.path.isdir(workdir):
+        # Ensure workdir is created (may extract zip if needed)
+        workdir = _ensure_case_workdir(case_id)
+    log_path = os.path.join(workdir, log_name)
+    if not os.path.exists(log_path):
+        return jsonify({'error': 'Log file not found', 'module': module_name, 'log': log_name}), 404
+
+    try:
+        with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
+            content = f.read()
+        return content, 200, {'Content-Type': 'text/plain; charset=utf-8'}
+    except Exception as e:
+        return jsonify({'error': str(e)}), 500
+
+
 @app.route('/results/<execution_id>/<file>', methods=['GET'])
 def download_file(execution_id: str, file):
     global session
 
-    cloud_execution = request.
+    cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+    logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
 
     if cloud_execution:
         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
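
Usage sketch for the new log endpoint (same placeholder host and IDs as above):

    import requests

    BASE = "http://localhost:5000"  # assumption

    # Log of the case's most recent module run:
    print(requests.get(f"{BASE}/module_log/01J...CASE").text)

    # Log of a specific module, selected via the query parameter:
    print(requests.get(f"{BASE}/module_log/01J...CASE",
                       params={"module": "preprocess"}).text)
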
@@ -357,27 +658,32 @@ def download_file(execution_id: str, file):
     else:
         result_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
     if not os.path.exists(result_path):
-
+        if cloud_execution:
+            msg = f'{repository_id} execution result folder not found'
+        else:
+            msg = f'Execution result folder not found'
+        return jsonify({'error': msg}), 404
 
-    file_path = os.path.join(result_path, file)
+    file_path = os.path.normpath(os.path.join(result_path, file)).replace("\\", "/")
     if not os.path.exists(file_path):
         return jsonify({'error': 'File not found'}), 404
 
     try:
-
+        return send_file(file_path, download_name=file, as_attachment=True)
     except Exception as e:
         return jsonify({'error': str(e)}), 500
 
 
 if __name__ == '__main__':
-
+    logger.info("Starting server...")
     session = initialize_db()
+
     try:
-        app.run(debug=FLASK_DEBUG,
+        app.run(host=settings.get("host", DEFAULT_HOST), debug=FLASK_DEBUG,
                 port=settings.get("port", DEFAULT_PORT),
                 threaded=True,
                 use_reloader=False,)
     except Exception as e:
-
+        logger.exception(f"Error starting server: {e}")
         sys.exit(1)
 
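
One caveat on the download_file() change: os.path.normpath() collapses ".." segments but does not by itself confine the result to result_path. A common hardening step, shown here only as a sketch (this helper is not part of the package):

    import os

    def _safe_result_file(result_path: str, file: str):
        # Resolve the requested path and verify it stays inside result_path.
        base = os.path.realpath(result_path)
        candidate = os.path.realpath(os.path.join(result_path, file))
        if os.path.commonpath([base, candidate]) != base:
            return None  # traversal attempt; caller should answer 404
        return candidate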