psr-factory 5.0.0b69__py3-none-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of psr-factory might be problematic.

@@ -0,0 +1,689 @@
+ import hashlib
+ import os
+ import queue
+ import shutil
+ import sys
+ import threading
+ import time
+ import zipfile
+ import subprocess
+ import shlex
+ import logging
+ from dotenv import load_dotenv
+ from flask import (
+     Flask,
+     request,
+     jsonify,
+     send_file
+ )
+ import ulid
+
+
+ import psr.runner
+ import psr.cloud
+
+ from psr.execqueue.config import *
+ from psr.execqueue import db
+
+ _execution_queue = queue.Queue()
+ _cloud_upload_queue = queue.Queue()
+
+
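+ # NOTE: the folder constants below (UPLOADS_FOLDER, LOCAL_RESULTS_FOLDER,
+ # CLOUD_RESULTS_FOLDER, TEMPORARY_UPLOAD_FOLDER, STORAGE_PATH) are presumably
+ # supplied by the star import from psr.execqueue.config above.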
+ os.makedirs(UPLOADS_FOLDER, exist_ok=True)
+ os.makedirs(LOCAL_RESULTS_FOLDER, exist_ok=True)
+ os.makedirs(CLOUD_RESULTS_FOLDER, exist_ok=True)
+ os.makedirs(TEMPORARY_UPLOAD_FOLDER, exist_ok=True)
+
+
+ load_dotenv()
+
+ # Configure logging: write to server.log inside STORAGE_PATH and to stdout
+ log_file = os.path.join(STORAGE_PATH, 'server.log')
+ os.makedirs(os.path.dirname(log_file), exist_ok=True)
+ fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+ # Create handlers
+ fh = logging.FileHandler(log_file, mode='a', encoding='utf-8')
+ fh.setLevel(logging.INFO)
+ fh.setFormatter(fmt)
+ sh = logging.StreamHandler(sys.stdout)
+ sh.setLevel(logging.INFO)
+ sh.setFormatter(fmt)
+
+ # Configure the root logger with our handlers so all loggers propagate here
+ root = logging.getLogger()
+ root.setLevel(logging.INFO)
+ root.handlers = []
+ root.addHandler(fh)
+ root.addHandler(sh)
+
+ # Create module logger that will propagate to root (do not add handlers here)
+ logger = logging.getLogger('execqueue')
+ logger.setLevel(logging.INFO)
+ logger.propagate = True
+
+ # Let Flask/Werkzeug propagate to root logger (so their messages go to server.log)
+ logging.getLogger('werkzeug').setLevel(logging.INFO)
+ logging.getLogger('werkzeug').propagate = True
+ logging.getLogger('flask.app').setLevel(logging.INFO)
+ logging.getLogger('flask.app').propagate = True
+
+
+
+ try:
+     client = psr.cloud.Client(cluster=psrcloud_cluster, verbose=True)
+ except psr.cloud.CloudInputError as e:
+     logger.exception(f"Error connecting to PSR Cloud. Check user credentials: {e}")
+     sys.exit(1)
+
+ _cloud_execution_case_map = {}
+
+ app = Flask(__name__, root_path=os.getcwd())
+ # Ensure Flask's app logger writes to the same handlers as our logger/root
+ try:
+     app.logger.handlers = root.handlers
+     app.logger.setLevel(logging.INFO)
+     app.logger.propagate = True
+ except Exception:
+     # In case the logger isn't ready for some reason, ignore and continue
+     pass
+
+ session = None
+
+
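+ # Helper: MD5 fingerprint of an uploaded archive; it is only stored alongside
+ # the case record (see db.register_case below), not used as a security check.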
+ def get_file_checksum(file_path: str) -> str:
+     with open(file_path, 'rb') as file:
+         return hashlib.md5(file.read()).hexdigest()
+
+
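+ # Runs SDDP locally via psr.runner and records the outcome in the local
+ # executions table. sddp_path is presumably another constant from the
+ # config star import.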
+ def run_local_case(execution_id: str, case_path: str):
+     global session
+     success = False
+     try:
+         psr.runner.run_sddp(case_path, sddp_path, parallel_run=False)
+         success = True
+     except RuntimeError as e:
+         logger.exception(f"Error running {execution_id}: {e}")
+
+     status = db.LOCAL_EXECUTION_FINISHED if success else db.LOCAL_EXECUTION_ERROR
+     db.update_local_execution_status(session, execution_id, status)
+
+
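+ # The workdir doubles as a cache: module runs mutate it in place and later
+ # local/cloud runs copy it, so the zip is only (re)extracted when the folder
+ # is empty.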
+ def _ensure_case_workdir(case_id: str) -> str:
+     """Ensure a working directory exists at uploads/<case_id> with extracted contents.
+     If it does not exist or is empty, extract the uploaded zip there.
+     Returns the absolute path to the working directory.
+     """
+     workdir = os.path.join(UPLOADS_FOLDER, case_id)
+     zip_upload_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+     os.makedirs(workdir, exist_ok=True)
+
+     # If directory is empty or looks incomplete, (re)extract
+     try:
+         if not os.listdir(workdir):
+             with zipfile.ZipFile(zip_upload_path, 'r') as zip_ref:
+                 zip_ref.extractall(workdir)
+     except FileNotFoundError:
+         # If there's no zip, still return the folder (may be pre-populated)
+         pass
+     return workdir
+
+
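+ # Executes one configured module as a subprocess inside the case workdir,
+ # appending its stdout/stderr to the module's log file.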
+ def run_local_module(execution_id: str, case_id: str, module_name: str) -> int:
+     """Run a configured module locally inside the case's upload workdir.
+     Returns the process return code (0 = success, non-zero = failure).
+     Updates LocalExecution status accordingly.
+     """
+     global session
+     # Fetch module configuration
+     module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+     if not module_cfg or 'command' not in module_cfg:
+         logger.error(f"Module '{module_name}' not configured.")
+         db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+         return 1
+
+     workdir = _ensure_case_workdir(case_id)
+
+     # Build command and log file path; allow placeholders in the template
+     cmd_tmpl = module_cfg.get('command')
+     cmd = cmd_tmpl.format(case_path=workdir, case_id=case_id, module=module_name)
+     log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+     log_path = os.path.join(workdir, log_name)
+
+     logger.info(f"Running module '{module_name}' for case {case_id} in {workdir}")
+     logger.debug(f"Command: {cmd}")
+
+     rc = 1
+     try:
+         # Prefer to run without shell to avoid platform-specific exit code mappings.
+         # Heuristic: if the command references a .py file, run it with the current
+         # Python interpreter; otherwise split the command as-is.
+         argv = None
+         if '.py' in cmd:
+             parts = shlex.split(cmd)
+             # If the command already starts with python, use as-is; else prepend sys.executable
+             if parts[0].endswith('python') or parts[0].endswith('python.exe'):
+                 argv = parts
+             else:
+                 argv = [sys.executable] + parts
+         else:
+             argv = shlex.split(cmd)
+
+         with open(log_path, 'a', encoding='utf-8', errors='ignore') as logf:
+             proc = subprocess.Popen(
+                 argv,
+                 cwd=workdir,
+                 stdout=logf,
+                 stderr=logf,
+             )
+             rc = proc.wait()
+
+         # rc follows subprocess return code semantics: 0 success, non-zero failure
+         status = db.LOCAL_EXECUTION_FINISHED if rc == 0 else db.LOCAL_EXECUTION_ERROR
+         db.update_local_execution_status(session, execution_id, status)
+     except Exception as e:
+         logger.exception(f"Error running module {module_name} for case {case_id}: {e}")
+         db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+         rc = 1
+     return rc
+
+
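+ # Best-effort recursive copy, presumably used instead of shutil.copytree so
+ # that an existing destination or an unreadable file does not abort the copy.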
+ def _copy_tree(src: str, dst: str):
+     os.makedirs(dst, exist_ok=True)
+     for dirpath, dirs, files in os.walk(src):
+         rel = os.path.relpath(dirpath, src)
+         target_root = os.path.join(dst, rel) if rel != '.' else dst
+         os.makedirs(target_root, exist_ok=True)
+         for d in dirs:
+             os.makedirs(os.path.join(target_root, d), exist_ok=True)
+         for f in files:
+             s = os.path.join(dirpath, f)
+             t = os.path.join(target_root, f)
+             try:
+                 shutil.copy2(s, t)
+             except Exception:
+                 # Best-effort copy; skip problematic files
+                 pass
+
+
+ def initialize_db():
+     session, _engine = db.initialize()
+     return session
+
+
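+ # Submits the case to PSR Cloud via psr.cloud. The SDDP version and sizing
+ # parameters below are hard-coded for this server.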
+ def run_cloud_case(execution_id: str, case_path: str):
+     global client
+     # Run the case
+     case = psr.cloud.Case(
+         name="LSEG Server " + execution_id,
+         data_path=case_path,
+         program="SDDP",
+         program_version="17.3.9",
+         execution_type="Default",
+         memory_per_process_ratio='2:1',
+         price_optimized=False,
+         number_of_processes=64,
+         repository_duration=1,
+     )
+     case_id = client.run_case(case)
+
+     return str(case_id)
+
+
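+ # Background worker: drains _execution_queue. Dict items with type='module'
+ # trigger a single module run; tuples (execution_id, case_id) trigger a full
+ # local SDDP run, gated on any still-running modules for the case.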
+ def process_local_execution_queue():
+     global session
+     while True:
+         item = _execution_queue.get()
+         try:
+             # Detect item type (backward compatibility for tuple)
+             if isinstance(item, dict) and item.get('type') == 'module':
+                 execution_id = item['execution_id']
+                 case_id = item['case_id']
+                 module_name = item['module']
+                 logger.info(f"Processing module {module_name} for case {case_id} (exec {execution_id})...")
+                 run_local_module(execution_id, case_id, module_name)
+             else:
+                 if isinstance(item, (list, tuple)):
+                     execution_id, case_id = item
+                 else:
+                     execution_id = item.get('execution_id')
+                     case_id = item.get('case_id')
+
+                 logger.info(f"Processing case execution {execution_id} for case {case_id}...")
+
+                 # Wait for running modules to finish; abort if any failed
+                 wait_loops = 0
+                 while db.any_running_modules_for_case(session, case_id):
+                     logger.info(f"Case {case_id} has running modules; waiting...")
+                     time.sleep(5)
+                     wait_loops += 1
+                     # Safety: avoid infinite wait in worker
+                     if wait_loops > 240:  # ~20 minutes
+                         break
+
+                 # Check the last execution of each distinct module: if any failed, mark this run as error
+                 failing_modules = []
+                 for mname in db.get_distinct_module_names_for_case(session, case_id):
+                     last = db.last_module_execution_for_case(session, case_id, mname)
+                     if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                         failing_modules.append(mname)
+
+                 if failing_modules:
+                     logger.warning(f"Case {case_id} has failed modules {failing_modules}; marking local execution {execution_id} as error and skipping run")
+                     db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_ERROR)
+                 else:
+                     # Prepare a dedicated results folder copying the current working directory (with module changes)
+                     execution_extraction_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
+                     os.makedirs(execution_extraction_path, exist_ok=True)
+                     workdir = _ensure_case_workdir(case_id)
+                     _copy_tree(workdir, execution_extraction_path)
+                     # Run SDDP
+                     run_local_case(execution_id, execution_extraction_path)
+
+         except Exception as e:
+             # Use safe logging in case execution_id isn't available
+             try:
+                 logger.exception(f"Error processing {execution_id}: {e}")
+             except Exception:
+                 logger.exception(f"Error processing item: {e}")
+         finally:
+             _execution_queue.task_done()
+
+
+ threading.Thread(target=process_local_execution_queue, daemon=True).start()
+
+
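+ # Background worker: drains _cloud_upload_queue, snapshots the case workdir
+ # into a temp folder, submits it to PSR Cloud, and records the returned
+ # repository id.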
+ def process_cloud_execution_queue():
+     global client
+     global session
+     while True:
+         cloud_upload_id, case_id = _cloud_upload_queue.get()
+         try:
+             logger.info(f"Processing {cloud_upload_id}...")
+             # Wait for running modules to finish; abort if any failed
+             wait_loops = 0
+             while db.any_running_modules_for_case(session, case_id):
+                 logger.info(f"Case {case_id} has running modules; waiting before cloud run...")
+                 time.sleep(5)
+                 wait_loops += 1
+                 if wait_loops > 240:  # ~20 minutes
+                     break
+             # Block if the last execution of any distinct module failed
+             failing_modules = []
+             for mname in db.get_distinct_module_names_for_case(session, case_id):
+                 last = db.last_module_execution_for_case(session, case_id, mname)
+                 if last and last.status == db.LOCAL_EXECUTION_ERROR:
+                     failing_modules.append(mname)
+             if failing_modules:
+                 logger.warning(f"Case {case_id} has failing modules in last execution {failing_modules}; skipping cloud run for upload {cloud_upload_id}")
+                 # Nothing else to do; do not run in the cloud
+                 continue
+             # Prepare temp folder by copying current working directory (with module changes)
+             tmp_extraction_path = os.path.join(TEMPORARY_UPLOAD_FOLDER, cloud_upload_id)
+             workdir = _ensure_case_workdir(case_id)
+             if os.path.isdir(tmp_extraction_path):
+                 shutil.rmtree(tmp_extraction_path, ignore_errors=True)
+             os.makedirs(tmp_extraction_path, exist_ok=True)
+             _copy_tree(workdir, tmp_extraction_path)
+
+             # Run SDDP
+             repository_id = run_cloud_case(cloud_upload_id, tmp_extraction_path)
+
+             # Delete the extraction path folder recursively
+             shutil.rmtree(tmp_extraction_path)
+
+             execution_extraction_path = os.path.join(CLOUD_RESULTS_FOLDER, repository_id)
+             if os.path.isdir(execution_extraction_path):
+                 shutil.rmtree(execution_extraction_path, ignore_errors=True)
+             os.makedirs(execution_extraction_path, exist_ok=True)
+             _copy_tree(workdir, execution_extraction_path)
+
+             db.register_cloud_execution(session, repository_id, cloud_upload_id, case_id)
+
+         except Exception as e:
+             logger.exception(f"Error processing {cloud_upload_id}: {e}")
+         finally:
+             _cloud_upload_queue.task_done()
+
+
+ threading.Thread(target=process_cloud_execution_queue, daemon=True).start()
+
+
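+ # Background poller: tracks running cloud executions, downloads results for
+ # finished ones, and fetches only .log files for failed ones.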
+ def monitor_cloud_runs():
+     global client
+     global session
+
+     # Wait for the cloud upload queue to drain before polling
+     while not _cloud_upload_queue.empty():
+         time.sleep(10)
+
+     while True:
+         if session:
+             # Check running executions
+             for cloud_execution in db.get_runing_cloud_executions(session):
+                 case_id = cloud_execution.repository_id
+                 logger.info(f"Checking status of {case_id}...")
+                 status, status_msg = client.get_status(case_id)
+                 if status in psr.cloud.FAULTY_TERMINATION_STATUS:
+                     logger.warning(f"Execution {case_id} finished with errors")
+                     db.update_cloud_execution_status(session, case_id, db.CloudStatus.ERROR.value)
+                 elif status == psr.cloud.ExecutionStatus.SUCCESS:
+                     logger.info(f"Execution {case_id} finished successfully")
+                     db.update_cloud_execution_status(session, case_id, db.CloudStatus.FINISHED.value)
+
+             # Download finished executions
+             for cloud_execution in db.get_cloud_finished_executions(session):
+                 repository_id = cloud_execution.repository_id
+                 logger.info(f"Downloading results for {repository_id}...")
+                 result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
+                 client.download_results(repository_id, result_path)
+                 db.update_cloud_execution_status(session, repository_id, db.CloudStatus.RESULTS_AVAILABLE.value)
+
+             # Download logs of failed executions
+             for cloud_execution in db.get_cloud_failed_executions(session):
+                 try:
+                     repository_id = cloud_execution.repository_id
+                     logger.info(f"Downloading results for {repository_id}...")
+                     result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
+                     client.download_results(repository_id, result_path, extensions=['log'])
+                     db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
+                 except Exception as e:
+                     logger.exception(f"Error downloading results for {repository_id}: {e}")
+                     logger.warning("Forcing execution status to logs-available-with-error after download failure")
+                     db.update_cloud_execution_status(session, repository_id, db.CloudStatus.LOGS_AVAILABLE_ERROR.value)
+                     continue
+         else:
+             logger.info("Database not initialized. Retrying in 30s...")
+         time.sleep(30)
+
+
+ threading.Thread(target=monitor_cloud_runs, daemon=True).start()
+
+
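+ # POST /upload: accepts a zip archive under form field 'file', stores it as
+ # uploads/<case_id>.zip and registers the case. Example call (host and port
+ # are deployment-specific):
+ #   curl -F "file=@case.zip" http://<host>:<port>/upload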
+ @app.route('/upload', methods=['POST'])
+ def upload_file():
+     global session
+     if 'file' not in request.files:
+         return jsonify({'error': 'No file part'}), 400
+
+     file = request.files['file']
+     if file.filename == '':
+         return jsonify({'error': 'No selected file'}), 400
+
+     case_id = str(ulid.ULID())
+     zip_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+     file.save(zip_path)
+
+     checksum = get_file_checksum(zip_path)
+     db.register_case(session, case_id, checksum)
+
+     return jsonify({'case_id': case_id}), 200
+
+
+ # route to run an uploaded file
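+ # POST /run: expects form field 'case_id'; with cloud_execution=true the job
+ # is queued for PSR Cloud, otherwise it runs locally. Returns 409 if the last
+ # run of any module for the case failed. Example (deployment-specific URL):
+ #   curl -d "case_id=<case_id>" -d "cloud_execution=false" http://<host>:<port>/run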
+ @app.route('/run', methods=['POST'])
+ def run_endpoint():
+     global session
+     cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+     case_id = request.form.get('case_id')
+
+     if not case_id:
+         return jsonify({'error': 'Case ID not provided'}), 400
+
+     zip_case_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+     if not os.path.exists(zip_case_path):
+         return jsonify({'error': 'Upload file for this case ID not found'}), 404
+
+     # Pre-check: for each distinct module, if the last execution failed, block the run
+     failing_modules = []
+     for mname in db.get_distinct_module_names_for_case(session, case_id):
+         last = db.last_module_execution_for_case(session, case_id, mname)
+         if last and last.status == db.LOCAL_EXECUTION_ERROR:
+             failing_modules.append(mname)
+     if failing_modules:
+         return jsonify({'error': 'Case has failed modules in last execution', 'modules': failing_modules}), 409
+
+     if cloud_execution:
+         cloud_upload_id = str(ulid.ULID())
+         _cloud_upload_queue.put((cloud_upload_id, case_id))
+
+         db.register_cloud_upload(session, case_id, cloud_upload_id)
+
+         return jsonify({'case_id': case_id, 'cloud_upload_id': cloud_upload_id}), 200
+     else:
+         execution_id = str(ulid.ULID())
+         _execution_queue.put((execution_id, case_id))
+
+         db.register_local_execution(session, case_id, execution_id)
+         # Mark as running explicitly
+         db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
+
+         return jsonify({'case_id': case_id, 'execution_id': execution_id}), 200
+
+
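+ # POST /run_module: queues a single configured module run (form fields
+ # 'case_id' and 'module'/'module_name') for execution in the case workdir.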
+ @app.route('/run_module', methods=['POST'])
+ def run_module_endpoint():
+     global session
+     case_id = request.form.get('case_id')
+     module_name = request.form.get('module') or request.form.get('module_name')
+
+     if not case_id or not module_name:
+         return jsonify({'error': 'case_id and module are required'}), 400
+
+     # Validate case zip exists
+     zip_case_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+     workdir = os.path.join(UPLOADS_FOLDER, case_id)
+     if not os.path.exists(zip_case_path) and not os.path.isdir(workdir):
+         return jsonify({'error': 'Upload file or working directory for this case ID not found'}), 404
+
+     # Validate module exists in config
+     module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+     if not module_cfg or 'command' not in module_cfg:
+         return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+
+     execution_id = str(ulid.ULID())
+     _execution_queue.put({'type': 'module', 'execution_id': execution_id, 'case_id': case_id, 'module': module_name})
+     db.register_local_execution(session, case_id, execution_id, is_module=1, module=module_name)
+     db.update_local_execution_status(session, execution_id, db.LOCAL_EXECUTION_RUNNING)
+
+     return jsonify({'case_id': case_id, 'module': module_name, 'execution_id': execution_id}), 200
+
+
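+ # POST /upload_and_run: convenience endpoint combining /upload and /run in a
+ # single request.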
+ @app.route('/upload_and_run', methods=['POST'])
+ def upload_and_run_file():
+     global session
+     if 'file' not in request.files:
+         return jsonify({'error': 'No file part'}), 400
+
+     file = request.files['file']
+     if file.filename == '':
+         return jsonify({'error': 'No selected file'}), 400
+
+     cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+
+     case_id = str(ulid.ULID())
+     zip_path = os.path.join(UPLOADS_FOLDER, f"{case_id}.zip")
+     file.save(zip_path)
+     db.register_case(session, case_id, get_file_checksum(zip_path))
+
+     if cloud_execution:
+         cloud_upload_id = str(ulid.ULID())
+         _cloud_upload_queue.put((cloud_upload_id, case_id))
+         db.register_cloud_upload(session, case_id, cloud_upload_id)
+         return jsonify({'case_id': case_id, 'cloud_upload_id': cloud_upload_id}), 200
+     else:
+         execution_id = str(ulid.ULID())
+         _execution_queue.put((execution_id, case_id))
+         db.register_local_execution(session, case_id, execution_id)
+         return jsonify({'case_id': case_id, 'execution_id': execution_id}), 200
+
+
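+ # GET /status/<execution_id>: reports execution state; pass
+ # cloud_execution=true to look the ID up as a cloud upload.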
+ @app.route('/status/<execution_id>', methods=['GET'])
+ def get_status(execution_id):
+     """
+     Get the status of an execution
+     ---
+     tags:
+       - Execution
+     parameters:
+       - name: execution_id
+         in: path
+         type: string
+         required: true
+         description: The ID of the execution
+     responses:
+       200:
+         description: Execution status
+       404:
+         description: Execution ID not found
+     """
+     global client
+     global session
+     cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+
+     if cloud_execution:
+         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
+         if repository_id is None:
+             return jsonify({'error': 'Execution ID not found in Cloud'}), 404
+         status = db.get_cloud_execution_status(session, repository_id)
+         if status == db.CloudStatus.ERROR.value:
+             status_msg = 'Execution finished with errors. Only log files will be downloaded'
+         elif status == db.CloudStatus.RUNNING.value:
+             status_msg = 'Execution not finished yet'
+         elif status == db.CloudStatus.FINISHED.value:
+             status_msg = 'Execution finished, but download not yet started from Cloud server'
+         elif status == db.CloudStatus.RESULTS_AVAILABLE.value:
+             status_msg = 'Execution finished and results are available to download'
+         elif status == db.CloudStatus.LOGS_AVAILABLE_ERROR.value:
+             status_msg = 'Execution finished with errors and log files are available to download'
+         else:
+             status_msg = 'Unknown status'
+         logger.info(f"Cloud execution status for {execution_id} ({repository_id}): {status_msg}")
+         return jsonify({'status_id': status, 'status_msg': status_msg}), 200
+     else:
+         status = db.get_local_execution_status(session, execution_id)
+         if status == db.LOCAL_EXECUTION_ERROR:
+             status_msg = 'Execution finished with errors'
+         elif status != db.LOCAL_EXECUTION_FINISHED:
+             status_msg = 'Execution not finished yet'
+         else:
+             status_msg = 'Execution finished'
+         return jsonify({'status_id': status, 'status_msg': status_msg}), 200
+
+
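+ # GET /results/<execution_id>: lists result files once an execution has
+ # finished. Note the non-standard 401/402/403 codes below signal
+ # error/running/not-yet-downloaded states rather than their usual HTTP meanings.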
+ @app.route('/results/<execution_id>', methods=['GET'])
+ def get_results(execution_id: str):
+     global session
+     global client
+     cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+     logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
+
+     if cloud_execution:
+         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
+         if repository_id is None:
+             return jsonify({'error': 'Execution ID not found in Cloud'}), 404
+         status = db.get_cloud_execution_status(session, repository_id)
+
+         if status == db.CloudStatus.RUNNING.value:
+             return jsonify({'error': f'{repository_id} execution not finished yet'}), 402
+         elif status == db.CloudStatus.FINISHED.value:
+             return jsonify({'error': f'{repository_id} results not available yet'}), 403
+         else:
+             # List the downloaded result folder
+             result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
+             if not os.path.exists(result_path):
+                 return jsonify({'error': f'{repository_id} execution result folder not found'}), 404
+             result_files = os.listdir(result_path)
+             result_files = [f for f in result_files if os.path.isfile(os.path.join(result_path, f))]
+             return jsonify({'execution_id': repository_id, 'files': result_files}), 200
+     else:
+         status = db.get_local_execution_status(session, execution_id)
+         if status == db.LOCAL_EXECUTION_ERROR:
+             return jsonify({'error': 'Execution finished with errors'}), 401
+         if status != db.LOCAL_EXECUTION_FINISHED:
+             return jsonify({'error': 'Execution not finished yet'}), 402
+         result_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
+         if not os.path.exists(result_path):
+             return jsonify({'error': 'Execution result folder not found'}), 404
+         result_files = os.listdir(result_path)
+         return jsonify({'execution_id': execution_id, 'files': result_files}), 200
+
+
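+ # GET /module_log/<case_id>: returns the plain-text log of the last module
+ # run (or of a given ?module=<name>) inside the case workdir.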
+ @app.route('/module_log/<case_id>', methods=['GET'])
+ def get_module_log(case_id: str):
+     """Return the content of the module's fixed log file for the last module run of the case,
+     or for a specific module if provided as query parameter ?module=<name>.
+     """
+     global session
+     module_name = request.args.get('module') or request.args.get('module_name')
+
+     # Determine module and log file name
+     if not module_name:
+         last = db.last_module_execution_for_case(session, case_id)
+         if not last or not last.module:
+             return jsonify({'error': 'No module execution found for this case'}), 404
+         module_name = last.module
+     module_cfg = MODULES.get(module_name) if isinstance(MODULES, dict) else None
+     if not module_cfg:
+         return jsonify({'error': f"Module '{module_name}' not configured"}), 400
+     log_name = module_cfg.get('log_file', f"module_{module_name}.log")
+
+     workdir = os.path.join(UPLOADS_FOLDER, case_id)
+     if not os.path.isdir(workdir):
+         # Ensure workdir is created (may extract zip if needed)
+         workdir = _ensure_case_workdir(case_id)
+     log_path = os.path.join(workdir, log_name)
+     if not os.path.exists(log_path):
+         return jsonify({'error': 'Log file not found', 'module': module_name, 'log': log_name}), 404
+
+     try:
+         with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
+             content = f.read()
+         return content, 200, {'Content-Type': 'text/plain; charset=utf-8'}
+     except Exception as e:
+         return jsonify({'error': str(e)}), 500
+
+
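+ # GET /results/<execution_id>/<file>: streams one result file as an
+ # attachment. Note that <file> is joined into the results folder with only
+ # normpath applied, so it is not hardened against path traversal.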
+ @app.route('/results/<execution_id>/<file>', methods=['GET'])
+ def download_file(execution_id: str, file):
+     global session
+
+     cloud_execution = request.values.get('cloud_execution', 'false').lower() == 'true'
+     logger.info(f"Getting results for execution ID: {execution_id} (cloud_execution={cloud_execution})")
+
+     if cloud_execution:
+         repository_id = db.get_repository_id_from_cloud_upload_id(session, execution_id)
+         result_path = os.path.join(CLOUD_RESULTS_FOLDER, str(repository_id))
+     else:
+         result_path = os.path.join(LOCAL_RESULTS_FOLDER, execution_id)
+     if not os.path.exists(result_path):
+         if cloud_execution:
+             msg = f'{repository_id} execution result folder not found'
+         else:
+             msg = 'Execution result folder not found'
+         return jsonify({'error': msg}), 404
+
+     file_path = os.path.normpath(os.path.join(result_path, file)).replace("\\", "/")
+     if not os.path.exists(file_path):
+         return jsonify({'error': 'File not found'}), 404
+
+     try:
+         return send_file(file_path, download_name=file, as_attachment=True)
+     except Exception as e:
+         return jsonify({'error': str(e)}), 500
+
+
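+ # Entry point: initialize the database session and start the Flask app.
+ # 'settings', DEFAULT_HOST, DEFAULT_PORT and FLASK_DEBUG are presumably
+ # provided by psr.execqueue.config.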
+ if __name__ == '__main__':
+     logger.info("Starting server...")
+     session = initialize_db()
+
+     try:
+         app.run(host=settings.get("host", DEFAULT_HOST), debug=FLASK_DEBUG,
+                 port=settings.get("port", DEFAULT_PORT),
+                 threaded=True,
+                 use_reloader=False)
+     except Exception as e:
+         logger.exception(f"Error starting server: {e}")
+         sys.exit(1)
+