psr-factory 5.0.0b9__py3-none-win_amd64.whl → 5.0.0b12__py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- psr/cloud/cloud.py +15 -7
- psr/cloud/data.py +3 -1
- psr/cloud/version.py +1 -1
- psr/execqueue/client.py +113 -0
- psr/execqueue/config.py +44 -0
- psr/execqueue/db.py +220 -0
- psr/execqueue/server.py +383 -0
- psr/execqueue/watcher.py +124 -0
- psr/factory/__init__.py +1 -1
- psr/factory/api.py +48 -17
- psr/factory/factory.dll +0 -0
- psr/factory/factory.pmd +10 -0
- psr/factory/factory.pmk +14 -12
- psr/factory/factorylib.py +8 -4
- psr/factory/libcurl-x64.dll +0 -0
- psr/factory/samples/sddp_case01.py +8 -10
- psr/runner/runner.py +1 -1
- psr_factory-5.0.0b12.dist-info/METADATA +56 -0
- psr_factory-5.0.0b12.dist-info/RECORD +40 -0
- psr/factory/samples/sddp_sample_case01.py +0 -164
- psr/factory/samples/sddp_sample_case21.py +0 -241
- psr_factory-5.0.0b9.dist-info/METADATA +0 -110
- psr_factory-5.0.0b9.dist-info/RECORD +0 -37
- {psr_factory-5.0.0b9.dist-info → psr_factory-5.0.0b12.dist-info}/WHEEL +0 -0
- {psr_factory-5.0.0b9.dist-info → psr_factory-5.0.0b12.dist-info}/licenses/LICENSE.txt +0 -0
- {psr_factory-5.0.0b9.dist-info → psr_factory-5.0.0b12.dist-info}/top_level.txt +0 -0
psr/cloud/cloud.py
CHANGED
@@ -139,7 +139,6 @@ class Client:
|
|
139
139
|
self._selected_cluster = kwargs.get("cluster", _DEFAULT_CLUSTER["pretty_name"])
|
140
140
|
self._import_desktop = kwargs.get("import_desktop", True)
|
141
141
|
self._debug_mode = kwargs.get("debug", False)
|
142
|
-
self._dry_run = kwargs.get("dry_run", False)
|
143
142
|
self._timeout = kwargs.get("timeout", None)
|
144
143
|
self._python_client = kwargs.get("python_client", False)
|
145
144
|
|
@@ -546,6 +545,7 @@ class Client:
|
|
546
545
|
:return: True if the requested status is reached, False if timeout occurs.
|
547
546
|
"""
|
548
547
|
status = None
|
548
|
+
last_status = None
|
549
549
|
start_time = time()
|
550
550
|
original_quiet_flag = self._quiet
|
551
551
|
original_verbose_flag = self._verbose
|
@@ -561,7 +561,10 @@ class Client:
|
|
561
561
|
f"Timeout reached while waiting for status {requested_status}"
|
562
562
|
)
|
563
563
|
return False
|
564
|
-
status, _ = self.get_status(case_id)
|
564
|
+
status, _ = self.get_status(case_id, quiet=True)
|
565
|
+
if last_status != status:
|
566
|
+
self._logger.info(f"Status: {STATUS_MAP_TEXT[status]}")
|
567
|
+
last_status = status
|
565
568
|
sleep(20)
|
566
569
|
finally:
|
567
570
|
self._quiet = original_quiet_flag
|
@@ -581,7 +584,7 @@ class Client:
|
|
581
584
|
os.rmdir(dir_path)
|
582
585
|
|
583
586
|
@thread_safe()
|
584
|
-
def run_case(self, case: "Case", **kwargs) -> int:
|
587
|
+
def run_case(self, case: "Case", dry_run=False, **kwargs) -> int:
|
585
588
|
self._validate_case(case)
|
586
589
|
instance_type_map = self._get_instance_type_map()
|
587
590
|
instance_type_id = next(
|
@@ -614,7 +617,6 @@ class Client:
|
|
614
617
|
"s3Dados": "",
|
615
618
|
"nproc": case.number_of_processes,
|
616
619
|
"repositorioId": "0",
|
617
|
-
"repositorioPai": case.parent_case_id,
|
618
620
|
"instanciaTipo": instance_type_id,
|
619
621
|
"validacaoModelo": "True",
|
620
622
|
"validacaoUsuario": "False",
|
@@ -632,6 +634,11 @@ class Client:
|
|
632
634
|
"arquivoSaida": case.mymodel_output_file,
|
633
635
|
}
|
634
636
|
|
637
|
+
if isinstance(case.parent_case_id, list) and case.parent_case_id is not None:
|
638
|
+
parameters["repositoriosPais"] = ",".join(map(str, case.parent_case_id))
|
639
|
+
else:
|
640
|
+
parameters["repositorioPai"] = case.parent_case_id
|
641
|
+
|
635
642
|
if case.budget:
|
636
643
|
parameters["budget"] = case.budget
|
637
644
|
if case.upload_only is not None:
|
@@ -639,7 +646,7 @@ class Client:
|
|
639
646
|
|
640
647
|
xml_content = create_case_xml(parameters)
|
641
648
|
|
642
|
-
if
|
649
|
+
if dry_run:
|
643
650
|
return xml_content
|
644
651
|
|
645
652
|
if self._python_client:
|
@@ -675,7 +682,7 @@ class Client:
|
|
675
682
|
|
676
683
|
return case_id
|
677
684
|
|
678
|
-
def get_status(self, case_id: int) -> tuple["ExecutionStatus", str]:
|
685
|
+
def get_status(self, case_id: int, quiet=False) -> tuple["ExecutionStatus", str]:
|
679
686
|
delete_xml = not self._debug_mode
|
680
687
|
xml_content = ""
|
681
688
|
with CreateTempFile(
|
@@ -717,7 +724,8 @@ class Client:
|
|
717
724
|
f"Contact PSR support at psrcloud@psr-inc.com with following data:\n\n{xml_str}\n\n"
|
718
725
|
)
|
719
726
|
|
720
|
-
|
727
|
+
if not quiet:
|
728
|
+
self._logger.info(f"Status: {STATUS_MAP_TEXT[status]}")
|
721
729
|
return status, STATUS_MAP_TEXT[status]
|
722
730
|
|
723
731
|
def list_download_files(self, case_id: int) -> List[dict]:
|
psr/cloud/data.py
CHANGED
@@ -87,7 +87,9 @@ class Case:
|
|
87
87
|
|
88
88
|
self.id: Optional[int] = kwargs.get("id", None)
|
89
89
|
self.user: Optional[str] = kwargs.get("user", None)
|
90
|
-
self.parent_case_id: Optional[int] = kwargs.get(
|
90
|
+
self.parent_case_id: Optional[Union[int, list]] = kwargs.get(
|
91
|
+
"parent_case_id", None
|
92
|
+
)
|
91
93
|
self.execution_date: Optional[datetime] = kwargs.get("execution_date", None)
|
92
94
|
self.budget: Optional[str] = kwargs.get("budget", None)
|
93
95
|
if self.budget is not None:
|
psr/cloud/version.py
CHANGED
psr/execqueue/client.py
ADDED
@@ -0,0 +1,113 @@
|
|
1
|
+
import os
|
2
|
+
import zipfile
|
3
|
+
import requests
|
4
|
+
from typing import List, Optional
|
5
|
+
|
6
|
+
|
7
|
+
def zip_directory(directory_path, output_zip):
    """Compress every file under *directory_path* into *output_zip*.

    Entries are stored deflate-compressed with paths relative to
    *directory_path* (empty directories are not recorded).
    """
    with zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED) as archive:
        for current_dir, _dirs, filenames in os.walk(directory_path):
            for name in filenames:
                absolute_path = os.path.join(current_dir, name)
                relative_name = os.path.relpath(absolute_path, start=directory_path)
                archive.write(absolute_path, arcname=relative_name)
|
15
|
+
|
16
|
+
|
17
|
+
def upload_case_file(zip_path, server_url):
    """Upload a zip file to the server.

    Returns the server-assigned case id on success, None on failure.
    """
    file_name = os.path.basename(zip_path)
    with open(zip_path, 'rb') as handle:
        response = requests.post(
            f"{server_url}/upload",
            files={'file': (file_name, handle)},
        )

    if response.status_code != 200:
        print("Upload failed:", response.text)
        return None

    payload = response.json()
    print("Upload successful!")
    print("Case ID:", payload.get('case_id'))
    return payload.get('case_id')
|
30
|
+
|
31
|
+
|
32
|
+
def run_case(case_id: str, server_url: str, cloud_execution: bool = False) -> Optional[str]:
    """Add a case to the execution queue. For server-local run,
    returns the execution id. For cloud run, returns the cloud upload id."""
    form = {"case_id": case_id, 'cloud_execution': cloud_execution}
    response = requests.post(f"{server_url}/run", data=form)

    if response.status_code != 200:
        print("Upload failed:", response.text)
        return None

    payload = response.json()
    if cloud_execution:
        print("Cloud execution queued!")
        print("Cloud upload ID:", payload.get('cloud_upload_id'))
        return payload.get('cloud_upload_id')

    print("Added to queue successfully!")
    print("Execution ID:", payload.get('execution_id'))
    return payload.get('execution_id')
|
50
|
+
|
51
|
+
|
52
|
+
def upload_and_run_file(zip_path: str, server_url: str, cloud_execution: bool = False):
    """Upload a zip file to the server and queue it for execution.

    Returns the cloud upload id (cloud run) or the local execution id
    (server-local run); None on failure.
    """
    with open(zip_path, 'rb') as handle:
        response = requests.post(
            f"{server_url}/upload_and_run",
            files={'file': (os.path.basename(zip_path), handle)},
            data={'cloud_execution': cloud_execution},
        )

    if response.status_code != 200:
        print("Upload failed:", response.text)
        return None

    print("Upload successful! Waiting for execution.")
    payload = response.json()
    if cloud_execution:
        print("Cloud upload ID:", payload.get('cloud_upload_id'))
        return payload.get('cloud_upload_id')
    print("Local execution ID:", payload.get('execution_id'))
    return payload.get('execution_id')
|
71
|
+
|
72
|
+
|
73
|
+
def get_execution_status(execution_id: str, server_url: str, cloud_execution: bool = False, return_status_id: bool = False) -> Optional[str]:
    """Get the status of an execution.

    Returns the status text, or the numeric status id when
    *return_status_id* is set; None on failure.
    """
    print("Getting status for execution ID:", execution_id)
    # NOTE(review): the flags are sent in the body of a GET request; the
    # server must read form data for this to work — confirm before
    # switching to query-string parameters.
    form = {'cloud_execution': cloud_execution, 'return_status_id': return_status_id}
    response = requests.get(f"{server_url}/status/{execution_id}", data=form)

    if response.status_code != 200:
        print("Failed to get status:", response.text)
        return None

    payload = response.json()
    if return_status_id:
        print("Status ID:", payload.get('status_id'))
        return payload.get('status_id')
    print("Status:", payload.get('status'))
    return payload.get('status')
|
88
|
+
|
89
|
+
def get_results(execution_id, server_url, cloud_execution=False) -> Optional[List[str]]:
    """List the result files of an execution.

    Returns the list of file names reported by the server, None on failure.
    """
    response = requests.get(
        f"{server_url}/results/{execution_id}",
        data={'cloud_execution': cloud_execution},
    )

    if response.status_code != 200:
        print("Download failed:", response.text)
        return None

    print("Results downloaded successfully!")
    result_files = response.json().get('files')
    print("Files:", result_files)
    return result_files
|
101
|
+
|
102
|
+
|
103
|
+
def download_execution_file(execution_id: str, server_url: str, file: str, download_path: str, cloud_execution: bool = False):
    """Download one result file of an execution into *download_path*.

    :param execution_id: local execution id (or cloud id when
        *cloud_execution* is set).
    :param server_url: base URL of the execqueue server.
    :param file: name of the result file to fetch.
    :param download_path: directory to write the file into; created if
        it does not exist.
    :param cloud_execution: whether the execution ran on PSR Cloud.
    """
    data = {'cloud_execution': cloud_execution}
    response = requests.get(f"{server_url}/results/{execution_id}/{file}", data=data)

    if response.status_code == 200:
        # Resolves the old TODO: ensure the target directory exists before
        # writing, instead of failing with FileNotFoundError.
        os.makedirs(download_path, exist_ok=True)
        with open(os.path.join(download_path, file), 'wb') as f:
            f.write(response.content)
    else:
        print("Download failed:", response.text)
|
psr/execqueue/config.py
ADDED
@@ -0,0 +1,44 @@
|
|
1
|
+
"""Configuration for the PSR Factory ExecQueue server.

Reads ``server_settings.toml`` from the current working directory at import
time and exposes the resulting settings as module-level constants.
"""
import os
import tomllib

__version__ = "0.3.0"
_app_name = "PSR Factory ExecQueue"

# Default TCP port the server listens on.
DEFAULT_PORT = 5000
FLASK_DEBUG = False

# Settings file is resolved relative to the process working directory.
_SETTINGS_FILE_PATH = "server_settings.toml"

# Default SDDP installation path, per platform.
if os.name == 'nt':
    _DEFAULT_SDDP_PATH = r"C:/PSR/Sddp17.3"
else:
    _DEFAULT_SDDP_PATH = "/opt/psr/sddp"
DEFAULT_CLUSTER_NAME = "server"
DEFAULT_PSRCLOUD_CLUSTER = "external"
DEFAULT_PSRCLOUD_CLUSTER_URL = ""


# Read toml settings file (required: raises FileNotFoundError when absent).
with open(os.path.join(os.getcwd(), _SETTINGS_FILE_PATH), 'rb') as f:
    settings = tomllib.load(f)


sddp_path = settings.get("sddp_path", _DEFAULT_SDDP_PATH)

cluster_name = settings.get("cluster_name", DEFAULT_CLUSTER_NAME)
psrcloud_cluster = settings.get("psrcloud_cluster", DEFAULT_PSRCLOUD_CLUSTER)
# BUG FIX: this previously read the "psrcloud_cluster" key again, so the
# URL could never be configured from the settings file.
psrcloud_cluster_url = settings.get("psrcloud_cluster_url", DEFAULT_PSRCLOUD_CLUSTER_URL)

# Base server data storage path.
STORAGE_PATH = settings.get("storage_path", os.path.join(os.getcwd(), 'serverdata'))

# Where uploaded (received) cases will be stored.
UPLOADS_FOLDER = os.path.join(STORAGE_PATH, 'uploads')

# Where results of local runs will be stored.
LOCAL_RESULTS_FOLDER = os.path.join(STORAGE_PATH, 'local_results')

# Where results of cloud runs will be stored.
CLOUD_RESULTS_FOLDER = os.path.join(STORAGE_PATH, 'cloud_results')

# Where temporary extracted case files will be stored
TEMPORARY_UPLOAD_FOLDER = os.path.join(STORAGE_PATH, 'tmp')
|
44
|
+
|
psr/execqueue/db.py
ADDED
@@ -0,0 +1,220 @@
|
|
1
|
+
import datetime
|
2
|
+
from enum import Enum
|
3
|
+
from typing import (
|
4
|
+
List,
|
5
|
+
Optional
|
6
|
+
)
|
7
|
+
|
8
|
+
from sqlalchemy import (
|
9
|
+
create_engine,
|
10
|
+
Column,
|
11
|
+
Integer,
|
12
|
+
String,
|
13
|
+
ForeignKey,
|
14
|
+
Text,
|
15
|
+
TIMESTAMP,
|
16
|
+
)
|
17
|
+
from sqlalchemy.ext.declarative import declarative_base
|
18
|
+
from sqlalchemy.orm import relationship, sessionmaker
|
19
|
+
|
20
|
+
from psr.execqueue.config import *
|
21
|
+
|
22
|
+
class CloudStatus(Enum):
    """Status codes for executions running on PSR Cloud.

    Values presumably mirror the status ids reported by the cloud
    service (note: 2 is unused here) — TODO confirm against the
    service's status table.
    """
    RUNNING = 1
    FINISHED = 3
    ERROR = 4
    RESULTS_AVAILABLE = 5
|
27
|
+
|
28
|
+
|
29
|
+
# SQLite database file name, created inside STORAGE_PATH (see get_db_path).
DB_NAME = "app.db"


# Status codes for local (server-side) executions; this scheme is distinct
# from CloudStatus and is validated in update_local_execution_status().
LOCAL_EXECUTION_RUNNING = 0
LOCAL_EXECUTION_FINISHED = 1
LOCAL_EXECUTION_ERROR = 2
|
35
|
+
|
36
|
+
|
37
|
+
def get_db_path():
    """Return the full path of the SQLite database file in the storage folder."""
    db_file = os.path.join(STORAGE_PATH, DB_NAME)
    return db_file
|
39
|
+
|
40
|
+
|
41
|
+
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
|
42
|
+
|
43
|
+
|
44
|
+
class Case(Base):
    """An uploaded case (input data set) stored on the server."""

    __tablename__ = 'cases'

    # 26-character string id assigned at upload time — presumably a ULID;
    # TODO confirm with the id generator in the server module.
    case_id = Column(String(26), primary_key=True)
    upload_time = Column(TIMESTAMP, default=datetime.datetime.utcnow)
    # Checksum of the uploaded archive (algorithm defined by the uploader).
    checksum = Column(Text)
    # 0/1 flag marking the case as removed (soft delete).
    removed = Column(Integer, default=0)

    local_executions = relationship("LocalExecution", back_populates="case")
    cloud_uploads = relationship("CloudUpload", back_populates="case")
    cloud_executions = relationship("CloudExecution", back_populates="case")
|
55
|
+
|
56
|
+
|
57
|
+
class LocalExecution(Base):
    """A run of a case on the local server."""

    __tablename__ = 'local_executions'

    execution_id = Column(String(26), primary_key=True)
    case_id = Column(String(26), ForeignKey('cases.case_id'))
    start_time = Column(TIMESTAMP, default=datetime.datetime.utcnow)
    # Set by update_local_execution_status() when the run ends.
    finish_time = Column(TIMESTAMP)
    # BUG FIX: the default was CloudStatus.RUNNING.value (== 1), which in
    # the local status scheme means LOCAL_EXECUTION_FINISHED, so freshly
    # registered executions appeared already finished. New rows now start
    # in the RUNNING state (0).
    status = Column(Integer, default=LOCAL_EXECUTION_RUNNING)

    case = relationship("Case", back_populates="local_executions")
|
67
|
+
|
68
|
+
|
69
|
+
class CloudUpload(Base):
    """A submission of a case to PSR Cloud."""

    __tablename__ = 'cloud_uploads'

    # 26-character string id for this upload (same format as Case.case_id).
    cloud_upload_id = Column(String(26), primary_key=True)
    case_id = Column(String(26), ForeignKey('cases.case_id'))
    start_time = Column(TIMESTAMP, default=datetime.datetime.utcnow)

    case = relationship("Case", back_populates="cloud_uploads")
    cloud_executions = relationship("CloudExecution", back_populates="cloud_upload")
|
78
|
+
|
79
|
+
|
80
|
+
class CloudExecution(Base):
    """A cloud-side run, keyed by the PSR Cloud repository id."""

    __tablename__ = 'cloud_executions'

    repository_id = Column(Integer, primary_key=True)
    cloud_upload_id = Column(String(26), ForeignKey('cloud_uploads.cloud_upload_id'))
    case_id = Column(String(26), ForeignKey('cases.case_id'))
    start_time = Column(TIMESTAMP, default=datetime.datetime.utcnow)
    # 0/1 flag — presumably set once results have been archived; TODO confirm.
    archived = Column(Integer, default=0)
    # Holds CloudStatus member values (see register_cloud_execution).
    status = Column(Integer, default=0)

    case = relationship("Case", back_populates="cloud_executions")
    cloud_upload = relationship("CloudUpload", back_populates="cloud_executions")
|
92
|
+
|
93
|
+
|
94
|
+
def initialize():
    """Open the SQLite database, creating schema on first use.

    Returns a ``(session, engine)`` tuple.
    """
    db_path = get_db_path()
    first_run = not os.path.exists(db_path)
    engine = create_engine(f'sqlite:///{db_path}')
    session = sessionmaker(bind=engine)()

    if first_run:
        Base.metadata.create_all(engine)
        _first_time_setup(session)

    return session, engine
|
107
|
+
|
108
|
+
|
109
|
+
def close(session):
    """Close the given SQLAlchemy session."""
    session.close()
|
111
|
+
|
112
|
+
|
113
|
+
def _first_time_setup(session):
    """Hook for seeding a freshly created database; currently a no-op."""
    pass
|
115
|
+
|
116
|
+
|
117
|
+
def register_case(session, case_id, checksum):
    """Insert a new Case row and return it."""
    new_case = Case(
        case_id=case_id,
        checksum=checksum,
        upload_time=datetime.datetime.utcnow(),
    )
    session.add(new_case)
    session.commit()
    return new_case
|
127
|
+
|
128
|
+
|
129
|
+
def register_local_execution(session, case_id: str, execution_id: str):
    """Attach a new LocalExecution to an existing case and return it."""
    parent_case = session.query(Case).filter(Case.case_id == case_id).first()
    execution = LocalExecution(
        execution_id=execution_id,
        case_id=case_id,
        start_time=datetime.datetime.utcnow(),
    )
    # NOTE(review): assumes the case was registered first; an unknown
    # case_id raises AttributeError here — confirm callers guarantee this.
    parent_case.local_executions.append(execution)
    session.commit()
    return execution
|
138
|
+
|
139
|
+
|
140
|
+
def get_case_id_from_execution_id(session, execution_id: str) -> Optional[str]:
    """Case id owning the given local execution, or None if unknown."""
    row = (
        session.query(LocalExecution)
        .filter(LocalExecution.execution_id == execution_id)
        .first()
    )
    if row is None:
        return None
    return row.case_id
|
143
|
+
|
144
|
+
|
145
|
+
def update_local_execution_status(session, execution_id: str, status: int):
    """Set the status of a local execution.

    :param status: one of LOCAL_EXECUTION_RUNNING / _FINISHED / _ERROR.
    :returns: True when the row was updated, False when no execution with
        *execution_id* exists (previously this case returned None).
    :raises ValueError: for a status value outside the local scheme.
    """
    # Validate before touching the database, so an invalid status is
    # rejected regardless of whether the row exists.
    if status not in (LOCAL_EXECUTION_RUNNING, LOCAL_EXECUTION_FINISHED,
                      LOCAL_EXECUTION_ERROR):
        raise ValueError("Wrong status for update.")
    local_execution = (
        session.query(LocalExecution)
        .filter(LocalExecution.execution_id == execution_id)
        .first()
    )
    if local_execution is None:
        return False
    local_execution.status = status
    # Stamp the finish time when the execution reaches a terminal state.
    if status in (LOCAL_EXECUTION_FINISHED, LOCAL_EXECUTION_ERROR):
        local_execution.finish_time = datetime.datetime.utcnow()
    session.commit()
    return True
|
156
|
+
|
157
|
+
def update_cloud_execution_status(session, repository_id: int, status: int):
    """Set the status of a cloud execution.

    :param status: one of the CloudStatus member *values* (plain int).
    :returns: True when the row was updated, False when no execution with
        *repository_id* exists.
    :raises ValueError: for a value that is not a CloudStatus member value.
    """
    # BUG FIX: `status not in CloudStatus` with a plain int raises
    # TypeError on Python < 3.12; compare against the member values instead.
    if status not in {member.value for member in CloudStatus}:
        raise ValueError("Wrong status for update.")
    cloud_execution = (
        session.query(CloudExecution)
        .filter(CloudExecution.repository_id == repository_id)
        .first()
    )
    if cloud_execution is None:
        return False
    cloud_execution.status = status
    session.commit()
    return True
|
165
|
+
|
166
|
+
def get_local_execution_status(session, execution_id: str) -> Optional[int]:
    """Status of the given local execution, or None if unknown."""
    row = (
        session.query(LocalExecution)
        .filter(LocalExecution.execution_id == execution_id)
        .first()
    )
    if row is None:
        return None
    return row.status
|
169
|
+
|
170
|
+
|
171
|
+
def register_cloud_upload(session, case_id: str, cloud_upload_id: str):
    """Attach a new CloudUpload to an existing case and return it."""
    parent_case = session.query(Case).filter(Case.case_id == case_id).first()
    upload = CloudUpload(
        cloud_upload_id=cloud_upload_id,
        case_id=case_id,
        start_time=datetime.datetime.utcnow(),
    )
    # NOTE(review): assumes the case was registered first; an unknown
    # case_id raises AttributeError here — confirm callers guarantee this.
    parent_case.cloud_uploads.append(upload)
    session.commit()
    return upload
|
180
|
+
|
181
|
+
|
182
|
+
def register_cloud_execution(session, repository_id: int, cloud_upload_id: str, case_id: str):
    """Attach a new CloudExecution (status RUNNING) to a cloud upload."""
    upload = (
        session.query(CloudUpload)
        .filter(CloudUpload.cloud_upload_id == cloud_upload_id)
        .first()
    )
    execution = CloudExecution(
        repository_id=repository_id,
        cloud_upload_id=cloud_upload_id,
        case_id=case_id,
        start_time=datetime.datetime.utcnow(),
        status=CloudStatus.RUNNING.value,
    )
    upload.cloud_executions.append(execution)
    session.commit()
    return execution
|
193
|
+
|
194
|
+
|
195
|
+
def get_case_id_from_cloud_execution_id(session, repository_id: int) -> Optional[str]:
    """Case id owning the cloud execution with *repository_id*, or None."""
    row = (
        session.query(CloudExecution)
        .filter(CloudExecution.repository_id == repository_id)
        .first()
    )
    if row is None:
        return None
    return row.case_id
|
198
|
+
|
199
|
+
|
200
|
+
def get_case_id_from_repository_id(session, repository_id: int) -> Optional[str]:
    """Case id owning the cloud execution with *repository_id*, or None.

    Kept as a separate public name for existing callers; the query is
    identical to get_case_id_from_cloud_execution_id, so delegate to it
    instead of duplicating the lookup.
    """
    return get_case_id_from_cloud_execution_id(session, repository_id)
|
203
|
+
|
204
|
+
def get_repository_id_from_cloud_upload_id(session, cloud_upload_id: str) -> Optional[int]:
    """Repository id of the first cloud execution of an upload, or None."""
    row = (
        session.query(CloudExecution)
        .filter(CloudExecution.cloud_upload_id == cloud_upload_id)
        .first()
    )
    if row is None:
        return None
    return row.repository_id
|
207
|
+
|
208
|
+
def get_repository_ids_from_case_id(session, case_id: str) -> List[int]:
    """Repository ids of every cloud execution of the given case."""
    rows = (
        session.query(CloudExecution)
        .filter(CloudExecution.case_id == case_id)
        .all()
    )
    return [row.repository_id for row in rows]
|
211
|
+
|
212
|
+
def get_runing_cloud_executions(session) -> List[CloudExecution]:
    """All cloud executions currently in the RUNNING state.

    NOTE(review): name misspells "running"; kept for caller compatibility.
    """
    running = CloudStatus.RUNNING.value
    return session.query(CloudExecution).filter(CloudExecution.status == running).all()
|
214
|
+
|
215
|
+
def get_cloud_execution_status(session, repository_id: int) -> Optional[int]:
    """Status value of the given cloud execution, or None if unknown."""
    row = (
        session.query(CloudExecution)
        .filter(CloudExecution.repository_id == repository_id)
        .first()
    )
    if row is None:
        return None
    return row.status
|
218
|
+
|
219
|
+
def get_cloud_finished_executions(session) -> List[CloudExecution]:
    """All cloud executions in the FINISHED state."""
    finished = CloudStatus.FINISHED.value
    return session.query(CloudExecution).filter(CloudExecution.status == finished).all()
|