argus-alm 0.12.9__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- argus/client/base.py +1 -1
- argus/client/driver_matrix_tests/cli.py +2 -2
- argus/client/driver_matrix_tests/client.py +1 -1
- argus/client/generic/cli.py +2 -2
- argus/client/generic_result.py +3 -2
- argus/client/sct/client.py +3 -3
- argus/client/sirenada/client.py +1 -1
- {argus_alm-0.12.9.dist-info → argus_alm-0.13.0.dist-info}/METADATA +2 -4
- argus_alm-0.13.0.dist-info/RECORD +20 -0
- argus/backend/.gitkeep +0 -0
- argus/backend/cli.py +0 -41
- argus/backend/controller/__init__.py +0 -0
- argus/backend/controller/admin.py +0 -20
- argus/backend/controller/admin_api.py +0 -354
- argus/backend/controller/api.py +0 -529
- argus/backend/controller/auth.py +0 -67
- argus/backend/controller/client_api.py +0 -108
- argus/backend/controller/main.py +0 -274
- argus/backend/controller/notification_api.py +0 -72
- argus/backend/controller/notifications.py +0 -13
- argus/backend/controller/team.py +0 -126
- argus/backend/controller/team_ui.py +0 -18
- argus/backend/controller/testrun_api.py +0 -482
- argus/backend/controller/view_api.py +0 -162
- argus/backend/db.py +0 -100
- argus/backend/error_handlers.py +0 -21
- argus/backend/events/event_processors.py +0 -34
- argus/backend/models/__init__.py +0 -0
- argus/backend/models/result.py +0 -138
- argus/backend/models/web.py +0 -389
- argus/backend/plugins/__init__.py +0 -0
- argus/backend/plugins/core.py +0 -225
- argus/backend/plugins/driver_matrix_tests/controller.py +0 -63
- argus/backend/plugins/driver_matrix_tests/model.py +0 -421
- argus/backend/plugins/driver_matrix_tests/plugin.py +0 -22
- argus/backend/plugins/driver_matrix_tests/raw_types.py +0 -62
- argus/backend/plugins/driver_matrix_tests/service.py +0 -60
- argus/backend/plugins/driver_matrix_tests/udt.py +0 -42
- argus/backend/plugins/generic/model.py +0 -79
- argus/backend/plugins/generic/plugin.py +0 -16
- argus/backend/plugins/generic/types.py +0 -13
- argus/backend/plugins/loader.py +0 -40
- argus/backend/plugins/sct/controller.py +0 -185
- argus/backend/plugins/sct/plugin.py +0 -38
- argus/backend/plugins/sct/resource_setup.py +0 -178
- argus/backend/plugins/sct/service.py +0 -491
- argus/backend/plugins/sct/testrun.py +0 -272
- argus/backend/plugins/sct/udt.py +0 -101
- argus/backend/plugins/sirenada/model.py +0 -113
- argus/backend/plugins/sirenada/plugin.py +0 -17
- argus/backend/service/admin.py +0 -27
- argus/backend/service/argus_service.py +0 -688
- argus/backend/service/build_system_monitor.py +0 -188
- argus/backend/service/client_service.py +0 -122
- argus/backend/service/event_service.py +0 -18
- argus/backend/service/jenkins_service.py +0 -240
- argus/backend/service/notification_manager.py +0 -150
- argus/backend/service/release_manager.py +0 -230
- argus/backend/service/results_service.py +0 -317
- argus/backend/service/stats.py +0 -540
- argus/backend/service/team_manager_service.py +0 -83
- argus/backend/service/testrun.py +0 -559
- argus/backend/service/user.py +0 -307
- argus/backend/service/views.py +0 -258
- argus/backend/template_filters.py +0 -27
- argus/backend/tests/__init__.py +0 -0
- argus/backend/tests/argus_web.test.yaml +0 -39
- argus/backend/tests/conftest.py +0 -44
- argus/backend/tests/results_service/__init__.py +0 -0
- argus/backend/tests/results_service/test_best_results.py +0 -70
- argus/backend/util/common.py +0 -65
- argus/backend/util/config.py +0 -38
- argus/backend/util/encoders.py +0 -41
- argus/backend/util/logsetup.py +0 -81
- argus/backend/util/module_loaders.py +0 -30
- argus/backend/util/send_email.py +0 -91
- argus/client/generic_result_old.py +0 -143
- argus/db/.gitkeep +0 -0
- argus/db/argus_json.py +0 -14
- argus/db/cloud_types.py +0 -125
- argus/db/config.py +0 -135
- argus/db/db_types.py +0 -139
- argus/db/interface.py +0 -370
- argus/db/testrun.py +0 -740
- argus/db/utils.py +0 -15
- argus_alm-0.12.9.dist-info/RECORD +0 -96
- /argus/{backend → common}/__init__.py +0 -0
- /argus/{backend/util → common}/enums.py +0 -0
- /argus/{backend/plugins/sct/types.py → common/sct_types.py} +0 -0
- /argus/{backend/plugins/sirenada/types.py → common/sirenada_types.py} +0 -0
- {argus_alm-0.12.9.dist-info → argus_alm-0.13.0.dist-info}/LICENSE +0 -0
- {argus_alm-0.12.9.dist-info → argus_alm-0.13.0.dist-info}/WHEEL +0 -0
- {argus_alm-0.12.9.dist-info → argus_alm-0.13.0.dist-info}/entry_points.txt +0 -0
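The renames above move the shared type modules out of argus.backend into a new argus.common package with no content changes (+0 -0). As a minimal sketch of what that means for downstream imports in 0.13.0 (the aliasing style is only illustrative and not taken from the package itself):

    # 0.12.9 layout (removed in this release):
    #   from argus.backend.plugins.sct.types import ...
    #   from argus.backend.plugins.sirenada.types import ...
    #   from argus.backend.util.enums import ...

    # 0.13.0 layout, following the renames listed above:
    from argus.common import enums, sct_types, sirenada_types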
argus/db/interface.py
DELETED
@@ -1,370 +0,0 @@
-# TODO: Deprecated, will be removed once REST API client is ready
-from datetime import datetime
-import re
-import logging
-import json
-from uuid import UUID
-from hashlib import sha1
-from dataclasses import fields as dataclass_fields
-from typing import KeysView, Union, Optional, Any, get_args as get_type_args, get_origin as get_type_origin
-from types import GenericAlias
-
-import cassandra.cluster
-import cassandra.cqltypes
-from cassandra import ConsistencyLevel
-from cassandra.auth import PlainTextAuthProvider
-from cassandra.query import named_tuple_factory
-from cassandra.policies import WhiteListRoundRobinPolicy, AddressTranslator
-from cassandra.cluster import ExecutionProfile, EXEC_PROFILE_DEFAULT
-from cassandra.cqlengine import connection
-from cassandra.cqlengine import models
-from cassandra.cqlengine import management
-from argus.db.argus_json import ArgusJSONEncoder
-
-from argus.db.config import BaseConfig, FileConfig
-from argus.db.db_types import ColumnInfo, CollectionHint, ArgusUDTBase
-from argus.db.cloud_types import ResourceState
-from argus.backend.models.web import ArgusSchedule, ArgusScheduleAssignee, \
-    ArgusScheduleGroup, ArgusScheduleTest, ArgusRelease, ArgusGroup, ArgusTest
-
-LOGGER = logging.getLogger(__name__)
-
-
-class ArgusInterfaceSingletonError(Exception):
-    pass
-
-
-class ArgusInterfaceDatabaseConnectionError(Exception):
-    pass
-
-
-class ArgusInterfaceSchemaError(Exception):
-    pass
-
-
-class ArgusInterfaceNameError(Exception):
-    pass
-
-
-class PrivateToPublicAddressTranslator(AddressTranslator):
-    def __init__(self, address_mapping):
-        self.address_mapping = address_mapping
-
-    def translate(self, addr):
-        return self.address_mapping.get(addr, addr)
-
-
-class ArgusDatabase:
-    # pylint: disable=too-many-instance-attributes
-    CQL_ENGINE_CONNECTION_NAME = 'argus_cql_engine_conn'
-    ARGUS_EXECUTION_PROFILE = "argus_named_tuple"
-    REQUIRED_CQL_ENGINE_MODELS = [
-        ArgusSchedule,
-        ArgusScheduleGroup,
-        ArgusScheduleTest,
-        ArgusScheduleAssignee,
-        ArgusTest,
-        ArgusGroup,
-        ArgusRelease,
-    ]
-    PYTHON_SCYLLA_TYPE_MAPPING = {
-        int: cassandra.cqltypes.IntegerType.typename,
-        float: cassandra.cqltypes.FloatType.typename,
-        str: cassandra.cqltypes.VarcharType.typename,
-        UUID: cassandra.cqltypes.UUIDType.typename,
-        datetime: cassandra.cqltypes.DateType.typename,
-        ResourceState: cassandra.cqltypes.VarcharType.typename,
-        Optional[str]: cassandra.cqltypes.VarcharType.typename,
-        Optional[int]: cassandra.cqltypes.IntegerType.typename,
-    }
-
-    _INSTANCE: Union['ArgusDatabase', Any] = None
-
-    def __init__(self, config: BaseConfig = None):
-        if not config:
-            config = FileConfig()
-        self.config = config
-        self.execution_profile = ExecutionProfile(
-            load_balancing_policy=WhiteListRoundRobinPolicy(hosts=self.config.contact_points),
-            consistency_level=ConsistencyLevel.QUORUM,
-        )
-        self.exec_profile_named_tuple = ExecutionProfile(
-            load_balancing_policy=WhiteListRoundRobinPolicy(hosts=self.config.contact_points),
-            consistency_level=ConsistencyLevel.QUORUM,
-            row_factory=named_tuple_factory
-        )
-        address_translator = None
-        if self.config.address_mapping:
-            address_translator = PrivateToPublicAddressTranslator(self.config.address_mapping)
-
-        self.cluster = cassandra.cluster.Cluster(contact_points=self.config.contact_points,
-                                                 protocol_version=4,
-                                                 auth_provider=PlainTextAuthProvider(
-                                                     username=self.config.username,
-                                                     password=self.config.password),
-                                                 execution_profiles={
-                                                     EXEC_PROFILE_DEFAULT: self.execution_profile,
-                                                     self.ARGUS_EXECUTION_PROFILE: self.exec_profile_named_tuple
-                                                 },
-                                                 address_translator=address_translator,
-                                                 )
-        self.session = self.cluster.connect()
-        self._keyspace_initialized = False
-        self.prepared_statements = {}
-        self.initialized_tables = {}
-        self._table_keys = {}
-        self._mapped_udts = {}
-        self._current_keyspace = self.init_keyspace(name=self.config.keyspace_name)
-        connection.register_connection(self.CQL_ENGINE_CONNECTION_NAME, session=self.session)
-        for model in self.REQUIRED_CQL_ENGINE_MODELS:
-            management.sync_table(model, keyspaces=(self._current_keyspace,),
-                                  connections=(self.CQL_ENGINE_CONNECTION_NAME,))
-        if not models.DEFAULT_KEYSPACE:
-            models.DEFAULT_KEYSPACE = self._current_keyspace
-        elif models.DEFAULT_KEYSPACE != self._current_keyspace:
-            LOGGER.warning(
-                "CQL Engine DEFAULT_KEYSPACE has been set already and differs from interface keyspace, this could cause issues")
-
-    @classmethod
-    def get(cls, config: BaseConfig = None):
-        if cls._INSTANCE:
-            LOGGER.debug("Found valid db session.")
-            return cls._INSTANCE
-
-        if not config:
-            config = FileConfig()
-
-        LOGGER.debug("Initializing db session from default config")
-        cls._INSTANCE = cls(config=config)
-        return cls._INSTANCE
-
-    @classmethod
-    def destroy(cls):
-        if not cls._INSTANCE:
-            LOGGER.warning("ArgusDatabase::destroy called with no valid session.")
-            return False
-
-        LOGGER.info("Shutting down the cluster connection.")
-        cls._INSTANCE.cluster.shutdown()
-        cls._INSTANCE = None
-        return True
-
-    @classmethod
-    def from_config(cls, config: BaseConfig = None):
-        return cls.get(config)
-
-    def prepare_query_for_table(self, table_name, query_type, query):
-        prepared_statement = self.session.prepare(query=query)
-        self.prepared_statements[f"{table_name}_{query_type}"] = prepared_statement
-
-        return prepared_statement
-
-    @staticmethod
-    def _verify_keyspace_name(name: str):
-        incorrect_keyspace_name_re = r"\."
-        if match := re.search(incorrect_keyspace_name_re, name):
-            raise ArgusInterfaceNameError("Keyspace name does not conform to the "
-                                          f"keyspace naming rules: {name} (pos: {match.pos})")
-        return name
-
-    @staticmethod
-    def _get_hash_from_keys(keys: Union[list[str], KeysView]):
-        key_string = ".".join(keys).encode(encoding="utf-8")
-        return sha1(key_string).hexdigest()
-
-    def init_keyspace(self, name="argus", prefix="", suffix="") -> str:
-        keyspace_name = self._verify_keyspace_name(f"{prefix}{name}{suffix}")
-        query = f"CREATE KEYSPACE IF NOT EXISTS {keyspace_name} " \
-                "WITH replication={'class': 'SimpleStrategy', 'replication_factor' : 3}"
-        LOGGER.debug("Running query: %s", query)
-        self.session.execute(query=query, execution_profile=self.ARGUS_EXECUTION_PROFILE)
-        self.session.set_keyspace(keyspace_name)
-        self._keyspace_initialized = True
-        return keyspace_name
-
-    def is_native_type(self, object_type):
-        return self.PYTHON_SCYLLA_TYPE_MAPPING.get(object_type, False)
-
-    def init_table(self, table_name: str, column_info: dict[str, ColumnInfo]):
-        # pylint: disable=too-many-locals
-        if not self._keyspace_initialized:
-            raise ArgusInterfaceDatabaseConnectionError("Uninitialized keyspace, cannot continue")
-
-        if self.initialized_tables.get(table_name):
-            return True, f"Table {table_name} already initialized"
-
-        primary_keys_info: dict = column_info.pop("$tablekeys$")
-        clustering_order_info: dict = column_info.pop("$clustering_order$")
-        indices_info: dict = column_info.pop("$indices$")
-        partition_keys = [key for key, (cls, pk_type) in primary_keys_info.items() if pk_type == "partition"]
-        partition_key_def = partition_keys[0] if len(partition_keys) == 1 else f"({', '.join(partition_keys)})"
-        clustering_columns = [key for key, (cls, pk_type) in primary_keys_info.items() if pk_type == "clustering"]
-        clustering_column_def = ", ".join(clustering_columns)
-        clustering_order = [f"{key} {clustering_order_info[key]}" for key in clustering_columns +
-                            partition_keys if clustering_order_info.get(key)]
-        clustering_order_def = ", ".join(clustering_order)
-        clustering_statement = f"WITH CLUSTERING ORDER BY ({clustering_order_def})" if len(clustering_order) > 0 else ""
-
-        self._table_keys[table_name] = primary_keys_info
-        primary_key_def = f"{partition_key_def}, {clustering_column_def}" if len(
-            clustering_column_def) > 0 else partition_key_def
-        query = "CREATE TABLE IF NOT EXISTS {table_name}({columns}, PRIMARY KEY ({pk})) {cs}"
-        columns_query = []
-        for column in column_info.values():
-            if mapped_type := self.is_native_type(column.type):
-                column_type = mapped_type
-            elif column.type is CollectionHint:
-                column_type = self.create_collection_declaration(column.value.stored_type)
-            else:
-                # UDT
-                column_type = f"frozen<{self._init_user_data_type(column.type)}>"
-
-            constraints = " ".join(column.constraints)
-            column_query = f"{column.name} {column_type} {constraints}"
-            columns_query.append(column_query)
-
-        columns_query = ", ".join(columns_query)
-        completed_query = query.format(table_name=table_name, columns=columns_query,
-                                       pk=primary_key_def, cs=clustering_statement)
-        LOGGER.debug("About to execute: \"%s\"", completed_query)
-        self.session.execute(query=completed_query, execution_profile=self.ARGUS_EXECUTION_PROFILE)
-        self.create_indices(table_name, indices=indices_info)
-        self.initialized_tables[table_name] = True
-        return True, "Initialization complete"
-
-    def create_indices(self, table_name, indices):
-        for index in indices:
-            self.session.execute(f"CREATE INDEX IF NOT EXISTS ON {table_name}({index})")
-        return True
-
-    def create_collection_declaration(self, hint: GenericAlias):
-        collection_type = get_type_origin(hint)
-        collection_types = get_type_args(hint)
-
-        declaration_type = collection_type.__name__
-
-        declared_types = []
-        for inner_hint in collection_types:
-            type_class = get_type_origin(inner_hint) if isinstance(inner_hint, GenericAlias) else inner_hint
-
-            if type_class is tuple or type_class is list:
-                declaration = f"frozen<{self.create_collection_declaration(inner_hint)}>"
-            elif matched_type := self.PYTHON_SCYLLA_TYPE_MAPPING.get(type_class):
-                declaration = matched_type
-            else:
-                declaration = f"frozen<{self._init_user_data_type(type_class)}>"
-
-            declared_types.append(declaration)
-
-        declaration_query = ", ".join(declared_types) if collection_type is tuple else str(declared_types[0])
-
-        return f"{declaration_type}<{declaration_query}>"
-
-    def _init_user_data_type(self, cls: ArgusUDTBase):
-        if not self._keyspace_initialized:
-            raise ArgusInterfaceDatabaseConnectionError("Uninitialized keyspace, cannot continue")
-
-        udt_name = cls.basename()
-
-        if cls in self._mapped_udts.get(self._current_keyspace, []):
-            return udt_name
-
-        query = "CREATE TYPE IF NOT EXISTS {name} ({fields})"
-        fields = []
-        for field in dataclass_fields(cls):
-            name = field.name
-            field_type = get_type_origin(field.type) if isinstance(field.type, GenericAlias) else field.type
-            if field_type is list or field_type is tuple:
-                field_declaration = self.create_collection_declaration(field.type)
-            elif matched_type := self.PYTHON_SCYLLA_TYPE_MAPPING.get(field_type):
-                field_declaration = matched_type
-            else:
-                field_declaration = f"frozen<{self._init_user_data_type(field.type)}>"
-            fields.append(f"{name} {field_declaration}")
-
-        joined_fields = ", ".join(fields)
-
-        completed_query = query.format(name=udt_name, fields=joined_fields)
-        LOGGER.debug("About to execute: \"%s\"", completed_query)
-        self.session.execute(query=completed_query, execution_profile=self.ARGUS_EXECUTION_PROFILE)
-
-        existing_udts = self._mapped_udts.get(self._current_keyspace, [])
-        existing_udts.append(cls)
-        self._mapped_udts[self._current_keyspace] = existing_udts
-
-        return udt_name
-
-    def fetch(self, table_name: str, run_id: UUID, where_clause="WHERE id = ?"):
-        return self._fetch(table_name, (run_id,), where_clause)
-
-    def _fetch(self, table_name: str, params: tuple | list, where_clause: str):
-        if not self._keyspace_initialized:
-            raise ArgusInterfaceDatabaseConnectionError("Uninitialized keyspace, cannot continue")
-
-        query = self.prepared_statements.get(f"{table_name}_select_{where_clause.lower().replace(' ', '-')}",
-                                             self.prepare_query_for_table(table_name=table_name, query_type="insert",
-                                                                          query=f"SELECT * FROM {table_name} "
-                                                                                f"{where_clause}"))
-
-        cursor = self.session.execute(query=query, parameters=params, execution_profile=self.ARGUS_EXECUTION_PROFILE)
-
-        return cursor.one()
-
-    def fetch_generic(self, table_name: str, params: tuple | list, where_clause: str):
-        return self._fetch(table_name, params, where_clause)
-
-    def insert(self, table_name: str, run_data: dict):
-        if not self._keyspace_initialized:
-            raise ArgusInterfaceDatabaseConnectionError("Uninitialized keyspace, cannot continue")
-
-        query = self.prepared_statements.get(f"{table_name}_insert",
-                                             self.prepare_query_for_table(table_name=table_name, query_type="insert",
-                                                                          query=f"INSERT INTO {table_name} JSON ?"))
-
-        self.session.execute(query=query, parameters=(json.dumps(run_data, cls=ArgusJSONEncoder),),
-                             execution_profile=self.ARGUS_EXECUTION_PROFILE)
-
-    def update(self, table_name: str, run_data: dict):
-        # pylint: disable=too-many-locals
-        def _convert_data_to_sequence(data: dict) -> list:
-            data_list = list(data.values())
-            for idx, value in enumerate(data_list):
-                if isinstance(value, dict):
-                    data_list[idx] = _convert_data_to_sequence(value)
-                elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], dict):
-                    data_list[idx] = [_convert_data_to_sequence(d) for d in value]
-            return data_list
-
-        primary_keys: dict = self._table_keys.get(table_name)
-        if not primary_keys:
-            raise ArgusInterfaceSchemaError(f"Table \"{table_name}\" is not initialized!")
-
-        LOGGER.debug("Primary keys for table %s: %s", table_name, primary_keys)
-        where_clause = []
-        where_params = []
-        for key, (_, _) in primary_keys.items():
-            LOGGER.debug("Ejecting %s from update set as it is a part of the primary key", key)
-            try:
-                data_value = run_data.pop(key)
-            except KeyError as exc:
-                raise ArgusInterfaceSchemaError("Missing key from update set", key) from exc
-            field = f"{key} = ?"
-            where_clause.append(field)
-            where_params.append(data_value)
-
-        where_clause_joined = " AND ".join(where_clause)
-
-        if not (prepared_statement := self.prepared_statements.get(f"update_{table_name}")):
-            field_parameters = [f"\"{field_name}\" = ?" for field_name in run_data.keys()]
-            fields_joined = ", ".join(field_parameters)
-
-            query = f"UPDATE {table_name} SET {fields_joined} WHERE {where_clause_joined}"
-            LOGGER.debug("Formatted query: %s", query)
-            prepared_statement = self.session.prepare(query=query)
-            self.prepared_statements[f"update_{table_name}"] = prepared_statement
-
-        LOGGER.debug("Bound query for update: %s", prepared_statement.query_string)
-        query_parameters = _convert_data_to_sequence(run_data)
-        parameters = [*query_parameters, *where_params]
-        LOGGER.debug("Parameters: %s", parameters)
-        self.session.execute(prepared_statement, parameters=parameters, execution_profile=self.ARGUS_EXECUTION_PROFILE)