mustrd 0.1.8__py3-none-any.whl → 0.2.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mustrd/mustrd.py CHANGED
@@ -1,788 +1,842 @@
1
- """
2
- MIT License
3
-
4
- Copyright (c) 2023 Semantic Partners Ltd
5
-
6
- Permission is hereby granted, free of charge, to any person obtaining a copy
7
- of this software and associated documentation files (the "Software"), to deal
8
- in the Software without restriction, including without limitation the rights
9
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
- copies of the Software, and to permit persons to whom the Software is
11
- furnished to do so, subject to the following conditions:
12
-
13
- The above copyright notice and this permission notice shall be included in all
14
- copies or substantial portions of the Software.
15
-
16
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
- SOFTWARE.
23
- """
24
-
25
- import os
26
- from typing import Tuple, List
27
-
28
- import tomli
29
- from rdflib.plugins.parsers.notation3 import BadSyntax
30
-
31
- from . import logger_setup
32
- from dataclasses import dataclass
33
-
34
- from pyparsing import ParseException
35
- from pathlib import Path
36
- from requests import ConnectionError, ConnectTimeout, HTTPError, RequestException
37
-
38
- from rdflib import Graph, URIRef, RDF, XSD, SH, Literal
39
-
40
- from rdflib.compare import isomorphic, graph_diff
41
- import pandas
42
-
43
- from .namespace import MUST, TRIPLESTORE
44
- import requests
45
- import json
46
- from pandas import DataFrame
47
-
48
- from .spec_component import TableThenSpec, parse_spec_component, WhenSpec, ThenSpec
49
- from .utils import is_json,get_mustrd_root
50
- from colorama import Fore, Style
51
- from tabulate import tabulate
52
- from collections import defaultdict
53
- from pyshacl import validate
54
- import logging
55
- from http.client import HTTPConnection
56
- from .steprunner import upload_given, run_when
57
-
58
log = logger_setup.setup_logger(__name__)

# Suppress urllib3's InsecureRequestWarning noise for self-signed triple-store endpoints.
requests.packages.urllib3.disable_warnings()
try:
    # Loosen the cipher list so older triple-store servers can still negotiate TLS.
    # NOTE(review): DEFAULT_CIPHERS was removed in urllib3 2.x; guard so merely
    # importing this module does not crash on newer urllib3 releases.
    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'
except AttributeError:
    pass

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
64
-
65
-
66
def debug_requests_on():
    '''Switches on logging of the requests module.'''
    # Make http.client echo request/response lines to stdout.
    HTTPConnection.debuglevel = 1

    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    urllib3_logger = logging.getLogger("requests.packages.urllib3")
    urllib3_logger.setLevel(logging.DEBUG)
    urllib3_logger.propagate = True
75
-
76
def debug_requests_off():
    '''Switches off logging of the requests module, might be some side-effects'''
    # Stop http.client from echoing wire traffic.
    HTTPConnection.debuglevel = 0

    root = logging.getLogger()
    root.setLevel(logging.WARNING)
    root.handlers = []
    urllib3_logger = logging.getLogger("requests.packages.urllib3")
    urllib3_logger.setLevel(logging.WARNING)
    urllib3_logger.propagate = False

# Default to quiet HTTP logging at import time.
debug_requests_off()
88
-
89
@dataclass
class Specification:
    """A fully resolved mustrd test: given/when/then plus the target triple store."""
    spec_uri: URIRef        # identifier of the spec in the test graph
    triple_store: dict      # triple-store config dict (see get_triple_stores)
    given: Graph            # initial state uploaded before running `when`
    when: WhenSpec          # the SPARQL action(s) under test
    then: ThenSpec          # expected outcome (graph or table)
    spec_file_name: str = "default.mustrd.ttl"  # source .mustrd.ttl file name
97
-
98
-
99
@dataclass
class GraphComparison:
    """Three-way diff of an expected vs actual RDF graph."""
    in_expected_not_in_actual: Graph  # triples missing from the actual result
    in_actual_not_in_expected: Graph  # unexpected triples in the actual result
    in_both: Graph                    # triples the two graphs agree on
104
-
105
-
106
@dataclass
class SpecResult:
    """Base class for the outcome of running one spec on one triple store."""
    spec_uri: URIRef      # which spec produced this result
    triple_store: URIRef  # which triple-store type it ran against
110
-
111
-
112
@dataclass
class SpecPassed(SpecResult):
    """Spec ran and the actual result matched the expectation."""
    pass
115
-
116
-
117
@dataclass()
class SpecPassedWithWarning(SpecResult):
    """Spec passed, but with a non-fatal warning (e.g. ignored sh:order)."""
    warning: str  # human-readable warning text
120
-
121
-
122
@dataclass
class SelectSpecFailure(SpecResult):
    """SELECT spec failed; carries the expected/actual table diff."""
    table_comparison: pandas.DataFrame  # cell-level diff frame
    message: str                        # row/column count summary
126
-
127
-
128
@dataclass
class ConstructSpecFailure(SpecResult):
    """CONSTRUCT spec failed; carries the expected/actual graph diff."""
    graph_comparison: GraphComparison
131
-
132
-
133
@dataclass
class UpdateSpecFailure(SpecResult):
    """UPDATE spec failed; carries the expected/actual graph diff."""
    graph_comparison: GraphComparison
136
-
137
-
138
@dataclass
class SparqlParseFailure(SpecResult):
    """The spec's SPARQL could not be parsed."""
    exception: ParseException
141
-
142
-
143
@dataclass
class SparqlExecutionError(SpecResult):
    """The SPARQL executed but the triple store reported an error."""
    exception: Exception
146
-
147
-
148
@dataclass
class TripleStoreConnectionError(SpecResult):
    """Could not reach the triple store at all."""
    exception: ConnectionError
151
-
152
-
153
@dataclass
class SpecSkipped(SpecResult):
    """Spec was not run; `message` explains why."""
    message: str  # reason for skipping (string or exception, printed as-is)
    spec_file_name: str = "default.mustrd.ttl"  # source file, for reporting
157
-
158
-
159
@dataclass
class SparqlAction:
    """A SPARQL query to execute; specialised by query type in the subclasses below."""
    query: str
162
-
163
-
164
@dataclass
class SelectSparqlQuery(SparqlAction):
    """A SPARQL SELECT action."""
    pass
167
-
168
-
169
@dataclass
class ConstructSparqlQuery(SparqlAction):
    """A SPARQL CONSTRUCT action."""
    pass
172
-
173
-
174
@dataclass
class UpdateSparqlQuery(SparqlAction):
    """A SPARQL UPDATE action."""
    pass
177
-
178
-
179
# https://github.com/Semantic-partners/mustrd/issues/19

def validate_specs(run_config: dict, triple_stores: List, shacl_graph: Graph, ont_graph: Graph, file_name: str = "*")\
        -> Tuple[List, Graph, List]:
    """Parse and SHACL-validate every *.mustrd.ttl spec file under run_config['spec_path'].

    Returns a tuple of (spec URIs to run, combined spec graph, skipped specs).
    If any spec is marked must:focus "true", only the focussed specs are returned.
    """
    spec_graph = Graph()   # accumulates all valid specs, annotated with source-file info
    subject_uris = set()   # spec URIs seen so far, for cross-file duplicate detection
    focus_uris = set()     # specs explicitly marked must:focus "true"
    invalid_specs = []
    ttl_files = list(run_config['spec_path'].glob(f'**/{file_name}.mustrd.ttl'))
    ttl_files.sort()
    log.info(f"Found {len(ttl_files)} {file_name}.mustrd.ttl files in {run_config['spec_path']}")

    for file in ttl_files:
        error_messages = []

        log.info(f"Parse: {file}")
        try:
            file_graph = Graph().parse(file)
        except BadSyntax as e:
            template = "An exception of type {0} occurred when trying to parse a spec file. Arguments:\n{1!r}"
            message = template.format(type(e).__name__, e.args)
            log.error(message)
            error_messages += [f"Could not extract spec from {file} due to exception of type "
                               f"{type(e).__name__} when parsing file"]
            continue
        # run shacl validation
        conforms, results_graph, results_text = validate(file_graph,
                                                         shacl_graph=shacl_graph,
                                                         ont_graph=ont_graph,
                                                         inference='none',
                                                         abort_on_first=False,
                                                         allow_infos=False,
                                                         allow_warnings=False,
                                                         meta_shacl=False,
                                                         advanced=True,
                                                         js=False,
                                                         debug=False)
        if not conforms:
            for msg in results_graph.objects(predicate=SH.resultMessage):
                log.warning(f"{file_graph}")
                log.warning(f"{msg} File: {file.name}")
                error_messages += [f"{msg} File: {file.name}"]

        # collect a list of uris of the tests in focus
        for focus_uri in file_graph.subjects(predicate=MUST.focus, object=Literal("true", datatype=XSD.boolean)):
            if focus_uri in focus_uris:
                # rename clashes so each focus entry stays distinct in the set
                focus_uri = URIRef(str(focus_uri) + "_DUPLICATE")
            focus_uris.add(focus_uri)

        # make sure there are no duplicate test IRIs in the files
        for subject_uri in file_graph.subjects(RDF.type, MUST.TestSpec):
            if subject_uri in subject_uris:
                log.warning(f"Duplicate subject URI found: {file.name} {subject_uri}. File will not be parsed.")
                error_messages += [f"Duplicate subject URI found in {file.name}."]
                subject_uri = URIRef(str(subject_uri) + "_DUPLICATE")
            if len(error_messages) > 0:
                # any parse/SHACL/duplicate error skips this spec on every triple store
                error_messages.sort()
                error_message = "\n".join(msg for msg in error_messages)
                invalid_specs += [SpecSkipped(subject_uri, triple_store["type"], error_message, file.name) for triple_store in
                                  triple_stores]
            else:
                subject_uris.add(subject_uri)
                # re-parse the file and tag each spec with its source file for later reporting
                this_spec_graph = Graph()
                this_spec_graph.parse(file)
                spec_uris_in_this_file = list(this_spec_graph.subjects(RDF.type, MUST.TestSpec))
                for spec in spec_uris_in_this_file:
                    this_spec_graph.add([spec, MUST.specSourceFile, Literal(file)])
                    this_spec_graph.add([spec, MUST.specFileName, Literal(file.name)])
                spec_graph += this_spec_graph

    # NOTE(review): sourceFiles is computed but never used below — kept for parity.
    sourceFiles = list(spec_graph.subject_objects(MUST.specSourceFile))

    valid_spec_uris = list(spec_graph.subjects(RDF.type, MUST.TestSpec))

    if focus_uris:
        # focus mode: run only the focussed specs, keeping their skip reasons if invalid
        invalid_focus_specs = []
        for spec in invalid_specs:
            if spec.spec_uri in focus_uris:
                invalid_focus_specs += [spec]
                focus_uris.remove(spec.spec_uri)
        log.info(f"Collected {len(focus_uris)} focus test spec(s)")
        return focus_uris, spec_graph, invalid_focus_specs
    else:
        log.info(f"Collected {len(valid_spec_uris)} valid test spec(s)")
        return valid_spec_uris, spec_graph, invalid_specs
269
-
270
-
271
def get_specs(spec_uris: List[URIRef], spec_graph: Graph, triple_stores: List[dict],
              run_config: dict):
    """Build a Specification for every (triple store, spec URI) pair.

    Returns (specs, skipped_results). A triple store carrying an "error" key
    skips all of its specs; individual specs that cannot be built are skipped
    with the causing exception as the reason.
    """
    specs = []
    skipped_results = []
    try:
        for triple_store in triple_stores:
            if "error" not in triple_store:
                for spec_uri in spec_uris:
                    try:
                        specs.append(get_spec(spec_uri, spec_graph, run_config, triple_store))
                    except (ValueError, FileNotFoundError, ConnectionError) as e:
                        skipped_results.append(
                            SpecSkipped(spec_uri, triple_store['type'], e, get_spec_file(spec_uri, spec_graph)))
            else:
                log.error(f"{triple_store['error']}. No specs run for this triple store.")
                skipped_results.extend(
                    SpecSkipped(spec_uri, triple_store['type'], triple_store['error'],
                                get_spec_file(spec_uri, spec_graph))
                    for spec_uri in spec_uris)
    except (BadSyntax, FileNotFoundError) as e:
        template = "An exception of type {0} occurred when trying to parse the triple store configuration file. " \
                   "Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        log.error(message)
        log.error("No specifications will be run.")

    log.info(f"Extracted {len(specs)} specifications that will be run")
    return specs, skipped_results
297
-
298
-
299
def run_specs(specs) -> List[SpecResult]:
    """Run every specification and collect one SpecResult per spec."""
    # https://github.com/Semantic-partners/mustrd/issues/115
    return [run_spec(specification) for specification in specs]
305
-
306
def get_spec_file(spec_uri: URIRef, spec_graph: Graph):
    """Return the source file name recorded for a spec, with a default when absent."""
    file_name = spec_graph.value(subject=spec_uri, predicate=MUST.specFileName, default="default.mustrd.ttl")
    return str(file_name)
309
-
310
def get_spec(spec_uri: URIRef, spec_graph: Graph, run_config: dict, mustrd_triple_store: dict = None) -> Specification:
    """Assemble a Specification (given/when/then) for one spec URI.

    Defaults to an in-memory RdfLib store when none is supplied. Parse and
    connection problems are logged and re-raised for the caller to handle.
    """
    try:
        if mustrd_triple_store is None:
            mustrd_triple_store = {"type": TRIPLESTORE.RdfLib}
        # one parsed component per clause, in given/when/then order
        components = [
            parse_spec_component(subject=spec_uri,
                                 predicate=predicate,
                                 spec_graph=spec_graph,
                                 run_config=run_config,
                                 mustrd_triple_store=mustrd_triple_store)
            for predicate in (MUST.given, MUST.when, MUST.then)
        ]

        spec_file_name = get_spec_file(spec_uri, spec_graph)
        # https://github.com/Semantic-partners/mustrd/issues/92
        given_component, when_component, then_component = components
        return Specification(spec_uri, mustrd_triple_store,
                             given_component.value, when_component, then_component, spec_file_name)

    except (ValueError, FileNotFoundError) as e:
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        log.exception(template.format(type(e).__name__, e.args))
        raise
    except ConnectionError as e:
        log.error(e)
        raise
334
-
335
-
336
def check_result(spec, result):
    """Compare a spec's actual result against its expectation.

    Table expectations delegate to table_comparison(); graph expectations are
    checked for isomorphism, with a graph diff attached on failure.
    """
    # isinstance instead of `type(...) ==` so a TableThenSpec subclass would
    # still be routed to the table comparison.
    if isinstance(spec.then, TableThenSpec):
        return table_comparison(result, spec)
    graph_compare = graph_comparison(spec.then.value, result)
    if isomorphic(result, spec.then.value):
        return SpecPassed(spec.spec_uri, spec.triple_store["type"])
    # distinguish CONSTRUCT failures from UPDATE failures for reporting
    if spec.when[0].queryType == MUST.ConstructSparql:
        return ConstructSpecFailure(spec.spec_uri, spec.triple_store["type"], graph_compare)
    return UpdateSpecFailure(spec.spec_uri, spec.triple_store["type"], graph_compare)
348
-
349
-
350
def run_spec(spec: Specification) -> SpecResult:
    """Execute one spec: upload the given state, run each `when` action, check the `then`.

    Connection problems and unsupported operations are converted into the
    corresponding SpecResult subclasses rather than raised.
    """
    spec_uri = spec.spec_uri
    triple_store = spec.triple_store
    # close_connection = True
    log.debug(f"run_when {spec_uri=}, {triple_store=}, {spec.given=}, {spec.when=}, {spec.then=}")
    if spec.given:
        given_as_turtle = spec.given.serialize(format="turtle")
        log.debug(f"{given_as_turtle}")
        upload_given(triple_store, spec.given)
    else:
        # No given: the spec relies on state already in the store, which an
        # in-memory RdfLib store cannot provide.
        if triple_store['type'] == TRIPLESTORE.RdfLib:
            return SpecSkipped(spec_uri, triple_store['type'], "Unable to run Inherited State tests on Rdflib")
    try:
        for when in spec.when:
            log.info(f"Running {when.queryType} spec {spec_uri} on {triple_store['type']}")
            try:
                result = run_when(spec_uri, triple_store, when)
            except ParseException as e:
                return SparqlParseFailure(spec_uri, triple_store["type"], e)
            except NotImplementedError as ex:
                return SpecSkipped(spec_uri, triple_store["type"], ex.args[0])
        # Only the result of the last `when` action is checked against the `then`.
        return check_result(spec, result)
    except (ConnectionError, TimeoutError, HTTPError, ConnectTimeout, OSError) as e:
        # close_connection = False
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        log.error(message)
        return TripleStoreConnectionError(spec_uri, triple_store["type"], message)
    except (TypeError, RequestException) as e:
        log.error(f"{type(e)} {e}")
        return SparqlExecutionError(spec_uri, triple_store["type"], e)

    # https://github.com/Semantic-partners/mustrd/issues/78
    # finally:
    #     if type(mustrd_triple_store) == MustrdAnzo and close_connection:
    #         mustrd_triple_store.clear_graph()
386
-
387
def get_triple_store_graph(triple_store_graph_path: Path, secrets: str):
    """Load a triple-store config graph, merging in credentials.

    Credentials come either from the given `secrets` string or, when absent,
    from a sibling "<stem>_secrets<suffix>" file next to the config file.
    """
    config_graph = Graph().parse(triple_store_graph_path)
    if secrets:
        return config_graph.parse(data=secrets)
    secrets_file = triple_store_graph_path.parent / Path(
        triple_store_graph_path.stem + "_secrets" + triple_store_graph_path.suffix)
    return config_graph.parse(secrets_file)
393
-
394
-
395
def get_triple_stores(triple_store_graph: Graph) -> list[dict]:
    """Translate a SHACL-validated triple-store config graph into config dicts.

    Each dict carries at least "type" and "uri". Connection/credential problems
    are stored under an "error" key instead of being raised, so the remaining
    stores can still run. Raises ValueError when the graph fails shape validation.
    """
    triple_stores = []
    shacl_graph = Graph().parse(Path(os.path.join(get_mustrd_root(), "model/triplestoreshapes.ttl")))
    ont_graph = Graph().parse(Path(os.path.join(get_mustrd_root(), "model/triplestoreOntology.ttl")))
    conforms, results_graph, results_text = validate(
        data_graph=triple_store_graph,
        shacl_graph=shacl_graph,
        ont_graph=ont_graph,
        advanced=True,
        inference='none'
    )
    if not conforms:
        raise ValueError(f"Triple store configuration not conform to the shapes. SHACL report: {results_text}",
                         results_graph)

    for subject, _, store_type in triple_store_graph.triples((None, RDF.type, None)):
        config = {"type": store_type, "uri": subject}

        def read(predicate):
            # value of `predicate` on this store's config node (None when absent)
            return triple_store_graph.value(subject=subject, predicate=predicate)

        # Anzo graph via anzo
        if store_type == TRIPLESTORE.Anzo:
            config["url"] = read(TRIPLESTORE.url)
            config["port"] = read(TRIPLESTORE.port)
            try:
                config["username"] = str(read(TRIPLESTORE.username))
                config["password"] = str(read(TRIPLESTORE.password))
            except (FileNotFoundError, ValueError) as e:
                config["error"] = e
            config["gqe_uri"] = read(TRIPLESTORE.gqeURI)
            config["input_graph"] = read(TRIPLESTORE.inputGraph)
            config["output_graph"] = read(TRIPLESTORE.outputGraph)
            try:
                check_triple_store_params(config, ["url", "port", "username", "password", "input_graph"])
            except ValueError as e:
                config["error"] = e
        # GraphDB
        elif store_type == TRIPLESTORE.GraphDb:
            config["url"] = read(TRIPLESTORE.url)
            config["port"] = read(TRIPLESTORE.port)
            try:
                config["username"] = str(read(TRIPLESTORE.username))
                config["password"] = str(read(TRIPLESTORE.password))
            except (FileNotFoundError, ValueError) as e:
                log.error(f"Credential retrieval failed {e}")
                config["error"] = e
            config["repository"] = read(TRIPLESTORE.repository)
            config["input_graph"] = read(TRIPLESTORE.inputGraph)

            try:
                check_triple_store_params(config, ["url", "port", "repository"])
            except ValueError as e:
                config["error"] = e
        elif store_type != TRIPLESTORE.RdfLib:
            config["error"] = f"Triple store not implemented: {store_type}"

        triple_stores.append(config)
    return triple_stores
454
-
455
-
456
def check_triple_store_params(triple_store: dict, required_params: List[str]):
    """Raise ValueError naming every required parameter that is missing or None."""
    missing = [name for name in required_params if triple_store.get(name) is None]
    if missing:
        raise ValueError(f"Cannot establish connection to {triple_store['type']}. "
                         f"Missing required parameter(s): {', '.join(missing)}.")
461
-
462
-
463
def get_credential_from_file(triple_store_name: URIRef, credential: str, config_path: Literal) -> str:
    """Read one credential for a triple store from a TOML config file.

    Raises ValueError when the path is missing or the file cannot be parsed,
    FileNotFoundError when the file does not exist, and KeyError when the
    store/credential entry is absent from the file.
    """
    log.info(f"get_credential_from_file {triple_store_name}, {credential}, {config_path}")
    if config_path is None:
        raise ValueError(f"Cannot establish connection defined in {triple_store_name}. "
                         f"Missing required parameter: {credential}.")
    path = Path(config_path)
    log.info(f"get_credential_from_file {path}")

    if not os.path.isfile(path):
        log.error(f"couldn't find {path}")
        raise FileNotFoundError(f"Credentials config file not found: {path}")
    try:
        with open(path, "rb") as f:
            config = tomli.load(f)
    except tomli.TOMLDecodeError as e:
        log.error(f"config error {path} {e}")
        # chain the original parse error so the TOML location is not lost
        raise ValueError(f"Error reading credentials config file: {e}") from e
    return config[str(triple_store_name)][credential]
483
-
484
# Convert sparql json query results as defined in https://www.w3.org/TR/rdf-sparql-json-res/
def json_results_to_panda_dataframe(result: str) -> pandas.DataFrame:
    """Convert SPARQL JSON SELECT results into a DataFrame.

    Each bound variable yields two columns: "<var>" with the string value and
    "<var>_datatype" with the literal datatype (xsd:string when unspecified)
    or xsd:anyURI for non-literal bindings. Cells for variables absent from a
    binding become "". Returns an empty DataFrame for zero bindings.
    """
    json_result = json.loads(result)
    rows = []
    for binding in json_result["results"]["bindings"]:
        row = {}
        for key, value_object in binding.items():
            row[key] = str(value_object["value"])
            if "type" in value_object and value_object["type"] == "literal":
                # plain literals default to xsd:string
                row[key + "_datatype"] = value_object.get("datatype", str(XSD.string))
            else:
                row[key + "_datatype"] = str(XSD.anyURI)
        rows.append(row)

    if not rows:
        return pandas.DataFrame()
    # Build the frame in one pass instead of concatenating per row
    # (O(n) instead of O(n^2)), then blank the cells of sparse variables.
    frames = pandas.DataFrame(rows)
    frames.fillna('', inplace=True)
    return frames
510
-
511
-
512
# https://github.com/Semantic-partners/mustrd/issues/110
# https://github.com/Semantic-partners/mustrd/issues/52
def table_comparison(result: str, spec: Specification) -> SpecResult:
    """Compare a SELECT result (SPARQL JSON string) against the spec's expected table.

    Rows are sorted before comparison unless the query has an ORDER BY clause.
    Covers four scenarios: unexpected result, mismatched result, matching empty
    result, and missing result. Non-JSON input is treated as a parse failure.
    """
    warning = None
    order_list = ["order by ?", "order by desc", "order by asc"]
    # did the query itself impose an ordering?
    ordered_result = any(pattern in spec.when[0].value.lower() for pattern in order_list)
    then = spec.then.value
    try:
        if is_json(result):
            df = json_results_to_panda_dataframe(result)
            columns = list(df.columns)
        else:
            # non-JSON payloads are handled by the ParseException branch below
            raise ParseException
        sorted_columns = sorted(columns)
        sorted_then_cols = sorted(list(then))
        if not df.empty:

            if not ordered_result:
                # columns[::2] are the value columns (every other one is "_datatype")
                df.sort_values(by=columns[::2], inplace=True)
                df.reset_index(inplace=True, drop=True)
                if spec.then.ordered:
                    warning = f"sh:order in {spec.spec_uri} is ignored, no ORDER BY in query"
                    log.warning(warning)

            # Scenario 1: expected no result but got a result
            if then.empty:
                message = f"Expected 0 row(s) and 0 column(s), got {df.shape[0]} row(s) and {round(df.shape[1] / 2)} column(s)"
                empty_then = create_empty_dataframe_with_columns(df)
                df_diff = empty_then.compare(df, result_names=("expected", "actual"))

            else:
                # Scenario 2: expected a result and got a result
                message = f"Expected {then.shape[0]} row(s) and {round(then.shape[1] / 2)} column(s), " \
                          f"got {df.shape[0]} row(s) and {round(df.shape[1] / 2)} column(s)"
                if ordered_result is True and not spec.then.ordered:
                    # ordered actual vs unordered expectation cannot be compared fairly
                    message += ". Actual result is ordered, must:then must contain sh:order on every row."
                    return SelectSpecFailure(spec.spec_uri, spec.triple_store["type"], None, message)
                if len(columns) == len(then.columns):
                    if sorted_columns == sorted_then_cols:
                        # same columns: align order, then cell-by-cell compare
                        then = then[columns]
                        if not ordered_result:
                            then.sort_values(by=columns[::2], inplace=True)
                            then.reset_index(drop=True, inplace=True)
                        if df.shape == then.shape and (df.columns == then.columns).all():
                            df_diff = then.compare(df, result_names=("expected", "actual"))
                        else:
                            df_diff = construct_df_diff(df, then)

                    else:
                        # same column count, different names
                        then = then[sorted_then_cols]
                        df = df[sorted_columns]
                        df_diff = construct_df_diff(df, then)
                else:
                    # different column counts
                    then = then[sorted_then_cols]
                    df = df[sorted_columns]
                    df_diff = construct_df_diff(df, then)
        else:

            if then.empty:
                # Scenario 3: expected no result, got no result
                message = f"Expected 0 row(s) and 0 column(s), got 0 row(s) and 0 column(s)"
                df = pandas.DataFrame()
            else:
                # Scenario 4: expected a result, but got an empty result
                message = f"Expected {then.shape[0]} row(s) and {round(then.shape[1] / 2)} column(s), got 0 row(s) and 0 column(s)"
                then = then[sorted_then_cols]
                df = create_empty_dataframe_with_columns(then)
            df_diff = then.compare(df, result_names=("expected", "actual"))
            print(df_diff.to_markdown())

        if df_diff.empty:
            if warning:
                return SpecPassedWithWarning(spec.spec_uri, spec.triple_store["type"], warning)
            else:
                return SpecPassed(spec.spec_uri, spec.triple_store["type"])
        else:
            log.error(message)
            return SelectSpecFailure(spec.spec_uri, spec.triple_store["type"], df_diff, message)

    except ParseException as e:
        return SparqlParseFailure(spec.spec_uri, spec.triple_store["type"], e)
    except NotImplementedError as ex:
        return SpecSkipped(spec.spec_uri, spec.triple_store["type"], ex)
612
-
613
-
614
def graph_comparison(expected_graph: Graph, actual_graph: Graph) -> GraphComparison:
    """Three-way diff two RDF graphs: missing, unexpected, and shared triples."""
    in_both, in_expected, in_actual = graph_diff(expected_graph, actual_graph)
    return GraphComparison(in_expected - in_actual, in_actual - in_expected, in_both)
622
-
623
-
624
def get_then_update(spec_uri: URIRef, spec_graph: Graph) -> Graph:
    """Extract the expected triples of a spec's `then` statements dataset as a graph."""
    then_query = f"""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    CONSTRUCT {{ ?s ?p ?o }}
    {{
        <{spec_uri}> <{MUST.then}>
            a <{MUST.StatementsDataset}> ;
            <{MUST.hasStatement}> [
                a rdf:Statement ;
                rdf:subject ?s ;
                rdf:predicate ?p ;
                rdf:object ?o ;
            ] ; ]
    }}
    """
    return spec_graph.query(then_query).graph
643
-
644
-
645
def calculate_row_difference(df1: pandas.DataFrame,
                             df2: pandas.DataFrame) -> pandas.DataFrame:
    """Return the rows of df1 that do not occur in df2 (columns unchanged)."""
    # left merge with indicator marks rows unique to df1 as "left_only"
    merged = df1.merge(df2.drop_duplicates(), how='left', indicator=True)
    only_in_df1 = merged[merged['_merge'] == 'left_only']
    return only_in_df1.drop('_merge', axis=1)
651
-
652
-
653
def construct_df_diff(df: pandas.DataFrame,
                      then: pandas.DataFrame) -> pandas.DataFrame:
    """Build an expected-vs-actual diff for tables whose rows and/or columns differ.

    `df` is the actual result, `then` the expected one. Handles three cases:
    both dimensions differ, only rows differ, or only columns differ.
    """
    actual_rows = calculate_row_difference(df, then)
    expected_rows = calculate_row_difference(then, df)
    actual_columns = df.columns.difference(then.columns)      # columns only in actual
    expected_columns = then.columns.difference(df.columns)    # columns only in expected

    df_diff = pandas.DataFrame()
    modified_df = df
    modified_then = then

    # pad each side with the other's extra columns (as "") so the frames align
    if actual_columns.size > 0:
        modified_then = modified_then.reindex(modified_then.columns.to_list() + actual_columns.to_list(), axis=1)
        modified_then[actual_columns.to_list()] = modified_then[actual_columns.to_list()].fillna('')

    if expected_columns.size > 0:
        modified_df = modified_df.reindex(modified_df.columns.to_list() + expected_columns.to_list(), axis=1)
        modified_df[expected_columns.to_list()] = modified_df[expected_columns.to_list()].fillna('')

    modified_df = modified_df.reindex(modified_then.columns, axis=1)

    if df.shape[0] != then.shape[0] and df.shape[1] != then.shape[1]:
        # both row and column counts differ: recompute row diffs on the padded frames
        # take modified columns and add rows
        actual_rows = calculate_row_difference(modified_df, modified_then)
        expected_rows = calculate_row_difference(modified_then, modified_df)
        df_diff = generate_row_diff(actual_rows, expected_rows)
    elif actual_rows.shape[0] > 0 or expected_rows.shape[0] > 0:
        # only rows differ
        df_diff = generate_row_diff(actual_rows, expected_rows)
    elif actual_columns.size > 0 or expected_columns.size > 0:
        # only columns differ: full cell-by-cell comparison of the padded frames
        df_diff = modified_then.compare(modified_df, result_names=("expected", "actual"), keep_shape=True,
                                        keep_equal=True)
    df_diff.fillna("", inplace=True)
    return df_diff
686
-
687
-
688
def generate_row_diff(actual_rows: pandas.DataFrame, expected_rows: pandas.DataFrame) -> pandas.DataFrame:
    """Render surplus actual rows and missing expected rows as one expected/actual diff."""
    surplus_diff = pandas.DataFrame()
    missing_diff = pandas.DataFrame()

    if actual_rows.shape[0] > 0:
        # rows present in the actual result but not expected
        blank = create_empty_dataframe_with_columns(actual_rows)
        surplus_diff = blank.compare(actual_rows, result_names=("expected", "actual"))

    if expected_rows.shape[0] > 0:
        # rows expected but absent from the actual result
        blank = create_empty_dataframe_with_columns(expected_rows)
        missing_diff = expected_rows.compare(blank, result_names=("expected", "actual"))

    return pandas.concat([surplus_diff, missing_diff], ignore_index=True)
702
-
703
-
704
def create_empty_dataframe_with_columns(df: pandas.DataFrame) -> pandas.DataFrame:
    """Return a frame with df's index and columns, every cell set to ""."""
    return pandas.DataFrame().reindex_like(df).fillna("")
708
-
709
-
710
def review_results(results: List[SpecResult], verbose: bool) -> None:
    """Print a colourised overview of spec results, plus per-failure detail when verbose.

    Side effects only (stdout/log flush); returns None.
    """
    print("===== Result Overview =====")
    # Init dictionaries
    status_dict = defaultdict(lambda: defaultdict(int))
    status_counts = defaultdict(lambda: defaultdict(int))
    colours = {SpecPassed: Fore.GREEN, SpecPassedWithWarning: Fore.YELLOW, SpecSkipped: Fore.YELLOW}
    # Populate dictionaries from results
    for result in results:
        status_counts[result.triple_store][type(result)] += 1
        status_dict[result.spec_uri][result.triple_store] = type(result)

    # Get the list of statuses and list of unique triple stores
    statuses = list(status for inner_dict in status_dict.values() for status in inner_dict.values())
    triple_stores = list(set(status for inner_dict in status_dict.values() for status in inner_dict.keys()))

    # Convert dictionaries to list for tabulate
    table_rows = [[spec_uri] + [
        f"{colours.get(status_dict[spec_uri][triple_store], Fore.RED)}{status_dict[spec_uri][triple_store].__name__}{Style.RESET_ALL}"
        for triple_store in triple_stores] for spec_uri in set(status_dict.keys())]

    status_rows = [[f"{colours.get(status, Fore.RED)}{status.__name__}{Style.RESET_ALL}"] +
                   [f"{colours.get(status, Fore.RED)}{status_counts[triple_store][status]}{Style.RESET_ALL}"
                    for triple_store in triple_stores] for status in set(statuses)]

    # Display tables with tabulate
    print(tabulate(table_rows, headers=['Spec Uris / triple stores'] + triple_stores, tablefmt="pretty"))
    print(tabulate(status_rows, headers=['Status / triple stores'] + triple_stores, tablefmt="pretty"))

    pass_count = statuses.count(SpecPassed)
    warning_count = statuses.count(SpecPassedWithWarning)
    skipped_count = statuses.count(SpecSkipped)
    # anything that is not passed/warned/skipped counts as a failure
    fail_count = len(
        list(filter(lambda status: status not in [SpecPassed, SpecPassedWithWarning, SpecSkipped], statuses)))

    if fail_count:
        overview_colour = Fore.RED
    elif warning_count or skipped_count:
        overview_colour = Fore.YELLOW
    else:
        overview_colour = Fore.GREEN

    logger_setup.flush()
    print(f"{overview_colour}===== {fail_count} failures, {skipped_count} skipped, {Fore.GREEN}{pass_count} passed, "
          f"{overview_colour}{warning_count} passed with warnings =====")

    if verbose and (fail_count or warning_count or skipped_count):
        for res in results:
            # isinstance instead of `type(res) ==`: the result classes have no
            # subclasses of each other, so behaviour is unchanged and idiomatic.
            if isinstance(res, UpdateSpecFailure):
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(f"{Fore.BLUE} In Expected Not In Actual:")
                print(res.graph_comparison.in_expected_not_in_actual.serialize(format="ttl"))
                print()
                print(f"{Fore.RED} in_actual_not_in_expected")
                print(res.graph_comparison.in_actual_not_in_expected.serialize(format="ttl"))
                print(f"{Fore.GREEN} in_both")
                print(res.graph_comparison.in_both.serialize(format="ttl"))

            if isinstance(res, SelectSpecFailure):
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(res.message)
                print(res.table_comparison.to_markdown())
            if isinstance(res, (ConstructSpecFailure, UpdateSpecFailure)):
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
            if isinstance(res, SpecPassedWithWarning):
                print(f"{Fore.YELLOW}Passed with warning {res.spec_uri} {res.triple_store}")
                print(res.warning)
            if isinstance(res, (TripleStoreConnectionError, SparqlExecutionError, SparqlParseFailure)):
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(res.exception)
            if isinstance(res, SpecSkipped):
                print(f"{Fore.YELLOW}Skipped {res.spec_uri} {res.triple_store}")
                print(res.message)
783
-
784
-
785
-
786
-
787
-
788
-
1
+ """
2
+ MIT License
3
+
4
+ Copyright (c) 2023 Semantic Partners Ltd
5
+
6
+ Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ of this software and associated documentation files (the "Software"), to deal
8
+ in the Software without restriction, including without limitation the rights
9
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ copies of the Software, and to permit persons to whom the Software is
11
+ furnished to do so, subject to the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be included in all
14
+ copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
+ SOFTWARE.
23
+ """
24
+
25
+ import os
26
+ from typing import Tuple, List
27
+ from execute_update_spec import execute_update_spec
28
+
29
+ import tomli
30
+ from rdflib.plugins.parsers.notation3 import BadSyntax
31
+
32
+ import logger_setup
33
+ from dataclasses import dataclass
34
+
35
+ from pyparsing import ParseException
36
+ from pathlib import Path
37
+ from requests import ConnectionError, ConnectTimeout, HTTPError, RequestException
38
+
39
+ from rdflib import Graph, URIRef, RDF, XSD, SH, Literal
40
+
41
+ from rdflib.compare import isomorphic, graph_diff
42
+ import pandas
43
+ from multimethods import MultiMethod, Default
44
+
45
+ from namespace import MUST
46
+ import requests
47
+ import json
48
+ from pandas import DataFrame
49
+
50
+ from spec_component import parse_spec_component, WhenSpec, ThenSpec
51
+ from triple_store_dispatch import execute_select_spec, execute_construct_spec
52
+ from utils import get_project_root
53
+ from colorama import Fore, Style
54
+ from tabulate import tabulate
55
+ from collections import defaultdict
56
+ from pyshacl import validate
57
+ import logging
58
+ from http.client import HTTPConnection
59
+ import mustrdAnzo
60
+
61
# Module-level logger for this file.
log = logger_setup.setup_logger(__name__)

# Silence urllib3 TLS warnings and relax the cipher list so requests can talk
# to servers with older or self-signed TLS configurations.
# NOTE(review): this weakens TLS for the whole process — confirm intended.
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'

# Process-wide logging: timestamped messages at DEBUG level.
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
67
+
68
+
69
def debug_requests_on():
    '''Switches on logging of the requests module.'''
    # http.client prints raw request/response lines when debuglevel is non-zero.
    HTTPConnection.debuglevel = 1

    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    urllib3_log = logging.getLogger("requests.packages.urllib3")
    urllib3_log.setLevel(logging.DEBUG)
    urllib3_log.propagate = True
79
def debug_requests_off():
    '''Switches off logging of the requests module, might be some side-effects'''
    # Stop http.client from echoing wire traffic.
    HTTPConnection.debuglevel = 0

    root = logging.getLogger()
    root.setLevel(logging.WARNING)
    root.handlers = []
    urllib3_log = logging.getLogger("requests.packages.urllib3")
    urllib3_log.setLevel(logging.WARNING)
    urllib3_log.propagate = False
89
+
90
+ debug_requests_on()
91
+
92
@dataclass
class Specification:
    """One executable mustrd test: given/when/then plus the target triple store."""
    spec_uri: URIRef      # IRI of the must:TestSpec instance
    triple_store: dict    # connection config as produced by get_triple_stores
    given: Graph          # initial state; None for inherited-state tests
    when: WhenSpec        # the action (query) under test
    then: ThenSpec        # the expected outcome
99
+
100
+
101
@dataclass
class GraphComparison:
    """Three-way diff of an expected vs an actual RDF graph."""
    in_expected_not_in_actual: Graph
    in_actual_not_in_expected: Graph
    in_both: Graph
106
+
107
+
108
@dataclass
class SpecResult:
    """Base class for the outcome of running one spec on one triple store."""
    spec_uri: URIRef
    triple_store: URIRef
112
+
113
+
114
@dataclass
class SpecPassed(SpecResult):
    """Spec ran and the actual result matched the expected result."""
    pass
117
+
118
+
119
@dataclass()
class SpecPassedWithWarning(SpecResult):
    """Spec passed, but with a non-fatal issue worth surfacing."""
    warning: str
122
+
123
+
124
@dataclass
class SelectSpecFailure(SpecResult):
    """SELECT spec failed; carries the expected/actual table diff."""
    table_comparison: pandas.DataFrame
    message: str
128
+
129
+
130
@dataclass
class ConstructSpecFailure(SpecResult):
    """CONSTRUCT spec failed; carries the graph three-way diff."""
    graph_comparison: GraphComparison
133
+
134
+
135
@dataclass
class UpdateSpecFailure(SpecResult):
    """UPDATE spec failed; carries the graph three-way diff."""
    graph_comparison: GraphComparison
138
+
139
+
140
@dataclass
class SparqlParseFailure(SpecResult):
    """The SPARQL in the spec could not be parsed."""
    exception: ParseException
143
+
144
+
145
@dataclass
class SparqlExecutionError(SpecResult):
    """The triple store raised an error while executing the query."""
    exception: Exception
148
+
149
+
150
@dataclass
class TripleStoreConnectionError(SpecResult):
    """The triple store could not be reached."""
    exception: ConnectionError
153
+
154
+
155
@dataclass
class SpecSkipped(SpecResult):
    """Spec was not run; message explains why."""
    message: str
158
+
159
+
160
@dataclass
class SparqlAction:
    """Base class for a SPARQL action carrying the query text."""
    query: str
163
+
164
+
165
@dataclass
class SelectSparqlQuery(SparqlAction):
    """A SPARQL SELECT action."""
    pass
168
+
169
+
170
@dataclass
class ConstructSparqlQuery(SparqlAction):
    """A SPARQL CONSTRUCT action."""
    pass
173
+
174
+
175
@dataclass
class UpdateSparqlQuery(SparqlAction):
    """A SPARQL UPDATE action."""
    pass
178
+
179
+
180
+ # https://github.com/Semantic-partners/mustrd/issues/19
181
+
182
def validate_specs(spec_path: Path, triple_stores: List, shacl_graph: Graph, ont_graph: Graph)\
        -> Tuple[List, Graph, List]:
    """Parse and SHACL-validate every *.mustrd.ttl file under spec_path.

    Returns (valid spec URIs, combined graph of valid spec files, SpecSkipped
    entries for invalid specs — one per configured triple store).
    """
    # os.chdir(spec_path)
    spec_graph = Graph()
    subject_uris = set()
    invalid_specs = []
    ttl_files = list(spec_path.glob('**/*.mustrd.ttl'))
    ttl_files.sort()
    log.info(f"Found {len(ttl_files)} ttl files in {spec_path}")

    for file in ttl_files:
        error_messages = []

        log.info(f"Parse: {file}")
        try:
            file_graph = Graph().parse(file)
        except BadSyntax as e:
            template = "An exception of type {0} occurred when trying to parse a spec file. Arguments:\n{1!r}"
            message = template.format(type(e).__name__, e.args)
            log.error(message)
            error_messages += [f"Could not extract spec from {file} due to exception of type "
                               f"{type(e).__name__} when parsing file"]
            # NOTE(review): this `continue` skips the error-reporting block below,
            # so the message collected for an unparseable file is never turned
            # into a SpecSkipped result — confirm whether that is intended.
            continue
        # run shacl validation
        conforms, results_graph, results_text = validate(file_graph,
                                                         shacl_graph=shacl_graph,
                                                         ont_graph=ont_graph,
                                                         inference='none',
                                                         abort_on_first=False,
                                                         allow_infos=False,
                                                         allow_warnings=False,
                                                         meta_shacl=False,
                                                         advanced=True,
                                                         js=False,
                                                         debug=False)
        if not conforms:
            for msg in results_graph.objects(predicate=SH.resultMessage):
                log.warning(f"{file_graph}")
                log.warning(f"{msg} File: {file.name}")
                error_messages += [f"{msg} File: {file.name}"]

        # make sure there are no duplicate test IRIs in the files
        for subject_uri in file_graph.subjects(RDF.type, MUST.TestSpec):
            if subject_uri in subject_uris:
                log.warning(f"Duplicate subject URI found: {file.name} {subject_uri}. File will not be parsed.")
                error_messages += [f"Duplicate subject URI found in {file.name}."]
                subject_uri = URIRef(str(subject_uri) + "_DUPLICATE")

        if len(error_messages) > 0:
            error_messages.sort()
            error_message = "\n".join(msg for msg in error_messages)
            # NOTE(review): `subject_uri` is the last loop variable above; if a
            # file fails SHACL validation but declares no must:TestSpec subject,
            # this raises NameError (or reuses a stale value from a previous
            # file) — confirm and guard.
            invalid_specs += [SpecSkipped(subject_uri, triple_store["type"], error_message) for triple_store in
                              triple_stores]
        else:
            # logging.info(f"{subject_uri=}")
            # subject_uris.add(subject_uri)
            spec_graph.parse(file)

    valid_spec_uris = list(spec_graph.subjects(RDF.type, MUST.TestSpec))
    log.info(f"Collected {len(valid_spec_uris)} items")
    return valid_spec_uris, spec_graph, invalid_specs
243
+
244
+
245
def run_specs(spec_uris: List[URIRef], spec_graph: Graph, results: List[SpecResult], triple_stores: List[dict],
              given_path: Path = None, when_path: Path = None, then_path: Path = None) -> List[SpecResult]:
    """Build Specification objects for every (triple store, spec URI) pair and run them.

    Appends to — and returns — the caller-supplied `results` list. Triple stores
    carrying an "error" key have all their specs recorded as SpecSkipped.
    """
    specs = []
    try:
        for triple_store in triple_stores:
            if "error" in triple_store:
                log.error(f"{triple_store['error']}. No specs run for this triple store.")
                results += [SpecSkipped(spec_uri, triple_store['type'], triple_store['error']) for spec_uri in
                            spec_uris]
            else:
                for spec_uri in spec_uris:
                    try:
                        specs += [get_spec(spec_uri, spec_graph, given_path, when_path, then_path, triple_store)]
                    except (ValueError, FileNotFoundError, ConnectionError) as e:
                        # Spec could not be assembled (bad component / missing
                        # file / unreachable store): record and carry on.
                        results += [SpecSkipped(spec_uri, triple_store['type'], e)]

    except (BadSyntax, FileNotFoundError) as e:
        template = "An exception of type {0} occurred when trying to parse the triple store configuration file. " \
                   "Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        log.error(message)
        log.error("No specifications will be run.")

    log.info(f"Extracted {len(specs)} specifications that will be run")
    # https://github.com/Semantic-partners/mustrd/issues/115

    for specification in specs:
        results += [run_spec(specification)]

    return results
275
+
276
+
277
def get_spec(spec_uri: URIRef, spec_graph: Graph, given_path: Path = None, when_path: Path = None,
             then_path: Path = None, mustrd_triple_store: dict = None) -> Specification:
    """Assemble a runnable Specification (given/when/then) for one spec URI.

    Defaults to the local RdfLib triple store when none is supplied.
    Raises ValueError / FileNotFoundError / ConnectionError (re-raised after
    logging) when a component cannot be parsed or fetched.
    """
    try:
        if mustrd_triple_store is None:
            mustrd_triple_store = {"type": MUST.RdfLib}

        spec_uri = URIRef(str(spec_uri))

        given_component = parse_spec_component(subject=spec_uri,
                                               predicate=MUST.given,
                                               spec_graph=spec_graph,
                                               folder_location=given_path,
                                               mustrd_triple_store=mustrd_triple_store)

        log.debug(f"Given: {given_component.value}")

        when_component = parse_spec_component(subject=spec_uri,
                                              predicate=MUST.when,
                                              spec_graph=spec_graph,
                                              folder_location=when_path,
                                              mustrd_triple_store=mustrd_triple_store)

        log.debug(f"when: {when_component.value}")

        then_component = parse_spec_component(subject=spec_uri,
                                              predicate=MUST.then,
                                              spec_graph=spec_graph,
                                              folder_location=then_path,
                                              mustrd_triple_store=mustrd_triple_store)

        log.debug(f"then: {then_component.value}")

        # https://github.com/Semantic-partners/mustrd/issues/92
        return Specification(spec_uri, mustrd_triple_store, given_component.value, when_component, then_component)
    except (ValueError, FileNotFoundError) as e:
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        log.error(message)
        raise
    except ConnectionError as e:
        log.error(e)
        raise
319
+
320
+
321
def run_spec(spec: Specification) -> SpecResult:
    """Run one Specification, mapping exceptions to SpecResult subtypes.

    Dispatches to the run_when multimethod; connection-style failures become
    TripleStoreConnectionError, parse failures SparqlParseFailure, and
    execution failures SparqlExecutionError.
    """
    spec_uri = spec.spec_uri
    triple_store = spec.triple_store
    # close_connection = True
    try:
        log.info(f"run_when {spec_uri=}, {triple_store=}, {spec.given=}, {spec.when=}, {spec.then=}")
        if spec.given is not None:
            # Trace the initial state for debugging.
            given_as_turtle = spec.given.serialize(format="turtle")
            log.info(f"{given_as_turtle}")
        return run_when(spec)
    except ParseException as e:
        log.error(f"{type(e)} {e}")
        return SparqlParseFailure(spec_uri, triple_store["type"], e)
    except (ConnectionError, TimeoutError, HTTPError, ConnectTimeout, OSError) as e:
        # close_connection = False
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        log.error(message)
        return TripleStoreConnectionError(spec_uri, triple_store["type"], message)
    except (TypeError, RequestException) as e:
        log.error(f"{type(e)} {e}")
        return SparqlExecutionError(spec_uri, triple_store["type"], e)

    # https://github.com/Semantic-partners/mustrd/issues/78
    # finally:
    #     if type(mustrd_triple_store) == MustrdAnzo and close_connection:
    #         mustrd_triple_store.clear_graph()
348
+
349
+
350
def dispatch_run_when(spec: Specification):
    """Dispatch key for the run_when multimethod: the when-clause SPARQL query type."""
    to = spec.when.queryType
    log.info(f"dispatch_run_when to SPARQL type {to}")
    return to
354
+
355
+
356
# Multimethod dispatching on the when-clause query type (see dispatch_run_when).
run_when = MultiMethod('run_when', dispatch_run_when)
357
+
358
+
359
@run_when.method(MUST.UpdateSparql)
def _multi_run_when_update(spec: Specification):
    """run_when handler for SPARQL UPDATE specs."""
    then = spec.then.value

    result = run_update_spec(spec.spec_uri, spec.given, spec.when.value, then,
                             spec.triple_store, spec.when.bindings)

    return result
367
+
368
+
369
@run_when.method(MUST.ConstructSparql)
def _multi_run_when_construct(spec: Specification):
    """run_when handler for SPARQL CONSTRUCT specs."""
    then = spec.then.value
    result = run_construct_spec(spec.spec_uri, spec.given, spec.when.value, then, spec.triple_store, spec.when.bindings)
    return result
374
+
375
+
376
@run_when.method(MUST.SelectSparql)
def _multi_run_when_select(spec: Specification):
    """run_when handler for SPARQL SELECT specs (honours expected row ordering)."""
    then = spec.then.value
    result = run_select_spec(spec.spec_uri, spec.given, spec.when.value, then, spec.triple_store, spec.then.ordered,
                             spec.when.bindings)
    return result
382
+
383
+
384
@run_when.method(Default)
def _multi_run_when_default(spec: Specification):
    """Fallback run_when handler: skip ASK/DESCRIBE and reject unknown query types."""
    if spec.when.queryType == MUST.AskSparql:
        log.warning(f"Skipping {spec.spec_uri}, SPARQL ASK not implemented.")
        return SpecSkipped(spec.spec_uri, spec.triple_store['type'], "SPARQL ASK not implemented.")
    elif spec.when.queryType == MUST.DescribeSparql:
        log.warning(f"Skipping {spec.spec_uri}, SPARQL DESCRIBE not implemented.")
        return SpecSkipped(spec.spec_uri, spec.triple_store['type'], "SPARQL DESCRIBE not implemented.")
    else:
        log.warning(f"Skipping {spec.spec_uri}, {spec.when.queryType} is not a valid SPARQL query type.")
        return SpecSkipped(spec.spec_uri, spec.triple_store['type'],
                           f"{spec.when.queryType} is not a valid SPARQL query type.")
396
+
397
+
398
def is_json(myjson: str) -> bool:
    """Return True when *myjson* parses as JSON, False otherwise."""
    try:
        json.loads(myjson)
        return True
    except ValueError:
        return False
404
+
405
+
406
def get_triple_stores(triple_store_graph: Graph) -> list[dict]:
    """Build connection dicts from an RDF triple-store configuration graph.

    Each configured store becomes a dict with at least a "type" key; problems
    (missing params, credential failures, unknown types) are stored under
    "error" rather than raised, so run_specs can skip the store gracefully.
    """
    triple_stores = []
    for triple_store_config, rdf_type, triple_store_type in triple_store_graph.triples((None, RDF.type, None)):
        triple_store = {}
        # Local rdf lib triple store
        if triple_store_type == MUST.RdfLibConfig:
            triple_store["type"] = MUST.RdfLib
        # Anzo graph via anzo
        elif triple_store_type == MUST.AnzoConfig:
            triple_store["type"] = MUST.Anzo
            triple_store["url"] = triple_store_graph.value(subject=triple_store_config, predicate=MUST.url)
            triple_store["port"] = triple_store_graph.value(subject=triple_store_config, predicate=MUST.port)
            try:
                # Credentials live in a separate TOML file referenced by the config graph.
                triple_store["username"] = get_credential_from_file(triple_store_config, "username",
                                                                    triple_store_graph.value(
                                                                        subject=triple_store_config,
                                                                        predicate=MUST.username))
                triple_store["password"] = get_credential_from_file(triple_store_config, "password",
                                                                    triple_store_graph.value(
                                                                        subject=triple_store_config,
                                                                        predicate=MUST.password))
            except (FileNotFoundError, ValueError) as e:
                triple_store["error"] = e
            triple_store["gqe_uri"] = triple_store_graph.value(subject=triple_store_config, predicate=MUST.gqeURI)
            triple_store["input_graph"] = triple_store_graph.value(subject=triple_store_config,
                                                                   predicate=MUST.inputGraph)
            triple_store["output_graph"] = triple_store_graph.value(subject=triple_store_config,
                                                                    predicate=MUST.outputGraph)
            try:
                check_triple_store_params(triple_store, ["url", "port", "username", "password", "input_graph"])
            except ValueError as e:
                # NOTE: may overwrite an earlier credential error with the
                # missing-parameter error — only the last one is reported.
                triple_store["error"] = e
        # GraphDB
        elif triple_store_type == MUST.GraphDbConfig:
            triple_store["type"] = MUST.GraphDb
            triple_store["url"] = triple_store_graph.value(subject=triple_store_config, predicate=MUST.url)
            triple_store["port"] = triple_store_graph.value(subject=triple_store_config, predicate=MUST.port)
            try:
                triple_store["username"] = get_credential_from_file(triple_store_config, "username",
                                                                    triple_store_graph.value(
                                                                        subject=triple_store_config,
                                                                        predicate=MUST.username))
                triple_store["password"] = get_credential_from_file(triple_store_config, "password",
                                                                    triple_store_graph.value(
                                                                        subject=triple_store_config,
                                                                        predicate=MUST.password))
            except (FileNotFoundError, ValueError) as e:
                log.error(f"Credential retrieval failed {e}")
                triple_store["error"] = e
            triple_store["repository"] = triple_store_graph.value(subject=triple_store_config,
                                                                  predicate=MUST.repository)
            triple_store["input_graph"] = triple_store_graph.value(subject=triple_store_config,
                                                                   predicate=MUST.inputGraph)

            try:
                check_triple_store_params(triple_store, ["url", "port", "repository"])
            except ValueError as e:
                triple_store["error"] = e
        else:
            triple_store["type"] = triple_store_type
            triple_store["error"] = f"Triple store not implemented: {triple_store_type}"

        triple_stores.append(triple_store)
    return triple_stores
470
+
471
+
472
def check_triple_store_params(triple_store: dict, required_params: List[str]):
    """Raise ValueError naming every required connection parameter that is unset."""
    missing_params = []
    for param in required_params:
        if triple_store.get(param) is None:
            missing_params.append(param)
    if missing_params:
        raise ValueError(f"Cannot establish connection to {triple_store['type']}. "
                         f"Missing required parameter(s): {', '.join(missing_params)}.")
477
+
478
+
479
def get_credential_from_file(triple_store_name: URIRef, credential: str, config_path: Literal) -> str:
    """Read one credential (e.g. "username") for a triple store from a TOML file.

    The TOML file is expected to contain a table keyed by the triple store's
    URI string, with the credential name as a key inside it.
    Raises ValueError when config_path is missing/unreadable and
    FileNotFoundError when the file does not exist.
    """
    log.info(f"get_credential_from_file {triple_store_name}, {credential}, {config_path}")
    if config_path is None:
        raise ValueError(f"Cannot establish connection defined in {triple_store_name}. "
                         f"Missing required parameter: {credential}.")
    # if os.path.isrelative(config_path)
    # project_root = get_project_root()
    path = Path(config_path)
    log.info(f"get_credential_from_file {path}")

    if not os.path.isfile(path):
        log.error(f"couldn't find {path}")
        raise FileNotFoundError(f"Credentials config file not found: {path}")
    try:
        with open(path, "rb") as f:
            config = tomli.load(f)
    except tomli.TOMLDecodeError as e:
        log.error(f"config error {path} {e}")
        # NOTE(review): consider `raise ... from e` to preserve the cause chain.
        raise ValueError(f"Error reading credentials config file: {e}")
    # KeyError propagates if the table/credential is absent from the file.
    return config[str(triple_store_name)][credential]
499
+
500
+
501
# Get column order
def json_results_order(result: str) -> list[str]:
    """Return result columns in declaration order: each variable followed by its _datatype column."""
    head_vars = json.loads(result)["head"]["vars"]
    columns: list[str] = []
    for var_name in head_vars:
        columns.extend((var_name, var_name + "_datatype"))
    return columns
509
+
510
+
511
# Convert sparql json query results as defined in https://www.w3.org/TR/rdf-sparql-json-res/
def json_results_to_panda_dataframe(result: str) -> pandas.DataFrame:
    """Convert a SPARQL JSON result string into a DataFrame.

    Each bound variable produces two columns: the value (as str) and a
    companion "<var>_datatype" column holding the literal datatype IRI
    (xsd:string for untyped literals, xsd:anyURI for resources).
    Cells for variables unbound in a given row are blanked to ''.
    """
    json_result = json.loads(result)
    # Perf fix: collect per-row frames and concat/fillna once, instead of
    # concatenating and filling inside the loop (which was O(n^2) in rows).
    row_frames = []
    for binding in json_result["results"]["bindings"]:
        columns = []
        values = []
        for key, value_object in binding.items():
            columns.append(key)
            values.append(str(value_object["value"]))
            columns.append(key + "_datatype")
            if value_object.get("type") == "literal":
                # Untyped literals default to xsd:string.
                values.append(str(value_object.get("datatype", XSD.string)))
            else:
                values.append(str(XSD.anyURI))

        row_frames.append(pandas.DataFrame([values], columns=columns))

    if not row_frames:
        return pandas.DataFrame()

    frames = pandas.concat(objs=row_frames, ignore_index=True)
    # Rows may bind different variable subsets; blank out the resulting NaNs.
    frames.fillna('', inplace=True)

    if frames.size == 0:
        frames = pandas.DataFrame()
    return frames
537
+
538
+
539
# https://github.com/Semantic-partners/mustrd/issues/110
# https://github.com/Semantic-partners/mustrd/issues/52
def run_select_spec(spec_uri: URIRef,
                    given: Graph,
                    when: str,
                    then: pandas.DataFrame,
                    triple_store: dict,
                    then_ordered: bool = False,
                    bindings: dict = None) -> SpecResult:
    """Run a SELECT spec and compare the result table against the expected one.

    Unless the query itself has an ORDER BY, both tables are sorted by the
    value columns before comparison so row order is ignored. When the query
    IS ordered but the expected table carries no sh:order, the comparison is
    forced to fail with an explanatory message.
    """
    log.info(f"Running select spec {spec_uri} on {triple_store['type']}")

    warning = None
    if triple_store['type'] == MUST.RdfLib and given is None:
        return SpecSkipped(spec_uri, triple_store['type'], "Unable to run Inherited State tests on Rdflib")
    try:
        result = execute_select_spec(triple_store, given, when, bindings)
        if is_json(result):
            df = json_results_to_panda_dataframe(result)
            columns = json_results_order(result)
        else:
            # Non-JSON payload: treat as an unparseable response.
            raise ParseException

        if df.empty is False:
            when_ordered = False

            # Crude textual detection of ORDER BY in the query.
            # NOTE(review): misses e.g. extra whitespace or "ORDER BY ASC(" casing
            # variants beyond these three patterns — confirm acceptable.
            order_list = ["order by ?", "order by desc", "order by asc"]
            if any(pattern in when.lower() for pattern in order_list):
                when_ordered = True
            else:
                # Unordered query: normalise column order and sort by the
                # value columns (every other column, skipping *_datatype).
                df = df[columns]
                df.sort_values(by=columns[::2], inplace=True)

                df.reset_index(inplace=True, drop=True)
                if then_ordered:
                    warning = f"sh:order in {spec_uri} is ignored, no ORDER BY in query"
                    log.warning(warning)

            # Scenario 1: expected no result but got a result
            if then.empty:
                message = f"Expected 0 row(s) and 0 column(s), got {df.shape[0]} row(s) and {round(df.shape[1] / 2)} column(s)"
                then = create_empty_dataframe_with_columns(df)
                df_diff = then.compare(df, result_names=("expected", "actual"))
            else:
                # Scenario 2: expected a result and got a result
                message = f"Expected {then.shape[0]} row(s) and {round(then.shape[1] / 2)} column(s), " \
                          f"got {df.shape[0]} row(s) and {round(df.shape[1] / 2)} column(s)"
                if when_ordered is True and not then_ordered:
                    # Ordered query without sh:order on the expected rows must
                    # fail; an empty diff is replaced so the failure surfaces.
                    message += ". Actual result is ordered, must:then must contain sh:order on every row."
                    if df.shape == then.shape and (df.columns == then.columns).all():
                        df_diff = then.compare(df, result_names=("expected", "actual"))
                        if df_diff.empty:
                            df_diff = df
                    else:
                        df_diff = construct_df_diff(df, then)
                else:
                    if df.shape == then.shape and (df.columns == then.columns).all():
                        df_diff = then.compare(df, result_names=("expected", "actual"))
                    else:
                        df_diff = construct_df_diff(df, then)
        else:

            if then.empty:
                # Scenario 3: expected no result, got no result
                message = f"Expected 0 row(s) and 0 column(s), got 0 row(s) and 0 column(s)"
                df = pandas.DataFrame()
            else:
                # Scenario 4: expected a result, but got an empty result
                message = f"Expected {then.shape[0]} row(s) and {round(then.shape[1] / 2)} column(s), got 0 row(s) and 0 column(s)"
                df = create_empty_dataframe_with_columns(then)
            df_diff = then.compare(df, result_names=("expected", "actual"))

        if df_diff.empty:
            if warning:
                return SpecPassedWithWarning(spec_uri, triple_store["type"], warning)
            else:
                return SpecPassed(spec_uri, triple_store["type"])
        else:
            log.error(message)
            return SelectSpecFailure(spec_uri, triple_store["type"], df_diff, message)

    except ParseException as e:
        return SparqlParseFailure(spec_uri, triple_store["type"], e)
    except NotImplementedError as ex:
        return SpecSkipped(spec_uri, triple_store["type"], ex)
623
+
624
+
625
def run_construct_spec(spec_uri: URIRef,
                       given: Graph,
                       when: str,
                       then: Graph,
                       triple_store: dict,
                       bindings: dict = None) -> SpecResult:
    """Run a CONSTRUCT spec and compare the produced graph with the expected one."""
    log.info(f"Running construct spec {spec_uri} on {triple_store['type']}")

    try:
        actual_graph = execute_construct_spec(triple_store, given, when, bindings)
        # result = mustrd_triple_store.execute_construct(given, when, bindings)

        comparison = graph_comparison(then, actual_graph)
        if isomorphic(actual_graph, then):
            return SpecPassed(spec_uri, triple_store["type"])
        return ConstructSpecFailure(spec_uri, triple_store["type"], comparison)
    except ParseException as e:
        return SparqlParseFailure(spec_uri, triple_store["type"], e)
    except NotImplementedError as ex:
        return SpecSkipped(spec_uri, triple_store["type"], ex)
647
+
648
+
649
def run_update_spec(spec_uri: URIRef,
                    given: Graph,
                    when: str,
                    then: Graph,
                    triple_store: dict,
                    bindings: dict = None) -> SpecResult:
    """Run an UPDATE spec and compare the resulting graph with the expected one."""
    log.info(f"Running update spec {spec_uri} on {triple_store['type']}")

    try:
        actual_graph = execute_update_spec(triple_store, given, when, bindings)

        comparison = graph_comparison(then, actual_graph)
        if isomorphic(actual_graph, then):
            return SpecPassed(spec_uri, triple_store["type"])
        return UpdateSpecFailure(spec_uri, triple_store["type"], comparison)

    except ParseException as e:
        return SparqlParseFailure(spec_uri, triple_store["type"], e)
    except NotImplementedError as ex:
        return SpecSkipped(spec_uri, triple_store["type"], ex)
671
+
672
+
673
def graph_comparison(expected_graph: Graph, actual_graph: Graph) -> GraphComparison:
    """Split two graphs into the triples they share and the one-sided differences."""
    in_both, in_expected, in_actual = graph_diff(expected_graph, actual_graph)
    return GraphComparison(in_expected - in_actual,
                           in_actual - in_expected,
                           in_both)
681
+
682
+
683
def get_then_update(spec_uri: URIRef, spec_graph: Graph) -> Graph:
    """Extract the expected (then) triples of an update spec as a Graph.

    CONSTRUCTs plain triples from the rdf:Statement reifications attached to
    the spec's must:then statements dataset.
    NOTE(review): the "] ; ]" bracket/semicolon structure in the pattern looks
    unbalanced — confirm the query text parses as intended.
    """
    then_query = f"""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    CONSTRUCT {{ ?s ?p ?o }}
    {{
    <{spec_uri}> <{MUST.then}>
        a <{MUST.StatementsDataset}> ;
        <{MUST.hasStatement}> [
            a rdf:Statement ;
            rdf:subject ?s ;
            rdf:predicate ?p ;
            rdf:object ?o ;
        ] ; ]
    }}
    """
    expected_results = spec_graph.query(then_query).graph

    return expected_results
702
+
703
+
704
def calculate_row_difference(df1: pandas.DataFrame,
                             df2: pandas.DataFrame) -> pandas.DataFrame:
    """Return the rows of df1 that do not appear in df2."""
    merged = df1.merge(df2.drop_duplicates(), how='left', indicator=True)
    left_only = merged[merged['_merge'] == 'left_only']
    return left_only.drop('_merge', axis=1)
710
+
711
+
712
def construct_df_diff(df: pandas.DataFrame,
                      then: pandas.DataFrame) -> pandas.DataFrame:
    """Build an expected/actual diff for tables whose shape or columns differ.

    Columns present on only one side are added to the other (filled with '')
    so the two frames become comparable, then row- or cell-level diffs are
    produced depending on where the mismatch lies.
    """
    actual_rows = calculate_row_difference(df, then)
    expected_rows = calculate_row_difference(then, df)
    actual_columns = df.columns.difference(then.columns)
    expected_columns = then.columns.difference(df.columns)

    df_diff = pandas.DataFrame()
    modified_df = df
    modified_then = then

    # Pad the expected frame with columns only the actual result has.
    if actual_columns.size > 0:
        modified_then = modified_then.reindex(modified_then.columns.to_list() + actual_columns.to_list(), axis=1)
        modified_then[actual_columns.to_list()] = modified_then[actual_columns.to_list()].fillna('')

    # Pad the actual frame with columns only the expected result has.
    if expected_columns.size > 0:
        modified_df = modified_df.reindex(modified_df.columns.to_list() + expected_columns.to_list(), axis=1)
        modified_df[expected_columns.to_list()] = modified_df[expected_columns.to_list()].fillna('')

    # Align column order before comparing.
    modified_df = modified_df.reindex(modified_then.columns, axis=1)

    if df.shape[0] != then.shape[0] and df.shape[1] != then.shape[1]:
        # Both row count and column count differ: recompute the row
        # differences on the padded frames and diff those rows.
        actual_rows = calculate_row_difference(modified_df, modified_then)
        expected_rows = calculate_row_difference(modified_then, modified_df)
        df_diff = generate_row_diff(actual_rows, expected_rows)
    elif actual_rows.shape[0] > 0 or expected_rows.shape[0] > 0:
        # Only rows differ.
        df_diff = generate_row_diff(actual_rows, expected_rows)
    elif actual_columns.size > 0 or expected_columns.size > 0:
        # Only columns differ: full cell-by-cell comparison.
        df_diff = modified_then.compare(modified_df, result_names=("expected", "actual"), keep_shape=True,
                                        keep_equal=True)

    return df_diff
745
+
746
+
747
def generate_row_diff(actual_rows: pandas.DataFrame, expected_rows: pandas.DataFrame) -> pandas.DataFrame:
    """Render surplus (actual-only) and missing (expected-only) rows as one compare-style frame."""
    surplus_diff = pandas.DataFrame()
    missing_diff = pandas.DataFrame()

    if actual_rows.shape[0] > 0:
        # Surplus rows: diff against an all-blank frame of the same shape.
        blank = create_empty_dataframe_with_columns(actual_rows)
        surplus_diff = blank.compare(actual_rows, result_names=("expected", "actual"))

    if expected_rows.shape[0] > 0:
        # Missing rows: diff the expected rows against an all-blank frame.
        blank = create_empty_dataframe_with_columns(expected_rows)
        missing_diff = expected_rows.compare(blank, result_names=("expected", "actual"))

    return pandas.concat([surplus_diff, missing_diff], ignore_index=True)
761
+
762
+
763
def create_empty_dataframe_with_columns(original: pandas.DataFrame) -> pandas.DataFrame:
    """Return a copy of *original* with identical shape/columns and every cell set to None."""
    blanked = original.copy()
    for column in blanked.columns:
        # In-place assignment into the copied column's backing array keeps dtype/shape.
        blanked[column].values[:] = None
    return blanked
768
+
769
+
770
def review_results(results: List[SpecResult], verbose: bool) -> None:
    """Print a coloured summary of spec results, plus per-failure detail when verbose.

    Renders two tables (status per spec/store, status counts per store), an
    overall pass/fail line, and — if verbose and anything went wrong — the
    detailed diffs/messages for each non-passing result.
    """
    print("===== Result Overview =====")
    # Init dictionaries
    status_dict = defaultdict(lambda: defaultdict(int))
    status_counts = defaultdict(lambda: defaultdict(int))
    colours = {SpecPassed: Fore.GREEN, SpecPassedWithWarning: Fore.YELLOW, SpecSkipped: Fore.YELLOW}
    # Populate dictionaries from results
    for result in results:
        status_counts[result.triple_store][type(result)] += 1
        status_dict[result.spec_uri][result.triple_store] = type(result)

    # Get the list of statuses and list of unique triple stores
    statuses = list(status for inner_dict in status_dict.values() for status in inner_dict.values())
    triple_stores = list(set(status for inner_dict in status_dict.values() for status in inner_dict.keys()))

    # Convert dictionaries to list for tabulate
    table_rows = [[spec_uri] + [
        f"{colours.get(status_dict[spec_uri][triple_store], Fore.RED)}{status_dict[spec_uri][triple_store].__name__}{Style.RESET_ALL}"
        for triple_store in triple_stores] for spec_uri in set(status_dict.keys())]

    status_rows = [[f"{colours.get(status, Fore.RED)}{status.__name__}{Style.RESET_ALL}"] +
                   [f"{colours.get(status, Fore.RED)}{status_counts[triple_store][status]}{Style.RESET_ALL}"
                    for triple_store in triple_stores] for status in set(statuses)]

    # Display tables with tabulate
    print(tabulate(table_rows, headers=['Spec Uris / triple stores'] + triple_stores, tablefmt="pretty"))
    print(tabulate(status_rows, headers=['Status / triple stores'] + triple_stores, tablefmt="pretty"))

    pass_count = statuses.count(SpecPassed)
    warning_count = statuses.count(SpecPassedWithWarning)
    skipped_count = statuses.count(SpecSkipped)
    # Anything that is not passed/warned/skipped counts as a failure.
    fail_count = len(
        list(filter(lambda status: status not in [SpecPassed, SpecPassedWithWarning, SpecSkipped], statuses)))

    if fail_count:
        overview_colour = Fore.RED
    elif warning_count or skipped_count:
        overview_colour = Fore.YELLOW
    else:
        overview_colour = Fore.GREEN

    logger_setup.flush()
    print(f"{overview_colour}===== {fail_count} failures, {skipped_count} skipped, {Fore.GREEN}{pass_count} passed, "
          f"{overview_colour}{warning_count} passed with warnings =====")

    if verbose and (fail_count or warning_count or skipped_count):
        for res in results:
            if type(res) == UpdateSpecFailure:
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(f"{Fore.BLUE} In Expected Not In Actual:")
                print(res.graph_comparison.in_expected_not_in_actual.serialize(format="ttl"))
                print()
                print(f"{Fore.RED} in_actual_not_in_expected")
                print(res.graph_comparison.in_actual_not_in_expected.serialize(format="ttl"))
                print(f"{Fore.GREEN} in_both")
                print(res.graph_comparison.in_both.serialize(format="ttl"))

            if type(res) == SelectSpecFailure:
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(res.message)
                print(res.table_comparison.to_markdown())
            if type(res) == ConstructSpecFailure or type(res) == UpdateSpecFailure:
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
            if type(res) == SpecPassedWithWarning:
                print(f"{Fore.YELLOW}Passed with warning {res.spec_uri} {res.triple_store}")
                print(res.warning)
            if type(res) == TripleStoreConnectionError or type(res) == SparqlExecutionError or \
                    type(res) == SparqlParseFailure:
                print(f"{Fore.RED}Failed {res.spec_uri} {res.triple_store}")
                print(res.exception)
            if type(res) == SpecSkipped:
                print(f"{Fore.YELLOW}Skipped {res.spec_uri} {res.triple_store}")
                print(res.message)