rdf-construct 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rdf_construct/__init__.py +12 -0
- rdf_construct/__main__.py +0 -0
- rdf_construct/cli.py +1762 -0
- rdf_construct/core/__init__.py +33 -0
- rdf_construct/core/config.py +116 -0
- rdf_construct/core/ordering.py +219 -0
- rdf_construct/core/predicate_order.py +212 -0
- rdf_construct/core/profile.py +157 -0
- rdf_construct/core/selector.py +64 -0
- rdf_construct/core/serialiser.py +232 -0
- rdf_construct/core/utils.py +89 -0
- rdf_construct/cq/__init__.py +77 -0
- rdf_construct/cq/expectations.py +365 -0
- rdf_construct/cq/formatters/__init__.py +45 -0
- rdf_construct/cq/formatters/json.py +104 -0
- rdf_construct/cq/formatters/junit.py +104 -0
- rdf_construct/cq/formatters/text.py +146 -0
- rdf_construct/cq/loader.py +300 -0
- rdf_construct/cq/runner.py +321 -0
- rdf_construct/diff/__init__.py +59 -0
- rdf_construct/diff/change_types.py +214 -0
- rdf_construct/diff/comparator.py +338 -0
- rdf_construct/diff/filters.py +133 -0
- rdf_construct/diff/formatters/__init__.py +71 -0
- rdf_construct/diff/formatters/json.py +192 -0
- rdf_construct/diff/formatters/markdown.py +210 -0
- rdf_construct/diff/formatters/text.py +195 -0
- rdf_construct/docs/__init__.py +60 -0
- rdf_construct/docs/config.py +238 -0
- rdf_construct/docs/extractors.py +603 -0
- rdf_construct/docs/generator.py +360 -0
- rdf_construct/docs/renderers/__init__.py +7 -0
- rdf_construct/docs/renderers/html.py +803 -0
- rdf_construct/docs/renderers/json.py +390 -0
- rdf_construct/docs/renderers/markdown.py +628 -0
- rdf_construct/docs/search.py +278 -0
- rdf_construct/docs/templates/html/base.html.jinja +44 -0
- rdf_construct/docs/templates/html/class.html.jinja +152 -0
- rdf_construct/docs/templates/html/hierarchy.html.jinja +28 -0
- rdf_construct/docs/templates/html/index.html.jinja +110 -0
- rdf_construct/docs/templates/html/instance.html.jinja +90 -0
- rdf_construct/docs/templates/html/namespaces.html.jinja +37 -0
- rdf_construct/docs/templates/html/property.html.jinja +124 -0
- rdf_construct/docs/templates/html/single_page.html.jinja +169 -0
- rdf_construct/lint/__init__.py +75 -0
- rdf_construct/lint/config.py +214 -0
- rdf_construct/lint/engine.py +396 -0
- rdf_construct/lint/formatters.py +327 -0
- rdf_construct/lint/rules.py +692 -0
- rdf_construct/main.py +6 -0
- rdf_construct/puml2rdf/__init__.py +103 -0
- rdf_construct/puml2rdf/config.py +230 -0
- rdf_construct/puml2rdf/converter.py +420 -0
- rdf_construct/puml2rdf/merger.py +200 -0
- rdf_construct/puml2rdf/model.py +202 -0
- rdf_construct/puml2rdf/parser.py +565 -0
- rdf_construct/puml2rdf/validators.py +451 -0
- rdf_construct/shacl/__init__.py +56 -0
- rdf_construct/shacl/config.py +166 -0
- rdf_construct/shacl/converters.py +520 -0
- rdf_construct/shacl/generator.py +364 -0
- rdf_construct/shacl/namespaces.py +93 -0
- rdf_construct/stats/__init__.py +29 -0
- rdf_construct/stats/collector.py +178 -0
- rdf_construct/stats/comparator.py +298 -0
- rdf_construct/stats/formatters/__init__.py +83 -0
- rdf_construct/stats/formatters/json.py +38 -0
- rdf_construct/stats/formatters/markdown.py +153 -0
- rdf_construct/stats/formatters/text.py +186 -0
- rdf_construct/stats/metrics/__init__.py +26 -0
- rdf_construct/stats/metrics/basic.py +147 -0
- rdf_construct/stats/metrics/complexity.py +137 -0
- rdf_construct/stats/metrics/connectivity.py +130 -0
- rdf_construct/stats/metrics/documentation.py +128 -0
- rdf_construct/stats/metrics/hierarchy.py +207 -0
- rdf_construct/stats/metrics/properties.py +88 -0
- rdf_construct/uml/__init__.py +22 -0
- rdf_construct/uml/context.py +194 -0
- rdf_construct/uml/mapper.py +371 -0
- rdf_construct/uml/odm_renderer.py +789 -0
- rdf_construct/uml/renderer.py +684 -0
- rdf_construct/uml/uml_layout.py +393 -0
- rdf_construct/uml/uml_style.py +613 -0
- rdf_construct-0.2.0.dist-info/METADATA +431 -0
- rdf_construct-0.2.0.dist-info/RECORD +88 -0
- rdf_construct-0.2.0.dist-info/WHEEL +4 -0
- rdf_construct-0.2.0.dist-info/entry_points.txt +3 -0
- rdf_construct-0.2.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
"""Text output formatter for competency question test results.
|
|
2
|
+
|
|
3
|
+
Produces human-readable console output with colors using Click.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from rdf_construct.cq.runner import CQTestResults, CQTestResult, CQStatus
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def format_text(results: CQTestResults, verbose: bool = False,
                use_color: bool = True) -> str:
    """Render competency question test results as console-friendly text.

    Args:
        results: Test results to render
        verbose: Include detailed information (result counts, durations)
        use_color: Include ANSI color codes (for terminal output)

    Returns:
        Formatted text string
    """
    out: list[str] = []

    # Title line, underlined with '='.
    title = "Competency Question Tests"
    if results.ontology_file:
        title = f"Competency Question Tests: {results.ontology_file.name}"
    out.extend([title, "=" * len(title), ""])

    for res in results.results:
        out.append(_format_result_line(res, verbose, use_color))

        # Failure detail: expected vs actual from the check result.
        if res.status == CQStatus.FAIL and res.check_result:
            out.append(_indent(f"Expected: {res.check_result.expected}"))
            out.append(_indent(f"Actual: {res.check_result.actual}"))

        # Error detail.
        if res.status == CQStatus.ERROR and res.error:
            out.append(_indent(f"Error: {res.error}"))

        # Verbose extras.
        if verbose and res.result_count is not None:
            out.append(_indent(f"Results: {res.result_count}"))
        if verbose and res.duration_ms:
            out.append(_indent(f"Duration: {res.duration_ms:.1f}ms"))

        # Blank separator after each result.
        out.append("")

    # Summary counts.
    out.append(_format_summary(results, use_color))

    if verbose:
        out.append(f"Total duration: {results.total_duration_ms:.1f}ms")

    return "\n".join(out)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _format_result_line(result: CQTestResult, verbose: bool,
                        use_color: bool) -> str:
    """Build the one-line summary for a single test result."""
    # Status label and the colour it is rendered in.
    label, colour = {
        CQStatus.PASS: ("PASS", "green"),
        CQStatus.FAIL: ("FAIL", "red"),
        CQStatus.ERROR: ("ERROR", "red"),
        CQStatus.SKIP: ("SKIP", "yellow"),
    }[result.status]

    badge = f"[{label}]"
    if use_color:
        badge = _colorise(badge, colour)

    # Parenthesised extras: result count on PASS, reason on SKIP.
    extras: list[str] = []
    if result.result_count is not None and result.status == CQStatus.PASS:
        extras.append(f"{result.result_count} result(s)")
    if result.status == CQStatus.SKIP and result.test.skip_reason:
        extras.append(result.test.skip_reason)

    suffix = f" ({', '.join(extras)})" if extras else ""
    return f"{badge} {result.test.id}: {result.test.name}{suffix}"
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _format_summary(results: CQTestResults, use_color: bool) -> str:
    """Build the 'Results: ...' summary line from per-status counts."""
    # (count, label, colour) per status bucket, in display order.
    buckets = [
        (results.passed_count, "passed", "green"),
        (results.failed_count, "failed", "red"),
        (results.error_count, "error(s)", "red"),
        (results.skipped_count, "skipped", "yellow"),
    ]

    parts: list[str] = []
    for count, label, colour in buckets:
        if count > 0:
            text = f"{count} {label}"
            parts.append(_colorise(text, colour) if use_color else text)

    return f"Results: {', '.join(parts)}"
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _indent(text: str, spaces: int = 7) -> str:
|
|
125
|
+
"""Indent text for nested display."""
|
|
126
|
+
return " " * spaces + text
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _colorise(text: str, color: str) -> str:
|
|
130
|
+
"""Add ANSI color codes to text.
|
|
131
|
+
|
|
132
|
+
Supported colors: green, red, yellow, cyan, bold
|
|
133
|
+
"""
|
|
134
|
+
color_codes = {
|
|
135
|
+
"green": "\033[32m",
|
|
136
|
+
"red": "\033[31m",
|
|
137
|
+
"yellow": "\033[33m",
|
|
138
|
+
"cyan": "\033[36m",
|
|
139
|
+
"bold": "\033[1m",
|
|
140
|
+
"reset": "\033[0m",
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
code = color_codes.get(color, "")
|
|
144
|
+
reset = color_codes["reset"]
|
|
145
|
+
|
|
146
|
+
return f"{code}{text}{reset}"
|
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
"""YAML test file loader for competency question tests.
|
|
2
|
+
|
|
3
|
+
Parses YAML files containing competency questions with SPARQL queries
|
|
4
|
+
and their expected results.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import yaml
|
|
12
|
+
from rdflib import Graph
|
|
13
|
+
|
|
14
|
+
from rdf_construct.cq.expectations import Expectation, parse_expectation
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class CQTest:
    """A single competency question test.

    Attributes:
        id: Unique identifier for the test (e.g., "cq-001")
        name: Human-readable name (defaults to the id when not given)
        query: SPARQL query string
        expectation: What result is expected when the query runs
        description: Optional longer description
        tags: Tags for filtering tests (e.g., ["core", "schema"])
        skip: Whether to skip this test
        skip_reason: Reason for skipping (if skip is True)
    """
    id: str
    name: str
    query: str
    expectation: Expectation
    description: str | None = None
    tags: list[str] = field(default_factory=list)
    skip: bool = False
    skip_reason: str | None = None
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@dataclass
class CQTestSuite:
    """A suite of competency question tests.

    Attributes:
        prefixes: Namespace prefix definitions shared across all tests
        questions: List of competency question tests
        data_graph: Optional sample data graph merged with the ontology
        version: Optional format version string
        name: Optional suite name
        description: Optional suite description
    """
    prefixes: dict[str, str]
    questions: list[CQTest]
    data_graph: Graph | None = None
    version: str | None = None
    name: str | None = None
    description: str | None = None

    def filter_by_tags(self, include_tags: set[str] | None = None,
                       exclude_tags: set[str] | None = None) -> "CQTestSuite":
        """Return a new suite with only tests matching tag criteria.

        Args:
            include_tags: If set, only include tests with at least one of these tags
            exclude_tags: If set, exclude tests with any of these tags

        Returns:
            New CQTestSuite with filtered questions
        """
        def _keep(test: CQTest) -> bool:
            # Exclusion wins over inclusion.
            tags = set(test.tags)
            if exclude_tags and tags & exclude_tags:
                return False
            return not include_tags or bool(tags & include_tags)

        return CQTestSuite(
            prefixes=self.prefixes,
            questions=[q for q in self.questions if _keep(q)],
            data_graph=self.data_graph,
            version=self.version,
            name=self.name,
            description=self.description,
        )
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def load_test_suite(path: Path, base_dir: Path | None = None) -> CQTestSuite:
    """Load a competency question test suite from a YAML file.

    Args:
        path: Path to the YAML file
        base_dir: Base directory for resolving relative file paths
            (defaults to parent directory of the YAML file)

    Returns:
        Parsed CQTestSuite

    Raises:
        FileNotFoundError: If the YAML file doesn't exist
        ValueError: If the YAML is malformed or invalid
    """
    if not path.exists():
        raise FileNotFoundError(f"Test suite file not found: {path}")

    if base_dir is None:
        base_dir = path.parent

    with open(path, "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)

    if config is None:
        raise ValueError(f"Empty test suite file: {path}")
    # A valid suite file must be a YAML mapping; anything else (a list or
    # bare scalar) would otherwise crash on config.get() below with an
    # opaque AttributeError instead of a clear error.
    if not isinstance(config, dict):
        raise ValueError(f"Test suite file must be a YAML mapping: {path}")

    # Parse metadata
    version = config.get("version")
    name = config.get("name")
    description = config.get("description")

    # Parse prefixes
    prefixes = config.get("prefixes", {})
    if not isinstance(prefixes, dict):
        raise ValueError("'prefixes' must be a dictionary")

    # Parse sample data (inline Turtle and/or external files)
    data_graph = _load_data(config.get("data", {}), prefixes, base_dir)

    # Parse questions
    questions_raw = config.get("questions", [])
    if not isinstance(questions_raw, list):
        raise ValueError("'questions' must be a list")

    questions = []
    for i, q in enumerate(questions_raw):
        try:
            question = _parse_question(q, prefixes)
            questions.append(question)
        except Exception as e:
            # Identify the offending question by id where possible,
            # otherwise by its position in the list.
            q_id = q.get("id", f"question[{i}]") if isinstance(q, dict) else f"question[{i}]"
            raise ValueError(f"Error parsing {q_id}: {e}") from e

    return CQTestSuite(
        prefixes=prefixes,
        questions=questions,
        data_graph=data_graph,
        version=version,
        name=name,
        description=description,
    )
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def _load_data(data_config: dict | None, prefixes: dict[str, str],
               base_dir: Path) -> Graph | None:
    """Build the sample-data graph described by a 'data' configuration.

    Supports:
    - Inline Turtle data
    - External file references
    - Both combined

    Args:
        data_config: Data configuration dict from YAML
        prefixes: Prefix definitions to apply to inline data
        base_dir: Base directory for resolving file paths

    Returns:
        Combined data graph, or None if no data specified
    """
    if not data_config:
        return None

    graph = Graph()

    # Make the suite's prefixes available on the graph.
    for prefix, uri in prefixes.items():
        graph.bind(prefix, uri)

    # Inline Turtle: prepend @prefix declarations so the snippet can use
    # the suite's prefixes without declaring them itself.
    inline = data_config.get("inline")
    if isinstance(inline, str):
        header = "\n".join(
            f"@prefix {p}: <{u}> ." for p, u in prefixes.items()
        )
        graph.parse(data=f"{header}\n\n{inline}", format="turtle")

    # External files: a single path string or a list of them.
    if "files" in data_config:
        entries = data_config["files"]
        if isinstance(entries, str):
            entries = [entries]

        for entry in entries:
            full_path = base_dir / entry
            if not full_path.exists():
                raise FileNotFoundError(f"Data file not found: {full_path}")

            # Pick the RDF serialisation from the file extension.
            graph.parse(str(full_path), format=_format_from_extension(full_path))

    return graph if len(graph) > 0 else None
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def _format_from_extension(path: Path) -> str:
|
|
215
|
+
"""Infer RDF format from file extension."""
|
|
216
|
+
suffix = path.suffix.lower()
|
|
217
|
+
format_map = {
|
|
218
|
+
".ttl": "turtle",
|
|
219
|
+
".turtle": "turtle",
|
|
220
|
+
".rdf": "xml",
|
|
221
|
+
".xml": "xml",
|
|
222
|
+
".owl": "xml",
|
|
223
|
+
".nt": "nt",
|
|
224
|
+
".ntriples": "nt",
|
|
225
|
+
".n3": "n3",
|
|
226
|
+
".jsonld": "json-ld",
|
|
227
|
+
".json": "json-ld",
|
|
228
|
+
}
|
|
229
|
+
return format_map.get(suffix, "turtle")
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def _parse_question(q: dict[str, Any], prefixes: dict[str, str]) -> CQTest:
    """Turn one YAML question mapping into a CQTest.

    Args:
        q: Question dict from YAML
        prefixes: Prefix definitions (accepted for symmetry with the
            loader; not consumed here — queries get their prefixes
            injected separately at run time)

    Returns:
        Parsed CQTest

    Raises:
        ValueError: If required fields are missing or invalid
    """
    # Required fields, checked in order so 'id' is available for messages.
    if "id" not in q:
        raise ValueError("Question missing required 'id' field")
    for required in ("query", "expect"):
        if required not in q:
            raise ValueError(
                f"Question '{q['id']}' missing required '{required}' field"
            )

    # Tags may be a single string or a list of strings.
    tags = q.get("tags", [])
    if isinstance(tags, str):
        tags = [tags]

    return CQTest(
        id=q["id"],
        name=q.get("name", q["id"]),
        description=q.get("description"),
        tags=tags,
        query=q["query"],
        expectation=parse_expectation(q["expect"]),
        skip=q.get("skip", False),
        skip_reason=q.get("skip_reason"),
    )
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def build_query_with_prefixes(query: str, prefixes: dict[str, str]) -> str:
    """Inject prefix declarations into a SPARQL query if not present.

    A declaration is only added when its exact text (case-insensitively)
    does not already appear in the query.

    Args:
        query: SPARQL query string
        prefixes: Prefix definitions to inject

    Returns:
        Query with any missing prefix declarations prepended
    """
    haystack = query.upper().strip()

    missing = [
        f"PREFIX {prefix}: <{uri}>"
        for prefix, uri in prefixes.items()
        if f"PREFIX {prefix}: <{uri}>".upper() not in haystack
    ]

    if not missing:
        return query
    return "\n".join(missing) + "\n\n" + query
|