@mcptoolshop/accessibility-suite 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +63 -0
- package/LICENSE +21 -0
- package/README.md +37 -0
- package/docs/prov-spec/.github/workflows/ci.yml +68 -0
- package/docs/prov-spec/CHANGELOG.md +69 -0
- package/docs/prov-spec/CODE_OF_CONDUCT.md +129 -0
- package/docs/prov-spec/CONFORMANCE_LEVELS.md +223 -0
- package/docs/prov-spec/CONTRIBUTING.md +145 -0
- package/docs/prov-spec/IMPLEMENTER_CHECKLIST.md +137 -0
- package/docs/prov-spec/LICENSE +21 -0
- package/docs/prov-spec/PRESS_RELEASE.md +74 -0
- package/docs/prov-spec/README.md +182 -0
- package/docs/prov-spec/SETUP.md +135 -0
- package/docs/prov-spec/WHY.md +86 -0
- package/docs/prov-spec/examples/artifact.example.json +14 -0
- package/docs/prov-spec/examples/artifact.ref.example.json +9 -0
- package/docs/prov-spec/examples/evidence.example.json +6 -0
- package/docs/prov-spec/examples/mcp.envelope.example.json +97 -0
- package/docs/prov-spec/examples/mcp.request.example.json +28 -0
- package/docs/prov-spec/examples/prov.record.example.json +35 -0
- package/docs/prov-spec/interop/PROOF_NODE_ENGINE.md +114 -0
- package/docs/prov-spec/spec/MCP_COMPATIBILITY.md +241 -0
- package/docs/prov-spec/spec/PROV_METHODS_CATALOG.md +142 -0
- package/docs/prov-spec/spec/PROV_METHODS_SPEC.md +397 -0
- package/docs/prov-spec/spec/methods.json +213 -0
- package/docs/prov-spec/spec/schemas/artifact.ref.schema.v0.1.json +58 -0
- package/docs/prov-spec/spec/schemas/artifact.schema.v0.1.json +61 -0
- package/docs/prov-spec/spec/schemas/assist.request.schema.v0.1.json +52 -0
- package/docs/prov-spec/spec/schemas/assist.response.schema.v0.1.json +70 -0
- package/docs/prov-spec/spec/schemas/cli.error.schema.v0.1.json +78 -0
- package/docs/prov-spec/spec/schemas/evidence.schema.v0.1.json +37 -0
- package/docs/prov-spec/spec/schemas/mcp.envelope.schema.v0.1.json +141 -0
- package/docs/prov-spec/spec/schemas/mcp.request.schema.v0.1.json +79 -0
- package/docs/prov-spec/spec/schemas/methods.schema.json +93 -0
- package/docs/prov-spec/spec/schemas/prov-capabilities.schema.json +122 -0
- package/docs/prov-spec/spec/schemas/prov.record.schema.v0.1.json +133 -0
- package/docs/prov-spec/spec/vectors/adapter.wrap.envelope_v0_1/expected.json +4 -0
- package/docs/prov-spec/spec/vectors/adapter.wrap.envelope_v0_1/input.json +1 -0
- package/docs/prov-spec/spec/vectors/adapter.wrap.envelope_v0_1/negative/double_wrapped.json +14 -0
- package/docs/prov-spec/spec/vectors/adapter.wrap.envelope_v0_1/negative/wrong_schema_version.json +11 -0
- package/docs/prov-spec/spec/vectors/engine.extract.evidence.json_pointer/expected.json +24 -0
- package/docs/prov-spec/spec/vectors/engine.extract.evidence.json_pointer/input.json +8 -0
- package/docs/prov-spec/spec/vectors/integrity.digest.sha256/expected.json +7 -0
- package/docs/prov-spec/spec/vectors/integrity.digest.sha256/input.json +1 -0
- package/docs/prov-spec/spec/vectors/integrity.digest.sha256/negative/non_hex_chars.json +16 -0
- package/docs/prov-spec/spec/vectors/integrity.digest.sha256/negative/uppercase_hex.json +16 -0
- package/docs/prov-spec/spec/vectors/integrity.digest.sha256/negative/wrong_length.json +16 -0
- package/docs/prov-spec/spec/vectors/method_id_syntax/negative/hyphen_separator.json +8 -0
- package/docs/prov-spec/spec/vectors/method_id_syntax/negative/reserved_namespace.json +8 -0
- package/docs/prov-spec/spec/vectors/method_id_syntax/negative/starts_with_digit.json +8 -0
- package/docs/prov-spec/spec/vectors/method_id_syntax/negative/uppercase.json +8 -0
- package/docs/prov-spec/spec/vectors/method_id_syntax/positive/valid_ids.json +18 -0
- package/docs/prov-spec/tools/python/prov_validator.py +428 -0
- package/examples/a11y-demo-site/.github/workflows/a11y-artifacts.yml +81 -0
- package/examples/a11y-demo-site/.github/workflows/a11y.yml +34 -0
- package/examples/a11y-demo-site/CODE_OF_CONDUCT.md +129 -0
- package/examples/a11y-demo-site/CONTRIBUTING.md +83 -0
- package/examples/a11y-demo-site/LICENSE +21 -0
- package/examples/a11y-demo-site/README.md +155 -0
- package/examples/a11y-demo-site/html/contact.html +15 -0
- package/examples/a11y-demo-site/html/index.html +20 -0
- package/examples/a11y-demo-site/scripts/a11y.sh +20 -0
- package/package.json +26 -0
- package/src/a11y-assist/.github/workflows/publish.yml +52 -0
- package/src/a11y-assist/.github/workflows/test.yml +30 -0
- package/src/a11y-assist/A11Y_ASSIST_TEST_COVERAGE_REQUIREMENTS.md +104 -0
- package/src/a11y-assist/CODE_OF_CONDUCT.md +129 -0
- package/src/a11y-assist/CONTRIBUTING.md +98 -0
- package/src/a11y-assist/ENGINE.md +363 -0
- package/src/a11y-assist/LICENSE +21 -0
- package/src/a11y-assist/PRESS_RELEASE.md +71 -0
- package/src/a11y-assist/QUICKSTART.md +101 -0
- package/src/a11y-assist/README.md +192 -0
- package/src/a11y-assist/RELEASE_NOTES.md +319 -0
- package/src/a11y-assist/a11y_assist/__init__.py +3 -0
- package/src/a11y-assist/a11y_assist/cli.py +599 -0
- package/src/a11y-assist/a11y_assist/from_cli_error.py +149 -0
- package/src/a11y-assist/a11y_assist/guard.py +444 -0
- package/src/a11y-assist/a11y_assist/ingest.py +407 -0
- package/src/a11y-assist/a11y_assist/methods.py +137 -0
- package/src/a11y-assist/a11y_assist/parse_raw.py +71 -0
- package/src/a11y-assist/a11y_assist/profiles/__init__.py +29 -0
- package/src/a11y-assist/a11y_assist/profiles/cognitive_load.py +245 -0
- package/src/a11y-assist/a11y_assist/profiles/cognitive_load_render.py +86 -0
- package/src/a11y-assist/a11y_assist/profiles/dyslexia.py +144 -0
- package/src/a11y-assist/a11y_assist/profiles/dyslexia_render.py +77 -0
- package/src/a11y-assist/a11y_assist/profiles/plain_language.py +119 -0
- package/src/a11y-assist/a11y_assist/profiles/plain_language_render.py +66 -0
- package/src/a11y-assist/a11y_assist/profiles/screen_reader.py +348 -0
- package/src/a11y-assist/a11y_assist/profiles/screen_reader_render.py +89 -0
- package/src/a11y-assist/a11y_assist/render.py +95 -0
- package/src/a11y-assist/a11y_assist/schemas/assist.request.schema.v0.1.json +52 -0
- package/src/a11y-assist/a11y_assist/schemas/assist.response.schema.v0.1.json +70 -0
- package/src/a11y-assist/a11y_assist/schemas/cli.error.schema.v0.1.json +78 -0
- package/src/a11y-assist/a11y_assist/storage.py +31 -0
- package/src/a11y-assist/pyproject.toml +60 -0
- package/src/a11y-assist/tests/__init__.py +1 -0
- package/src/a11y-assist/tests/fixtures/base_inputs/cli_error_high.json +18 -0
- package/src/a11y-assist/tests/fixtures/base_inputs/cli_error_medium.json +16 -0
- package/src/a11y-assist/tests/fixtures/base_inputs/raw_text_low.txt +3 -0
- package/src/a11y-assist/tests/fixtures/cli_error_good.json +9 -0
- package/src/a11y-assist/tests/fixtures/cli_error_missing_id.json +7 -0
- package/src/a11y-assist/tests/fixtures/cli_error_string_format.json +7 -0
- package/src/a11y-assist/tests/fixtures/expected/cognitive_load_high.txt +20 -0
- package/src/a11y-assist/tests/fixtures/expected/dyslexia_high.txt +20 -0
- package/src/a11y-assist/tests/fixtures/expected/lowvision_high.txt +18 -0
- package/src/a11y-assist/tests/fixtures/expected/plain_language_high.txt +14 -0
- package/src/a11y-assist/tests/fixtures/expected/screen_reader_high.txt +19 -0
- package/src/a11y-assist/tests/fixtures/golden_screen_reader_cli_error.txt +16 -0
- package/src/a11y-assist/tests/fixtures/golden_screen_reader_raw_no_id.txt +14 -0
- package/src/a11y-assist/tests/fixtures/golden_screen_reader_raw_with_id.txt +14 -0
- package/src/a11y-assist/tests/fixtures/raw_good.txt +11 -0
- package/src/a11y-assist/tests/fixtures/raw_no_id.txt +2 -0
- package/src/a11y-assist/tests/test_cognitive_load.py +469 -0
- package/src/a11y-assist/tests/test_dyslexia.py +337 -0
- package/src/a11y-assist/tests/test_explain.py +74 -0
- package/src/a11y-assist/tests/test_golden.py +127 -0
- package/src/a11y-assist/tests/test_guard.py +819 -0
- package/src/a11y-assist/tests/test_guard_integration.py +457 -0
- package/src/a11y-assist/tests/test_ingest.py +311 -0
- package/src/a11y-assist/tests/test_methods_metadata.py +236 -0
- package/src/a11y-assist/tests/test_plain_language.py +348 -0
- package/src/a11y-assist/tests/test_render.py +117 -0
- package/src/a11y-assist/tests/test_screen_reader.py +703 -0
- package/src/a11y-assist/tests/test_storage_last.py +61 -0
- package/src/a11y-assist/tests/test_triage.py +86 -0
- package/src/a11y-ci/.github/workflows/ci.yml +43 -0
- package/src/a11y-ci/.github/workflows/test.yml +30 -0
- package/src/a11y-ci/A11Y_CI_TEST_COVERAGE_REQUIREMENTS.md +94 -0
- package/src/a11y-ci/CODE_OF_CONDUCT.md +129 -0
- package/src/a11y-ci/CONTRIBUTING.md +142 -0
- package/src/a11y-ci/LICENSE +21 -0
- package/src/a11y-ci/README.md +105 -0
- package/src/a11y-ci/a11y_ci/__init__.py +3 -0
- package/src/a11y-ci/a11y_ci/allowlist.py +83 -0
- package/src/a11y-ci/a11y_ci/cli.py +145 -0
- package/src/a11y-ci/a11y_ci/gate.py +131 -0
- package/src/a11y-ci/a11y_ci/render.py +48 -0
- package/src/a11y-ci/a11y_ci/schemas/allowlist.schema.json +24 -0
- package/src/a11y-ci/a11y_ci/scorecard.py +99 -0
- package/src/a11y-ci/npm/package.json +35 -0
- package/src/a11y-ci/pyproject.toml +64 -0
- package/src/a11y-ci/tests/__init__.py +1 -0
- package/src/a11y-ci/tests/fixtures/allowlist_expired.json +10 -0
- package/src/a11y-ci/tests/fixtures/allowlist_ok.json +10 -0
- package/src/a11y-ci/tests/fixtures/baseline_ok.json +7 -0
- package/src/a11y-ci/tests/fixtures/current_fail.json +6 -0
- package/src/a11y-ci/tests/fixtures/current_ok.json +6 -0
- package/src/a11y-ci/tests/fixtures/current_regresses.json +7 -0
- package/src/a11y-ci/tests/test_gate.py +134 -0
- package/src/a11y-evidence-engine/.github/workflows/ci.yml +53 -0
- package/src/a11y-evidence-engine/CODE_OF_CONDUCT.md +129 -0
- package/src/a11y-evidence-engine/CONTRIBUTING.md +128 -0
- package/src/a11y-evidence-engine/LICENSE +21 -0
- package/src/a11y-evidence-engine/README.md +71 -0
- package/src/a11y-evidence-engine/bin/a11y-engine.js +11 -0
- package/src/a11y-evidence-engine/fixtures/bad/button-no-name.html +30 -0
- package/src/a11y-evidence-engine/fixtures/bad/img-missing-alt.html +19 -0
- package/src/a11y-evidence-engine/fixtures/bad/input-missing-label.html +26 -0
- package/src/a11y-evidence-engine/fixtures/bad/missing-lang.html +11 -0
- package/src/a11y-evidence-engine/fixtures/good/index.html +29 -0
- package/src/a11y-evidence-engine/package-lock.json +109 -0
- package/src/a11y-evidence-engine/package.json +45 -0
- package/src/a11y-evidence-engine/src/cli.js +74 -0
- package/src/a11y-evidence-engine/src/evidence/canonicalize.js +52 -0
- package/src/a11y-evidence-engine/src/evidence/json_pointer.js +34 -0
- package/src/a11y-evidence-engine/src/evidence/prov_emit.js +153 -0
- package/src/a11y-evidence-engine/src/fswalk.js +56 -0
- package/src/a11y-evidence-engine/src/html_parse.js +117 -0
- package/src/a11y-evidence-engine/src/ids.js +53 -0
- package/src/a11y-evidence-engine/src/rules/document_missing_lang.js +50 -0
- package/src/a11y-evidence-engine/src/rules/form_control_missing_label.js +105 -0
- package/src/a11y-evidence-engine/src/rules/img_missing_alt.js +77 -0
- package/src/a11y-evidence-engine/src/rules/index.js +37 -0
- package/src/a11y-evidence-engine/src/rules/interactive_missing_name.js +129 -0
- package/src/a11y-evidence-engine/src/scan.js +128 -0
- package/src/a11y-evidence-engine/test/scan.test.js +149 -0
- package/src/a11y-evidence-engine/test/vectors.test.js +200 -0
- package/src/a11y-lint/.github/workflows/ci.yml +46 -0
- package/src/a11y-lint/.github/workflows/test.yml +34 -0
- package/src/a11y-lint/CODE_OF_CONDUCT.md +129 -0
- package/src/a11y-lint/CONTRIBUTING.md +70 -0
- package/src/a11y-lint/GOVERNANCE.md +57 -0
- package/src/a11y-lint/LICENSE +21 -0
- package/src/a11y-lint/PRESS_RELEASE.md +50 -0
- package/src/a11y-lint/README.md +276 -0
- package/src/a11y-lint/RELEASE_NOTES.md +57 -0
- package/src/a11y-lint/RELEASING.md +57 -0
- package/src/a11y-lint/a11y_lint/__init__.py +64 -0
- package/src/a11y-lint/a11y_lint/cli.py +319 -0
- package/src/a11y-lint/a11y_lint/errors.py +252 -0
- package/src/a11y-lint/a11y_lint/render.py +293 -0
- package/src/a11y-lint/a11y_lint/report_md.py +289 -0
- package/src/a11y-lint/a11y_lint/scan_cli_text.py +434 -0
- package/src/a11y-lint/a11y_lint/schemas/cli.error.schema.v0.1.json +83 -0
- package/src/a11y-lint/a11y_lint/scorecard.py +244 -0
- package/src/a11y-lint/a11y_lint/validate.py +225 -0
- package/src/a11y-lint/pyproject.toml +75 -0
- package/src/a11y-lint/tests/__init__.py +1 -0
- package/src/a11y-lint/tests/test_cli.py +200 -0
- package/src/a11y-lint/tests/test_errors.py +188 -0
- package/src/a11y-lint/tests/test_render.py +202 -0
- package/src/a11y-lint/tests/test_report_md.py +188 -0
- package/src/a11y-lint/tests/test_scan_cli_text.py +290 -0
- package/src/a11y-lint/tests/test_scorecard.py +195 -0
- package/src/a11y-lint/tests/test_validate.py +257 -0
- package/src/a11y-mcp-tools/.github/workflows/ci.yml +53 -0
- package/src/a11y-mcp-tools/CODE_OF_CONDUCT.md +129 -0
- package/src/a11y-mcp-tools/CONTRIBUTING.md +136 -0
- package/src/a11y-mcp-tools/LICENSE +21 -0
- package/src/a11y-mcp-tools/PROV_METHODS_CATALOG.md +104 -0
- package/src/a11y-mcp-tools/README.md +168 -0
- package/src/a11y-mcp-tools/bin/cli.js +452 -0
- package/src/a11y-mcp-tools/bin/server.js +244 -0
- package/src/a11y-mcp-tools/fixtures/requests/a11y.diagnose.ok.json +27 -0
- package/src/a11y-mcp-tools/fixtures/requests/a11y.evidence.ok.json +25 -0
- package/src/a11y-mcp-tools/fixtures/responses/a11y.diagnose.ok.json +139 -0
- package/src/a11y-mcp-tools/fixtures/responses/a11y.diagnose.provenance_fail.json +13 -0
- package/src/a11y-mcp-tools/fixtures/responses/a11y.evidence.ok.json +88 -0
- package/src/a11y-mcp-tools/package-lock.json +189 -0
- package/src/a11y-mcp-tools/package.json +49 -0
- package/src/a11y-mcp-tools/src/envelope.js +197 -0
- package/src/a11y-mcp-tools/src/index.js +9 -0
- package/src/a11y-mcp-tools/src/schemas/artifact.js +85 -0
- package/src/a11y-mcp-tools/src/schemas/diagnosis.schema.v0.1.json +137 -0
- package/src/a11y-mcp-tools/src/schemas/envelope.schema.v0.1.json +108 -0
- package/src/a11y-mcp-tools/src/schemas/evidence.bundle.schema.v0.1.json +129 -0
- package/src/a11y-mcp-tools/src/schemas/evidence.js +97 -0
- package/src/a11y-mcp-tools/src/schemas/index.js +11 -0
- package/src/a11y-mcp-tools/src/schemas/provenance.js +140 -0
- package/src/a11y-mcp-tools/src/schemas/tools/a11y.diagnose.request.schema.v0.1.json +77 -0
- package/src/a11y-mcp-tools/src/schemas/tools/a11y.diagnose.response.schema.v0.1.json +50 -0
- package/src/a11y-mcp-tools/src/schemas/tools/a11y.evidence.request.schema.v0.1.json +120 -0
- package/src/a11y-mcp-tools/src/schemas/tools/a11y.evidence.response.schema.v0.1.json +50 -0
- package/src/a11y-mcp-tools/src/tools/diagnose.js +597 -0
- package/src/a11y-mcp-tools/src/tools/evidence.js +481 -0
- package/src/a11y-mcp-tools/src/tools/index.js +10 -0
- package/src/a11y-mcp-tools/test/contract.test.mjs +154 -0
- package/src/a11y-mcp-tools/test/diagnose.test.js +485 -0
- package/src/a11y-mcp-tools/test/evidence.test.js +183 -0
- package/src/a11y-mcp-tools/test/schema.test.js +327 -0

package/src/a11y-assist/a11y_assist/ingest.py
@@ -0,0 +1,407 @@
"""Ingest command for a11y-evidence-engine findings.

Takes findings.json from a11y-evidence-engine and produces:
- ingest-summary.json: Normalized stats and grouping
- advisories.json: Fix-oriented tasks with evidence links
"""

from __future__ import annotations

import hashlib
import json
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

from . import __version__

# Default fix guidance per rule
DEFAULT_GUIDANCE: Dict[str, Tuple[str, str]] = {
    "html.document.missing_lang": (
        "Add language attribute to document",
        'Add lang="en" (or correct locale) to the <html> element.',
    ),
    "html.img.missing_alt": (
        "Add alt text to images",
        'Add a meaningful alt attribute, or mark decorative images with alt="" and role="presentation".',
    ),
    "html.form_control.missing_label": (
        "Associate labels with form controls",
        "Add <label for> association, or use aria-label/aria-labelledby.",
    ),
    "html.interactive.missing_name": (
        "Add accessible names to interactive elements",
        "Ensure text content, aria-label, aria-labelledby, or title attribute is present.",
    ),
}


@dataclass
class IngestResult:
    """Result of ingesting findings."""

    source_engine: str
    source_version: str
    ingested_at: str
    target: Dict[str, Any]
    summary: Dict[str, int]
    by_rule: List[Dict[str, Any]]
    top_files: List[Dict[str, Any]]
    findings: List[Dict[str, Any]]
    provenance_verified: bool = False
    provenance_errors: List[str] = field(default_factory=list)


def load_findings(findings_path: Path) -> Dict[str, Any]:
    """Load and validate findings.json structure."""
    if not findings_path.exists():
        raise IngestError(f"Findings file not found: {findings_path}")

    try:
        with open(findings_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        raise IngestError(f"Invalid JSON in findings file: {e}")

    # Basic validation
    required = ["engine", "version", "summary", "findings"]
    missing = [k for k in required if k not in data]
    if missing:
        raise IngestError(f"Missing required fields: {missing}")

    if not isinstance(data["findings"], list):
        raise IngestError("'findings' must be an array")

    return data


def verify_provenance(
    finding: Dict[str, Any], base_dir: Path
) -> Tuple[bool, Optional[str]]:
    """Verify provenance for a single finding.

    Returns (success, error_message).
    """
    evidence_ref = finding.get("evidence_ref")
    if not evidence_ref:
        return False, f"{finding.get('finding_id', 'unknown')}: Missing evidence_ref"

    # Check all files exist
    for key in ["record", "digest", "envelope"]:
        ref_path = evidence_ref.get(key)
        if not ref_path:
            return False, f"{finding.get('finding_id')}: Missing {key} reference"

        full_path = base_dir / ref_path
        if not full_path.exists():
            return False, f"{finding.get('finding_id')}: File not found: {ref_path}"

    # Verify digest matches canonical evidence
    try:
        record_path = base_dir / evidence_ref["record"]
        digest_path = base_dir / evidence_ref["digest"]

        with open(record_path, "r", encoding="utf-8") as f:
            record = json.load(f)
        with open(digest_path, "r", encoding="utf-8") as f:
            digest_record = json.load(f)

        # Extract evidence from record
        prov = record.get("prov.record.v0.1", {})
        outputs = prov.get("outputs", [])
        if not outputs:
            return False, f"{finding.get('finding_id')}: No outputs in record"

        evidence = outputs[0].get("artifact.v0.1", {}).get("content")
        if evidence is None:
            return False, f"{finding.get('finding_id')}: No evidence content in record"

        # Extract expected digest
        digest_prov = digest_record.get("prov.record.v0.1", {})
        digest_outputs = digest_prov.get("outputs", [])
        if not digest_outputs:
            return False, f"{finding.get('finding_id')}: No outputs in digest record"

        expected_digest = (
            digest_outputs[0].get("artifact.v0.1", {}).get("digest", {}).get("value")
        )
        if not expected_digest:
            return False, f"{finding.get('finding_id')}: No digest value found"

        # Compute actual digest using canonical JSON
        canonical = canonicalize(evidence)
        actual_digest = hashlib.sha256(canonical.encode("utf-8")).hexdigest()

        if actual_digest != expected_digest:
            return (
                False,
                f"{finding.get('finding_id')}: Digest mismatch (expected {expected_digest[:16]}..., got {actual_digest[:16]}...)",
            )

        return True, None

    except (json.JSONDecodeError, KeyError, TypeError) as e:
        return False, f"{finding.get('finding_id')}: Error verifying provenance: {e}"


def canonicalize(value: Any) -> str:
    """Canonicalize JSON per prov-spec (sorted keys, no whitespace)."""
    if value is None:
        return "null"

    if isinstance(value, bool):
        return "true" if value else "false"

    if isinstance(value, str):
        return json.dumps(value)

    if isinstance(value, (int, float)):
        if not (isinstance(value, bool)) and not (
            isinstance(value, float) and (value != value or abs(value) == float("inf"))
        ):
            return json.dumps(value)
        raise ValueError("Non-finite numbers not allowed")

    if isinstance(value, list):
        items = [canonicalize(item) for item in value]
        return "[" + ",".join(items) + "]"

    if isinstance(value, dict):
        keys = sorted(value.keys())
        pairs = [json.dumps(k) + ":" + canonicalize(value[k]) for k in keys]
        return "{" + ",".join(pairs) + "}"

    raise ValueError(f"Non-JSON value type: {type(value)}")


def group_by_rule(findings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Group findings by rule_id with counts."""
    counts: Dict[str, Dict[str, Any]] = {}

    for finding in findings:
        rule_id = finding.get("rule_id", "unknown")
        severity = finding.get("severity", "info")

        if rule_id not in counts:
            counts[rule_id] = {"rule_id": rule_id, "severity": severity, "count": 0}
        counts[rule_id]["count"] += 1

    # Sort by count descending, then rule_id
    return sorted(counts.values(), key=lambda x: (-x["count"], x["rule_id"]))


def group_by_file(findings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Group findings by file with severity counts."""
    file_counts: Dict[str, Dict[str, int]] = {}

    for finding in findings:
        file_path = finding.get("location", {}).get("file", "unknown")
        severity = finding.get("severity", "info")

        if file_path not in file_counts:
            file_counts[file_path] = {"errors": 0, "warnings": 0, "info": 0}

        if severity == "error":
            file_counts[file_path]["errors"] += 1
        elif severity == "warning":
            file_counts[file_path]["warnings"] += 1
        else:
            file_counts[file_path]["info"] += 1

    # Build result sorted by errors desc, then file name
    result = [
        {"file": f, **counts}
        for f, counts in sorted(
            file_counts.items(), key=lambda x: (-x[1]["errors"], x[0])
        )
    ]

    return result[:10]  # Top 10 files


def build_advisories(findings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Build advisories grouped by rule with fix guidance."""
    by_rule: Dict[str, List[Dict[str, Any]]] = {}

    for finding in findings:
        rule_id = finding.get("rule_id", "unknown")
        if rule_id not in by_rule:
            by_rule[rule_id] = []
        by_rule[rule_id].append(finding)

    advisories = []
    adv_num = 1

    # Sort rules by count descending for priority
    for rule_id in sorted(by_rule.keys(), key=lambda r: -len(by_rule[r])):
        instances = by_rule[rule_id]
        first = instances[0]

        title, fix = DEFAULT_GUIDANCE.get(
            rule_id, (f"Fix {rule_id}", "Review the accessibility issue and apply appropriate fix.")
        )

        advisory = {
            "advisory_id": f"adv-{adv_num:04d}",
            "rule_id": rule_id,
            "severity": first.get("severity", "error"),
            "confidence": first.get("confidence", 0.9),
            "title": title,
            "recommended_fix": fix,
            "instances": [
                {
                    "finding_id": inst.get("finding_id"),
                    "location": inst.get("location"),
                    "evidence_ref": inst.get("evidence_ref"),
                }
                for inst in instances
            ],
        }
        advisories.append(advisory)
        adv_num += 1

    return advisories


def ingest(
    findings_path: Path,
    verify_provenance_flag: bool = False,
    min_severity: str = "info",
) -> IngestResult:
    """Ingest findings from a11y-evidence-engine.

    Args:
        findings_path: Path to findings.json
        verify_provenance_flag: If True, verify all provenance bundles
        min_severity: Minimum severity to include (info, warning, error)

    Returns:
        IngestResult with summary and advisories
    """
    data = load_findings(findings_path)
    base_dir = findings_path.parent

    # Filter by severity
    severity_order = {"info": 0, "warning": 1, "error": 2}
    min_level = severity_order.get(min_severity, 0)

    filtered_findings = [
        f
        for f in data["findings"]
        if severity_order.get(f.get("severity", "info"), 0) >= min_level
    ]

    # Verify provenance if requested
    prov_errors: List[str] = []
    prov_verified = False

    if verify_provenance_flag:
        prov_verified = True
        for finding in filtered_findings:
            success, error = verify_provenance(finding, base_dir)
            if not success and error:
                prov_errors.append(error)
                prov_verified = False

    # Build result
    return IngestResult(
        source_engine=data.get("engine", "unknown"),
        source_version=data.get("version", "unknown"),
        ingested_at=datetime.now(timezone.utc).isoformat(),
        target=data.get("target", {}),
        summary=data.get("summary", {}),
        by_rule=group_by_rule(filtered_findings),
        top_files=group_by_file(filtered_findings),
        findings=filtered_findings,
        provenance_verified=prov_verified,
        provenance_errors=prov_errors,
    )


def write_ingest_summary(result: IngestResult, out_path: Path) -> None:
    """Write ingest-summary.json."""
    summary = {
        "source_engine": result.source_engine,
        "source_version": result.source_version,
        "ingested_at": result.ingested_at,
        "target": result.target,
        "summary": result.summary,
        "by_rule": result.by_rule,
        "top_files": result.top_files,
    }

    if result.provenance_verified:
        summary["provenance_verified"] = True
    elif result.provenance_errors:
        summary["provenance_verified"] = False
        summary["provenance_errors"] = result.provenance_errors

    out_path.parent.mkdir(parents=True, exist_ok=True)
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(summary, f, indent=2)


def write_advisories(result: IngestResult, out_path: Path) -> None:
    """Write advisories.json."""
    advisories = build_advisories(result.findings)

    output = {
        "schema": "a11y-assist/advisories@v0.1",
        "generated_by": {
            "tool": "a11y-assist",
            "command": "ingest",
            "version": __version__,
        },
        "advisories": advisories,
    }

    out_path.parent.mkdir(parents=True, exist_ok=True)
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(output, f, indent=2)


def render_text_summary(result: IngestResult) -> str:
    """Render a human-readable summary."""
    lines = []
    lines.append(f"Source: {result.source_engine} v{result.source_version}")
    lines.append(f"Target: {result.target.get('path', 'unknown')}")
    lines.append("")

    s = result.summary
    lines.append(
        f"Files scanned: {s.get('files_scanned', 0)} "
        f"Errors: {s.get('errors', 0)} "
        f"Warnings: {s.get('warnings', 0)} "
        f"Info: {s.get('info', 0)}"
    )
    lines.append("")

    if result.by_rule:
        lines.append("By rule:")
        for rule in result.by_rule[:5]:
            lines.append(
                f" {rule['rule_id']}: {rule['count']} ({rule['severity']})"
            )
        lines.append("")

    if result.top_files:
        lines.append("Top files:")
        for f in result.top_files[:5]:
            lines.append(f" {f['file']}: {f['errors']} errors, {f['warnings']} warnings")
        lines.append("")

    if result.provenance_verified:
        lines.append("Provenance: VERIFIED")
    elif result.provenance_errors:
        lines.append(f"Provenance: FAILED ({len(result.provenance_errors)} errors)")
        for err in result.provenance_errors[:3]:
            lines.append(f" - {err}")

    return "\n".join(lines)


class IngestError(Exception):
    """Error during ingest."""

    pass
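
For orientation, a minimal usage sketch of the ingest module above. It is not part of the published package: the paths are hypothetical and the package's own cli.py is the real entry point, but every function called here is defined in the file shown.

from pathlib import Path

from a11y_assist.ingest import (
    canonicalize,
    ingest,
    render_text_summary,
    write_advisories,
    write_ingest_summary,
)

# Hypothetical location of the findings.json emitted by a11y-evidence-engine.
findings = Path("out/findings.json")

result = ingest(findings, verify_provenance_flag=True, min_severity="warning")
write_ingest_summary(result, Path("out/ingest-summary.json"))
write_advisories(result, Path("out/advisories.json"))
print(render_text_summary(result))

# canonicalize() sorts keys and strips whitespace so digests stay stable:
assert canonicalize({"b": 1, "a": "x"}) == '{"a":"x","b":1}'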

package/src/a11y-assist/a11y_assist/methods.py
@@ -0,0 +1,137 @@
"""Methods metadata helpers for audit traceability.

Provides utilities for adding method IDs and evidence anchors
to AssistResult without modifying core behavior.

These are audit-only and do not affect rendering output.
"""

from __future__ import annotations

from dataclasses import replace
from typing import List, Sequence

from .render import AssistResult, Evidence


# =============================================================================
# Method ID Constants
# =============================================================================

# Engine normalization methods
METHOD_NORMALIZE_CLI_ERROR = "engine.normalize.from_cli_error_v0_1"
METHOD_NORMALIZE_RAW_TEXT = "engine.normalize.from_raw_text"

# Profile methods
METHOD_PROFILE_LOWVISION = "profile.lowvision.apply"
METHOD_PROFILE_COGNITIVE_LOAD = "profile.cognitive_load.apply"
METHOD_PROFILE_SCREEN_READER = "profile.screen_reader.apply"
METHOD_PROFILE_DYSLEXIA = "profile.dyslexia.apply"
METHOD_PROFILE_PLAIN_LANGUAGE = "profile.plain_language.apply"

# Guard methods (coarse)
METHOD_GUARD_VALIDATE = "guard.validate_profile_transform"

# Guard methods (fine-grained)
METHOD_GUARD_ID_NO_INVENTION = "guard.id.no_invention"
METHOD_GUARD_CONFIDENCE_NO_INCREASE = "guard.confidence.no_increase"
METHOD_GUARD_COMMANDS_SAFE_ONLY = "guard.commands.safe_only"
METHOD_GUARD_PLAN_MAX_STEPS = "guard.plan.max_steps"
METHOD_GUARD_CONTENT_SUPPORT = "guard.content.support_heuristic"
METHOD_GUARD_NO_PARENTHETICALS = "guard.screen_reader.no_parentheticals"
METHOD_GUARD_NO_VISUAL_REFS = "guard.screen_reader.no_visual_refs"


# =============================================================================
# Helper Functions
# =============================================================================


def with_methods(result: AssistResult, methods: Sequence[str]) -> AssistResult:
    """Add method IDs to an AssistResult (deduplicating).

    Args:
        result: The AssistResult to update
        methods: Method IDs to add

    Returns:
        New AssistResult with methods added
    """
    current = list(result.methods_applied)
    for m in methods:
        if m not in current:
            current.append(m)
    return replace(result, methods_applied=tuple(current))


def with_evidence(result: AssistResult, evidence: Sequence[Evidence]) -> AssistResult:
    """Add evidence anchors to an AssistResult.

    Args:
        result: The AssistResult to update
        evidence: Evidence anchors to add

    Returns:
        New AssistResult with evidence added
    """
    current = list(result.evidence)
    current.extend(evidence)
    return replace(result, evidence=tuple(current))


def with_method(result: AssistResult, method: str) -> AssistResult:
    """Add a single method ID to an AssistResult.

    Args:
        result: The AssistResult to update
        method: Method ID to add

    Returns:
        New AssistResult with method added
    """
    return with_methods(result, [method])


def evidence_for_plan(
    plan: List[str],
    source_prefix: str = "cli.error.fix",
) -> List[Evidence]:
    """Generate evidence anchors for plan steps.

    Args:
        plan: List of plan steps
        source_prefix: Source path prefix (e.g., "cli.error.fix")

    Returns:
        List of Evidence objects mapping plan[i] to source[i]
    """
    return [
        Evidence(field=f"plan[{i}]", source=f"{source_prefix}[{i}]")
        for i in range(len(plan))
    ]


def evidence_for_commands(
    commands: List[str],
    source_indices: List[int],
    source_prefix: str = "cli.error.fix",
) -> List[Evidence]:
    """Generate evidence anchors for safe commands.

    Args:
        commands: List of safe commands
        source_indices: Index of each command in the source
        source_prefix: Source path prefix

    Returns:
        List of Evidence objects
    """
    result = []
    for i, idx in enumerate(source_indices):
        result.append(
            Evidence(
                field=f"next_safe_commands[{i}]",
                source=f"{source_prefix}[{idx}]",
            )
        )
    return result
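
A short sketch of how these helpers compose, assuming an AssistResult has already been produced by the normal assist pipeline (its constructor lives in a11y_assist.render and is not shown in this diff); the profile and guard choices below are illustrative only.

from a11y_assist.methods import (
    METHOD_GUARD_VALIDATE,
    METHOD_PROFILE_SCREEN_READER,
    evidence_for_plan,
    with_evidence,
    with_methods,
)

def annotate_for_audit(result, plan):
    # result: an AssistResult from the assist pipeline (assumed); plan: list of step strings.
    # Record which methods ran, then anchor each plan step to its source field.
    result = with_methods(result, [METHOD_PROFILE_SCREEN_READER, METHOD_GUARD_VALIDATE])
    result = with_evidence(result, evidence_for_plan(plan, source_prefix="cli.error.fix"))
    return result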

package/src/a11y-assist/a11y_assist/parse_raw.py
@@ -0,0 +1,71 @@
"""Best-effort parser for raw CLI output.

Never invents an ID. Confidence is Low/Medium only.
"""

from __future__ import annotations

import re
from typing import Dict, List, Optional, Tuple

# Match [OK]/[WARN]/[ERROR] with optional (ID: ...)
STATUS_RE = re.compile(r"^\[(OK|WARN|ERROR)\]\s+(.+?)\s*(\((ID:\s*.+)\))?\s*$")

# Match (ID: NAMESPACE.CATEGORY.DETAIL) anywhere in text
ID_IN_PARENS_RE = re.compile(r"\(ID:\s*([A-Z][A-Z0-9]*(?:\.[A-Z0-9]+)+)\)")


def extract_id(text: str) -> Optional[str]:
    """Extract an error ID from text if present."""
    m = ID_IN_PARENS_RE.search(text)
    if not m:
        return None
    return m.group(1)


def extract_blocks(lines: List[str]) -> Dict[str, List[str]]:
    """Extract What:/Why:/Fix: blocks from lines."""
    blocks: Dict[str, List[str]] = {"What:": [], "Why:": [], "Fix:": []}
    current: Optional[str] = None

    for line in lines:
        s = line.rstrip("\n")
        stripped = s.strip()

        # Check if this is a block header
        if stripped in blocks:
            current = stripped
            continue

        # If we're in a block and line is indented, add it
        if current and s.startswith(" "):
            blocks[current].append(stripped)
        elif current and stripped == "":
            # Allow blank lines inside blocks
            continue
        else:
            # Non-indented lines end the current block
            current = None

    return blocks


def parse_raw(text: str) -> Tuple[Optional[str], str, Dict[str, List[str]]]:
    """Parse raw CLI output.

    Returns:
        (error_id or None, status string, blocks dict)
    """
    lines = text.splitlines()
    status = "UNKNOWN"

    if lines:
        first_line = lines[0].strip()
        m = STATUS_RE.match(first_line)
        if m:
            status = m.group(1)

    err_id = extract_id(text)
    blocks = extract_blocks(lines)

    return err_id, status, blocks
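
A small worked example of the parser above; the CLI message and error ID are invented for illustration.

from a11y_assist.parse_raw import parse_raw

sample = (
    "[ERROR] Build failed (ID: BUILD.CONFIG.MISSING)\n"
    "What:\n"
    "  Required config file was not found.\n"
    "Fix:\n"
    "  Create config.yaml in the project root.\n"
)

err_id, status, blocks = parse_raw(sample)
assert status == "ERROR"
assert err_id == "BUILD.CONFIG.MISSING"
assert blocks["Fix:"] == ["Create config.yaml in the project root."]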

package/src/a11y-assist/a11y_assist/profiles/__init__.py
@@ -0,0 +1,29 @@
"""Accessibility profiles for a11y-assist.

Profiles transform AssistResult for different accessibility needs:
- lowvision: Default profile, clear labels and spacing
- cognitive-load: Reduced steps, simplified language, strict limits
- screen-reader: TTS-optimized, expanded abbreviations, no visual references
- dyslexia: Reduced reading friction, explicit labels, no symbolic emphasis
- plain-language: Maximum clarity, one clause per sentence, simple structure
"""

from .cognitive_load import apply_cognitive_load
from .cognitive_load_render import render_cognitive_load
from .dyslexia import apply_dyslexia
from .dyslexia_render import render_dyslexia
from .plain_language import apply_plain_language
from .plain_language_render import render_plain_language
from .screen_reader import apply_screen_reader
from .screen_reader_render import render_screen_reader

__all__ = [
    "apply_cognitive_load",
    "render_cognitive_load",
    "apply_dyslexia",
    "render_dyslexia",
    "apply_plain_language",
    "render_plain_language",
    "apply_screen_reader",
    "render_screen_reader",
]
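
Callers might select a profile pair by name with a plain dispatch table, sketched below; the signatures of the apply_*/render_* functions are not shown in this diff, so only the exported names are used.

from a11y_assist import profiles

# Hypothetical registry keyed by the profile names listed in the docstring above.
PROFILE_DISPATCH = {
    "cognitive-load": (profiles.apply_cognitive_load, profiles.render_cognitive_load),
    "screen-reader": (profiles.apply_screen_reader, profiles.render_screen_reader),
    "dyslexia": (profiles.apply_dyslexia, profiles.render_dyslexia),
    "plain-language": (profiles.apply_plain_language, profiles.render_plain_language),
}

apply_profile, render_profile = PROFILE_DISPATCH["screen-reader"]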