riskforge 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. riskforge/__init__.py +2 -0
  2. riskforge/_data/__init__.py +0 -0
  3. riskforge/_data/patterns/__init__.py +0 -0
  4. riskforge/_data/patterns/patterns.yaml +147 -0
  5. riskforge/_data/question_bank/__init__.py +0 -0
  6. riskforge/_data/question_bank/data_governance.yaml +57 -0
  7. riskforge/_data/question_bank/discrimination.yaml +46 -0
  8. riskforge/_data/question_bank/fundamental_rights.yaml +46 -0
  9. riskforge/_data/question_bank/health_safety.yaml +68 -0
  10. riskforge/_data/question_bank/human_oversight.yaml +46 -0
  11. riskforge/_data/question_bank/privacy.yaml +57 -0
  12. riskforge/_data/question_bank/robustness.yaml +57 -0
  13. riskforge/_data/question_bank/transparency.yaml +46 -0
  14. riskforge/_data/schemas/__init__.py +0 -0
  15. riskforge/_data/schemas/rmf.schema.json +184 -0
  16. riskforge/_data/templates/__init__.py +0 -0
  17. riskforge/adapters/__init__.py +1 -0
  18. riskforge/adapters/base.py +24 -0
  19. riskforge/adapters/rag_benchmarking.py +68 -0
  20. riskforge/adapters/traceforge.py +82 -0
  21. riskforge/cli/__init__.py +1 -0
  22. riskforge/cli/commands/__init__.py +1 -0
  23. riskforge/cli/commands/assess.py +32 -0
  24. riskforge/cli/commands/diff.py +37 -0
  25. riskforge/cli/commands/export.py +73 -0
  26. riskforge/cli/commands/import_cmd.py +51 -0
  27. riskforge/cli/commands/init.py +60 -0
  28. riskforge/cli/commands/risk.py +72 -0
  29. riskforge/cli/commands/serve.py +48 -0
  30. riskforge/cli/commands/system.py +28 -0
  31. riskforge/cli/commands/tests_cmd.py +34 -0
  32. riskforge/cli/commands/validate.py +57 -0
  33. riskforge/cli/commands/verify.py +36 -0
  34. riskforge/cli/main.py +55 -0
  35. riskforge/engine/__init__.py +1 -0
  36. riskforge/engine/assess.py +89 -0
  37. riskforge/engine/audit.py +71 -0
  38. riskforge/engine/export.py +100 -0
  39. riskforge/engine/migrations.py +42 -0
  40. riskforge/engine/risk.py +111 -0
  41. riskforge/engine/tests.py +106 -0
  42. riskforge/engine/validate.py +145 -0
  43. riskforge/exporters/__init__.py +1 -0
  44. riskforge/exporters/base.py +19 -0
  45. riskforge/exporters/json_exporter.py +19 -0
  46. riskforge/exporters/markdown_exporter.py +71 -0
  47. riskforge/exporters/pdf/__init__.py +1 -0
  48. riskforge/exporters/pdf/pdf_exporter.py +50 -0
  49. riskforge/exporters/pdf/templates/report.css +291 -0
  50. riskforge/exporters/pdf/templates/report.html +192 -0
  51. riskforge/migrations/__init__.py +0 -0
  52. riskforge/migrations/m0001_initial.py +22 -0
  53. riskforge/models/__init__.py +15 -0
  54. riskforge/models/audit.py +31 -0
  55. riskforge/models/register.py +52 -0
  56. riskforge/models/risk.py +139 -0
  57. riskforge/models/rmf.py +58 -0
  58. riskforge/models/system.py +48 -0
  59. riskforge/plugins/__init__.py +1 -0
  60. riskforge/plugins/builtin.py +48 -0
  61. riskforge/plugins/loader.py +46 -0
  62. riskforge/plugins/registry.py +64 -0
  63. riskforge/server/__init__.py +1 -0
  64. riskforge/server/app.py +57 -0
  65. riskforge/server/auth.py +31 -0
  66. riskforge/server/config.py +14 -0
  67. riskforge/server/metrics.py +20 -0
  68. riskforge/server/middleware.py +30 -0
  69. riskforge/server/routers/__init__.py +1 -0
  70. riskforge/server/routers/exports.py +14 -0
  71. riskforge/server/routers/health.py +18 -0
  72. riskforge/server/routers/registers.py +20 -0
  73. riskforge/server/routers/risks.py +20 -0
  74. riskforge/server/routers/webhooks.py +20 -0
  75. riskforge/storage/__init__.py +4 -0
  76. riskforge/storage/base.py +235 -0
  77. riskforge/storage/filesystem.py +486 -0
  78. riskforge-0.1.0.dist-info/METADATA +217 -0
  79. riskforge-0.1.0.dist-info/RECORD +81 -0
  80. riskforge-0.1.0.dist-info/WHEEL +4 -0
  81. riskforge-0.1.0.dist-info/entry_points.txt +21 -0
riskforge/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ """RiskForge — EU AI Act Article 9 Risk Management System CLI."""
2
+ __version__ = "0.1.0"
File without changes
File without changes
@@ -0,0 +1,147 @@
1
+ schema_version: "1.0.0"
2
+ patterns:
3
+ - pattern_id: CREDIT_SCORING_BIAS
4
+ name: "Credit Scoring — Demographic Bias Risk"
5
+ triggers:
6
+ annex_iii_category: essential_services
7
+ purpose_keywords: ["credit", "loan", "mortgage", "underwriting", "insurance"]
8
+ risks:
9
+ - dimension: discrimination
10
+ title: "Demographic bias in credit scoring outputs"
11
+ description: "Credit scoring models trained on historical data may encode systemic biases against protected groups (age, gender, ethnicity). Article 9(9) requires consideration of impacts on vulnerable groups."
12
+ likelihood_hint: 3
13
+ severity_hint: 4
14
+ article_refs: ["Art.9(2)(a)", "Art.9(9)", "Art.10(2)(f)"]
15
+ nist_rmf_ref: "MEASURE 2.9"
16
+ iso42001_ref: "Clause A.7"
17
+ - dimension: transparency
18
+ title: "Lack of explanation for credit decisions"
19
+ description: "Automated credit decisions must be explainable to affected individuals under GDPR Article 22. The system may not currently provide this."
20
+ likelihood_hint: 3
21
+ severity_hint: 3
22
+ article_refs: ["Art.13"]
23
+ nist_rmf_ref: "GOVERN 1.7"
24
+ iso42001_ref: "Clause A.6"
25
+
26
+ - pattern_id: HIRING_SCREENING
27
+ name: "Hiring / CV Screening — Multiple Risk Cluster"
28
+ triggers:
29
+ annex_iii_category: employment
30
+ purpose_keywords: ["recruit", "hiring", "cv", "resume", "screening", "interview", "candidate"]
31
+ risks:
32
+ - dimension: discrimination
33
+ title: "Proxy discrimination in automated CV screening"
34
+ description: "Automated screening tools may use proxies (school name, postcode, employment gaps) that correlate with protected characteristics."
35
+ likelihood_hint: 4
36
+ severity_hint: 4
37
+ article_refs: ["Art.9(2)(a)", "Art.10(2)(f)"]
38
+ nist_rmf_ref: "MEASURE 2.9"
39
+ iso42001_ref: "Clause A.7"
40
+ - dimension: transparency
41
+ title: "Lack of explainability for rejection decisions"
42
+ description: "Candidates subject to automated CV screening may have a right to explanation under GDPR Article 22. The system may not be designed to provide this."
43
+ likelihood_hint: 3
44
+ severity_hint: 3
45
+ article_refs: ["Art.13", "Art.14"]
46
+ nist_rmf_ref: "GOVERN 1.7"
47
+ iso42001_ref: "Clause A.6"
48
+ - dimension: human_oversight
49
+ title: "Automation bias in recruiter review of shortlisted candidates"
50
+ description: "Recruiters presented with AI-ranked shortlists tend to follow the ranking without independent assessment. This undermines the human oversight requirement."
51
+ likelihood_hint: 4
52
+ severity_hint: 3
53
+ article_refs: ["Art.14(3)"]
54
+ nist_rmf_ref: "MANAGE 1.1"
55
+ iso42001_ref: "Clause A.9"
56
+
57
+ - pattern_id: LAW_ENFORCEMENT_BIOMETRIC
58
+ name: "Law Enforcement — Biometric Identification Risk Cluster"
59
+ triggers:
60
+ annex_iii_category: law_enforcement
61
+ purpose_keywords: ["facial recognition", "biometric", "identification", "surveillance", "matching"]
62
+ risks:
63
+ - dimension: fundamental_rights
64
+ title: "Chilling effect on freedom of assembly and expression"
65
+ description: "Real-time biometric identification in public spaces can have a chilling effect on fundamental rights even when technically lawful. Article 9(2)(a) requires this to be assessed."
66
+ likelihood_hint: 4
67
+ severity_hint: 5
68
+ article_refs: ["Art.9(2)(a)"]
69
+ nist_rmf_ref: "MAP 1.5"
70
+ iso42001_ref: "Clause 6.1"
71
+ - dimension: discrimination
72
+ title: "Higher false match rates for under-represented demographic groups"
73
+ description: "Facial recognition systems have documented higher false positive rates for darker skin tones and women. This creates discriminatory law enforcement outcomes."
74
+ likelihood_hint: 4
75
+ severity_hint: 5
76
+ article_refs: ["Art.9(2)(a)", "Art.10(2)(f)"]
77
+ nist_rmf_ref: "MEASURE 2.9"
78
+ iso42001_ref: "Clause A.7"
79
+
80
+ - pattern_id: EDUCATION_ASSESSMENT
81
+ name: "Education — Automated Student Assessment Risk"
82
+ triggers:
83
+ annex_iii_category: education
84
+ purpose_keywords: ["assessment", "grading", "scoring", "evaluation", "exam", "test"]
85
+ risks:
86
+ - dimension: discrimination
87
+ title: "Bias in automated educational assessment"
88
+ description: "Automated grading systems may unfairly penalise essays written in non-standard dialects or by non-native speakers, perpetuating educational inequality."
89
+ likelihood_hint: 3
90
+ severity_hint: 4
91
+ article_refs: ["Art.9(2)(a)", "Art.10(2)(f)"]
92
+ nist_rmf_ref: "MEASURE 2.9"
93
+ iso42001_ref: "Clause A.7"
94
+ - dimension: transparency
95
+ title: "Students unable to understand or contest automated grade decisions"
96
+ description: "Where automated assessment influences final grades or academic progression, students must be able to understand the basis and contest the outcome."
97
+ likelihood_hint: 3
98
+ severity_hint: 4
99
+ article_refs: ["Art.13"]
100
+ nist_rmf_ref: "GOVERN 1.7"
101
+ iso42001_ref: "Clause A.6"
102
+
103
+ - pattern_id: MEDICAL_DIAGNOSIS
104
+ name: "Healthcare — Medical Diagnosis or Clinical Decision Support"
105
+ triggers:
106
+ annex_iii_category: essential_services
107
+ purpose_keywords: ["diagnosis", "clinical", "medical", "patient", "triage", "prognosis", "screening"]
108
+ risks:
109
+ - dimension: health_safety
110
+ title: "False negatives in medical screening causing delayed treatment"
111
+ description: "AI-based screening tools that produce false negatives may delay life-critical treatment. The clinical consequences of failure must be quantified."
112
+ likelihood_hint: 2
113
+ severity_hint: 5
114
+ article_refs: ["Art.9(2)(b)", "Art.14(1)"]
115
+ nist_rmf_ref: "MAP 5.1"
116
+ iso42001_ref: "Clause 6.1"
117
+ - dimension: human_oversight
118
+ title: "Clinician over-reliance on AI diagnostic suggestions"
119
+ description: "Studies show clinicians may defer to AI suggestions even when clinical judgement contradicts them. Human oversight may become nominal without workload safeguards."
120
+ likelihood_hint: 3
121
+ severity_hint: 5
122
+ article_refs: ["Art.14"]
123
+ nist_rmf_ref: "MANAGE 1.1"
124
+ iso42001_ref: "Clause A.9"
125
+
126
+ - pattern_id: MIGRATION_ASYLUM
127
+ name: "Migration / Asylum — Decision Support Risk Cluster"
128
+ triggers:
129
+ annex_iii_category: migration
130
+ purpose_keywords: ["asylum", "border", "visa", "migration", "refugee", "immigration"]
131
+ risks:
132
+ - dimension: fundamental_rights
133
+ title: "AI-assisted asylum decisions affecting right to protection"
134
+ description: "Errors in AI-assisted migration decisions can result in refoulement — return of persons to places where they face persecution. This is an absolute fundamental rights violation."
135
+ likelihood_hint: 2
136
+ severity_hint: 5
137
+ article_refs: ["Art.9(2)(a)"]
138
+ nist_rmf_ref: "MAP 1.5"
139
+ iso42001_ref: "Clause 6.1"
140
+ - dimension: transparency
141
+ title: "Applicants unable to understand factors influencing their assessment"
142
+ description: "Asylum seekers subject to AI-assisted assessment may not understand which factors were considered or how to address adverse outcomes."
143
+ likelihood_hint: 3
144
+ severity_hint: 4
145
+ article_refs: ["Art.13"]
146
+ nist_rmf_ref: "GOVERN 1.7"
147
+ iso42001_ref: "Clause A.6"
File without changes
@@ -0,0 +1,57 @@
1
+ schema_version: "1.0.0"
2
+ dimension: data_governance
3
+ questions:
4
+ - id: DG-001
5
+ text: "Is the provenance of all training, validation, and test datasets documented (source, collection date, license, version)?"
6
+ guidance: "Article 10(2) requires data governance practices including documentation of dataset provenance. TraceForge can automate this."
7
+ annex_iii_categories: [employment, education, law_enforcement, essential_services, biometric]
8
+ default_likelihood_hint: 2
9
+ default_severity_hint: 3
10
+ article_refs: ["Art.10(2)", "Art.10(3)"]
11
+ nist_rmf_ref: "GOVERN 1.7"
12
+ iso42001_ref: "Clause A.8"
13
+ regulatory_status: settled
14
+
15
+ - id: DG-002
16
+ text: "Have dataset licenses been reviewed to confirm lawful use for the intended AI application (including commercial use rights)?"
17
+ guidance: "Many public datasets have restrictive licenses that prohibit commercial use or redistribution. Legal review is required."
18
+ annex_iii_categories: [employment, education, law_enforcement]
19
+ default_likelihood_hint: 3
20
+ default_severity_hint: 3
21
+ article_refs: ["Art.10(2)", "Art.10(3)"]
22
+ nist_rmf_ref: "GOVERN 1.7"
23
+ iso42001_ref: "Clause A.8"
24
+ regulatory_status: settled
25
+
26
+ - id: DG-003
27
+ text: "Are data quality checks (completeness, consistency, accuracy) applied and documented before training and fine-tuning?"
28
+ guidance: "Article 10(2)(e) requires data governance practices including data quality assessment."
29
+ annex_iii_categories: [employment, education, law_enforcement, essential_services]
30
+ default_likelihood_hint: 2
31
+ default_severity_hint: 3
32
+ article_refs: ["Art.10(2)(e)"]
33
+ nist_rmf_ref: "GOVERN 1.7"
34
+ iso42001_ref: "Clause A.8"
35
+ regulatory_status: settled
36
+
37
+ - id: DG-004
38
+ text: "Is there a data retention and deletion schedule that covers both training data and inference logs?"
39
+ guidance: "Data retained beyond its purpose creates ongoing legal and privacy risk. GDPR Article 5(1)(e) requires storage limitation."
40
+ annex_iii_categories: [law_enforcement, employment, biometric, essential_services]
41
+ default_likelihood_hint: 2
42
+ default_severity_hint: 2
43
+ article_refs: ["Art.10(3)"]
44
+ nist_rmf_ref: "GOVERN 1.6"
45
+ iso42001_ref: "Clause A.8"
46
+ regulatory_status: settled
47
+
48
+ - id: DG-005
49
+ text: "Is there a data pipeline version control system ensuring reproducibility of training runs from a specific dataset version?"
50
+ guidance: "Reproducibility is essential for audit purposes. If a model must be retrained to fix a defect, the exact training conditions must be recoverable."
51
+ annex_iii_categories: [employment, law_enforcement, essential_services]
52
+ default_likelihood_hint: 2
53
+ default_severity_hint: 3
54
+ article_refs: ["Art.10", "Art.11"]
55
+ nist_rmf_ref: "GOVERN 1.7"
56
+ iso42001_ref: "Clause A.8"
57
+ regulatory_status: settled
@@ -0,0 +1,46 @@
1
+ schema_version: "1.0.0"
2
+ dimension: discrimination
3
+ questions:
4
+ - id: DI-001
5
+ text: "Has the training dataset been audited for representation imbalances across protected characteristics (gender, ethnicity, age, disability)?"
6
+ guidance: "Biased training data produces biased outputs. Article 10(2)(f) requires data governance measures to address biases."
7
+ annex_iii_categories: [employment, education, essential_services, law_enforcement]
8
+ default_likelihood_hint: 3
9
+ default_severity_hint: 4
10
+ article_refs: ["Art.10(2)(f)", "Art.9(2)(a)"]
11
+ nist_rmf_ref: "MEASURE 2.9"
12
+ iso42001_ref: "Clause A.7"
13
+ regulatory_status: settled
14
+
15
+ - id: DI-002
16
+ text: "Have demographic parity, equalised odds, or other fairness metrics been computed and documented for each protected group?"
17
+ guidance: "Quantified fairness metrics are required for meaningful bias assessment. Select metrics appropriate to the use case."
18
+ annex_iii_categories: [employment, education, essential_services]
19
+ default_likelihood_hint: 2
20
+ default_severity_hint: 4
21
+ article_refs: ["Art.9(7)", "Art.10(2)(f)"]
22
+ nist_rmf_ref: "MEASURE 2.9"
23
+ iso42001_ref: "Clause A.7"
24
+ regulatory_status: settled
25
+
26
+ - id: DI-003
27
+ text: "Does the system use proxy variables that may correlate with protected characteristics (e.g. postcode, school name, employment gaps)?"
28
+ guidance: "Proxy discrimination occurs when facially neutral features encode protected characteristics. Legal exposure exists even without intent."
29
+ annex_iii_categories: [employment, education, essential_services, law_enforcement]
30
+ default_likelihood_hint: 3
31
+ default_severity_hint: 4
32
+ article_refs: ["Art.9(2)(a)", "Art.10(2)(f)"]
33
+ nist_rmf_ref: "MEASURE 2.9"
34
+ iso42001_ref: "Clause A.7"
35
+ regulatory_status: settled
36
+
37
+ - id: DI-004
38
+ text: "Are bias test results documented and reviewed at each model update, not just at initial deployment?"
39
+ guidance: "Data drift and concept drift can re-introduce bias after initial mitigation. Continuous monitoring is required."
40
+ annex_iii_categories: [employment, education, essential_services]
41
+ default_likelihood_hint: 2
42
+ default_severity_hint: 3
43
+ article_refs: ["Art.9(7)", "Art.72"]
44
+ nist_rmf_ref: "MEASURE 2.9"
45
+ iso42001_ref: "Clause 9.1"
46
+ regulatory_status: settled
@@ -0,0 +1,46 @@
1
+ schema_version: "1.0.0"
2
+ dimension: fundamental_rights
3
+ questions:
4
+ - id: FR-001
5
+ text: "Has a fundamental rights impact assessment (FRIA) been conducted prior to deployment?"
6
+ guidance: "Article 9(2)(a) requires identification of known and foreseeable risks to fundamental rights. A FRIA is the standard mechanism for this."
7
+ annex_iii_categories: [law_enforcement, migration, biometric, employment]
8
+ default_likelihood_hint: null
9
+ default_severity_hint: null
10
+ article_refs: ["Art.9(2)(a)"]
11
+ nist_rmf_ref: "GOVERN 1.1"
12
+ iso42001_ref: "Clause 6.1"
13
+ regulatory_status: settled
14
+
15
+ - id: FR-002
16
+ text: "Could the system restrict or affect access to rights protected under the EU Charter of Fundamental Rights?"
17
+ guidance: "Consider right to an effective remedy, right to a fair trial, presumption of innocence, freedom of expression, right to education."
18
+ annex_iii_categories: [law_enforcement, justice, migration]
19
+ default_likelihood_hint: 2
20
+ default_severity_hint: 5
21
+ article_refs: ["Art.9(2)(a)"]
22
+ nist_rmf_ref: "MAP 1.5"
23
+ iso42001_ref: "Clause 6.1"
24
+ regulatory_status: settled
25
+
26
+ - id: FR-003
27
+ text: "Are affected individuals informed of the AI system's role in decisions that affect them, in a language they understand?"
28
+ guidance: "Transparency is a foundational right. The obligation to inform subjects is strengthened in high-risk AI contexts."
29
+ annex_iii_categories: [essential_services, law_enforcement, employment, education]
30
+ default_likelihood_hint: 2
31
+ default_severity_hint: 3
32
+ article_refs: ["Art.13", "Art.14"]
33
+ nist_rmf_ref: "GOVERN 1.7"
34
+ iso42001_ref: "Clause A.6"
35
+ regulatory_status: settled
36
+
37
+ - id: FR-004
38
+ text: "Is there a human review mechanism for decisions that significantly affect an individual's fundamental rights?"
39
+ guidance: "Purely automated decisions affecting fundamental rights require meaningful human oversight under Article 14."
40
+ annex_iii_categories: [law_enforcement, justice, employment, essential_services]
41
+ default_likelihood_hint: null
42
+ default_severity_hint: null
43
+ article_refs: ["Art.14"]
44
+ nist_rmf_ref: "MANAGE 1.1"
45
+ iso42001_ref: "Clause A.9"
46
+ regulatory_status: settled
@@ -0,0 +1,68 @@
1
+ schema_version: "1.0.0"
2
+ dimension: health_safety
3
+ questions:
4
+ - id: HS-001
5
+ text: "Could the system's outputs directly influence a clinical, safety, or physical decision without mandatory human review?"
6
+ guidance: "Consider decisions about medication dosing, surgical planning, vehicle operation, equipment control, or emergency dispatch."
7
+ annex_iii_categories: [essential_services, critical_infrastructure, biometric]
8
+ default_likelihood_hint: 3
9
+ default_severity_hint: 4
10
+ article_refs: ["Art.9(2)(a)", "Art.14(1)"]
11
+ nist_rmf_ref: "MAP 1.5"
12
+ iso42001_ref: "Clause 6.1"
13
+ regulatory_status: settled
14
+
15
+ - id: HS-002
16
+ text: "Could a system failure (incorrect output, unavailability) cause physical harm to end users or third parties?"
17
+ guidance: "Failure modes include false negatives in medical screening, incorrect routing in emergency services, or missed safety-critical alerts."
18
+ annex_iii_categories: [critical_infrastructure, essential_services]
19
+ default_likelihood_hint: 2
20
+ default_severity_hint: 5
21
+ article_refs: ["Art.9(2)(b)"]
22
+ nist_rmf_ref: "MAP 5.1"
23
+ iso42001_ref: "Clause 6.1"
24
+ regulatory_status: settled
25
+
26
+ - id: HS-003
27
+ text: "Are there documented emergency override procedures that allow a human operator to immediately halt or override the system's decisions?"
28
+ guidance: "Article 14 requires human oversight mechanisms; this question checks whether the technical implementation exists."
29
+ annex_iii_categories: [biometric, critical_infrastructure, law_enforcement]
30
+ default_likelihood_hint: null
31
+ default_severity_hint: null
32
+ article_refs: ["Art.14(4)(e)"]
33
+ nist_rmf_ref: "MANAGE 1.1"
34
+ iso42001_ref: "Clause A.9"
35
+ regulatory_status: settled
36
+
37
+ - id: HS-004
38
+ text: "Has a failure mode and effects analysis (FMEA) or equivalent been conducted to identify safety-critical failure paths?"
39
+ guidance: "Systematic safety analysis documents are expected as part of the technical documentation under Article 11 and Annex IV."
40
+ annex_iii_categories: [critical_infrastructure, essential_services, biometric]
41
+ default_likelihood_hint: null
42
+ default_severity_hint: null
43
+ article_refs: ["Art.9(2)(c)", "Art.11"]
44
+ nist_rmf_ref: "MAP 5.2"
45
+ iso42001_ref: "Clause 6.1"
46
+ regulatory_status: settled
47
+
48
+ - id: HS-005
49
+ text: "Are system performance metrics (accuracy, error rates, latency) monitored in production with alerts for degradation?"
50
+ guidance: "Post-market monitoring under Article 72 requires tracking of actual performance against stated specifications."
51
+ annex_iii_categories: [critical_infrastructure, essential_services, employment]
52
+ default_likelihood_hint: 2
53
+ default_severity_hint: 3
54
+ article_refs: ["Art.9(7)", "Art.72"]
55
+ nist_rmf_ref: "MEASURE 2.5"
56
+ iso42001_ref: "Clause 9.1"
57
+ regulatory_status: settled
58
+
59
+ - id: HS-006
60
+ text: "Could vulnerable populations (elderly, children, people with disabilities) be disproportionately harmed by system errors?"
61
+ guidance: "Article 9(9) requires explicit consideration of impacts on children and other vulnerable groups."
62
+ annex_iii_categories: [essential_services, education, law_enforcement]
63
+ default_likelihood_hint: 3
64
+ default_severity_hint: 4
65
+ article_refs: ["Art.9(9)"]
66
+ nist_rmf_ref: "MAP 5.1"
67
+ iso42001_ref: "Clause 6.1"
68
+ regulatory_status: settled
@@ -0,0 +1,46 @@
1
+ schema_version: "1.0.0"
2
+ dimension: human_oversight
3
+ questions:
4
+ - id: HO-001
5
+ text: "Can a human operator override or stop the AI system's output in real time without significant technical barriers?"
6
+ guidance: "Article 14(4)(e) requires the ability to intervene on or interrupt AI system operation. This must be technically implemented, not just policy-stated."
7
+ annex_iii_categories: [critical_infrastructure, law_enforcement, biometric]
8
+ default_likelihood_hint: null
9
+ default_severity_hint: null
10
+ article_refs: ["Art.14(4)(e)"]
11
+ nist_rmf_ref: "MANAGE 1.1"
12
+ iso42001_ref: "Clause A.9"
13
+ regulatory_status: settled
14
+
15
+ - id: HO-002
16
+ text: "Are human reviewers trained on the system's capabilities, limitations, and known biases before making decisions based on its output?"
17
+ guidance: "Untrained human reviewers provide illusory oversight. Article 14(3) requires deployers to assign oversight to qualified persons."
18
+ annex_iii_categories: [employment, law_enforcement, essential_services, education]
19
+ default_likelihood_hint: 3
20
+ default_severity_hint: 3
21
+ article_refs: ["Art.14(3)"]
22
+ nist_rmf_ref: "MANAGE 1.1"
23
+ iso42001_ref: "Clause A.9"
24
+ regulatory_status: settled
25
+
26
+ - id: HO-003
27
+ text: "Is the human oversight load (volume of cases reviewed per reviewer per day) realistic for meaningful review, not rubber-stamping?"
28
+ guidance: "Automation bias and review fatigue cause human oversight to become nominal. Workload design is a technical control."
29
+ annex_iii_categories: [employment, law_enforcement, essential_services]
30
+ default_likelihood_hint: 3
31
+ default_severity_hint: 4
32
+ article_refs: ["Art.14"]
33
+ nist_rmf_ref: "MANAGE 1.1"
34
+ iso42001_ref: "Clause A.9"
35
+ regulatory_status: settled
36
+
37
+ - id: HO-004
38
+ text: "Is the system designed to make its reasoning interpretable to human reviewers (e.g. via explainability methods, confidence scores, supporting evidence)?"
39
+ guidance: "Human oversight is only meaningful if the reviewer can understand what the system decided and why."
40
+ annex_iii_categories: [law_enforcement, employment, essential_services, education]
41
+ default_likelihood_hint: 2
42
+ default_severity_hint: 4
43
+ article_refs: ["Art.14(4)(c)"]
44
+ nist_rmf_ref: "MEASURE 1.1"
45
+ iso42001_ref: "Clause A.6"
46
+ regulatory_status: settled
@@ -0,0 +1,57 @@
1
+ schema_version: "1.0.0"
2
+ dimension: privacy
3
+ questions:
4
+ - id: PR-001
5
+ text: "Does the system process personal data or special category data (health, biometric, ethnicity, religion)?"
6
+ guidance: "Processing of special category data triggers additional obligations under GDPR Article 9 and EU AI Act Article 10(5)."
7
+ annex_iii_categories: [biometric, health, law_enforcement, employment]
8
+ default_likelihood_hint: null
9
+ default_severity_hint: null
10
+ article_refs: ["Art.10(5)"]
11
+ nist_rmf_ref: "GOVERN 1.6"
12
+ iso42001_ref: "Clause A.8"
13
+ regulatory_status: settled
14
+
15
+ - id: PR-002
16
+ text: "Has a Data Protection Impact Assessment (DPIA) been conducted and approved by the Data Protection Officer?"
17
+ guidance: "GDPR Article 35 requires a DPIA for high-risk processing. High-risk AI systems typically trigger this threshold."
18
+ annex_iii_categories: [biometric, law_enforcement, employment, essential_services]
19
+ default_likelihood_hint: null
20
+ default_severity_hint: null
21
+ article_refs: ["Art.10", "Art.9(2)(a)"]
22
+ nist_rmf_ref: "GOVERN 1.6"
23
+ iso42001_ref: "Clause A.8"
24
+ regulatory_status: settled
25
+
26
+ - id: PR-003
27
+ text: "Is data minimisation enforced — does the model use only the minimum personal data necessary for the intended purpose?"
28
+ guidance: "GDPR data minimisation and purpose limitation principles apply to AI systems processing personal data."
29
+ annex_iii_categories: [biometric, employment, essential_services, education]
30
+ default_likelihood_hint: 2
31
+ default_severity_hint: 3
32
+ article_refs: ["Art.10(3)", "Art.10(5)"]
33
+ nist_rmf_ref: "GOVERN 1.6"
34
+ iso42001_ref: "Clause A.8"
35
+ regulatory_status: settled
36
+
37
+ - id: PR-004
38
+ text: "Has the training data been scanned for inadvertent PII inclusion, and is PII redaction/pseudonymisation applied before training?"
39
+ guidance: "Training data containing real PII creates privacy risk. Automated PII detection tools (e.g. TraceForge) can surface this."
40
+ annex_iii_categories: [biometric, employment, health]
41
+ default_likelihood_hint: 3
42
+ default_severity_hint: 4
43
+ article_refs: ["Art.10(3)", "Art.10(5)"]
44
+ nist_rmf_ref: "GOVERN 1.6"
45
+ iso42001_ref: "Clause A.8"
46
+ regulatory_status: settled
47
+
48
+ - id: PR-005
49
+ text: "Are model outputs screened to prevent accidental reproduction of training data PII (training data extraction / memorisation attacks)?"
50
+ guidance: "Large models can memorise and reproduce training data verbatim. This is an active attack surface requiring specific mitigations."
51
+ annex_iii_categories: [biometric, law_enforcement, employment]
52
+ default_likelihood_hint: 2
53
+ default_severity_hint: 4
54
+ article_refs: ["Art.9(2)(a)", "Art.15"]
55
+ nist_rmf_ref: "MEASURE 2.5"
56
+ iso42001_ref: "Clause A.9"
57
+ regulatory_status: settled
@@ -0,0 +1,57 @@
1
+ schema_version: "1.0.0"
2
+ dimension: robustness
3
+ questions:
4
+ - id: RO-001
5
+ text: "Has the system been tested against out-of-distribution inputs, edge cases, and adversarial perturbations?"
6
+ guidance: "Article 9(7) and Article 15 require testing of AI systems against the intended purpose and reasonably foreseeable misuse."
7
+ annex_iii_categories: [critical_infrastructure, law_enforcement, essential_services, biometric]
8
+ default_likelihood_hint: 2
9
+ default_severity_hint: 4
10
+ article_refs: ["Art.9(7)", "Art.15"]
11
+ nist_rmf_ref: "MEASURE 2.5"
12
+ iso42001_ref: "Clause A.9"
13
+ regulatory_status: settled
14
+
15
+ - id: RO-002
16
+ text: "Are accuracy, precision, recall, and calibration metrics documented across all relevant demographic subgroups?"
17
+ guidance: "Aggregate metrics can mask poor performance for subpopulations. Slice-level evaluation is required for high-risk systems."
18
+ annex_iii_categories: [employment, education, law_enforcement, essential_services]
19
+ default_likelihood_hint: 2
20
+ default_severity_hint: 4
21
+ article_refs: ["Art.9(7)", "Art.10(2)(f)"]
22
+ nist_rmf_ref: "MEASURE 2.5"
23
+ iso42001_ref: "Clause A.9"
24
+ regulatory_status: settled
25
+
26
+ - id: RO-003
27
+ text: "Is there a defined performance floor (minimum acceptable metric threshold) below which the system is taken offline?"
28
+ guidance: "A defined performance floor ensures degraded models do not remain in production. This is a key operational control."
29
+ annex_iii_categories: [critical_infrastructure, law_enforcement, essential_services]
30
+ default_likelihood_hint: 2
31
+ default_severity_hint: 4
32
+ article_refs: ["Art.9(7)", "Art.72"]
33
+ nist_rmf_ref: "MANAGE 1.3"
34
+ iso42001_ref: "Clause 9.1"
35
+ regulatory_status: settled
36
+
37
+ - id: RO-004
38
+ text: "Are model degradation scenarios (data drift, concept drift, distribution shift) monitored, with defined response procedures in place?"
39
+ guidance: "Models degrade over time as real-world data distributions shift. Post-market monitoring under Article 72 requires detection and response."
40
+ annex_iii_categories: [essential_services, employment, law_enforcement]
41
+ default_likelihood_hint: 3
42
+ default_severity_hint: 3
43
+ article_refs: ["Art.72"]
44
+ nist_rmf_ref: "MEASURE 2.5"
45
+ iso42001_ref: "Clause 9.1"
46
+ regulatory_status: settled
47
+
48
+ - id: RO-005
49
+ text: "Has the system been tested for resistance to prompt injection, model inversion, or membership inference attacks (if applicable)?"
50
+ guidance: "LLM-based and generative AI systems face specific adversarial risks not covered by traditional ML robustness testing."
51
+ annex_iii_categories: [law_enforcement, critical_infrastructure, biometric]
52
+ default_likelihood_hint: 2
53
+ default_severity_hint: 4
54
+ article_refs: ["Art.15"]
55
+ nist_rmf_ref: "MEASURE 2.5"
56
+ iso42001_ref: "Clause A.9"
57
+ regulatory_status: pending_implementing_act
@@ -0,0 +1,46 @@
1
+ schema_version: "1.0.0"
2
+ dimension: transparency
3
+ questions:
4
+ - id: TR-001
5
+ text: "Are users notified that they are interacting with or being assessed by an AI system?"
6
+ guidance: "Article 13 requires providers to ensure a sufficient level of transparency to enable deployers to interpret the system's output. Notification is a baseline."
7
+ annex_iii_categories: [employment, education, essential_services, law_enforcement]
8
+ default_likelihood_hint: null
9
+ default_severity_hint: null
10
+ article_refs: ["Art.13(1)"]
11
+ nist_rmf_ref: "GOVERN 1.7"
12
+ iso42001_ref: "Clause A.6"
13
+ regulatory_status: settled
14
+
15
+ - id: TR-002
16
+ text: "Is an explanation of the system's decision-making logic available to affected individuals upon request?"
17
+ guidance: "GDPR Article 22 provides a right not to be subject to solely automated decisions. An explanation capability is a key technical control."
18
+ annex_iii_categories: [employment, essential_services, law_enforcement, education]
19
+ default_likelihood_hint: 2
20
+ default_severity_hint: 4
21
+ article_refs: ["Art.13", "Art.14"]
22
+ nist_rmf_ref: "GOVERN 1.7"
23
+ iso42001_ref: "Clause A.6"
24
+ regulatory_status: settled
25
+
26
+ - id: TR-003
27
+ text: "Does the system documentation include clear capability limitations, known failure modes, and out-of-distribution behaviour?"
28
+ guidance: "Article 13(3)(b) requires information about the system's capabilities and limitations for the benefit of deployers."
29
+ annex_iii_categories: [employment, essential_services, education, law_enforcement]
30
+ default_likelihood_hint: 2
31
+ default_severity_hint: 3
32
+ article_refs: ["Art.13(3)(b)"]
33
+ nist_rmf_ref: "GOVERN 1.7"
34
+ iso42001_ref: "Clause A.6"
35
+ regulatory_status: settled
36
+
37
+ - id: TR-004
38
+ text: "Are system confidence scores or uncertainty estimates communicated to human operators making decisions based on system output?"
39
+ guidance: "Operators making high-stakes decisions based on AI output need calibrated uncertainty estimates to exercise appropriate oversight."
40
+ annex_iii_categories: [law_enforcement, essential_services, employment]
41
+ default_likelihood_hint: 3
42
+ default_severity_hint: 3
43
+ article_refs: ["Art.13", "Art.14(4)"]
44
+ nist_rmf_ref: "MEASURE 1.1"
45
+ iso42001_ref: "Clause A.6"
46
+ regulatory_status: settled
File without changes