@trohde/earos 1.0.0
- package/README.md +156 -0
- package/assets/init/.agents/skills/earos-artifact-gen/SKILL.md +106 -0
- package/assets/init/.agents/skills/earos-artifact-gen/references/interview-guide.md +313 -0
- package/assets/init/.agents/skills/earos-artifact-gen/references/output-guide.md +367 -0
- package/assets/init/.agents/skills/earos-assess/SKILL.md +212 -0
- package/assets/init/.agents/skills/earos-assess/references/calibration-benchmarks.md +160 -0
- package/assets/init/.agents/skills/earos-assess/references/output-templates.md +311 -0
- package/assets/init/.agents/skills/earos-assess/references/scoring-protocol.md +281 -0
- package/assets/init/.agents/skills/earos-calibrate/SKILL.md +153 -0
- package/assets/init/.agents/skills/earos-calibrate/references/agreement-metrics.md +188 -0
- package/assets/init/.agents/skills/earos-calibrate/references/calibration-protocol.md +263 -0
- package/assets/init/.agents/skills/earos-create/SKILL.md +257 -0
- package/assets/init/.agents/skills/earos-create/references/criterion-writing-guide.md +268 -0
- package/assets/init/.agents/skills/earos-create/references/dependency-rules.md +193 -0
- package/assets/init/.agents/skills/earos-create/references/rubric-interview-guide.md +123 -0
- package/assets/init/.agents/skills/earos-create/references/validation-checklist.md +238 -0
- package/assets/init/.agents/skills/earos-profile-author/SKILL.md +251 -0
- package/assets/init/.agents/skills/earos-profile-author/references/criterion-writing-guide.md +280 -0
- package/assets/init/.agents/skills/earos-profile-author/references/design-methods.md +158 -0
- package/assets/init/.agents/skills/earos-profile-author/references/profile-checklist.md +173 -0
- package/assets/init/.agents/skills/earos-remediate/SKILL.md +118 -0
- package/assets/init/.agents/skills/earos-remediate/references/output-template.md +199 -0
- package/assets/init/.agents/skills/earos-remediate/references/remediation-patterns.md +330 -0
- package/assets/init/.agents/skills/earos-report/SKILL.md +85 -0
- package/assets/init/.agents/skills/earos-report/references/portfolio-template.md +181 -0
- package/assets/init/.agents/skills/earos-report/references/single-artifact-template.md +168 -0
- package/assets/init/.agents/skills/earos-review/SKILL.md +130 -0
- package/assets/init/.agents/skills/earos-review/references/challenge-patterns.md +163 -0
- package/assets/init/.agents/skills/earos-review/references/output-template.md +180 -0
- package/assets/init/.agents/skills/earos-template-fill/SKILL.md +177 -0
- package/assets/init/.agents/skills/earos-template-fill/references/evidence-writing-guide.md +186 -0
- package/assets/init/.agents/skills/earos-template-fill/references/section-rubric-mapping.md +200 -0
- package/assets/init/.agents/skills/earos-validate/SKILL.md +113 -0
- package/assets/init/.agents/skills/earos-validate/references/fix-patterns.md +281 -0
- package/assets/init/.agents/skills/earos-validate/references/validation-checks.md +287 -0
- package/assets/init/.claude/CLAUDE.md +4 -0
- package/assets/init/AGENTS.md +293 -0
- package/assets/init/CLAUDE.md +635 -0
- package/assets/init/README.md +507 -0
- package/assets/init/calibration/gold-set/.gitkeep +0 -0
- package/assets/init/calibration/results/.gitkeep +0 -0
- package/assets/init/core/core-meta-rubric.yaml +643 -0
- package/assets/init/docs/consistency-report.md +325 -0
- package/assets/init/docs/getting-started.md +194 -0
- package/assets/init/docs/profile-authoring-guide.md +51 -0
- package/assets/init/docs/terminology.md +126 -0
- package/assets/init/earos.manifest.yaml +104 -0
- package/assets/init/evaluations/.gitkeep +0 -0
- package/assets/init/examples/aws-event-driven-order-processing/artifact.yaml +2056 -0
- package/assets/init/examples/aws-event-driven-order-processing/evaluation.yaml +973 -0
- package/assets/init/examples/aws-event-driven-order-processing/report.md +244 -0
- package/assets/init/examples/example-solution-architecture.evaluation.yaml +136 -0
- package/assets/init/examples/multi-cloud-data-analytics/artifact.yaml +715 -0
- package/assets/init/overlays/data-governance.yaml +94 -0
- package/assets/init/overlays/regulatory.yaml +154 -0
- package/assets/init/overlays/security.yaml +92 -0
- package/assets/init/profiles/adr.yaml +225 -0
- package/assets/init/profiles/capability-map.yaml +223 -0
- package/assets/init/profiles/reference-architecture.yaml +426 -0
- package/assets/init/profiles/roadmap.yaml +205 -0
- package/assets/init/profiles/solution-architecture.yaml +227 -0
- package/assets/init/research/architecture-assessment-rubrics-research.docx +0 -0
- package/assets/init/research/architecture-assessment-rubrics-research.md +566 -0
- package/assets/init/research/reference-architecture-research.md +751 -0
- package/assets/init/standard/EAROS.md +1426 -0
- package/assets/init/standard/schemas/artifact.schema.json +1295 -0
- package/assets/init/standard/schemas/artifact.uischema.json +65 -0
- package/assets/init/standard/schemas/evaluation.schema.json +284 -0
- package/assets/init/standard/schemas/rubric.schema.json +383 -0
- package/assets/init/templates/evaluation-record.template.yaml +58 -0
- package/assets/init/templates/new-profile.template.yaml +65 -0
- package/bin.js +188 -0
- package/dist/assets/_basePickBy-BVu6YmSW.js +1 -0
- package/dist/assets/_baseUniq-CWRzQDz_.js +1 -0
- package/dist/assets/arc-CyDBhtDM.js +1 -0
- package/dist/assets/architectureDiagram-2XIMDMQ5-BH6O4dvN.js +36 -0
- package/dist/assets/blockDiagram-WCTKOSBZ-2xmwdjpg.js +132 -0
- package/dist/assets/c4Diagram-IC4MRINW-BNmPRFJF.js +10 -0
- package/dist/assets/channel-CiySTNoJ.js +1 -0
- package/dist/assets/chunk-4BX2VUAB-DGQTvirp.js +1 -0
- package/dist/assets/chunk-55IACEB6-DNMAQAC_.js +1 -0
- package/dist/assets/chunk-FMBD7UC4-BJbVTQ5o.js +15 -0
- package/dist/assets/chunk-JSJVCQXG-BCxUL74A.js +1 -0
- package/dist/assets/chunk-KX2RTZJC-H7wWZOfz.js +1 -0
- package/dist/assets/chunk-NQ4KR5QH-BK4RlTQF.js +220 -0
- package/dist/assets/chunk-QZHKN3VN-0chxDV5g.js +1 -0
- package/dist/assets/chunk-WL4C6EOR-DexfQ-AV.js +189 -0
- package/dist/assets/classDiagram-VBA2DB6C-D7luWJQn.js +1 -0
- package/dist/assets/classDiagram-v2-RAHNMMFH-D7luWJQn.js +1 -0
- package/dist/assets/clone-ylgRbd3D.js +1 -0
- package/dist/assets/cose-bilkent-S5V4N54A-DS2IOCfZ.js +1 -0
- package/dist/assets/cytoscape.esm-CyJtwmzi.js +331 -0
- package/dist/assets/dagre-KLK3FWXG-BbSoTTa3.js +4 -0
- package/dist/assets/defaultLocale-DX6XiGOO.js +1 -0
- package/dist/assets/diagram-E7M64L7V-C9TvYgv0.js +24 -0
- package/dist/assets/diagram-IFDJBPK2-DowUMWrg.js +43 -0
- package/dist/assets/diagram-P4PSJMXO-BL6nrnQF.js +24 -0
- package/dist/assets/erDiagram-INFDFZHY-rXPRl8VM.js +70 -0
- package/dist/assets/flowDiagram-PKNHOUZH-DBRM99-W.js +162 -0
- package/dist/assets/ganttDiagram-A5KZAMGK-INcWFsBT.js +292 -0
- package/dist/assets/gitGraphDiagram-K3NZZRJ6-DMwpfE91.js +65 -0
- package/dist/assets/graph-DLQn37b-.js +1 -0
- package/dist/assets/index-BFFITMT8.js +650 -0
- package/dist/assets/index-H7f6VTz1.css +1 -0
- package/dist/assets/infoDiagram-LFFYTUFH-B0f4TWRM.js +2 -0
- package/dist/assets/init-Gi6I4Gst.js +1 -0
- package/dist/assets/ishikawaDiagram-PHBUUO56-CsU6XimZ.js +70 -0
- package/dist/assets/journeyDiagram-4ABVD52K-CQ7ibNib.js +139 -0
- package/dist/assets/kanban-definition-K7BYSVSG-DzEN7THt.js +89 -0
- package/dist/assets/katex-B1X10hvy.js +261 -0
- package/dist/assets/layout-C0dvb42R.js +1 -0
- package/dist/assets/linear-j4a8mGj7.js +1 -0
- package/dist/assets/mindmap-definition-YRQLILUH-DP8iEuCf.js +68 -0
- package/dist/assets/ordinal-Cboi1Yqb.js +1 -0
- package/dist/assets/pieDiagram-SKSYHLDU-BpIAXgAm.js +30 -0
- package/dist/assets/quadrantDiagram-337W2JSQ-DrpXn5Eg.js +7 -0
- package/dist/assets/requirementDiagram-Z7DCOOCP-Bg7EwHlG.js +73 -0
- package/dist/assets/sankeyDiagram-WA2Y5GQK-BWagRs1F.js +10 -0
- package/dist/assets/sequenceDiagram-2WXFIKYE-q5jwhivG.js +145 -0
- package/dist/assets/stateDiagram-RAJIS63D-B_J9pE-2.js +1 -0
- package/dist/assets/stateDiagram-v2-FVOUBMTO-Q_1GcybB.js +1 -0
- package/dist/assets/timeline-definition-YZTLITO2-dv0jgQ0z.js +61 -0
- package/dist/assets/treemap-KZPCXAKY-Dt1dkIE7.js +162 -0
- package/dist/assets/vennDiagram-LZ73GAT5-BdO5RgRZ.js +34 -0
- package/dist/assets/xychartDiagram-JWTSCODW-CpDVe-8v.js +7 -0
- package/dist/index.html +23 -0
- package/export-docx.js +1583 -0
- package/init.js +353 -0
- package/manifest-cli.mjs +207 -0
- package/package.json +83 -0
- package/schemas/artifact.schema.json +1295 -0
- package/schemas/artifact.uischema.json +65 -0
- package/schemas/evaluation.schema.json +284 -0
- package/schemas/rubric.schema.json +383 -0
- package/serve.js +238 -0
package/assets/init/profiles/capability-map.yaml
@@ -0,0 +1,223 @@
+rubric_id: EAROS-CAP-001
+version: 2.0.0
+kind: profile
+title: Capability Map Profile
+status: approved
+artifact_type: capability_map
+inherits:
+  - EAROS-CORE-002
+design_method: viewpoint_centred
+purpose:
+  - strategy_alignment_review
+  - operating_model_review
+  - investment_planning_review
+stakeholders:
+  - enterprise_architect
+  - business_architect
+  - strategy
+  - portfolio
+  - finance
+viewpoints:
+  - capability
+  - ownership
+  - heatmap
+  - target-state
+
+dimensions:
+  - id: CP1
+    name: Decomposition quality
+    description: >
+      A capability map is only as useful as its logical integrity. If capabilities overlap,
+      mix with processes or systems, or jump between abstraction levels, the map cannot
+      support consistent portfolio decisions or comparative analysis over time. Decomposition
+      quality is the foundation that all other uses depend on.
+    weight: 1.0
+    criteria:
+      - id: CAP-01
+        question: Is the capability decomposition stable, non-overlapping, and expressed at a coherent level of abstraction?
+        description: >
+          Business capabilities describe what an organization does, independent of how it does
+          it. When capabilities are confused with processes ('Perform KYC'), organisational
+          units ('Finance Department'), or systems ('SAP'), the map loses its strategic value
+          and cannot be compared year-over-year. Decomposition must be non-overlapping: each
+          business function belongs in exactly one capability, and the definitions must make
+          that boundary clear.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate:
+          enabled: true
+          severity: major
+          failure_effect: Cannot pass above conditional_pass
+        required_evidence:
+          - level definitions (what distinguishes L1, L2, L3)
+          - decomposition structure (parent-child hierarchy)
+          - naming conventions (capability names follow consistent style)
+        scoring_guide:
+          "0": Chaotic or heavily overlapping decomposition — structure unusable for analysis
+          "1": Frequent overlap or mixed abstraction levels throughout (e.g. capabilities and processes side by side)
+          "2": Partly coherent — mostly capability-based but with notable inconsistencies or mixed types
+          "3": Mostly stable and coherent decomposition — occasional minor inconsistency
+          "4": Stable, non-overlapping, structurally consistent decomposition with documented definition principles
+        anti_patterns:
+          - Capabilities mixed with processes, systems, or organisational units at the same level
+          - Different abstraction levels side by side without differentiation
+          - Capability boundaries undefined or implicit
+          - Decomposition changes structure every review cycle
+        examples:
+          good:
+            - >
+              "Level 1: Customer Lifecycle Management. Level 2: Customer Acquisition, Customer
+              Onboarding, Customer Retention, Customer Offboarding. Level 3 of Customer
+              Onboarding: Identity Verification, Account Setup, Welcome Journey. Each capability
+              defined with: what it does, what it does NOT include, and example value streams."
+          bad:
+            - >
+              "Level 2 includes: Customer Onboarding, Run KYC Process (process), CRM System
+              (system), Finance Department Approval (org unit). [Mixed abstraction levels]"
+        decision_tree: >
+          IF capabilities overlap significantly or are not distinguishable THEN score 0.
+          IF frequent overlap or mixed types throughout the map THEN score 1.
+          IF partly coherent but notable inconsistencies present THEN score 2.
+          IF mostly stable decomposition with minor inconsistencies THEN score 3.
+          IF stable, non-overlapping, consistent across all levels, and definition principles documented THEN score 4.
+        remediation_hints:
+          - Normalize level definitions — write a one-sentence definition for each capability
+          - "Separate capability from implementation: remove system and process names from the map"
+          - Assign each item to exactly one parent and review for overlap
+
+  - id: CP2
+    name: Business relevance
+    description: >
+      A capability map without business relevance is an academic exercise. The map's value
+      lies in its ability to inform decisions about investment, operating model design,
+      consolidation, and outsourcing. This requires explicit links between capabilities and
+      business outcomes, ownership, and investment or maturity decisions.
+    weight: 1.0
+    criteria:
+      - id: CAP-02
+        question: Does the map connect capabilities to business outcomes, ownership, and investment or maturity decisions?
+        description: >
+          Capability maps exist to answer business questions: where should we invest? Which
+          capabilities are underdeveloped relative to our strategy? Which are over-invested
+          relative to their strategic value? Without business outcomes, ownership, and
+          maturity or investment heat-maps overlaid, the map is a taxonomy poster, not a
+          decision instrument.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - business outcomes linked to capabilities
+          - ownership assignments (domain or team)
+          - maturity assessment or investment heat-map
+        scoring_guide:
+          "0": No business relevance — capability taxonomy with no connection to decisions
+          "1": Weak narrative connection — business language used but no data or ownership
+          "2": Partial relevance — some capabilities linked to outcomes or owners, but incomplete
+          "3": Clear link to business concerns — ownership and outcome alignment for most capabilities
+          "4": Strong basis for strategy and portfolio decisions — heat-maps, ownership, maturity ratings, and investment cues all present
+        anti_patterns:
+          - Map is a taxonomy poster with no annotations or overlays
+          - No ownership assigned to any capability
+          - Maturity or investment data exists in spreadsheet but not in the map
+        examples:
+          good:
+            - >
+              "Heat-map overlay: Customer Onboarding (maturity: L2, investment priority: HIGH —
+              strategic differentiator for 2026 growth target). Core Banking Processing (maturity:
+              L3, investment priority: LOW — commodity, consider shared service). Owner assignments
+              visible per domain. Map used as input to Q3 2026 portfolio review."
+          bad:
+            - >
+              "The capability map shows the full range of business capabilities organized by
+              domain. [No ownership, no maturity, no business outcome links]"
+        decision_tree: >
+          IF no business context or outcomes linked THEN score 0.
+          IF weak narrative connection without data or ownership THEN score 1.
+          IF some capabilities linked to outcomes or owners but significant gaps THEN score 2.
+          IF clear link to business concerns with ownership for most capabilities THEN score 3.
+          IF heat-maps, ownership, maturity ratings, investment cues, and strategic alignment all present THEN score 4.
+        remediation_hints:
+          - Add ownership to every Level 1 and Level 2 capability
+          - Add a maturity or investment priority overlay
+          - Link the map explicitly to portfolio or strategy decisions
+
+  - id: CP3
+    name: Comparability and stewardship
+    description: >
+      The value of a capability map as a longitudinal decision instrument depends on structural
+      stability. If the decomposition changes significantly each quarter, historical comparisons
+      become meaningless, investment trends cannot be tracked, and teams lose confidence in
+      the map as a reference.
+    weight: 1.0
+    criteria:
+      - id: CAP-03
+        question: Can the capability map be reused over time for comparative analysis without frequent structural rework?
+        description: >
+          Capability maps are used year-over-year for maturity tracking, investment planning,
+          and portfolio analysis. This requires that the structural definitions remain stable
+          enough to support comparison. Changes must be governed with clear policies, and
+          migration paths must be provided so that historical data and heat-maps remain
+          interpretable across versions.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - change control policy or governance process
+          - versioning information
+          - definition stability commitment or review cadence
+        scoring_guide:
+          "0": Not reusable — structure changes without documentation or governance
+          "1": Highly unstable — map frequently reorganised without version control
+          "2": Some comparative reuse possible — versioned but no change policy or ownership
+          "3": Generally comparable over time — versioned with change policy and defined owner
+          "4": Strong stewardship and longitudinal usability — versioning, change control, owner, and migration guidance for breaking changes
+        anti_patterns:
+          - Decomposition structure changes every quarter
+          - No definition owner or change authority
+          - Breaking structural changes made without migration documentation
+        examples:
+          good:
+            - >
+              "Version: 3.0.0. Owner: Business Architecture team. Change policy: structural
+              changes (Level 1/2 additions or moves) require Architecture Board approval; Level 3+
+              additions approved by domain architect. Last review: 2026-02-15. Next review:
+              2026-08-15. Version history in Git. Migration guide provided for v2.x → v3.0
+              capability renames."
+          bad:
+            - >
+              "The capability map is updated as needed. [No versioning, no owner, no policy]"
+        decision_tree: >
+          IF no versioning and structure changes without documentation THEN score 0.
+          IF map frequently reorganised without governance THEN score 1.
+          IF versioned but no change policy or ownership THEN score 2.
+          IF versioned with change policy and defined owner THEN score 3.
+          IF full stewardship including versioning, change control, longitudinal use guidance,
+          and migration documentation for breaking changes THEN score 4.
+        remediation_hints:
+          - Assign a named owner and define the change authority
+          - Version the map and maintain a change log
+          - Define which structural changes require governance approval
+
+scoring:
+  scale: 0-4 ordinal plus N/A
+  method: gates_first_then_weighted_average
+  thresholds:
+    pass: No critical gate failure, overall >= 3.2, and no dimension < 2.0
+    conditional_pass: No critical gate failure and overall 2.4-3.19 or one weak dimension
+    rework_required: Overall < 2.4 or repeated weak dimensions
+    reject: Critical gate failure or mandatory control breach
+    not_reviewable: Evidence insufficient for core gate criteria
+    profile_specific_escalation: Escalate when CAP-01 < 3 for enterprise-wide portfolio use
+  na_policy: Exclude N/A criteria from denominator; evaluator must justify N/A
+  confidence_policy: Confidence reported separately, must not modify score
+
+outputs:
+  require_evidence_refs: true
+  require_confidence: true
+  require_actions: true
+  require_evidence_class: true
+  require_evidence_anchors: true
+  formats:
+    - yaml
+    - json
+    - markdown-report
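Both profiles in this diff declare `method: gates_first_then_weighted_average`, an `na_policy` that excludes N/A scores from the denominator, and a `confidence_policy` that keeps confidence out of the score. A minimal Python sketch of that scoring pipeline follows; the function name, data shapes, and severity labels are illustrative assumptions, not code shipped in this package:

```python
def overall_verdict(dimensions, gate_failures):
    """Gates first, then weighted average (illustrative sketch, not package code).

    dimensions: list of (weight, [criterion score or None for N/A]) pairs.
    gate_failures: list of {"severity": "critical" | "major"} entries.
    Thresholds mirror the profiles' `scoring.thresholds` blocks; confidence is
    deliberately not an input, matching `confidence_policy`.
    """
    # Gates first: a critical gate failure rejects regardless of averages.
    if any(g["severity"] == "critical" for g in gate_failures):
        return "reject"

    dim_scores, weighted_sum, total_weight = [], 0.0, 0.0
    for weight, scores in dimensions:
        scored = [s for s in scores if s is not None]  # na_policy: N/A leaves the denominator
        if not scored:
            continue  # a dimension that is entirely N/A is skipped
        dim = sum(scored) / len(scored)
        dim_scores.append(dim)
        weighted_sum += weight * dim
        total_weight += weight

    if total_weight == 0.0:
        return "not_reviewable"
    overall = weighted_sum / total_weight

    # A major gate failure caps the outcome at conditional_pass.
    capped = any(g["severity"] == "major" for g in gate_failures)
    if overall >= 3.2 and min(dim_scores) >= 2.0 and not capped:
        return "pass"
    if overall >= 2.4:
        return "conditional_pass"
    return "rework_required"


# One dimension at (3+4)/2 = 3.5 and one at 2.0 average to 2.75: conditional_pass.
print(overall_verdict([(1.0, [3, 4]), (1.0, [2, None])], []))
```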
package/assets/init/profiles/reference-architecture.yaml
@@ -0,0 +1,426 @@
+rubric_id: EAROS-REFARCH-001
+version: 2.0.0
+kind: profile
+title: Reference Architecture Profile
+status: draft
+effective_date: "2026-03-18"
+next_review_date: "2026-09-18"
+owner: enterprise-architecture
+artifact_type: reference_architecture
+inherits:
+  - EAROS-CORE-002
+design_method: pattern_library
+
+purpose:
+  - blueprint_review
+  - golden_path_review
+  - platform_standard_review
+  - reuse_assessment
+
+stakeholders:
+  - architecture_board
+  - platform_team
+  - domain_architect
+  - development_team
+  - operations
+  - security
+  - compliance
+
+viewpoints:
+  - context
+  - functional
+  - deployment
+  - data_flow
+  - security
+  - operational
+
+# This profile adds 9 criteria across 6 dimensions specific to reference architectures.
+# Combined with the 10 core meta-rubric criteria, this gives 19 criteria for a full assessment.
+
+dimensions:
+  - id: RA-D1
+    name: Architecture views and completeness
+    description: Does the reference architecture provide the necessary views for its audience, covering structure, behaviour, deployment, and data flow?
+    weight: 1.2
+    criteria:
+      - id: RA-VIEW-01
+        question: Does the reference architecture include context, functional, deployment, and data flow views?
+        description: >
+          A reference architecture must show how the system relates to its environment (context),
+          how it is structurally decomposed (functional/container), how it is deployed (infrastructure),
+          and how data moves through it (runtime scenarios). Missing views leave critical gaps
+          that prevent teams from implementing the architecture correctly.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate:
+          enabled: true
+          severity: major
+          failure_effect: Cannot pass if score < 2
+        required_evidence:
+          - context diagram (C4 Level 1 or equivalent)
+          - container/functional diagram (C4 Level 2 or equivalent)
+          - deployment diagram showing infrastructure topology
+          - data flow narrative with numbered steps
+        scoring_guide:
+          "0": Single diagram only, or no architectural views
+          "1": Two views present but incomplete or inconsistent
+          "2": Three views present, data flow narrative exists but is partial
+          "3": All four views present with adequate detail
+          "4": All four views present, consistent, with security view and cross-references between views
+        anti_patterns:
+          - Single box-and-arrow diagram presented as complete architecture
+          - Deployment view missing entirely
+          - No data flow narrative (diagram without numbered walkthrough)
+          - Views at inconsistent abstraction levels
+        examples:
+          good:
+            - "Section 3 provides C4 context diagram. Section 5 shows container decomposition with technology annotations. Section 7 shows Kubernetes deployment topology across 3 AZs. Section 6 walks through the order processing flow in 8 numbered steps."
+          bad:
+            - "See architecture diagram on page 3 [single diagram showing all components with no narrative]."
+        decision_tree: >
+          Count distinct views: IF < 2 THEN score 0-1. IF 2-3 views THEN score 2.
+          IF 4+ views AND data flow narrative exists THEN score 3.
+          IF all views are cross-referenced AND security view included THEN score 4.
+        remediation_hints:
+          - Add missing views using C4 model levels
+          - Add numbered data flow walkthrough
+          - Add deployment topology diagram
+
+      - id: RA-VIEW-02
+        question: Are architecture diagrams machine-readable or accompanied by structured element catalogs?
+        description: >
+          For automated assessment and ongoing governance, diagrams should be stored in
+          machine-readable formats (Structurizr DSL, PlantUML, Mermaid, ArchiMate exchange format)
+          or at minimum accompanied by a structured element catalog listing all components,
+          their responsibilities, technologies, and relationships.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - diagram source files (DSL, PlantUML, Mermaid) OR element catalog
+          - technology annotations on components
+          - relationship descriptions
+        scoring_guide:
+          "0": Image-only diagrams with no structured metadata
+          "1": Diagrams with informal text descriptions
+          "2": Structured element catalog accompanies diagrams
+          "3": Diagram-as-code (Structurizr/PlantUML/Mermaid) used for main views
+          "4": Full architecture-as-code with model as single source of truth, all views generated from model
+        anti_patterns:
+          - PowerPoint or Visio diagrams with no structured metadata
+          - Diagrams without element names or technology labels
+        remediation_hints:
+          - Add structured element catalog table (name, type, technology, responsibility)
+          - Migrate key diagrams to diagram-as-code format
+
+  - id: RA-D2
+    name: Prescriptiveness and decision guidance
+    description: Does the reference architecture provide clear, opinionated guidance while documenting the decisions and trade-offs behind those opinions?
+    weight: 1.0
+    criteria:
+      - id: RA-DEC-01
+        question: Are key architecture decisions documented with context, options considered, and rationale?
+        description: >
+          A reference architecture embodies a set of architectural decisions. These must be
+          explicit so that implementers understand not just what to build but why these choices
+          were made. Without decision rationale, teams cannot judge whether the reference
+          architecture applies to their specific context.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate:
+          enabled: true
+          severity: major
+          failure_effect: Cannot pass above conditional_pass when score < 2
+        required_evidence:
+          - architecture decision records (ADRs) or equivalent
+          - alternatives considered
+          - trade-off analysis
+          - conditions under which decisions should be revisited
+        scoring_guide:
+          "0": No decisions documented
+          "1": Technology choices listed without rationale
+          "2": Key decisions have rationale but alternatives not discussed
+          "3": Decisions documented in ADR format with alternatives and trade-offs
+          "4": Full ADRs with context, options, consequences, trade-offs, and revisit triggers
+        anti_patterns:
+          - Technology choices presented as self-evident
+          - Only the chosen option documented
+          - No discussion of when the reference architecture does NOT apply
+        examples:
+          good:
+            - "ADR-001: Use event-driven architecture for inter-service communication. Context: Services need loose coupling for independent deployment. Options: (A) Synchronous REST, (B) Async messaging, (C) Event sourcing. Decision: Option C. Trade-off: Increased complexity in exchange for auditability and decoupling. Revisit if: throughput exceeds 50K events/sec or team lacks event sourcing experience."
+          bad:
+            - "We use Kafka for messaging."
+        remediation_hints:
+          - Add ADR section using MADR template
+          - Document at least 5 key decisions with alternatives
+
+      - id: RA-DEC-02
+        question: Does the reference architecture clearly define what is fixed, what is configurable, and where teams have discretion?
+        description: >
+          Reference architectures sit on a prescriptiveness spectrum. The most useful ones
+          make explicit which elements are mandatory (must use), recommended (should use),
+          optional (may use), and which are decision points where teams must make their
+          own choices based on context.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - classification of components as mandatory/recommended/optional
+          - documented extension points
+          - decision framework for variant selection
+        scoring_guide:
+          "0": No distinction between fixed and flexible elements
+          "1": Some elements marked as mandatory
+          "2": Mandatory and optional elements distinguished
+          "3": Clear three-level classification (mandatory/recommended/optional) with rationale
+          "4": Full customisation framework with decision trees for when to deviate
+        anti_patterns:
+          - Everything presented as mandatory with no flexibility
+          - Everything presented as optional with no guidance
+          - No discussion of when the reference architecture does not apply
+        remediation_hints:
+          - Add classification table (component, mandate level, rationale)
+          - Document extension points and variation rules
+
+  - id: RA-D3
+    name: Operational readiness
+    description: Does the reference architecture address how the solution is operated, monitored, scaled, and recovered in production?
+    weight: 1.0
+    criteria:
+      - id: RA-OPS-01
+        question: Does the reference architecture include monitoring, alerting, scaling, and disaster recovery guidance?
+        description: >
+          A reference architecture that only describes the build-time structure without
+          addressing operational concerns leaves teams to figure out production readiness
+          on their own, undermining the purpose of having a standard blueprint.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate:
+          enabled: true
+          severity: major
+          failure_effect: Cannot pass above conditional_pass when score < 2
+        required_evidence:
+          - monitoring strategy (metrics, dashboards, alerting rules)
+          - scaling policies (auto-scaling triggers, capacity planning)
+          - disaster recovery plan (RTO, RPO, failover procedures)
+          - SLO definitions
+        scoring_guide:
+          "0": No operational guidance
+          "1": Vague mention of monitoring or scaling
+          "2": Monitoring and scaling described in principle, SLOs mentioned
+          "3": Concrete monitoring dashboards, scaling policies, SLOs, basic DR
+          "4": Full operational model with dashboard templates, alerting rules, auto-scaling configs, tested DR runbooks, and SLO definitions with error budgets
+        anti_patterns:
+          - Architecture focuses only on build-time, no run-time guidance
+          - Monitoring mentioned as future work
+          - No SLO or SLA definitions
+        examples:
+          good:
+            - "SLOs: Availability 99.95%, P99 latency < 200ms, error rate < 0.1%. Monitoring: Prometheus metrics exposed on /metrics, Grafana dashboard template in /ops/dashboards/. Alerting: PagerDuty integration with escalation policy. Auto-scaling: HPA targeting 70% CPU, min 3 / max 20 pods per service."
+          bad:
+            - "Monitoring should be implemented. Consider using CloudWatch."
+        remediation_hints:
+          - Add operational model section
+          - Define SLOs with measurable targets
+          - Include dashboard/alerting templates
+
+  - id: RA-D4
+    name: Implementation actionability
+    description: Does the reference architecture provide concrete implementation artefacts that enable teams to get started quickly?
+    weight: 1.2
+    criteria:
+      - id: RA-IMP-01
+        question: Does the reference architecture include infrastructure-as-code templates, API specifications, or starter kits?
+        description: >
+          The hallmark of a modern reference architecture is that it is deployable, not just
+          describable. Teams should be able to instantiate the architecture from templates
+          rather than rebuilding from scratch. This is the golden path principle applied
+          to reference architectures.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - infrastructure-as-code templates (Terraform, CDK, Bicep, CloudFormation)
+          - API specifications (OpenAPI, AsyncAPI)
+          - CI/CD pipeline templates
+          - starter kit or scaffold template
+        scoring_guide:
+          "0": No implementation artefacts
+          "1": Code snippets or partial examples only
+          "2": Some IaC templates or API specs provided
+          "3": Complete IaC templates, API specs, and CI/CD pipeline templates
+          "4": Full golden path including scaffold template, IaC, API specs, CI/CD, observability config, and working sample application
+        anti_patterns:
+          - Architecture described only in documents with no runnable artefacts
+          - Outdated code samples that no longer compile
+          - IaC templates that don't match the architecture diagrams
+        examples:
+          good:
+            - "Repository structure: /infra (Terraform modules), /api (OpenAPI specs), /pipeline (GitHub Actions), /scaffold (Backstage template), /sample-app (working reference implementation), /ops (Grafana dashboards, alerting rules)."
+          bad:
+            - "See code examples in appendix A [3-year-old Java snippets using deprecated libraries]."
+        remediation_hints:
+          - Add IaC templates matching the deployment view
+          - Add OpenAPI specs for service interfaces
+          - Create a scaffold template for new services
+
+      - id: RA-IMP-02
+        question: Does the reference architecture include a clear getting-started guide or golden path for new adopters?
+        description: >
+          Following Spotify's golden path principle, the reference architecture should
+          dramatically reduce the time to first deployment. A new team should be able
+          to go from zero to running service in hours, not weeks.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - step-by-step getting started guide
+          - estimated time to first deployment
+          - prerequisites checklist
+          - troubleshooting section
+        scoring_guide:
+          "0": No getting started guide
+          "1": Informal setup notes
+          "2": Step-by-step guide but untested or incomplete
+          "3": Tested getting started guide with prerequisites and estimated time
+          "4": Fully automated onboarding (Backstage template or equivalent) with <1 hour to first deployment
+        anti_patterns:
+          - Getting started requires tribal knowledge
+          - Setup takes more than a day
+          - Guide assumes context that new team members don't have
+        remediation_hints:
+          - Write a step-by-step getting started guide
+          - Test the guide with a team unfamiliar with the architecture
+          - Create a scaffold template or Backstage integration
+
+  - id: RA-D5
+    name: Quality attribute specification
+    description: Are quality attributes explicitly defined with measurable targets and validation strategies?
+    weight: 1.0
+    criteria:
+      - id: RA-QA-01
+        question: Are quality attributes defined with measurable acceptance criteria and validation approaches?
+        description: >
+          Quality attributes (availability, latency, throughput, security posture) must be
+          stated as concrete, measurable targets — not vague aspirations. Each should include
+          how it will be validated (load testing, chaos engineering, penetration testing).
+          This enables both human reviewers and AI agents to objectively assess compliance.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate:
+          enabled: true
+          severity: major
+          failure_effect: Cannot pass above conditional_pass when score < 2
+        required_evidence:
+          - quality attribute list with measurable targets
+          - quality scenarios (TOGAF format or equivalent)
+          - validation strategy for each attribute
+          - fitness functions or automated tests
+        scoring_guide:
+          "0": No quality attributes defined
+          "1": Quality attributes mentioned informally (e.g. 'the system should be fast')
+          "2": Quality attributes stated with some measurable targets but incomplete coverage
+          "3": All material quality attributes have measurable targets and validation approaches
+          "4": Full quality model with measurable targets, automated fitness functions, quality scenarios, and continuous validation in CI/CD
+        anti_patterns:
+          - Quality attributes described as adjectives rather than metrics
+          - No latency, throughput, or availability targets
+          - No validation strategy
+        examples:
+          good:
+            - "Availability: 99.95% measured monthly (synthetic probes every 30s). Latency: P99 < 200ms (validated by load test with 10K concurrent users). Security: SOC 2 Type II compliant (annual audit). Throughput: 5000 TPS sustained (validated by performance test in staging)."
+          bad:
+            - "The system should be highly available, performant, and secure."
+        decision_tree: >
+          IF no quality attribute section THEN score 0.
+          IF quality attributes are adjectives without numbers THEN score 1.
+          IF measurable targets exist for some attributes THEN score 2.
+          IF all material attributes have targets AND validation approaches THEN score 3.
+          IF automated fitness functions exist THEN score 4.
+        remediation_hints:
+          - Replace adjective-based requirements with measurable targets
+          - Add validation strategy for each quality attribute
+          - Implement fitness functions for critical attributes
+
+  - id: RA-D6
+    name: Reusability and evolution
+    description: Is the reference architecture designed for reuse across teams and evolution over time?
+    weight: 0.8
+    criteria:
+      - id: RA-REU-01
+        question: Is the reference architecture version-controlled with a clear evolution roadmap?
+        description: >
+          Reference architectures must evolve as technology, business needs, and security
+          requirements change. A reference architecture without versioning and an evolution
+          plan will become stale and eventually harmful as teams implement outdated patterns.
+        metric_type: ordinal
+        scale: [0, 1, 2, 3, 4, "N/A"]
+        gate: false
+        required_evidence:
+          - version number and change log
+          - evolution roadmap or backlog
+          - deprecation strategy for superseded patterns
+          - feedback mechanism for adopting teams
+        scoring_guide:
+          "0": No versioning or change management
+          "1": Version number exists but no change history
+          "2": Versioned with change log but no evolution plan
+          "3": Versioned, change log, known limitations documented, basic evolution plan
+          "4": Full lifecycle management with roadmap, deprecation strategy, feedback loop, and migration guidance for breaking changes
+        anti_patterns:
+          - Static document with no version or last-updated date
+          - Major changes made without migration guidance
+          - No way for adopting teams to report issues
+        remediation_hints:
+          - Add version number and change log
+          - Document known limitations and planned improvements
+          - Create a feedback channel for adopting teams
+
+scoring:
+  scale: 0-4 ordinal plus N/A
+  agent_scale: 0-3 ordinal plus N/A (optional collapse for pure agent evaluation)
+  method: gates_first_then_weighted_average
+  thresholds:
+    pass: No critical gate failure, overall >= 3.2, no dimension < 2.0
+    conditional_pass: No critical gate failure, overall 2.4-3.19 or one weak dimension
+    rework_required: Overall < 2.4 or repeated weak dimensions
+    reject: Critical gate failure or mandatory control breach
+    not_reviewable: Evidence insufficient for core gate criteria
+  na_policy: Exclude N/A criteria from denominator; evaluator must justify N/A
+  confidence_policy: Confidence reported separately, must not modify score
+
+outputs:
+  require_evidence_refs: true
+  require_confidence: true
+  require_actions: true
+  require_evidence_class: true
+  require_evidence_anchors: true
+  formats:
+    - yaml
+    - json
+    - markdown-report
+    - xlsx
+
+calibration:
+  required_before_production: true
+  minimum_examples: 3
+  recommended_reviewers:
+    - 2 human reviewers (1 platform architect, 1 domain architect)
+    - 1 evaluator agent
+    - 1 challenger agent
+  calibration_artifacts:
+    - 1 strong reference architecture (well-established, production-proven)
+    - 1 weak reference architecture (diagram-only, no decisions, no operational guidance)
+    - 1 ambiguous reference architecture (good structure but outdated technology choices)
+    - 1 golden-path reference architecture (fully automated, with scaffold templates)
+
+change_log:
+  - version: "2.0.0"
+    date: "2026-03-18"
+    author: "Thomas Rohde"
+    changes:
+      - Initial reference architecture profile for EAROS v2.0
+      - 9 criteria across 6 dimensions (19 total when combined with the 10 core criteria)
+      - Designed using pattern_library method
+      - Incorporates golden path, diagram-as-code, and operational readiness research
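The package also ships `rubric.schema.json` both at `package/schemas/` and under `package/assets/init/standard/schemas/`, so profiles like the two above can presumably be schema-checked before calibration. A hedged sketch using PyYAML and the `jsonschema` library; the relative paths follow the file list at the top, and this script is not a CLI the package itself exposes:

```python
import json

import yaml  # PyYAML
from jsonschema import validators

# Paths taken from the package file list; run from the directory containing package/.
with open("package/schemas/rubric.schema.json") as f:
    schema = json.load(f)
with open("package/assets/init/profiles/reference-architecture.yaml") as f:
    profile = yaml.safe_load(f)

# Pick the validator class matching the schema's own $schema declaration
# (jsonschema falls back to the latest supported draft if none is declared).
ValidatorCls = validators.validator_for(schema)
ValidatorCls.check_schema(schema)

errors = sorted(ValidatorCls(schema).iter_errors(profile),
                key=lambda e: list(e.absolute_path))
for err in errors:
    where = "/".join(map(str, err.absolute_path)) or "<root>"
    print(f"{where}: {err.message}")
print("schema-valid" if not errors else f"{len(errors)} violation(s)")
```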