@fredcallagan/arn-spark 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +9 -0
- package/.opencode/plugins/arn-spark.js +272 -0
- package/package.json +17 -0
- package/plugins/arn-spark/.claude-plugin/plugin.json +9 -0
- package/plugins/arn-spark/LICENSE +21 -0
- package/plugins/arn-spark/README.md +25 -0
- package/plugins/arn-spark/agents/arn-spark-brand-strategist.md +299 -0
- package/plugins/arn-spark/agents/arn-spark-dev-env-builder.md +228 -0
- package/plugins/arn-spark/agents/arn-spark-doctor.md +92 -0
- package/plugins/arn-spark/agents/arn-spark-forensic-investigator.md +181 -0
- package/plugins/arn-spark/agents/arn-spark-market-researcher.md +232 -0
- package/plugins/arn-spark/agents/arn-spark-marketing-pm.md +225 -0
- package/plugins/arn-spark/agents/arn-spark-persona-architect.md +259 -0
- package/plugins/arn-spark/agents/arn-spark-persona-impersonator.md +183 -0
- package/plugins/arn-spark/agents/arn-spark-product-strategist.md +191 -0
- package/plugins/arn-spark/agents/arn-spark-prototype-builder.md +497 -0
- package/plugins/arn-spark/agents/arn-spark-scaffolder.md +228 -0
- package/plugins/arn-spark/agents/arn-spark-spike-runner.md +209 -0
- package/plugins/arn-spark/agents/arn-spark-style-capture.md +196 -0
- package/plugins/arn-spark/agents/arn-spark-tech-evaluator.md +229 -0
- package/plugins/arn-spark/agents/arn-spark-ui-interactor.md +235 -0
- package/plugins/arn-spark/agents/arn-spark-use-case-writer.md +280 -0
- package/plugins/arn-spark/agents/arn-spark-ux-judge.md +215 -0
- package/plugins/arn-spark/agents/arn-spark-ux-specialist.md +200 -0
- package/plugins/arn-spark/agents/arn-spark-visual-sketcher.md +285 -0
- package/plugins/arn-spark/agents/arn-spark-visual-test-engineer.md +224 -0
- package/plugins/arn-spark/references/copilot-tools.md +62 -0
- package/plugins/arn-spark/skills/arn-brainstorming/SKILL.md +520 -0
- package/plugins/arn-spark/skills/arn-brainstorming/references/add-feature-flow.md +155 -0
- package/plugins/arn-spark/skills/arn-spark-arch-vision/SKILL.md +226 -0
- package/plugins/arn-spark/skills/arn-spark-arch-vision/references/architecture-vision-template.md +153 -0
- package/plugins/arn-spark/skills/arn-spark-arch-vision/references/technology-evaluation-guide.md +86 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype/SKILL.md +471 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype/references/clickable-prototype-criteria.md +65 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype/references/journey-template.md +62 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype/references/review-report-template.md +75 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype/references/showcase-capture-guide.md +213 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype-teams/SKILL.md +642 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype-teams/references/debate-protocol.md +242 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype-teams/references/debate-review-report-template.md +161 -0
- package/plugins/arn-spark/skills/arn-spark-clickable-prototype-teams/references/expert-interaction-review-template.md +152 -0
- package/plugins/arn-spark/skills/arn-spark-concept-review/SKILL.md +350 -0
- package/plugins/arn-spark/skills/arn-spark-concept-review/references/conflict-resolution-protocol.md +145 -0
- package/plugins/arn-spark/skills/arn-spark-concept-review/references/review-report-template.md +185 -0
- package/plugins/arn-spark/skills/arn-spark-dev-setup/SKILL.md +366 -0
- package/plugins/arn-spark/skills/arn-spark-dev-setup/references/dev-setup-checklist.md +84 -0
- package/plugins/arn-spark/skills/arn-spark-dev-setup/references/dev-setup-template.md +205 -0
- package/plugins/arn-spark/skills/arn-spark-discover/SKILL.md +303 -0
- package/plugins/arn-spark/skills/arn-spark-discover/references/competitive-landscape-template.md +87 -0
- package/plugins/arn-spark/skills/arn-spark-discover/references/discovery-questions.md +120 -0
- package/plugins/arn-spark/skills/arn-spark-discover/references/persona-profile-template.md +97 -0
- package/plugins/arn-spark/skills/arn-spark-discover/references/product-concept-template.md +253 -0
- package/plugins/arn-spark/skills/arn-spark-ensure-config/SKILL.md +23 -0
- package/plugins/arn-spark/skills/arn-spark-ensure-config/references/ensure-config.md +388 -0
- package/plugins/arn-spark/skills/arn-spark-ensure-config/references/step-0-fast-path.md +25 -0
- package/plugins/arn-spark/skills/arn-spark-ensure-config/scripts/cache-check.sh +127 -0
- package/plugins/arn-spark/skills/arn-spark-feature-extract/SKILL.md +483 -0
- package/plugins/arn-spark/skills/arn-spark-feature-extract/references/feature-backlog-template.md +176 -0
- package/plugins/arn-spark/skills/arn-spark-feature-extract/references/feature-entry-template.md +209 -0
- package/plugins/arn-spark/skills/arn-spark-help/SKILL.md +149 -0
- package/plugins/arn-spark/skills/arn-spark-help/references/pipeline-map.md +211 -0
- package/plugins/arn-spark/skills/arn-spark-init/SKILL.md +312 -0
- package/plugins/arn-spark/skills/arn-spark-init/references/agent-models-presets/all-opus.md +23 -0
- package/plugins/arn-spark/skills/arn-spark-init/references/agent-models-presets/balanced.md +23 -0
- package/plugins/arn-spark/skills/arn-spark-init/references/bkt-setup.md +55 -0
- package/plugins/arn-spark/skills/arn-spark-init/references/jira-mcp-setup.md +61 -0
- package/plugins/arn-spark/skills/arn-spark-init/references/platform-labels.md +97 -0
- package/plugins/arn-spark/skills/arn-spark-naming/SKILL.md +275 -0
- package/plugins/arn-spark/skills/arn-spark-naming/references/creative-brief-template.md +146 -0
- package/plugins/arn-spark/skills/arn-spark-naming/references/naming-methodology.md +237 -0
- package/plugins/arn-spark/skills/arn-spark-naming/references/naming-report-template.md +122 -0
- package/plugins/arn-spark/skills/arn-spark-naming/references/trademark-databases.md +88 -0
- package/plugins/arn-spark/skills/arn-spark-naming/references/whois-server-map.md +164 -0
- package/plugins/arn-spark/skills/arn-spark-naming/scripts/whois-check.js +502 -0
- package/plugins/arn-spark/skills/arn-spark-naming/scripts/whois-check.py +533 -0
- package/plugins/arn-spark/skills/arn-spark-prototype-lock/SKILL.md +260 -0
- package/plugins/arn-spark/skills/arn-spark-prototype-lock/references/lock-report-template.md +68 -0
- package/plugins/arn-spark/skills/arn-spark-prototype-lock/references/pretooluse-hook-template.json +35 -0
- package/plugins/arn-spark/skills/arn-spark-prototype-lock/references/prototype-guardrail-rules.md +38 -0
- package/plugins/arn-spark/skills/arn-spark-report/SKILL.md +144 -0
- package/plugins/arn-spark/skills/arn-spark-report/references/issue-template.md +81 -0
- package/plugins/arn-spark/skills/arn-spark-report/references/spark-knowledge-base.md +293 -0
- package/plugins/arn-spark/skills/arn-spark-scaffold/SKILL.md +239 -0
- package/plugins/arn-spark/skills/arn-spark-scaffold/references/scaffold-checklist.md +79 -0
- package/plugins/arn-spark/skills/arn-spark-scaffold/references/scaffold-summary-template.md +74 -0
- package/plugins/arn-spark/skills/arn-spark-spike/SKILL.md +209 -0
- package/plugins/arn-spark/skills/arn-spark-spike/references/spike-report-template.md +123 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype/SKILL.md +362 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype/references/review-report-template.md +65 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype/references/showcase-capture-guide.md +153 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype/references/static-prototype-criteria.md +54 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype-teams/SKILL.md +518 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype-teams/references/debate-protocol.md +230 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype-teams/references/debate-review-report-template.md +148 -0
- package/plugins/arn-spark/skills/arn-spark-static-prototype-teams/references/expert-visual-review-template.md +130 -0
- package/plugins/arn-spark/skills/arn-spark-stress-competitive/SKILL.md +166 -0
- package/plugins/arn-spark/skills/arn-spark-stress-competitive/references/competitive-report-template.md +139 -0
- package/plugins/arn-spark/skills/arn-spark-stress-competitive/references/gap-analysis-framework.md +111 -0
- package/plugins/arn-spark/skills/arn-spark-stress-interview/SKILL.md +257 -0
- package/plugins/arn-spark/skills/arn-spark-stress-interview/references/interview-protocol.md +140 -0
- package/plugins/arn-spark/skills/arn-spark-stress-interview/references/interview-report-template.md +165 -0
- package/plugins/arn-spark/skills/arn-spark-stress-interview/references/persona-casting-spec.md +138 -0
- package/plugins/arn-spark/skills/arn-spark-stress-premortem/SKILL.md +181 -0
- package/plugins/arn-spark/skills/arn-spark-stress-premortem/references/premortem-protocol.md +112 -0
- package/plugins/arn-spark/skills/arn-spark-stress-premortem/references/premortem-report-template.md +158 -0
- package/plugins/arn-spark/skills/arn-spark-stress-prfaq/SKILL.md +206 -0
- package/plugins/arn-spark/skills/arn-spark-stress-prfaq/references/prfaq-report-template.md +139 -0
- package/plugins/arn-spark/skills/arn-spark-stress-prfaq/references/prfaq-workflow.md +118 -0
- package/plugins/arn-spark/skills/arn-spark-style-explore/SKILL.md +281 -0
- package/plugins/arn-spark/skills/arn-spark-style-explore/references/style-brief-template.md +198 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases/SKILL.md +359 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases/references/expert-review-template.md +94 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases/references/review-protocol.md +150 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases/references/use-case-index-template.md +108 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases/references/use-case-template.md +125 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases-teams/SKILL.md +306 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases-teams/references/debate-protocol.md +272 -0
- package/plugins/arn-spark/skills/arn-spark-use-cases-teams/references/review-report-template.md +112 -0
- package/plugins/arn-spark/skills/arn-spark-visual-readiness/SKILL.md +293 -0
- package/plugins/arn-spark/skills/arn-spark-visual-readiness/references/readiness-checklist.md +196 -0
- package/plugins/arn-spark/skills/arn-spark-visual-sketch/SKILL.md +376 -0
- package/plugins/arn-spark/skills/arn-spark-visual-sketch/references/aesthetic-philosophy.md +210 -0
- package/plugins/arn-spark/skills/arn-spark-visual-sketch/references/sketch-gallery-guide.md +282 -0
- package/plugins/arn-spark/skills/arn-spark-visual-sketch/references/visual-direction-template.md +174 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/SKILL.md +447 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/references/baseline-capture-script-template.js +89 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/references/journey-schema.md +375 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/references/spike-checklist.md +122 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/references/strategy-layers-guide.md +132 -0
- package/plugins/arn-spark/skills/arn-spark-visual-strategy/references/visual-strategy-template.md +141 -0
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
# Arness Spark Ensure Config — Step 0 Reference
|
|
2
|
+
|
|
3
|
+
This reference is read by entry-point skills (arn-brainstorming, arn-spark-discover, arn-spark-arch-vision) as Step 0 before their workflow begins.
|
|
4
|
+
|
|
5
|
+
Follow the layers below in order. Each layer has a fast path (skip when already satisfied) and a setup path (run once).
|
|
6
|
+
|
|
7
|
+
**Special note for `arn-spark-discover`:** Ensure-config should run but must not hard-block if something fails. Discover may be invoked before any project exists — it is exploratory by nature. If arness.md cannot be written (no project directory), skip Layer 2 and proceed with the original skill.
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Layer 1: Profile Check (Welcome & Profile)
|
|
12
|
+
|
|
13
|
+
### 1a. Check for Existing Profile
|
|
14
|
+
|
|
15
|
+
Check whether a project profile already exists:
|
|
16
|
+
|
|
17
|
+
1. Run via Bash: `test -f .arness/profile.yaml && echo "EXISTS" || echo "MISSING"`
|
|
18
|
+
|
|
19
|
+
**Decision tree:**
|
|
20
|
+
|
|
21
|
+
- **Profile exists:** Read `.arness/profile.yaml`. Ask the user:
|
|
22
|
+
|
|
23
|
+
> **Use your existing Arness profile for this project?**
|
|
24
|
+
> 1. Yes, use my existing profile
|
|
25
|
+
> 2. No, let me adjust for this project
|
|
26
|
+
|
|
27
|
+
- If **Yes:** Use the existing profile. Proceed to Layer 2.
|
|
28
|
+
- If **No:** Show the current profile values and let the user modify any fields. Write the adjusted profile to `.arness/profile.yaml`. Proceed to Layer 2.
|
|
29
|
+
|
|
30
|
+
- **No profile exists:** Run the Welcome Flow (Section 1b).
|
|
31
|
+
|
|
32
|
+
### 1b. Welcome Flow (First-Time Only)
|
|
33
|
+
|
|
34
|
+
Display a brief welcome message:
|
|
35
|
+
|
|
36
|
+
> **Welcome to Arness!** Let me set up your profile so Arness can tailor recommendations to your experience. This takes about 30 seconds and only happens once.
|
|
37
|
+
|
|
38
|
+
Then ask 4 questions:
|
|
39
|
+
|
|
40
|
+
**Q1 — Primary role:**
|
|
41
|
+
|
|
42
|
+
Ask the user:
|
|
43
|
+
> **What best describes your primary role?**
|
|
44
|
+
> 1. Developer (frontend, backend, or full-stack)
|
|
45
|
+
> 2. DevOps / Infrastructure Engineer
|
|
46
|
+
> 3. Product Manager / Designer
|
|
47
|
+
> 4. Tech Lead / Engineering Manager
|
|
48
|
+
|
|
49
|
+
If the user's role does not fit these options, accept a free-text description and record it under `role: other` with the description in `role_description`.
|
|
50
|
+
|
|
51
|
+
**Q2 — Development experience:**
|
|
52
|
+
|
|
53
|
+
Skip this question if the user selected "Product Manager / Designer" in Q1.
|
|
54
|
+
|
|
55
|
+
Ask the user:
|
|
56
|
+
> **How would you describe your development experience?**
|
|
57
|
+
> 1. Expert — I architect systems and mentor others
|
|
58
|
+
> 2. Experienced — I build features independently
|
|
59
|
+
> 3. Learning — I'm growing my skills with guidance
|
|
60
|
+
> 4. Non-technical — I work with developers but don't code
|
|
61
|
+
|
|
62
|
+
**Q3 — Technologies:**
|
|
63
|
+
|
|
64
|
+
Skip this question if the user selected "Non-technical" in Q2, or if Q2 was skipped (PM/Designer role in Q1) and the user's role suggests non-technical background.
|
|
65
|
+
|
|
66
|
+
Ask as free text (not a multi-choice question — this is open-ended):
|
|
67
|
+
|
|
68
|
+
> **What technologies do you work with?** List your primary languages, frameworks, databases, and infrastructure tools (e.g., "TypeScript, React, Next.js, PostgreSQL, AWS, Docker").
|
|
69
|
+
|
|
70
|
+
Parse the response into structured categories:
|
|
71
|
+
- `languages`: Programming languages (TypeScript, Python, Go, Java, Rust, etc.)
|
|
72
|
+
- `frameworks`: Frameworks and libraries (React, Next.js, Django, Spring, etc.)
|
|
73
|
+
- `databases`: Databases and data stores (PostgreSQL, MongoDB, Redis, etc.)
|
|
74
|
+
- `infrastructure`: Infrastructure tools and platforms (AWS, GCP, Docker, Kubernetes, Terraform, etc.)
|
|
75
|
+
|
|
76
|
+
**Q4 — Expertise-aware recommendations:**
|
|
77
|
+
|
|
78
|
+
Ask the user:
|
|
79
|
+
> **Should Arness tailor recommendations to your experience level?** When enabled, guidance adapts to your expertise — experts get terse, direct advice while learners get more context and explanation.
|
|
80
|
+
> 1. Yes, tailor to my experience
|
|
81
|
+
> 2. No, give me the standard experience
|
|
82
|
+
|
|
83
|
+
### 1c. Write Profile
|
|
84
|
+
|
|
85
|
+
1. Ensure the `.arness/` directory exists: `mkdir -p .arness`
|
|
86
|
+
2. Write `.arness/profile.yaml` with the following schema:
|
|
87
|
+
|
|
88
|
+
```yaml
|
|
89
|
+
# Arness Project Profile
|
|
90
|
+
# Edit freely — this file is gitignored
|
|
91
|
+
role: developer | devops | product | lead | other
|
|
92
|
+
role_description: "" # Free-text if role is "other" or for additional context
|
|
93
|
+
development_experience: expert | experienced | learning | non-technical
|
|
94
|
+
technology_preferences:
|
|
95
|
+
languages: []
|
|
96
|
+
frameworks: []
|
|
97
|
+
databases: []
|
|
98
|
+
infrastructure: []
|
|
99
|
+
expertise_aware: true | false
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
3. Verify `.arness/profile.yaml` is in the project's `.gitignore`:
|
|
103
|
+
- Read `.gitignore` in the project root
|
|
104
|
+
- If `.gitignore` does not exist, create it with `.arness/profile.yaml` as an entry
|
|
105
|
+
- If `.gitignore` exists but does not contain `.arness/profile.yaml`, append `.arness/profile.yaml` on a new line
|
|
106
|
+
- **Important:** Do NOT gitignore the entire `[PLATFORM_CONFIG_DIR]` directory — `[PLATFORM_CONFIG_DIR]/settings.json` and other project-level platform settings should remain committable for team sharing.
|
|
107
|
+
- **Important:** Do NOT gitignore the entire `.arness/` directory — other Arness project files must remain committable.
|
|
108
|
+
|
|
109
|
+
Display a closing message:
|
|
110
|
+
|
|
111
|
+
> **Profile saved.** Your profile is stored at `.arness/profile.yaml` and stays with this project. You can edit it anytime.
|
|
112
|
+
|
|
113
|
+
Proceed to Layer 2.
|
|
114
|
+
|
|
115
|
+
---
|
|
116
|
+
|
|
117
|
+
## Layer 2: Config Check (Ensure-Config)
|
|
118
|
+
|
|
119
|
+
### 2a. Read arness.md
|
|
120
|
+
|
|
121
|
+
Read the project's arness.md and look for a `## Arness` section.
|
|
122
|
+
|
|
123
|
+
If arness.md does not exist and no project directory is established (e.g., `arn-spark-discover` invoked in a temporary context), skip Layer 2 entirely and proceed with the original skill.
|
|
124
|
+
|
|
125
|
+
### 2b. If No `## Arness` Section Exists
|
|
126
|
+
|
|
127
|
+
Perform auto-detection and create the section with sensible defaults.
|
|
128
|
+
|
|
129
|
+
**Auto-detect:**
|
|
130
|
+
|
|
131
|
+
1. Git: `git rev-parse --is-inside-work-tree >/dev/null 2>&1 && echo "yes" || echo "no"` (suppress stdout too — `git rev-parse --is-inside-work-tree` prints `true` on success, which would otherwise pollute the output)
|
|
132
|
+
2. Remote: `git remote -v 2>/dev/null`
|
|
133
|
+
3. Platform: Check for GitHub (`gh auth status 2>/dev/null`) or Bitbucket (`bkt --version 2>/dev/null`). If neither is detected, set Platform to `none`.
|
|
134
|
+
4. Issue tracker: If Platform is `github`, set Issue tracker to `github`. If Jira MCP is available, set to `jira`. Otherwise `none`.
|
|
135
|
+
|
|
136
|
+
**Present defaults:**
|
|
137
|
+
|
|
138
|
+
Show the user the detected and default values:
|
|
139
|
+
|
|
140
|
+
| Field | Value |
|
|
141
|
+
|-------|-------|
|
|
142
|
+
| Vision directory | .arness/vision |
|
|
143
|
+
| Use cases directory | .arness/use-cases |
|
|
144
|
+
| Prototypes directory | .arness/prototypes |
|
|
145
|
+
| Spikes directory | .arness/spikes |
|
|
146
|
+
| Visual grounding directory | .arness/visual-grounding |
|
|
147
|
+
| Reports directory | .arness/reports |
|
|
148
|
+
| Git | (detected) |
|
|
149
|
+
| Platform | (detected) |
|
|
150
|
+
| Issue tracker | (detected) |
|
|
151
|
+
|
|
152
|
+
Ask the user:
|
|
153
|
+
> **Use these defaults or customize folder locations?**
|
|
154
|
+
> 1. Use defaults
|
|
155
|
+
> 2. Let me customize
|
|
156
|
+
|
|
157
|
+
- If **Use defaults:** Set `Folder preference: defaults`. Use all values as shown.
|
|
158
|
+
- If **Let me customize:** Ask about each directory individually. Set `Folder preference: custom`.
|
|
159
|
+
|
|
160
|
+
**Write `## Arness` section to arness.md:**
|
|
161
|
+
|
|
162
|
+
Construct the `## Arness` section with all fields. If arness.md does not exist, create it with the `## Arness` section. If arness.md exists, append the section at the end.
|
|
163
|
+
|
|
164
|
+
For the `Spark agent model profile:` field: do NOT default it here. Leave the field absent so Layer 2c routes to the **Profile selection** procedure (see "Model profile field" section below) on the next ensure-config invocation. This guarantees the user is asked rather than silently set to `all-opus`. The corresponding `.arness/agent-models/spark.md` file is created by the Profile selection procedure, not by this fast path.
|
|
165
|
+
|
|
166
|
+
Fields to write:
|
|
167
|
+
```
|
|
168
|
+
## Arness
|
|
169
|
+
|
|
170
|
+
- **Vision directory:** .arness/vision
|
|
171
|
+
- **Use cases directory:** .arness/use-cases
|
|
172
|
+
- **Prototypes directory:** .arness/prototypes
|
|
173
|
+
- **Spikes directory:** .arness/spikes
|
|
174
|
+
- **Visual grounding directory:** .arness/visual-grounding
|
|
175
|
+
- **Reports directory:** .arness/reports
|
|
176
|
+
- **Git:** yes
|
|
177
|
+
- **Platform:** github
|
|
178
|
+
- **Issue tracker:** github
|
|
179
|
+
- **Folder preference:** defaults
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
**Create directories:**
|
|
183
|
+
|
|
184
|
+
Run via Bash: `mkdir -p` for each configured directory (vision, use-cases, prototypes, spikes, visual-grounding, reports).
|
|
185
|
+
|
|
186
|
+
### 2c. If `## Arness` Exists But Arness Spark Fields Are Missing
|
|
187
|
+
|
|
188
|
+
Check for the presence of Arness Spark fields: `Vision directory`, `Use cases directory`, `Prototypes directory`, `Spikes directory`, `Visual grounding directory`, `Reports directory`, `Spark agent model profile`.
|
|
189
|
+
|
|
190
|
+
If any directory-style fields are missing (`Vision directory`, `Use cases directory`, `Prototypes directory`, `Spikes directory`, `Visual grounding directory`, `Reports directory`):
|
|
191
|
+
|
|
192
|
+
1. Check the `Folder preference` field in the existing `## Arness` section.
|
|
193
|
+
2. If `Folder preference: defaults` — silently add missing Spark fields with default values. Create directories via `mkdir -p`.
|
|
194
|
+
3. If `Folder preference: custom` — ask the user about each missing Spark directory. Add fields with the user's chosen values. Create directories.
|
|
195
|
+
4. If no `Folder preference` field exists — add it with value `defaults` and silently add missing fields.
|
|
196
|
+
5. **Preserve all existing fields** from other plugins (Code fields, Infra fields) per the arness.md Config Section pattern.
|
|
197
|
+
|
|
198
|
+
If the `Spark agent model profile:` field is missing (separate logic — this field requires a real choice and downstream artifact copy):
|
|
199
|
+
|
|
200
|
+
1. Run the **Profile selection** procedure documented in the "Model profile field" section below. The procedure handles the profile selection prompt, writes the field to the `## Arness` block, copies the chosen preset to `.arness/agent-models/spark.md`, and records the SHA-256 checksum.
|
|
201
|
+
|
|
202
|
+
If the `Spark agent model profile:` field is **present** (consistency check — runs whether or not directory fields are also missing):
|
|
203
|
+
|
|
204
|
+
1. **If value is `all-opus` or `balanced`:**
|
|
205
|
+
a. Compute the SHA-256 checksum of `.arness/agent-models/spark.md` and compare it to the recorded checksum in `.arness/agent-models/.checksums.json`.
|
|
206
|
+
b. If checksums **differ** (user edited the file): flip the field value in `## Arness` from its current value to `custom` and inform the user with a one-line message: `"Detected your edits to .arness/agent-models/spark.md — set profile to 'custom' so future updates won't overwrite your changes."` Do NOT overwrite the user's edits; do NOT recompute the checksum (the `custom` profile means "user-managed").
|
|
207
|
+
c. If checksums match: read the `# Version:` header from `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/<value>.md` (the upstream preset) and compare to the `# Version:` header recorded in `.arness/agent-models/spark.md`. If they differ, apply the project's `Template updates:` policy (reuse the procedure in `${CLAUDE_PLUGIN_ROOT}/skills/arn-code-save-plan/references/template-versioning.md` — Spark reuses Arness Code's template-versioning machinery):
|
|
208
|
+
- `auto`: copy the new preset, regenerate the checksum, inform the user "Refreshed `.arness/agent-models/spark.md` from preset `<value>` v<old>→v<new>."
|
|
209
|
+
- `ask`: prompt the user; on accept, copy + regenerate; on decline, leave file alone and skip until the user re-runs.
|
|
210
|
+
- `manual`: do nothing this run.
|
|
211
|
+
|
|
212
|
+
2. **If value is `custom`:**
|
|
213
|
+
a. Read the canonical agent list from `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/all-opus.md` (every entry of the form `<agent-name>: <model>`).
|
|
214
|
+
b. Read the user's `.arness/agent-models/spark.md` and collect the agent names present.
|
|
215
|
+
c. For any agent in the canonical list that is NOT present in the user's file, surface as an info-level diagnostic: `"Note: .arness/agent-models/spark.md is missing entries for: <comma-separated agent list>. Add them or run with 'all-opus'/'balanced' profile to refresh."` This is informational only — do not block the workflow.
|
|
216
|
+
|
|
217
|
+
3. **If value is anything else** (legacy/typo): treat as missing — run the Profile selection procedure to repair.
|
|
218
|
+
|
|
219
|
+
### 2d. If `## Arness` Exists and All Spark Fields Are Present
|
|
220
|
+
|
|
221
|
+
**Fast path.** Verify against the canonical Spark fields list at the top of section 2c — every field on that list (including `Spark agent model profile`) must be present. If `Spark agent model profile` is present, also run the checksum/version/custom-diagnostic checks documented in section 2c (they are cheap and idempotent). If all checks pass, no action needed; proceed with the original skill's workflow.
|
|
222
|
+
|
|
223
|
+
---
|
|
224
|
+
|
|
225
|
+
## Layer 3: Cache Write
|
|
226
|
+
|
|
227
|
+
After Layers 1–2 complete successfully, write the validation cache so future ensure-config invocations can fast-path via `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-ensure-config/scripts/cache-check.sh`. This is the final step of the validation flow.
|
|
228
|
+
|
|
229
|
+
### Why a cache?
|
|
230
|
+
|
|
231
|
+
Entry-point skills invoke ensure-config as Step 0 on every workflow trigger (~30 trigger points across the plugin). Re-running the full Layers 1–2 flow on every invocation costs ~2k tokens even when nothing has changed. The cache lets the cache-check shell script (zero model tokens) verify validity in milliseconds; on hit, the entry-point skips reading this references file entirely (per the procedure in `references/step-0-fast-path.md`).
|
|
232
|
+
|
|
233
|
+
### Cache file location
|
|
234
|
+
|
|
235
|
+
`.arness/arn-spark-ensure-config.local.json` (project-local; must be gitignored. Note: Layer 1c only adds `.arness/profile.yaml` to `.gitignore`, so when writing this cache also ensure a `.arness/*.local.*` pattern is present in `.gitignore` — append it if missing.)
|
|
236
|
+
|
|
237
|
+
### Cache schema
|
|
238
|
+
|
|
239
|
+
```json
|
|
240
|
+
{
|
|
241
|
+
"schemaVersion": 1,
|
|
242
|
+
"validatedAt": "2026-05-10T12:34:56Z",
|
|
243
|
+
"pluginVersion": "2.4.0",
|
|
244
|
+
"fingerprints": {
|
|
245
|
+
"claudeMdArnessSection": "<sha256 of the ## Arness block content>",
|
|
246
|
+
"agentModelsCodeMd": "<sha256 of .arness/agent-models/spark.md, or 'MISSING'>",
|
|
247
|
+
"agentModelsChecksums": "<sha256 of .arness/agent-models/.checksums.json, or 'MISSING'>",
|
|
248
|
+
"templatesChecksums": "<sha256 of .arness/templates/.checksums.json, or 'MISSING'>",
|
|
249
|
+
"profile": "<sha256 of .arness/profile.yaml, or 'MISSING'>",
|
|
250
|
+
"gitignoreContent": "<sha256 of .gitignore, or 'MISSING'>"
|
|
251
|
+
},
|
|
252
|
+
"validationStatus": "pass"
|
|
253
|
+
}
|
|
254
|
+
```
|
|
255
|
+
|
|
256
|
+
(Note: the fingerprint key remains `agentModelsCodeMd` for cross-plugin schema consistency, but its value hashes the spark-specific file `.arness/agent-models/spark.md`.)
|
|
257
|
+
|
|
258
|
+
### Write procedure
|
|
259
|
+
|
|
260
|
+
1. **Compute the 6 fingerprints** using `sha256sum` (Linux and Git Bash); if it is unavailable, fall back to `shasum -a 256` (macOS/BSD). For the `claudeMdArnessSection` hash, extract the `## Arness` block via `awk '/^## Arness$/{flag=1;next} /^## /{flag=0} flag' arness.md` then pipe to the hasher. For each file fingerprint: if the file does not exist, use the literal string `MISSING` instead of computing a hash.
|
|
261
|
+
|
|
262
|
+
2. **Read plugin version** from `${CLAUDE_PLUGIN_ROOT}/.claude-plugin/plugin.json` if present (legacy), else from the marketplace's `marketplace.json` entry for `arn-spark`. Use the empty string if neither is resolvable.
|
|
263
|
+
|
|
264
|
+
3. **Construct the JSON object** with the schema above. `validatedAt` is the current ISO 8601 UTC timestamp. `validationStatus` is `"pass"` (only written on successful Layers 1–2).
|
|
265
|
+
|
|
266
|
+
4. **Write atomically** using a project-local temp file (NOT `/tmp/` — Windows compat):
|
|
267
|
+
|
|
268
|
+
```bash
|
|
269
|
+
printf '%s\n' "$json_content" > .arness/arn-spark-ensure-config.local.json.tmp
|
|
270
|
+
mv .arness/arn-spark-ensure-config.local.json.tmp .arness/arn-spark-ensure-config.local.json
|
|
271
|
+
```
|
|
272
|
+
|
|
273
|
+
The `mv` is atomic on POSIX filesystems (Linux ext4, Mac APFS); on Windows NTFS via Git Bash a same-directory rename is effectively atomic in practice, though not strictly guaranteed by POSIX semantics.
|
|
274
|
+
|
|
275
|
+
5. **Verify** by reading the file back and confirming valid JSON. If the verify fails, surface as a warning but do not block — the next invocation will simply cache-miss.
|
|
276
|
+
|
|
277
|
+
### When the cache invalidates
|
|
278
|
+
|
|
279
|
+
- `pluginVersion` differs from current (plugin upgrade)
|
|
280
|
+
- `schemaVersion` differs from current (cache schema bump — silently invalidates)
|
|
281
|
+
- Any of the 6 fingerprints differs from current state
|
|
282
|
+
|
|
283
|
+
All invalidation paths trigger a cache miss, which causes the entry-point to read the full ensure-config.md and re-run Layers 1–2 — at the end of which this Layer 3 step writes a new cache.
|
|
284
|
+
|
|
285
|
+
---
|
|
286
|
+
|
|
287
|
+
## Model profile field
|
|
288
|
+
|
|
289
|
+
The `Spark agent model profile` field controls which Claude model each Arness Spark agent is dispatched on. It mirrors the structure of the Linting field flow used by Arness Code and reuses the same checksum + version-update machinery as report templates.
|
|
290
|
+
|
|
291
|
+
### Field summary
|
|
292
|
+
|
|
293
|
+
| Property | Value |
|
|
294
|
+
|----------|-------|
|
|
295
|
+
| Field name | `Spark agent model profile` |
|
|
296
|
+
| Valid values | `all-opus` \| `balanced` \| `custom` |
|
|
297
|
+
| Default on init | `all-opus` (preserves prior behavior) |
|
|
298
|
+
| Where the choice lives | The field in `## Arness` records the user's choice; the actual model-per-agent map lives at `.arness/agent-models/spark.md`. |
|
|
299
|
+
| Update policy | Reuses the project's `Template updates:` field (`ask` \| `auto` \| `manual`). If `Template updates:` is not present (Spark-only project), default to `ask`. |
|
|
300
|
+
| Drift detection | SHA-256 checksum of `.arness/agent-models/spark.md` compared to the recorded checksum. Mismatch flips the field to `custom`. |
|
|
301
|
+
| Custom diagnostic | When value is `custom`, missing-agent entries (against the canonical `all-opus.md` agent list) are surfaced as info-level diagnostics. |
|
|
302
|
+
| User example | The arness repo's own `## Arness` block uses `Template path: .arness/templates`, `Template version: 2.3.0`, `Template updates: ask`. The model-profile field is appended in the same `- **Field name:** value` style. |
|
|
303
|
+
|
|
304
|
+
### Profile selection procedure
|
|
305
|
+
|
|
306
|
+
This procedure is the single source of truth for the prompt + write + copy + checksum flow. It is invoked from two places:
|
|
307
|
+
- `arn-spark-init` ("Choose model profile" step), and
|
|
308
|
+
- `arn-spark-ensure-config` Layer 2c (when the field is missing).
|
|
309
|
+
|
|
310
|
+
Both call sites must read this section and follow it verbatim — do NOT duplicate the prompt + write + copy + checksum logic at the call sites.
|
|
311
|
+
|
|
312
|
+
**Steps:**
|
|
313
|
+
|
|
314
|
+
1. **Cross-plugin default suggestion.** Before asking, read the project's arness.md `## Arness` block. If a sibling plugin's profile field (`Code agent model profile:` or `Infra agent model profile:`) is set to `all-opus` or `balanced`, suggest that value as the default in the options ordering (the recommended/default option goes first, with `(Recommended)` appended). If both siblings are set to different values, prefer the most recently written field; if neither is set, the default is `all-opus`.
|
|
315
|
+
|
|
316
|
+
2. **Ask the user.** Ask the user:
|
|
317
|
+
|
|
318
|
+
> **Choose model profile for arn-spark agents**
|
|
319
|
+
> 1. **all-opus (Recommended)** — Every agent uses Opus. Maximum quality, maximum cost. (Current behavior.)
|
|
320
|
+
> 2. **balanced** — Opus for heavy reasoning, Sonnet for operational work. Lower cost, similar quality on routine tasks.
|
|
321
|
+
|
|
322
|
+
The label `(Recommended)` is appended to the suggested default (which may be `balanced` if a sibling plugin chose `balanced`). The recommended option appears first in the option list.
|
|
323
|
+
|
|
324
|
+
3. **Write the field.** Append `- **Spark agent model profile:** <choice>` to the `## Arness` block in arness.md, using the existing field-write idiom (preserve all other fields, replace the single field if it already exists).
|
|
325
|
+
|
|
326
|
+
4. **Copy the preset.** Create `.arness/agent-models/` if it does not exist (`mkdir -p .arness/agent-models`). Copy `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/<choice>.md` to `.arness/agent-models/spark.md`.
|
|
327
|
+
|
|
328
|
+
5. **Record the checksum.** Compute the SHA-256 checksum of the copied file via Bash:
|
|
329
|
+
```bash
|
|
330
|
+
sha256sum .arness/agent-models/spark.md 2>/dev/null || shasum -a 256 .arness/agent-models/spark.md
|
|
331
|
+
```
|
|
332
|
+
Read or create `.arness/agent-models/.checksums.json`. Add or update the entry for `spark.md`:
|
|
333
|
+
```json
|
|
334
|
+
{
|
|
335
|
+
"spark.md": {
|
|
336
|
+
"sha256": "<hex digest>",
|
|
337
|
+
"profile": "<choice>",
|
|
338
|
+
"version": "<version from the preset's # Version: header>"
|
|
339
|
+
}
|
|
340
|
+
}
|
|
341
|
+
```
|
|
342
|
+
Preserve any sibling entries (`code.md`, `infra.md`) already present in `.checksums.json` — those belong to the other plugins and are managed by their own ensure-config flows.
|
|
343
|
+
|
|
344
|
+
6. **Inform the user** with a one-line confirmation: `"Set Spark agent model profile to <choice>. Wrote .arness/agent-models/spark.md (sha256: <first-8-chars>...)."`
|
|
345
|
+
|
|
346
|
+
### Drift detection (Layer 2c integration)
|
|
347
|
+
|
|
348
|
+
Layer 2c runs the following whenever the field is present:
|
|
349
|
+
|
|
350
|
+
1. **Checksum check.** Compute the current sha256 of `.arness/agent-models/spark.md` and compare to the recorded checksum in `.arness/agent-models/.checksums.json`. If they differ, flip the field to `custom` (one-line user message; do NOT overwrite the user's file).
|
|
351
|
+
2. **Version check** (only when value is `all-opus` or `balanced` and checksums match): compare the `# Version:` header in the user's `.arness/agent-models/spark.md` against the upstream preset at `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/<value>.md`. On mismatch, apply the `Template updates:` policy (reuse `${CLAUDE_PLUGIN_ROOT}/skills/arn-code-save-plan/references/template-versioning.md`).
|
|
352
|
+
3. **Custom diagnostic** (only when value is `custom`): read the canonical agent list from `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/all-opus.md` and surface info-level diagnostics for any canonical agent missing from the user's file.
|
|
353
|
+
|
|
354
|
+
---
|
|
355
|
+
|
|
356
|
+
## Important Rules
|
|
357
|
+
|
|
358
|
+
1. **Never hard-block.** If auto-detection fails for a non-critical field (Platform, Issue tracker), default gracefully (`none`). Only the profile welcome flow is mandatory on first invocation. For `arn-spark-discover`, if Layer 2 cannot complete (no project directory), skip it entirely.
|
|
359
|
+
2. **Preserve ALL existing `## Arness` fields** not managed by Arness Spark. When writing or updating the section, read all existing fields first and include them unchanged. Arness Code fields (Plans directory, Specs directory, Template path, Code patterns, Docs directory, etc.) and Arness Infra fields (Infra plans directory, Infra specs directory, Infra docs directory, etc.) must be preserved.
|
|
360
|
+
3. **Use `${CLAUDE_PLUGIN_ROOT}`** for all plugin-internal path references. Never hardcode absolute paths.
|
|
361
|
+
4. **No template setup.** Arness Spark does not have report templates. Template setup is handled exclusively by Arness Code's ensure-config.
|
|
362
|
+
5. **Profile YAML uses structured `technology_preferences`** with separate `languages`, `frameworks`, `databases`, `infrastructure` arrays. Do not store technologies as a flat string.
|
|
363
|
+
6. **Profile data is non-sensitive** (role, technology preferences — no credentials or secrets). The `.arness/profile.yaml` gitignore pattern protects against accidental commits of the profile while keeping other `.arness/` files committable for team sharing.
|
|
364
|
+
7. **Folder preference coordination:** When setting `Folder preference`, this value is shared across all three plugins. If another plugin already set it, respect that value.
|
|
365
|
+
8. **Discover resilience:** `arn-spark-discover` has "Prerequisites: None" in its design. If ensure-config encounters any error during Layer 2 (e.g., no writable directory, no arness.md), log the issue silently and let the original skill proceed. The profile (Layer 1) should still be captured if possible since it lives at `~/.arness/` which is always writable.
|
|
366
|
+
|
|
367
|
+
---
|
|
368
|
+
|
|
369
|
+
## Dispatch convention (agent model lookup)
|
|
370
|
+
|
|
371
|
+
Every skill in this plugin that dispatches a subagent via the Task tool consults a per-plugin model profile to decide which model the agent runs on. The profile lives at `.arness/agent-models/spark.md` (project-relative, NOT plugin-relative — this path is project-rooted while plugin assets use `${CLAUDE_PLUGIN_ROOT}` per Pattern 8 in INTRODUCTION.md). The file is created during init and is one of the presets shipped under `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-init/references/agent-models-presets/`.
|
|
372
|
+
|
|
373
|
+
### Lookup procedure (apply at every dispatch site)
|
|
374
|
+
|
|
375
|
+
1. **Read `.arness/agent-models/spark.md`** (project-relative).
|
|
376
|
+
2. **If the file is missing or unparseable:** omit the `model:` parameter when invoking the Task tool. The agent's frontmatter default (`opus`) applies. Do not surface an error — fallback is silent.
|
|
377
|
+
3. **Look up the agent's name** (e.g., `arn-spark-doctor`, `arn-spark-product-strategist`) in the file's mapping.
|
|
378
|
+
4. **If the agent name is found:** pass the mapped value (e.g., `opus`, `sonnet`, `haiku`) as the Task tool's `model` parameter. This overrides the agent's `model:` frontmatter.
|
|
379
|
+
5. **If the agent name is NOT found in the file:** omit the `model:` parameter — frontmatter fallback applies. This keeps user-edited `custom` profiles forward-compatible: agents added to the plugin after the user customized their profile still run on their frontmatter default (`opus`) until the user adds them to the file.
|
|
380
|
+
6. **Multi-agent parallel dispatches:** apply the lookup to each agent independently. A single instruction that spawns three agents in parallel produces three independent lookups, each potentially passing a different `model:` value.
|
|
381
|
+
7. **Resume-mode dispatches** (calls that pass an existing agent ID via the Task tool's `resume` parameter): do NOT consult the profile. Resume calls inherit the model from the original invocation. Dispatch sites that resume an existing agent do not include the model-lookup phrasing.
|
|
382
|
+
|
|
383
|
+
### Why this design
|
|
384
|
+
|
|
385
|
+
- **Native Task tool support.** The Task tool's `model:` parameter takes precedence over agent frontmatter (Pattern 2 in INTRODUCTION.md). No wrapper or shim is needed.
|
|
386
|
+
- **Self-documenting.** Every dispatch site explicitly references this convention, so the lookup behavior is visible at the point of dispatch rather than buried in a global default. A static grep for `via the Task tool` confirms coverage.
|
|
387
|
+
- **Graceful degradation.** Missing file, missing agent entry, and unparseable content all fall back to frontmatter — there is no failure mode where dispatch hangs or errors on a config issue.
|
|
388
|
+
- **Single source of truth.** `.arness/agent-models/spark.md` is the only place a user edits to change behavior across all arn-spark dispatches. The file is template-managed (Pattern 6) so version bumps, drift detection, and the `Template updates: ask | auto | manual` policy all apply.
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# Arness Spark Ensure-Config — Step 0 Fast-Path
|
|
2
|
+
|
|
3
|
+
This is the **first read** for entry-point skills' Step 0. It runs a shell-only cache check; if the cache is valid, the entry-point can skip reading the full `references/ensure-config.md`. On cache miss, falls through to the full validation flow.
|
|
4
|
+
|
|
5
|
+
## Procedure
|
|
6
|
+
|
|
7
|
+
1. **Run the cache check** via Bash:
|
|
8
|
+
|
|
9
|
+
```
|
|
10
|
+
bash ${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-ensure-config/scripts/cache-check.sh
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
2. **If exit 0 (cache hit):**
|
|
14
|
+
- Emit a one-line status to the user: `Ensure-config: cache valid (arn-spark, last validated <duration> ago)`. Read the `validatedAt` timestamp from `.arness/arn-spark-ensure-config.local.json` to format the duration; if reading fails, just say "cache valid".
|
|
15
|
+
- Return immediately. The entry-point skill's own workflow proceeds with no further config work.
|
|
16
|
+
- **Do NOT read `references/ensure-config.md`** — that is the whole point of the fast path.
|
|
17
|
+
|
|
18
|
+
3. **If exit non-zero (cache miss):**
|
|
19
|
+
- The script's stderr output includes the reason (e.g., `cache miss: pluginVersion changed`, `cache miss: fingerprint claudeMdArnessSection changed`). Surface it as an info-level note: `Ensure-config: cache miss (<reason>) — running full validation.`
|
|
20
|
+
- **Read `${CLAUDE_PLUGIN_ROOT}/skills/arn-spark-ensure-config/references/ensure-config.md`** and follow ALL its instructions.
|
|
21
|
+
- At the end of successful validation, the ensure-config.md flow's "Cache Write" step writes the new `.arness/arn-spark-ensure-config.local.json` per the cache schema documented inline there.
|
|
22
|
+
|
|
23
|
+
## Cross-platform notes
|
|
24
|
+
|
|
25
|
+
The cache-check script works on Linux, Mac, and Windows-via-Git-Bash. It requires `bash`, `sha256sum` or `shasum`, `awk`, `grep`, `sed`, and `jq`. The `gh` CLI is optional (used only for GitHub label count verification). If any required tool is missing, the script exits non-zero and full validation runs as the fallback.
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# arn-spark-ensure-config cache-check.sh
#
# Returns exit 0 when the cache at .arness/arn-spark-ensure-config.local.json
# is valid (all fingerprints match current state). Returns non-zero with a
# stderr reason on miss.
#
# Cross-platform: works on Linux, Mac, and Windows-via-Git-Bash.
# Required tools: bash, sha256sum or shasum, awk, grep, sed, jq.
# Optional tool: gh (only used for GitHub label count check).

# -u only: the script deliberately avoids -e so that individual check
# failures can be routed through miss() with a readable reason instead of
# aborting mid-check.
set -u

# Constants — readonly so later code cannot clobber them by accident.
readonly PLUGIN_NAME="arn-spark"
readonly SHORT_NAME="spark"
readonly SCHEMA_VERSION=1
readonly EXPECTED_LABELS_COUNT=7
readonly CACHE_FILE=".arness/${PLUGIN_NAME}-ensure-config.local.json"
|
|
19
|
+
|
|
20
|
+
# --- Helper: fail fast with reason ---
# Emit a single "cache miss: <reason>" line on stderr and terminate the
# whole script with status 1 (any non-zero status tells the caller to run
# full validation).
miss() {
  local reason="$1"
  printf 'cache miss: %s\n' "$reason" >&2
  exit 1
}
|
|
25
|
+
|
|
26
|
+
# --- Helper: cross-platform SHA-256 ---
# Hash a string argument (not a file). Prefers sha256sum (Linux/Git-Bash),
# falls back to shasum -a 256 (Mac); exits 2 when neither tool exists.
hash_str() {
  local input="$1" digest
  if command -v sha256sum >/dev/null 2>&1; then
    digest=$(printf '%s' "$input" | sha256sum)
  elif command -v shasum >/dev/null 2>&1; then
    digest=$(printf '%s' "$input" | shasum -a 256)
  else
    echo "ERROR: neither sha256sum nor shasum available" >&2
    exit 2
  fi
  # Both tools print "<hex>  <name>"; keep only the hex digest.
  printf '%s\n' "${digest%% *}"
}
|
|
38
|
+
|
|
39
|
+
# Hash a file's contents. A nonexistent file yields the sentinel "MISSING"
# (return 0) so callers can fingerprint "file absent" as a distinct, stable
# state rather than an error. Exits 2 when no SHA-256 tool exists.
hash_file() {
  local target="$1" line
  if [ ! -f "$target" ]; then
    echo "MISSING"
    return 0
  fi
  if command -v sha256sum >/dev/null 2>&1; then
    line=$(sha256sum "$target")
  elif command -v shasum >/dev/null 2>&1; then
    line=$(shasum -a 256 "$target")
  else
    echo "ERROR: neither sha256sum nor shasum available" >&2
    exit 2
  fi
  # Output format of both tools is "<hex>  <path>"; keep only the digest.
  printf '%s\n' "${line%% *}"
}
|
|
54
|
+
|
|
55
|
+
# --- 1. Cache file present? ---
# No cache file means this is a first run (or the cache was deleted).
if [ ! -f "$CACHE_FILE" ]; then
  miss "no cache file at $CACHE_FILE"
fi

# --- 2. jq available for parsing? ---
# Without jq the cache cannot be read; treat as a miss so full validation runs.
if ! command -v jq >/dev/null 2>&1; then
  miss "jq not available (required for cache parsing)"
fi

# --- 3. Schema version matches? ---
# A schema bump silently invalidates every older cache.
cached_schema=$(jq -r '.schemaVersion // empty' "$CACHE_FILE" 2>/dev/null)
if [ "$cached_schema" != "$SCHEMA_VERSION" ]; then
  miss "schemaVersion mismatch (cache: ${cached_schema:-empty}, expected: $SCHEMA_VERSION)"
fi
|
|
64
|
+
|
|
65
|
+
# --- 4. Plugin version matches? ---
# Resolve this script's directory so the plugin root does not depend on the
# caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): PLUGIN_ROOT is computed but never used below — presumably the
# marketplace.json path was meant to be derived from it; confirm.
PLUGIN_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)"
# NOTE(review): "[PLATFORM_PLUGIN_METADATA]" looks like a placeholder token,
# not a real path — verify it is substituted at publish/install time. As a
# literal string the [ -f ... ] guard below is always false and the version
# check is silently skipped.
MARKETPLACE_JSON="[PLATFORM_PLUGIN_METADATA]/marketplace.json"

# Version check is fail-open: if the marketplace manifest is unreadable or
# the plugin entry is absent (current_version empty), skip rather than miss.
if [ -f "$MARKETPLACE_JSON" ]; then
  current_version=$(jq -r --arg name "$PLUGIN_NAME" '.plugins[] | select(.name==$name) | .version' "$MARKETPLACE_JSON" 2>/dev/null)
  cached_version=$(jq -r '.pluginVersion // empty' "$CACHE_FILE")
  if [ -n "$current_version" ] && [ "$current_version" != "$cached_version" ]; then
    miss "pluginVersion changed (cache: $cached_version, current: $current_version)"
  fi
fi
|
|
77
|
+
|
|
78
|
+
# --- 5. Compute current fingerprints ---
# Fingerprint the "## Arness" section of arness.md; a missing file is a
# distinct, stable state ("MISSING"), not an error.
if [ ! -f "arness.md" ]; then
  current_arness_section_hash="MISSING"
else
  # awk: emit lines after the "## Arness" heading, stopping at the next H2.
  arness_section=$(awk '/^## Arness$/{flag=1;next} /^## /{flag=0} flag' arness.md)
  current_arness_section_hash=$(hash_str "$arness_section")
fi

# Fingerprint each managed file. hash_file yields "MISSING" for absent
# files, so a deletion also surfaces as drift.
current_agent_models_hash=$(hash_file ".arness/agent-models/${SHORT_NAME}.md")
current_agent_models_checksums_hash=$(hash_file ".arness/agent-models/.checksums.json")
current_templates_checksums_hash=$(hash_file ".arness/templates/.checksums.json")
current_profile_hash=$(hash_file ".arness/profile.yaml")
current_gitignore_hash=$(hash_file ".gitignore")
|
|
91
|
+
|
|
92
|
+
# --- 6. Compare against cache ---
# Use parallel indexed arrays (bash 3.2 compatible — Mac's default /bin/bash is 3.2,
# associative arrays via 'declare -A' are bash 4+ only).
# NOTE(review): the second key is named agentModelsCodeMd but the hashed file
# is spark.md — key names must match what the ensure-config "Cache Write"
# step records, so confirm against that schema before renaming anything here.
KEYS=(claudeMdArnessSection agentModelsCodeMd agentModelsChecksums templatesChecksums profile gitignoreContent)
VALUES=(
  "$current_arness_section_hash"
  "$current_agent_models_hash"
  "$current_agent_models_checksums_hash"
  "$current_templates_checksums_hash"
  "$current_profile_hash"
  "$current_gitignore_hash"
)

i=0
while [ $i -lt ${#KEYS[@]} ]; do
  key="${KEYS[$i]}"
  current="${VALUES[$i]}"
  # Pass the key as a jq variable instead of interpolating it into the
  # program text: ".fingerprints.${key}" would break (or be injectable) if a
  # key ever contained jq-significant characters.
  cached=$(jq -r --arg k "$key" '.fingerprints[$k] // empty' "$CACHE_FILE")
  if [ "$cached" != "$current" ]; then
    # Truncate hashes in the reason line for readability.
    miss "fingerprint $key changed (cache: ${cached:0:16}..., current: ${current:0:16}...)"
  fi
  i=$((i + 1))
done
|
|
115
|
+
|
|
116
|
+
# --- 7. GitHub labels check (only if Platform=github AND gh available) ---
if [ -f "arness.md" ]; then
  # Extract the value from the "- **Platform:** <value>" field line.
  # Use [[:space:]] rather than \s: \s inside sed -E is a GNU extension that
  # BSD sed (macOS) does not understand, and this script must run there too.
  platform=$(grep -E '^- \*\*Platform:\*\*' arness.md 2>/dev/null | head -1 | sed -E 's/.*Platform:\*\*[[:space:]]*//')
  if [ "$platform" = "github" ] && command -v gh >/dev/null 2>&1; then
    # grep -c always prints a count — it prints "0" (and exits 1) on zero
    # matches — so no "|| echo 0" fallback: that fallback would append a
    # second "0" line, making label_count "0\n0" and garbling the compare.
    label_count=$(gh label list --json name --jq '.[].name' 2>/dev/null | grep -c '^arness-')
    if [ "$label_count" != "$EXPECTED_LABELS_COUNT" ]; then
      miss "GitHub arness-* label count is $label_count, expected $EXPECTED_LABELS_COUNT"
    fi
  fi
fi

exit 0
|