@moreih29/nexus-core 0.15.2 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/claude/.claude-plugin/marketplace.json +75 -0
- package/dist/claude/.claude-plugin/plugin.json +67 -0
- package/dist/claude/agents/architect.md +172 -0
- package/dist/claude/agents/designer.md +120 -0
- package/dist/claude/agents/engineer.md +98 -0
- package/dist/claude/agents/lead.md +59 -0
- package/dist/claude/agents/postdoc.md +117 -0
- package/dist/claude/agents/researcher.md +132 -0
- package/dist/claude/agents/reviewer.md +133 -0
- package/dist/claude/agents/strategist.md +111 -0
- package/dist/claude/agents/tester.md +190 -0
- package/dist/claude/agents/writer.md +114 -0
- package/dist/claude/dist/hooks/agent-bootstrap.js +121 -0
- package/dist/claude/dist/hooks/agent-finalize.js +180 -0
- package/dist/claude/dist/hooks/prompt-router.js +7316 -0
- package/dist/claude/dist/hooks/session-init.js +37 -0
- package/dist/claude/hooks/hooks.json +52 -0
- package/dist/claude/settings.json +3 -0
- package/dist/claude/skills/nx-init/SKILL.md +189 -0
- package/dist/claude/skills/nx-plan/SKILL.md +353 -0
- package/dist/claude/skills/nx-run/SKILL.md +154 -0
- package/dist/claude/skills/nx-sync/SKILL.md +87 -0
- package/dist/codex/agents/architect.toml +172 -0
- package/dist/codex/agents/designer.toml +120 -0
- package/dist/codex/agents/engineer.toml +102 -0
- package/dist/codex/agents/lead.toml +64 -0
- package/dist/codex/agents/postdoc.toml +117 -0
- package/dist/codex/agents/researcher.toml +133 -0
- package/dist/codex/agents/reviewer.toml +134 -0
- package/dist/codex/agents/strategist.toml +111 -0
- package/dist/codex/agents/tester.toml +191 -0
- package/dist/codex/agents/writer.toml +118 -0
- package/dist/codex/dist/hooks/agent-bootstrap.js +121 -0
- package/dist/codex/dist/hooks/agent-finalize.js +180 -0
- package/dist/codex/dist/hooks/prompt-router.js +7316 -0
- package/dist/codex/dist/hooks/session-init.js +37 -0
- package/dist/codex/hooks/hooks.json +28 -0
- package/dist/codex/install/AGENTS.fragment.md +60 -0
- package/dist/codex/install/config.fragment.toml +5 -0
- package/dist/codex/install/install.sh +60 -0
- package/dist/codex/package.json +20 -0
- package/dist/codex/plugin/.codex-plugin/plugin.json +57 -0
- package/dist/codex/plugin/skills/nx-init/SKILL.md +189 -0
- package/dist/codex/plugin/skills/nx-plan/SKILL.md +353 -0
- package/dist/codex/plugin/skills/nx-run/SKILL.md +154 -0
- package/dist/codex/plugin/skills/nx-sync/SKILL.md +87 -0
- package/dist/codex/prompts/architect.md +166 -0
- package/dist/codex/prompts/designer.md +114 -0
- package/dist/codex/prompts/engineer.md +97 -0
- package/dist/codex/prompts/lead.md +60 -0
- package/dist/codex/prompts/postdoc.md +111 -0
- package/dist/codex/prompts/researcher.md +127 -0
- package/dist/codex/prompts/reviewer.md +128 -0
- package/dist/codex/prompts/strategist.md +105 -0
- package/dist/codex/prompts/tester.md +185 -0
- package/dist/codex/prompts/writer.md +113 -0
- package/dist/hooks/agent-bootstrap.js +1 -1
- package/dist/hooks/agent-finalize.js +1 -1
- package/dist/hooks/prompt-router.js +1 -1
- package/dist/hooks/session-init.js +1 -1
- package/dist/manifests/opencode-manifest.json +4 -4
- package/dist/opencode/.opencode/skills/nx-init/SKILL.md +189 -0
- package/dist/opencode/.opencode/skills/nx-plan/SKILL.md +353 -0
- package/dist/opencode/.opencode/skills/nx-run/SKILL.md +154 -0
- package/dist/opencode/.opencode/skills/nx-sync/SKILL.md +87 -0
- package/dist/opencode/package.json +23 -0
- package/dist/opencode/src/agents/architect.ts +176 -0
- package/dist/opencode/src/agents/designer.ts +124 -0
- package/dist/opencode/src/agents/engineer.ts +105 -0
- package/dist/opencode/src/agents/lead.ts +66 -0
- package/dist/opencode/src/agents/postdoc.ts +121 -0
- package/dist/opencode/src/agents/researcher.ts +136 -0
- package/dist/opencode/src/agents/reviewer.ts +137 -0
- package/dist/opencode/src/agents/strategist.ts +115 -0
- package/dist/opencode/src/agents/tester.ts +194 -0
- package/dist/opencode/src/agents/writer.ts +121 -0
- package/dist/opencode/src/index.ts +25 -0
- package/dist/opencode/src/plugin.ts +6 -0
- package/dist/scripts/build-agents.d.ts +0 -1
- package/dist/scripts/build-agents.d.ts.map +1 -1
- package/dist/scripts/build-agents.js +3 -15
- package/dist/scripts/build-agents.js.map +1 -1
- package/dist/scripts/build-hooks.js +1 -1
- package/dist/scripts/build-hooks.js.map +1 -1
- package/dist/scripts/smoke/smoke-claude.d.ts +2 -0
- package/dist/scripts/smoke/smoke-claude.d.ts.map +1 -0
- package/dist/scripts/smoke/smoke-claude.js +58 -0
- package/dist/scripts/smoke/smoke-claude.js.map +1 -0
- package/dist/scripts/smoke/smoke-codex.d.ts +2 -0
- package/dist/scripts/smoke/smoke-codex.d.ts.map +1 -0
- package/dist/scripts/smoke/smoke-codex.js +50 -0
- package/dist/scripts/smoke/smoke-codex.js.map +1 -0
- package/dist/scripts/smoke/smoke-opencode.d.ts +2 -0
- package/dist/scripts/smoke/smoke-opencode.d.ts.map +1 -0
- package/dist/scripts/smoke/smoke-opencode.js +99 -0
- package/dist/scripts/smoke/smoke-opencode.js.map +1 -0
- package/docs/contract/harness-io.md +51 -6
- package/package.json +7 -3
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
# Auto-generated by build-agents.ts — do not edit
|
|
2
|
+
# Source: assets/agents/tester/body.md
|
|
3
|
+
|
|
4
|
+
name = "tester"
|
|
5
|
+
description = "Testing and verification — tests, verifies, validates stability and security of implementations"
|
|
6
|
+
developer_instructions = """
|
|
7
|
+
## Role
|
|
8
|
+
|
|
9
|
+
You are the Tester — the code verification specialist who tests, validates, and secures implementations.
|
|
10
|
+
You are the primary verifier of plan acceptance criteria: you read each task's acceptance field and determine whether the implementation satisfies it before the task can be marked completed.
|
|
11
|
+
You verify code: run tests, check types, review implementations, and identify security issues.
|
|
12
|
+
You do NOT verify non-code deliverables (documents, reports, presentations) — that is Reviewer's domain.
|
|
13
|
+
You do NOT fix application code — you report findings and write test code only.
|
|
14
|
+
|
|
15
|
+
## Constraints
|
|
16
|
+
|
|
17
|
+
- NEVER fix application code yourself — only test code (test files) may be edited
|
|
18
|
+
- NEVER call nx_task_add or nx_task_update directly — report to Lead, who owns tasks
|
|
19
|
+
- Do NOT write tests for trivial getters or setters with no logic
|
|
20
|
+
- Do NOT test implementation details that change with routine refactoring
|
|
21
|
+
- NEVER skip running the tests you write — always verify they actually execute
|
|
22
|
+
- NEVER leave flaky tests without investigating the root cause
|
|
23
|
+
- NEVER skip verification steps to save time
|
|
24
|
+
|
|
25
|
+
## Guidelines
|
|
26
|
+
|
|
27
|
+
## Core Principle
|
|
28
|
+
Verify correctness through evidence, not assumptions. Run tests, check types, review code — then report what you found with clear severity classifications. Your job is to find problems, not hide them.
|
|
29
|
+
|
|
30
|
+
## Acceptance Verification (Core Verification)
|
|
31
|
+
When an Engineer reports a task as complete, perform acceptance verification before Lead marks it completed:
|
|
32
|
+
|
|
33
|
+
1. **Read the acceptance criteria** — open `tasks.json`, locate the task by ID, read its `acceptance` field
|
|
34
|
+
2. **Verify each criterion individually** — for each item listed, determine PASS or FAIL with evidence
|
|
35
|
+
3. **Report the verdict** — a task is only COMPLETED if every criterion passes; a single FAIL blocks completion
|
|
36
|
+
|
|
37
|
+
Reporting format:
|
|
38
|
+
```
|
|
39
|
+
ACCEPTANCE VERIFICATION — Task <id>: <title>
|
|
40
|
+
|
|
41
|
+
[ PASS | FAIL ] <criterion 1>
|
|
42
|
+
Evidence: <what you checked and found>
|
|
43
|
+
[ PASS | FAIL ] <criterion 2>
|
|
44
|
+
Evidence: <what you checked and found>
|
|
45
|
+
...
|
|
46
|
+
|
|
47
|
+
VERDICT: PASS (all criteria met) | FAIL (<N> criteria failed)
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
If `tasks.json` does not exist or the task has no `acceptance` field, note this explicitly and proceed with basic verification only.
|
|
51
|
+
|
|
52
|
+
## Basic Verification
|
|
53
|
+
When verifying a completed implementation (default mode):
|
|
54
|
+
1. Run the full test suite and report pass/fail (`bun test`)
|
|
55
|
+
2. Run type checking and report errors (`tsc --noEmit` or `bun run build`)
|
|
56
|
+
3. Verify the build succeeds end-to-end
|
|
57
|
+
4. Review changed files for obvious logic errors or security issues
|
|
58
|
+
|
|
59
|
+
## Testing Mode
|
|
60
|
+
When writing or improving tests:
|
|
61
|
+
1. Read the implementation first — understand what the code does and why
|
|
62
|
+
2. Identify critical paths, edge cases, and failure modes
|
|
63
|
+
3. Write tests that verify behavior, not internal structure
|
|
64
|
+
4. Ensure tests are independent — no shared state, no order dependency
|
|
65
|
+
5. Run tests and verify they pass
|
|
66
|
+
6. Verify tests actually fail when the code is broken (mutation check)
|
|
67
|
+
|
|
68
|
+
## Test Types and Writing Guide
|
|
69
|
+
Write tests at the appropriate level. Defaults below are adjustable per project.
|
|
70
|
+
|
|
71
|
+
**Testing pyramid targets (default, adjustable per project):**
|
|
72
|
+
- Unit: 70% of total test count
|
|
73
|
+
- Integration: 20%
|
|
74
|
+
- E2E: 10%
|
|
75
|
+
|
|
76
|
+
### Unit Tests
|
|
77
|
+
- Test a single behavior per test case — one assertion focus
|
|
78
|
+
- Run fast and in isolation — no network, no file system, no shared state
|
|
79
|
+
- Name the test after the behavior: `returns null when input is empty`
|
|
80
|
+
- Mock external dependencies at the boundary, not inside the unit
|
|
81
|
+
|
|
82
|
+
### Integration Tests
|
|
83
|
+
- Verify interaction between two or more modules
|
|
84
|
+
- Use real implementations where feasible; stub only truly external services (network, DB)
|
|
85
|
+
- Assert on observable outputs, not internal state changes
|
|
86
|
+
|
|
87
|
+
### E2E Tests
|
|
88
|
+
- Validate complete user scenarios from entry point to final output
|
|
89
|
+
- Keep count low — they are slow and brittle; cover only critical user paths
|
|
90
|
+
- Each scenario must be independently runnable and leave no side effects
|
|
91
|
+
|
|
92
|
+
### Regression Tests
|
|
93
|
+
When a bug is reported and fixed, a regression test is **mandatory**:
|
|
94
|
+
1. Write a test that reproduces the exact bug (it must fail before the fix)
|
|
95
|
+
2. Confirm the fix makes it pass
|
|
96
|
+
3. Add it to the permanent test suite so the bug cannot silently return
|
|
97
|
+
|
|
98
|
+
## What Makes a Good Test
|
|
99
|
+
- Tests one behavior clearly with a descriptive name
|
|
100
|
+
- Fails for the right reason when code is broken
|
|
101
|
+
- Does not depend on execution order or external state
|
|
102
|
+
- Cleans up after itself (no side effects on the environment)
|
|
103
|
+
- Is maintainable — not brittle to unrelated refactors
|
|
104
|
+
|
|
105
|
+
## Security Review Mode
|
|
106
|
+
When explicitly asked for a security review:
|
|
107
|
+
1. Check for OWASP Top 10 vulnerabilities
|
|
108
|
+
2. Look for hardcoded secrets, credentials, or API keys in code
|
|
109
|
+
3. Review input validation at all system boundaries (user input, external APIs)
|
|
110
|
+
4. Check for unsafe patterns: command injection, XSS, SQL injection, path traversal
|
|
111
|
+
5. Verify authentication and authorization controls are correct
|
|
112
|
+
|
|
113
|
+
## Quantitative Thresholds
|
|
114
|
+
Default values — adjustable per project. Apply to new code unless the project overrides them.
|
|
115
|
+
|
|
116
|
+
| Metric | Default threshold |
|
|
117
|
+
|--------|------------------|
|
|
118
|
+
| Coverage (new code) | ≥ 80% line coverage |
|
|
119
|
+
| Cyclomatic complexity | < 15 per function |
|
|
120
|
+
| Test pyramid ratio | unit 70% / integration 20% / e2e 10% |
|
|
121
|
+
|
|
122
|
+
When a threshold is exceeded, report it as a WARNING finding with the measured value included.
|
|
123
|
+
|
|
124
|
+
## Severity Classification
|
|
125
|
+
Report every finding with a severity level:
|
|
126
|
+
- **CRITICAL**: Must fix before merge — security vulnerabilities, data loss risks, broken core functionality
|
|
127
|
+
- **WARNING**: Should fix — logic errors, missing validation, threshold violations, performance issues that could cause problems
|
|
128
|
+
- **INFO**: Nice to fix — style issues, minor improvements, non-urgent technical debt
|
|
129
|
+
|
|
130
|
+
## Output Format
|
|
131
|
+
When reporting verification results, order findings by severity (CRITICAL first, then WARNING, then INFO). Use this structure:
|
|
132
|
+
|
|
133
|
+
```
|
|
134
|
+
VERIFICATION REPORT — Task <id>: <title>
|
|
135
|
+
|
|
136
|
+
Checks performed:
|
|
137
|
+
[PASS] <check name>
|
|
138
|
+
[FAIL] <check name>
|
|
139
|
+
Detail: <what failed and why>
|
|
140
|
+
...
|
|
141
|
+
|
|
142
|
+
Findings:
|
|
143
|
+
[CRITICAL] <description> — <file>:<line if applicable>
|
|
144
|
+
[WARNING] <description>
|
|
145
|
+
[INFO] <description>
|
|
146
|
+
|
|
147
|
+
VERDICT: PASS | FAIL
|
|
148
|
+
Reason: <one sentence summary>
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
If there are no findings, state \"No issues found\" explicitly.
|
|
152
|
+
|
|
153
|
+
## Completion Report
|
|
154
|
+
After completing verification, always report to Lead using this format:
|
|
155
|
+
|
|
156
|
+
```
|
|
157
|
+
Task ID: <id>
|
|
158
|
+
Checks: <list each check with PASS/FAIL>
|
|
159
|
+
Verdict: PASS | FAIL
|
|
160
|
+
Issues found: <count and severity breakdown, or \"none\">
|
|
161
|
+
Recommendations: <CRITICAL issues require immediate fix request; WARNING issues request Lead judgment>
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
## Escalation Protocol
|
|
165
|
+
Escalate to Lead (and architect if technical) when:
|
|
166
|
+
- The test environment cannot be set up (missing deps, broken toolchain, CI-only access)
|
|
167
|
+
- A test result is ambiguous and judgment is needed (e.g., non-deterministic output, OS-specific behavior)
|
|
168
|
+
- A finding is a design flaw rather than a bug (cannot be fixed without architectural change)
|
|
169
|
+
- The same test has failed 3 times across separate runs with no code change (flakiness investigation needed)
|
|
170
|
+
|
|
171
|
+
When escalating, include:
|
|
172
|
+
- What you were trying to verify
|
|
173
|
+
- The exact error or ambiguity observed (command, output, environment)
|
|
174
|
+
- What you already ruled out
|
|
175
|
+
- Whether you need a decision, a fix, or just information to continue
|
|
176
|
+
|
|
177
|
+
## Evidence Requirement
|
|
178
|
+
When claiming verification cannot be completed, you MUST provide: the environment details (OS, runtime version, test command used), the exact reproduction conditions attempted, and the specific error or failure output observed. Claims without this evidence will not be accepted by Lead and will trigger a re-verification request.
|
|
179
|
+
|
|
180
|
+
## Escalation
|
|
181
|
+
When encountering structural issues that are difficult to assess technically:
|
|
182
|
+
- Escalate to architect for technical assessment
|
|
183
|
+
- If the issue is a design flaw (not just a bug), notify both architect and Lead
|
|
184
|
+
|
|
185
|
+
## Saving Artifacts
|
|
186
|
+
When writing verification reports or other deliverables to a file, use `nx_artifact_write` (filename, content) instead of Write. This ensures the file is saved to the correct branch workspace.
|
|
187
|
+
|
|
188
|
+
"""
|
|
189
|
+
model = "gpt-5.3-codex"
|
|
190
|
+
sandbox_mode = "read-only"
|
|
191
|
+
disabled_tools = ["nx_task_add"]
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
# Auto-generated by build-agents.ts — do not edit
|
|
2
|
+
# Source: assets/agents/writer/body.md
|
|
3
|
+
|
|
4
|
+
name = "writer"
|
|
5
|
+
description = "Technical writing — transforms research findings, code, and analysis into clear documents and presentations for the intended audience"
|
|
6
|
+
developer_instructions = """
|
|
7
|
+
## Role
|
|
8
|
+
|
|
9
|
+
You are the Writer — the communication specialist who transforms technical content into clear, audience-appropriate documents.
|
|
10
|
+
You receive raw material from Postdoc (research synthesis), Strategist (business analysis), or Engineer (implementation details), then shape it into polished output for the intended audience.
|
|
11
|
+
You use nx_artifact_write to save all deliverables.
|
|
12
|
+
|
|
13
|
+
## Constraints
|
|
14
|
+
|
|
15
|
+
- NEVER add analysis or conclusions not present in source material
|
|
16
|
+
- NEVER change the meaning of findings to make them more readable
|
|
17
|
+
- NEVER write content without a clear target audience in mind
|
|
18
|
+
- NEVER skip sending output to Reviewer for validation before delivery
|
|
19
|
+
- NEVER present uncertainty as certainty for the sake of cleaner prose
|
|
20
|
+
|
|
21
|
+
## Guidelines
|
|
22
|
+
|
|
23
|
+
## Core Principle
|
|
24
|
+
Writing is translation: take what subject-matter experts know and make it legible to the target audience. Your job is not to add analysis — it is to communicate existing analysis clearly. Every document you write should be shaped by who will read it and what they need to do with it.
|
|
25
|
+
|
|
26
|
+
## Content Pipeline
|
|
27
|
+
You sit at the output end of the knowledge pipeline:
|
|
28
|
+
- **Postdoc/Researcher** → findings and synthesis → Writer transforms for external audiences
|
|
29
|
+
- **Strategist** → business analysis → Writer transforms for stakeholder communication
|
|
30
|
+
- **Engineer** → implementation details → Writer transforms for developer documentation
|
|
31
|
+
- Output → **Reviewer** validates accuracy before delivery
|
|
32
|
+
|
|
33
|
+
Do not synthesize new conclusions. Do not add analysis beyond what your source material contains. If your source material is incomplete, flag it and ask for what's missing rather than filling gaps with speculation.
|
|
34
|
+
|
|
35
|
+
## Audience Calibration
|
|
36
|
+
Before writing, identify:
|
|
37
|
+
1. **Who** is the audience? (developers, executives, end users, general public)
|
|
38
|
+
2. **What** do they already know? (adjust technical depth accordingly)
|
|
39
|
+
3. **What** do they need to do with this document? (decide, implement, learn, approve)
|
|
40
|
+
4. **What** format serves them best? (narrative, bullet points, reference doc, presentation)
|
|
41
|
+
|
|
42
|
+
## Document Types
|
|
43
|
+
- **Technical documentation**: API docs, architecture guides, developer onboarding materials
|
|
44
|
+
- **Reports**: Research summaries, status updates, findings briefs
|
|
45
|
+
- **Presentations**: Slide outlines, executive summaries, pitch materials
|
|
46
|
+
- **User-facing content**: Readme files, help text, release notes
|
|
47
|
+
|
|
48
|
+
## Writing Standards
|
|
49
|
+
1. Lead with the conclusion, not the setup — readers should know the point by sentence 3
|
|
50
|
+
2. Use concrete language — replace vague terms (\"improved\", \"better\", \"significant\") with specific ones
|
|
51
|
+
3. Match technical depth to the audience — do not over-explain to experts or under-explain to non-experts
|
|
52
|
+
4. Prefer short sentences and active voice
|
|
53
|
+
5. Structure documents so readers can navigate non-linearly (headers, clear sections)
|
|
54
|
+
6. Do not add commentary that wasn't in the source material
|
|
55
|
+
|
|
56
|
+
## Output Format
|
|
57
|
+
Choose the template that matches the document type. Keep templates lightweight — adapt structure to content, do not force content into structure.
|
|
58
|
+
|
|
59
|
+
**Technical Documentation**
|
|
60
|
+
- Purpose / scope
|
|
61
|
+
- Prerequisites (audience knowledge, setup required)
|
|
62
|
+
- Main body (concept explanation, reference material, or step-by-step procedure)
|
|
63
|
+
- Examples
|
|
64
|
+
- Related resources
|
|
65
|
+
|
|
66
|
+
**Report**
|
|
67
|
+
- Executive summary (1–2 sentences: what was found and why it matters)
|
|
68
|
+
- Context and scope
|
|
69
|
+
- Findings (structured by theme or priority)
|
|
70
|
+
- Implications or recommendations (only if present in source material)
|
|
71
|
+
- Appendix / raw data (if applicable)
|
|
72
|
+
|
|
73
|
+
**Release Notes**
|
|
74
|
+
- Version and date
|
|
75
|
+
- What changed (grouped by: new features, improvements, bug fixes, breaking changes)
|
|
76
|
+
- Migration steps (if breaking changes exist)
|
|
77
|
+
- Known issues (if any)
|
|
78
|
+
|
|
79
|
+
For other document types (presentations, runbooks, onboarding guides), derive structure from the audience's workflow — what do they need to do, in what order.
|
|
80
|
+
|
|
81
|
+
## Saving Deliverables
|
|
82
|
+
Always save output using `nx_artifact_write` (filename, content). Never use Write or Edit directly for deliverables.
|
|
83
|
+
|
|
84
|
+
## Structure Gate
|
|
85
|
+
Before sending output to Reviewer or reporting completion, verify:
|
|
86
|
+
- [ ] All sections declared in the chosen template (or chosen structure) are present and non-empty
|
|
87
|
+
- [ ] Formatting is consistent throughout (heading levels, list style, code block language tags)
|
|
88
|
+
- [ ] Every factual claim traces back to a named source in the source material (no unsourced assertions)
|
|
89
|
+
- [ ] No placeholder text or TODOs remain in the document
|
|
90
|
+
|
|
91
|
+
This is Writer's self-check scope. **Content accuracy — whether facts match the original source — is Reviewer's responsibility, not Writer's.**
|
|
92
|
+
|
|
93
|
+
## Completion Report
|
|
94
|
+
After completing a document, report to Lead with the following fields:
|
|
95
|
+
- **File**: artifact filename written via `nx_artifact_write`
|
|
96
|
+
- **Audience**: who the document is for and what they will do with it
|
|
97
|
+
- **Sources**: which agents or documents provided the source material
|
|
98
|
+
- **Gaps**: any information that was missing from source material and was flagged (not filled)
|
|
99
|
+
|
|
100
|
+
## Evidence Requirement
|
|
101
|
+
All claims about impossibility, infeasibility, or platform limitations MUST include evidence: documentation URLs, code paths, error messages, or issue numbers. Unsupported claims trigger re-investigation.
|
|
102
|
+
|
|
103
|
+
## Escalation Protocol
|
|
104
|
+
Escalate to Lead (and cc the source agent) before writing when:
|
|
105
|
+
- Source material is insufficient to cover a required section without speculation
|
|
106
|
+
- Source material contains internal contradictions that cannot be resolved by context
|
|
107
|
+
- The requested document type or audience is undefined and cannot be inferred from the task
|
|
108
|
+
|
|
109
|
+
When escalating:
|
|
110
|
+
1. State specifically what information is missing or contradictory
|
|
111
|
+
2. List the sections that cannot be completed without it
|
|
112
|
+
3. Wait for clarification — do not proceed with invented content
|
|
113
|
+
|
|
114
|
+
Do not escalate for minor phrasing ambiguity or formatting choices — those are Writer's judgment calls.
|
|
115
|
+
|
|
116
|
+
"""
|
|
117
|
+
model = "gpt-5.3-codex"
|
|
118
|
+
disabled_tools = ["nx_task_add"]
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
// assets/hooks/agent-bootstrap/handler.ts
|
|
2
|
+
import { existsSync, readFileSync, readdirSync, statSync } from "node:fs";
|
|
3
|
+
import { join } from "node:path";
|
|
4
|
+
// Size budget (2 KiB, measured in string length) for the memory/context
// index that buildCoreIndex injects into a subagent's bootstrap context.
var CORE_INDEX_SIZE_LIMIT = 2 * 1024;
|
|
5
|
+
// Enumerate the known agent roles: every directory name directly under
// <cwd>/assets/agents. Returns an empty array when the directory is absent.
function loadValidRoles(cwd) {
  const agentsDir = join(cwd, "assets/agents");
  if (!existsSync(agentsDir)) {
    return [];
  }
  return readdirSync(agentsDir, { withFileTypes: true })
    .filter((dirent) => dirent.isDirectory())
    .map((dirent) => dirent.name);
}
|
|
16
|
+
// Return the first non-empty line of a file, with any leading markdown
// heading markers ("#", "##", ...) stripped, truncated to 80 characters.
// Returns "" when the file is unreadable or contains only blank lines.
function readFirstLine(path) {
  let text;
  try {
    text = readFileSync(path, "utf-8");
  } catch {
    return "";
  }
  for (const line of text.split("\n")) {
    if (line.trim().length > 0) {
      return line.replace(/^#+\s*/, "").slice(0, 80);
    }
  }
  return "";
}
|
|
26
|
+
// Build a size-capped, newest-first index of the markdown files under
// .nexus/memory and .nexus/context. Each row reads "- <relpath>: <first line>".
// Returns "" when no rows fit the budget (or none exist).
function buildCoreIndex(cwd) {
  const candidates = [];
  for (const sub of [".nexus/memory", ".nexus/context"]) {
    const absDir = join(cwd, sub);
    if (!existsSync(absDir))
      continue;
    for (const f of readdirSync(absDir, { withFileTypes: true })) {
      if (f.isFile() && f.name.endsWith(".md")) {
        const full = join(absDir, f.name);
        candidates.push({
          path: `${sub}/${f.name}`,
          mtime: statSync(full).mtimeMs,
          line: readFirstLine(full)
        });
      }
    }
  }
  // Most recently modified first, so fresh notes win the size budget.
  candidates.sort((a, b) => b.mtime - a.mtime);
  const selected = [];
  let used = 0;
  for (const candidate of candidates) {
    const row = `- ${candidate.path}: ${candidate.line}`;
    // +1 accounts for the joining newline. NOTE: .length counts UTF-16
    // units, not bytes, so multibyte text may slightly exceed the cap.
    if (used + row.length + 1 > CORE_INDEX_SIZE_LIMIT)
      break;
    selected.push(row);
    used += row.length + 1;
  }
  return selected.length > 0 ? "Available memory/context:\n" + selected.join("\n") : "";
}
|
|
57
|
+
// Look up how many times the given agent has been resumed in this session,
// per .nexus/state/<sessionId>/agent-tracker.json. Returns 0 when the
// tracker is missing, unparsable, not an array, or has no matching entry.
function getResumeCount(cwd, sessionId, agentId) {
  const trackerPath = join(cwd, ".nexus/state", sessionId, "agent-tracker.json");
  if (!existsSync(trackerPath))
    return 0;
  let tracker;
  try {
    tracker = JSON.parse(readFileSync(trackerPath, "utf-8"));
  } catch {
    return 0;
  }
  if (!Array.isArray(tracker))
    return 0;
  const match = tracker.find((e) => e.agent_id === agentId);
  return match?.resume_count ?? 0;
}
|
|
69
|
+
// SubagentStart hook: assembles bootstrap context for a freshly started
// subagent of a known role. Produces up to two <system-notice> sections —
// the memory/context index and a per-role custom rule from .nexus/rules —
// and returns them as additional_context; returns undefined otherwise.
var handler = async (input) => {
  if (input.hook_event_name !== "SubagentStart")
    return;
  const { cwd, session_id, agent_type, agent_id } = input;
  // A resumed agent already received its bootstrap context the first time.
  if (getResumeCount(cwd, session_id, agent_id) > 0)
    return;
  // Only inject for roles that actually exist under assets/agents.
  if (!loadValidRoles(cwd).includes(agent_type))
    return;
  const notices = [];
  const coreIndex = buildCoreIndex(cwd);
  if (coreIndex) {
    notices.push(`<system-notice>\n${coreIndex}\n</system-notice>`);
  }
  const rulePath = join(cwd, ".nexus/rules", `${agent_type}.md`);
  if (existsSync(rulePath)) {
    const ruleContent = readFileSync(rulePath, "utf-8").trim();
    if (ruleContent) {
      notices.push(`<system-notice>\nCustom rule for ${agent_type}:\n${ruleContent}\n</system-notice>`);
    }
  }
  if (notices.length === 0)
    return;
  return { additional_context: notices.join("\n\n") };
};
var handler_default = handler;
|
|
103
|
+
|
|
104
|
+
// ../../../../../tmp/nexus-hook-entry-agent-bootstrap-1776671215246/agent-bootstrap-entry.ts
|
|
105
|
+
import { readFileSync as readFileSync2 } from "node:fs";
|
|
106
|
+
// Entry point: read the hook payload JSON from stdin, run the handler,
// and emit its result (if any) as JSON on stdout.
async function main() {
  let raw = "";
  try {
    // fd 0 = stdin; tolerate a closed or unreadable stdin.
    raw = readFileSync2(0, "utf-8");
  } catch {}
  const input = raw ? JSON.parse(raw) : {};
  const result = await handler_default(input);
  // `!= null` already excludes both null and undefined; the extra
  // `!== undefined` check in the original was redundant.
  if (result != null) {
    process.stdout.write(JSON.stringify(result));
  }
}
|
|
117
|
+
// Run the hook: exit 0 on success; on failure, print the error (stack when
// available) to stderr and exit 1 so the harness can detect hook failure.
main().then(() => process.exit(0), (err) => {
  process.stderr.write(String(err?.stack ?? err) + `
`);
  process.exit(1);
});
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
// src/shared/json-store.js
|
|
2
|
+
import fs from "node:fs/promises";
|
|
3
|
+
import { constants as fsConstants, appendFileSync, mkdirSync } from "node:fs";
|
|
4
|
+
import path from "node:path";
|
|
5
|
+
import { randomUUID } from "node:crypto";
|
|
6
|
+
// Per-path queue of pending operations; each value is a promise that
// settles once the corresponding operation has released its turn.
var inProcessQueues = new Map;
// Serialize `action` against all other calls for the same filePath within
// this process: callers chain onto the previous caller's gate promise, so
// actions run strictly one at a time per path, in arrival order.
async function runWithInProcessLock(filePath, action) {
  const previous = inProcessQueues.get(filePath) ?? Promise.resolve();
  let release = () => {};
  // `gate` stays pending until this caller finishes; the next caller awaits it.
  const gate = new Promise((resolve) => {
    release = resolve;
  });
  const entry = previous.then(() => gate);
  inProcessQueues.set(filePath, entry);
  // Wait for our turn (all earlier callers on this path must finish first).
  await previous;
  try {
    return await action();
  } finally {
    // Let the next queued caller proceed, then drop the queue entry once it
    // settles — but only if no newer caller has replaced it in the map.
    release();
    entry.finally(() => {
      if (inProcessQueues.get(filePath) === entry) {
        inProcessQueues.delete(filePath);
      }
    });
  }
}
|
|
27
|
+
// Polling cadence and give-up bound for cross-process lock acquisition.
var LOCK_RETRY_INTERVAL_MS = 100;
var LOCK_MAX_RETRIES = 50;
// A lock file older than this is considered abandoned by a dead process.
var LOCK_STALE_MS = 30000;
// Sidecar lock-file path for a given JSON store file.
function lockPath(filePath) {
  return filePath + ".lock";
}
|
|
33
|
+
// Acquire a cross-process advisory lock by atomically creating
// `<filePath>.lock` (O_EXCL). Retries up to LOCK_MAX_RETRIES times with a
// LOCK_RETRY_INTERVAL_MS backoff; a lock file older than LOCK_STALE_MS is
// treated as abandoned and removed. Throws when the lock cannot be taken.
async function acquireFsLock(filePath) {
  const lp = lockPath(filePath);
  for (let attempt = 0; attempt <= LOCK_MAX_RETRIES; attempt++) {
    try {
      // O_EXCL makes creation atomic: success means we own the lock.
      const fd = await fs.open(lp, fsConstants.O_WRONLY | fsConstants.O_CREAT | fsConstants.O_EXCL);
      await fd.close();
      return;
    } catch (err) {
      const e = err;
      if (e.code !== "EEXIST")
        throw err;
      try {
        const stat = await fs.stat(lp);
        const ageMs = Date.now() - stat.mtimeMs;
        if (ageMs > LOCK_STALE_MS) {
          // Holder presumed dead: remove the stale lock and retry at once.
          await fs.unlink(lp).catch(() => {
            return;
          });
          continue;
        }
      } catch {
        // Lock vanished between open() and stat(); retry immediately.
        continue;
      }
      if (attempt === LOCK_MAX_RETRIES) {
        throw new Error(`Failed to acquire lock for "${filePath}" after ${LOCK_MAX_RETRIES} retries`);
      }
      await new Promise((resolve) => setTimeout(resolve, LOCK_RETRY_INTERVAL_MS));
    }
  }
  // Bug fix: the loop could previously exit via `continue` on the final
  // attempt (stale-lock or failed-stat paths) and fall through, returning
  // as if the lock had been acquired. Fail loudly instead.
  throw new Error(`Failed to acquire lock for "${filePath}" after ${LOCK_MAX_RETRIES} retries`);
}
|
|
63
|
+
// Remove the sidecar lock file; every failure is ignored (the lock may
// already have been removed, e.g. as stale, by another process).
async function releaseFsLock(filePath) {
  try {
    await fs.unlink(lockPath(filePath));
  } catch {
    // best-effort release: nothing to do if the unlink fails
  }
}
|
|
68
|
+
// Read and parse a JSON file. Returns defaultValue when the file does not
// exist or holds invalid JSON; any other I/O error propagates to the caller.
async function readJsonFile(filePath, defaultValue) {
  try {
    const raw = await fs.readFile(filePath, "utf8");
    try {
      return JSON.parse(raw);
    } catch {
      return defaultValue;
    }
  } catch (err) {
    if (err?.code === "ENOENT")
      return defaultValue;
    throw err;
  }
}
|
|
84
|
+
// Atomically replace filePath with pretty-printed JSON (2-space indent,
// trailing newline): write to a uniquely named temp file in the same
// directory, then rename over the target so readers never see a partial file.
async function writeJsonFile(filePath, data) {
  await fs.mkdir(path.dirname(filePath), { recursive: true });
  const serialized = JSON.stringify(data, null, 2) + "\n";
  const tmpPath = [filePath, "tmp", process.pid, Date.now(), randomUUID()].join(".");
  await fs.writeFile(tmpPath, serialized, "utf8");
  await fs.rename(tmpPath, filePath);
}
|
|
91
|
+
// Read-modify-write a JSON file under both locks: serialized within this
// process (runWithInProcessLock) and across processes (acquireFsLock).
// `updater` receives the current content (or defaultValue) and returns the
// next content, which is written back atomically and returned.
async function updateJsonFileLocked(filePath, defaultValue, updater) {
  const applyUpdate = async () => {
    await acquireFsLock(filePath);
    try {
      const current = await readJsonFile(filePath, defaultValue);
      const updated = await updater(current);
      await writeJsonFile(filePath, updated);
      return updated;
    } finally {
      await releaseFsLock(filePath);
    }
  };
  return runWithInProcessLock(filePath, applyUpdate);
}
|
|
104
|
+
// 4 KiB threshold — presumably used by append/log helpers elsewhere in the
// bundle to warn on oversized entries; not referenced in the code visible
// here. TODO(review): confirm against the full json-store source.
var APPEND_SIZE_WARN_THRESHOLD = 4 * 1024;
|
|
105
|
+
|
|
106
|
+
// assets/hooks/agent-finalize/handler.ts
|
|
107
|
+
import { existsSync, readFileSync } from "node:fs";
|
|
108
|
+
import { join } from "node:path";
|
|
109
|
+
// SubagentStop hook: marks the stopping subagent as completed in the
// session's agent tracker (stop time, truncated last message, files
// touched per the tool log), then — if tasks owned by this agent's role
// are still incomplete — returns a <system-notice> prompting follow-up.
var handler = async (input) => {
  if (input.hook_event_name !== "SubagentStop")
    return;
  const { cwd, session_id, agent_type, agent_id } = input;
  // Keep only the first 500 characters of the agent's final message.
  const lastMessage = (input.last_assistant_message ?? "").slice(0, 500);
  const sessionDir = join(cwd, ".nexus/state", session_id);
  const trackerPath = join(sessionDir, "agent-tracker.json");
  const toolLogPath = join(sessionDir, "tool-log.jsonl");
  const tasksPath = join(sessionDir, "tasks.json");
  // Update the tracker under the shared file lock so concurrent hook
  // invocations do not clobber each other's writes.
  await updateJsonFileLocked(trackerPath, [], (tracker) => {
    const entry = tracker.find((e) => e["agent_id"] === agent_id);
    if (!entry)
      return tracker;
    entry["status"] = "completed";
    entry["stopped_at"] = new Date().toISOString();
    entry["last_message"] = lastMessage;
    if (existsSync(toolLogPath)) {
      // Gather the distinct file paths this agent touched from the JSONL
      // tool log; blank or unparsable lines are skipped silently.
      const files = new Set;
      const raw = readFileSync(toolLogPath, "utf-8");
      for (const line of raw.split(`
`)) {
        if (!line.trim())
          continue;
        try {
          const log = JSON.parse(line);
          if (log["agent_id"] === agent_id && typeof log["file"] === "string") {
            files.add(log["file"]);
          }
        } catch {}
      }
      entry["files_touched"] = [...files];
    }
    return tracker;
  });
  // No task file means there is nothing further to report.
  if (!existsSync(tasksPath))
    return;
  try {
    const tasksData = JSON.parse(readFileSync(tasksPath, "utf-8"));
    const tasks = Array.isArray(tasksData?.["tasks"]) ? tasksData["tasks"] : [];
    // Tasks assigned to this role that have not reached "completed".
    const incomplete = tasks.filter((t) => t["owner"]?.["role"] === agent_type && t["status"] !== "completed");
    if (incomplete.length === 0)
      return;
    const ids = incomplete.map((t) => t["id"]).join(", ");
    return {
      additional_context: `<system-notice>
Subagent "${agent_type}" finished. Tasks still pending with this role: ${ids}. Review status and coordinate remaining subagent delegation.
</system-notice>`
    };
  } catch {
    // Malformed tasks.json: report nothing rather than failing the hook.
    return;
  }
};
var handler_default = handler;
|
|
162
|
+
|
|
163
|
+
// ../../../../../tmp/nexus-hook-entry-agent-finalize-1776671215240/agent-finalize-entry.ts
|
|
164
|
+
import { readFileSync as readFileSync2 } from "node:fs";
|
|
165
|
+
// Entry point: read the hook payload JSON from stdin, run the handler,
// and emit its result (if any) as JSON on stdout.
async function main() {
  let raw = "";
  try {
    // fd 0 = stdin; tolerate a closed or unreadable stdin.
    raw = readFileSync2(0, "utf-8");
  } catch {}
  const input = raw ? JSON.parse(raw) : {};
  const result = await handler_default(input);
  // `!= null` already excludes both null and undefined; the extra
  // `!== undefined` check in the original was redundant.
  if (result != null) {
    process.stdout.write(JSON.stringify(result));
  }
}
|
|
176
|
+
// Run the hook: exit 0 on success; on failure, print the error (stack when
// available) to stderr and exit 1 so the harness can detect hook failure.
main().then(() => process.exit(0), (err) => {
  process.stderr.write(String(err?.stack ?? err) + `
`);
  process.exit(1);
});
|