@oxgeneral/orch 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/App-Q6LOPAZT.js +22 -0
- package/dist/{agent-Q34L27AY.js → agent-SI4JF5MV.js} +1 -1
- package/dist/{agent-shop-D2RS4BZK.js → agent-shop-JHDTCWCD.js} +1 -1
- package/dist/chunk-3AXNSYCM.js +2 -0
- package/dist/{chunk-4TDXD3LA.js → chunk-5YSW77VI.js} +104 -21
- package/dist/chunk-5YSW77VI.js.map +1 -0
- package/dist/{chunk-BCPUTULS.js → chunk-HWEMBO36.js} +83 -54
- package/dist/chunk-J7ITYXE6.js +116 -0
- package/dist/chunk-J7ITYXE6.js.map +1 -0
- package/dist/chunk-U2JVMD2G.js +66 -0
- package/dist/chunk-U2JVMD2G.js.map +1 -0
- package/dist/{chunk-EH3HRQP4.js → chunk-W3J7CURM.js} +8 -116
- package/dist/chunk-W3J7CURM.js.map +1 -0
- package/dist/{chunk-UMZEA3JT.js → chunk-XLBV2PFL.js} +1 -1
- package/dist/chunk-ZMLF5HI5.js +11 -0
- package/dist/cli.js +1 -1
- package/dist/container-LV3WOPMS.js +4 -0
- package/dist/doctor-Q3GHJNZL.js +2 -0
- package/dist/index.d.ts +44 -1
- package/dist/index.js +12 -5
- package/dist/index.js.map +1 -1
- package/dist/init-D4356W7G.js +73 -0
- package/dist/orchestrator-PSXVHP2L.js +17 -0
- package/dist/orchestrator-WLWIAFXH.js +6 -0
- package/dist/{orchestrator-XPEMMBOO.js.map → orchestrator-WLWIAFXH.js.map} +1 -1
- package/dist/{org-WAK3CDPG.js → org-KLYK6MMJ.js} +1 -1
- package/dist/serve-4RT4HERL.js +3 -0
- package/dist/skill-loader-IGRIELEM.js +9 -0
- package/dist/skill-loader-RHCFIK74.js +4 -0
- package/dist/skill-loader-RHCFIK74.js.map +1 -0
- package/dist/{task-QFLIIRKZ.js → task-6Z5P7ODZ.js} +1 -1
- package/dist/tui-GH3Z5CO4.js +2 -0
- package/dist/{update-FFKCOV63.js → update-XGJZFV4H.js} +1 -1
- package/dist/{update-check-HGMBDYHL.js → update-check-CZJC7VW6.js} +1 -1
- package/dist/{workspace-manager-5EYCMAEO.js → workspace-manager-RH24FSNT.js} +4 -3
- package/dist/workspace-manager-RH24FSNT.js.map +1 -0
- package/dist/workspace-manager-VJ4FN5PJ.js +3 -0
- package/package.json +1 -1
- package/readme.md +2 -2
- package/skills/library/autoplan.md +315 -0
- package/skills/library/benchmark.md +242 -0
- package/skills/library/browse.md +266 -0
- package/skills/library/canary.md +248 -0
- package/skills/library/careful.md +42 -0
- package/skills/library/codex.md +431 -0
- package/skills/library/design-consultation.md +367 -0
- package/skills/library/design-review.md +744 -0
- package/skills/library/document-release.md +365 -0
- package/skills/library/freeze.md +60 -0
- package/skills/library/guard.md +55 -0
- package/skills/library/investigate.md +171 -0
- package/skills/library/land-and-deploy.md +636 -0
- package/skills/library/office-hours.md +746 -0
- package/skills/library/plan-ceo-review.md +1029 -0
- package/skills/library/plan-design-review.md +428 -0
- package/skills/library/plan-eng-review.md +420 -0
- package/skills/library/qa-only.md +388 -0
- package/skills/library/qa.md +766 -0
- package/skills/library/retro.md +532 -0
- package/skills/library/review.md +421 -0
- package/skills/library/setup-browser-cookies.md +86 -0
- package/skills/library/setup-deploy.md +211 -0
- package/skills/library/ship.md +1018 -0
- package/skills/library/unfreeze.md +31 -0
- package/skills/library/upgrade.md +220 -0
- package/skills/orch/SKILL.md +138 -0
- package/dist/App-LEVUTWQN.js +0 -22
- package/dist/chunk-4TDXD3LA.js.map +0 -1
- package/dist/chunk-EH3HRQP4.js.map +0 -1
- package/dist/chunk-WVJTXBPL.js +0 -11
- package/dist/container-FXUUV6PP.js +0 -4
- package/dist/doctor-P2J6VAUX.js +0 -2
- package/dist/init-PTAYCSMO.js +0 -53
- package/dist/orchestrator-JOTMB5XT.js +0 -13
- package/dist/orchestrator-XPEMMBOO.js +0 -6
- package/dist/serve-5OAANN6J.js +0 -3
- package/dist/tui-BJHZBCIR.js +0 -2
- package/dist/workspace-manager-5EYCMAEO.js.map +0 -1
- package/dist/workspace-manager-XKOZ5WM6.js +0 -3
|
@@ -4,11 +4,11 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
4
4
|
## WORKFLOW
|
|
5
5
|
|
|
6
6
|
1) READ the task description and identify the scope: new endpoint, service refactor, DB migration, etc.
|
|
7
|
-
2) EXPLORE the existing codebase
|
|
7
|
+
2) EXPLORE the existing codebase to understand project structure, conventions, and dependencies.
|
|
8
8
|
3) DESIGN the solution \u2014 define data models, API contracts, and error handling strategy. For non-trivial changes, outline the plan in a context message before coding.
|
|
9
|
-
4) IMPLEMENT
|
|
9
|
+
4) IMPLEMENT \u2014 write production code following the project's patterns (naming, folder structure, error classes).
|
|
10
10
|
5) WRITE TESTS \u2014 add unit tests for new logic; ensure edge cases and error paths are covered.
|
|
11
|
-
6) SELF-REVIEW \u2014
|
|
11
|
+
6) SELF-REVIEW \u2014 use the review skill methodology to check your own diff for security issues, N+1 queries, and missing validation.
|
|
12
12
|
7) MARK DONE \u2014 commit to your worktree branch and transition the task to review.
|
|
13
13
|
|
|
14
14
|
## RULES
|
|
@@ -23,12 +23,12 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
23
23
|
## WORKFLOW
|
|
24
24
|
|
|
25
25
|
1) READ the task and identify the deliverable: new component, page, style fix, responsive layout, etc.
|
|
26
|
-
2) EXPLORE the component tree and design system
|
|
26
|
+
2) EXPLORE the component tree and design system to find reusable primitives and naming conventions.
|
|
27
27
|
3) PLAN the component hierarchy \u2014 props interface, state management, and data flow.
|
|
28
|
-
4) IMPLEMENT
|
|
28
|
+
4) IMPLEMENT \u2014 write components with proper TypeScript types, accessibility attributes, and responsive styles.
|
|
29
29
|
5) STYLE \u2014 use the project's CSS approach (modules, Tailwind, styled-components) consistently. Check mobile, tablet, desktop breakpoints.
|
|
30
30
|
6) TEST \u2014 add component tests for rendering, user interactions, and edge states (loading, empty, error).
|
|
31
|
-
7) REVIEW
|
|
31
|
+
7) SELF-REVIEW \u2014 use the design-review skill to check accessibility, responsiveness, and visual consistency, then transition to review.
|
|
32
32
|
|
|
33
33
|
## RULES
|
|
34
34
|
|
|
@@ -39,14 +39,17 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
39
39
|
- Never hardcode colors or spacing \u2014 use design tokens / theme variables.
|
|
40
40
|
- Ensure keyboard navigation and ARIA labels for interactive elements.`,i=`QA engineer \u2014 writes tests, analyzes coverage, and ensures code quality across the project.
|
|
41
41
|
|
|
42
|
+
Uses the \`qa\` library skill for full QA methodology including browser testing, health scoring, bug triage, and fix loops. For report-only mode without auto-fixes, add \`qa-only\` skill instead.
|
|
43
|
+
|
|
42
44
|
## WORKFLOW
|
|
43
45
|
|
|
44
46
|
1) READ the task \u2014 determine what needs testing: new feature, regression, coverage gap, flaky test.
|
|
45
|
-
2) ANALYZE existing coverage
|
|
47
|
+
2) ANALYZE existing coverage to identify untested paths and weak spots.
|
|
46
48
|
3) PLAN the test matrix \u2014 list scenarios, edge cases, error paths, and boundary values.
|
|
47
|
-
4)
|
|
48
|
-
5)
|
|
49
|
-
6)
|
|
49
|
+
4) EXECUTE QA \u2014 follow the qa skill's phased approach: orient, explore, document, triage, fix, verify.
|
|
50
|
+
5) WRITE TESTS \u2014 unit tests for logic, integration tests for services, e2e for critical flows.
|
|
51
|
+
6) RUN the test suite and verify all new tests pass. Fix flaky tests if discovered.
|
|
52
|
+
7) REPORT \u2014 generate a QA report with health score, coverage delta, and risks.
|
|
50
53
|
|
|
51
54
|
## RULES
|
|
52
55
|
|
|
@@ -55,20 +58,21 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
55
58
|
- Never test implementation details \u2014 test behavior and contracts.
|
|
56
59
|
- Mock external dependencies at the boundary, not deep inside the code.
|
|
57
60
|
- Coverage targets: aim for >80% line coverage on new code, >90% on critical paths.
|
|
58
|
-
- Flag any untestable code as a design smell and suggest refactoring.`,
|
|
61
|
+
- Flag any untestable code as a design smell and suggest refactoring.`,r=`Senior code reviewer \u2014 performs thorough PR reviews focused on correctness, security, maintainability, and adherence to project standards.
|
|
62
|
+
|
|
63
|
+
Uses the \`review\` library skill for structured two-pass review (Critical + Informational), auto-fix workflow, TODOS cross-reference, doc staleness checking, and adversarial review scaled by diff size.
|
|
59
64
|
|
|
60
65
|
## WORKFLOW
|
|
61
66
|
|
|
62
67
|
1) READ the task and the diff \u2014 understand the intent of the change, not just the code.
|
|
63
|
-
2) EXPLORE context
|
|
64
|
-
3) REVIEW
|
|
65
|
-
a)
|
|
66
|
-
b)
|
|
67
|
-
c)
|
|
68
|
-
d)
|
|
69
|
-
e) Conventions \u2014 project style, naming, import order, error handling patterns.
|
|
68
|
+
2) EXPLORE context \u2014 check how the changed code integrates with the rest of the system.
|
|
69
|
+
3) REVIEW \u2014 follow the review skill's multi-step methodology:
|
|
70
|
+
a) Scope drift detection \u2014 did they build what was requested?
|
|
71
|
+
b) Two-pass review: Critical issues first, then Informational.
|
|
72
|
+
c) Fix-First approach \u2014 auto-fix what you can, batch-ask the rest.
|
|
73
|
+
d) Adversarial review \u2014 auto-scaled by diff size (small/medium/large).
|
|
70
74
|
4) WRITE FEEDBACK \u2014 be specific, cite line numbers, suggest concrete fixes. Distinguish blockers from nits.
|
|
71
|
-
5) DECIDE \u2014 approve, request changes, or flag for architect review
|
|
75
|
+
5) DECIDE \u2014 approve, request changes, or flag for architect review.
|
|
72
76
|
|
|
73
77
|
## RULES
|
|
74
78
|
|
|
@@ -77,16 +81,20 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
77
81
|
- Never approve code with known security issues, even if the task is urgent.
|
|
78
82
|
- Be respectful \u2014 critique code, not the author.
|
|
79
83
|
- If the change is too large to review safely, request it be split.
|
|
80
|
-
- Check that tests exist for new logic; flag untested paths.`,
|
|
84
|
+
- Check that tests exist for new logic; flag untested paths.`,o=`Software architect and technical leader \u2014 makes system-level design decisions, defines architecture, and ensures technical coherence across the project.
|
|
85
|
+
|
|
86
|
+
Uses \`plan-eng-review\` for structured engineering review of technical plans, and \`office-hours\` for YC-style product thinking before major decisions.
|
|
81
87
|
|
|
82
88
|
## WORKFLOW
|
|
83
89
|
|
|
84
90
|
1) READ the task \u2014 understand the architectural question: new system, scaling challenge, tech debt, migration.
|
|
85
|
-
2) EXPLORE the full codebase
|
|
86
|
-
3)
|
|
87
|
-
4)
|
|
88
|
-
5)
|
|
89
|
-
6)
|
|
91
|
+
2) EXPLORE the full codebase to map dependencies, layers, and boundaries.
|
|
92
|
+
3) THINK \u2014 use the office-hours skill to challenge premises and explore alternatives before committing to a direction.
|
|
93
|
+
4) ANALYZE trade-offs \u2014 document at least two alternative approaches with pros/cons for each.
|
|
94
|
+
5) DESIGN the solution \u2014 define component boundaries, data flow, API contracts, and failure modes.
|
|
95
|
+
6) REVIEW \u2014 use plan-eng-review to validate the technical plan against engineering standards.
|
|
96
|
+
7) DOCUMENT the decision \u2014 write an ADR explaining the chosen approach and rejected alternatives.
|
|
97
|
+
8) COMMUNICATE \u2014 set context for the team explaining the architectural direction and constraints.
|
|
90
98
|
|
|
91
99
|
## RULES
|
|
92
100
|
|
|
@@ -98,14 +106,18 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
98
106
|
- Think in interfaces first, implementations second.
|
|
99
107
|
- Flag technical debt explicitly; don't let it accumulate silently.`,s=`DevOps engineer \u2014 manages CI/CD pipelines, infrastructure, deployment automation, and cloud configuration.
|
|
100
108
|
|
|
109
|
+
Uses \`ship\` for automated deployment pipelines and \`canary\` for post-deploy monitoring. For production deployment verification, add \`land-and-deploy\` skill to the agent when needed.
|
|
110
|
+
|
|
101
111
|
## WORKFLOW
|
|
102
112
|
|
|
103
113
|
1) READ the task \u2014 identify the scope: pipeline fix, infra provisioning, deployment config, monitoring setup.
|
|
104
|
-
2) EXPLORE current infrastructure and CI/CD config
|
|
105
|
-
3) DESIGN the change
|
|
114
|
+
2) EXPLORE current infrastructure and CI/CD config to understand the existing setup.
|
|
115
|
+
3) DESIGN the change \u2014 plan the infrastructure or pipeline modification with rollback strategy.
|
|
106
116
|
4) IMPLEMENT \u2014 write IaC (Terraform, CloudFormation, Docker, K8s manifests) or pipeline configs (GitHub Actions, GitLab CI).
|
|
107
117
|
5) VALIDATE \u2014 dry-run or plan the change; verify no destructive modifications to production resources.
|
|
108
|
-
6)
|
|
118
|
+
6) DEPLOY \u2014 use the ship skill for structured deployment with health checks.
|
|
119
|
+
7) MONITOR \u2014 use canary skill for post-deploy verification.
|
|
120
|
+
8) DOCUMENT \u2014 update runbooks, env variable lists, and deployment docs.
|
|
109
121
|
|
|
110
122
|
## RULES
|
|
111
123
|
|
|
@@ -117,15 +129,21 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
117
129
|
- Prefer declarative config over imperative scripts.
|
|
118
130
|
- Monitor cost implications of infrastructure changes.`,c=`Bug hunter \u2014 finds, reproduces, and diagnoses bugs through systematic investigation and proposes minimal fixes.
|
|
119
131
|
|
|
132
|
+
Uses the \`investigate\` library skill for structured debugging with root cause methodology, 3-strike hypothesis testing, scope lock, and 5-file blast radius check.
|
|
133
|
+
|
|
120
134
|
## WORKFLOW
|
|
121
135
|
|
|
122
|
-
1) READ the bug report
|
|
123
|
-
2)
|
|
136
|
+
1) READ the bug report \u2014 extract symptoms, reproduction steps, and expected behavior.
|
|
137
|
+
2) INVESTIGATE \u2014 follow the investigate skill's phased approach:
|
|
138
|
+
a) Collect symptoms and trace the execution path.
|
|
139
|
+
b) Scope lock \u2014 freeze edits to the affected module.
|
|
140
|
+
c) Form hypotheses and test them (3-strike rule).
|
|
141
|
+
d) Implement minimal fix with regression test.
|
|
142
|
+
e) Verify with 5-file blast radius check.
|
|
124
143
|
3) REPRODUCE \u2014 write a failing test that captures the bug before attempting any fix.
|
|
125
|
-
4)
|
|
126
|
-
5)
|
|
127
|
-
6)
|
|
128
|
-
7) REPORT \u2014 set context explaining the root cause, the fix, and any related areas that may have the same issue.
|
|
144
|
+
4) FIX \u2014 apply the minimal change that resolves the root cause. Avoid collateral refactoring.
|
|
145
|
+
5) VERIFY \u2014 confirm the failing test now passes and no existing tests regress.
|
|
146
|
+
6) REPORT \u2014 structured debug report explaining root cause, fix, and related areas.
|
|
129
147
|
|
|
130
148
|
## RULES
|
|
131
149
|
|
|
@@ -136,10 +154,12 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
136
154
|
- Never suppress errors to hide bugs \u2014 surface them properly.
|
|
137
155
|
- If the bug is in a dependency, document the workaround and file upstream.`,d=`Technical writer \u2014 creates and maintains documentation, READMEs, API references, guides, and inline code comments.
|
|
138
156
|
|
|
157
|
+
Uses \`document-release\` for automated post-ship documentation updates, ensuring docs stay in sync with code changes.
|
|
158
|
+
|
|
139
159
|
## WORKFLOW
|
|
140
160
|
|
|
141
161
|
1) READ the task \u2014 determine the documentation need: new feature docs, API reference, migration guide, README update.
|
|
142
|
-
2) EXPLORE the codebase
|
|
162
|
+
2) EXPLORE the codebase to understand the feature, its API surface, configuration options, and edge cases.
|
|
143
163
|
3) OUTLINE the document structure \u2014 headings, sections, and key points to cover.
|
|
144
164
|
4) WRITE using clear, concise language:
|
|
145
165
|
- Lead with the most important information (inverted pyramid).
|
|
@@ -157,17 +177,20 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
157
177
|
- Code examples must be complete and runnable \u2014 no pseudo-code in docs.
|
|
158
178
|
- Never document internal implementation details in user-facing docs.`,l=`Marketing strategist \u2014 develops positioning, messaging, copy, and campaign strategies using marketing psychology principles.
|
|
159
179
|
|
|
180
|
+
Uses \`office-hours\` for product reframing and premise challenge before crafting positioning.
|
|
181
|
+
|
|
160
182
|
## WORKFLOW
|
|
161
183
|
|
|
162
184
|
1) READ the task \u2014 identify the marketing objective: positioning, landing page copy, campaign plan, competitor analysis.
|
|
163
|
-
2)
|
|
164
|
-
3)
|
|
165
|
-
4)
|
|
185
|
+
2) THINK \u2014 use office-hours to challenge assumptions and reframe the product from the customer's perspective.
|
|
186
|
+
3) RESEARCH the product and market \u2014 understand the target audience, pain points, and competitive landscape.
|
|
187
|
+
4) STRATEGIZE \u2014 define messaging pillars, value propositions, and differentiation angles.
|
|
188
|
+
5) CREATE the deliverable:
|
|
166
189
|
- Copy: headlines, body text, CTAs \u2014 with A/B variants.
|
|
167
190
|
- Strategy: channel plan, funnel stages, KPIs.
|
|
168
191
|
- Analysis: competitive matrix, SWOT, positioning map.
|
|
169
|
-
|
|
170
|
-
|
|
192
|
+
6) REVIEW \u2014 check for clarity, consistency, and alignment with brand voice.
|
|
193
|
+
7) DELIVER \u2014 commit artifacts and set context with rationale for the chosen approach.
|
|
171
194
|
|
|
172
195
|
## RULES
|
|
173
196
|
|
|
@@ -181,7 +204,7 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
181
204
|
## WORKFLOW
|
|
182
205
|
|
|
183
206
|
1) READ the task \u2014 understand the content goal: thought leadership, tutorial, announcement, social post.
|
|
184
|
-
2) RESEARCH the topic
|
|
207
|
+
2) RESEARCH the topic \u2014 gather key points, statistics, and angles that resonate with the target audience.
|
|
185
208
|
3) OUTLINE the content structure \u2014 hook, key sections, CTA. For long-form, plan 3-5 main sections.
|
|
186
209
|
4) WRITE the first draft:
|
|
187
210
|
- Hook the reader in the first two sentences.
|
|
@@ -202,10 +225,10 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
202
225
|
## WORKFLOW
|
|
203
226
|
|
|
204
227
|
1) READ the task \u2014 identify the growth lever: onboarding funnel, activation rate, retention loop, referral mechanism.
|
|
205
|
-
2) ANALYZE current metrics
|
|
228
|
+
2) ANALYZE current metrics \u2014 map the funnel, identify drop-off points, and size opportunities.
|
|
206
229
|
3) HYPOTHESIZE \u2014 formulate a testable hypothesis: "If we [change X], then [metric Y] will improve by [Z%] because [reason]."
|
|
207
230
|
4) DESIGN the experiment \u2014 define the test, control group, success metric, sample size, and duration.
|
|
208
|
-
5) IMPLEMENT \u2014 build the experiment (feature flag, A/B test, new flow)
|
|
231
|
+
5) IMPLEMENT \u2014 build the experiment (feature flag, A/B test, new flow) if code changes are needed.
|
|
209
232
|
6) REPORT \u2014 document the experiment design, expected impact, and measurement plan.
|
|
210
233
|
|
|
211
234
|
## RULES
|
|
@@ -217,11 +240,13 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
217
240
|
- Never ship a "growth hack" that degrades user experience long-term.
|
|
218
241
|
- Document results of every experiment, including failures \u2014 they are data.`,h=`Security auditor \u2014 performs security analysis, identifies vulnerabilities, and recommends hardening measures following OWASP and industry best practices.
|
|
219
242
|
|
|
243
|
+
Uses the \`review\` skill for structured code review with security focus, and \`careful\`/\`guard\` skills for safety guardrails on destructive operations.
|
|
244
|
+
|
|
220
245
|
## WORKFLOW
|
|
221
246
|
|
|
222
247
|
1) READ the task \u2014 determine the audit scope: full codebase review, specific feature, dependency check, or incident response.
|
|
223
|
-
2) EXPLORE the attack surface
|
|
224
|
-
3) AUDIT systematically
|
|
248
|
+
2) EXPLORE the attack surface \u2014 map entry points (APIs, forms, file uploads), auth boundaries, and data flows.
|
|
249
|
+
3) AUDIT systematically:
|
|
225
250
|
a) OWASP Top 10 \u2014 injection, broken auth, XSS, CSRF, insecure deserialization.
|
|
226
251
|
b) Dependency vulnerabilities \u2014 outdated packages, known CVEs.
|
|
227
252
|
c) Secrets \u2014 hardcoded credentials, API keys in code or config.
|
|
@@ -240,17 +265,19 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
240
265
|
- Recommend defense-in-depth \u2014 never rely on a single security control.
|
|
241
266
|
- Flag any plaintext secrets immediately as Critical, even in test code.`,m=`Performance engineer \u2014 profiles, benchmarks, and optimizes code for speed, memory efficiency, and scalability.
|
|
242
267
|
|
|
268
|
+
Uses the \`benchmark\` library skill for structured performance benchmarking with before/after metrics, regression detection, and reporting.
|
|
269
|
+
|
|
243
270
|
## WORKFLOW
|
|
244
271
|
|
|
245
272
|
1) READ the task \u2014 identify the performance concern: slow endpoint, high memory usage, scaling bottleneck, build time.
|
|
246
|
-
2) MEASURE first
|
|
273
|
+
2) MEASURE first \u2014 use the benchmark skill to profile the current state, establish baseline metrics (latency, throughput, memory, CPU).
|
|
247
274
|
3) ANALYZE \u2014 identify hotspots, bottlenecks, and inefficient patterns. Look for:
|
|
248
275
|
- O(n^2) or worse algorithms where O(n log n) or O(n) is possible.
|
|
249
276
|
- Unnecessary allocations, memory leaks, missing cleanup.
|
|
250
277
|
- N+1 queries, missing indexes, unoptimized joins.
|
|
251
278
|
- Blocking I/O on the main thread, missing parallelism.
|
|
252
|
-
4) OPTIMIZE
|
|
253
|
-
5) BENCHMARK \u2014
|
|
279
|
+
4) OPTIMIZE \u2014 apply targeted fixes. One optimization per commit for clear attribution.
|
|
280
|
+
5) BENCHMARK \u2014 use the benchmark skill to measure improvement against baseline. Report absolute numbers and percentage change.
|
|
254
281
|
6) DOCUMENT \u2014 set context with before/after metrics and explain the optimization rationale.
|
|
255
282
|
|
|
256
283
|
## RULES
|
|
@@ -265,9 +292,9 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
265
292
|
## WORKFLOW
|
|
266
293
|
|
|
267
294
|
1) READ the task \u2014 identify the data need: new pipeline, query optimization, schema migration, analytics report.
|
|
268
|
-
2) EXPLORE existing data models and pipelines
|
|
295
|
+
2) EXPLORE existing data models and pipelines to understand the current data architecture.
|
|
269
296
|
3) DESIGN the data flow \u2014 source, transformation steps, destination, error handling, and idempotency strategy.
|
|
270
|
-
4) IMPLEMENT
|
|
297
|
+
4) IMPLEMENT:
|
|
271
298
|
- Schema changes with migrations (never modify in place).
|
|
272
299
|
- ETL logic with proper error handling and retry.
|
|
273
300
|
- Queries optimized for the target database engine.
|
|
@@ -283,12 +310,14 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
283
310
|
- Log pipeline metrics: rows processed, duration, error count.
|
|
284
311
|
- Never run DELETE or UPDATE without a WHERE clause and a backup plan.`,f=`Full-stack developer \u2014 works across the entire stack, from database and API to UI components and styling.
|
|
285
312
|
|
|
313
|
+
Uses \`review\` for self-review of diffs before transitioning, and \`design-review\` for frontend visual consistency checks.
|
|
314
|
+
|
|
286
315
|
## WORKFLOW
|
|
287
316
|
|
|
288
317
|
1) READ the task \u2014 identify scope: does it span backend and frontend, or is it a vertical slice of a feature?
|
|
289
|
-
2) EXPLORE both backend and frontend code
|
|
318
|
+
2) EXPLORE both backend and frontend code to understand existing patterns and data flow end-to-end.
|
|
290
319
|
3) PLAN the implementation \u2014 define the API contract first (request/response shapes), then plan UI components that consume it.
|
|
291
|
-
4) IMPLEMENT BACKEND
|
|
320
|
+
4) IMPLEMENT BACKEND:
|
|
292
321
|
- Data model, validation, service logic, API endpoint.
|
|
293
322
|
- Error handling with proper HTTP status codes and messages.
|
|
294
323
|
5) IMPLEMENT FRONTEND:
|
|
@@ -296,7 +325,7 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
296
325
|
- Loading, error, and empty states.
|
|
297
326
|
- Responsive layout and accessibility.
|
|
298
327
|
6) TEST \u2014 backend unit/integration tests + frontend component tests. Verify the full data flow works end-to-end.
|
|
299
|
-
7) REVIEW
|
|
328
|
+
7) SELF-REVIEW \u2014 use the review skill to check your own diff holistically before transitioning.
|
|
300
329
|
|
|
301
330
|
## RULES
|
|
302
331
|
|
|
@@ -305,4 +334,4 @@ var n=`Backend engineer \u2014 builds APIs, services, database layers, and serve
|
|
|
305
334
|
- Keep frontend and backend changes in the same branch for atomic features.
|
|
306
335
|
- Follow each layer's conventions independently \u2014 backend patterns for backend, frontend patterns for frontend.
|
|
307
336
|
- Handle every error state in the UI \u2014 users should never see a blank screen.
|
|
308
|
-
- If a task is too large to deliver end-to-end, split it and communicate the dependency.`,y=[{key:"backend-dev",name:"Backend Developer",description:"APIs, databases, backend services",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["feature-dev:feature-dev","feature-dev:code-explorer"],role:n},{key:"frontend-dev",name:"Frontend Developer",description:"React, UI components, CSS, responsive design",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["feature-dev:feature-dev","feature-dev:code-explorer"
|
|
337
|
+
- If a task is too large to deliver end-to-end, split it and communicate the dependency.`,y=[{key:"backend-dev",name:"Backend Developer",description:"APIs, databases, backend services",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["review","careful","feature-dev:feature-dev","feature-dev:code-explorer"],role:n},{key:"frontend-dev",name:"Frontend Developer",description:"React, UI components, CSS, responsive design",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["design-review","review","feature-dev:feature-dev","feature-dev:code-explorer"],role:a},{key:"qa-engineer",name:"QA Engineer",description:"Test writing, coverage analysis, quality assurance, browser testing",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["qa","testing-suite:generate-tests","testing-suite:test-coverage"],role:i},{key:"code-reviewer",name:"Code Reviewer",description:"PR review with auto-fix, adversarial review, security checks",adapter:"claude",model:"claude-opus-4-6",approval_policy:"suggest",skills:["review","careful","feature-dev:code-reviewer","feature-dev:code-explorer"],role:r},{key:"architect",name:"Architect",description:"System design, architecture decisions, tech leadership",adapter:"claude",model:"claude-opus-4-6",approval_policy:"suggest",skills:["plan-eng-review","office-hours","feature-dev:code-architect","feature-dev:code-explorer"],role:o},{key:"devops-engineer",name:"DevOps Engineer",description:"CI/CD, infrastructure, deployment, monitoring",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["ship","canary","devops-automation:cloud-architect"],role:s},{key:"bug-hunter",name:"Bug Hunter",description:"Systematic debugging, root cause analysis, minimal fixes",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["investigate","careful","feature-dev:feature-dev","feature-dev:code-explorer"],role:c},{key:"tech-writer",name:"Technical 
Writer",description:"Documentation, READMEs, API docs, release notes",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["document-release","review","feature-dev:code-explorer"],role:d},{key:"marketer",name:"Marketer",description:"Marketing strategy, positioning, copy, campaigns",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["office-hours"],role:l},{key:"content-creator",name:"Content Creator",description:"Blog posts, articles, social media content",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["office-hours"],role:p},{key:"growth-hacker",name:"Growth Hacker",description:"Growth experiments, analytics, user acquisition",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["office-hours","feature-dev:feature-dev"],role:u},{key:"security-auditor",name:"Security Auditor",description:"Security scanning, vulnerability analysis, OWASP, guardrails",adapter:"claude",model:"claude-opus-4-6",approval_policy:"suggest",skills:["review","careful","guard","feature-dev:code-reviewer"],role:h},{key:"performance-engineer",name:"Performance Engineer",description:"Optimization, profiling, benchmarks, load testing",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["benchmark","investigate","feature-dev:feature-dev","feature-dev:code-explorer"],role:m},{key:"data-engineer",name:"Data Engineer",description:"Data pipelines, ETL, analytics, SQL",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["careful","feature-dev:feature-dev","feature-dev:code-explorer"],role:g},{key:"fullstack-dev",name:"Full-Stack Developer",description:"End-to-end development, frontend and backend",adapter:"claude",model:"claude-sonnet-4-6",approval_policy:"auto",skills:["review","design-review","feature-dev:feature-dev","feature-dev:code-explorer"],role:f}];function v(e){return y.find(t=>t.key===e)}export{y as a,v as b};
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import { pathExists } from './chunk-W3J7CURM.js';
|
|
2
|
+
import { NotInitializedError } from './chunk-NLQAJ7TW.js';
|
|
3
|
+
import path from 'path';
|
|
4
|
+
import 'fs';
|
|
5
|
+
|
|
6
|
+
var ORCHESTRY_DIR = ".orchestry";
// File-name identifiers may contain only letters, digits, '.', '_' and '-'.
// Anything else (path separators, '..', NUL, …) is rejected by sanitizeId().
var ID_PATTERN = /^[A-Za-z0-9._-]+$/;
/**
 * Path resolution for the .orchestry/ project directory.
 *
 * Every on-disk location (config, state, lock, tasks, agents, runs,
 * templates, logs, context, messages, goals, teams, attachments) is derived
 * here from a single projectRoot. Identifiers that become file names are
 * passed through sanitizeId() so a crafted id cannot escape the directory.
 */
var Paths = class {
  // Absolute or relative project root this instance resolves against.
  projectRoot;
  constructor(projectRoot) {
    this.projectRoot = projectRoot;
  }
  /** Root .orchestry/ directory */
  get root() {
    return path.join(this.projectRoot, ORCHESTRY_DIR);
  }
  /** YAML project configuration file. */
  get configPath() {
    return path.join(this.root, "config.yml");
  }
  /** JSON orchestrator state file. */
  get statePath() {
    return path.join(this.root, "state.json");
  }
  /** Lock file guarding concurrent orchestrator instances. */
  get lockPath() {
    return path.join(this.root, "orchestry.lock");
  }
  get tasksDir() {
    return path.join(this.root, "tasks");
  }
  get agentsDir() {
    return path.join(this.root, "agents");
  }
  get runsDir() {
    return path.join(this.root, "runs");
  }
  get templatesDir() {
    return path.join(this.root, "templates");
  }
  get logsDir() {
    return path.join(this.root, "logs");
  }
  get contextDir() {
    return path.join(this.root, "context");
  }
  /** context/<key>.json — key is validated against ID_PATTERN. */
  contextPath(key) {
    return path.join(this.contextDir, `${sanitizeId(key)}.json`);
  }
  get messagesDir() {
    return path.join(this.root, "messages");
  }
  /** messages/<id>.json */
  messagePath(id) {
    return path.join(this.messagesDir, `${sanitizeId(id)}.json`);
  }
  get goalsDir() {
    return path.join(this.root, "goals");
  }
  /** goals/<id>.yml */
  goalPath(id) {
    return path.join(this.goalsDir, `${sanitizeId(id)}.yml`);
  }
  get teamsDir() {
    return path.join(this.root, "teams");
  }
  get attachmentsDir() {
    return path.join(this.root, "attachments");
  }
  /** attachments/<taskId>/ — one subdirectory per task. */
  taskAttachmentsDir(taskId) {
    return path.join(this.attachmentsDir, sanitizeId(taskId));
  }
  /** teams/<id>.yml */
  teamPath(id) {
    return path.join(this.teamsDir, `${sanitizeId(id)}.yml`);
  }
  /** .orchestry/.gitignore controlling which state files are committed. */
  get gitignorePath() {
    return path.join(this.root, ".gitignore");
  }
  get workspaceExcludePath() {
    return path.join(this.root, "workspace-exclude");
  }
  /** tasks/<id>.yml */
  taskPath(id) {
    return path.join(this.tasksDir, `${sanitizeId(id)}.yml`);
  }
  /** agents/<id>.yml */
  agentPath(id) {
    return path.join(this.agentsDir, `${sanitizeId(id)}.yml`);
  }
  /** runs/<id>.json — run metadata. */
  runPath(id) {
    return path.join(this.runsDir, `${sanitizeId(id)}.json`);
  }
  /** runs/<id>.jsonl — append-only run event log. */
  runEventsPath(id) {
    return path.join(this.runsDir, `${sanitizeId(id)}.jsonl`);
  }
  defaultTemplatePath() {
    return path.join(this.templatesDir, "default.md");
  }
  /** True when the .orchestry/ directory exists. */
  async isInitialized() {
    return pathExists(this.root);
  }
  /**
   * Throw NotInitializedError unless the project has been initialized
   * (i.e. .orchestry/ exists).
   */
  async requireInit() {
    if (!await this.isInitialized()) {
      throw new NotInitializedError();
    }
  }
};
/**
 * Validate an identifier for use in file paths.
 * Only [A-Za-z0-9._-] characters are allowed; anything else throws,
 * preventing path traversal through crafted ids.
 */
function sanitizeId(id) {
  if (!ID_PATTERN.test(id)) {
    throw new Error(`Invalid identifier: "${id}"`);
  }
  return id;
}
/**
 * Validate that a workspace path resolves to a location inside (or equal
 * to) the project root; throws otherwise. Uses path.relative() rather than
 * a string-prefix test: a prefix check against `root + path.sep` wrongly
 * rejects everything when the project root is the filesystem root ("/"),
 * and is brittle about trailing separators.
 */
function validateWorkspacePath(workspacePath, projectRoot) {
  const resolved = path.resolve(workspacePath);
  const root = path.resolve(projectRoot);
  const rel = path.relative(root, resolved);
  // rel === "" means the paths are identical; a leading ".." segment or an
  // absolute result (different drive on Windows) means it escapes the root.
  if (rel !== "" && (rel === ".." || rel.startsWith(".." + path.sep) || path.isAbsolute(rel))) {
    throw new Error(`Workspace path "${workspacePath}" is outside project root`);
  }
}
|
|
113
|
+
|
|
114
|
+
export { Paths, sanitizeId, validateWorkspacePath };
|
|
115
|
+
//# sourceMappingURL=chunk-J7ITYXE6.js.map
|
|
116
|
+
//# sourceMappingURL=chunk-J7ITYXE6.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/infrastructure/storage/paths.ts"],"names":[],"mappings":";;;;;AAYO,IAAM,aAAA,GAAgB,YAAA;AAC7B,IAAM,UAAA,GAAa,mBAAA;AAEZ,IAAM,QAAN,MAAY;AAAA,EACjB,YAA6B,WAAA,EAAqB;AAArB,IAAA,IAAA,CAAA,WAAA,GAAA,WAAA;AAAA,EAAsB;AAAA;AAAA,EAGnD,IAAI,IAAA,GAAe;AACjB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,WAAA,EAAa,aAAa,CAAA;AAAA,EAClD;AAAA,EAEA,IAAI,UAAA,GAAqB;AACvB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,YAAY,CAAA;AAAA,EAC1C;AAAA,EAEA,IAAI,SAAA,GAAoB;AACtB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,YAAY,CAAA;AAAA,EAC1C;AAAA,EAEA,IAAI,QAAA,GAAmB;AACrB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,gBAAgB,CAAA;AAAA,EAC9C;AAAA,EAEA,IAAI,QAAA,GAAmB;AACrB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,OAAO,CAAA;AAAA,EACrC;AAAA,EAEA,IAAI,SAAA,GAAoB;AACtB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,QAAQ,CAAA;AAAA,EACtC;AAAA,EAEA,IAAI,OAAA,GAAkB;AACpB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,MAAM,CAAA;AAAA,EACpC;AAAA,EAEA,IAAI,YAAA,GAAuB;AACzB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,WAAW,CAAA;AAAA,EACzC;AAAA,EAEA,IAAI,OAAA,GAAkB;AACpB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,MAAM,CAAA;AAAA,EACpC;AAAA,EAEA,IAAI,UAAA,GAAqB;AACvB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,SAAS,CAAA;AAAA,EACvC;AAAA,EAEA,YAAY,GAAA,EAAqB;AAC/B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,UAAA,EAAY,GAAG,UAAA,CAAW,GAAG,CAAC,CAAA,KAAA,CAAO,CAAA;AAAA,EAC7D;AAAA,EAEA,IAAI,WAAA,GAAsB;AACxB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,UAAU,CAAA;AAAA,EACxC;AAAA,EAEA,YAAY,EAAA,EAAoB;AAC9B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,WAAA,EAAa,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,KAAA,CAAO,CAAA;AAAA,EAC7D;AAAA,EAEA,IAAI,QAAA,GAAmB;AACrB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,OAAO,CAAA;AAAA,EACrC;AAAA,EAEA,SAAS,EAAA,EAAoB;AAC3B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,QAAA,EAAU,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,IAAA,CAAM,CAAA;AAAA,EACzD;AAAA,EAEA,IAAI,QAAA,GAAmB;AACrB,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,OAAO,CAAA;AAAA,EACrC;AAAA,EAEA,IAA
I,cAAA,GAAyB;AAC3B,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,aAAa,CAAA;AAAA,EAC3C;AAAA,EAEA,mBAAmB,MAAA,EAAwB;AACzC,IAAA,OAAO,KAAK,IAAA,CAAK,IAAA,CAAK,cAAA,EAAgB,UAAA,CAAW,MAAM,CAAC,CAAA;AAAA,EAC1D;AAAA,EAEA,SAAS,EAAA,EAAoB;AAC3B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,QAAA,EAAU,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,IAAA,CAAM,CAAA;AAAA,EACzD;AAAA,EAEA,IAAI,aAAA,GAAwB;AAC1B,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,YAAY,CAAA;AAAA,EAC1C;AAAA,EAEA,IAAI,oBAAA,GAA+B;AACjC,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,IAAA,EAAM,mBAAmB,CAAA;AAAA,EACjD;AAAA,EAEA,SAAS,EAAA,EAAoB;AAC3B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,QAAA,EAAU,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,IAAA,CAAM,CAAA;AAAA,EACzD;AAAA,EAEA,UAAU,EAAA,EAAoB;AAC5B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,SAAA,EAAW,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,IAAA,CAAM,CAAA;AAAA,EAC1D;AAAA,EAEA,QAAQ,EAAA,EAAoB;AAC1B,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,OAAA,EAAS,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,KAAA,CAAO,CAAA;AAAA,EACzD;AAAA,EAEA,cAAc,EAAA,EAAoB;AAChC,IAAA,OAAO,IAAA,CAAK,KAAK,IAAA,CAAK,OAAA,EAAS,GAAG,UAAA,CAAW,EAAE,CAAC,CAAA,MAAA,CAAQ,CAAA;AAAA,EAC1D;AAAA,EAEA,mBAAA,GAA8B;AAC5B,IAAA,OAAO,IAAA,CAAK,IAAA,CAAK,IAAA,CAAK,YAAA,EAAc,YAAY,CAAA;AAAA,EAClD;AAAA,EAEA,MAAM,aAAA,GAAkC;AACtC,IAAA,OAAO,UAAA,CAAW,KAAK,IAAI,CAAA;AAAA,EAC7B;AAAA,EAEA,MAAM,WAAA,GAA6B;AACjC,IAAA,IAAI,CAAE,MAAM,IAAA,CAAK,aAAA,EAAc,EAAI;AACjC,MAAA,MAAM,IAAI,mBAAA,EAAoB;AAAA,IAChC;AAAA,EACF;AACF;AAQO,SAAS,WAAW,EAAA,EAAoB;AAC7C,EAAA,IAAI,CAAC,UAAA,CAAW,IAAA,CAAK,EAAE,CAAA,EAAG;AACxB,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,qBAAA,EAAwB,EAAE,CAAA,CAAA,CAAG,CAAA;AAAA,EAC/C;AACA,EAAA,OAAO,EAAA;AACT;AAMO,SAAS,qBAAA,CAAsB,eAAuB,WAAA,EAA2B;AACtF,EAAA,MAAM,QAAA,GAAW,IAAA,CAAK,OAAA,CAAQ,aAAa,CAAA;AAC3C,EAAA,MAAM,IAAA,GAAO,IAAA,CAAK,OAAA,CAAQ,WAAW,CAAA;AAErC,EAAA,IAAI,CAAC,SAAS,UAAA,CAAW,IAAA,GAAO,KAAK,GAAG,CAAA,IAAK,aAAa,IAAA,EAAM;AAC9D,IAAA,MAAM,IAAI,KAAA,CAAM,CAAA,gBAAA,EAAmB,aAAa,CAAA,yBAAA,CAA2B,CAAA;AAAA,EAC7E;AACF","file":"chunk-J7ITYXE6.js","sourcesContent":["/**\n * Path resolution for .orchestry/ 
directory.\n *\n * All path construction goes through this module.\n * Validates initialization state and sanitizes identifiers.\n */\n\nimport path from 'node:path';\nimport { accessSync } from 'node:fs';\nimport { NotInitializedError } from '../../domain/errors.js';\nimport { pathExists } from './fs-utils.js';\n\nexport const ORCHESTRY_DIR = '.orchestry';\nconst ID_PATTERN = /^[A-Za-z0-9._-]+$/;\n\nexport class Paths {\n constructor(private readonly projectRoot: string) {}\n\n /** Root .orchestry/ directory */\n get root(): string {\n return path.join(this.projectRoot, ORCHESTRY_DIR);\n }\n\n get configPath(): string {\n return path.join(this.root, 'config.yml');\n }\n\n get statePath(): string {\n return path.join(this.root, 'state.json');\n }\n\n get lockPath(): string {\n return path.join(this.root, 'orchestry.lock');\n }\n\n get tasksDir(): string {\n return path.join(this.root, 'tasks');\n }\n\n get agentsDir(): string {\n return path.join(this.root, 'agents');\n }\n\n get runsDir(): string {\n return path.join(this.root, 'runs');\n }\n\n get templatesDir(): string {\n return path.join(this.root, 'templates');\n }\n\n get logsDir(): string {\n return path.join(this.root, 'logs');\n }\n\n get contextDir(): string {\n return path.join(this.root, 'context');\n }\n\n contextPath(key: string): string {\n return path.join(this.contextDir, `${sanitizeId(key)}.json`);\n }\n\n get messagesDir(): string {\n return path.join(this.root, 'messages');\n }\n\n messagePath(id: string): string {\n return path.join(this.messagesDir, `${sanitizeId(id)}.json`);\n }\n\n get goalsDir(): string {\n return path.join(this.root, 'goals');\n }\n\n goalPath(id: string): string {\n return path.join(this.goalsDir, `${sanitizeId(id)}.yml`);\n }\n\n get teamsDir(): string {\n return path.join(this.root, 'teams');\n }\n\n get attachmentsDir(): string {\n return path.join(this.root, 'attachments');\n }\n\n taskAttachmentsDir(taskId: string): string {\n return path.join(this.attachmentsDir, 
sanitizeId(taskId));\n }\n\n teamPath(id: string): string {\n return path.join(this.teamsDir, `${sanitizeId(id)}.yml`);\n }\n\n get gitignorePath(): string {\n return path.join(this.root, '.gitignore');\n }\n\n get workspaceExcludePath(): string {\n return path.join(this.root, 'workspace-exclude');\n }\n\n taskPath(id: string): string {\n return path.join(this.tasksDir, `${sanitizeId(id)}.yml`);\n }\n\n agentPath(id: string): string {\n return path.join(this.agentsDir, `${sanitizeId(id)}.yml`);\n }\n\n runPath(id: string): string {\n return path.join(this.runsDir, `${sanitizeId(id)}.json`);\n }\n\n runEventsPath(id: string): string {\n return path.join(this.runsDir, `${sanitizeId(id)}.jsonl`);\n }\n\n defaultTemplatePath(): string {\n return path.join(this.templatesDir, 'default.md');\n }\n\n async isInitialized(): Promise<boolean> {\n return pathExists(this.root);\n }\n\n async requireInit(): Promise<void> {\n if (!(await this.isInitialized())) {\n throw new NotInitializedError();\n }\n }\n}\n\n/**\n * Validate an identifier for use in file paths.\n * Only allows [A-Za-z0-9._-] characters.\n * Rejects identifiers containing forbidden characters (path separators, etc.)\n * to prevent path traversal attacks.\n */\nexport function sanitizeId(id: string): string {\n if (!ID_PATTERN.test(id)) {\n throw new Error(`Invalid identifier: \"${id}\"`);\n }\n return id;\n}\n\n/**\n * Validate that a workspace path is within the project root.\n * Prevents path traversal attacks.\n */\nexport function validateWorkspacePath(workspacePath: string, projectRoot: string): void {\n const resolved = path.resolve(workspacePath);\n const root = path.resolve(projectRoot);\n\n if (!resolved.startsWith(root + path.sep) && resolved !== root) {\n throw new Error(`Workspace path \"${workspacePath}\" is outside project root`);\n }\n}\n\n/**\n * Module-level cache for findProjectRoot().\n * Key: resolved startDir, Value: found project root.\n * Avoids repeated accessSync() traversals on every 
CLI invocation.\n */\nconst projectRootCache = new Map<string, string>();\n\n/**\n * Resolve project root by walking up from cwd looking for .orchestry/.\n * Returns cwd if not found (for init command).\n *\n * Results are cached per startDir to avoid redundant filesystem traversals.\n */\nexport function findProjectRoot(startDir: string = process.cwd()): string {\n const resolvedStart = path.resolve(startDir);\n const cached = projectRootCache.get(resolvedStart);\n if (cached !== undefined) return cached;\n\n let dir = resolvedStart;\n const root = path.parse(dir).root;\n\n while (dir !== root) {\n try {\n accessSync(path.join(dir, '.orchestry'));\n projectRootCache.set(resolvedStart, dir);\n return dir;\n } catch {\n // Not found, go up\n }\n dir = path.dirname(dir);\n }\n\n // Not found — return resolved dir (for init command)\n projectRootCache.set(resolvedStart, resolvedStart);\n return resolvedStart;\n}\n\n/**\n * Clear the findProjectRoot cache.\n * Useful in tests or after `orch init` changes the project structure.\n */\nexport function clearProjectRootCache(): void {\n projectRootCache.clear();\n}\n"]}
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { listFiles, pathExists } from './chunk-W3J7CURM.js';
|
|
2
|
+
import { readFile } from 'fs/promises';
|
|
3
|
+
import { fileURLToPath } from 'url';
|
|
4
|
+
import { join, dirname } from 'path';
|
|
5
|
+
|
|
6
|
+
// Valid library skill name: lowercase alphanumerics and hyphens only.
var VALID_SKILL_NAME = /^[a-z0-9-]+$/;
/**
 * Locate the bundled skills/library/ directory by walking up (at most 5
 * levels) from the directory containing this module, so resolution works
 * from both the dist/ bundle and the src/ tree. Falls back to a fixed
 * three-levels-up guess when nothing is found.
 */
async function resolveLibraryDir() {
  const thisDir = dirname(fileURLToPath(import.meta.url));
  let dir = thisDir;
  for (let i = 0; i < 5; i++) {
    const candidate = join(dir, "skills", "library");
    if (await pathExists(candidate)) return candidate;
    dir = dirname(dir);
  }
  return join(thisDir, "..", "..", "..", "skills", "library");
}
/**
 * Loads Markdown skill content from the skill library and formats it for
 * inclusion in agent prompts. Skill names containing ':' are treated as
 * externally-handled skills and skipped here. File contents are cached for
 * the lifetime of the instance; a missing skill is cached as "" so the
 * warning is emitted only once per name.
 */
var SkillLoader = class {
  // name -> file content; "" marks a skill already known to be missing.
  cache = /* @__PURE__ */ new Map();
  // Resolves to the library directory (explicit or auto-detected).
  libraryDirPromise;
  // Sorted list of available skill names, computed on first use.
  availableCache = null;
  constructor(libraryDir) {
    this.libraryDirPromise = libraryDir ? Promise.resolve(libraryDir) : resolveLibraryDir();
  }
  /**
   * Render the given library skills as a single "## Skills" Markdown block.
   * Names containing ':' are filtered out; unresolvable names are dropped.
   * Returns "" when nothing resolves to content.
   */
  async loadSkills(skillNames) {
    const librarySkills = skillNames.filter((s) => !s.includes(":"));
    if (librarySkills.length === 0) return "";
    const results = await Promise.all(librarySkills.map((name) => this.loadOne(name)));
    const sections = librarySkills.map((name, i) => results[i] ? `### ${name}\n\n${results[i]}` : null).filter((s) => s !== null);
    if (sections.length === 0) return "";
    return `## Skills\n\n${sections.join("\n\n")}`;
  }
  /** List all available library skill names, sorted; cached after first call. */
  async listAvailable() {
    // Compare against null explicitly: an empty library caches [] which is
    // falsy, and a truthiness check would defeat the cache in that case,
    // re-listing the directory on every call.
    if (this.availableCache !== null) return this.availableCache;
    const dir = await this.libraryDirPromise;
    const entries = await listFiles(dir, ".md");
    this.availableCache = entries.map((e) => e.replace(/\.md$/, "")).sort();
    return this.availableCache;
  }
  /**
   * Load a single skill file. Returns null for an invalid name or a
   * missing file (logging the miss to stderr the first time).
   */
  async loadOne(name) {
    const cached = this.cache.get(name);
    if (cached !== void 0) return cached || null;
    if (!VALID_SKILL_NAME.test(name)) {
      return null;
    }
    const dir = await this.libraryDirPromise;
    const filePath = join(dir, `${name}.md`);
    try {
      const content = await readFile(filePath, "utf8");
      this.cache.set(name, content);
      return content;
    } catch {
      process.stderr.write(`[orch] skill library: "${name}" not found in ${dir}\n`);
      this.cache.set(name, "");
      return null;
    }
  }
};
|
|
63
|
+
|
|
64
|
+
export { SkillLoader };
|
|
65
|
+
//# sourceMappingURL=chunk-U2JVMD2G.js.map
|
|
66
|
+
//# sourceMappingURL=chunk-U2JVMD2G.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/infrastructure/skills/skill-loader.ts"],"names":[],"mappings":";;;;;AAgBA,IAAM,gBAAA,GAAmB,cAAA;AAMzB,eAAe,iBAAA,GAAqC;AAClD,EAAA,MAAM,OAAA,GAAU,OAAA,CAAQ,aAAA,CAAc,MAAA,CAAA,IAAA,CAAY,GAAG,CAAC,CAAA;AAGtD,EAAA,IAAI,GAAA,GAAM,OAAA;AACV,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,CAAA,EAAG,CAAA,EAAA,EAAK;AAC1B,IAAA,MAAM,SAAA,GAAY,IAAA,CAAK,GAAA,EAAK,QAAA,EAAU,SAAS,CAAA;AAC/C,IAAA,IAAI,MAAM,UAAA,CAAW,SAAS,CAAA,EAAG,OAAO,SAAA;AACxC,IAAA,GAAA,GAAM,QAAQ,GAAG,CAAA;AAAA,EACnB;AAGA,EAAA,OAAO,KAAK,OAAA,EAAS,IAAA,EAAM,IAAA,EAAM,IAAA,EAAM,UAAU,SAAS,CAAA;AAC5D;AAcO,IAAM,cAAN,MAA0C;AAAA,EAC9B,KAAA,uBAAY,GAAA,EAAoB;AAAA,EAChC,iBAAA;AAAA,EACT,cAAA,GAAkC,IAAA;AAAA,EAE1C,YAAY,UAAA,EAAqB;AAC/B,IAAA,IAAA,CAAK,oBAAoB,UAAA,GACrB,OAAA,CAAQ,OAAA,CAAQ,UAAU,IAC1B,iBAAA,EAAkB;AAAA,EACxB;AAAA,EAEA,MAAM,WAAW,UAAA,EAAuC;AACtD,IAAA,MAAM,aAAA,GAAgB,WAAW,MAAA,CAAO,CAAC,MAAM,CAAC,CAAA,CAAE,QAAA,CAAS,GAAG,CAAC,CAAA;AAC/D,IAAA,IAAI,aAAA,CAAc,MAAA,KAAW,CAAA,EAAG,OAAO,EAAA;AAEvC,IAAA,MAAM,OAAA,GAAU,MAAM,OAAA,CAAQ,GAAA,CAAI,aAAA,CAAc,GAAA,CAAI,CAAC,IAAA,KAAS,IAAA,CAAK,OAAA,CAAQ,IAAI,CAAC,CAAC,CAAA;AACjF,IAAA,MAAM,QAAA,GAAW,aAAA,CACd,GAAA,CAAI,CAAC,IAAA,EAAM,MAAO,OAAA,CAAQ,CAAC,CAAA,GAAI,CAAA,IAAA,EAAO,IAAI;;AAAA,EAAO,OAAA,CAAQ,CAAC,CAAC,CAAA,CAAA,GAAK,IAAK,EACrE,MAAA,CAAO,CAAC,CAAA,KAAmB,CAAA,KAAM,IAAI,CAAA;AAExC,IAAA,IAAI,QAAA,CAAS,MAAA,KAAW,CAAA,EAAG,OAAO,EAAA;AAClC,IAAA,OAAO,CAAA;;AAAA,EAAgB,QAAA,CAAS,IAAA,CAAK,MAAM,CAAC,CAAA,CAAA;AAAA,EAC9C;AAAA,EAEA,MAAM,aAAA,GAAmC;AACvC,IAAA,IAAI,IAAA,CAAK,cAAA,EAAgB,OAAO,IAAA,CAAK,cAAA;AAErC,IAAA,MAAM,GAAA,GAAM,MAAM,IAAA,CAAK,iBAAA;AACvB,IAAA,MAAM,OAAA,GAAU,MAAM,SAAA,CAAU,GAAA,EAAK,KAAK,CAAA;AAC1C,IAAA,IAAA,CAAK,cAAA,GAAiB,OAAA,CACnB,GAAA,CAAI,CAAC,CAAA,KAAM,CAAA,CAAE,OAAA,CAAQ,OAAA,EAAS,EAAE,CAAC,CAAA,CACjC,IAAA,EAAK;AACR,IAAA,OAAO,IAAA,CAAK,cAAA;AAAA,EACd;AAAA,EAEA,MAAc,QAAQ,IAAA,EAAsC;AAC1D,IAAA,MAAM,MAAA,GAAS,IAAA,CAAK,KAAA,CAAM,GAAA,CAAI,IAAI,CAAA;AAClC,IAAA,IAAI,MAAA,KAAW,MAAA,EAAW,OAAO,MAAA,IAAU,IAAA;AAE3C,IAAA,IAAI,CAAC,gB
AAA,CAAiB,IAAA,CAAK,IAAI,CAAA,EAAG;AAChC,MAAA,OAAO,IAAA;AAAA,IACT;AAEA,IAAA,MAAM,GAAA,GAAM,MAAM,IAAA,CAAK,iBAAA;AACvB,IAAA,MAAM,QAAA,GAAW,IAAA,CAAK,GAAA,EAAK,CAAA,EAAG,IAAI,CAAA,GAAA,CAAK,CAAA;AACvC,IAAA,IAAI;AACF,MAAA,MAAM,OAAA,GAAU,MAAM,QAAA,CAAS,QAAA,EAAU,MAAM,CAAA;AAC/C,MAAA,IAAA,CAAK,KAAA,CAAM,GAAA,CAAI,IAAA,EAAM,OAAO,CAAA;AAC5B,MAAA,OAAO,OAAA;AAAA,IACT,CAAA,CAAA,MAAQ;AACN,MAAA,OAAA,CAAQ,MAAA,CAAO,KAAA,CAAM,CAAA,uBAAA,EAA0B,IAAI,kBAAkB,GAAG;AAAA,CAAI,CAAA;AAC5E,MAAA,IAAA,CAAK,KAAA,CAAM,GAAA,CAAI,IAAA,EAAM,EAAE,CAAA;AACvB,MAAA,OAAO,IAAA;AAAA,IACT;AAAA,EACF;AACF","file":"chunk-U2JVMD2G.js","sourcesContent":["/**\n * Skill Library loader.\n *\n * Resolves agent skill names to Markdown content from the bundled\n * `skills/library/` directory. Skills containing ':' are Claude Code\n * MCP skills — handled natively by Claude CLI, skipped here.\n *\n * Content is cached in-process for the lifetime of the SkillLoader instance.\n */\n\nimport { readFile } from 'node:fs/promises';\nimport { fileURLToPath } from 'node:url';\nimport { dirname, join } from 'node:path';\nimport { listFiles, pathExists } from '../storage/fs-utils.js';\n\n/** Valid skill name: lowercase alphanumeric + hyphens only. 
*/\nconst VALID_SKILL_NAME = /^[a-z0-9-]+$/;\n\n/**\n * Resolve the skills/library/ directory relative to the package root.\n * Works in both dev mode (src/infrastructure/skills/) and production (dist/).\n */\nasync function resolveLibraryDir(): Promise<string> {\n const thisDir = dirname(fileURLToPath(import.meta.url));\n\n // Walk up from current file until we find skills/library/\n let dir = thisDir;\n for (let i = 0; i < 5; i++) {\n const candidate = join(dir, 'skills', 'library');\n if (await pathExists(candidate)) return candidate;\n dir = dirname(dir);\n }\n\n // Fallback: assume 3 levels up (src/infrastructure/skills/ → root)\n return join(thisDir, '..', '..', '..', 'skills', 'library');\n}\n\nexport interface ISkillLoader {\n /**\n * Load and format library skill content for the given skill names.\n * MCP skills (containing ':') are silently skipped.\n * Returns formatted Markdown block or empty string if no library skills resolved.\n */\n loadSkills(skillNames: string[]): Promise<string>;\n\n /** List all available library skill names. */\n listAvailable(): Promise<string[]>;\n}\n\nexport class SkillLoader implements ISkillLoader {\n private readonly cache = new Map<string, string>();\n private readonly libraryDirPromise: Promise<string>;\n private availableCache: string[] | null = null;\n\n constructor(libraryDir?: string) {\n this.libraryDirPromise = libraryDir\n ? Promise.resolve(libraryDir)\n : resolveLibraryDir();\n }\n\n async loadSkills(skillNames: string[]): Promise<string> {\n const librarySkills = skillNames.filter((s) => !s.includes(':'));\n if (librarySkills.length === 0) return '';\n\n const results = await Promise.all(librarySkills.map((name) => this.loadOne(name)));\n const sections = librarySkills\n .map((name, i) => (results[i] ? 
`### ${name}\\n\\n${results[i]}` : null))\n .filter((s): s is string => s !== null);\n\n if (sections.length === 0) return '';\n return `## Skills\\n\\n${sections.join('\\n\\n')}`;\n }\n\n async listAvailable(): Promise<string[]> {\n if (this.availableCache) return this.availableCache;\n\n const dir = await this.libraryDirPromise;\n const entries = await listFiles(dir, '.md');\n this.availableCache = entries\n .map((e) => e.replace(/\\.md$/, ''))\n .sort();\n return this.availableCache;\n }\n\n private async loadOne(name: string): Promise<string | null> {\n const cached = this.cache.get(name);\n if (cached !== undefined) return cached || null;\n\n if (!VALID_SKILL_NAME.test(name)) {\n return null;\n }\n\n const dir = await this.libraryDirPromise;\n const filePath = join(dir, `${name}.md`);\n try {\n const content = await readFile(filePath, 'utf8');\n this.cache.set(name, content);\n return content;\n } catch {\n process.stderr.write(`[orch] skill library: \"${name}\" not found in ${dir}\\n`);\n this.cache.set(name, '');\n return null;\n }\n }\n}\n"]}
|