atomism 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +210 -0
- package/dist/chunk-34O5KJWR.js +81 -0
- package/dist/chunk-34O5KJWR.js.map +1 -0
- package/dist/chunk-55AP34JO.js +116 -0
- package/dist/chunk-55AP34JO.js.map +1 -0
- package/dist/chunk-6MDHM2B4.js +17 -0
- package/dist/chunk-6MDHM2B4.js.map +1 -0
- package/dist/chunk-GU2R4KLP.js +43 -0
- package/dist/chunk-GU2R4KLP.js.map +1 -0
- package/dist/chunk-H7WC3NXZ.js +39 -0
- package/dist/chunk-H7WC3NXZ.js.map +1 -0
- package/dist/chunk-P33CQFMY.js +329 -0
- package/dist/chunk-P33CQFMY.js.map +1 -0
- package/dist/chunk-P6X7T4KA.js +200 -0
- package/dist/chunk-P6X7T4KA.js.map +1 -0
- package/dist/chunk-PLQJM2KT.js +9 -0
- package/dist/chunk-PLQJM2KT.js.map +1 -0
- package/dist/chunk-RS2IEGW3.js +10 -0
- package/dist/chunk-RS2IEGW3.js.map +1 -0
- package/dist/chunk-S6Z5G5DB.js +84 -0
- package/dist/chunk-S6Z5G5DB.js.map +1 -0
- package/dist/chunk-UVUDQ4XP.js +259 -0
- package/dist/chunk-UVUDQ4XP.js.map +1 -0
- package/dist/chunk-UWVZQSP4.js +597 -0
- package/dist/chunk-UWVZQSP4.js.map +1 -0
- package/dist/chunk-YKJO3ZFY.js +308 -0
- package/dist/chunk-YKJO3ZFY.js.map +1 -0
- package/dist/cli.d.ts +1 -0
- package/dist/cli.js +152 -0
- package/dist/cli.js.map +1 -0
- package/dist/create-atom-AXPDBYQL.js +153 -0
- package/dist/create-atom-AXPDBYQL.js.map +1 -0
- package/dist/escalate-BTEJT5NL.js +211 -0
- package/dist/escalate-BTEJT5NL.js.map +1 -0
- package/dist/extract-RPKCTINT.js +514 -0
- package/dist/extract-RPKCTINT.js.map +1 -0
- package/dist/graduate-453M7ZRQ.js +222 -0
- package/dist/graduate-453M7ZRQ.js.map +1 -0
- package/dist/helpers-PJPFPYBQ.js +11 -0
- package/dist/helpers-PJPFPYBQ.js.map +1 -0
- package/dist/history-OPD7NLZW.js +258 -0
- package/dist/history-OPD7NLZW.js.map +1 -0
- package/dist/import-generator-4CKRBMTE.js +1864 -0
- package/dist/import-generator-4CKRBMTE.js.map +1 -0
- package/dist/index.d.ts +230 -0
- package/dist/index.js +41 -0
- package/dist/index.js.map +1 -0
- package/dist/init-2FINDMYK.js +741 -0
- package/dist/init-2FINDMYK.js.map +1 -0
- package/dist/list-NEBVBGG3.js +71 -0
- package/dist/list-NEBVBGG3.js.map +1 -0
- package/dist/parser-3BILOSOO.js +157 -0
- package/dist/parser-3BILOSOO.js.map +1 -0
- package/dist/plan-DNVARHWH.js +249 -0
- package/dist/plan-DNVARHWH.js.map +1 -0
- package/dist/register-XTRMSH7Y.js +91 -0
- package/dist/register-XTRMSH7Y.js.map +1 -0
- package/dist/revert-J4CRDE2K.js +87 -0
- package/dist/revert-J4CRDE2K.js.map +1 -0
- package/dist/run-3GI3SBYL.js +188 -0
- package/dist/run-3GI3SBYL.js.map +1 -0
- package/dist/scan-generators-ST4TBEY7.js +375 -0
- package/dist/scan-generators-ST4TBEY7.js.map +1 -0
- package/dist/signatures-K5QIL4WG.js +258 -0
- package/dist/signatures-K5QIL4WG.js.map +1 -0
- package/dist/skills-assign-IHOXX4AI.js +182 -0
- package/dist/skills-assign-IHOXX4AI.js.map +1 -0
- package/dist/skills-load-JSD5UG2K.js +20 -0
- package/dist/skills-load-JSD5UG2K.js.map +1 -0
- package/dist/skills-scan-WACJFRJN.js +25 -0
- package/dist/skills-scan-WACJFRJN.js.map +1 -0
- package/dist/skills-suggest-JFI2NUJI.js +269 -0
- package/dist/skills-suggest-JFI2NUJI.js.map +1 -0
- package/dist/status-KQVSAZFR.js +111 -0
- package/dist/status-KQVSAZFR.js.map +1 -0
- package/dist/suggest-IFFJQFIW.js +183 -0
- package/dist/suggest-IFFJQFIW.js.map +1 -0
- package/dist/test-HP3FG3MO.js +152 -0
- package/dist/test-HP3FG3MO.js.map +1 -0
- package/dist/test-gen-2ZGPOP35.js +347 -0
- package/dist/test-gen-2ZGPOP35.js.map +1 -0
- package/dist/trust-4R26DULG.js +248 -0
- package/dist/trust-4R26DULG.js.map +1 -0
- package/dist/validate-generator-46H2LYYQ.js +410 -0
- package/dist/validate-generator-46H2LYYQ.js.map +1 -0
- package/dist/workflow-5UVLBS7J.js +655 -0
- package/dist/workflow-5UVLBS7J.js.map +1 -0
- package/package.json +84 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,210 @@
+# Atomism
+
+Schema-first agent swarm orchestration framework.
+
+Build verifiable, composable AI workflows with type-safe atoms that graduate to deterministic generators.
+
+## Install
+
+```bash
+npm install -g atomism
+```
+
+## Quick Start
+
+```bash
+atomic init
+atomic create atom hello_world
+atomic register atoms/hello_world.ts
+atomic run hello_world --input '{"name": "World"}' --yes
+```
+
+## What is an Atom?
+
+An atom is a small, testable, reusable unit of work with validated inputs and outputs:
+
+```typescript
+import { z } from 'zod';
+import { defineAtom, success } from 'atomism';
+
+export default defineAtom({
+  name: 'hello_world',
+  description: 'Generates a greeting',
+  input: z.object({
+    name: z.string().describe('Name to greet'),
+  }),
+  output: z.object({
+    greeting: z.string(),
+  }),
+  tests: { path: './hello_world.test.ts' },
+  idempotent: true,
+  handler: async ({ name }) => {
+    return success({ greeting: `Hello, ${name}!` });
+  },
+});
+```
+
+## Features
+
+### Schema-First Design
+
+Every atom has Zod schemas for inputs and outputs. Type safety at compile time, validation at runtime.
+
+### Generator Evolution
+
+When atoms produce similar outputs repeatedly, graduate them to deterministic generators:
+
+```bash
+atomic graduate hello_world   # Creates generators/hello_world.ts
+atomic revert hello_world     # Reverts to handler if needed
+```
+
+### Trust-Based Execution
+
+Atoms earn trust through successful runs. New atoms require confirmation, trusted atoms run automatically:
+
+```bash
+atomic trust --list                    # View trust levels
+atomic trust my_stack --level proven   # Promote trust
+```
+
+Trust levels: `new` → `proven` → `trusted`
+
+### Workflows
+
+Compose atoms into dependency-resolved workflows:
+
+```bash
+atomic workflow run feature_pipeline      # Execute workflow
+atomic workflow resume feature_pipeline   # Resume failed workflow
+atomic plan feature_pipeline              # Preview execution plan
+```
+
+### Generator Import
+
+Import existing generators from other systems:
+
+```bash
+atomic scan-generators                      # Detect Plop, Hygen, Yeoman, etc.
+atomic import-generator --plop component    # Import to atomic format
+atomic import-generator --hygen model/new
+atomic import-generator --cookiecutter ./template
+atomic validate-generator --all             # Verify imports work
+```
+
+### Skills Integration
+
+Connect atoms to Claude Code skills:
+
+```bash
+atomic skills scan              # Discover available skills
+atomic skills suggest my_atom   # Get recommendations
+atomic skills assign my_atom --skill code-review
+```
+
+### Extraction
+
+Create atoms from existing sources:
+
+```bash
+atomic extract --skill ./skills/refactor.md
+atomic extract --code ./lib/parser.ts
+atomic extract --mcp ./tools/search.json
+```
+
+## CLI Reference
+
+### Core Commands
+
+| Command | Description |
+|---------|-------------|
+| `atomic init` | Initialize `.atomic/` directory |
+| `atomic create atom <name>` | Scaffold new atom |
+| `atomic register <path>` | Register atom file |
+| `atomic run <atom>` | Execute atom or generator |
+| `atomic list` | List registered atoms |
+| `atomic status` | Show system state |
+| `atomic history [runId]` | View execution history |
+
+### Testing
+
+| Command | Description |
+|---------|-------------|
+| `atomic test <atom>` | Run atom tests |
+| `atomic test-gen <path>` | Generate structural tests |
+
+### Graduation
+
+| Command | Description |
+|---------|-------------|
+| `atomic graduate <atom>` | Graduate to generator |
+| `atomic revert <atom>` | Revert to handler |
+
+### Trust
+
+| Command | Description |
+|---------|-------------|
+| `atomic trust --list` | List all trust levels |
+| `atomic trust <stack> --level <level>` | Set trust level |
+| `atomic trust <stack> --reset` | Reset to new |
+
+### Workflows
+
+| Command | Description |
+|---------|-------------|
+| `atomic workflow run <name>` | Execute workflow |
+| `atomic workflow list` | List workflows |
+| `atomic workflow resume <name>` | Resume failed workflow |
+| `atomic plan <workflow>` | Preview execution plan |
+| `atomic escalate` | Create issue for blocked work |
+
+### Generator Import
+
+| Command | Description |
+|---------|-------------|
+| `atomic scan-generators` | Detect existing generators |
+| `atomic import-generator` | Import from Plop/Hygen/Yeoman/etc |
+| `atomic validate-generator` | Validate imported generators |
+
+### Skills
+
+| Command | Description |
+|---------|-------------|
+| `atomic skills scan` | Discover Claude Code skills |
+| `atomic skills suggest <atom>` | Get skill recommendations |
+| `atomic skills assign <atom>` | Assign skill to atom |
+| `atomic skills list <atom>` | List assigned skills |
+| `atomic skills remove <atom>` | Remove skill from atom |
+
+All commands support `--json` for structured output.
+
+## Library API
+
+```typescript
+import {
+  defineAtom,
+  success,
+  failure,
+  validationError,
+  executionError,
+  z,
+} from 'atomism';
+
+// Result helpers
+success({ data: 'value' })          // Success response
+failure('Something went wrong')     // Error response
+validationError('Invalid input')    // Validation error
+executionError('Runtime failure')   // Execution error
+```
+
+## Documentation
+
+Full documentation: https://trust-will.github.io/atomic-explore/
+
+## Requirements
+
+- Node.js 18+
+
+## License
+
+MIT
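The Library API section above lists the result helpers on their own. As a rough sketch of how a handler might branch between them (the atom, its schemas, and the port logic below are invented for illustration; only `defineAtom`, `success`, and `validationError` come from the README):

```typescript
import { z } from 'zod';
import { defineAtom, success, validationError } from 'atomism';

// Hypothetical atom: returns a structured validation error instead of throwing.
export default defineAtom({
  name: 'parse_port',
  description: 'Parses a TCP port from a string',
  input: z.object({ raw: z.string() }),
  output: z.object({ port: z.number() }),
  tests: { path: './parse_port.test.ts' },
  idempotent: true,
  handler: async ({ raw }) => {
    const port = Number(raw);
    if (!Number.isInteger(port) || port < 1 || port > 65535) {
      return validationError(`Not a valid port: ${raw}`); // error path
    }
    return success({ port });                             // success path
  },
});
```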
package/dist/chunk-34O5KJWR.js
ADDED
@@ -0,0 +1,81 @@
+import {
+  TrustLevel
+} from "./chunk-YKJO3ZFY.js";
+
+// src/schemas/atom.ts
+import { z } from "zod";
+var ErrorType = z.enum([
+  "validation",
+  "execution",
+  "dependency",
+  "timeout"
+]);
+var AtomError = z.object({
+  type: ErrorType,
+  message: z.string(),
+  recoverable: z.boolean(),
+  suggestion: z.string().optional(),
+  context: z.record(z.string(), z.unknown()).optional()
+});
+var SuccessResponse = (outputSchema) => z.object({
+  success: z.literal(true),
+  data: outputSchema
+});
+var ErrorResponse = z.object({
+  success: z.literal(false),
+  error: AtomError
+});
+var AtomResult = (outputSchema) => z.discriminatedUnion("success", [
+  SuccessResponse(outputSchema),
+  ErrorResponse
+]);
+var TestManifest = z.object({
+  path: z.string(),
+  coverage_threshold: z.number().min(0).max(100).optional()
+});
+var AtomDefinition = z.object({
+  name: z.string().regex(/^[a-z][a-z0-9_]*$/, "Name must be snake_case"),
+  description: z.string().min(1),
+  input: z.instanceof(z.ZodType),
+  output: z.instanceof(z.ZodType),
+  idempotent: z.boolean().default(true),
+  tests: TestManifest,
+  // Optional fields for enhanced execution
+  agent: z.string().optional(),
+  skill: z.string().optional()
+});
+
+// src/schemas/stack.ts
+import { z as z2 } from "zod";
+var StackAtom = z2.object({
+  /** Atom name */
+  name: z2.string().min(1),
+  /** Optional description of this atom's role in the stack */
+  description: z2.string().optional()
+});
+var AtomStack = z2.object({
+  /** Stack name (must be snake_case) */
+  name: z2.string().regex(/^[a-z][a-z0-9_]*$/, "Stack name must be snake_case"),
+  /** Human-readable description */
+  description: z2.string(),
+  /** Ordered list of atoms in this stack */
+  atoms: z2.array(StackAtom),
+  /** Trust level for this stack */
+  trustLevel: TrustLevel.default("new"),
+  /** When the stack was created */
+  createdAt: z2.iso.datetime(),
+  /** When the stack was last updated */
+  updatedAt: z2.iso.datetime()
+});
+
+export {
+  ErrorType,
+  AtomError,
+  SuccessResponse,
+  ErrorResponse,
+  AtomResult,
+  TestManifest,
+  AtomDefinition,
+  AtomStack
+};
+//# sourceMappingURL=chunk-34O5KJWR.js.map
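The chunk above exports schema factories rather than fixed schemas. A minimal sketch of applying `AtomResult` to a concrete output schema (the output shape and payloads are invented; the import path assumes direct use of the dist chunk shown above):

```typescript
import { z } from "zod";
import { AtomResult } from "./chunk-34O5KJWR.js";

// Bind the success/error discriminated union to a concrete output schema.
const Output = z.object({ greeting: z.string() });
const Result = AtomResult(Output);

// A success payload must carry data matching Output...
Result.parse({ success: true, data: { greeting: "Hello, World!" } });

// ...and a failure payload must carry a structured AtomError.
Result.parse({
  success: false,
  error: { type: "execution", message: "handler threw", recoverable: false },
});
```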
package/dist/chunk-34O5KJWR.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/schemas/atom.ts","../src/schemas/stack.ts"],"sourcesContent":["import { z } from 'zod';\n\n// Error types for atom execution\nexport const ErrorType = z.enum([\n 'validation',\n 'execution',\n 'dependency',\n 'timeout',\n]);\nexport type ErrorType = z.infer<typeof ErrorType>;\n\n// Structured error with actionable information\nexport const AtomError = z.object({\n type: ErrorType,\n message: z.string(),\n recoverable: z.boolean(),\n suggestion: z.string().optional(),\n context: z.record(z.string(), z.unknown()).optional(),\n});\nexport type AtomError = z.infer<typeof AtomError>;\n\n// Success response\nexport const SuccessResponse = <T extends z.ZodType>(outputSchema: T) =>\n z.object({\n success: z.literal(true),\n data: outputSchema,\n });\n\n// Error response\nexport const ErrorResponse = z.object({\n success: z.literal(false),\n error: AtomError,\n});\nexport type ErrorResponse = z.infer<typeof ErrorResponse>;\n\n// Union result type - parameterized by output schema\nexport const AtomResult = <T extends z.ZodType>(outputSchema: T) =>\n z.discriminatedUnion('success', [\n SuccessResponse(outputSchema),\n ErrorResponse,\n ]);\n\n// Helper type for AtomResult inference\nexport type AtomResult<T> =\n | { success: true; data: T }\n | { success: false; error: AtomError };\n\n// Test manifest for atom registration\nexport const TestManifest = z.object({\n path: z.string(),\n coverage_threshold: z.number().min(0).max(100).optional(),\n});\nexport type TestManifest = z.infer<typeof TestManifest>;\n\n// Atom definition schema\nexport const AtomDefinition = z.object({\n name: z\n .string()\n .regex(/^[a-z][a-z0-9_]*$/, 'Name must be snake_case'),\n description: z.string().min(1),\n input: z.instanceof(z.ZodType),\n output: z.instanceof(z.ZodType),\n idempotent: z.boolean().default(true),\n tests: TestManifest,\n // Optional fields for enhanced execution\n agent: z.string().optional(),\n skill: z.string().optional(),\n});\nexport type AtomDefinition = z.infer<typeof AtomDefinition>;\n","/**\n * Stack schemas for the atomic framework.\n *\n * Stacks are reusable compositions of atoms that can be reviewed once\n * and reused. 
They enable pattern-based trust management.\n *\n * @module schemas/stack\n */\n\nimport { z } from 'zod';\nimport { TrustLevel } from './trust.js';\n\n/**\n * An atom reference within a stack.\n */\nexport const StackAtom = z.object({\n /** Atom name */\n name: z.string().min(1),\n /** Optional description of this atom's role in the stack */\n description: z.string().optional(),\n});\nexport type StackAtom = z.infer<typeof StackAtom>;\n\n/**\n * An atom stack definition.\n */\nexport const AtomStack = z.object({\n /** Stack name (must be snake_case) */\n name: z.string().regex(/^[a-z][a-z0-9_]*$/, 'Stack name must be snake_case'),\n /** Human-readable description */\n description: z.string(),\n /** Ordered list of atoms in this stack */\n atoms: z.array(StackAtom),\n /** Trust level for this stack */\n trustLevel: TrustLevel.default('new'),\n /** When the stack was created */\n createdAt: z.iso.datetime(),\n /** When the stack was last updated */\n updatedAt: z.iso.datetime(),\n});\nexport type AtomStack = z.infer<typeof AtomStack>;\n\n/**\n * Create a new atom stack with defaults.\n */\nexport function createStack(name: string, description: string = ''): AtomStack {\n const now = new Date().toISOString();\n return AtomStack.parse({\n name,\n description,\n atoms: [],\n trustLevel: 'new',\n createdAt: now,\n updatedAt: now,\n });\n}\n\n/**\n * Add an atom to a stack.\n * Returns a new stack with the atom added (immutable).\n */\nexport function addAtomToStack(\n stack: AtomStack,\n atomName: string,\n atomDescription?: string\n): AtomStack {\n // Validate atom first for clearer error messages\n const stackAtom = StackAtom.parse({\n name: atomName,\n ...(atomDescription && { description: atomDescription }),\n });\n\n return AtomStack.parse({\n ...stack,\n atoms: [...stack.atoms, stackAtom],\n updatedAt: new Date().toISOString(),\n });\n}\n\n/**\n * Remove an atom from a stack by name.\n * Returns a new stack with the atom removed (immutable).\n */\nexport function removeAtomFromStack(\n stack: AtomStack,\n atomName: string\n): AtomStack {\n return AtomStack.parse({\n ...stack,\n atoms: stack.atoms.filter((a) => a.name !== atomName),\n updatedAt: new Date().toISOString(),\n 
});\n}\n\n"],"mappings":";;;;;AAAA,SAAS,SAAS;AAGX,IAAM,YAAY,EAAE,KAAK;AAAA,EAC9B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF,CAAC;AAIM,IAAM,YAAY,EAAE,OAAO;AAAA,EAChC,MAAM;AAAA,EACN,SAAS,EAAE,OAAO;AAAA,EAClB,aAAa,EAAE,QAAQ;AAAA,EACvB,YAAY,EAAE,OAAO,EAAE,SAAS;AAAA,EAChC,SAAS,EAAE,OAAO,EAAE,OAAO,GAAG,EAAE,QAAQ,CAAC,EAAE,SAAS;AACtD,CAAC;AAIM,IAAM,kBAAkB,CAAsB,iBACnD,EAAE,OAAO;AAAA,EACP,SAAS,EAAE,QAAQ,IAAI;AAAA,EACvB,MAAM;AACR,CAAC;AAGI,IAAM,gBAAgB,EAAE,OAAO;AAAA,EACpC,SAAS,EAAE,QAAQ,KAAK;AAAA,EACxB,OAAO;AACT,CAAC;AAIM,IAAM,aAAa,CAAsB,iBAC9C,EAAE,mBAAmB,WAAW;AAAA,EAC9B,gBAAgB,YAAY;AAAA,EAC5B;AACF,CAAC;AAQI,IAAM,eAAe,EAAE,OAAO;AAAA,EACnC,MAAM,EAAE,OAAO;AAAA,EACf,oBAAoB,EAAE,OAAO,EAAE,IAAI,CAAC,EAAE,IAAI,GAAG,EAAE,SAAS;AAC1D,CAAC;AAIM,IAAM,iBAAiB,EAAE,OAAO;AAAA,EACrC,MAAM,EACH,OAAO,EACP,MAAM,qBAAqB,yBAAyB;AAAA,EACvD,aAAa,EAAE,OAAO,EAAE,IAAI,CAAC;AAAA,EAC7B,OAAO,EAAE,WAAW,EAAE,OAAO;AAAA,EAC7B,QAAQ,EAAE,WAAW,EAAE,OAAO;AAAA,EAC9B,YAAY,EAAE,QAAQ,EAAE,QAAQ,IAAI;AAAA,EACpC,OAAO;AAAA;AAAA,EAEP,OAAO,EAAE,OAAO,EAAE,SAAS;AAAA,EAC3B,OAAO,EAAE,OAAO,EAAE,SAAS;AAC7B,CAAC;;;AC1DD,SAAS,KAAAA,UAAS;AAMX,IAAM,YAAYC,GAAE,OAAO;AAAA;AAAA,EAEhC,MAAMA,GAAE,OAAO,EAAE,IAAI,CAAC;AAAA;AAAA,EAEtB,aAAaA,GAAE,OAAO,EAAE,SAAS;AACnC,CAAC;AAMM,IAAM,YAAYA,GAAE,OAAO;AAAA;AAAA,EAEhC,MAAMA,GAAE,OAAO,EAAE,MAAM,qBAAqB,+BAA+B;AAAA;AAAA,EAE3E,aAAaA,GAAE,OAAO;AAAA;AAAA,EAEtB,OAAOA,GAAE,MAAM,SAAS;AAAA;AAAA,EAExB,YAAY,WAAW,QAAQ,KAAK;AAAA;AAAA,EAEpC,WAAWA,GAAE,IAAI,SAAS;AAAA;AAAA,EAE1B,WAAWA,GAAE,IAAI,SAAS;AAC5B,CAAC;","names":["z","z"]}
package/dist/chunk-55AP34JO.js
ADDED
@@ -0,0 +1,116 @@
+import {
+  getGlobalSkillsPath,
+  getProjectSkillsPath,
+  parseFrontmatter,
+  scanSkillEntry
+} from "./chunk-UVUDQ4XP.js";
+import {
+  fmt
+} from "./chunk-S6Z5G5DB.js";
+import {
+  toErrorMessage
+} from "./chunk-PLQJM2KT.js";
+
+// src/commands/skills-load.ts
+import { readFile } from "fs/promises";
+import { join } from "path";
+async function findSkillInDirectory(skillName, dirPath, source) {
+  const directPath = join(dirPath, `${skillName}.md`);
+  const directSkill = await scanSkillEntry(directPath, source);
+  if (directSkill) {
+    return directSkill;
+  }
+  const dirSkillPath = join(dirPath, skillName);
+  const dirSkill = await scanSkillEntry(dirSkillPath, source);
+  if (dirSkill) {
+    return dirSkill;
+  }
+  return null;
+}
+async function findSkillByName(skillName) {
+  const projectSkill = await findSkillInDirectory(skillName, getProjectSkillsPath(), "project");
+  if (projectSkill) {
+    return projectSkill;
+  }
+  const globalSkill = await findSkillInDirectory(skillName, getGlobalSkillsPath(), "global");
+  if (globalSkill) {
+    return globalSkill;
+  }
+  return null;
+}
+async function loadSkillContent(skillInfo) {
+  let skillPath = skillInfo.resolvedPath ?? skillInfo.location;
+  if (!skillPath.endsWith(".md")) {
+    skillPath = join(skillPath, "SKILL.md");
+  }
+  return await readFile(skillPath, "utf-8");
+}
+function extractSkillInstructions(content) {
+  const normalized = content.replace(/\r\n/g, "\n");
+  const frontmatterMatch = normalized.match(/^---\n[\s\S]*?\n---\n?/);
+  if (frontmatterMatch) {
+    return normalized.slice(frontmatterMatch[0].length).trim();
+  }
+  return normalized.trim();
+}
+async function loadSkill(skillName) {
+  const skillInfo = await findSkillByName(skillName);
+  if (!skillInfo) {
+    return { skill: null };
+  }
+  try {
+    const fullContent = await loadSkillContent(skillInfo);
+    const { name, description } = parseFrontmatter(fullContent);
+    const instructions = extractSkillInstructions(fullContent);
+    return {
+      skill: {
+        name: name || skillName,
+        description: description || "",
+        content: instructions,
+        location: skillInfo.location,
+        source: skillInfo.source
+      }
+    };
+  } catch (error) {
+    const message = toErrorMessage(error);
+    return { skill: null, error: `Failed to load skill "${skillName}": ${message}` };
+  }
+}
+async function loadSkillsForAtom(skillNames) {
+  const warnings = [];
+  const results = await Promise.all(
+    skillNames.map(async (skillName) => {
+      const result = await loadSkill(skillName);
+      return { skillName, ...result };
+    })
+  );
+  const loaded = [];
+  for (const { skillName, skill, error } of results) {
+    if (skill) {
+      loaded.push(skill);
+    } else if (error) {
+      warnings.push(error);
+    } else {
+      warnings.push(`Skill "${skillName}" not found - execution will continue without it`);
+    }
+  }
+  return { loaded, warnings };
+}
+function formatLoadedSkills(result) {
+  const lines = [];
+  for (const skill of result.loaded) {
+    lines.push(fmt.dim(`Loaded skill: ${fmt.cyan(skill.name)} (${skill.source})`));
+  }
+  for (const warning of result.warnings) {
+    lines.push(fmt.yellow(`Warning: ${warning}`));
+  }
+  return lines;
+}
+
+export {
+  extractSkillInstructions,
+  loadSkill,
+  loadSkillsForAtom,
+  formatLoadedSkills
+};
+//# sourceMappingURL=chunk-55AP34JO.js.map
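A short sketch of the exports above in use (the skill names are placeholders; the import path assumes direct use of the dist chunk):

```typescript
import { loadSkillsForAtom, formatLoadedSkills } from "./chunk-55AP34JO.js";

// Resolve skills from project and global skill directories in parallel.
const result = await loadSkillsForAtom(["testing", "code-review"]);

// Missing skills become warnings; execution is expected to continue without them.
for (const line of formatLoadedSkills(result)) {
  console.log(line);
}
```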
package/dist/chunk-55AP34JO.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/commands/skills-load.ts"],"sourcesContent":["/**\n * Skill Loading - Load skill content for atom execution.\n *\n * Story 6.9: AOP-1260\n *\n * Acceptance Criteria:\n * - Skill loaded into Claude's context during execution\n * - Skill's instructions/prompts available to handler\n * - Execution log shows: 'Loaded skill: testing'\n * - Missing skill file shows warning but execution continues\n */\n\nimport { readFile } from 'node:fs/promises';\nimport { join } from 'node:path';\nimport { fmt } from '../cli/format.js';\nimport { toErrorMessage } from '../utils/errors.js';\nimport {\n scanSkillEntry,\n parseFrontmatter,\n SkillInfo,\n getProjectSkillsPath,\n getGlobalSkillsPath,\n} from './skills-scan.js';\n\nexport type { LoadedSkill, SkillLoadResult } from '../atom/skills-load.js';\nimport type { LoadedSkill, SkillLoadResult } from '../atom/skills-load.js';\n\n/**\n * Try to find a skill file by name in the given directory.\n * Looks for:\n * - <name>.md directly in the directory\n * - <name>/SKILL.md for directory-based skills\n */\nasync function findSkillInDirectory(\n skillName: string,\n dirPath: string,\n source: 'project' | 'global'\n): Promise<SkillInfo | null> {\n // Try direct file: <name>.md\n const directPath = join(dirPath, `${skillName}.md`);\n const directSkill = await scanSkillEntry(directPath, source);\n if (directSkill) {\n return directSkill;\n }\n\n // Try directory: <name>/SKILL.md\n const dirSkillPath = join(dirPath, skillName);\n const dirSkill = await scanSkillEntry(dirSkillPath, source);\n if (dirSkill) {\n return dirSkill;\n }\n\n return null;\n}\n\n/**\n * Find a skill by name across all skill locations.\n * Search order: project -> global -> mcp\n */\nasync function findSkillByName(skillName: string): Promise<SkillInfo | null> {\n // Try project skills first\n const projectSkill = await findSkillInDirectory(skillName, getProjectSkillsPath(), 'project');\n if (projectSkill) {\n return projectSkill;\n }\n\n // Try global skills\n const globalSkill = await findSkillInDirectory(skillName, getGlobalSkillsPath(), 'global');\n if (globalSkill) {\n return globalSkill;\n }\n\n // MCP skills are runtime-provided and don't have loadable content\n // They're discovered through config but content comes from MCP servers\n return null;\n}\n\n/**\n * Load the full content of a skill file.\n */\nasync function loadSkillContent(skillInfo: SkillInfo): Promise<string> {\n // Use resolved path if available (for symlinks), otherwise use location\n let skillPath = skillInfo.resolvedPath ?? 
skillInfo.location;\n\n // If path is a directory, look for SKILL.md inside\n if (!skillPath.endsWith('.md')) {\n skillPath = join(skillPath, 'SKILL.md');\n }\n\n return await readFile(skillPath, 'utf-8');\n}\n\n/**\n * Extract the instructions/body content from a skill file.\n * Strips frontmatter and returns the main content.\n */\nexport function extractSkillInstructions(content: string): string {\n // Normalize line endings\n const normalized = content.replace(/\\r\\n/g, '\\n');\n\n // Remove frontmatter if present\n const frontmatterMatch = normalized.match(/^---\\n[\\s\\S]*?\\n---\\n?/);\n if (frontmatterMatch) {\n return normalized.slice(frontmatterMatch[0].length).trim();\n }\n\n return normalized.trim();\n}\n\n/**\n * Result of attempting to load a skill.\n */\ninterface SkillLoadAttempt {\n skill: LoadedSkill | null;\n error?: string;\n}\n\n/**\n * Load a single skill by name.\n * Returns the skill and any error message for diagnostics.\n */\nexport async function loadSkill(skillName: string): Promise<SkillLoadAttempt> {\n const skillInfo = await findSkillByName(skillName);\n if (!skillInfo) {\n return { skill: null };\n }\n\n try {\n const fullContent = await loadSkillContent(skillInfo);\n const { name, description } = parseFrontmatter(fullContent);\n const instructions = extractSkillInstructions(fullContent);\n\n return {\n skill: {\n name: name || skillName,\n description: description || '',\n content: instructions,\n location: skillInfo.location,\n source: skillInfo.source,\n },\n };\n } catch (error) {\n // Return error context for diagnostics\n const message = toErrorMessage(error);\n return { skill: null, error: `Failed to load skill \"${skillName}\": ${message}` };\n }\n}\n\n/**\n * Load multiple skills for an atom execution.\n * Returns loaded skills and warnings for any that couldn't be found.\n * Skills are loaded in parallel for improved performance.\n */\nexport async function loadSkillsForAtom(skillNames: string[]): Promise<SkillLoadResult> {\n const warnings: string[] = [];\n\n // Load all skills in parallel\n const results = await Promise.all(\n skillNames.map(async (skillName) => {\n const result = await loadSkill(skillName);\n return { skillName, ...result };\n })\n );\n\n const loaded: LoadedSkill[] = [];\n for (const { skillName, skill, error } of results) {\n if (skill) {\n loaded.push(skill);\n } else if (error) {\n // Include error details for diagnostics\n warnings.push(error);\n } else {\n warnings.push(`Skill \"${skillName}\" not found - execution will continue without it`);\n }\n }\n\n return { loaded, warnings };\n}\n\n/**\n * Format loaded skills for logging/display.\n */\nexport function formatLoadedSkills(result: SkillLoadResult): string[] {\n const lines: string[] = [];\n\n for (const skill of result.loaded) {\n lines.push(fmt.dim(`Loaded skill: ${fmt.cyan(skill.name)} (${skill.source})`));\n }\n\n for (const warning of result.warnings) {\n lines.push(fmt.yellow(`Warning: ${warning}`));\n }\n\n return lines;\n}\n\n// Re-export from canonical location (atom/skill-context.ts)\nexport { buildSkillContext, type SkillWithContent } from 
'../atom/skill-context.js';\n"],"mappings":";;;;;;;;;;;;;;AAYA,SAAS,gBAAgB;AACzB,SAAS,YAAY;AAoBrB,eAAe,qBACb,WACA,SACA,QAC2B;AAE3B,QAAM,aAAa,KAAK,SAAS,GAAG,SAAS,KAAK;AAClD,QAAM,cAAc,MAAM,eAAe,YAAY,MAAM;AAC3D,MAAI,aAAa;AACf,WAAO;AAAA,EACT;AAGA,QAAM,eAAe,KAAK,SAAS,SAAS;AAC5C,QAAM,WAAW,MAAM,eAAe,cAAc,MAAM;AAC1D,MAAI,UAAU;AACZ,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMA,eAAe,gBAAgB,WAA8C;AAE3E,QAAM,eAAe,MAAM,qBAAqB,WAAW,qBAAqB,GAAG,SAAS;AAC5F,MAAI,cAAc;AAChB,WAAO;AAAA,EACT;AAGA,QAAM,cAAc,MAAM,qBAAqB,WAAW,oBAAoB,GAAG,QAAQ;AACzF,MAAI,aAAa;AACf,WAAO;AAAA,EACT;AAIA,SAAO;AACT;AAKA,eAAe,iBAAiB,WAAuC;AAErE,MAAI,YAAY,UAAU,gBAAgB,UAAU;AAGpD,MAAI,CAAC,UAAU,SAAS,KAAK,GAAG;AAC9B,gBAAY,KAAK,WAAW,UAAU;AAAA,EACxC;AAEA,SAAO,MAAM,SAAS,WAAW,OAAO;AAC1C;AAMO,SAAS,yBAAyB,SAAyB;AAEhE,QAAM,aAAa,QAAQ,QAAQ,SAAS,IAAI;AAGhD,QAAM,mBAAmB,WAAW,MAAM,wBAAwB;AAClE,MAAI,kBAAkB;AACpB,WAAO,WAAW,MAAM,iBAAiB,CAAC,EAAE,MAAM,EAAE,KAAK;AAAA,EAC3D;AAEA,SAAO,WAAW,KAAK;AACzB;AAcA,eAAsB,UAAU,WAA8C;AAC5E,QAAM,YAAY,MAAM,gBAAgB,SAAS;AACjD,MAAI,CAAC,WAAW;AACd,WAAO,EAAE,OAAO,KAAK;AAAA,EACvB;AAEA,MAAI;AACF,UAAM,cAAc,MAAM,iBAAiB,SAAS;AACpD,UAAM,EAAE,MAAM,YAAY,IAAI,iBAAiB,WAAW;AAC1D,UAAM,eAAe,yBAAyB,WAAW;AAEzD,WAAO;AAAA,MACL,OAAO;AAAA,QACL,MAAM,QAAQ;AAAA,QACd,aAAa,eAAe;AAAA,QAC5B,SAAS;AAAA,QACT,UAAU,UAAU;AAAA,QACpB,QAAQ,UAAU;AAAA,MACpB;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AAEd,UAAM,UAAU,eAAe,KAAK;AACpC,WAAO,EAAE,OAAO,MAAM,OAAO,yBAAyB,SAAS,MAAM,OAAO,GAAG;AAAA,EACjF;AACF;AAOA,eAAsB,kBAAkB,YAAgD;AACtF,QAAM,WAAqB,CAAC;AAG5B,QAAM,UAAU,MAAM,QAAQ;AAAA,IAC5B,WAAW,IAAI,OAAO,cAAc;AAClC,YAAM,SAAS,MAAM,UAAU,SAAS;AACxC,aAAO,EAAE,WAAW,GAAG,OAAO;AAAA,IAChC,CAAC;AAAA,EACH;AAEA,QAAM,SAAwB,CAAC;AAC/B,aAAW,EAAE,WAAW,OAAO,MAAM,KAAK,SAAS;AACjD,QAAI,OAAO;AACT,aAAO,KAAK,KAAK;AAAA,IACnB,WAAW,OAAO;AAEhB,eAAS,KAAK,KAAK;AAAA,IACrB,OAAO;AACL,eAAS,KAAK,UAAU,SAAS,kDAAkD;AAAA,IACrF;AAAA,EACF;AAEA,SAAO,EAAE,QAAQ,SAAS;AAC5B;AAKO,SAAS,mBAAmB,QAAmC;AACpE,QAAM,QAAkB,CAAC;AAEzB,aAAW,SAAS,OAAO,QAAQ;AACjC,UAAM,KAAK,IAAI,IAAI,iBAAiB,IAAI,KAAK,MAAM,IAAI,CAAC,KAAK,MAAM,MAAM,GAAG,CAAC;AAAA,EAC/E;AAEA,aAAW,WAAW,OAAO,UAAU;AACrC,UAAM,KAAK,IAAI,OAAO,YAAY,OAAO,EAAE,CAAC;AAAA,EAC9C;AAEA,SAAO;AACT;","names":[]}
package/dist/chunk-6MDHM2B4.js
ADDED
@@ -0,0 +1,17 @@
+// src/atom/skill-context.ts
+function buildSkillContext(skills) {
+  if (skills.length === 0) {
+    return "";
+  }
+  const sections = skills.map((skill) => {
+    return `## Skill: ${skill.name}
+
+${skill.content}`;
+  });
+  return sections.join("\n\n---\n\n");
+}
+
+export {
+  buildSkillContext
+};
+//# sourceMappingURL=chunk-6MDHM2B4.js.map
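A small sketch of `buildSkillContext` from the chunk above (the skill objects are invented; only `name` and `content` are read):

```typescript
import { buildSkillContext } from "./chunk-6MDHM2B4.js";

// Each skill becomes a "## Skill: <name>" section; sections are joined with "---".
const context = buildSkillContext([
  { name: "testing", content: "Write a failing test before changing code." },
  { name: "code-review", content: "Flag unchecked error paths." },
]);

console.log(context); // "## Skill: testing\n\n...\n\n---\n\n## Skill: code-review\n\n..."
```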
package/dist/chunk-6MDHM2B4.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/atom/skill-context.ts"],"sourcesContent":["/**\n * Skill context building for atom execution.\n *\n * Extracted from commands/skills-load.ts to fix inverted dependency\n * (atom/ should not import from commands/).\n *\n * @module atom/skill-context\n */\n\n/**\n * Minimal interface for building skill context.\n * Allows both LoadedSkill and SkillContext to be used.\n */\nexport interface SkillWithContent {\n name: string;\n content: string;\n}\n\n/**\n * Build execution context from skills.\n * Combines skill instructions into a single context string.\n */\nexport function buildSkillContext(skills: SkillWithContent[]): string {\n if (skills.length === 0) {\n return '';\n }\n\n const sections = skills.map((skill) => {\n return `## Skill: ${skill.name}\\n\\n${skill.content}`;\n });\n\n return sections.join('\\n\\n---\\n\\n');\n}\n"],"mappings":";AAsBO,SAAS,kBAAkB,QAAoC;AACpE,MAAI,OAAO,WAAW,GAAG;AACvB,WAAO;AAAA,EACT;AAEA,QAAM,WAAW,OAAO,IAAI,CAAC,UAAU;AACrC,WAAO,aAAa,MAAM,IAAI;AAAA;AAAA,EAAO,MAAM,OAAO;AAAA,EACpD,CAAC;AAED,SAAO,SAAS,KAAK,aAAa;AACpC;","names":[]}
package/dist/chunk-GU2R4KLP.js
ADDED
@@ -0,0 +1,43 @@
+import {
+  StorageNotInitializedError
+} from "./chunk-YKJO3ZFY.js";
+import {
+  toErrorMessage
+} from "./chunk-PLQJM2KT.js";
+
+// src/commands/helpers.ts
+function deriveErrorCode(error) {
+  if (error instanceof StorageNotInitializedError) return "STORAGE_NOT_INITIALIZED";
+  if (error instanceof SyntaxError) return "INVALID_JSON";
+  if (error instanceof TypeError) return "TYPE_ERROR";
+  const msg = toErrorMessage(error);
+  if (msg.includes("not found")) return "NOT_FOUND";
+  if (msg.includes("already exists")) return "ALREADY_EXISTS";
+  if (msg.includes("permission")) return "PERMISSION_DENIED";
+  return "UNKNOWN_ERROR";
+}
+async function withErrorHandling(options, fn) {
+  try {
+    await fn();
+  } catch (error) {
+    const message = toErrorMessage(error);
+    if (options.json) {
+      console.log(
+        JSON.stringify({
+          success: false,
+          error: message,
+          errorCode: deriveErrorCode(error)
+        })
+      );
+    } else {
+      console.error(`Error: ${message}`);
+    }
+    process.exit(1);
+  }
+}
+
+export {
+  deriveErrorCode,
+  withErrorHandling
+};
+//# sourceMappingURL=chunk-GU2R4KLP.js.map
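A sketch of how a command might wrap its logic with `withErrorHandling` (the command body and its output are invented; only the wrapper and its `{ json }` option come from the chunk above):

```typescript
import { withErrorHandling } from "./chunk-GU2R4KLP.js";

// The wrapped function owns all success output; the wrapper handles only the catch path,
// printing { success: false, error, errorCode } in JSON mode and exiting with code 1.
async function statusCommand(options: { json?: boolean }): Promise<void> {
  await withErrorHandling(options, async () => {
    const state = { atoms: 3, workflows: 1 };
    console.log(
      options.json ? JSON.stringify({ success: true, ...state }) : `Atoms: ${state.atoms}`
    );
  });
}
```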
package/dist/chunk-GU2R4KLP.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/commands/helpers.ts"],"sourcesContent":["/**\n * Shared command helpers for the atomic CLI.\n *\n * Provides error-handling wrappers to reduce boilerplate across commands.\n *\n * @module commands/helpers\n */\n\nimport { toErrorMessage } from '../utils/errors.js';\nimport { StorageNotInitializedError } from '../storage/index.js';\n\n/**\n * Options that all commands with error handling share.\n */\nexport interface CommandOptions {\n json?: boolean;\n}\n\n/**\n * Derive a machine-readable error code from an error instance.\n * Agents can use this to distinguish error types programmatically.\n */\nexport function deriveErrorCode(error: unknown): string {\n if (error instanceof StorageNotInitializedError) return 'STORAGE_NOT_INITIALIZED';\n if (error instanceof SyntaxError) return 'INVALID_JSON';\n if (error instanceof TypeError) return 'TYPE_ERROR';\n const msg = toErrorMessage(error);\n if (msg.includes('not found')) return 'NOT_FOUND';\n if (msg.includes('already exists')) return 'ALREADY_EXISTS';\n if (msg.includes('permission')) return 'PERMISSION_DENIED';\n return 'UNKNOWN_ERROR';\n}\n\n/**\n * Wrap a command function with standard error handling.\n *\n * The wrapped function (`fn`) is responsible for ALL success output (both JSON\n * and human-readable). This wrapper only handles the error/catch path:\n *\n * - JSON mode: prints `{ success: false, error: \"...\", errorCode: \"...\" }` and calls process.exit(1)\n * - Human mode: prints `Error: ...` to stderr and calls process.exit(1)\n *\n * Commands with non-standard error output (extra fields, custom formatting)\n * should continue using their own try/catch.\n *\n * @param options - Must include at least `{ json?: boolean }`\n * @param fn - The command's main logic. Handles its own success output.\n */\nexport async function withErrorHandling(\n options: CommandOptions,\n fn: () => Promise<void>\n): Promise<void> {\n try {\n await fn();\n } catch (error) {\n const message = toErrorMessage(error);\n if (options.json) {\n console.log(\n JSON.stringify({\n success: false,\n error: message,\n errorCode: deriveErrorCode(error),\n })\n );\n } else {\n console.error(`Error: ${message}`);\n }\n process.exit(1);\n }\n}\n"],"mappings":";;;;;;;;AAsBO,SAAS,gBAAgB,OAAwB;AACtD,MAAI,iBAAiB,2BAA4B,QAAO;AACxD,MAAI,iBAAiB,YAAa,QAAO;AACzC,MAAI,iBAAiB,UAAW,QAAO;AACvC,QAAM,MAAM,eAAe,KAAK;AAChC,MAAI,IAAI,SAAS,WAAW,EAAG,QAAO;AACtC,MAAI,IAAI,SAAS,gBAAgB,EAAG,QAAO;AAC3C,MAAI,IAAI,SAAS,YAAY,EAAG,QAAO;AACvC,SAAO;AACT;AAiBA,eAAsB,kBACpB,SACA,IACe;AACf,MAAI;AACF,UAAM,GAAG;AAAA,EACX,SAAS,OAAO;AACd,UAAM,UAAU,eAAe,KAAK;AACpC,QAAI,QAAQ,MAAM;AAChB,cAAQ;AAAA,QACN,KAAK,UAAU;AAAA,UACb,SAAS;AAAA,UACT,OAAO;AAAA,UACP,WAAW,gBAAgB,KAAK;AAAA,QAClC,CAAC;AAAA,MACH;AAAA,IACF,OAAO;AACL,cAAQ,MAAM,UAAU,OAAO,EAAE;AAAA,IACnC;AACA,YAAQ,KAAK,CAAC;AAAA,EAChB;AACF;","names":[]}
package/dist/chunk-H7WC3NXZ.js
ADDED
@@ -0,0 +1,39 @@
+// src/schemas/workflow.ts
+import { z } from "zod";
+var WorkflowAtom = z.object({
+  /** Atom name (must match a registered atom) */
+  atom: z.string().min(1),
+  /** Atoms this depends on (must complete before this runs) */
+  dependsOn: z.array(z.string()).default([]),
+  /** Optional alias for this atom instance in the workflow */
+  alias: z.string().optional(),
+  /** Optional description of this atom's role in the workflow */
+  description: z.string().optional()
+});
+var WorkflowOutput = z.object({
+  /** Output artifact name */
+  name: z.string().min(1),
+  /** Which atom produces this output */
+  from: z.string().min(1),
+  /** Optional description of the output */
+  description: z.string().optional()
+});
+var WorkflowDefinition = z.object({
+  /** Workflow name (must be snake_case) */
+  name: z.string().regex(/^[a-z][a-z0-9_]*$/, "Workflow name must be snake_case"),
+  /** Human-readable description */
+  description: z.string(),
+  /** Atoms with dependency declarations */
+  atoms: z.array(WorkflowAtom),
+  /** Output artifact declarations */
+  outputs: z.array(WorkflowOutput).default([]),
+  /** When the workflow was created */
+  createdAt: z.iso.datetime(),
+  /** When the workflow was last updated */
+  updatedAt: z.iso.datetime()
+});
+
+export {
+  WorkflowDefinition
+};
+//# sourceMappingURL=chunk-H7WC3NXZ.js.map
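A sketch of a workflow document validated against `WorkflowDefinition` from the chunk above (the workflow itself is invented; atom names would need to match registered atoms at run time):

```typescript
import { WorkflowDefinition } from "./chunk-H7WC3NXZ.js";

// dependsOn declares execution order; outputs name the artifacts a run should surface.
const workflow = WorkflowDefinition.parse({
  name: "feature_pipeline",
  description: "Plan, implement, and test a feature",
  atoms: [
    { atom: "plan_feature", dependsOn: [] },
    { atom: "implement_feature", dependsOn: ["plan_feature"] },
    { atom: "test_feature", dependsOn: ["implement_feature"] },
  ],
  outputs: [{ name: "diff", from: "implement_feature" }],
  createdAt: new Date().toISOString(),
  updatedAt: new Date().toISOString(),
});
```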
package/dist/chunk-H7WC3NXZ.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/schemas/workflow.ts"],"sourcesContent":["/**\n * Workflow definition schemas for the atomic framework.\n *\n * Workflows are atom graphs with artifact dependencies that compose\n * multi-step tasks. Unlike stacks (which are ordered lists for trust),\n * workflows define execution order through explicit dependencies.\n *\n * @module schemas/workflow\n */\n\nimport { z } from 'zod';\n\n/**\n * An atom reference within a workflow with dependency declarations.\n */\nexport const WorkflowAtom = z.object({\n /** Atom name (must match a registered atom) */\n atom: z.string().min(1),\n /** Atoms this depends on (must complete before this runs) */\n dependsOn: z.array(z.string()).default([]),\n /** Optional alias for this atom instance in the workflow */\n alias: z.string().optional(),\n /** Optional description of this atom's role in the workflow */\n description: z.string().optional(),\n});\nexport type WorkflowAtom = z.infer<typeof WorkflowAtom>;\n\n/**\n * Output artifact declaration.\n */\nexport const WorkflowOutput = z.object({\n /** Output artifact name */\n name: z.string().min(1),\n /** Which atom produces this output */\n from: z.string().min(1),\n /** Optional description of the output */\n description: z.string().optional(),\n});\nexport type WorkflowOutput = z.infer<typeof WorkflowOutput>;\n\n/**\n * A workflow definition.\n */\nexport const WorkflowDefinition = z.object({\n /** Workflow name (must be snake_case) */\n name: z.string().regex(/^[a-z][a-z0-9_]*$/, 'Workflow name must be snake_case'),\n /** Human-readable description */\n description: z.string(),\n /** Atoms with dependency declarations */\n atoms: z.array(WorkflowAtom),\n /** Output artifact declarations */\n outputs: z.array(WorkflowOutput).default([]),\n /** When the workflow was created */\n createdAt: z.iso.datetime(),\n /** When the workflow was last updated */\n updatedAt: z.iso.datetime(),\n});\nexport type WorkflowDefinition = z.infer<typeof WorkflowDefinition>;\n\n/**\n * Result of cycle detection.\n */\nexport interface CycleDetectionResult {\n hasCycle: boolean;\n cycle?: string[];\n}\n\n/**\n * Detect cycles in a workflow's dependency graph using DFS.\n *\n * @param workflow - The workflow to check\n * @returns CycleDetectionResult with hasCycle flag and cycle path if found\n */\nexport function detectCycles(workflow: WorkflowDefinition): CycleDetectionResult {\n // Build adjacency list from atoms\n const nodes = new Set<string>();\n const edges = new Map<string, string[]>();\n\n for (const wfAtom of workflow.atoms) {\n const nodeId = wfAtom.alias ?? wfAtom.atom;\n nodes.add(nodeId);\n edges.set(nodeId, wfAtom.dependsOn);\n }\n\n // Track visited state: 0 = unvisited, 1 = visiting (in stack), 2 = visited\n const state = new Map<string, number>();\n const path: string[] = [];\n\n function dfs(node: string): string[] | null {\n const currentState = state.get(node) ?? 0;\n\n if (currentState === 1) {\n // Found cycle - return the cycle path\n const cycleStart = path.indexOf(node);\n return [...path.slice(cycleStart), node];\n }\n\n if (currentState === 2) {\n // Already fully visited, no cycle through this node\n return null;\n }\n\n // Mark as visiting\n state.set(node, 1);\n path.push(node);\n\n // Visit dependencies\n const deps = edges.get(node) ?? 
[];\n for (const dep of deps) {\n const cycle = dfs(dep);\n if (cycle) {\n return cycle;\n }\n }\n\n // Mark as visited\n path.pop();\n state.set(node, 2);\n return null;\n }\n\n // Check all nodes (handles disconnected graphs)\n for (const node of nodes) {\n const cycle = dfs(node);\n if (cycle) {\n return { hasCycle: true, cycle };\n }\n }\n\n return { hasCycle: false };\n}\n\n/**\n * Validate a workflow definition, including cycle detection.\n *\n * @param workflow - The workflow to validate\n * @throws Error if validation fails\n */\nexport function validateWorkflow(workflow: WorkflowDefinition): void {\n // Schema validation (throws on failure)\n const parsed = WorkflowDefinition.parse(workflow);\n\n // Cycle detection\n const cycleResult = detectCycles(parsed);\n if (cycleResult.hasCycle) {\n throw new Error(\n `Cycle detected in workflow dependencies: ${cycleResult.cycle?.join(' → ')}`\n );\n }\n\n // Validate that all dependencies reference existing atoms\n // Also check for duplicate atom identifiers\n const atomIds = new Set<string>();\n for (const wfAtom of parsed.atoms) {\n const id = wfAtom.alias ?? wfAtom.atom;\n if (atomIds.has(id)) {\n throw new Error(`Duplicate atom identifier \"${id}\"`);\n }\n atomIds.add(id);\n }\n for (const wfAtom of parsed.atoms) {\n for (const dep of wfAtom.dependsOn) {\n if (!atomIds.has(dep)) {\n throw new Error(\n `Atom \"${wfAtom.alias ?? wfAtom.atom}\" depends on unknown atom \"${dep}\"`\n );\n }\n }\n }\n\n // Validate that outputs reference existing atoms\n for (const output of parsed.outputs) {\n if (!atomIds.has(output.from)) {\n throw new Error(\n `Output \"${output.name}\" references unknown atom \"${output.from}\"`\n );\n }\n }\n}\n\n/**\n * Create a new workflow with defaults.\n */\nexport function createWorkflow(\n name: string,\n description: string = ''\n): WorkflowDefinition {\n const now = new Date().toISOString();\n return WorkflowDefinition.parse({\n name,\n description,\n atoms: [],\n outputs: [],\n createdAt: now,\n updatedAt: now,\n });\n}\n\n/**\n * Add an atom to a workflow.\n * Returns a new workflow with the atom added (immutable).\n *\n * @param workflow - The workflow to add to\n * @param atomName - Atom name\n * @param dependsOn - Array of atom names this depends on\n * @param options - Optional alias and description\n */\nexport function addAtomToWorkflow(\n workflow: WorkflowDefinition,\n atomName: string,\n dependsOn: string[] = [],\n options?: { alias?: string; description?: string }\n): WorkflowDefinition {\n const newAtom = WorkflowAtom.parse({\n atom: atomName,\n dependsOn,\n ...options,\n });\n\n const updated = WorkflowDefinition.parse({\n ...workflow,\n atoms: [...workflow.atoms, newAtom],\n updatedAt: new Date().toISOString(),\n });\n\n // Validate the updated workflow (including cycle detection)\n validateWorkflow(updated);\n\n return updated;\n}\n\n/**\n * Add an output declaration to a workflow.\n * Returns a new workflow with the output added (immutable).\n */\nexport function addOutputToWorkflow(\n workflow: WorkflowDefinition,\n name: string,\n from: string,\n description?: string\n): WorkflowDefinition {\n const newOutput = WorkflowOutput.parse({\n name,\n from,\n ...(description && { description }),\n });\n\n const updated = WorkflowDefinition.parse({\n ...workflow,\n outputs: [...workflow.outputs, newOutput],\n updatedAt: new Date().toISOString(),\n });\n\n // Validate the updated workflow\n validateWorkflow(updated);\n\n return 
updated;\n}\n\n"],"mappings":";AAUA,SAAS,SAAS;AAKX,IAAM,eAAe,EAAE,OAAO;AAAA;AAAA,EAEnC,MAAM,EAAE,OAAO,EAAE,IAAI,CAAC;AAAA;AAAA,EAEtB,WAAW,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,QAAQ,CAAC,CAAC;AAAA;AAAA,EAEzC,OAAO,EAAE,OAAO,EAAE,SAAS;AAAA;AAAA,EAE3B,aAAa,EAAE,OAAO,EAAE,SAAS;AACnC,CAAC;AAMM,IAAM,iBAAiB,EAAE,OAAO;AAAA;AAAA,EAErC,MAAM,EAAE,OAAO,EAAE,IAAI,CAAC;AAAA;AAAA,EAEtB,MAAM,EAAE,OAAO,EAAE,IAAI,CAAC;AAAA;AAAA,EAEtB,aAAa,EAAE,OAAO,EAAE,SAAS;AACnC,CAAC;AAMM,IAAM,qBAAqB,EAAE,OAAO;AAAA;AAAA,EAEzC,MAAM,EAAE,OAAO,EAAE,MAAM,qBAAqB,kCAAkC;AAAA;AAAA,EAE9E,aAAa,EAAE,OAAO;AAAA;AAAA,EAEtB,OAAO,EAAE,MAAM,YAAY;AAAA;AAAA,EAE3B,SAAS,EAAE,MAAM,cAAc,EAAE,QAAQ,CAAC,CAAC;AAAA;AAAA,EAE3C,WAAW,EAAE,IAAI,SAAS;AAAA;AAAA,EAE1B,WAAW,EAAE,IAAI,SAAS;AAC5B,CAAC;","names":[]}