@mod-computer/cli 0.1.1 → 0.2.2

This diff compares publicly released versions of the package as published to a supported registry and is provided for informational purposes only.
Files changed (55)
  1. package/README.md +98 -76
  2. package/dist/cli.bundle.js +23750 -12931
  3. package/dist/cli.bundle.js.map +4 -4
  4. package/dist/cli.js +23 -12
  5. package/dist/commands/add.js +245 -0
  6. package/dist/commands/auth.js +129 -21
  7. package/dist/commands/comment.js +568 -0
  8. package/dist/commands/diff.js +182 -0
  9. package/dist/commands/index.js +33 -3
  10. package/dist/commands/init.js +475 -221
  11. package/dist/commands/ls.js +135 -0
  12. package/dist/commands/members.js +687 -0
  13. package/dist/commands/mv.js +282 -0
  14. package/dist/commands/rm.js +257 -0
  15. package/dist/commands/status.js +273 -306
  16. package/dist/commands/sync.js +99 -75
  17. package/dist/commands/trace.js +1752 -0
  18. package/dist/commands/workspace.js +354 -330
  19. package/dist/config/features.js +18 -7
  20. package/dist/config/release-profiles/development.json +4 -1
  21. package/dist/config/release-profiles/mvp.json +4 -2
  22. package/dist/daemon/conflict-resolution.js +172 -0
  23. package/dist/daemon/content-hash.js +31 -0
  24. package/dist/daemon/file-sync.js +985 -0
  25. package/dist/daemon/index.js +203 -0
  26. package/dist/daemon/mime-types.js +166 -0
  27. package/dist/daemon/offline-queue.js +211 -0
  28. package/dist/daemon/path-utils.js +64 -0
  29. package/dist/daemon/share-policy.js +83 -0
  30. package/dist/daemon/wasm-errors.js +189 -0
  31. package/dist/daemon/worker.js +557 -0
  32. package/dist/daemon-worker.js +3 -2
  33. package/dist/errors/workspace-errors.js +48 -0
  34. package/dist/lib/auth-server.js +89 -26
  35. package/dist/lib/browser.js +1 -1
  36. package/dist/lib/diff.js +284 -0
  37. package/dist/lib/formatters.js +204 -0
  38. package/dist/lib/git.js +137 -0
  39. package/dist/lib/local-fs.js +201 -0
  40. package/dist/lib/prompts.js +23 -83
  41. package/dist/lib/storage.js +11 -1
  42. package/dist/lib/trace-formatters.js +314 -0
  43. package/dist/services/add-service.js +554 -0
  44. package/dist/services/add-validation.js +124 -0
  45. package/dist/services/mod-config.js +8 -2
  46. package/dist/services/modignore-service.js +2 -0
  47. package/dist/stores/use-workspaces-store.js +36 -14
  48. package/dist/types/add-types.js +99 -0
  49. package/dist/types/config.js +1 -1
  50. package/dist/types/workspace-connection.js +53 -2
  51. package/package.json +7 -5
  52. package/commands/execute.md +0 -156
  53. package/commands/overview.md +0 -233
  54. package/commands/review.md +0 -151
  55. package/commands/spec.md +0 -169
package/dist/services/modignore-service.js CHANGED
@@ -1,3 +1,5 @@
+ // glassware[type="implementation", id="impl-cli-add-modignore-service--ae506406", requirements="requirement-cli-add-ignore-modignore--92ba9b60,requirement-cli-add-ignore-gitignore--054c4b53,requirement-cli-add-ignore-default--facbedfe,requirement-cli-add-ignore-negation--79b9bf48"]
+ // spec: packages/mod-cli/specs/add.md
  import fs from 'fs';
  import path from 'path';
  export class ModIgnoreService {
package/dist/stores/use-workspaces-store.js CHANGED
@@ -1,4 +1,5 @@
  import { create } from 'zustand';
+ import { readConfig } from '../lib/storage.js';
  export const useWorkspacesStore = create((set) => ({
  workspaces: [],
  loading: false,
@@ -6,20 +7,41 @@ export const useWorkspacesStore = create((set) => ({
  fetchWorkspaces: async (repo) => {
  set({ loading: true, error: null });
  try {
- // const rootDocId = '3RrsuQCaieHkXPUNSsq3UhEAxnHW';
- // const rootDocId = 'rY75AHsEfNx1vKnrtpdfmo5dyjr';
- // const rootDocId = '2a9anXJNoBxstgrhRJfkBGS3uxs';
- // const rootDocId = '3pW2EWyQPrshmj4Y1KqEo7sMTqxV';
- const rootDocId = 'KRkJrMQeNQo9bTwSsoKZBxcpz1y';
- const wsHandle = await repo.find(rootDocId);
- const doc = wsHandle.doc();
- const workspacesRaw = doc.workspaces || [];
- const workspacesList = (Array.isArray(workspacesRaw)
- ? workspacesRaw
- : []).map((w) => ({
- id: w.id || '',
- name: w.name || w.title || 'Untitled',
- }));
+ const config = readConfig();
+ const userDocId = config.auth?.userDocId;
+ if (!userDocId) {
+ set({ workspaces: [], error: 'Not authenticated. Run `mod auth login` first.' });
+ return;
+ }
+ // Fetch workspaces from user document
+ const userHandle = await repo.find(userDocId);
+ await userHandle.whenReady();
+ const userDoc = userHandle.doc();
+ if (!userDoc) {
+ set({ workspaces: [], error: 'Could not load user document.' });
+ return;
+ }
+ const workspaceIds = userDoc.workspaceIds || [];
+ const workspacesList = [];
+ // Load workspace metadata for each workspace
+ for (const wsId of workspaceIds) {
+ try {
+ const wsHandle = await repo.find(wsId);
+ await wsHandle.whenReady();
+ const ws = wsHandle.doc();
+ workspacesList.push({
+ id: wsId,
+ name: ws?.title || ws?.name || 'Untitled',
+ });
+ }
+ catch {
+ // Workspace might not be available, add with just ID
+ workspacesList.push({
+ id: wsId,
+ name: 'Untitled',
+ });
+ }
+ }
  set({ workspaces: workspacesList });
  }
  catch (err) {
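
For orientation, here is a minimal sketch of how a caller outside React might drive the reworked store; the import path and the `repo` argument are assumptions for illustration and are not taken from this diff (only zustand's `getState()` API is relied on):

```js
// Illustrative only: call fetchWorkspaces imperatively and read back the result.
import { useWorkspacesStore } from '@mod-computer/cli/dist/stores/use-workspaces-store.js'; // assumed path

async function listWorkspaces(repo) {
  await useWorkspacesStore.getState().fetchWorkspaces(repo); // reads auth.userDocId from the local config
  const { workspaces, error } = useWorkspacesStore.getState();
  if (error) {
    console.error(error); // e.g. 'Not authenticated. Run `mod auth login` first.'
    return [];
  }
  return workspaces; // [{ id, name }, ...]
}
```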
package/dist/types/add-types.js ADDED
@@ -0,0 +1,99 @@
+ // glassware[type="implementation", id="impl-cli-add-types--03cec234", requirements="requirement-cli-add-types--e6c546b5"]
+ // spec: packages/mod-cli/specs/add.md
+ /**
+ * Create a typed AddError
+ */
+ export function createAddError(code, message, path, cause) {
+ return { code, message, path, cause };
+ }
+ /**
+ * Check if an error is an AddError
+ */
+ export function isAddError(error) {
+ return (typeof error === 'object' &&
+ error !== null &&
+ 'code' in error &&
+ 'message' in error);
+ }
+ /**
+ * Error strategy mapping - defines how each error type should be handled
+ */
+ export const ERROR_STRATEGIES = {
+ // Input validation - abort
+ INVALID_PATH: { action: 'abort', userMessage: 'Show error, exit' },
+ PATH_OUTSIDE_WORKSPACE: { action: 'abort', userMessage: 'Show error, exit' },
+ NOT_CONNECTED: { action: 'abort', userMessage: 'Show error, suggest `mod init`' },
+ // File validation - skip
+ FILE_TOO_LARGE: { action: 'skip', userMessage: 'Warning, continue' },
+ PERMISSION_DENIED: { action: 'skip', userMessage: 'Warning, continue' },
+ ENCODING_ERROR: { action: 'fallback', userMessage: 'Auto-convert to binary, continue' },
+ CIRCULAR_SYMLINK: { action: 'skip', userMessage: 'Warning, continue' },
+ // Workspace errors - abort/skip
+ WORKSPACE_NOT_FOUND: { action: 'abort', userMessage: 'Show error, exit' },
+ FOLDER_NOT_FOUND: { action: 'skip', userMessage: 'Warning, continue' },
+ FOLDER_LIMIT_EXCEEDED: { action: 'skip', userMessage: 'Warning, continue' },
+ // Automerge errors - retry
+ DOC_CREATE_FAILED: { action: 'retry', maxRetries: 3, userMessage: 'Retry, then skip' },
+ DOC_UPDATE_FAILED: { action: 'retry', maxRetries: 3, userMessage: 'Retry, then skip' },
+ SYNC_FAILED: { action: 'retry', maxRetries: 3, userMessage: 'Retry, then skip' },
+ HANDLE_TIMEOUT: { action: 'retry', maxRetries: 3, userMessage: 'Retry, then skip' },
+ // System errors - abort
+ OUT_OF_MEMORY: { action: 'abort', userMessage: 'Show error, suggest smaller batch' },
+ CANCELLED: { action: 'abort', userMessage: 'Show progress, exit' },
+ UNKNOWN: { action: 'skip', userMessage: 'Log error, continue' },
+ };
+ /**
+ * Get the error strategy for an error code
+ */
+ export function getErrorStrategy(code) {
+ return ERROR_STRATEGIES[code];
+ }
+ /**
+ * Constants for add operation
+ */
+ export const ADD_CONSTANTS = {
+ /** Maximum concurrent file document creations */
+ PARALLEL_FILE_LIMIT: 10,
+ /** Maximum binary file size in bytes (100KB) */
+ MAX_BINARY_SIZE: 100 * 1024,
+ /** Progress update throttle in ms */
+ PROGRESS_THROTTLE_MS: 100,
+ /** Retry attempts for document operations */
+ MAX_RETRIES: 3,
+ /** Backoff multiplier for retries in ms */
+ RETRY_BACKOFF_MS: 100,
+ /** Memory limit in bytes (200MB) */
+ MEMORY_LIMIT: 200 * 1024 * 1024,
+ /** Small add threshold (no progress bar) */
+ SMALL_ADD_THRESHOLD: 100,
+ /** Medium add threshold (spinner) */
+ MEDIUM_ADD_THRESHOLD: 1000,
+ };
+ /**
+ * Known text file extensions
+ */
+ export const TEXT_EXTENSIONS = new Set([
+ '.txt', '.md', '.js', '.ts', '.tsx', '.jsx', '.json', '.yaml', '.yml',
+ '.toml', '.xml', '.html', '.css', '.scss', '.py', '.rb', '.go', '.rs',
+ '.java', '.c', '.cpp', '.h', '.sh', '.sql', '.graphql', '.vue', '.svelte',
+ '.astro', '.php', '.kt', '.scala', '.swift', '.dockerfile', '.tf', '.hcl',
+ '.ps1', '.bat', '.cmd', '.env', '.gitignore', '.editorconfig', '.prettierrc',
+ '.eslintrc', '.babelrc', 'makefile', 'dockerfile', '.lock'
+ ]);
+ /**
+ * Default ignore patterns
+ */
+ export const DEFAULT_IGNORE_PATTERNS = [
+ 'node_modules/',
+ '.git/',
+ 'dist/',
+ 'build/',
+ '.mod/',
+ '*.log',
+ '.DS_Store',
+ 'Thumbs.db',
+ '.env',
+ '.env.local',
+ '.env.*',
+ 'secrets/',
+ ];
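
To show how the strategy table is meant to be read, here is a small, hypothetical retry wrapper built only from the exports above; `withRetry` and its `operation` callback are illustrative names, not part of the package, and the import path is assumed:

```js
// Illustrative only: map an AddError's code to its declared strategy.
import { getErrorStrategy, isAddError, ADD_CONSTANTS } from '@mod-computer/cli/dist/types/add-types.js'; // assumed path

async function withRetry(operation) {
  for (let attempt = 1; ; attempt++) {
    try {
      return await operation();
    } catch (err) {
      // Fall back to the UNKNOWN strategy for errors without a recognized code.
      const strategy = (isAddError(err) && getErrorStrategy(err.code)) || getErrorStrategy('UNKNOWN');
      if (strategy.action === 'retry') {
        if (attempt < (strategy.maxRetries ?? ADD_CONSTANTS.MAX_RETRIES)) {
          // Back off using the documented multiplier: 100ms, 200ms, 300ms, ...
          await new Promise((resolve) => setTimeout(resolve, ADD_CONSTANTS.RETRY_BACKOFF_MS * attempt));
          continue;
        }
        return null; // retries exhausted: "Retry, then skip"
      }
      if (strategy.action === 'skip' || strategy.action === 'fallback') {
        return null; // warn and continue with the remaining files
      }
      throw err; // 'abort' strategies propagate to the command
    }
  }
}
```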
package/dist/types/config.js CHANGED
@@ -1,4 +1,4 @@
- // glassware[type=implementation, id=cli-config-types, requirements=req-cli-storage-config-1,req-cli-storage-config-2]
+ // glassware[type="implementation", id="cli-config-types--dd74d8f7", requirements="requirement-cli-storage-config-1--6e0f731a,requirement-cli-storage-config-2--f95aa032"]
  /**
  * Default settings values.
  */
package/dist/types/workspace-connection.js CHANGED
@@ -1,2 +1,53 @@
- // glassware[type=implementation, id=cli-workspace-connection-types, requirements=req-cli-storage-conn-1,req-cli-storage-conn-3]
- export {};
+ // glassware[type="implementation", id="cli-workspace-connection-types--13014cb2", requirements="requirement-cli-storage-conn-1--ce1b8ad9,requirement-cli-storage-conn-3--6536b6b2,requirement-cli-init-data-1--9f55757e"]
+ // glassware[type="implementation", id="impl-cli-ws-type-connection--5c50b266", requirements="requirement-cli-ws-type-connection--bc33fa7a,requirement-cli-ws-conn-path--1a1a1301,requirement-cli-ws-conn-workspace-id--39acf87e,requirement-cli-ws-conn-name--791b4633,requirement-cli-ws-conn-connected-at--2ac4aba8,requirement-cli-ws-conn-last-synced--9b1ee720"]
+ // spec: packages/mod-cli/specs/workspaces.md, packages/mod-cli/specs/initialization.md
+ import path from 'path';
+ // ISO 8601 regex pattern for timestamp validation
+ const ISO8601_REGEX = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{3})?Z?$/;
+ // glassware[type="implementation", id="impl-cli-ws-conn-validate--c0c28675", requirements="requirement-cli-ws-conn-validate-path--1278682d,requirement-cli-ws-conn-validate-id--826810aa,requirement-cli-ws-conn-validate-timestamps--77971858"]
+ /**
+ * Validate a WorkspaceConnection object
+ */
+ export function validateWorkspaceConnection(conn) {
+ const errors = [];
+ if (!conn || typeof conn !== 'object') {
+ return {
+ valid: false,
+ errors: [{ field: 'root', message: 'Connection must be an object', code: 'REQUIRED' }]
+ };
+ }
+ const c = conn;
+ // Validate path - must be absolute
+ if (typeof c.path !== 'string' || !c.path) {
+ errors.push({ field: 'path', message: 'path is required', code: 'REQUIRED' });
+ }
+ else if (!path.isAbsolute(c.path)) {
+ errors.push({ field: 'path', message: 'path must be absolute', code: 'INVALID_PATH' });
+ }
+ // Validate workspaceId - must be non-empty string
+ if (typeof c.workspaceId !== 'string' || !c.workspaceId) {
+ errors.push({ field: 'workspaceId', message: 'workspaceId is required', code: 'REQUIRED' });
+ }
+ // Validate workspaceName - must be non-empty string
+ if (typeof c.workspaceName !== 'string' || !c.workspaceName) {
+ errors.push({ field: 'workspaceName', message: 'workspaceName is required', code: 'REQUIRED' });
+ }
+ // Validate connectedAt - must be ISO 8601
+ if (typeof c.connectedAt !== 'string' || !c.connectedAt) {
+ errors.push({ field: 'connectedAt', message: 'connectedAt is required', code: 'REQUIRED' });
+ }
+ else if (!ISO8601_REGEX.test(c.connectedAt)) {
+ errors.push({ field: 'connectedAt', message: 'connectedAt must be ISO 8601 format', code: 'INVALID_FORMAT' });
+ }
+ // Validate lastSyncedAt - must be ISO 8601
+ if (typeof c.lastSyncedAt !== 'string' || !c.lastSyncedAt) {
+ errors.push({ field: 'lastSyncedAt', message: 'lastSyncedAt is required', code: 'REQUIRED' });
+ }
+ else if (!ISO8601_REGEX.test(c.lastSyncedAt)) {
+ errors.push({ field: 'lastSyncedAt', message: 'lastSyncedAt must be ISO 8601 format', code: 'INVALID_FORMAT' });
+ }
+ return {
+ valid: errors.length === 0,
+ errors
+ };
+ }
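
A quick, illustrative call to the new validator; the sample values and import path are assumptions, not data from the package:

```js
// Illustrative only: validate a hand-built workspace connection record.
import { validateWorkspaceConnection } from '@mod-computer/cli/dist/types/workspace-connection.js'; // assumed path

const result = validateWorkspaceConnection({
  path: '/home/alice/projects/demo',       // must be an absolute path
  workspaceId: 'ws-123',
  workspaceName: 'Demo',
  connectedAt: '2025-01-15T10:30:00.000Z', // ISO 8601
  lastSyncedAt: '2025-01-15T10:31:00Z',
});

if (!result.valid) {
  for (const { field, message, code } of result.errors) {
    console.error(`${field}: ${message} (${code})`);
  }
}
```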
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@mod-computer/cli",
- "version": "0.1.1",
+ "version": "0.2.2",
  "license": "MIT",
  "bin": {
  "mod": "dist/cli.bundle.js"
@@ -22,12 +22,13 @@
  "commands"
  ],
  "dependencies": {
+ "@ai-sdk/anthropic": "2.0.0-beta.3",
+ "@ai-sdk/openai": "2.0.0-beta.5",
  "@automerge/automerge": "^3.1.2",
  "@automerge/automerge-repo": "^2.3.1",
  "@automerge/automerge-repo-network-websocket": "^2.3.1",
  "@automerge/automerge-repo-storage-nodefs": "^2.3.1",
- "@ai-sdk/anthropic": "2.0.0-beta.3",
- "@ai-sdk/openai": "2.0.0-beta.5",
+ "@inquirer/prompts": "^8.1.0",
  "ai": "5.0.0-beta.11",
  "chokidar": "^4.0.3",
  "dotenv": "^17.1.0",
@@ -35,15 +36,15 @@
  "ink-select-input": "^6.2.0",
  "ink-text-input": "^6.0.0",
  "meow": "^11.0.0",
+ "ora": "^9.0.0",
  "react": "19.1.0",
  "react-dom": "19.1.0",
  "zustand": "^5.0.6"
  },
  "devDependencies": {
- "@mod/mod-core": "workspace:*",
- "esbuild": "^0.24.0",
  "@babel/cli": "^7.21.0",
  "@babel/preset-react": "^7.18.6",
+ "@mod/mod-core": "workspace:*",
  "@types/ink": "^2.0.3",
  "@types/ink-select-input": "^3.0.5",
  "@types/ink-text-input": "^2.0.5",
@@ -52,6 +53,7 @@
  "@types/react": "^19.1.8",
  "@vdemedes/prettier-config": "^2.0.1",
  "chalk": "^5.2.0",
+ "esbuild": "^0.24.0",
  "eslint-config-xo-react": "^0.27.0",
  "eslint-plugin-react": "^7.32.2",
  "eslint-plugin-react-hooks": "^4.6.0",
package/commands/execute.md DELETED
@@ -1,156 +0,0 @@
- ---
- version: 2.0.0
- updated: 2026-01-03
- description: Execute specification with complete traceability implementation
- ---
-
- # Specification Execution with Complete Traceability
-
- Given this specification file: ".mod/specs/<optional-folder>/$ARGUMENTS"
-
- ## Branch Context and Specification Setup
-
- **Automatic Branch Detection and Context Gathering:**
-
- **If no specification argument provided:**
- 1. **Check active branch** for linked specification:
- ```bash
- mod branch status
- # Should show active branch and linked specification
- ```
- 2. **Error handling** for lightweight branches:
- ```bash
- # If current branch has no specification:
- # > Current branch "fix-auth-timeout" is a lightweight branch with no specification
- # > Options:
- # > 1. Link specification: mod branch spec-link <spec-file>
- # > 2. Create specification branch: mod branch create feature-<name> --spec <spec-file>
- # > 3. Switch to specification branch: mod branch switch <spec-branch>
- ```
-
- **If specification argument provided (e.g., `branching-cli.spec.md`):**
- 1. **Auto-switch to specification branch**:
- ```bash
- # Check if corresponding branch exists
- mod branch list | grep "feature-.*branching-cli"
-
- # If exists, switch to it
- mod branch switch feature-mod-cli-branching-cli
-
- # If doesn't exist, create it
- mod branch create feature-mod-cli-branching-cli --spec mod-cli/branching-cli.spec.md
- ```
- 2. **Verify branch setup**:
- ```bash
- mod branch status
- # Should confirm specification branch is active and linked
- ```
-
- **Context Gathering (Automatic):**
- 1. **Implementation Progress**: `mod branch status` shows current implementation changes
- 2. **Specification Requirements**: `mod status <spec-name>.spec.md` shows requirements and traceability
- 3. **Coverage Analysis**: `glassware` shows which code has implementation traces
-
- ## Pre-Execution Analysis
-
- **BEFORE implementation:**
- 1. Read and fully understand the complete specification, including all sub-requirements
- 2. Parse all ARCH-*, BEH-*, DATA-*, and TEST-* requirements for dependency analysis
- 3. Identify numbered sub-requirements (e.g., BEH-AUTH-1.1, BEH-AUTH-1.2) for granular execution
- 4. Create implementation plan prioritizing ARCH → DATA → BEH → TEST requirements
-
- **Understanding existing code changes:**
- 5. Use `mod branch status` to see what code changes have already been made on this branch
- 6. Run `glassware` to understand spec traceability coverage of existing code on branch
-
- ## Mandatory Traceability Implementation
-
- **CRITICAL: Use glassware annotations for all traceability**
-
- ### Glassware Annotation Format
-
- **In Markdown specs/requirements:**
- ```markdown
- The system must validate user credentials. <glassware type="requirement" id="REQ-AUTH-1" />
- ```
-
- **In TypeScript implementations:**
- ```typescript
- // glassware[type=implementation, id=unique-impl-id, requirements=REQ-ID]
- ```
-
- ### Complexity-Based Traceability Standards
- - **Method-Level (Default)**: Single glassware comment for simple functions (<10 lines, no branching)
- - **Line-Level (Complex)**: Detailed glassware comments for complex logic (>10 lines, branching, error handling)
- - **Business Logic Priority**: Always use line-level traces for business logic patterns regardless of complexity
-
- ### Granularity Decision Rules
- **For each function, analyze and apply:**
-
- 1. **Business Logic Check** - Does function name/purpose include: validate, auth, pay, charge, encrypt, save, persist?
- → If YES: Always use **line-level** traces
-
- 2. **Complexity Check** - Count lines of actual code (exclude comments/whitespace):
- → If >10 lines OR has conditionals (if/switch/try/for/while): Use **line-level** traces
- → If ≤10 lines AND no conditionals: Use **method-level** traces
-
- 3. **Error Handling Check** - Does function have try-catch or multiple return paths?
- → If YES: Use **line-level** traces
-
- ### Method-Level Example (Simple Function)
- ```typescript
- // glassware[type=implementation, id=validate-user, requirements=REQ-AUTH-1]
- function validateUser(email: string, password: string): boolean {
- return bcrypt.compare(password, getStoredHash(email));
- }
- ```
-
- ### Line-Level Example (Complex Function)
- ```typescript
- function processPayment(order: Order, card: CreditCard): PaymentResult {
- // glassware[type=implementation, id=validate-order, requirements=REQ-PAY-1]
- if (order.amount <= 0 || !SUPPORTED_CURRENCIES.includes(order.currency)) {
- throw new ValidationError("Invalid order parameters");
- }
-
- // glassware[type=implementation, id=fraud-check, requirements=REQ-PAY-2]
- const fraudScore = await fraudDetection.analyze(card, order);
- if (fraudScore > FRAUD_THRESHOLD) {
- // glassware[type=implementation, id=fraud-logging, requirements=REQ-PAY-3]
- await auditLog.record('fraud_detected', { order: order.id, score: fraudScore });
- throw new FraudError("Transaction blocked by fraud detection");
- }
-
- // glassware[type=implementation, id=process-charge, requirements=REQ-PAY-4]
- return await paymentGateway.charge(card, order.amount);
- }
- ```
-
- ## Execution Workflow
-
- **For each implementation cycle, MUST apply this process:**
- 1. **Analyze Complexity**: Count lines, branches, and business logic patterns
- 2. **Apply Granularity Rules**: Use method-level for simple functions, line-level for complex
- 3. **Business Logic Override**: Always apply line-level traces for business logic patterns
- 4. **Embed During Generation**: Add glassware traces as code is generated, not afterward
- 5. **Test Progress**: Run `glassware` to verify traceability coverage
- 6. **Iterate**: Continue implementing and tracing until all requirements show as implemented
-
- ## Real-Time Quality Gates
-
- **Execution agents MUST enforce these standards:**
- - **Specification Coverage**: ≥90% of sub-requirements implemented
- - **Appropriate Granularity**: Complex functions have line-level traces, simple functions have method-level
- - **Business Logic Traceability**: All business logic has detailed line-level glassware traces
- - **Invalid References**: 0 glassware comments pointing to non-existent requirements
-
- ## Success Criteria
-
- ✅ **Complete Implementation**: All numbered sub-requirements implemented and working
- ✅ **Full Traceability**: Every implementation decision traced to specific sub-requirement
- ✅ **Status Verification**: `glassware` shows all requirements as implemented
- ✅ **Quality Gates Passed**: Coverage and traceability thresholds met
- ✅ **Test Coverage**: All TEST-* requirements implemented with glassware traces
- ✅ **Zero Orphaned Code**: All new code has valid glassware requirement references
-
- **CRITICAL**: Run `glassware` before and after each implementation phase to verify progress. Additionally, use `mod branch status` to understand what code changes were made for the spec. Do not mark tasks complete until glassware shows all requirements implemented.
package/commands/overview.md DELETED
@@ -1,233 +0,0 @@
- ---
- version: 2.0.0
- updated: 2026-01-03
- description: Generate workspace specification scaffolding and overview
- ---
-
- # Workspace Specification Scaffolding
-
- ## Command Modes
-
- **Two-Step Process:**
-
- 1. **Overview Mode** (no arguments): `workspace-spec`
- - Performs full codebase analysis
- - Generates overview of current architecture
- - Plans specification structure
- - Outputs proposed organization without creating files
-
- 2. **Implementation Mode** (with argument): `workspace-spec <workspace-name>` or `workspace-spec overview.spec.md`
- - Creates the planned specification directory structure
- - Generates template files based on analysis from overview mode
- - Implements the full scaffolding process
-
- ---
-
- ### Overview Mode (No Arguments)
-
- When no arguments are provided, perform analysis and planning only:
-
- **ANALYSIS TASKS:**
- 1. Analyze existing codebase architecture and component organization
- 2. Scan `.mod/specs/` (or existing spec directories) for current specification structure and patterns
- 3. Identify feature domains from component names and folder hierarchy
- 4. Detect specification gaps where components exist but specifications are missing
- 5. Generate hierarchical specification organization plan
-
- **OUTPUT OVERVIEW:**
- - Current codebase architecture summary
- - Identified components and their organization
- - Existing specification coverage analysis
- - Project directory hierarchy map with key ownership notes
- - Recommendation for workspace name and organization
-
- **NO FILES CREATED** - This mode only analyzes and plans.
-
- ---
-
- ### Implementation Mode (With Arguments)
-
- When arguments are provided (`workspace-spec <workspace-name>` or `workspace-spec overview.spec.md`):
-
- **Argument Handling:**
- - If argument is "overview.spec.md" → Use default workspace name based on project directory
- - If argument is a custom name → Use provided workspace name: "$ARGUMENTS"
- - Both cases proceed with full scaffolding implementation
-
- ## Workspace Scaffolding Process
-
- ### 1. Codebase Architecture Discovery
-
- **Project Structure Analysis**:
- - Scan project directory for all components/modules and their organization
- - Identify core components (main application logic, shared libraries)
- - Categorize feature components (domain-specific functionality, user-facing features)
- - Discover infrastructure components (utilities, services, configuration, deployment)
-
- **Existing Specification Audit**:
- - List all current `.mod/specs/` folders and specification files (or detect existing spec directories)
- - Identify coverage gaps where components have no specifications
- - Analyze specification quality and completion status
- - Map specification relationships and dependencies
-
- ### 2. Project Hierarchy Mapping
-
- **Project Organization Deliverable**:
- - Enumerate the actual workspace directories (packages, apps, services, docs, tooling)
- - Highlight the major subdirectories under the focus package (e.g., `source/commands`, `source/services`, `tests/containers`)
- - Note ownership and responsibility per directory so downstream specs know where behavior lives
-
- **Suggested Output Format**:
- ```
- packages/
- └── [workspace-name]/
- ├── source/
- │ ├── app.tsx
- │ ├── cli.tsx
- │ ├── commands/ # Headless commands
- │ ├── containers/ # Ink UI views
- │ ├── services/ # Shared domain logic
- │ ├── components/ # Reusable Ink components
- │ ├── stores/ # Zustand state
- │ └── shims/ # Polyfills/adapters
- ├── tests/
- │ ├── services/
- │ ├── containers/
- │ └── _harness/
- ├── docs/
- ├── dist/
- ├── README.md
- ├── package.json
- └── vitest.config.ts
- ```
-
- Use this hierarchy to anchor the overview narrative, tie directories to functionality, and call out gaps (e.g., missing tests folder, duplicated services). This replaces the previous "proposed spec tree" so the overview focuses on the real project layout before any scaffolding occurs.
-
- ### 3. Template Population System
-
- **Generic Specification Template Structure for Each Specification**:
- ```markdown
- ---
- title: [feature-name]
- type: specification
- ---
- # [Feature Name] Specification
-
- ## Abstract
- [One-sentence system summary with key capabilities and scale targets]
-
- ## Motivation
- ### Current Problems
- - [Specific problem this feature solves]
-
- ### Business Impact
- - [Value delivered and success metrics]
-
- ## Approach
- [High-level solution architecture and key technical decisions]
-
- ## UX Requirements (UX-*)
- [UI components, user flows, and accessibility]
-
- ## Application Requirements (APP-*)
- [Business logic, workflows, and validation rules]
-
- ## Integration Requirements (INT-*)
- [APIs, external services, and data exchange]
-
- ## Infrastructure Requirements (INFRA-*)
- [System architecture, deployment, and scaling]
-
- ## Quality Requirements (QUAL-*)
- [Testing, performance, and security criteria]
-
- ## Expected Glassware Trace Formats
- **Method-Level**: `// glassware[type=implementation, id=unique-id, requirements=REQ-CATEGORY-N]`
- **Line-Level**: Same format, placed above specific logic blocks
- ```
-
- ### 4. Intelligent Content Generation
-
- **For Each Component**:
- 1. **Analyze project files** (package.json, pyproject.toml, Cargo.toml, etc.) for dependencies and scripts to understand functionality
- 2. **Scan source code** for main classes, services, and architectural patterns
- 3. **Identify integration points** through import analysis and API usage
- 4. **Generate initial requirements** based on discovered functionality patterns
- 5. **Create placeholder specifications** with discovered structure and TODO sections
-
- **Business Logic Pattern Detection**:
- - Authentication/authorization functions → Security specification requirements
- - Data persistence operations → Data management specification requirements
- - External API integrations → Integration specification requirements
- - UI components and workflows → UX specification requirements
-
- ### 5. Cross-Component Dependency Mapping
-
- **Integration Analysis**:
- - Map import relationships between components to identify integration specifications needed
- - Identify shared types and interfaces requiring API contract specifications
- - Discover common patterns requiring cross-cutting concern specifications
- - Generate integration requirement matrix for multi-component features
-
- **Dependency Documentation**:
- - Create visual dependency graph in `requirements-matrix.md`
- - Document shared service contracts in `integration-points.md`
- - Identify specification completion priorities based on dependency criticality
-
- ## Scaffolding Execution Workflow
-
- ### Phase 1: Discovery and Planning
- 1. Run codebase analysis to understand current architecture
- 2. Generate proposed specification folder structure
- 3. Display plan to user with component coverage analysis
- 4. Identify critical specification gaps requiring immediate attention
-
- ### Phase 2: Structure Creation
- 1. Detect existing specification directory structure (`.mod/specs/`) or create `.mod/specs/` if none exists
- 2. Create hierarchical folder structure within the specification directory
- 3. Generate template specification files with component-specific content
- 4. Populate initial requirements based on code analysis
- 5. Create cross-reference documentation and dependency matrix
-
- ### Phase 3: Integration Setup
- 1. Update existing specifications with cross-component integration requirements
- 2. Generate shared specification files for cross-cutting concerns
- 3. Create workspace overview documentation linking all specifications
- 4. Establish specification completion roadmap with priorities
-
- ### Phase 4: Validation and Reporting
- 1. Validate all generated specification files for template compliance
- 2. Check cross-reference integrity between specifications
- 3. Generate workspace specification summary report
- 4. Provide next steps for specification completion and implementation
-
- ## Quality Standards
-
- **Template Compliance**:
- - All specifications include complete template structure with all required sections
- - Requirement numbering follows category prefix standards (UX-*, APP-*, INT-*, INFRA-*, QUAL-*)
- - Expected glassware trace formats defined for optimal traceability integration
- - Cross-component dependencies properly documented and referenced
-
- **Scaffolding Completeness**:
- - 100% component coverage - every significant component has at least one specification
- - Critical integration points identified and documented
- - Shared concerns extracted into cross-cutting specifications
- - Specification priorities established based on dependency analysis
-
- **Integration Readiness**:
- - Generated specifications ready for `/execute` command implementation
- - Clear requirement traceability paths established
- - Component boundaries and contracts clearly defined
- - Cross-component workflows properly specified
-
- ## Success Criteria
-
- ✅ **Complete Component Coverage**: Every significant component in workspace has specification structure
- ✅ **Hierarchical Organization**: Logical folder structure mirrors codebase architecture
- ✅ **Template Consistency**: All specifications follow generic template standards
- ✅ **Integration Documentation**: Cross-component dependencies and contracts specified
- ✅ **Implementation Ready**: Generated specifications contain actionable requirements ready for `/execute`
- ✅ **Quality Foundation**: Specification quality standards established for workspace
-
- **Result**: Complete workspace specification scaffolding with hierarchical organization, template consistency, and integration documentation ready for systematic implementation via `/execute` commands.