@shipwellapp/cli 0.1.0

This diff shows the content of publicly available package versions as released to one of the supported registries, and is provided for informational purposes only.
Files changed (3)
  1. package/README.md +65 -0
  2. package/dist/index.js +898 -0
  3. package/package.json +59 -0
package/README.md ADDED
@@ -0,0 +1,65 @@
+ # @shipwell/cli
+
+ Command-line interface for Shipwell. Run deep codebase analysis from your terminal.
+
+ ## Usage
+
+ ```bash
+ # Build
+ pnpm build
+
+ # Run
+ node dist/index.js <operation> <source> [options]
+ ```
+
+ ## Operations
+
+ ```bash
+ # Security audit
+ shipwell audit ./my-repo
+
+ # Migration plan (specify target)
+ shipwell migrate ./my-repo --target "React 19"
+
+ # Refactor analysis
+ shipwell refactor ./my-repo
+
+ # Generate documentation
+ shipwell docs ./my-repo
+
+ # Dependency upgrade plan
+ shipwell upgrade ./my-repo
+ ```
+
+ ## Options
+
+ ```
+ -k, --api-key <key>       Anthropic API key (or set ANTHROPIC_API_KEY)
+ -m, --model <model>       Claude model (or set SHIPWELL_MODEL)
+ -t, --target <target>     Migration target (for migrate operation)
+ -c, --context <context>   Additional context for analysis
+ -r, --raw                 Also print raw streaming output
+ -h, --help                Display help
+ ```
+
+ ## Output
+
+ The CLI displays:
+
+ 1. **Ingestion stats** — files read, tokens estimated, files skipped
+ 2. **Bundle stats** — files included in the analysis
+ 3. **Live progress** — finding/metric count updates during analysis
+ 4. **Findings** — numbered list with severity badges, descriptions, affected files
+ 5. **Metrics** — before/after comparisons with color-coded values
+ 6. **Summary** — overall analysis summary
+
+ Severity levels are color-coded: `CRITICAL` (red), `HIGH` (orange), `MEDIUM` (yellow), `LOW` (blue), `INFO` (dim).
+
+ Cross-file issues are marked with `⟷ cross-file`.
+
+ ## Environment Variables
+
+ | Variable | Description |
+ |----------|-------------|
+ | `ANTHROPIC_API_KEY` | Anthropic API key (required if not using --api-key) |
+ | `SHIPWELL_MODEL` | Default model (default: claude-sonnet-4-5-20250929) |
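As an illustration of the documented flags and environment variables (editorial example with placeholder values, not part of the published README):

```bash
# Hypothetical end-to-end run: key and model from the environment, target and context passed as flags
export ANTHROPIC_API_KEY=sk-ant-...
export SHIPWELL_MODEL=claude-sonnet-4-5-20250929
shipwell migrate ./my-repo --target "React 19" --context "pnpm monorepo" --raw
```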
package/dist/index.js ADDED
@@ -0,0 +1,898 @@
+ #!/usr/bin/env node
+
+ // src/index.ts
+ import { Command } from "commander";
+
+ // src/commands/analyze.ts
+ import ora from "ora";
+ import chalk from "chalk";
+
+ // ../../packages/core/dist/models.js
+ var AVAILABLE_MODELS = [
+ { id: "claude-sonnet-4-5-20250929", label: "Claude Sonnet 4.5", contextWindow: 2e5, default: true },
+ { id: "claude-opus-4-6", label: "Claude Opus 4.6", contextWindow: 2e5 },
+ { id: "claude-haiku-4-5-20251001", label: "Claude Haiku 4.5", contextWindow: 2e5 }
+ ];
+ var DEFAULT_MODEL = "claude-sonnet-4-5-20250929";
+ function getMaxCodebaseTokens(modelId) {
+ const model = AVAILABLE_MODELS.find((m) => m.id === modelId);
+ const contextWindow = model?.contextWindow ?? 2e5;
+ const outputTokens = 16e3;
+ const systemOverhead = 8e3;
+ const safetyMargin = 1e4;
+ return contextWindow - outputTokens - systemOverhead - safetyMargin;
+ }
+
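As a quick editorial aside (not part of the published file), the codebase budget returned by `getMaxCodebaseTokens` for the default model entry works out as follows:

```js
// Sketch only: reproduces the arithmetic above for claude-sonnet-4-5-20250929 (contextWindow = 2e5).
const contextWindow = 200000;
const outputTokens = 16000;   // reserved for the model's streamed response
const systemOverhead = 8000;  // system prompt + operation prompt
const safetyMargin = 10000;
console.log(contextWindow - outputTokens - systemOverhead - safetyMargin); // 166000 tokens left for code
```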
+ // ../../packages/core/dist/ingest/reader.js
+ import { readFile, stat } from "fs/promises";
+ import { join } from "path";
+ import { simpleGit } from "simple-git";
+ import { glob } from "glob";
+
+ // ../../packages/core/dist/ingest/filters.js
+ import ignore from "ignore";
+ var ALWAYS_IGNORE = [
+ "node_modules",
+ ".git",
+ ".next",
+ "dist",
+ "build",
+ ".turbo",
+ "coverage",
+ "__pycache__",
+ ".venv",
+ "venv",
+ ".env",
+ ".env.local",
+ "*.lock",
+ "package-lock.json",
+ "pnpm-lock.yaml",
+ "yarn.lock",
+ "*.min.js",
+ "*.min.css",
+ "*.map",
+ "*.woff",
+ "*.woff2",
+ "*.ttf",
+ "*.eot",
+ "*.ico",
+ "*.png",
+ "*.jpg",
+ "*.jpeg",
+ "*.gif",
+ "*.svg",
+ "*.webp",
+ "*.mp4",
+ "*.mp3",
+ "*.pdf",
+ "*.zip",
+ "*.tar.gz",
+ "*.exe",
+ "*.dll",
+ "*.so",
+ "*.dylib",
+ "*.bin",
+ "*.dat",
+ "*.db",
+ "*.sqlite",
+ ".DS_Store",
+ "Thumbs.db"
+ ];
+ var CODE_EXTENSIONS = /* @__PURE__ */ new Set([
+ ".ts",
+ ".tsx",
+ ".js",
+ ".jsx",
+ ".mjs",
+ ".cjs",
+ ".py",
+ ".pyw",
+ ".rs",
+ ".go",
+ ".java",
+ ".kt",
+ ".kts",
+ ".c",
+ ".cpp",
+ ".cc",
+ ".h",
+ ".hpp",
+ ".cs",
+ ".rb",
+ ".php",
+ ".swift",
+ ".scala",
+ ".clj",
+ ".cljs",
+ ".ex",
+ ".exs",
+ ".hs",
+ ".lua",
+ ".r",
+ ".R",
+ ".sql",
+ ".sh",
+ ".bash",
+ ".zsh",
+ ".html",
+ ".htm",
+ ".css",
+ ".scss",
+ ".sass",
+ ".less",
+ ".vue",
+ ".svelte",
+ ".json",
+ ".yaml",
+ ".yml",
+ ".toml",
+ ".xml",
+ ".md",
+ ".mdx",
+ ".txt",
+ ".rst",
+ ".dockerfile",
+ ".dockerignore",
+ ".env.example",
+ ".gitignore",
+ ".eslintrc",
+ ".prettierrc",
+ ".graphql",
+ ".gql",
+ ".proto"
+ ]);
+ var LANGUAGE_MAP = {
+ ".ts": "typescript",
+ ".tsx": "tsx",
+ ".js": "javascript",
+ ".jsx": "jsx",
+ ".mjs": "javascript",
+ ".cjs": "javascript",
+ ".py": "python",
+ ".pyw": "python",
+ ".rs": "rust",
+ ".go": "go",
+ ".java": "java",
+ ".kt": "kotlin",
+ ".kts": "kotlin",
+ ".c": "c",
+ ".cpp": "cpp",
+ ".cc": "cpp",
+ ".h": "c",
+ ".hpp": "cpp",
+ ".cs": "csharp",
+ ".rb": "ruby",
+ ".php": "php",
+ ".swift": "swift",
+ ".scala": "scala",
+ ".clj": "clojure",
+ ".cljs": "clojurescript",
+ ".ex": "elixir",
+ ".exs": "elixir",
+ ".hs": "haskell",
+ ".lua": "lua",
+ ".r": "r",
+ ".R": "r",
+ ".sql": "sql",
+ ".sh": "bash",
+ ".bash": "bash",
+ ".zsh": "zsh",
+ ".html": "html",
+ ".htm": "html",
+ ".css": "css",
+ ".scss": "scss",
+ ".sass": "sass",
+ ".less": "less",
+ ".vue": "vue",
+ ".svelte": "svelte",
+ ".json": "json",
+ ".yaml": "yaml",
+ ".yml": "yaml",
+ ".toml": "toml",
+ ".xml": "xml",
+ ".md": "markdown",
+ ".mdx": "mdx",
+ ".txt": "text",
+ ".rst": "rst",
+ ".graphql": "graphql",
+ ".gql": "graphql",
+ ".proto": "protobuf"
+ };
+ function createFilter(gitignoreContent) {
+ const ig = ignore();
+ ig.add(ALWAYS_IGNORE);
+ if (gitignoreContent) {
+ ig.add(gitignoreContent);
+ }
+ return ig;
+ }
+ function isCodeFile(filePath) {
+ const lower = filePath.toLowerCase();
+ const basename = lower.split("/").pop() || "";
+ if (basename === "dockerfile" || basename === "makefile" || basename === "rakefile" || basename === "gemfile" || basename === "procfile" || basename === "cmakelists.txt") {
+ return true;
+ }
+ const ext = "." + basename.split(".").slice(1).join(".");
+ if (CODE_EXTENSIONS.has(ext))
+ return true;
+ const lastExt = "." + (basename.split(".").pop() || "");
+ return CODE_EXTENSIONS.has(lastExt);
+ }
+ function getLanguage(filePath) {
+ const basename = filePath.split("/").pop() || "";
+ const lower = basename.toLowerCase();
+ if (lower === "dockerfile")
+ return "dockerfile";
+ if (lower === "makefile")
+ return "makefile";
+ const ext = "." + (basename.split(".").pop() || "");
+ return LANGUAGE_MAP[ext] || "text";
+ }
+
+ // ../../packages/core/dist/ingest/tokens.js
+ function estimateTokens(text) {
+ return Math.ceil(text.length / 3.2);
+ }
+
+ // ../../packages/core/dist/ingest/priority.js
+ var CONFIG_FILES = /* @__PURE__ */ new Set([
+ "package.json",
+ "tsconfig.json",
+ "next.config.ts",
+ "next.config.js",
+ "next.config.mjs",
+ "vite.config.ts",
+ "vite.config.js",
+ "webpack.config.js",
+ "webpack.config.ts",
+ ".eslintrc.js",
+ ".eslintrc.json",
+ "eslint.config.js",
+ "eslint.config.mjs",
+ "tailwind.config.ts",
+ "tailwind.config.js",
+ "postcss.config.js",
+ "postcss.config.mjs",
+ "cargo.toml",
+ "go.mod",
+ "requirements.txt",
+ "pyproject.toml",
+ "gemfile",
+ "build.gradle",
+ "pom.xml",
+ "cmakelists.txt",
+ "docker-compose.yml",
+ "docker-compose.yaml",
+ "dockerfile",
+ ".env.example",
+ "makefile"
+ ]);
+ var ENTRY_PATTERNS = [
+ /^src\/(index|main|app)\.[tj]sx?$/,
+ /^src\/app\/layout\.[tj]sx?$/,
+ /^src\/app\/page\.[tj]sx?$/,
+ /^src\/pages\/_app\.[tj]sx?$/,
+ /^src\/pages\/index\.[tj]sx?$/,
+ /^app\/layout\.[tj]sx?$/,
+ /^app\/page\.[tj]sx?$/,
+ /^src\/lib\/.*\.[tj]sx?$/,
+ /^lib\/.*\.[tj]sx?$/,
+ /^main\.\w+$/,
+ /^index\.\w+$/
+ ];
+ function getFilePriority(filePath) {
+ const basename = (filePath.split("/").pop() || "").toLowerCase();
+ const normalized = filePath.replace(/\\/g, "/").toLowerCase();
+ if (CONFIG_FILES.has(basename))
+ return 100;
+ for (const pattern of ENTRY_PATTERNS) {
+ if (pattern.test(normalized))
+ return 90;
+ }
+ if (/\/(api|routes?)\//.test(normalized))
+ return 85;
+ if (/middleware\.[tj]sx?$/.test(normalized))
+ return 85;
+ if (/auth/.test(normalized))
+ return 80;
+ if (/^src\//.test(normalized))
+ return 70;
+ if (/components?\//.test(normalized))
+ return 65;
+ if (/hooks?\//.test(normalized))
+ return 60;
+ if (/(utils?|helpers?|lib)\//.test(normalized))
+ return 60;
+ if (/types?\.[tj]s$/.test(normalized))
+ return 55;
+ if (/\.(test|spec)\.[tj]sx?$/.test(normalized))
+ return 30;
+ if (/__(tests?|mocks?)__\//.test(normalized))
+ return 30;
+ if (/\.md$/.test(normalized))
+ return 20;
+ return 50;
+ }
+
+ // ../../packages/core/dist/ingest/reader.js
+ var MAX_FILE_SIZE = 512 * 1024;
+ var DEFAULT_MAX_TOKENS = 865e3;
+ function isGitHubUrl(source) {
+ return source.startsWith("https://github.com/") || source.startsWith("git@github.com:");
+ }
+ async function cloneRepo(url) {
+ const tmpDir = join("/tmp", "shipwell-" + Date.now().toString(36));
+ const git = simpleGit();
+ await git.clone(url, tmpDir, ["--depth", "1", "--single-branch"]);
+ return tmpDir;
+ }
+ async function ingestRepo(options) {
+ const maxTokens = options.maxTokens ?? DEFAULT_MAX_TOKENS;
+ let repoPath;
+ if (isGitHubUrl(options.source)) {
+ repoPath = await cloneRepo(options.source);
+ } else {
+ repoPath = options.source;
+ }
+ let gitignoreContent;
+ try {
+ gitignoreContent = await readFile(join(repoPath, ".gitignore"), "utf-8");
+ } catch {
+ }
+ const filter = createFilter(gitignoreContent);
+ const allFiles = await glob("**/*", {
+ cwd: repoPath,
+ nodir: true,
+ dot: false,
+ absolute: false
+ });
+ const files = [];
+ let skippedFiles = 0;
+ for (const filePath of allFiles) {
+ if (filter.ignores(filePath)) {
+ skippedFiles++;
+ continue;
+ }
+ if (!isCodeFile(filePath)) {
+ skippedFiles++;
+ continue;
+ }
+ const fullPath = join(repoPath, filePath);
+ try {
+ const fileStat = await stat(fullPath);
+ if (fileStat.size > MAX_FILE_SIZE) {
+ skippedFiles++;
+ continue;
+ }
+ } catch {
+ skippedFiles++;
+ continue;
+ }
+ try {
+ const content = await readFile(fullPath, "utf-8");
+ if (content.includes("\0")) {
+ skippedFiles++;
+ continue;
+ }
+ const xmlOverhead = `<file path="${filePath}" language="${getLanguage(filePath)}">
+ </file>
+ `;
+ const tokens = estimateTokens(content + xmlOverhead);
+ files.push({
+ path: filePath,
+ content,
+ language: getLanguage(filePath),
+ tokens,
+ priority: getFilePriority(filePath)
+ });
+ } catch {
+ skippedFiles++;
+ }
+ }
+ files.sort((a, b) => b.priority - a.priority);
+ let totalTokens = 0;
+ const includedFiles = [];
+ for (const file of files) {
+ if (totalTokens + file.tokens > maxTokens) {
+ skippedFiles++;
+ continue;
+ }
+ totalTokens += file.tokens;
+ includedFiles.push(file);
+ }
+ return {
+ files: includedFiles,
+ totalTokens,
+ totalFiles: includedFiles.length,
+ skippedFiles,
+ repoPath
+ };
+ }
+
+ // ../../packages/core/dist/ingest/bundler.js
+ function bundleCodebase(ingestResult) {
+ const parts = ["<codebase>"];
+ for (const file of ingestResult.files) {
+ parts.push(`<file path="${escapeXmlAttr(file.path)}" language="${file.language}">`);
+ parts.push(file.content);
+ parts.push("</file>");
+ }
+ parts.push("</codebase>");
+ const xml = parts.join("\n");
+ return {
+ xml,
+ totalTokens: estimateTokens(xml),
+ includedFiles: ingestResult.totalFiles,
+ skippedFiles: ingestResult.skippedFiles
+ };
+ }
+ function escapeXmlAttr(str) {
+ return str.replace(/&/g, "&amp;").replace(/"/g, "&quot;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
+ }
+
+ // ../../packages/core/dist/engine/client.js
+ import Anthropic from "@anthropic-ai/sdk";
+
+ // ../../packages/core/dist/prompts/audit.js
+ var AUDIT_PROMPT = `Perform a comprehensive SECURITY AUDIT of this codebase.
+
+ Focus on:
+ 1. **Injection vulnerabilities** \u2014 SQL injection, XSS, command injection, path traversal
+ 2. **Authentication & authorization flaws** \u2014 missing auth checks, insecure session handling, broken access control
+ 3. **Data exposure** \u2014 hardcoded secrets, sensitive data in logs, unencrypted PII
+ 4. **Cross-file security issues** \u2014 middleware bypasses, inconsistent validation across endpoints, shared state vulnerabilities
+ 5. **Dependency risks** \u2014 known vulnerable patterns, insecure configurations
+ 6. **Cryptographic issues** \u2014 weak algorithms, improper random generation
+ 7. **API security** \u2014 missing rate limiting, CORS misconfiguration, insecure headers
+
+ For each finding:
+ - Assign a severity (critical/high/medium/low/info)
+ - Explain the attack vector
+ - Show the vulnerable code
+ - Provide a fixed version as a diff
+ - Mark cross-file issues that span multiple files
+
+ Provide metrics:
+ - Security score (0-100, before and after your suggested fixes)
+ - Total vulnerabilities by severity
+ - Cross-file issues count`;
+
+ // ../../packages/core/dist/prompts/migrate.js
+ function getMigratePrompt(target) {
+ return `Perform a comprehensive MIGRATION PLAN for this codebase to ${target}.
+
+ Analyze the entire codebase and produce:
+
+ 1. **Breaking changes** \u2014 identify every usage of deprecated/changed APIs across ALL files
+ 2. **Migration steps** \u2014 ordered list of changes, grouped by file, with dependencies between steps
+ 3. **Cross-file impacts** \u2014 changes in one file that require changes in other files (imports, types, shared state)
+ 4. **Configuration changes** \u2014 package.json, tsconfig, config files that need updating
+ 5. **Type changes** \u2014 type definitions that need updating and all their downstream consumers
+ 6. **Test updates** \u2014 tests that will break and how to fix them
+
+ For each change:
+ - Show exact before/after code as diffs
+ - Explain why the change is needed
+ - Note if it's a breaking change
+ - Mark cross-file dependencies
+
+ Provide metrics:
+ - Total files requiring changes
+ - Breaking changes count
+ - Estimated migration completeness (percentage)
+ - Cross-file dependency chains identified`;
+ }
+
+ // ../../packages/core/dist/prompts/refactor.js
+ var REFACTOR_PROMPT = `Perform a comprehensive REFACTORING ANALYSIS of this codebase.
+
+ Analyze the entire codebase for:
+
+ 1. **Code duplication** \u2014 identical or near-identical code across files, extract shared utilities
+ 2. **Architecture issues** \u2014 circular dependencies, god classes/modules, misplaced logic
+ 3. **Dead code** \u2014 unused exports, unreachable code paths, orphaned files
+ 4. **Naming inconsistencies** \u2014 inconsistent naming conventions across the codebase
+ 5. **Type safety gaps** \u2014 any casts, implicit any, missing generics
+ 6. **Cross-cutting concerns** \u2014 logging, error handling, validation that should be centralized
+ 7. **Pattern violations** \u2014 deviations from the codebase's own patterns/conventions
+
+ For each finding:
+ - Show the current code across all affected files
+ - Provide refactored code as diffs
+ - Explain the improvement and its impact
+ - Mark cross-file refactors that touch multiple files
+
+ Provide metrics:
+ - Code quality score (0-100, before and after)
+ - Duplicated code blocks found
+ - Dead code instances
+ - Cross-file refactoring opportunities`;
+
+ // ../../packages/core/dist/prompts/docs.js
+ var DOCS_PROMPT = `Generate comprehensive DOCUMENTATION for this codebase.
+
+ Analyze the entire codebase and produce:
+
+ 1. **Architecture overview** \u2014 high-level system diagram description, key components and their relationships
+ 2. **API documentation** \u2014 all endpoints/functions with parameters, return types, and examples
+ 3. **Cross-file data flows** \u2014 how data moves through the system, from input to output
+ 4. **Setup guide** \u2014 environment setup, dependencies, configuration
+ 5. **Key patterns** \u2014 design patterns used, conventions to follow
+ 6. **Type reference** \u2014 important interfaces/types and where they're used
+
+ For each documentation section:
+ - Reference specific files and line numbers
+ - Include code examples from the actual codebase
+ - Mark cross-file relationships and data flows
+ - Note any undocumented or confusing areas
+
+ Provide metrics:
+ - Documentation coverage (percentage of public APIs documented)
+ - Undocumented public functions count
+ - Cross-file flows documented`;
+
+ // ../../packages/core/dist/prompts/upgrade.js
+ var UPGRADE_PROMPT = `Perform a comprehensive DEPENDENCY UPGRADE analysis for this codebase.
+
+ Analyze all dependencies and their usage across the entire codebase:
+
+ 1. **Outdated dependencies** \u2014 identify packages that need upgrading, check for major version bumps
+ 2. **Breaking changes** \u2014 for each upgrade, list every file that uses changed APIs
+ 3. **Security advisories** \u2014 known vulnerable versions in use
+ 4. **Unused dependencies** \u2014 packages in package.json that aren't imported anywhere
+ 5. **Missing types** \u2014 @types packages needed but not installed
+ 6. **Cross-file upgrade impacts** \u2014 how upgrading one dependency affects usage across multiple files
+ 7. **Peer dependency conflicts** \u2014 potential version conflicts between packages
+
+ For each upgrade:
+ - Show current vs recommended version
+ - List all files that import/use the package
+ - Show code changes needed as diffs
+ - Mark cross-file impacts
+
+ Provide metrics:
+ - Dependencies needing upgrade
+ - Security vulnerabilities fixed
+ - Unused dependencies to remove
+ - Breaking changes requiring code updates`;
+
+ // ../../packages/core/dist/prompts/system.js
+ var SYSTEM_PROMPT = `You are Shipwell, an expert code analysis engine. You perform deep cross-file analysis of entire codebases.
+
+ You receive an entire codebase in XML format and perform the requested analysis operation.
+
+ CRITICAL RULES:
+ 1. Your analysis MUST identify cross-file issues \u2014 problems that span multiple files. This is your key differentiator.
+ 2. Always reference specific file paths and line numbers.
+ 3. Output your findings in the structured XML format specified.
+ 4. Be thorough but practical \u2014 prioritize actionable findings over theoretical concerns.
+ 5. When suggesting changes, provide complete before/after code snippets.
+
+ OUTPUT FORMAT:
+ Wrap all output in <analysis> tags. Each finding goes in a <finding> tag:
+
+ <analysis>
+ <summary>Brief overall summary of the analysis</summary>
+
+ <metrics>
+ <metric label="Label" before="value" after="value" unit="optional unit" />
+ </metrics>
+
+ <finding id="1" type="issue|suggestion|change|doc" severity="critical|high|medium|low|info">
+ <title>Short descriptive title</title>
+ <description>Detailed explanation of the finding</description>
+ <files>
+ <file>path/to/file1.ts</file>
+ <file>path/to/file2.ts</file>
+ </files>
+ <cross-file>true|false</cross-file>
+ <category>security|performance|type-safety|architecture|etc</category>
+ <diff>
+ \`\`\`diff
+ --- a/path/to/file.ts
+ +++ b/path/to/file.ts
+ @@ -10,5 +10,5 @@
+ -old code
+ +new code
+ \`\`\`
+ </diff>
+ </finding>
+
+ </analysis>`;
+
+ // ../../packages/core/dist/prompts/index.js
+ function getOperationPrompt(operation, options) {
+ let prompt;
+ switch (operation) {
+ case "audit":
+ prompt = AUDIT_PROMPT;
+ break;
+ case "migrate":
+ prompt = getMigratePrompt(options?.target || "latest version");
+ break;
+ case "refactor":
+ prompt = REFACTOR_PROMPT;
+ break;
+ case "docs":
+ prompt = DOCS_PROMPT;
+ break;
+ case "upgrade":
+ prompt = UPGRADE_PROMPT;
+ break;
+ }
+ if (options?.context) {
+ prompt += `
+
+ Additional context from the user:
+ ${options.context}`;
+ }
+ return prompt;
+ }
+
+ // ../../packages/core/dist/engine/client.js
+ async function* streamAnalysis(options) {
+ const { apiKey, operation, codebaseXml, model, target, context, onText } = options;
+ const client = new Anthropic({ apiKey });
+ const userPrompt = `${getOperationPrompt(operation, { target, context })}
+
+ Here is the complete codebase to analyze:
+
+ ${codebaseXml}`;
+ const stream = client.messages.stream({
+ model: model || DEFAULT_MODEL,
+ max_tokens: 16e3,
+ system: SYSTEM_PROMPT,
+ messages: [{ role: "user", content: userPrompt }]
+ });
+ for await (const event of stream) {
+ if (event.type === "content_block_delta" && "text" in event.delta) {
+ const text = event.delta.text;
+ onText?.(text);
+ yield text;
+ }
+ }
+ }
+
+ // ../../packages/core/dist/output/parser.js
+ var StreamingParser = class {
+ buffer = "";
+ findingCount = 0;
+ emittedFindings = /* @__PURE__ */ new Set();
+ emittedMetrics = /* @__PURE__ */ new Set();
+ /** Append new text chunk and extract any complete findings */
+ push(chunk) {
+ this.buffer += chunk;
+ return {
+ findings: this.extractFindings(),
+ metrics: this.extractMetrics()
+ };
+ }
+ extractFindings() {
+ const findings = [];
+ const regex = /<finding\s+id="([^"]*)"[^>]*type="([^"]*)"[^>]*(?:severity="([^"]*)")?[^>]*>([\s\S]*?)<\/finding>/g;
+ let match;
+ while ((match = regex.exec(this.buffer)) !== null) {
+ const id = match[1];
+ if (this.emittedFindings.has(id))
+ continue;
+ this.emittedFindings.add(id);
+ const body = match[4];
+ const title = extractTag(body, "title") || "Untitled";
+ const description = extractTag(body, "description") || "";
+ const crossFile = extractTag(body, "cross-file") === "true";
+ const category = extractTag(body, "category") || void 0;
+ const diff = extractTag(body, "diff") || void 0;
+ const files = [];
+ const fileRegex = /<file>(.*?)<\/file>/g;
+ let fileMatch;
+ while ((fileMatch = fileRegex.exec(body)) !== null) {
+ files.push(fileMatch[1].trim());
+ }
+ findings.push({
+ id,
+ type: match[2],
+ severity: match[3] || void 0,
+ title,
+ description,
+ files,
+ crossFile,
+ category,
+ diff
+ });
+ }
+ return findings;
+ }
+ extractMetrics() {
+ const metrics = [];
+ const regex = /<metric\s+label="([^"]*)"[^>]*before="([^"]*)"[^>]*after="([^"]*)"[^>]*(?:unit="([^"]*)")?[^>]*\/>/g;
+ let match;
+ while ((match = regex.exec(this.buffer)) !== null) {
+ const key = `${match[1]}:${match[2]}:${match[3]}`;
+ if (this.emittedMetrics.has(key))
+ continue;
+ this.emittedMetrics.add(key);
+ metrics.push({
+ label: match[1],
+ before: match[2],
+ after: match[3],
+ unit: match[4] || void 0
+ });
+ }
+ return metrics;
+ }
+ /** Get the analysis summary */
+ getSummary() {
+ return extractTag(this.buffer, "summary");
+ }
+ /** Get full accumulated text */
+ getFullText() {
+ return this.buffer;
+ }
+ };
+ function extractTag(text, tag) {
+ const regex = new RegExp(`<${tag}>([\\s\\S]*?)<\\/${tag}>`);
+ const match = regex.exec(text);
+ return match ? match[1].trim() : null;
+ }
+
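As an editorial aside (not part of the published file), a minimal sketch of how this parser consumes streamed output, assuming `StreamingParser` were reachable outside the bundle:

```js
// Sketch only: feed the parser one hand-written <finding> block.
const parser = new StreamingParser();
const { findings } = parser.push(
  '<finding id="1" type="issue" severity="high">' +
  '<title>Example</title><description>Illustrative only</description>' +
  '<files><file>src/app.ts</file></files>' +
  '<cross-file>false</cross-file></finding>'
);
console.log(findings[0].title);   // "Example"
console.log(parser.getSummary()); // null until a <summary> tag has streamed in
```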
+ // src/commands/analyze.ts
+ var accent = chalk.hex("#6366f1");
+ var dim = chalk.dim;
+ var bold = chalk.bold;
+ var severityColor = {
+ critical: chalk.red.bold,
+ high: chalk.hex("#f97316").bold,
+ medium: chalk.yellow,
+ low: chalk.blue,
+ info: chalk.dim
+ };
+ var opLabels = {
+ audit: "Security Audit",
+ migrate: "Migration Plan",
+ refactor: "Refactor Analysis",
+ docs: "Documentation",
+ upgrade: "Dependency Upgrade"
+ };
+ function severityBadge(sev) {
+ if (!sev) return "";
+ const color = severityColor[sev] || chalk.dim;
+ return color(` [${sev.toUpperCase()}]`);
+ }
+ function formatFinding(f, i) {
+ const lines = [];
+ const num = dim(`${String(i + 1).padStart(2)}.`);
+ const cross = f.crossFile ? accent(" \u27F7 cross-file") : "";
+ lines.push(` ${num} ${bold(f.title)}${severityBadge(f.severity)}${cross}`);
+ if (f.description) {
+ lines.push(` ${dim(f.description)}`);
+ }
+ if (f.files.length > 0) {
+ lines.push(` ${dim("files:")} ${f.files.map((file) => chalk.cyan(file)).join(dim(", "))}`);
+ }
+ return lines.join("\n");
+ }
+ function formatMetric(m) {
+ return ` ${dim("\u2022")} ${m.label}: ${chalk.red(m.before)} ${dim("\u2192")} ${chalk.green(m.after)}${m.unit ? dim(` ${m.unit}`) : ""}`;
+ }
+ async function analyzeCommand(operation, source, options) {
+ const apiKey = options.apiKey || process.env.ANTHROPIC_API_KEY;
+ if (!apiKey) {
+ console.error(chalk.red("\n Error: Anthropic API key is required.\n"));
+ console.error(dim(" Set ANTHROPIC_API_KEY env var or use --api-key flag"));
+ console.error(dim(" Example: shipwell audit ./my-repo --api-key sk-ant-...\n"));
+ process.exit(1);
+ }
+ const model = options.model || process.env.SHIPWELL_MODEL || "claude-sonnet-4-5-20250929";
+ const startTime = Date.now();
+ console.log();
+ console.log(accent(" \u26F5 Shipwell"), dim("\u2014 Full Codebase Autopilot"));
+ console.log(dim(` ${opLabels[operation] || operation} \xB7 ${model}`));
+ console.log();
+ const spinner = ora({ text: "Reading repository...", color: "cyan", prefixText: " " }).start();
+ let ingestResult;
+ try {
+ ingestResult = await ingestRepo({ source, maxTokens: getMaxCodebaseTokens(model) });
+ spinner.succeed(
+ `Read ${bold(ingestResult.totalFiles)} files ${dim(`(${ingestResult.skippedFiles} skipped, ~${Math.round(ingestResult.totalTokens / 1e3)}K tokens)`)}`
+ );
+ } catch (err) {
+ spinner.fail(`Failed to read repository: ${err.message}`);
+ process.exit(1);
+ }
+ const bundleSpinner = ora({ text: "Bundling codebase...", color: "cyan", prefixText: " " }).start();
+ const bundle = bundleCodebase(ingestResult);
+ bundleSpinner.succeed(
+ `Bundled ${bold(bundle.includedFiles)} files ${dim(`(~${Math.round(bundle.totalTokens / 1e3)}K tokens)`)}`
+ );
+ const analyzeSpinner = ora({ text: `Running ${operation} analysis...`, color: "magenta", prefixText: " " }).start();
+ const parser = new StreamingParser();
+ const allFindings = [];
+ const allMetrics = [];
+ try {
+ for await (const chunk of streamAnalysis({
+ apiKey,
+ operation,
+ model,
+ codebaseXml: bundle.xml,
+ target: options.target,
+ context: options.context
+ })) {
+ const { findings, metrics } = parser.push(chunk);
+ if (findings.length > 0 || metrics.length > 0) {
+ allFindings.push(...findings);
+ allMetrics.push(...metrics);
+ analyzeSpinner.text = `Analyzing... ${dim(`${allFindings.length} findings, ${allMetrics.length} metrics`)}`;
+ }
+ if (options.raw) {
+ process.stdout.write(chunk);
+ }
+ }
+ } catch (err) {
+ analyzeSpinner.fail(`Analysis failed: ${err.message}`);
+ process.exit(1);
+ }
+ const elapsed = ((Date.now() - startTime) / 1e3).toFixed(1);
+ analyzeSpinner.succeed(`Analysis complete ${dim(`(${elapsed}s)`)}`);
+ console.log();
+ console.log(accent(" \u2500\u2500\u2500 Results \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
+ console.log();
+ if (allFindings.length > 0) {
+ const critCount = allFindings.filter((f) => f.severity === "critical").length;
+ const highCount = allFindings.filter((f) => f.severity === "high").length;
+ const medCount = allFindings.filter((f) => f.severity === "medium").length;
+ const lowCount = allFindings.filter((f) => f.severity === "low").length;
+ const crossCount = allFindings.filter((f) => f.crossFile).length;
+ const counts = [
+ critCount > 0 ? chalk.red(`${critCount} critical`) : null,
+ highCount > 0 ? chalk.hex("#f97316")(`${highCount} high`) : null,
+ medCount > 0 ? chalk.yellow(`${medCount} medium`) : null,
+ lowCount > 0 ? chalk.blue(`${lowCount} low`) : null
+ ].filter(Boolean).join(dim(" \xB7 "));
+ console.log(` ${bold(`${allFindings.length} Findings`)} ${dim("(")}${counts}${dim(")")}`);
+ if (crossCount > 0) {
+ console.log(` ${accent(`${crossCount} cross-file issues`)}`);
+ }
+ console.log();
+ for (let i = 0; i < allFindings.length; i++) {
+ console.log(formatFinding(allFindings[i], i));
+ if (i < allFindings.length - 1) console.log();
+ }
+ } else {
+ console.log(dim(" No findings."));
+ }
+ if (allMetrics.length > 0) {
+ console.log();
+ console.log(` ${bold("Metrics")}`);
+ for (const m of allMetrics) {
+ console.log(formatMetric(m));
+ }
+ }
+ const summary = parser.getSummary();
+ if (summary) {
+ console.log();
+ console.log(` ${bold("Summary")}`);
+ console.log(` ${dim(summary)}`);
+ }
+ console.log();
+ console.log(accent(" \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500"));
+ console.log(` ${chalk.green("\u2713")} ${bold(`${allFindings.length} findings`)} in ${elapsed}s \xB7 ${dim(`${bundle.includedFiles} files \xB7 ~${Math.round(bundle.totalTokens / 1e3)}K tokens`)}`);
+ console.log();
+ }
+
+ // src/index.ts
+ var program = new Command();
+ program.name("shipwell").description("Full Codebase Autopilot \u2014 deep cross-file analysis powered by Claude").version("0.1.0");
+ var operations = ["audit", "migrate", "refactor", "docs", "upgrade"];
+ var opDesc = {
+ audit: "Run a security audit on a codebase",
+ migrate: "Plan a framework/library migration",
+ refactor: "Detect code smells, duplication & architecture issues",
+ docs: "Generate comprehensive documentation",
+ upgrade: "Analyze dependencies & plan safe upgrades"
+ };
+ for (const op of operations) {
+ program.command(op).description(opDesc[op] || `Run ${op} analysis on a codebase`).argument("<source>", "Local path or GitHub URL").option("-k, --api-key <key>", "Anthropic API key (or set ANTHROPIC_API_KEY env var)").option("-m, --model <model>", "Claude model to use (or set SHIPWELL_MODEL env var)").option("-t, --target <target>", "Migration target (for migrate operation)").option("-c, --context <context>", "Additional context for the analysis").option("-r, --raw", "Also print raw streaming output").action((source, options) => {
+ analyzeCommand(op, source, options);
+ });
+ }
+ program.parse();
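For orientation (editorial note, not part of the package), the pipeline `analyzeCommand` wires together can be summarized as a sketch. The helpers are bundled into dist/index.js rather than exported, so this is illustrative only:

```js
// Sketch of the flow: ingest -> bundle -> stream -> parse, mirroring analyzeCommand above.
const ingest = await ingestRepo({ source: "./my-repo", maxTokens: getMaxCodebaseTokens(DEFAULT_MODEL) });
const bundle = bundleCodebase(ingest);        // wraps files in <codebase>/<file ...> XML
const parser = new StreamingParser();
for await (const chunk of streamAnalysis({
  apiKey: process.env.ANTHROPIC_API_KEY,
  operation: "audit",                         // or migrate / refactor / docs / upgrade
  model: DEFAULT_MODEL,
  codebaseXml: bundle.xml
})) {
  parser.push(chunk);                         // findings and metrics accumulate as the XML streams in
}
console.log(parser.getSummary());
```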
package/package.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "name": "@shipwellapp/cli",
+ "version": "0.1.0",
+ "description": "Full Codebase Autopilot — deep cross-file analysis powered by Claude",
+ "type": "module",
+ "bin": {
+ "shipwell": "./dist/index.js"
+ },
+ "files": [
+ "dist"
+ ],
+ "scripts": {
+ "build": "tsup",
+ "dev": "tsc --watch"
+ },
+ "keywords": [
+ "claude",
+ "ai",
+ "codebase",
+ "analysis",
+ "security",
+ "audit",
+ "migration",
+ "refactor",
+ "documentation",
+ "anthropic",
+ "opus",
+ "cli"
+ ],
+ "author": "Manas Dutta",
+ "license": "MIT",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/manasdutta04/shipwell.git",
+ "directory": "apps/cli"
+ },
+ "homepage": "https://shipwell.app",
+ "bugs": {
+ "url": "https://github.com/manasdutta04/shipwell/issues"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "dependencies": {
+ "@anthropic-ai/sdk": "^0.39.0",
+ "@shipwell/core": "workspace:*",
+ "chalk": "^5.4.1",
+ "commander": "^13.1.0",
+ "glob": "^11.0.0",
+ "ignore": "^7.0.0",
+ "ora": "^8.2.0",
+ "simple-git": "^3.27.0"
+ },
+ "devDependencies": {
+ "@types/node": "^22.10.0",
+ "tsup": "^8.5.1",
+ "typescript": "^5.7.0"
+ }
+ }
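Because the manifest declares a `shipwell` entry under `bin`, a global install would normally put the command on PATH (illustrative commands; registry availability of this version is not verified here):

```bash
npm install -g @shipwellapp/cli
shipwell --help
```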