crack-code 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,15 @@
1
+ # crack-code
2
+
3
+ To install dependencies:
4
+
5
+ ```bash
6
+ bun install
7
+ ```
8
+
9
+ To run:
10
+
11
+ ```bash
12
+ bun run src/index.ts
13
+ ```
14
+
15
+ This project was created using `bun init` in bun v1.3.5. [Bun](https://bun.com) is a fast all-in-one JavaScript runtime.
package/bun.lock ADDED
@@ -0,0 +1,79 @@
1
+ {
2
+ "lockfileVersion": 1,
3
+ "configVersion": 1,
4
+ "workspaces": {
5
+ "": {
6
+ "name": "crack-code",
7
+ "dependencies": {
8
+ "@ai-sdk/anthropic": "^3.0.54",
9
+ "@ai-sdk/google": "^3.0.37",
10
+ "@ai-sdk/openai": "^3.0.39",
11
+ "ai": "^6.0.111",
12
+ "gradient-string": "^3.0.0",
13
+ "ollama-ai-provider-v2": "^3.3.1",
14
+ "picocolors": "^1.1.1",
15
+ "zod": "^4.3.6",
16
+ },
17
+ "devDependencies": {
18
+ "@types/bun": "latest",
19
+ "@types/gradient-string": "^1.1.6",
20
+ },
21
+ "peerDependencies": {
22
+ "typescript": "^5",
23
+ },
24
+ },
25
+ },
26
+ "packages": {
27
+ "@ai-sdk/anthropic": ["@ai-sdk/anthropic@3.0.54", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-UhSPZ63FsTNO7PQCfxsqJIgkij1sivU3qfXydlSd4ugshpkNhd2v9s78G/40/G5C3pKSRfp/CfaSvivrneQfCg=="],
28
+
29
+ "@ai-sdk/gateway": ["@ai-sdk/gateway@3.0.63", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.17", "@vercel/oidc": "3.1.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-0jwdkN3elC4Q9aT2ALxjXtGGVoye15zYgof6GfvuH1a9QKx9Rj4Wi2vy6SyyLvtSA/lB786dTZgC+cGwe6vzmA=="],
30
+
31
+ "@ai-sdk/google": ["@ai-sdk/google@3.0.37", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-hZ7nO55+GfQEWJe2B5XRoLNeEubMTuk6OjJJUDS+XCtjKLCd973rRkc62vS86rHw5VuCGITwn3gUZRhSbuX5vw=="],
32
+
33
+ "@ai-sdk/openai": ["@ai-sdk/openai@3.0.39", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-EZrs4L6kMkPQhpodagpEvqLSryOIK99WgblN0IsVHr1xhajWizQOZ0XMa7c5JpSYgIjV6u8GCpGV6hS3Mk2Bug=="],
34
+
35
+ "@ai-sdk/provider": ["@ai-sdk/provider@3.0.8", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ=="],
36
+
37
+ "@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@4.0.17", "", { "dependencies": { "@ai-sdk/provider": "3.0.8", "@standard-schema/spec": "^1.1.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-oyCeFINTYK0B8ZGUBiQc05G5vytPlKSmTTtm19xfJuUgoi8zkvvRcoPQci4mSnyfpPn2XSFFDfsALG8uGcapfg=="],
38
+
39
+ "@opentelemetry/api": ["@opentelemetry/api@1.9.0", "", {}, "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg=="],
40
+
41
+ "@standard-schema/spec": ["@standard-schema/spec@1.1.0", "", {}, "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w=="],
42
+
43
+ "@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="],
44
+
45
+ "@types/gradient-string": ["@types/gradient-string@1.1.6", "", { "dependencies": { "@types/tinycolor2": "*" } }, "sha512-LkaYxluY4G5wR1M4AKQUal2q61Di1yVVCw42ImFTuaIoQVgmV0WP1xUaLB8zwb47mp82vWTpePI9JmrjEnJ7nQ=="],
46
+
47
+ "@types/node": ["@types/node@25.3.3", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-DpzbrH7wIcBaJibpKo9nnSQL0MTRdnWttGyE5haGwK86xgMOkFLp7vEyfQPGLOJh5wNYiJ3V9PmUMDhV9u8kkQ=="],
48
+
49
+ "@types/tinycolor2": ["@types/tinycolor2@1.4.6", "", {}, "sha512-iEN8J0BoMnsWBqjVbWH/c0G0Hh7O21lpR2/+PrvAVgWdzL7eexIFm4JN/Wn10PTcmNdtS6U67r499mlWMXOxNw=="],
50
+
51
+ "@vercel/oidc": ["@vercel/oidc@3.1.0", "", {}, "sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w=="],
52
+
53
+ "ai": ["ai@6.0.111", "", { "dependencies": { "@ai-sdk/gateway": "3.0.63", "@ai-sdk/provider": "3.0.8", "@ai-sdk/provider-utils": "4.0.17", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-K5aikNm4JGfJkzwIr3yA/qhOYIOIvOqjCxSQjQQ7bWWqm0uuPO2/qgdXL23gYJdTLPPYfvi2TTS+bg2Yp+r2Lw=="],
54
+
55
+ "bun-types": ["bun-types@1.3.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg=="],
56
+
57
+ "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="],
58
+
59
+ "eventsource-parser": ["eventsource-parser@3.0.6", "", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="],
60
+
61
+ "gradient-string": ["gradient-string@3.0.0", "", { "dependencies": { "chalk": "^5.3.0", "tinygradient": "^1.1.5" } }, "sha512-frdKI4Qi8Ihp4C6wZNB565de/THpIaw3DjP5ku87M+N9rNSGmPTjfkq61SdRXB7eCaL8O1hkKDvf6CDMtOzIAg=="],
62
+
63
+ "json-schema": ["json-schema@0.4.0", "", {}, "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="],
64
+
65
+ "ollama-ai-provider-v2": ["ollama-ai-provider-v2@3.3.1", "", { "dependencies": { "@ai-sdk/provider": "^3.0.8", "@ai-sdk/provider-utils": "^4.0.15" }, "peerDependencies": { "ai": "^5.0.0 || ^6.0.0", "zod": "^4.0.16" } }, "sha512-j4BBqqQnvf/uDz+aPYcgU4/MQZERw087Fn1DMGtViA/PgahBq36jHKHVoZfx8mxj+w8cxsKd3eYaDgyZPhE6YA=="],
66
+
67
+ "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
68
+
69
+ "tinycolor2": ["tinycolor2@1.6.0", "", {}, "sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw=="],
70
+
71
+ "tinygradient": ["tinygradient@1.1.5", "", { "dependencies": { "@types/tinycolor2": "^1.4.0", "tinycolor2": "^1.0.0" } }, "sha512-8nIfc2vgQ4TeLnk2lFj4tRLvvJwEfQuabdsmvDdQPT0xlk9TaNtpGd6nNRxXoK6vQhN6RSzj+Cnp5tTQmpxmbw=="],
72
+
73
+ "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],
74
+
75
+ "undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="],
76
+
77
+ "zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="],
78
+ }
79
+ }
package/package.json ADDED
@@ -0,0 +1,26 @@
1
+ {
2
+ "name": "crack-code",
3
+ "module": "src/index.ts",
4
+ "version": "0.1.0",
5
+ "scripts": {
6
+ "dev": "bun run src/index.ts"
7
+ },
8
+ "type": "module",
9
+ "devDependencies": {
10
+ "@types/bun": "latest",
11
+ "@types/gradient-string": "^1.1.6"
12
+ },
13
+ "peerDependencies": {
14
+ "typescript": "^5"
15
+ },
16
+ "dependencies": {
17
+ "@ai-sdk/anthropic": "^3.0.54",
18
+ "@ai-sdk/google": "^3.0.37",
19
+ "@ai-sdk/openai": "^3.0.39",
20
+ "ai": "^6.0.111",
21
+ "gradient-string": "^3.0.0",
22
+ "ollama-ai-provider-v2": "^3.3.1",
23
+ "picocolors": "^1.1.1",
24
+ "zod": "^4.3.6"
25
+ }
26
+ }
package/src/agent.ts ADDED
@@ -0,0 +1,104 @@
1
+ import {
2
+ streamText,
3
+ stepCountIs,
4
+ type ModelMessage,
5
+ type LanguageModel,
6
+ } from "ai";
7
+ import type { ToolRegistry } from "./tools/registry.js";
8
+ import type { PermissionManager } from "./permissions/index.js";
9
+
10
+ // --- Types ---
11
+
12
/** Aggregated token accounting for one agent run (reported once at the end). */
export interface TokenUsage {
  inputTokens: number;
  outputTokens: number;
  // Sum of input + output, computed by runAgent.
  totalTokens: number;
}

/** Optional observer hooks fired while runAgent drains the model stream. */
export interface AgentCallbacks {
  /** Incremental assistant text as it streams in. */
  onText?: (delta: string) => void;
  /** A tool call was emitted by the model (before its result arrives). */
  onToolStart?: (name: string, input: unknown) => void;
  /** A tool result arrived; result is JSON-stringified when not already a string. */
  onToolEnd?: (name: string, result: string) => void;
  /** A model step finished; stepNumber is a 1-based running counter. */
  onStepComplete?: (stepNumber: number) => void;
  /** Final token usage for the whole run, fired after the stream ends. */
  onUsage?: (usage: TokenUsage) => void;
  /** A stream-level error event was observed (message text only). */
  onError?: (error: string) => void;
}

/** Configuration for a single runAgent invocation. */
export interface AgentOptions {
  model: LanguageModel;
  tools: ToolRegistry;
  permissions: PermissionManager;
  systemPrompt: string;
  maxSteps?: number; // default 30 (applied in runAgent)
  maxTokens?: number; // default 16384 output tokens (applied in runAgent)
}
35
+
36
+ // --- Agent ---
37
+
38
+ export async function runAgent(
39
+ messages: ModelMessage[],
40
+ opts: AgentOptions,
41
+ callbacks: AgentCallbacks = {},
42
+ ): Promise<ModelMessage[]> {
43
+ const sdkTools = opts.tools.toAISDKTools(opts.permissions);
44
+
45
+ const result = streamText({
46
+ model: opts.model,
47
+ system: opts.systemPrompt,
48
+ messages,
49
+ tools: sdkTools,
50
+ stopWhen: stepCountIs(opts.maxSteps ?? 30),
51
+ maxOutputTokens: opts.maxTokens ?? 16384,
52
+ });
53
+
54
+ let stepCount = 0;
55
+
56
+ for await (const event of result.fullStream) {
57
+ switch (event.type) {
58
+ case "text-delta":
59
+ callbacks.onText?.(event.text);
60
+ break;
61
+
62
+ case "tool-call":
63
+ callbacks.onToolStart?.(event.toolName, event.input);
64
+ break;
65
+
66
+ case "tool-result": {
67
+ const text =
68
+ typeof event.output === "string"
69
+ ? event.output
70
+ : JSON.stringify(event.output);
71
+ callbacks.onToolEnd?.(event.toolName, text);
72
+ break;
73
+ }
74
+
75
+ case "finish-step":
76
+ stepCount++;
77
+ callbacks.onStepComplete?.(stepCount);
78
+ break;
79
+
80
+ case "error":
81
+ callbacks.onError?.(
82
+ event.error instanceof Error
83
+ ? event.error.message
84
+ : String(event.error),
85
+ );
86
+ break;
87
+ }
88
+ }
89
+
90
+ // Resolve final usage across all steps
91
+ const usage = await result.usage;
92
+ if (usage) {
93
+ callbacks.onUsage?.({
94
+ inputTokens: usage.inputTokens ?? 0,
95
+ outputTokens: usage.outputTokens ?? 0,
96
+ totalTokens: (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0),
97
+ });
98
+ }
99
+
100
+ // Return full conversation including AI responses and tool results
101
+ // so the REPL can maintain context across turns
102
+ const response = await result.response;
103
+ return [...messages, ...response.messages];
104
+ }
package/src/config.ts ADDED
@@ -0,0 +1,410 @@
1
+ import { mkdir } from "fs/promises";
2
+ import { homedir } from "os";
3
+ import { join } from "path";
4
+ import pc from "picocolors";
5
+
6
+ import type { ModelInfo } from "./providers/types";
7
+ import { fetchAnthropicModels } from "./providers/anthropic";
8
+ import { fetchGoogleModels } from "./providers/google";
9
+ import { fetchOpenAIModels } from "./providers/openai";
10
+ import { fetchOllamaModels } from "./providers/ollama";
11
+
12
+ import * as readline from "node:readline";
13
+ import { CrackCodeLogo } from "./logo/crack-code";
14
+ import { pastel } from "gradient-string";
15
+
16
/** Fully-resolved runtime configuration: stored config + env + CLI overrides. */
export interface Config {
  provider: "openai" | "google" | "anthropic" | "ollama";
  model: string;
  apiKey: string;

  // generation
  maxTokens: number; // max output tokens per model call
  maxSteps: number; // max agent tool-use steps per turn

  /*
   * permission behavior
   * by-default the editing policy would be false
   * which later can be made true by the user.
   */
  permissionPolicy: "ask" | "skip" | "allow-all" | "deny-all";
  allowEdits: boolean;
  systemPrompt: string; // rebuilt each load from cwd + allowEdits

  // scan settings
  scanPatterns: string[]; // globs to include when scanning
  ignorePatterns: string[]; // globs to exclude when scanning

  // context
  cwd: string; // working directory the agent operates in
}
41
+
42
/** Minimal shape persisted to ~/.crack-code/config.json. */
interface StoredConfig {
  provider: Config["provider"];
  model: string;
  apiKey: string;
  allowEdits?: boolean; // optional — absent in configs written before this field existed
}

/** CLI-supplied overrides; any field present wins over the stored config. */
export interface ConfigOverrides {
  provider?: string; // validated against PROVIDERS in loadConfig
  model?: string;
  apiKey?: string;
  maxTokens?: number;
  maxSteps?: number;
  permissionPolicy?: Config["permissionPolicy"];
  allowEdits?: boolean;
  scanPatterns?: string[];
  ignorePatterns?: string[];
}
60
+
61
// constants
// Persistent config lives under the user's home directory.
const CONFIG_DIR = join(homedir(), ".crack-code");
const CONFIG_PATH = join(CONFIG_DIR, "config.json");

// Supported providers, in the order shown by the setup menu.
const PROVIDERS = ["anthropic", "google", "openai", "ollama"] as const;

// Environment variable consulted for each provider's credential.
// NOTE: for ollama this holds an endpoint URL, not an API key.
const API_KEY_ENV: Record<Config["provider"], string> = {
  anthropic: "ANTHROPIC_API_KEY",
  openai: "OPENAI_API_KEY",
  google: "GOOGLE_GENERATIVE_AI_API_KEY",
  ollama: "OLLAMA_ENDPOINT",
};

// Default globs of files worth scanning for security issues.
const DEFAULT_SCAN_PATTERNS = [
  "**/*.ts",
  "**/*.tsx",
  "**/*.js",
  "**/*.jsx",
  "**/*.py",
  "**/*.go",
  "**/*.rs",
  "**/*.java",
  "**/*.rb",
  "**/*.php",
  "**/*.sol",
  "**/*.yaml",
  "**/*.yml",
  "**/*.toml",
  "**/*.json",
  "**/*.env*",
  "**/Dockerfile*",
  "**/*.tf",
];

// Paths excluded from scans (dependencies, build output, minified assets).
const DEFAULT_IGNORE_PATTERNS = [
  "node_modules/**",
  ".git/**",
  "dist/**",
  "build/**",
  "coverage/**",
  ".next/**",
  "__pycache__/**",
  "*.lock",
  "*.min.js",
  "*.min.css",
  "vendor/**",
  "target/**",
];
109
+
110
+ // Stored config (read/write)
111
+ async function readStoredConfig(): Promise<StoredConfig | null> {
112
+ const file = Bun.file(CONFIG_PATH);
113
+ if (!(await file.exists())) return null;
114
+
115
+ try {
116
+ const data = await file.json();
117
+
118
+ // Validate shape — reject configs from older/different versions
119
+ if (
120
+ typeof data !== "object" ||
121
+ data === null ||
122
+ typeof data.provider !== "string" ||
123
+ typeof data.model !== "string" ||
124
+ typeof data.apiKey !== "string"
125
+ ) {
126
+ return null;
127
+ }
128
+
129
+ return data as StoredConfig;
130
+ } catch {
131
+ return null;
132
+ }
133
+ }
134
+
135
+ async function writeStoredConfig(stored: StoredConfig): Promise<any> {
136
+ await mkdir(CONFIG_DIR, { recursive: true });
137
+ await Bun.write(CONFIG_PATH, JSON.stringify(stored, null, 2) + "\n");
138
+ }
139
+
140
+ // function for fetching models from the providers api
141
+ async function fetchModels(
142
+ provider: Config["provider"],
143
+ apiKey: string,
144
+ ): Promise<ModelInfo[]> {
145
+ try {
146
+ switch (provider) {
147
+ case "anthropic":
148
+ return await fetchAnthropicModels(apiKey);
149
+ case "google":
150
+ return await fetchGoogleModels(apiKey);
151
+ case "openai":
152
+ return await fetchOpenAIModels(apiKey);
153
+ case "ollama":
154
+ return await fetchOllamaModels(apiKey); // Fetching the available models that supports tool calling.
155
+ }
156
+ } catch (e: any) {
157
+ console.log(pc.red(`\n Could not fetch models: ${e.message}\n`));
158
+ return [];
159
+ }
160
+ }
161
+
162
+ function prompt(question: string): Promise<string> {
163
+ const rl = readline.createInterface({
164
+ input: process.stdin,
165
+ output: process.stdout,
166
+ });
167
+ return new Promise((resolve) => {
168
+ rl.question(question, (answer) => {
169
+ rl.close();
170
+ resolve(answer.trim());
171
+ });
172
+ });
173
+ }
174
+
175
+ function spinner(text: string): { stop: () => void } {
176
+ const frames = ["", "", "", "", "", "", ""];
177
+ let i = 0;
178
+ const id = setInterval(() => {
179
+ process.stdout.write(
180
+ `\r\x1b[90m${frames[i++ % frames.length]} ${text}\x1b[0m`,
181
+ );
182
+ }, 80);
183
+ return {
184
+ stop: () => {
185
+ clearInterval(id);
186
+ process.stdout.write(`\r\x1b[K`); // clear line
187
+ },
188
+ };
189
+ }
190
+
191
+ // first run startup
192
// first run startup
/**
 * Interactive first-run wizard: picks a provider, resolves an API key
 * (offering the matching env var if set), fetches and selects a model,
 * then persists and returns the resulting StoredConfig.
 * Throws if the user supplies no API key, or no model when the model
 * list could not be fetched.
 */
async function firstRunSetup(): Promise<StoredConfig> {
  console.log(pastel(CrackCodeLogo()), "\n");
  console.log("This will be saved to ~/.crack-code/config.json\n");
  console.log("You can change it anytime with: crack-code --setup");

  // Provider setup
  console.log("\x1b[1mSelect a provider:\x1b[0m");
  PROVIDERS.forEach((p, i) => {
    console.log(` \x1b[36m${i + 1}\x1b[0m) ${p}`);
  });

  let providerIdx = 0;
  while (true) {
    const answer = await prompt("\nProvider [1]: ");
    // Empty answer accepts the default (option 1); NaN from a bad parse
    // fails the range check below and re-prompts.
    providerIdx = answer ? parseInt(answer, 10) - 1 : 0;
    if (providerIdx >= 0 && providerIdx < PROVIDERS.length) break;
    console.log("\x1b[31mInvalid choice.\x1b[0m");
  }
  const provider = PROVIDERS[providerIdx]!;

  // API key
  const envKey = process.env[API_KEY_ENV[provider]];
  let apiKey: string;

  if (envKey) {
    // Show only the first 8 and last 4 characters of the env key.
    const masked = envKey.slice(0, 8) + "..." + envKey.slice(-4);
    const useEnv = await prompt(
      `\nFound ${API_KEY_ENV[provider]} (${masked}). Use it? [Y/n]: `,
    );
    // Empty answer defaults to "yes".
    if (!useEnv || useEnv.toLowerCase() === "y") {
      apiKey = envKey;
    } else {
      apiKey = await prompt("Enter API key: ");
    }
  } else {
    apiKey = await prompt(`\nEnter your ${provider} API key: `);
  }

  if (!apiKey) {
    throw new Error("API key is required.");
  }

  const loading = spinner("Fetching available models...");
  const models = await fetchModels(provider, apiKey);
  loading.stop();

  let model: string;

  if (models.length > 0) {
    console.log(`\n\x1b[1mAvailable models:\x1b[0m`);

    // Show paginated if too many
    const display = models.slice(0, 30);
    display.forEach((m, i) => {
      const label =
        m.name !== m.id ? `${m.id} \x1b[90m(${m.name})\x1b[0m` : m.id;
      console.log(` \x1b[36m${String(i + 1).padStart(2)}\x1b[0m) ${label}`);
    });
    if (models.length > 30) {
      console.log(
        `\x1b[90m ... and ${models.length - 30} more. Enter a custom name to use unlisted models.\x1b[0m`,
      );
    }
    console.log(
      ` \x1b[36m ${display.length + 1}\x1b[0m) Enter custom model name`,
    );

    while (true) {
      const answer = await prompt(`\nModel [1]: `);
      const idx = answer ? parseInt(answer, 10) - 1 : 0;

      // Three accepted forms: a listed index, the "custom name" option,
      // or a model id typed directly.
      if (!isNaN(idx) && idx >= 0 && idx < display.length) {
        model = display[idx]!.id;
        break;
      } else if (idx === display.length) {
        model = await prompt("Enter model name: ");
        if (model) break;
      } else if (answer && isNaN(parseInt(answer, 10))) {
        // User typed a model name directly
        model = answer;
        break;
      }
      console.log("\x1b[31mInvalid choice.\x1b[0m");
    }
  } else {
    // Fallback — API fetch failed, ask for manual input
    console.log(
      "\n\x1b[33mCouldn't fetch models. Enter a model name manually.\x1b[0m",
    );
    model = await prompt("Model: ");
    if (!model) {
      throw new Error("Model is required.");
    }
  }
  const stored: StoredConfig = { provider, model, apiKey };
  await writeStoredConfig(stored);

  console.log(`\n\x1b[32m✓ Saved: provider=${provider}, model=${model}\x1b[0m`);
  console.log(`\x1b[90m ${CONFIG_PATH}\x1b[0m\n`);

  return stored;
}
294
+
295
+ function buildSystemPrompt(cwd: string, allowEdits: boolean): string {
296
+ const lines = [
297
+ "You are Crack Code — a security-focused code analysis assistant running in the user's terminal.",
298
+ "",
299
+ `Working directory: ${cwd}`,
300
+ "",
301
+ "## Your Role",
302
+ "Analyze codebases to find security vulnerabilities, bugs, logic flaws, and potential exploits.",
303
+ "You think like an attacker but report like a senior security engineer.",
304
+ "",
305
+ "## What You Look For",
306
+ "- Injection vulnerabilities (SQL, XSS, command injection, path traversal)",
307
+ "- Authentication & authorization flaws",
308
+ "- Hardcoded secrets, API keys, credentials",
309
+ "- Insecure cryptography or hashing",
310
+ "- Race conditions and TOCTOU bugs",
311
+ "- Unsafe deserialization",
312
+ "- Missing input validation and sanitization",
313
+ "- Insecure dependencies or configurations",
314
+ "- Business logic flaws",
315
+ "- Information leakage (error messages, stack traces, debug endpoints)",
316
+ "- SSRF, CSRF, open redirects",
317
+ "- Improper error handling",
318
+ "- Memory safety issues (buffer overflows, use-after-free) where applicable",
319
+ "",
320
+ "## How You Report",
321
+ "For each finding:",
322
+ "1. **Severity** — CRITICAL / HIGH / MEDIUM / LOW / INFO",
323
+ "2. **File & Line** — exact location",
324
+ "3. **Vulnerable Code** — show the actual problematic code",
325
+ "4. **Explanation** — what the vulnerability is and why it matters",
326
+ "5. **Attack Scenario** — how an attacker could exploit this",
327
+ "6. **Fix** — concrete code showing the remediation",
328
+ "",
329
+ "## Rules",
330
+ "- Always read the actual source files before making claims. Never guess.",
331
+ "- Start by understanding the project structure (list files, read configs).",
332
+ "- Prioritize findings by severity. CRITICAL and HIGH first.",
333
+ "- Be precise. Cite exact file paths and line numbers.",
334
+ "- No false positives. If you're unsure, say so.",
335
+ "- When showing fixes, show complete corrected code, not just diffs.",
336
+ ];
337
+
338
+ if (!allowEdits) {
339
+ lines.push(
340
+ "",
341
+ "## Mode: READ-ONLY",
342
+ "You are in read-only mode. You may read files and run non-destructive commands.",
343
+ "Do NOT write files or run commands that modify the filesystem.",
344
+ "Show fixes as code suggestions only.",
345
+ );
346
+ } else {
347
+ lines.push(
348
+ "",
349
+ "## Mode: EDIT ENABLED",
350
+ "The user has enabled edits. You may apply fixes directly when asked.",
351
+ "Always show the fix and get confirmation before writing.",
352
+ );
353
+ }
354
+
355
+ return lines.join("\n");
356
+ }
357
+
358
+ export async function loadConfig(
359
+ overrides: ConfigOverrides = {},
360
+ ): Promise<Config> {
361
+ let stored = await readStoredConfig();
362
+ if (!stored) {
363
+ stored = await firstRunSetup();
364
+ }
365
+
366
+ const provider = (overrides.provider ??
367
+ stored.provider) as Config["provider"];
368
+ if (!PROVIDERS.includes(provider)) {
369
+ throw new Error(
370
+ `Unknown provider "${provider}". Use: ${PROVIDERS.join(", ")}`,
371
+ );
372
+ }
373
+
374
+ const apiKey =
375
+ overrides.apiKey ?? stored.apiKey ?? process.env[API_KEY_ENV[provider]];
376
+ if (!apiKey) {
377
+ throw new Error(`No API key found. Run: crack-code --setup`);
378
+ }
379
+
380
+ const model = overrides.model ?? stored.model;
381
+ const allowEdits = overrides.allowEdits ?? stored.allowEdits ?? false;
382
+ const cwd = process.cwd();
383
+
384
+ return {
385
+ provider,
386
+ model,
387
+ apiKey,
388
+ maxTokens: overrides.maxTokens ?? 16384,
389
+ maxSteps: overrides.maxSteps ?? 30,
390
+ permissionPolicy: overrides.permissionPolicy ?? "ask",
391
+ allowEdits,
392
+ systemPrompt: buildSystemPrompt(cwd, allowEdits),
393
+ scanPatterns: overrides.scanPatterns ?? DEFAULT_SCAN_PATTERNS,
394
+ ignorePatterns: overrides.ignorePatterns ?? DEFAULT_IGNORE_PATTERNS,
395
+ cwd,
396
+ };
397
+ }
398
+
399
/** Re-runs the interactive setup wizard (used by `crack-code --setup`). */
export async function runSetup(): Promise<void> {
  await firstRunSetup();
}
402
+
403
+ export async function updateStoredConfig(
404
+ updates: Partial<StoredConfig>,
405
+ ): Promise<void> {
406
+ const existing = (await readStoredConfig()) ?? ({} as StoredConfig);
407
+ const merged = { ...existing, ...updates };
408
+ await writeStoredConfig(merged);
409
+ console.log(`\x1b[32m✓ Config updated\x1b[0m`);
410
+ }