skrypt-ai 0.3.3 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/auth/index.d.ts +0 -1
- package/dist/auth/index.js +3 -5
- package/dist/autofix/index.js +15 -3
- package/dist/cli.js +19 -4
- package/dist/commands/check-links.js +164 -174
- package/dist/commands/deploy.js +5 -2
- package/dist/commands/generate.js +206 -199
- package/dist/commands/i18n.js +3 -20
- package/dist/commands/init.js +47 -40
- package/dist/commands/lint.js +3 -20
- package/dist/commands/mcp.js +125 -122
- package/dist/commands/monitor.js +125 -108
- package/dist/commands/review-pr.js +1 -1
- package/dist/commands/sdk.js +1 -1
- package/dist/config/loader.js +21 -2
- package/dist/generator/organizer.d.ts +3 -0
- package/dist/generator/organizer.js +4 -9
- package/dist/generator/writer.js +2 -10
- package/dist/github/pr-comments.js +21 -8
- package/dist/plugins/index.js +1 -0
- package/dist/scanner/index.js +8 -2
- package/dist/template/docs.json +2 -1
- package/dist/template/next.config.mjs +3 -1
- package/dist/template/package.json +17 -14
- package/dist/template/public/favicon.svg +4 -0
- package/dist/template/public/search-index.json +1 -1
- package/dist/template/scripts/build-search-index.mjs +120 -25
- package/dist/template/src/app/api/chat/route.ts +11 -3
- package/dist/template/src/app/docs/README.md +28 -0
- package/dist/template/src/app/docs/[...slug]/page.tsx +141 -14
- package/dist/template/src/app/docs/auth/page.mdx +589 -0
- package/dist/template/src/app/docs/autofix/page.mdx +624 -0
- package/dist/template/src/app/docs/cli/page.mdx +217 -0
- package/dist/template/src/app/docs/config/page.mdx +428 -0
- package/dist/template/src/app/docs/configuration/page.mdx +86 -0
- package/dist/template/src/app/docs/deployment/page.mdx +112 -0
- package/dist/template/src/app/docs/error.tsx +20 -0
- package/dist/template/src/app/docs/generator/generator.md +504 -0
- package/dist/template/src/app/docs/generator/organizer.md +779 -0
- package/dist/template/src/app/docs/generator/page.mdx +613 -0
- package/dist/template/src/app/docs/github/page.mdx +502 -0
- package/dist/template/src/app/docs/llm/anthropic-client.md +549 -0
- package/dist/template/src/app/docs/llm/index.md +471 -0
- package/dist/template/src/app/docs/llm/page.mdx +428 -0
- package/dist/template/src/app/docs/llms-full.md +256 -0
- package/dist/template/src/app/docs/llms.txt +2971 -0
- package/dist/template/src/app/docs/not-found.tsx +23 -0
- package/dist/template/src/app/docs/page.mdx +0 -3
- package/dist/template/src/app/docs/plugins/page.mdx +1793 -0
- package/dist/template/src/app/docs/pro/page.mdx +121 -0
- package/dist/template/src/app/docs/quickstart/page.mdx +93 -0
- package/dist/template/src/app/docs/scanner/content-type.md +599 -0
- package/dist/template/src/app/docs/scanner/index.md +212 -0
- package/dist/template/src/app/docs/scanner/page.mdx +307 -0
- package/dist/template/src/app/docs/scanner/python.md +469 -0
- package/dist/template/src/app/docs/scanner/python_parser.md +1056 -0
- package/dist/template/src/app/docs/scanner/rust.md +325 -0
- package/dist/template/src/app/docs/scanner/typescript.md +201 -0
- package/dist/template/src/app/error.tsx +3 -3
- package/dist/template/src/app/icon.tsx +29 -0
- package/dist/template/src/app/layout.tsx +57 -7
- package/dist/template/src/app/not-found.tsx +35 -0
- package/dist/template/src/app/page.tsx +95 -11
- package/dist/template/src/components/ai-chat.tsx +26 -21
- package/dist/template/src/components/breadcrumbs.tsx +56 -12
- package/dist/template/src/components/copy-button.tsx +17 -3
- package/dist/template/src/components/docs-layout.tsx +202 -8
- package/dist/template/src/components/feedback.tsx +4 -2
- package/dist/template/src/components/footer.tsx +42 -0
- package/dist/template/src/components/header.tsx +56 -20
- package/dist/template/src/components/mdx/accordion.tsx +17 -13
- package/dist/template/src/components/mdx/callout.tsx +50 -37
- package/dist/template/src/components/mdx/card.tsx +24 -12
- package/dist/template/src/components/mdx/code-block.tsx +17 -3
- package/dist/template/src/components/mdx/code-group.tsx +78 -18
- package/dist/template/src/components/mdx/code-playground.tsx +3 -0
- package/dist/template/src/components/mdx/go-playground.tsx +3 -0
- package/dist/template/src/components/mdx/highlighted-code.tsx +178 -38
- package/dist/template/src/components/mdx/python-playground.tsx +2 -0
- package/dist/template/src/components/mdx/steps.tsx +6 -6
- package/dist/template/src/components/mdx/tabs.tsx +76 -8
- package/dist/template/src/components/page-header.tsx +19 -0
- package/dist/template/src/components/scroll-to-top.tsx +33 -0
- package/dist/template/src/components/search-dialog.tsx +251 -57
- package/dist/template/src/components/sidebar.tsx +137 -77
- package/dist/template/src/components/table-of-contents.tsx +29 -13
- package/dist/template/src/lib/highlight.ts +90 -31
- package/dist/template/src/lib/search.ts +14 -4
- package/dist/template/src/lib/theme-utils.ts +140 -0
- package/dist/template/src/styles/globals.css +397 -84
- package/dist/template/src/types/remark-gfm.d.ts +2 -0
- package/dist/utils/files.d.ts +9 -0
- package/dist/utils/files.js +33 -0
- package/dist/utils/validation.d.ts +4 -0
- package/dist/utils/validation.js +38 -0
- package/package.json +1 -4

@@ -0,0 +1,471 @@

# Index.ts

## Functions

### `createLLMClient`

```typescript
function createLLMClient(config: {
  provider: LLMProvider
  model: string
  baseUrl?: string
  timeout?: number
  maxRetries?: number
}): LLMClient
```

Use this to initialize a typed LLM client for a specific AI provider (OpenAI, Anthropic, etc.) with retry and timeout handling built in.

This is your entry point for all LLM interactions — call it once at startup and reuse the returned `LLMClient` throughout your application.
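
As a sketch of that create-once pattern, a module-level singleton keeps a single shared client; the `llm.ts` filename and the relative import path below are placeholders, not part of this package's documented API:

```typescript llm.ts
// Sketch only: construct the client once at module load and share it.
// The import path is a placeholder; point it at wherever createLLMClient
// is exported in your project.
import { createLLMClient, type LLMClient } from "./create-llm-client"

export const llm: LLMClient = createLLMClient({
  provider: "openai",
  model: "gpt-4o",
})
```

Any module can then `import { llm } from "./llm"` and call `llm.chat(...)` without constructing a second client.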

#### Parameters

| Name | Type | Required | Description |
|------|------|----------|-------------|
| `config.provider` | `LLMProvider` | Yes | The AI provider to use. Accepted values: `"openai"`, `"anthropic"`, `"azure"` |
| `config.model` | `string` | Yes | The model identifier to use (e.g. `"gpt-4o"`, `"claude-3-5-sonnet-20241022"`) |
| `config.baseUrl` | `string` | No | Override the default API endpoint. Useful for proxies or self-hosted models |
| `config.timeout` | `number` | No | Request timeout in milliseconds. Defaults to `30000` (30s) |
| `config.maxRetries` | `number` | No | Number of times to retry on transient failures. Defaults to `3` |

#### Returns

Returns an `LLMClient` instance with methods to send completions and chat messages to the configured provider. The client is pre-configured with your chosen model, timeout, and retry policy.

| Scenario | Result |
|----------|--------|
| Valid config | Returns a ready-to-use `LLMClient` |
| Unknown provider | Throws an error at construction time |
| Network failure (within retries) | Automatically retried up to `maxRetries` times |
| Timeout exceeded | Throws a timeout error after `timeout` ms |

**Example:**

```typescript example.ts
// --- Inline types (do not import from autodocs) ---
type LLMProvider = "openai" | "anthropic" | "azure"

interface LLMClientConfig {
  provider: LLMProvider
  model: string
  baseUrl?: string
  timeout?: number
  maxRetries?: number
}

interface ChatMessage {
  role: "user" | "assistant" | "system"
  content: string
}

interface LLMClient {
  chat: (messages: ChatMessage[]) => Promise<{ content: string; model: string; usage: { promptTokens: number; completionTokens: number } }>
  provider: LLMProvider
  model: string
}

// --- Simulated createLLMClient (mirrors real behavior) ---
function createLLMClient(config: LLMClientConfig): LLMClient {
  const SUPPORTED_PROVIDERS: LLMProvider[] = ["openai", "anthropic", "azure"]

  if (!SUPPORTED_PROVIDERS.includes(config.provider)) {
    throw new Error(`Unsupported provider: "${config.provider}". Must be one of: ${SUPPORTED_PROVIDERS.join(", ")}`)
  }

  const timeout = config.timeout ?? 30_000
  const maxRetries = config.maxRetries ?? 3
  const baseUrl = config.baseUrl ?? `https://api.${config.provider}.com/v1`

  console.log(`[LLMClient] Initialized — provider: ${config.provider}, model: ${config.model}`)
  console.log(`[LLMClient] baseUrl: ${baseUrl}, timeout: ${timeout}ms, maxRetries: ${maxRetries}`)

  return {
    provider: config.provider,
    model: config.model,
    chat: async (messages: ChatMessage[]) => {
      // Simulated API response
      return {
        content: `Hello! You asked: "${messages.at(-1)?.content}"`,
        model: config.model,
        usage: { promptTokens: 24, completionTokens: 18 },
      }
    },
  }
}

// --- Usage ---
async function main() {
  try {
    // Basic setup — reads API key from environment
    // (unused by this simulated client; a real client would send it)
    const apiKey = process.env.OPENAI_API_KEY || "sk-your-api-key-here"
    console.log(`Using API key: ${apiKey.slice(0, 6)}...`)

    const client = createLLMClient({
      provider: "openai",
      model: "gpt-4o",
      timeout: 15_000, // 15 second timeout
      maxRetries: 2, // retry twice on failure
    })

    const response = await client.chat([
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "What is the capital of France?" },
    ])

    console.log("\n--- Response ---")
    console.log("Content: ", response.content)
    console.log("Model: ", response.model)
    console.log("Usage: ", response.usage)
    // Output:
    // Content: Hello! You asked: "What is the capital of France?"
    // Model: gpt-4o
    // Usage: { promptTokens: 24, completionTokens: 18 }

    // Example: custom baseUrl for a proxy or Azure endpoint
    const azureClient = createLLMClient({
      provider: "azure",
      model: "gpt-4o",
      baseUrl: process.env.AZURE_OPENAI_ENDPOINT || "https://my-org.openai.azure.com/v1",
      timeout: 20_000,
      maxRetries: 3,
    })

    console.log(`\nAzure client ready — provider: ${azureClient.provider}, model: ${azureClient.model}`)
    // Output: Azure client ready — provider: azure, model: gpt-4o

  } catch (error) {
    if (error instanceof Error) {
      console.error("Failed to create LLM client:", error.message)
    }
  }
}

main()
```

### `generateDocumentation`

```typescript
async function generateDocumentation(client: LLMClient, element: ElementContext, options?: { multiLanguage?: boolean }): Promise<GeneratedDocResult>
```

Use this to automatically generate documentation for a code element (function, class, method, etc.) using an LLM client, with optional multi-language support.

Takes a configured LLM client and a code element's context, then returns structured documentation output — useful for automating doc generation pipelines or IDE tooling.
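
For the pipeline case, a minimal batch sketch might look like this. The `documentAll` helper, the slimmed-down type shapes, and the `docs/` output path are illustrative assumptions, not part of this package:

```typescript
import { mkdir, writeFile } from 'node:fs/promises'

// Slimmed-down stand-ins for the shapes used on this page (assumptions;
// the full inline definitions appear in the example below)
interface ElementContext { name: string; kind: string; signature: string; sourceCode: string; filePath: string; language: string }
interface GeneratedDocResult { summary: string; returns: string }
interface LLMClient { complete: (prompt: string) => Promise<string> }
declare function generateDocumentation(client: LLMClient, element: ElementContext, options?: { multiLanguage?: boolean }): Promise<GeneratedDocResult>

// Document every scanned element sequentially, one markdown file apiece
async function documentAll(client: LLMClient, elements: ElementContext[]): Promise<void> {
  await mkdir('docs', { recursive: true })
  for (const element of elements) {
    const doc = await generateDocumentation(client, element)
    const markdown = `# \`${element.name}\`\n\n${doc.summary}\n\n**Returns:** ${doc.returns}\n`
    await writeFile(`docs/${element.name}.md`, markdown, 'utf8')
  }
}
```

Sequential calls keep provider rate limits manageable; a bounded concurrent batch is a straightforward swap if your quota allows it.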

#### Parameters

| Name | Type | Required | Description |
|------|------|----------|-------------|
| `client` | `LLMClient` | Yes | A configured LLM client instance (e.g. OpenAI-compatible or Anthropic) used to generate the documentation |
| `element` | `ElementContext` | Yes | Context object describing the code element — includes its name, signature, source code, and metadata |
| `options` | `{ multiLanguage?: boolean }` | No | Optional config. Set `multiLanguage: false` to generate examples in a single language; defaults to `true`, which requests examples in multiple programming languages |

#### Returns

Returns a `Promise<GeneratedDocResult>` that resolves to a structured documentation result. Typically includes:

| Field | Description |
|-------|-------------|
| `summary` | A concise description of what the element does |
| `params` | Documentation for each parameter |
| `returns` | Description of the return value |
| `examples` | Generated usage examples (may include multiple languages if `multiLanguage` is enabled) |

Rejects with an error if the LLM client fails to respond or the element context is malformed.

**Example:**

```typescript example.ts
// ── Inline type definitions (no external imports needed) ──────────────────────

type ElementKind = 'function' | 'class' | 'method' | 'interface'

interface ElementContext {
  name: string
  kind: ElementKind
  signature: string
  sourceCode: string
  filePath: string
  language: string
}

interface GeneratedDocResult {
  summary: string
  params: Array<{ name: string; description: string }>
  returns: string
  examples: Array<{ language: string; code: string }>
  rawOutput: string
}

interface LLMClient {
  complete: (prompt: string) => Promise<string>
}

// ── Simulated generateDocumentation implementation ────────────────────────────

async function generateDocumentation(
  client: LLMClient,
  element: ElementContext,
  options?: { multiLanguage?: boolean }
): Promise<GeneratedDocResult> {
  const useMultiLang = options?.multiLanguage ?? true

  const prompt = `
Generate documentation for the following ${element.kind}:

Name: ${element.name}
Signature: ${element.signature}
Language: ${element.language}
Source:
${element.sourceCode}

Provide: summary, parameter descriptions, return value, and usage examples.
${useMultiLang ? 'Include examples in TypeScript and Python.' : 'Include examples in TypeScript only.'}
`.trim()

  const rawOutput = await client.complete(prompt)

  // Simulated parse: a real implementation would extract these fields
  // from rawOutput; representative values are returned here instead
  return {
    summary: `Calculates the total price of items in a cart after applying a discount.`,
    params: [
      { name: 'items', description: 'Array of cart items with price and quantity' },
      { name: 'discount', description: 'Fractional discount to apply, e.g. 0.1 for 10% off' },
    ],
    returns: 'The final total as a number, rounded to 2 decimal places.',
    examples: [
      {
        language: 'typescript',
        code: `const total = calculateTotal([{ price: 9.99, qty: 2 }], 0.1)\nconsole.log(total) // 17.98`,
      },
      ...(useMultiLang
        ? [{
            language: 'python',
            code: `total = calculate_total([{"price": 9.99, "qty": 2}], 0.1)\nprint(total) # 17.98`,
          }]
        : []),
    ],
    rawOutput,
  }
}

// ── Mock LLM client (replace with a real client in production) ────────────────

function createMockLLMClient(apiKey: string): LLMClient {
  return {
    complete: async (prompt: string): Promise<string> => {
      // In production this would call OpenAI / Anthropic / etc.
      console.log(`[LLMClient] Sending prompt (${prompt.length} chars) to API...`)
      return `Generated documentation response from LLM using key: ${apiKey.slice(0, 8)}...`
    },
  }
}

// ── Main usage example ────────────────────────────────────────────────────────

async function main() {
  const client = createMockLLMClient(
    process.env.OPENAI_API_KEY || 'sk-proj-abc123xyz'
  )

  const element: ElementContext = {
    name: 'calculateTotal',
    kind: 'function',
    signature: 'function calculateTotal(items: CartItem[], discount: number): number',
    language: 'typescript',
    filePath: 'src/cart/pricing.ts',
    sourceCode: `
function calculateTotal(items: CartItem[], discount: number): number {
  const subtotal = items.reduce((sum, item) => sum + item.price * item.qty, 0)
  return Math.round(subtotal * (1 - discount) * 100) / 100
}
`.trim(),
  }

  try {
    // Generate docs with multi-language examples (default)
    const result = await generateDocumentation(client, element, {
      multiLanguage: true,
    })

    console.log('✅ Documentation generated:\n')
    console.log('Summary: ', result.summary)
    console.log('Returns: ', result.returns)
    console.log('Params:')
    result.params.forEach(p => console.log(`  - ${p.name}: ${p.description}`))
    console.log('Examples:')
    result.examples.forEach(ex => {
      console.log(`\n  [${ex.language}]\n  ${ex.code}`)
    })

    // Expected output:
    // ✅ Documentation generated:
    // Summary: Calculates the total price of items in a cart after applying a discount.
    // Returns: The final total as a number, rounded to 2 decimal places.
    // Params:
    //   - items: Array of cart items with price and quantity
    //   - discount: Fractional discount to apply, e.g. 0.1 for 10% off
    // Examples:
    //   [typescript] const total = calculateTotal(...)
    //   [python] total = calculate_total(...)

  } catch (error) {
    console.error('❌ Documentation generation failed:', error)
    process.exit(1)
  }
}

main()
```

### `fixCodeSample`

```typescript
async function fixCodeSample(client: LLMClient, code: string, error: string, context: string, iteration: number = 1): Promise<string>
```

Use this to automatically repair broken code samples by sending them to an LLM with the error context and receiving a corrected version. Ideal for iterative code generation pipelines where generated code fails validation or execution.

#### Parameters

| Name | Type | Required | Description |
|------|------|----------|-------------|
| `client` | `LLMClient` | Yes | An initialized LLM client instance used to send the fix request |
| `code` | `string` | Yes | The broken code sample that needs to be fixed |
| `error` | `string` | Yes | The error message or stack trace produced by the broken code |
| `context` | `string` | Yes | Additional context about what the code is supposed to do (e.g., function docs, expected behavior) |
| `iteration` | `number` | No | Which fix attempt this is (default: `1`). Used to apply progressively smarter fix strategies on repeated failures |

#### Returns

Returns a `Promise<string>` containing the corrected code sample. The returned string is the LLM's best attempt at fixing the code given the error and context provided.

#### Notes

- The `iteration` parameter enables **progressive fix strategies** — later iterations may prompt the LLM differently (e.g., more conservative rewrites, stripping problematic patterns) to break out of repeated failure loops.
- Pass the output back into your validation/execution step and call `fixCodeSample` again with `iteration + 1` if it still fails, as in the sketch after this list.
- Keep `context` focused and concise — include the function signature, purpose, and any constraints the code must satisfy.

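
A minimal sketch of that loop, assuming a `validate` helper of your own (a compiler run, linter, or test execution) that resolves to an error string, or `null` once the sample passes:

```typescript
// Assumed helpers: `validate` is your own checker; `fixCodeSample` and a
// message-based LLMClient are as documented on this page.
interface LLMMessage { role: 'system' | 'user' | 'assistant'; content: string }
interface LLMClient { complete(messages: LLMMessage[]): Promise<string> }
declare function validate(code: string): Promise<string | null>
declare function fixCodeSample(client: LLMClient, code: string, error: string, context: string, iteration?: number): Promise<string>

async function fixUntilValid(client: LLMClient, code: string, context: string, maxIterations = 3): Promise<string> {
  let current = code
  for (let iteration = 1; iteration <= maxIterations; iteration++) {
    const error = await validate(current)
    if (error === null) return current // sample already passes; done
    current = await fixCodeSample(client, current, error, context, iteration)
  }
  // One last check after the final fix attempt
  if ((await validate(current)) === null) return current
  throw new Error(`Code sample still failing after ${maxIterations} fix attempts`)
}
```
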
**Example:**

```typescript example.ts
// --- Inline types (do not import from autodocs) ---
interface LLMMessage {
  role: 'system' | 'user' | 'assistant'
  content: string
}

interface LLMClient {
  complete(messages: LLMMessage[]): Promise<string>
}

// --- Inline implementation of fixCodeSample ---
async function fixCodeSample(
  client: LLMClient,
  code: string,
  error: string,
  context: string,
  iteration: number = 1
): Promise<string> {
  const strategy =
    iteration === 1
      ? 'Fix the specific error while keeping the structure intact.'
      : iteration === 2
        ? 'Consider a more significant rewrite to avoid the root cause of the error.'
        : 'Simplify the code as much as possible and avoid any patterns that could cause the error.'

  const messages: LLMMessage[] = [
    {
      role: 'system',
      content:
        'You are an expert code repair assistant. Return ONLY the corrected code with no explanation or markdown fences.',
    },
    {
      role: 'user',
      content: `The following code sample has an error. Fix it.

## Context
${context}

## Broken Code
\`\`\`typescript
${code}
\`\`\`

## Error
${error}

## Fix Strategy (attempt ${iteration})
${strategy}

Return only the corrected code.`,
    },
  ]

  const fixed = await client.complete(messages)
  return fixed.trim()
}

// --- Mock LLM client (simulates an OpenAI-compatible response) ---
function createMockLLMClient(apiKey: string): LLMClient {
  return {
    async complete(messages: LLMMessage[]): Promise<string> {
      // In production this would call OpenAI/Anthropic/etc.
      console.log(`[LLMClient] Sending ${messages.length} messages to API...`)
      console.log(`[LLMClient] Using API key: ${apiKey.slice(0, 8)}...`)

      // Simulate a fixed version of the broken code
      return `
async function fetchUserData(userId: string): Promise<{ id: string; name: string }> {
  const response = await fetch(\`https://api.example.com/users/\${userId}\`)
  if (!response.ok) {
    throw new Error(\`HTTP error: \${response.status}\`)
  }
  return response.json()
}`.trim()
    },
  }
}

// --- Example usage ---
async function main() {
  const client = createMockLLMClient(
    process.env.OPENAI_API_KEY || 'sk-your-api-key-here'
  )

  const brokenCode = `
async function fetchUserData(userId: string) {
  const response = await fetch('https://api.example.com/users/' + userId)
  return response.json() // Missing error handling
  const data = response.text() // Unreachable code
}`.trim()

  const error = `
TypeError: Cannot read properties of undefined (reading 'json')
    at fetchUserData (example.ts:3:20)`.trim()

  const context = `
Function: fetchUserData
Purpose: Fetches a user object by ID from the REST API.
Returns: Promise<{ id: string; name: string }>
Must throw a descriptive error if the HTTP response is not OK.`.trim()

  try {
    console.log('=== Attempt 1: Targeted fix ===')
    const fixedCode = await fixCodeSample(client, brokenCode, error, context, 1)
    console.log('Fixed code:\n', fixedCode)
    // Output: corrected fetchUserData with proper error handling

    console.log('\n=== Attempt 2: Broader rewrite strategy ===')
    const fixedCodeV2 = await fixCodeSample(client, brokenCode, error, context, 2)
    console.log('Fixed code (v2):\n', fixedCodeV2)
    // Output: same fix, but LLM prompted to consider deeper rewrites
  } catch (err) {
    console.error('Failed to fix code sample:', err)
  }
}

main()
```