@polka-codes/cli 0.10.21 → 0.10.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +3990 -353
- package/package.json +4 -4
- package/dist/chunk-2LRQ2QH6.js +0 -1353
- package/dist/chunk-FSNPWI3C.js +0 -128
- package/dist/chunk-HB7PTE3H.js +0 -176
- package/dist/chunk-LLMPMGV3.js +0 -140
- package/dist/chunk-NRDSZGMF.js +0 -675
- package/dist/chunk-UEEU3SCC.js +0 -390
- package/dist/chunk-YPUL66UK.js +0 -277
- package/dist/chunk-ZS4K5RFU.js +0 -176
- package/dist/chunk-ZU4UU65A.js +0 -40
- package/dist/code.workflow-5TAWK2DE.js +0 -10
- package/dist/commit.workflow-Z64PNSTS.js +0 -9
- package/dist/fix.workflow-KLHJU5Z6.js +0 -7
- package/dist/plan.workflow-P2Y6W4FA.js +0 -8
- package/dist/review.workflow-I7RHWKU7.js +0 -8
- package/dist/sdk-client-KBYJRPEG.js +0 -155
package/dist/chunk-2LRQ2QH6.js
DELETED
|
@@ -1,1353 +0,0 @@
|
|
|
1
|
-
// src/workflows/prompts/shared.ts
|
|
2
|
-
function createJsonResponseInstruction(schema) {
|
|
3
|
-
return `Respond with a JSON object in a markdown code block matching this schema:
|
|
4
|
-
\`\`\`json
|
|
5
|
-
${JSON.stringify(schema, null, 2)}
|
|
6
|
-
\`\`\`
|
|
7
|
-
`;
|
|
8
|
-
}
|
|
9
|
-
var TOOL_USAGE_INSTRUCTION = `
|
|
10
|
-
## Action Line
|
|
11
|
-
|
|
12
|
-
Before any tool call, emit a single high-level action line.
|
|
13
|
-
|
|
14
|
-
You MUST follow these style constraints for the action line:
|
|
15
|
-
- NO filler or preambles.
|
|
16
|
-
- DO NOT use "ok", "okay", "alright".
|
|
17
|
-
- DO NOT use first person ("I", "I'm", "I will", "I'll", etc.).
|
|
18
|
-
- NO apologies, hedging, or promises about later work.
|
|
19
|
-
`;
|
|
20
|
-
var MEMORY_USAGE_SECTION = `## Memory Usage
|
|
21
|
-
|
|
22
|
-
You have access to a persistent SQLite-based memory store to track information across sessions. This is particularly useful for managing todos, bugs, decisions, and notes.
|
|
23
|
-
|
|
24
|
-
### Memory Entry Types
|
|
25
|
-
|
|
26
|
-
Memory entries can be organized by type:
|
|
27
|
-
- **todo**: Task items with status (open/done), priority, and tags
|
|
28
|
-
- **bug**: Bug reports with priority and status
|
|
29
|
-
- **decision**: Architectural decisions and rationale
|
|
30
|
-
- **note**: General notes and documentation
|
|
31
|
-
|
|
32
|
-
### Todo List Workflow
|
|
33
|
-
|
|
34
|
-
When working on multi-step tasks, use the memory store to track progress:
|
|
35
|
-
|
|
36
|
-
**Creating a todo item:**
|
|
37
|
-
Use topic names that clearly describe the task. The content should include the full description.
|
|
38
|
-
|
|
39
|
-
**Updating todo status:**
|
|
40
|
-
- Mark todos as done when completed
|
|
41
|
-
- Update descriptions as requirements evolve
|
|
42
|
-
- Add tags for organization (e.g., "bug,urgent", "feature,auth")
|
|
43
|
-
|
|
44
|
-
**Querying todos:**
|
|
45
|
-
- Filter by type: "todo" to see all tasks
|
|
46
|
-
- Filter by status: "open" for pending work
|
|
47
|
-
- Filter by priority: "high" or "critical" for urgent items
|
|
48
|
-
- Search: Find specific todos by keyword
|
|
49
|
-
|
|
50
|
-
### Best Practices
|
|
51
|
-
|
|
52
|
-
- **Use descriptive topic names**: "fix-login-bug" is better than "bug-1"
|
|
53
|
-
- **Set appropriate priorities**: Use "critical", "high", "medium", or "low"
|
|
54
|
-
- **Add relevant tags**: Group related items with tags like "auth", "ui", "backend"
|
|
55
|
-
- **Update status regularly**: Mark items as done when completed
|
|
56
|
-
- **Store context**: Include important decisions in memory for future reference
|
|
57
|
-
- **Memory persists**: All stored information is available across sessions in the current project
|
|
58
|
-
|
|
59
|
-
### Memory Scopes
|
|
60
|
-
|
|
61
|
-
- **Project scope**: When working in a project directory (with .polkacodes.yml), memory is isolated to that project
|
|
62
|
-
- **Global scope**: When working outside a project, memory is shared globally
|
|
63
|
-
`;
|
|
64
|
-
function AGENTS_INSTRUCTION(loadRules) {
|
|
65
|
-
const defaultLoadRules = {
|
|
66
|
-
"AGENTS.md": true,
|
|
67
|
-
"CLAUDE.md": true
|
|
68
|
-
};
|
|
69
|
-
const mergedRules = { ...defaultLoadRules, ...loadRules };
|
|
70
|
-
const enabledFiles = Object.entries(mergedRules).filter(([, enabled]) => enabled).map(([fileName]) => fileName);
|
|
71
|
-
if (enabledFiles.length === 0) {
|
|
72
|
-
return `## Project Instructions
|
|
73
|
-
|
|
74
|
-
Project-specific instruction files are currently disabled via configuration.`;
|
|
75
|
-
}
|
|
76
|
-
const fileList = enabledFiles.join(", ");
|
|
77
|
-
return `## Project Instructions (${fileList})
|
|
78
|
-
|
|
79
|
-
If you are working in a subdirectory, check if there is an ${enabledFiles.join(" or ")} file in that directory or parent directories for specific instructions. These files contain project-specific guidelines and conventions that you must follow.
|
|
80
|
-
|
|
81
|
-
Note: The loading of these files can be controlled via the loadRules configuration option in .polkacodes.yml.
|
|
82
|
-
|
|
83
|
-
Example:
|
|
84
|
-
\`\`\`yaml
|
|
85
|
-
loadRules:
|
|
86
|
-
AGENTS.md: true
|
|
87
|
-
CLAUDE.md: true
|
|
88
|
-
\`\`\`
|
|
89
|
-
`;
|
|
90
|
-
}
|
|
91
|
-
|
|
92
|
-
// src/workflows/prompts/coder.ts
|
|
93
|
-
function getCoderSystemPrompt(loadRules) {
|
|
94
|
-
return `Role: AI developer.
|
|
95
|
-
Goal: Implement the provided plan by writing and modifying code.
|
|
96
|
-
|
|
97
|
-
Your task is to implement the plan created and approved in Phase 1.
|
|
98
|
-
|
|
99
|
-
${MEMORY_USAGE_SECTION}
|
|
100
|
-
|
|
101
|
-
${TOOL_USAGE_INSTRUCTION}
|
|
102
|
-
|
|
103
|
-
${AGENTS_INSTRUCTION(loadRules)}
|
|
104
|
-
|
|
105
|
-
## Implementation Guidelines
|
|
106
|
-
|
|
107
|
-
### 1. Plan Analysis
|
|
108
|
-
|
|
109
|
-
Before starting implementation:
|
|
110
|
-
- Review the plan carefully and understand all requirements
|
|
111
|
-
- Identify dependencies between different parts of the plan
|
|
112
|
-
- Determine if this is a single cohesive task or multiple independent tasks
|
|
113
|
-
- Consider the scope and complexity of the work
|
|
114
|
-
|
|
115
|
-
### 2. Gather Context
|
|
116
|
-
|
|
117
|
-
Before making changes:
|
|
118
|
-
- **Search for similar existing files** to understand patterns and conventions
|
|
119
|
-
- **Read relevant files** to see how similar features are implemented
|
|
120
|
-
- Look for existing tests, utilities, or helpers you can leverage
|
|
121
|
-
- Understand the project structure and naming conventions
|
|
122
|
-
- Verify you have all necessary context to proceed
|
|
123
|
-
|
|
124
|
-
### 3. Implementation Best Practices
|
|
125
|
-
|
|
126
|
-
- **Make incremental changes**: Implement one piece at a time
|
|
127
|
-
- **Follow existing patterns**: Match the style and structure of similar code
|
|
128
|
-
- **Add documentation**: Include comments explaining complex logic
|
|
129
|
-
- **Consider edge cases**: Think about error handling and boundary conditions
|
|
130
|
-
- **Verify as you go**: Test your changes incrementally if possible
|
|
131
|
-
|
|
132
|
-
### 4. Code Quality
|
|
133
|
-
|
|
134
|
-
- Follow the project's existing code style and conventions
|
|
135
|
-
- Use appropriate TypeScript types (avoid 'any' unless necessary)
|
|
136
|
-
- Add JSDoc comments for public APIs and complex functions
|
|
137
|
-
- Ensure proper error handling and validation
|
|
138
|
-
- Keep functions focused and maintainable
|
|
139
|
-
|
|
140
|
-
## Your Task
|
|
141
|
-
|
|
142
|
-
Implement the plan above following these guidelines. Start by:
|
|
143
|
-
1. Analyzing the plan structure
|
|
144
|
-
2. Searching for similar existing code patterns
|
|
145
|
-
3. Proceeding with implementation
|
|
146
|
-
|
|
147
|
-
Please implement all the necessary code changes according to this plan.
|
|
148
|
-
|
|
149
|
-
After making changes, you MUST return a JSON object in a markdown block with either a summary of the changes OR a bailReason if you cannot complete the task.
|
|
150
|
-
|
|
151
|
-
DO NOT save this JSON object to a file. Output it directly in your response.
|
|
152
|
-
|
|
153
|
-
Example for successful implementation:
|
|
154
|
-
${createJsonResponseInstruction({
|
|
155
|
-
summary: "Implemented user authentication with JWT tokens and password hashing.",
|
|
156
|
-
bailReason: null
|
|
157
|
-
})}
|
|
158
|
-
|
|
159
|
-
Example if unable to implement:
|
|
160
|
-
${createJsonResponseInstruction({
|
|
161
|
-
summary: null,
|
|
162
|
-
bailReason: "The plan requires access to external services that are not available in the current environment."
|
|
163
|
-
})}
|
|
164
|
-
`;
|
|
165
|
-
}
|
|
166
|
-
var CODER_SYSTEM_PROMPT = getCoderSystemPrompt();
|
|
167
|
-
function getImplementPrompt(plan) {
|
|
168
|
-
return `## Your Plan
|
|
169
|
-
|
|
170
|
-
<plan>
|
|
171
|
-
${plan}
|
|
172
|
-
</plan>
|
|
173
|
-
`;
|
|
174
|
-
}
|
|
175
|
-
|
|
176
|
-
// src/workflows/prompts/commit.ts
|
|
177
|
-
var COMMIT_MESSAGE_SYSTEM_PROMPT = `Role: Expert git user.
|
|
178
|
-
Goal: Generate a concise and descriptive commit message in conventional commit format based on staged changes.
|
|
179
|
-
|
|
180
|
-
${TOOL_USAGE_INSTRUCTION}
|
|
181
|
-
|
|
182
|
-
You are an expert at writing git commit messages.
|
|
183
|
-
Based on the provided list of staged files in <file_status>, the diff in <diff> and optional user context in <tool_input_context>, generate a concise and descriptive commit message.
|
|
184
|
-
|
|
185
|
-
Follow the conventional commit format.
|
|
186
|
-
|
|
187
|
-
${createJsonResponseInstruction({
|
|
188
|
-
commitMessage: "feat: add new feature\\n\\ndescribe the new feature in more detail"
|
|
189
|
-
})}
|
|
190
|
-
`;
|
|
191
|
-
|
|
192
|
-
// src/workflows/prompts/fix.ts
|
|
193
|
-
var FIX_SYSTEM_PROMPT = `Role: Expert software developer.
|
|
194
|
-
Goal: Fix a failing command by analyzing the error and modifying the code.
|
|
195
|
-
|
|
196
|
-
You are an expert software developer. Your task is to fix a project that is failing a command. You have been provided with the failing command, its output (stdout and stderr), and the exit code. Your goal is to use the available tools to modify the files in the project to make the command pass. Analyze the error, inspect the relevant files, and apply the necessary code changes.
|
|
197
|
-
|
|
198
|
-
${MEMORY_USAGE_SECTION}
|
|
199
|
-
|
|
200
|
-
${TOOL_USAGE_INSTRUCTION}
|
|
201
|
-
|
|
202
|
-
After making changes, you MUST return a JSON object in a markdown block with either a summary of the changes OR a bailReason if you cannot complete the task.
|
|
203
|
-
|
|
204
|
-
DO NOT save this JSON object to a file. Output it directly in your response.
|
|
205
|
-
|
|
206
|
-
Example for successful fix:
|
|
207
|
-
${createJsonResponseInstruction({
|
|
208
|
-
summary: "Fixed the 'add' function in 'math.ts' to correctly handle negative numbers.",
|
|
209
|
-
bailReason: null
|
|
210
|
-
})}
|
|
211
|
-
|
|
212
|
-
Example if unable to fix:
|
|
213
|
-
${createJsonResponseInstruction({
|
|
214
|
-
summary: null,
|
|
215
|
-
bailReason: "Unable to identify the root cause of the error. The error message is ambiguous and requires human investigation."
|
|
216
|
-
})}
|
|
217
|
-
`;
|
|
218
|
-
function getFixUserPrompt(command, exitCode, stdout, stderr, task, prompt) {
|
|
219
|
-
const taskSection = task ? `
|
|
220
|
-
## Task
|
|
221
|
-
|
|
222
|
-
${task}
|
|
223
|
-
` : "";
|
|
224
|
-
const promptSection = prompt ? `
|
|
225
|
-
## User Prompt
|
|
226
|
-
|
|
227
|
-
${prompt}
|
|
228
|
-
` : "";
|
|
229
|
-
return `## Context${taskSection}${promptSection}
|
|
230
|
-
|
|
231
|
-
The following command failed with exit code ${exitCode}:
|
|
232
|
-
\`${command}\`
|
|
233
|
-
|
|
234
|
-
<stdout>
|
|
235
|
-
${stdout || "(empty)"}
|
|
236
|
-
</stdout>
|
|
237
|
-
|
|
238
|
-
<stderr>
|
|
239
|
-
${stderr || "(empty)"}
|
|
240
|
-
</stderr>
|
|
241
|
-
`;
|
|
242
|
-
}
|
|
243
|
-
|
|
244
|
-
// src/workflows/prompts/plan.ts
|
|
245
|
-
import { z } from "zod";
|
|
246
|
-
function getPlanPrompt(task, planContent) {
|
|
247
|
-
const planSection = planContent ? `
|
|
248
|
-
The content of an existing plan file:
|
|
249
|
-
<plan_file>
|
|
250
|
-
${planContent}
|
|
251
|
-
</plan_file>
|
|
252
|
-
` : "";
|
|
253
|
-
return `# Task Input
|
|
254
|
-
|
|
255
|
-
The user has provided a task:
|
|
256
|
-
<task>
|
|
257
|
-
${task}
|
|
258
|
-
</task>
|
|
259
|
-
${planSection}`;
|
|
260
|
-
}
|
|
261
|
-
function getPlannerSystemPrompt(loadRules) {
|
|
262
|
-
return `Role: Expert software architect and planner.
|
|
263
|
-
Goal: Analyze user requests and create detailed, actionable implementation plans for software development tasks.
|
|
264
|
-
|
|
265
|
-
You are an expert software architect and planner with deep experience in breaking down complex requirements into actionable implementation plans.
|
|
266
|
-
|
|
267
|
-
${MEMORY_USAGE_SECTION}
|
|
268
|
-
|
|
269
|
-
${TOOL_USAGE_INSTRUCTION}
|
|
270
|
-
|
|
271
|
-
${AGENTS_INSTRUCTION(loadRules)}
|
|
272
|
-
|
|
273
|
-
## Your Role
|
|
274
|
-
|
|
275
|
-
As a planner, your expertise lies in:
|
|
276
|
-
- Analyzing requirements to understand the core objective and technical implications
|
|
277
|
-
- Exploring codebases to identify patterns, conventions, and integration points
|
|
278
|
-
- Breaking down complex tasks into clear, logical sequences of steps
|
|
279
|
-
- Anticipating dependencies, edge cases, and potential challenges
|
|
280
|
-
- Creating plans that can be executed autonomously by an AI coding agent
|
|
281
|
-
- Providing technical specificity required for autonomous implementation
|
|
282
|
-
|
|
283
|
-
## Planning Philosophy
|
|
284
|
-
|
|
285
|
-
Effective planning requires understanding before action:
|
|
286
|
-
|
|
287
|
-
1. **Explore First, Plan Second**
|
|
288
|
-
- Never plan in a vacuum. Use available tools to understand the existing codebase
|
|
289
|
-
- Identify similar implementations, patterns, and conventions already in use
|
|
290
|
-
- Understand the project structure, naming conventions, and architectural patterns
|
|
291
|
-
- Look at tests to understand expected behavior and testing approaches
|
|
292
|
-
|
|
293
|
-
2. **Context is Critical**
|
|
294
|
-
- The best plans are informed by the actual state of the codebase
|
|
295
|
-
- File system exploration (\`listFiles\`, \`searchFiles\`) reveals structure and patterns
|
|
296
|
-
- Reading existing files (\`readFile\`) shows coding style and conventions
|
|
297
|
-
- Understanding context prevents suggesting solutions that don't fit the project
|
|
298
|
-
|
|
299
|
-
3. **Specificity Over Generality**
|
|
300
|
-
- Vague plans lead to implementation confusion and prevent autonomous execution
|
|
301
|
-
- Instead of "implement the feature," specify which files to modify, what functions to add, and what logic to implement
|
|
302
|
-
- Name specific components, modules, or files when possible
|
|
303
|
-
- Describe what needs to change and why
|
|
304
|
-
- Examples:
|
|
305
|
-
* \u274C Vague: "Implement the feature"
|
|
306
|
-
* \u2705 Specific: "Create \`src/components/LoginForm.tsx\` with a React component that includes email and password fields, using the existing \`useAuth\` hook from \`src/hooks/useAuth.ts\`"
|
|
307
|
-
* \u274C Vague: "Add error handling"
|
|
308
|
-
* \u2705 Specific: "In \`src/api/client.ts\`, wrap the fetch call in a try-catch block and throw custom errors using the \`ApiError\` class from \`src/errors.ts\`"
|
|
309
|
-
|
|
310
|
-
4. **Clarity for AI Coding Agents**
|
|
311
|
-
- Plans will be executed autonomously by an AI coding agent without human intervention
|
|
312
|
-
- Break complex tasks into smaller, logical units that can be completed independently
|
|
313
|
-
- Use clear structure (numbered lists, narrative text, or combined formats) to organize steps
|
|
314
|
-
- Include exact file paths, function names, and implementation patterns
|
|
315
|
-
|
|
316
|
-
## Planning for AI Implementation
|
|
317
|
-
|
|
318
|
-
Plans will be executed by an AI coding agent that operates autonomously with the following capabilities:
|
|
319
|
-
|
|
320
|
-
**Planning Requirements:**
|
|
321
|
-
Plans should include specific technical details to enable autonomous implementation:
|
|
322
|
-
- **Function/class names**: Name specific functions, classes, or components to implement
|
|
323
|
-
- **Implementation patterns**: Reference existing patterns or provide clear guidance on approach
|
|
324
|
-
- **Import statements**: Specify required dependencies and where to import them from
|
|
325
|
-
- **Technical constraints**: Note any architectural decisions, performance requirements, or compatibility concerns
|
|
326
|
-
|
|
327
|
-
**What Makes a Good AI-Actionable Plan:**
|
|
328
|
-
- Each step can be completed using the available tools
|
|
329
|
-
- File paths and code structures are explicitly named
|
|
330
|
-
- Dependencies between steps are clear
|
|
331
|
-
- Implementation approach follows existing codebase patterns
|
|
332
|
-
- Technical requirements are specific, not general
|
|
333
|
-
|
|
334
|
-
## Your Approach
|
|
335
|
-
|
|
336
|
-
When given a planning task:
|
|
337
|
-
|
|
338
|
-
1. **Understand the Goal**: Analyze the request thoroughly to grasp the primary objective and any constraints
|
|
339
|
-
2. **Gather Context**: Explore the codebase using available tools to understand existing patterns and structure
|
|
340
|
-
3. **Identify Patterns**: Look for similar implementations that can guide the approach
|
|
341
|
-
4. **Break Down the Work**: Decompose the solution into logical, sequential steps
|
|
342
|
-
5. **Be Specific**: Provide concrete details about files, functions, and implementations
|
|
343
|
-
6. **Seek Clarity**: If requirements are ambiguous or critical information is missing, ask for clarification
|
|
344
|
-
|
|
345
|
-
## Tool Usage Strategy
|
|
346
|
-
|
|
347
|
-
Use exploration tools strategically:
|
|
348
|
-
- \`listFiles\`: Understand project structure and locate relevant directories
|
|
349
|
-
- \`searchFiles\`: Find existing patterns, similar implementations, or specific code
|
|
350
|
-
- \`readFile\`: Examine existing code to understand style, patterns, and conventions
|
|
351
|
-
- \`fetchUrl\`: Access external documentation or resources when needed
|
|
352
|
-
- \`askFollowupQuestion\`: Request clarification when requirements are unclear or ambiguous
|
|
353
|
-
|
|
354
|
-
The goal is to create well-informed plans based on actual codebase understanding, not assumptions.
|
|
355
|
-
|
|
356
|
-
## Plan Format Guidelines
|
|
357
|
-
|
|
358
|
-
When generating your plan, follow these formatting guidelines:
|
|
359
|
-
|
|
360
|
-
1. Number major sections to provide clear structure:
|
|
361
|
-
a. Use numbers (1., 2., 3., etc.) for top-level sections
|
|
362
|
-
b. Use nested numbering (1.1, 1.2) or letters (a., b., c.) for sub-sections
|
|
363
|
-
c. This makes sections easy to reference and understand
|
|
364
|
-
d. Provides clear hierarchy and organization
|
|
365
|
-
|
|
366
|
-
Example section numbering:
|
|
367
|
-
1. Project Setup
|
|
368
|
-
1.1 Initialize repository
|
|
369
|
-
1.2 Configure dependencies
|
|
370
|
-
2. Implementation
|
|
371
|
-
2.1 Core features
|
|
372
|
-
2.2 Tests
|
|
373
|
-
|
|
374
|
-
2. Use numbered lists when the order of steps matters:
|
|
375
|
-
a. Sequential steps where one depends on the previous
|
|
376
|
-
b. Steps that must be performed in a specific order
|
|
377
|
-
c. Processes with clear progression
|
|
378
|
-
d. When steps need to be referenced by number
|
|
379
|
-
|
|
380
|
-
Example numbered list format:
|
|
381
|
-
1. First step that must be completed first
|
|
382
|
-
2. Second step that depends on the first
|
|
383
|
-
3. Third step that follows from the second
|
|
384
|
-
|
|
385
|
-
3. Use narrative or structured text format when the plan involves:
|
|
386
|
-
a. High-level strategies or conceptual approaches
|
|
387
|
-
b. Explanations or background information
|
|
388
|
-
c. Decision-making guidance
|
|
389
|
-
d. Context that doesn't translate well to discrete steps
|
|
390
|
-
|
|
391
|
-
4. Combine formats when appropriate:
|
|
392
|
-
a. Use numbered sections for overall structure
|
|
393
|
-
b. Use narrative text for context and explanation
|
|
394
|
-
c. Use numbered lists for sequential steps
|
|
395
|
-
|
|
396
|
-
Example combined format:
|
|
397
|
-
1. Phase 1: Setup
|
|
398
|
-
First, we need to configure the environment...
|
|
399
|
-
1. Install dependencies
|
|
400
|
-
2. Configure settings
|
|
401
|
-
3. Verify installation
|
|
402
|
-
|
|
403
|
-
2. Phase 2: Implementation
|
|
404
|
-
The implementation should focus on...
|
|
405
|
-
1. Implement feature A
|
|
406
|
-
2. Implement feature B
|
|
407
|
-
3. Write tests
|
|
408
|
-
|
|
409
|
-
5. Include implementation-ready details for AI agents:
|
|
410
|
-
a. Provide specific technical details the coding agent needs (file paths, function signatures, etc.)
|
|
411
|
-
b. Avoid steps that require human intervention or manual processes
|
|
412
|
-
c. Each step should be implementable using the AI agent's available tools
|
|
413
|
-
d. Reference existing code patterns and conventions from the codebase
|
|
414
|
-
|
|
415
|
-
**Note**: Plans should use flexible formats such as numbered lists or narrative text. Checklist formats (markdown checkboxes) are NOT required and should only be used when specifically appropriate for tracking independent action items.
|
|
416
|
-
|
|
417
|
-
## Decision Logic
|
|
418
|
-
|
|
419
|
-
1. Analyze the task and the existing plan (if any).
|
|
420
|
-
2. If the requirements are clear and you can generate or update the plan:
|
|
421
|
-
a. Provide the plan in the "plan" field
|
|
422
|
-
b. Apply appropriate formatting based on guidelines above
|
|
423
|
-
c. Include relevant file paths in the "files" array if applicable
|
|
424
|
-
3. If the requirements are not clear:
|
|
425
|
-
a. Ask a clarifying question in the "question" field
|
|
426
|
-
4. If the task is already implemented or no action is needed:
|
|
427
|
-
a. Do not generate a plan
|
|
428
|
-
b. Provide a concise reason in the "reason" field
|
|
429
|
-
|
|
430
|
-
## IMPORTANT NOTE
|
|
431
|
-
|
|
432
|
-
You MUST NOT attempt to make any modifications to the codebase. You DO NOT have access to any tools with write access.
|
|
433
|
-
|
|
434
|
-
## Response Format
|
|
435
|
-
|
|
436
|
-
${createJsonResponseInstruction({
|
|
437
|
-
plan: "The generated or updated plan.",
|
|
438
|
-
question: {
|
|
439
|
-
question: "The clarifying question to ask the user.",
|
|
440
|
-
defaultAnswer: "The default answer to provide if the user does not provide an answer."
|
|
441
|
-
},
|
|
442
|
-
reason: "If no plan is needed, provide a reason here.",
|
|
443
|
-
files: ["path/to/file1.ts", "path/to/file2.ts"]
|
|
444
|
-
})}
|
|
445
|
-
`;
|
|
446
|
-
}
|
|
447
|
-
var PLANNER_SYSTEM_PROMPT = getPlannerSystemPrompt();
|
|
448
|
-
var PlanSchema = z.object({
|
|
449
|
-
plan: z.string().nullish(),
|
|
450
|
-
question: z.object({
|
|
451
|
-
question: z.string(),
|
|
452
|
-
defaultAnswer: z.string().nullish()
|
|
453
|
-
}).nullish(),
|
|
454
|
-
reason: z.string().nullish(),
|
|
455
|
-
files: z.array(z.string()).nullish()
|
|
456
|
-
});
|
|
457
|
-
|
|
458
|
-
// src/workflows/prompts/review.ts
|
|
459
|
-
var CODE_REVIEW_SYSTEM_PROMPT = `Role: Senior software engineer.
|
|
460
|
-
Goal: Review code changes and provide comprehensive, actionable feedback on issues found.
|
|
461
|
-
|
|
462
|
-
CRITICAL: You have VERY LIMITED tools. ONLY use: gitDiff, readFile, readBinaryFile, searchFiles, listFiles.
|
|
463
|
-
DO NOT use executeCommand - it does NOT exist.
|
|
464
|
-
DO NOT inspect node_modules or any dependency directories.
|
|
465
|
-
|
|
466
|
-
${TOOL_USAGE_INSTRUCTION}
|
|
467
|
-
|
|
468
|
-
## Review Process
|
|
469
|
-
|
|
470
|
-
1. **Identify Reviewable Files**: Use the \`<file_status>\` list to determine which files have been modified.
|
|
471
|
-
2. **Select Files for Diff**: From the modified files, select only the reviewable source and configuration files.
|
|
472
|
-
- **Include**: Source code, config files, and template files.
|
|
473
|
-
- **Exclude**: Lockfiles, build artifacts, test snapshots, binary/media files, data and fixtures and other generated files.
|
|
474
|
-
3. **Inspect Changes**:
|
|
475
|
-
- The \`gitDiff\` tool is ALWAYS available for reviewing changes.
|
|
476
|
-
- When reviewing pull requests or commit ranges: Use \`gitDiff\` with the file parameter to see exact changes.
|
|
477
|
-
- When reviewing local changes: Use \`gitDiff\` with staged: true for staged changes, or without parameters for unstaged changes.
|
|
478
|
-
- When reviewing a specific commit: Use \`gitDiff\` with the file parameter to see what changed in that commit.
|
|
479
|
-
4. **Analyze and Review**: Analyze the code for issues. When using \`gitDiff\`, focus only on the modified lines (additions/deletions). Provide specific, actionable feedback with accurate line numbers.
|
|
480
|
-
|
|
481
|
-
## Critical Rules
|
|
482
|
-
|
|
483
|
-
- **Focus on Changes**: When using \`gitDiff\`, ONLY review the actual changes shown in the diff. Do not comment on existing, unmodified code.
|
|
484
|
-
- **Focus Scope**: Do not comment on overall project structure or architecture unless directly impacted by the changes in the diff.
|
|
485
|
-
- **No Feature Requests**: Do not comment on missing features or functionality that are not part of this diff.
|
|
486
|
-
- **One File at a Time**: Review files individually using \`gitDiff\` with the specific file path.
|
|
487
|
-
- **No Empty Diffs**: MUST NOT call \`gitDiff\` with an empty or omitted file parameter. Always specify a file path.
|
|
488
|
-
- **Accurate Line Numbers**: When using \`gitDiff\`, use the line numbers from the diff annotations (\`[Line N]\` for additions, \`[Line N removed]\` for deletions).
|
|
489
|
-
- **No Praise**: Provide only reviews for actual issues found. Do not include praise or positive feedback.
|
|
490
|
-
- **Clear Reasoning**: For each issue, provide clear reasoning explaining why it's a problem and what the impact could be.
|
|
491
|
-
- **Specific Advice**: Avoid generic advice. Provide concrete, actionable suggestions specific to the code being reviewed.
|
|
492
|
-
- **Assumptions**: Assume all changes have passed linter, type-checking, and unit tests. Do not check for compile errors.
|
|
493
|
-
- **NO Command Execution**: DO NOT attempt to use \`executeCommand\` or any shell commands. You only have access to: \`gitDiff\`, \`readFile\`, \`readBinaryFile\`, \`searchFiles\`, and \`listFiles\`.
|
|
494
|
-
- **No Dependency Inspection**: DO NOT inspect files in \`node_modules\`, \`vendor\`, or any dependency directories. Review only the project's own source code.
|
|
495
|
-
|
|
496
|
-
You may receive the following context:
|
|
497
|
-
- \`<pr_title>\` and \`<pr_description>\`: PR context
|
|
498
|
-
- \`<commit_messages>\`: Commits in the change
|
|
499
|
-
- \`<user_context>\`: Specific review focus from the user
|
|
500
|
-
- \`<file_status>\`: List of modified files with their status
|
|
501
|
-
- \`<review_instructions>\`: Specific instructions for this review
|
|
502
|
-
- \`<target_commit>\`: The specific commit being reviewed (when reviewing past commits)
|
|
503
|
-
|
|
504
|
-
## Output Format
|
|
505
|
-
|
|
506
|
-
${createJsonResponseInstruction({
|
|
507
|
-
overview: "Summary of issues found, 'No issues found', or 'No reviewable changes' if all files were excluded.",
|
|
508
|
-
specificReviews: [
|
|
509
|
-
{
|
|
510
|
-
file: "path/to/file.ts",
|
|
511
|
-
lines: "42 or 15-20",
|
|
512
|
-
review: "Specific issue description and actionable fix."
|
|
513
|
-
}
|
|
514
|
-
]
|
|
515
|
-
})}
|
|
516
|
-
|
|
517
|
-
### Examples
|
|
518
|
-
|
|
519
|
-
**Example 1: Issues found**
|
|
520
|
-
\`\`\`json
|
|
521
|
-
{
|
|
522
|
-
"overview": "Found 2 security and 1 logic issue in the authentication changes.",
|
|
523
|
-
"specificReviews": [
|
|
524
|
-
{
|
|
525
|
-
"file": "src/auth/login.ts",
|
|
526
|
-
"lines": "23",
|
|
527
|
-
"review": "Password is logged in plaintext. Remove the console.log statement or hash the password before logging."
|
|
528
|
-
},
|
|
529
|
-
{
|
|
530
|
-
"file": "src/auth/login.ts",
|
|
531
|
-
"lines": "45-48",
|
|
532
|
-
"review": "Missing input validation for email field. Add email format validation before processing the login request."
|
|
533
|
-
},
|
|
534
|
-
{
|
|
535
|
-
"file": "src/utils/token.ts",
|
|
536
|
-
"lines": "12",
|
|
537
|
-
"review": "Token expiration is set to 365 days which is too long for security. Reduce to 24 hours or use refresh tokens."
|
|
538
|
-
}
|
|
539
|
-
]
|
|
540
|
-
}
|
|
541
|
-
\`\`\`
|
|
542
|
-
|
|
543
|
-
**Example 2: No issues**
|
|
544
|
-
\`\`\`json
|
|
545
|
-
{
|
|
546
|
-
"overview": "No issues found.",
|
|
547
|
-
"specificReviews": []
|
|
548
|
-
}
|
|
549
|
-
\`\`\`
|
|
550
|
-
|
|
551
|
-
**Example 3: No reviewable changes**
|
|
552
|
-
\`\`\`json
|
|
553
|
-
{
|
|
554
|
-
"overview": "No reviewable changes. All modified files are lockfiles or generated artifacts.",
|
|
555
|
-
"specificReviews": []
|
|
556
|
-
}
|
|
557
|
-
\`\`\`
|
|
558
|
-
`;
|
|
559
|
-
function formatContext(tag, value) {
|
|
560
|
-
if (!value) {
|
|
561
|
-
return void 0;
|
|
562
|
-
}
|
|
563
|
-
return `<${tag}>
|
|
564
|
-
${value}
|
|
565
|
-
</${tag}>`;
|
|
566
|
-
}
|
|
567
|
-
function getReviewInstructions(params) {
|
|
568
|
-
if (params.targetCommit) {
|
|
569
|
-
return `Review the changes in commit '${params.targetCommit}'. Use the gitDiff tool with the file parameter to inspect what changed in each file. Focus your review on the actual changes shown in the diff.`;
|
|
570
|
-
}
|
|
571
|
-
if (params.commitRange) {
|
|
572
|
-
return `Review the pull request or commit range '${params.commitRange}'. Use the gitDiff tool with the file parameter to inspect the actual code changes.`;
|
|
573
|
-
}
|
|
574
|
-
if (params.staged) {
|
|
575
|
-
return "Review the staged changes. Use the gitDiff tool with the file parameter and staged: true to inspect the actual code changes.";
|
|
576
|
-
}
|
|
577
|
-
return "Review the unstaged changes. Use the gitDiff tool with the file parameter to inspect the actual code changes.";
|
|
578
|
-
}
|
|
579
|
-
function formatReviewToolInput(params) {
|
|
580
|
-
const fileList = params.changedFiles && params.changedFiles.length > 0 ? params.changedFiles.map((file) => {
|
|
581
|
-
let statString = "";
|
|
582
|
-
if (file.insertions !== void 0 || file.deletions !== void 0) {
|
|
583
|
-
const ins = file.insertions ?? 0;
|
|
584
|
-
const del = file.deletions ?? 0;
|
|
585
|
-
statString = ` (+${ins}/-${del})`;
|
|
586
|
-
}
|
|
587
|
-
return `${file.status}: ${file.path}${statString}`;
|
|
588
|
-
}).join("\n") : void 0;
|
|
589
|
-
const parts = [
|
|
590
|
-
formatContext("pr_title", params.pullRequestTitle),
|
|
591
|
-
formatContext("pr_description", params.pullRequestDescription),
|
|
592
|
-
formatContext("commit_messages", params.commitMessages),
|
|
593
|
-
formatContext("target_commit", params.targetCommit),
|
|
594
|
-
formatContext("user_context", params.context),
|
|
595
|
-
formatContext("file_status", fileList),
|
|
596
|
-
formatContext("review_instructions", getReviewInstructions(params))
|
|
597
|
-
];
|
|
598
|
-
return parts.filter(Boolean).join("\n");
|
|
599
|
-
}
|
|
600
|
-
|
|
601
|
-
// src/workflows/prompts/init.ts
|
|
602
|
-
// System prompt for the `init` workflow's analyzer agent: instructs the model
// to scan the project and emit a polkacodes YAML config (scripts / rules /
// excludeFiles) wrapped in a JSON response. The embedded YAML is a template
// shown to the model, not parsed by this program.
var INIT_WORKFLOW_ANALYZE_SYSTEM_PROMPT = `
Role: Analyzer agent
Goal: Produce a valid polkacodes YAML configuration for the project.

${TOOL_USAGE_INSTRUCTION}

Workflow
1. Scan project files to identify the project's characteristics. Start using the "readFile" tool to understand the project's dependencies, scripts, and basic configuration.
  - Package/build tool (npm, bun, pnpm, etc.)
  - Test framework and patterns (snapshot tests, coverage, etc.)
  - Formatter / linter and their rules
  - Folder structure and naming conventions.
  - CI / development workflows (e.g., GitHub Actions in .github/workflows).

2. Build a YAML config with three root keys:

\`\`\`yaml
scripts: # derive from package.json and CI workflows. Only include scripts that are relevant for development.
  format: # code formatter
    command: "<formatter cmd>"
    description: "Format code"
  check: # linter / type checker
    command: "<linter cmd>"
    description: "Static checks"
  test: # test runner
    command: "<test cmd>"
    description: "Run tests"
  # add any other meaningful project scripts like 'build', 'dev', etc.

rules: # A bullet list of key conventions, frameworks, and libraries used (e.g., "- React", "- TypeScript", "- Jest"). This helps other agents understand the project.

excludeFiles: # A list of glob patterns for files that should not be read. Only include files that might contain secrets.
  - ".env"
  - ".env.*"
  - "*.pem"
  - "*.key"
  - ".npmrc"
  # do NOT list build artifacts, lockfiles, or paths already in .gitignore
\`\`\`

3. Return a JSON object with the generated YAML configuration as a string in the 'yaml' property.

${createJsonResponseInstruction({
  yaml: "<yaml_string>"
})}
`;
|
|
648
|
-
|
|
649
|
-
// src/workflows/prompts/meta.ts
|
|
650
|
-
// System prompt for the meta-agent that routes a user task to either the
// 'code' or 'task' workflow; response shape is enforced via the shared JSON
// response instruction.
var META_SYSTEM_PROMPT = `Role: Meta-agent.
Goal: Decide which workflow ('code' or 'task') to use for a given task.

You are a meta-agent that decides which workflow to use for a given task.
Based on the user's task, decide whether to use the 'code' or 'task' workflow.

- Use the 'code' workflow for tasks that are well-defined and can be implemented directly without a separate planning phase.
- Use the 'task' workflow for simple, single-action tasks like answering a question or running a command.

The user's task is provided in the <task> tag.

${createJsonResponseInstruction({
  workflow: "<workflow_name>"
  // 'code' or 'task'
})}
`;
|
|
666
|
-
|
|
667
|
-
// src/workflows/prompts/pr.ts
|
|
668
|
-
// System prompt used when generating a pull-request title/description from
// the branch name, commit messages, and diff; the example object doubles as
// the required JSON response schema.
var GET_PR_DETAILS_SYSTEM_PROMPT = `Role: Expert developer.
Goal: Generate a pull request title and description based on the branch name, commits, and diff.

${TOOL_USAGE_INSTRUCTION}

You are an expert at creating pull requests.
Based on the provided branch name, commit messages, and diff, generate a title and description for the pull request.

${createJsonResponseInstruction({
  title: "feat: add new feature",
  description: "This pull request adds a new feature that does...\\n\\n### Changes\\n- ..."
})}
`;
|
|
681
|
-
|
|
682
|
-
// src/workflows/workflow.utils.ts
|
|
683
|
-
import { execSync } from "child_process";
|
|
684
|
-
import { promises as fs } from "fs";
|
|
685
|
-
import path from "path";
|
|
686
|
-
import { listFiles, resolveRules } from "@polka-codes/cli-shared";
|
|
687
|
-
import { z as z2 } from "zod";
|
|
688
|
-
|
|
689
|
-
// src/getModel.ts
|
|
690
|
-
import { appendFileSync } from "fs";
|
|
691
|
-
import { inspect } from "util";
|
|
692
|
-
import { createAnthropic } from "@ai-sdk/anthropic";
|
|
693
|
-
import { createDeepSeek } from "@ai-sdk/deepseek";
|
|
694
|
-
import { createGoogleGenerativeAI } from "@ai-sdk/google";
|
|
695
|
-
import { createVertex } from "@ai-sdk/google-vertex";
|
|
696
|
-
import { createOpenAI } from "@ai-sdk/openai";
|
|
697
|
-
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
698
|
-
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
|
|
699
|
-
|
|
700
|
-
// src/env.ts
|
|
701
|
-
// Snapshot the environment variables this CLI recognizes.
//
// Reads each known key from process.env (values may be undefined) and then
// applies the optional `override` object on top, which wins on conflicts.
function getEnv(override) {
  const recognizedKeys = [
    "POLKA_API_PROVIDER",
    "POLKA_MODEL",
    "POLKA_API_KEY",
    "POLKA_BUDGET",
    "ANTHROPIC_API_KEY",
    "DEEPSEEK_API_KEY",
    "OPENROUTER_API_KEY",
    "OPENAI_API_KEY",
    "GOOGLE_API_KEY",
    "TRACING_FILE"
  ];
  const env = {};
  for (const key of recognizedKeys) {
    env[key] = process.env[key];
  }
  // Overrides take precedence over process.env values.
  return { ...env, ...override };
}
|
|
716
|
-
|
|
717
|
-
// src/getModel.ts
|
|
718
|
-
// Normalize a fetch HeadersInit value (Headers | string[][] | record | nullish)
// into a plain object, or undefined when no headers were provided.
function headersToObject(headers) {
  if (!headers) {
    return undefined;
  }
  if (headers instanceof Headers) {
    // Headers.entries() yields lowercase [name, value] pairs.
    return Object.fromEntries(headers.entries());
  }
  return Array.isArray(headers) ? Object.fromEntries(headers) : headers;
}
|
|
730
|
-
// Return a copy of `headers` with credential-bearing values replaced by
// "REDACTED" (any header whose name contains authorization/cookie/key/token,
// case-insensitively). Returns undefined for a nullish input.
function redactHeaders(headers) {
  if (!headers) {
    return undefined;
  }
  const sensitiveKeywords = ["authorization", "cookie", "key", "token"];
  const entries = Object.entries(headers).map(([name, value]) => {
    const lowered = name.toLowerCase();
    const isSensitive = sensitiveKeywords.some((keyword) => lowered.includes(keyword));
    return [name, isSensitive ? "REDACTED" : value];
  });
  return Object.fromEntries(entries);
}
|
|
745
|
-
// Supported AI provider identifiers (transpiled-TypeScript enum shape: a
// plain string-valued object built by an IIFE so bundlers can tree-shake it).
var AiProvider = /* @__PURE__ */ ((values) => {
  values.Anthropic = "anthropic";
  values.DeepSeek = "deepseek";
  values.OpenRouter = "openrouter";
  values.OpenAI = "openai";
  values.OpenAICompatible = "openai-compatible";
  values.GoogleVertex = "google-vertex";
  values.Google = "google";
  return values;
})(AiProvider || {});
|
|
755
|
-
// Construct an AI SDK language-model instance for the configured provider.
//
// When debug logging or the TRACING_FILE env var is set, a fetch wrapper is
// installed that logs/traces every request and response. SSE responses are
// tee'd so tracing can consume one branch while the client receives the
// other unmodified; non-streaming bodies are read fully and re-wrapped.
var getModel = (config, debugLogging = false) => {
  const { TRACING_FILE } = getEnv();
  // Only wrap fetch when some form of logging/tracing is requested.
  const fetchOverride = debugLogging || TRACING_FILE ? (async (url, options) => {
    // NOTE(review): assumes options.body, when present, is JSON — JSON.parse
    // would throw otherwise; presumably all AI SDK providers send JSON. Verify.
    const requestBody = options?.body ? JSON.parse(options.body) : void 0;
    if (debugLogging) {
      console.error("-> Request URL:", url);
      console.error("-> Request Headers:", options?.headers);
      console.error("-> Request Body:");
      console.error(inspect(requestBody, { depth: null, colors: process.stderr.isTTY }));
    }
    if (TRACING_FILE) {
      // Append a pretty-printed JSON record per request; secrets are redacted.
      appendFileSync(
        TRACING_FILE,
        `${JSON.stringify(
          {
            type: "request",
            timestamp: (/* @__PURE__ */ new Date()).toISOString(),
            url,
            headers: redactHeaders(headersToObject(options?.headers)),
            body: requestBody
          },
          null,
          2
        )}
`
      );
    }
    const res = await fetch(url, options);
    if (debugLogging) {
      console.error("<- Response Status:", res.status);
    }
    const contentType = res.headers.get("content-type") || "";
    if (contentType.includes("text/event-stream") && res.body) {
      // Streaming (SSE) response: tee so tracing reads one branch while the
      // caller consumes the other; the trace branch is drained in background.
      const [branch, clientStream] = res.body.tee();
      (async () => {
        const reader = branch.getReader();
        const decoder = new TextDecoder();
        try {
          let done = false;
          while (!done) {
            const { value, done: d } = await reader.read();
            done = d;
            if (value) {
              const text = decoder.decode(value);
              if (debugLogging) {
                console.error("<- Stream chunk:", text.replace(/\n/g, "\\n"));
              }
              if (TRACING_FILE) {
                // Parse each SSE "data:" line; fall back to the raw string
                // when it is not valid JSON (e.g. the "[DONE]" sentinel).
                for (const line of text.split("\n")) {
                  if (line.startsWith("data:")) {
                    const content = line.slice("data:".length).trim();
                    if (content) {
                      try {
                        const json = JSON.parse(content);
                        appendFileSync(
                          TRACING_FILE,
                          `${JSON.stringify(
                            {
                              type: "response-chunk",
                              timestamp: (/* @__PURE__ */ new Date()).toISOString(),
                              chunk: json
                            },
                            null,
                            2
                          )}
`
                        );
                      } catch (_e) {
                        appendFileSync(
                          TRACING_FILE,
                          `${JSON.stringify(
                            {
                              type: "response-chunk",
                              timestamp: (/* @__PURE__ */ new Date()).toISOString(),
                              chunk: content
                            },
                            null,
                            2
                          )}
`
                        );
                      }
                    }
                  }
                }
              }
            }
          }
        } finally {
          reader.releaseLock();
        }
      })().catch((error) => {
        // Tracing must never break the client stream; log and move on.
        if (debugLogging) {
          console.error("Stream reading error:", error);
        }
      });
      return new Response(clientStream, {
        headers: res.headers,
        status: res.status
      });
    }
    // Non-streaming response: read it fully, log/trace, then re-wrap so the
    // caller still receives a readable Response body.
    const full = await res.text();
    let responseBody;
    try {
      responseBody = JSON.parse(full);
    } catch (_error) {
      responseBody = full;
    }
    if (debugLogging) {
      console.error("<- Response Body:");
      console.error(inspect(responseBody, { depth: null, colors: process.stderr.isTTY }));
    }
    if (TRACING_FILE) {
      appendFileSync(
        TRACING_FILE,
        `${JSON.stringify(
          {
            type: "response",
            timestamp: (/* @__PURE__ */ new Date()).toISOString(),
            status: res.status,
            headers: redactHeaders(Object.fromEntries(res.headers.entries())),
            body: responseBody
          },
          null,
          2
        )}
`
      );
    }
    return new Response(full, {
      headers: res.headers,
      status: res.status
    });
  }) : void 0;
  // Dispatch on provider id; each case builds the provider client with the
  // optional fetch wrapper and returns a model handle for config.model.
  switch (config.provider) {
    case "anthropic" /* Anthropic */: {
      const anthropic = createAnthropic({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        fetch: fetchOverride
      });
      return anthropic(config.model);
    }
    case "deepseek" /* DeepSeek */: {
      const deepseek = createDeepSeek({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        fetch: fetchOverride
      });
      return deepseek(config.model);
    }
    case "openrouter" /* OpenRouter */: {
      const openrouter = createOpenRouter({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        fetch: fetchOverride,
        // OpenRouter attribution headers for this app.
        headers: {
          "HTTP-Referer": "https://polka.codes",
          "X-Title": "Polka Codes"
        }
      });
      // Request usage accounting in OpenRouter responses.
      return openrouter.chat(config.model, {
        usage: { include: true }
      });
    }
    case "openai" /* OpenAI */: {
      const openai = createOpenAI({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        fetch: fetchOverride
      });
      return openai(config.model);
    }
    case "openai-compatible" /* OpenAICompatible */: {
      // Generic OpenAI-compatible endpoints have no default URL.
      if (!config.baseUrl) {
        throw new Error("OpenAI-compatible providers require a baseUrl");
      }
      const openaiCompatible = createOpenAICompatible({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        name: config.name || "OpenAI Compatible",
        fetch: fetchOverride
      });
      return openaiCompatible(config.model);
    }
    case "google-vertex" /* GoogleVertex */: {
      const vertex = createVertex({
        fetch: fetchOverride,
        location: config.location,
        project: config.project,
        googleAuthOptions: {
          keyFile: config.keyFile
        }
      });
      return vertex(config.model);
    }
    case "google" /* Google */: {
      const google = createGoogleGenerativeAI({
        fetch: fetchOverride,
        apiKey: config.apiKey
      });
      return google(config.model);
    }
  }
  // Unknown provider falls through and returns undefined.
};
|
|
960
|
-
|
|
961
|
-
// src/ApiProviderConfig.ts
|
|
962
|
-
// Fallback model id per provider, used by ApiProviderConfig.resolveModelConfig
// when neither the command config nor the provider config names a model.
var defaultModels = {
  ["anthropic" /* Anthropic */]: "claude-sonnet-4-20250514",
  ["deepseek" /* DeepSeek */]: "deepseek-chat",
  ["openrouter" /* OpenRouter */]: "google/gemini-2.5-pro",
  ["openai" /* OpenAI */]: "gpt-5-2025-08-07",
  ["openai-compatible" /* OpenAICompatible */]: "gpt-4o",
  ["google-vertex" /* GoogleVertex */]: "gemini-2.5-pro",
  ["google" /* Google */]: "gemini-2.5-pro"
};
|
|
971
|
-
// Resolves which provider/model/parameters a CLI command should use.
//
// Precedence (highest first): per-command config > `commands.default` >
// per-provider defaults > instance-wide defaults > hard-coded defaultModels.
var ApiProviderConfig = class {
  // Provider id used when no command/provider override names one.
  defaultProvider;
  // Per-provider settings keyed by provider id (apiKey, defaultModel, etc.).
  providers;
  // Optional per-command overrides; the `default` key applies to every command.
  commands;
  // Lowest-precedence generation parameters merged into every resolved config.
  defaultParameters;
  constructor(config) {
    this.defaultProvider = config.defaultProvider;
    this.defaultParameters = config.defaultParameters ?? {};
    this.providers = config.providers ?? {};
    this.commands = config.commands;
  }
  // Merge the command-specific config over the shared `default` command
  // config, then resolve it to a concrete provider/model configuration.
  getConfigForCommand(command) {
    const commandConfig = this.commands?.[command];
    const defaultConfig = this.commands?.default;
    // Spread order: command-specific keys win over the defaults.
    const mergedConfig = { ...defaultConfig, ...commandConfig };
    return this.resolveModelConfig(mergedConfig);
  }
  // Turn a (possibly partial) model config into a fully resolved one, or
  // undefined when no provider can be determined at all.
  resolveModelConfig(config) {
    const { provider, model, parameters, budget, rules } = config;
    const finalProvider = provider ?? this.defaultProvider;
    if (!finalProvider) {
      return void 0;
    }
    const { apiKey, defaultModel, defaultParameters, location, project, keyFile, baseUrl, name } = this.providers[finalProvider] ?? {};
    // Model fallback chain: explicit > provider default > hard-coded default.
    const finalModel = model ?? defaultModel ?? defaultModels[finalProvider];
    // Parameter precedence: explicit > provider defaults > instance defaults.
    const finalParameters = {
      ...this.defaultParameters,
      ...defaultParameters ?? {},
      ...parameters ?? {}
    };
    return {
      provider: finalProvider,
      model: finalModel,
      apiKey,
      location,
      project,
      keyFile,
      baseUrl,
      name,
      parameters: finalParameters,
      budget,
      rules
    };
  }
};
|
|
1016
|
-
|
|
1017
|
-
// src/workflows/workflow.utils.ts
|
|
1018
|
-
// Parse `git diff --name-status` output into { path, status } records.
//
// Each non-blank line is "<status>\t<path>"; only the first character of the
// status field matters (e.g. "R100" is a rename with a similarity score).
function parseGitDiffNameStatus(diffOutput) {
  const STATUS_LABELS = {
    A: "Added",
    M: "Modified",
    D: "Deleted",
    R: "Renamed",
    C: "Copied",
    T: "Type changed"
  };
  return diffOutput
    .split("\n")
    .filter((line) => line.trim())
    .map((line) => {
      const [status, ...pathParts] = line.split("\t");
      return {
        // Rename/copy lines carry "old\tnew"; keep the tab-joined remainder.
        path: pathParts.join("\t"),
        status: STATUS_LABELS[status[0]] ?? "Unknown"
      };
    });
}
|
|
1049
|
-
// Log a human-readable summary of changed files via the provided logger.
// Emits nothing for an empty list; per-file "+ins/-del" stats are shown only
// when at least one of the counters is present.
function printChangedFiles(logger, changedFiles) {
  if (changedFiles.length === 0) {
    return;
  }
  logger.info("Changed Files:");
  for (const file of changedFiles) {
    const hasStats = file.insertions !== undefined || file.deletions !== undefined;
    const stats = hasStats ? ` (+${file.insertions ?? 0}/-${file.deletions ?? 0})` : "";
    logger.info(`- ${file.status}: ${file.path}${stats}`);
  }
}
|
|
1064
|
-
// Parse `git diff --numstat` output into a map of path -> { insertions, deletions }.
//
// Each line is "<ins>\t<del>\t<path>"; binary files report "-" for both
// counters, which is treated as 0. Paths may be C-style quoted by git.
function parseGitDiffNumStat(output) {
  const stats = {};
  for (const line of output.split("\n")) {
    if (!line.trim()) {
      continue;
    }
    const parts = line.split("\t");
    if (parts.length < 3) {
      continue;
    }
    const [insField, delField, ...pathParts] = parts;
    const filePath = unquotePath(pathParts.join("\t"));
    stats[filePath] = {
      insertions: insField === "-" ? 0 : Number.parseInt(insField, 10),
      deletions: delField === "-" ? 0 : Number.parseInt(delField, 10)
    };
  }
  return stats;
}
|
|
1078
|
-
// Undo git's C-style path quoting (used for paths with spaces/non-ASCII).
// A double-quoted path is decoded via JSON.parse; anything else — including
// strings that fail to parse — is returned unchanged.
var unquotePath = (rawPath) => {
  const looksQuoted = rawPath.startsWith('"') && rawPath.endsWith('"');
  if (!looksQuoted) {
    return rawPath;
  }
  try {
    return JSON.parse(rawPath);
  } catch {
    // Malformed quoting — fall back to the raw string.
    return rawPath;
  }
};
|
|
1088
|
-
// Parse `git status --porcelain=v1` output into { path, status } records.
//
// Each line is "XY <path>" where X is the index (staged) status and Y the
// working-tree (unstaged) status. A file can carry both, joined with ", ".
// Lines with neither status set are dropped.
function parseGitStatus(statusOutput) {
  const STAGED_LABELS = {
    A: "Added (staged)",
    M: "Modified (staged)",
    D: "Deleted (staged)",
    R: "Renamed (staged)",
    C: "Copied (staged)"
  };
  const UNSTAGED_LABELS = {
    M: "Modified (unstaged)",
    D: "Deleted (unstaged)",
    "?": "Untracked"
  };
  const files = [];
  for (const line of statusOutput.split("\n")) {
    if (!line) {
      continue;
    }
    const indexStatus = line[0];
    const workingTreeStatus = line[1];
    // Path begins at column 3; git may C-quote unusual paths.
    const filePath = line.length > 3 ? unquotePath(line.slice(3)) : line;
    const statuses = [];
    // "?" in the index column means untracked — reported via the tree column.
    if (indexStatus !== " " && indexStatus !== "?") {
      statuses.push(STAGED_LABELS[indexStatus] ?? "Changed (staged)");
    }
    if (workingTreeStatus !== " ") {
      statuses.push(UNSTAGED_LABELS[workingTreeStatus] ?? "Changed (unstaged)");
    }
    if (statuses.length > 0) {
      files.push({ path: filePath, status: statuses.join(", ") });
    }
  }
  return files;
}
|
|
1138
|
-
// Collect the repository's local changes (staged, unstaged, and combined)
// by shelling out to git in the current working directory.
//
// Returns { stagedFiles, unstagedFiles, allFiles }; a file modified in both
// the index and the working tree appears in both lists, and its entry in
// `allFiles` is mutated in place to carry the combined +/- totals.
function getLocalChanges() {
  const statusOutput = execSync("git status --porcelain=v1", {
    encoding: "utf-8"
  });
  const allFiles = parseGitStatus(statusOutput);
  // numstat calls are best-effort: failures (e.g. empty repo) leave {}.
  let stagedStats = {};
  try {
    const stagedDiffOutput = execSync("git diff --staged --numstat --no-color", { encoding: "utf-8" });
    stagedStats = parseGitDiffNumStat(stagedDiffOutput);
  } catch {
  }
  let unstagedStats = {};
  try {
    const unstagedDiffOutput = execSync("git diff --numstat --no-color", { encoding: "utf-8" });
    unstagedStats = parseGitDiffNumStat(unstagedDiffOutput);
  } catch {
  }
  const stagedFiles = [];
  const unstagedFiles = [];
  for (const file of allFiles) {
    // Combined totals across index and working tree for this file.
    let totalInsertions = 0;
    let totalDeletions = 0;
    if (file.status.includes("(staged)")) {
      const stats = stagedStats[file.path];
      // Copy so the staged entry carries only its own stats.
      const stagedFile = { ...file };
      if (stats) {
        stagedFile.insertions = stats.insertions;
        stagedFile.deletions = stats.deletions;
        totalInsertions += stats.insertions;
        totalDeletions += stats.deletions;
      }
      stagedFiles.push(stagedFile);
    }
    if (file.status.includes("(unstaged)")) {
      const stats = unstagedStats[file.path];
      const unstagedFile = { ...file };
      if (stats) {
        unstagedFile.insertions = stats.insertions;
        unstagedFile.deletions = stats.deletions;
        totalInsertions += stats.insertions;
        totalDeletions += stats.deletions;
      }
      unstagedFiles.push(unstagedFile);
    } else if (file.status.includes("Untracked")) {
      // Untracked files have no diff stats; list them as unstaged as-is.
      unstagedFiles.push(file);
    }
    // Mutates the shared `allFiles` entry with the combined totals.
    if (totalInsertions > 0 || totalDeletions > 0) {
      file.insertions = totalInsertions;
      file.deletions = totalDeletions;
    }
  }
  return { stagedFiles, unstagedFiles, allFiles };
}
|
|
1191
|
-
// Zod schema for one file-specific review comment: target file, the line
// range the comment applies to, and the review text itself.
var specificReviewSchema = z2.object({
  file: z2.string(),
  lines: z2.string(),
  review: z2.string()
});
// Zod schema for the full review result: an overall summary plus optional
// per-file feedback entries.
var reviewOutputSchema = z2.object({
  overview: z2.string(),
  specificReviews: z2.array(specificReviewSchema).optional()
});
|
|
1200
|
-
// Render a review result (matching reviewOutputSchema) as markdown for the
// console: an "Overview" section, followed by a "File-specific feedback"
// section listing each `file#lines` entry when any are present.
function formatReviewForConsole(output) {
  const header = `### Overview\n\n${output.overview}`;
  const reviews = output.specificReviews ?? [];
  if (reviews.length === 0) {
    return header;
  }
  const feedback = reviews
    .map((item) => `\n- ${item.file}#${item.lines}\n\n${item.review}\n`)
    .join("");
  return `${header}\n\n### File-specific feedback\n${feedback}`;
}
|
|
1216
|
-
// Verify the GitHub CLI is available by running `gh --version` through the
// injected command executor; throws a descriptive error on a non-zero exit.
var checkGhInstalled = async (executeCommand) => {
  const { exitCode } = await executeCommand({ command: "gh", args: ["--version"] });
  if (exitCode !== 0) {
    throw new Error("GitHub CLI (gh) is not installed. Please install it from https://cli.github.com/");
  }
};
|
|
1222
|
-
// Determine the repository's default branch, trying three strategies in order:
// 1. `gh repo view` (authoritative, requires the GitHub CLI);
// 2. probing common local branch names via `git show-ref`;
// 3. the remote's advertised HEAD from `git remote show origin`.
// Returns undefined when every strategy fails.
var getDefaultBranch = async (executeCommand) => {
  const ghResult = await executeCommand({
    command: "gh",
    args: ["repo", "view", "--json", "defaultBranchRef", "--jq", ".defaultBranchRef.name"]
  });
  if (ghResult.exitCode === 0) {
    const name = ghResult.stdout.trim();
    if (name) {
      return name;
    }
  }
  for (const candidate of ["master", "main", "develop"]) {
    const probe = await executeCommand({
      command: "git",
      args: ["show-ref", "--verify", "--quiet", `refs/heads/${candidate}`]
    });
    if (probe.exitCode === 0) {
      return candidate;
    }
  }
  const remoteResult = await executeCommand({ command: "git", args: ["remote", "show", "origin"] });
  if (remoteResult.exitCode === 0) {
    const matched = remoteResult.stdout.match(/HEAD branch: (.*)/);
    if (matched?.[1]) {
      return matched[1];
    }
  }
  return undefined;
};
|
|
1252
|
-
// Build the default XML-ish context string injected into agent prompts.
//
// Includes: a (possibly truncated) project file listing, today's UTC date,
// any configured instruction files (AGENTS.md / CLAUDE.md by default),
// resolved rules (global plus per-command), and the configured scripts.
// Returns { context, loadRules } where loadRules echoes config.loadRules.
async function getDefaultContext(config, commandName) {
  const cwd = process.cwd();
  // List up to 2000 files, honoring the configured exclusion globs.
  const [files, truncated] = await listFiles(cwd, true, 2e3, cwd, config?.excludeFiles ?? []);
  const fileList = files.join("\n");
  const now = /* @__PURE__ */ new Date();
  // UTC date as YYYY-MM-DD (getUTCMonth is 0-based, hence the +1).
  const formattedDate = `${now.getUTCFullYear()}-${String(now.getUTCMonth() + 1).padStart(2, "0")}-${String(now.getUTCDate()).padStart(2, "0")}`;
  const contextParts = [
    `<file_list truncated="${truncated}">
${fileList}
</file_list>`,
    `<now_date>${formattedDate}</now_date>`
  ];
  const loadRules = config?.loadRules;
  // Instruction files loaded unless explicitly disabled in config.loadRules.
  const defaultLoadRules = {
    "AGENTS.md": true,
    "CLAUDE.md": true
  };
  const mergedRules = { ...defaultLoadRules, ...loadRules };
  for (const [fileName, shouldLoad] of Object.entries(mergedRules)) {
    if (shouldLoad) {
      try {
        const content = await fs.readFile(path.join(cwd, fileName), "utf-8");
        // Dots are invalid in tag names; e.g. AGENTS.md -> AGENTS_md.
        const tagName = fileName.replace(/\./g, "_");
        contextParts.push(`<${tagName}_instructions>
${content}
</${tagName}_instructions>`);
      } catch {
      }
    }
  }
  let rules = await resolveRules(config?.rules);
  // Append command-specific rules (if any) after the global rules.
  if (commandName && config) {
    const apiConfig = new ApiProviderConfig(config);
    const commandConfig = apiConfig.getConfigForCommand(commandName);
    if (commandConfig?.rules) {
      const commandRules = await resolveRules(commandConfig.rules);
      if (commandRules) {
        rules = rules ? `${rules}

${commandRules}` : commandRules;
      }
    }
  }
  if (rules) {
    contextParts.push(`<rules>
${rules}
</rules>`);
  }
  if (config?.scripts) {
    // Scripts may be plain strings or objects with command/workflow/script.
    const scripts = Object.entries(config.scripts).map(([name, script]) => {
      if (typeof script === "string") {
        return `${name}: ${script}`;
      }
      if ("command" in script) {
        return `${name}: ${script.command}${script.description ? ` # ${script.description}` : ""}`;
      }
      if ("workflow" in script) {
        return `${name}: workflow:${script.workflow}${script.description ? ` # ${script.description}` : ""}`;
      }
      if ("script" in script) {
        return `${name}: script:${script.script}${script.description ? ` # ${script.description}` : ""}`;
      }
      return `${name}: unknown`;
    }).join("\n");
    if (scripts.length > 0) {
      contextParts.push(`<scripts>
${scripts}
</scripts>`);
    }
  }
  return { context: contextParts.join("\n"), loadRules };
}
|
|
1324
|
-
|
|
1325
|
-
export {
|
|
1326
|
-
getCoderSystemPrompt,
|
|
1327
|
-
getImplementPrompt,
|
|
1328
|
-
COMMIT_MESSAGE_SYSTEM_PROMPT,
|
|
1329
|
-
FIX_SYSTEM_PROMPT,
|
|
1330
|
-
getFixUserPrompt,
|
|
1331
|
-
INIT_WORKFLOW_ANALYZE_SYSTEM_PROMPT,
|
|
1332
|
-
META_SYSTEM_PROMPT,
|
|
1333
|
-
getPlanPrompt,
|
|
1334
|
-
getPlannerSystemPrompt,
|
|
1335
|
-
PlanSchema,
|
|
1336
|
-
GET_PR_DETAILS_SYSTEM_PROMPT,
|
|
1337
|
-
CODE_REVIEW_SYSTEM_PROMPT,
|
|
1338
|
-
formatReviewToolInput,
|
|
1339
|
-
getEnv,
|
|
1340
|
-
AiProvider,
|
|
1341
|
-
getModel,
|
|
1342
|
-
ApiProviderConfig,
|
|
1343
|
-
parseGitDiffNameStatus,
|
|
1344
|
-
printChangedFiles,
|
|
1345
|
-
parseGitDiffNumStat,
|
|
1346
|
-
parseGitStatus,
|
|
1347
|
-
getLocalChanges,
|
|
1348
|
-
reviewOutputSchema,
|
|
1349
|
-
formatReviewForConsole,
|
|
1350
|
-
checkGhInstalled,
|
|
1351
|
-
getDefaultBranch,
|
|
1352
|
-
getDefaultContext
|
|
1353
|
-
};
|