@skroyc/librarian 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +176 -0
- package/LICENSE +210 -0
- package/README.md +614 -0
- package/biome.jsonc +9 -0
- package/dist/agents/context-schema.d.ts +17 -0
- package/dist/agents/context-schema.d.ts.map +1 -0
- package/dist/agents/context-schema.js +16 -0
- package/dist/agents/context-schema.js.map +1 -0
- package/dist/agents/react-agent.d.ts +38 -0
- package/dist/agents/react-agent.d.ts.map +1 -0
- package/dist/agents/react-agent.js +719 -0
- package/dist/agents/react-agent.js.map +1 -0
- package/dist/agents/tool-runtime.d.ts +7 -0
- package/dist/agents/tool-runtime.d.ts.map +1 -0
- package/dist/agents/tool-runtime.js +2 -0
- package/dist/agents/tool-runtime.js.map +1 -0
- package/dist/cli.d.ts +4 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +172 -0
- package/dist/cli.js.map +1 -0
- package/dist/config.d.ts +4 -0
- package/dist/config.d.ts.map +1 -0
- package/dist/config.js +243 -0
- package/dist/config.js.map +1 -0
- package/dist/index.d.ts +41 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +470 -0
- package/dist/index.js.map +1 -0
- package/dist/tools/file-finding.tool.d.ts +24 -0
- package/dist/tools/file-finding.tool.d.ts.map +1 -0
- package/dist/tools/file-finding.tool.js +198 -0
- package/dist/tools/file-finding.tool.js.map +1 -0
- package/dist/tools/file-listing.tool.d.ts +12 -0
- package/dist/tools/file-listing.tool.d.ts.map +1 -0
- package/dist/tools/file-listing.tool.js +132 -0
- package/dist/tools/file-listing.tool.js.map +1 -0
- package/dist/tools/file-reading.tool.d.ts +9 -0
- package/dist/tools/file-reading.tool.d.ts.map +1 -0
- package/dist/tools/file-reading.tool.js +112 -0
- package/dist/tools/file-reading.tool.js.map +1 -0
- package/dist/tools/grep-content.tool.d.ts +27 -0
- package/dist/tools/grep-content.tool.d.ts.map +1 -0
- package/dist/tools/grep-content.tool.js +229 -0
- package/dist/tools/grep-content.tool.js.map +1 -0
- package/dist/utils/file-utils.d.ts +2 -0
- package/dist/utils/file-utils.d.ts.map +1 -0
- package/dist/utils/file-utils.js +28 -0
- package/dist/utils/file-utils.js.map +1 -0
- package/dist/utils/logger.d.ts +32 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +177 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/path-utils.d.ts +2 -0
- package/dist/utils/path-utils.d.ts.map +1 -0
- package/dist/utils/path-utils.js +9 -0
- package/dist/utils/path-utils.js.map +1 -0
- package/package.json +84 -0
- package/src/agents/context-schema.ts +61 -0
- package/src/agents/react-agent.ts +928 -0
- package/src/agents/tool-runtime.ts +21 -0
- package/src/cli.ts +206 -0
- package/src/config.ts +309 -0
- package/src/index.ts +628 -0
- package/src/tools/file-finding.tool.ts +324 -0
- package/src/tools/file-listing.tool.ts +212 -0
- package/src/tools/file-reading.tool.ts +154 -0
- package/src/tools/grep-content.tool.ts +325 -0
- package/src/utils/file-utils.ts +39 -0
- package/src/utils/logger.ts +295 -0
- package/src/utils/path-utils.ts +17 -0
- package/tsconfig.json +37 -0
- package/tsconfig.test.json +17 -0
package/dist/agents/react-agent.js
@@ -0,0 +1,719 @@
import { createAgent, anthropicPromptCachingMiddleware, todoListMiddleware } from "langchain";
import { fileListTool } from "../tools/file-listing.tool.js";
import { fileReadTool } from "../tools/file-reading.tool.js";
import { grepContentTool } from "../tools/grep-content.tool.js";
import { fileFindTool } from "../tools/file-finding.tool.js";
import { ChatOpenAI } from "@langchain/openai";
import { ChatAnthropic } from "@langchain/anthropic";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HumanMessage } from "@langchain/core/messages";
import { logger } from "../utils/logger.js";
import os from "node:os";
import { mkdir, rm } from "node:fs/promises";
import path from "node:path";
import { spawn } from "node:child_process";
import { Readable } from "node:stream";
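// ReactAgent wraps four read-only filesystem tools and a configurable chat model
// (or an external Claude Code / Gemini CLI process) behind a single query/stream API.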
export class ReactAgent {
    aiModel;
    tools;
    agent;
    config;
    contextSchema;
    constructor(config) {
        this.config = config;
        this.contextSchema = config.contextSchema;
        if (config.aiProvider.type !== "claude-code" &&
            config.aiProvider.type !== "gemini-cli") {
            this.aiModel = this.createAIModel(config.aiProvider);
        }
        this.tools = [fileListTool, fileReadTool, grepContentTool, fileFindTool];
        logger.info("AGENT", "Initializing ReactAgent", {
            aiProviderType: config.aiProvider.type,
            model: config.aiProvider.model,
            workingDir: config.workingDir.replace(os.homedir(), "~"),
            toolCount: this.tools.length,
            hasContextSchema: !!this.contextSchema,
        });
    }
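    // Builds the investigator system prompt, then substitutes the configured
    // technology name, repository URL, and working directory into the template.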
    createDynamicSystemPrompt() {
        const { workingDir, technology } = this.config;
        let prompt = `
You are a **Codebase Investigator** specializing in technology exploration and architectural analysis. Your core purpose is to provide deep technical insights grounded in actual source code evidence. You approach every question as an investigation, requiring verification before drawing conclusions.

**Your Key Traits:**
- Methodical exploration of codebases
- Evidence-based conclusions backed by specific source citations
- Clear and accessible technical communication
- Intellectual honesty about knowledge boundaries

# Instructions

## Investigation Protocol

**INVESTIGATION RULE 1 - Boundaries:**
- Work only within read-only exploration of the sandboxed working directory
- Every technical claim must be tied to specific source code evidence
- Admit uncertainty when code hasn't been verified—read files rather than speculate

**INVESTIGATION RULE 2 - Methodology:**
- Start by mapping the codebase structure (directories, key files)
- Trace how components connect through imports, exports, and function calls
- Validate assumptions by reading actual implementations
- Build your answer from verified source evidence, not assumptions

**INVESTIGATION RULE 3 - User Focus:**
- Prioritize complete answers over asking follow-up questions
- Provide context that helps users understand patterns, not just individual functions
- Bridge the gap between code behavior and practical application

## Verification Threshold

**DECISION RULE 1 - Action Threshold:**
- If seeing a file would improve your answer, read it immediately—do not ask the user first
- If asked about an unseen component, investigate it before responding

**DECISION RULE 2 - Confidence Check:**
- Before finalizing any answer, verify: "Am I relying on external libraries or modules I haven't confirmed in this codebase?"
- If yes: either read the local source or explicitly state the limitation

**DECISION RULE 3 - Ambiguity Protocol:**
- When multiple interpretations exist, state the uncertainty
- Provide the most likely answer with supporting evidence
- Note alternative possibilities and their conditions

## Diagnostic Reasoning

**DIAGNOSTIC RULE 1 - Generation:**
- For complex logic, list multiple possible explanations
- Do not settle on the first explanation you find

**DIAGNOSTIC RULE 2 - Validation:**
- Use file reads to confirm which explanation matches reality
- Look for contradictory evidence in other files

**DIAGNOSTIC RULE 3 - Reporting:**
- Present the winning explanation with supporting citations
- Explain why other options don't fit the evidence
- Note any questions that remain unanswered

## Adaptive Validation Protocol

**VALIDATION RULE 1 - Self-Correction Loop:**
- After examining any file, challenge your planned explanation
- Ask: "Does this contradict what I was about to say?"

**VALIDATION RULE 2 - Pivot Strategy:**
- When initial searches fail, expand your approach
- Check configuration files, related directories, or alternative naming patterns
- Never declare something missing without exhaustive exploration

**VALIDATION RULE 3 - Integration Check:**
- Ensure new findings integrate with your existing understanding
- Update your mental model rather than ignoring contradictory evidence

## Information Scoping Rules

**SCOPE 1 - Primary Source:**
The working directory contains the definitive truth. Start and end here.

**SCOPE 2 - Supporting Context:**
- Language documentation explains expected behavior
- Configuration files set constraints and options
- Use these to interpret what you find

**SCOPE 3 - Inferred Patterns:**
- Consistent patterns across files suggest conventions
- Use patterns to guide interpretation, not as definitive proof

**NOTE:** If external documentation contradicts local code, the local code is always correct for this repository.

## Citation Standards Protocol

**CITATION RULE 1 - Evidence Requirement:**
- Every technical claim must cite specific file paths and, where possible, line numbers or function names
- Vague references like "the code" or "this file" are insufficient

**CITATION RULE 2 - Acknowledgment Protocol:**
- When information is not found in the directory, explicitly state: "Based on the accessible files, I cannot find [X], but typically [Y] applies."

**CITATION RULE 3 - Confidence Calibration:**
- Distinguish between verified facts (citing files) and inferred patterns (noting the distinction)
- Never present inference as fact without clear labeling

## Thoroughness Verification System

**VERIFICATION RULE 1 - Configuration Check:**
- Have you considered all config files that might affect this behavior?
- Do not explain code in isolation from its configuration context

**VERIFICATION RULE 2 - Principle Coverage:**
- Does your answer explain both the specific case AND the general pattern?
- Help users apply this knowledge beyond the immediate example

**VERIFICATION RULE 3 - Question Coverage:**
- Have you addressed every part of the user's question?
- Note any intentional limitations or scope boundaries

## Failure Response System

**RESPONSE RULE 1 - Temporary Failures:**
- Timeouts and transient issues warrant retry (max 3 attempts)
- After retries exhaust, document the access issue

**RESPONSE RULE 2 - Permanent Failures:**
- Missing files, permission issues: stop retrying immediately
- Attempt alternative discovery methods or acknowledge the gap

**RESPONSE RULE 3 - Best Effort Resolution:**
- For obfuscated, missing, or inaccessible code:
  - Provide answers grounded in standard practices
  - Explicitly note confidence levels and knowledge boundaries

## Response Integrity Standard

**INTEGRITY RULE 1 - No Premature Responses:**
- Complete your full investigation before answering
- Resist the urge to respond before verification

**INTEGRITY RULE 2 - Evidence Compilation:**
- Gather all relevant file evidence before synthesizing
- Confirm no stone has been left unturned

**INTEGRITY RULE 3 - Final Validation:**
- Deliver your answer only when:
  - All tools have been exhausted
  - Evidence supports your conclusions
  - You can cite specific sources for every claim

**INTEGRITY RULE 4 - Developer Consumption Focus (Default Behavior):**
- Frame explanations around how a developer WOULD USE this code, not how they might EXTEND it
- Focus on APIs, parameters, return values, and integration patterns
- Provide usage examples that show calling code, not implementation code
- When explaining implementation details, contextualize them for consumption use cases

**EXCEPTION - Architecture/Extension Queries:**
- ONLY deviate from the consumption focus when the user explicitly asks for it
- Examples: "What is the architecture of X?", "How can we extend X?", "How is X structured?"
- In these cases, provide architectural perspective as requested

# Reasoning Steps

Before taking any action (tool calls or user responses), you must proactively, methodically, and independently plan:

1. **Analyze the Request:** What specifically is the user asking about? What technology or component is involved?
2. **Map the Scope:** What directories and files are relevant? What is the boundary of investigation?
3. **Gather Evidence:** Read relevant source files to build an evidence base
4. **Synthesize Findings:** Connect evidence to form a coherent answer
5. **Verify Completeness:** Have you addressed all parts of the question? Are citations complete?
6. **Finalize Response:** Deliver the answer with appropriate context and citations

# Output Format

- Provide technical answers in clear prose with specific file citations
- Use code blocks for actual code examples
- Structure responses logically: context → evidence → conclusion
- Include relevant file paths and line numbers for all technical claims
- When uncertain, state limitations explicitly rather than speculating

# Examples

<example>
<user_query>
How does the authentication system handle token refresh in this codebase?
</user_query>
<assistant_response>
Based on my investigation of the codebase, here's how the authentication token refresh works:

**Token Refresh Mechanism** (\`src/auth/tokenManager.ts:45-78\`)

The token refresh is handled by the \`refreshToken()\` method in the \`TokenManager\` class. When a token expires:

1. The method first checks if a refresh token exists in secure storage
2. If found, it makes a POST request to \`/api/auth/refresh\` endpoint with the refresh token
3. On success, both access and refresh tokens are updated in localStorage
4. On failure, the user is redirected to login

**Key Files:**
- \`src/auth/tokenManager.ts:45-78\` - Main refresh logic
- \`src/auth/api.ts:12-24\` - API call implementation
- \`src/auth/middleware.ts:89-101\` - Interceptor that triggers refresh

The refresh flow is triggered automatically by the axios interceptor when a 401 response is received, ensuring seamless token renewal without user intervention.
</assistant_response>
</example>

<example>
<user_query>
What database queries are used for the user search feature?
</user_query>
<assistant_response>
I've located the user search implementation across two files:

**Query Implementation** (\`src/services/userSearch.ts:23-67\`)

The search feature uses Prisma's filtered findMany query:

\`\`\`typescript
const users = await prisma.user.findMany({
  where: {
    OR: [
      { name: { contains: query, mode: 'insensitive' } },
      { email: { contains: query, mode: 'insensitive' } },
    ],
    deletedAt: null,
  },
  take: 20,
  orderBy: { updatedAt: 'desc' },
});
\`\`\`

**Related Files:**
- \`src/services/userSearch.ts:23-67\` - Main query logic
- \`src/types/user.ts:1-45\` - User model definition
- \`src/dto/userSearchDTO.ts\` - Input validation

The query performs case-insensitive searches on both name and email fields, excludes soft-deleted users, limits results to 20, and orders by most recently updated.

**Note:** I could not find any Elasticsearch or full-text search integration—this appears to use standard Prisma filtering for small-to-medium datasets.
</assistant_response>
</example>

# Context

<context_block>
You have been provided the **[TECHNOLOGY_NAME]** repository.
Repository: [REPOSITORY_URL]
Your Working Directory: [WORKING_DIRECTORY]

Remember: ALL tool calls MUST be executed using absolute path in \`[WORKING_DIRECTORY]\`
</context_block>

**Note:** If no specific technology context is provided, you are working with multiple related repositories in the specified working directory.

---

**Before responding to any user query, verify you have sufficient evidence to support your claims. When in doubt, read more files rather than speculate.**
`;
        if (technology) {
            prompt = prompt.replace("<context_block>", `You have been provided the **${technology.name}** repository.
Repository: ${technology.repository}
Your Working Directory: ${workingDir}

Remember that ALL tool calls MUST be executed using absolute path in \`${workingDir}\``);
            prompt = prompt.replace("</context_block>", "");
        }
        else {
            prompt = prompt.replace("<context_block>", `You have been provided several related repositories to work with grouped in the following working directory: ${workingDir}

Remember that ALL tool calls MUST be executed using absolute path in \`${workingDir}\``);
            prompt = prompt.replace("</context_block>", "");
        }
        logger.debug("AGENT", "Dynamic system prompt generated", {
            hasTechnologyContext: !!technology,
            promptLength: prompt.length,
        });
        return prompt;
    }
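    // Gemini CLI helpers: each run gets a throwaway temp dir holding the system
    // prompt (system.md) and a restricted settings.json, wired up via env vars.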
    async createGeminiTempDir() {
        const tempDir = path.join(os.tmpdir(), `librarian-gemini-${Date.now()}`);
        await mkdir(tempDir, { recursive: true });
        return tempDir;
    }
    async setupGeminiConfig(tempDir, systemPrompt, _model) {
        const systemPromptPath = path.join(tempDir, "system.md");
        const settingsPath = path.join(tempDir, "settings.json");
        await Bun.write(systemPromptPath, systemPrompt);
        const settings = {
            tools: {
                core: ["list_directory", "read_file", "glob", "search_file_content"],
                autoAccept: true,
            },
            mcpServers: {},
            mcp: {
                excluded: ["*"],
            },
            experimental: {
                enableAgents: false,
            },
            output: {
                format: "json",
            },
        };
        await Bun.write(settingsPath, JSON.stringify(settings, null, 2));
        return { systemPromptPath, settingsPath };
    }
    buildGeminiEnv(tempDir, model) {
        const settingsPath = path.join(tempDir, "settings.json");
        const systemPromptPath = path.join(tempDir, "system.md");
        return {
            ...Bun.env,
            GEMINI_SYSTEM_MD: systemPromptPath,
            GEMINI_CLI_SYSTEM_DEFAULTS_PATH: settingsPath,
            GEMINI_CLI_SYSTEM_SETTINGS_PATH: settingsPath,
            GEMINI_MODEL: model,
        };
    }
    async cleanupGeminiTempDir(tempDir) {
        try {
            await rm(tempDir, { recursive: true, force: true });
        }
        catch (err) {
            logger.warn("AGENT", "Failed to cleanup Gemini temp files", {
                error: err,
            });
        }
    }
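    // Spawns the external `claude` CLI and yields assistant text as it arrives,
    // parsing its stream-json output line by line; non-JSON lines are skipped.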
    async *streamClaudeCli(query, context) {
        const workingDir = context?.workingDir || this.config.workingDir;
        const systemPrompt = this.createDynamicSystemPrompt();
        const args = [
            "-p",
            query,
            "--system-prompt",
            systemPrompt,
            "--tools",
            "Read,Glob,Grep",
            "--dangerously-skip-permissions",
            "--output-format",
            "stream-json",
        ];
        const env = {
            ...Bun.env,
            CLAUDE_PROJECT_DIR: workingDir,
            ...(this.config.aiProvider.model && {
                ANTHROPIC_MODEL: this.config.aiProvider.model,
            }),
        };
        logger.debug("AGENT", "Spawning Claude CLI", {
            args: args.map((a) => (a.length > 100 ? a.substring(0, 100) + "..." : a)),
            workingDir,
        });
        const proc = spawn("claude", args, {
            cwd: workingDir,
            env,
        });
        let buffer = "";
        if (!proc.stdout) {
            throw new Error("Failed to capture Claude CLI output");
        }
        const readable = Readable.from(proc.stdout);
        for await (const chunk of readable) {
            buffer += chunk.toString();
            const lines = buffer.split("\n");
            buffer = lines.pop() || "";
            for (const line of lines) {
                if (!line.trim())
                    continue;
                try {
                    const data = JSON.parse(line);
                    if (data.type === "text" && data.content) {
                        yield data.content;
                    }
                    else if (data.type === "content_block_delta" && data.delta?.text) {
                        yield data.delta.text;
                    }
                    else if (data.type === "message" && data.content) {
                        if (Array.isArray(data.content)) {
                            for (const block of data.content) {
                                if (block.type === "text" && block.text) {
                                    yield block.text;
                                }
                            }
                        }
                    }
                }
                catch {
                }
            }
        }
        await new Promise((resolve, reject) => {
            proc.on("exit", (code) => {
                if (code === 0)
                    resolve();
                else
                    reject(new Error(`Claude CLI exited with code ${code}`));
            });
            proc.on("error", reject);
        });
    }
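    // Spawns the `gemini` CLI (via Bun.spawn) with the temp-dir config and yields
    // assistant text from its stream-json output; the temp dir is always removed
    // in the finally block.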
    async *streamGeminiCli(query, context) {
        const workingDir = context?.workingDir || this.config.workingDir;
        const systemPrompt = this.createDynamicSystemPrompt();
        const tempDir = await this.createGeminiTempDir();
        const model = this.config.aiProvider.model || "gemini-2.5-flash";
        try {
            await this.setupGeminiConfig(tempDir, systemPrompt, model);
            const args = [
                "gemini",
                "-p",
                query,
                "--output-format",
                "stream-json",
                "--yolo",
            ];
            const env = this.buildGeminiEnv(tempDir, model);
            logger.debug("AGENT", "Spawning Gemini CLI", {
                args,
                workingDir,
                model,
            });
            const proc = Bun.spawn(args, {
                cwd: workingDir,
                env,
                stdout: "pipe",
                stderr: "pipe",
            });
            const reader = proc.stdout.getReader();
            let buffer = "";
            while (true) {
                const { done, value } = await reader.read();
                if (done) {
                    break;
                }
                buffer += new TextDecoder().decode(value);
                const lines = buffer.split("\n");
                buffer = lines.pop() || "";
                for (const line of lines) {
                    if (!line.trim()) {
                        continue;
                    }
                    try {
                        const data = JSON.parse(line);
                        const text = this.parseGeminiStreamLine(data);
                        if (text) {
                            yield text;
                        }
                    }
                    catch {
                    }
                }
            }
            const exitCode = await proc.exited;
            if (exitCode !== 0) {
                throw new Error(`Gemini CLI exited with code ${exitCode}`);
            }
        }
        finally {
            await this.cleanupGeminiTempDir(tempDir);
        }
    }
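    // Extracts assistant text from a parsed stream-json object; anything that is
    // not an assistant "message" with content returns null.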
    parseGeminiStreamLine(data) {
        if (data && typeof data === "object" && "type" in data && "role" in data && "content" in data) {
            const typedData = data;
            if (typedData.type === "message" && typedData.role === "assistant" && typedData.content) {
                return typedData.content;
            }
        }
        return null;
    }
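    // Maps the configured provider type to a LangChain chat model instance;
    // the CLI provider types never reach this method.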
    createAIModel(aiProvider) {
        const { type, apiKey, model, baseURL } = aiProvider;
        logger.debug("AGENT", "Creating AI model instance", {
            type,
            model,
            hasBaseURL: !!baseURL,
        });
        switch (type) {
            case "openai":
                return new ChatOpenAI({
                    apiKey,
                    modelName: model || "gpt-5.2",
                });
            case "openai-compatible":
                return new ChatOpenAI({
                    apiKey,
                    modelName: model || "gpt-5.2",
                    configuration: {
                        baseURL: baseURL || "https://api.openai.com/v1",
                    },
                });
            case "anthropic":
                return new ChatAnthropic({
                    apiKey,
                    modelName: model || "claude-sonnet-4-5",
                });
            case "anthropic-compatible":
                if (!baseURL) {
                    throw new Error("baseURL is required for anthropic-compatible provider");
                }
                if (!model) {
                    throw new Error("model is required for anthropic-compatible provider");
                }
                return new ChatAnthropic({
                    apiKey,
                    modelName: model,
                    anthropicApiUrl: baseURL,
                });
            case "google":
                return new ChatGoogleGenerativeAI({
                    apiKey,
                    model: model || "gemini-3-flash-preview",
                });
            default:
                logger.error("AGENT", "Unsupported AI provider type", new Error(`Unsupported AI provider type: ${type}`), { type });
                throw new Error(`Unsupported AI provider type: ${type}`);
        }
    }
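    // Builds the LangChain agent for API providers; CLI providers skip this step
    // because the external process manages its own tools and prompt.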
    initialize() {
        if (this.config.aiProvider.type === "claude-code" ||
            this.config.aiProvider.type === "gemini-cli") {
            logger.info("AGENT", `${this.config.aiProvider.type} CLI mode initialized (skipping LangChain setup)`);
            return Promise.resolve();
        }
        if (!this.aiModel) {
            throw new Error("AI model not created for non-CLI provider");
        }
        this.agent = createAgent({
            model: this.aiModel,
            tools: this.tools,
            systemPrompt: this.createDynamicSystemPrompt(),
            middleware: [
                todoListMiddleware(),
                ...(this.config.aiProvider.type === "anthropic" ||
                    this.config.aiProvider.type === "anthropic-compatible"
                    ? [anthropicPromptCachingMiddleware()]
                    : []),
            ],
        });
        logger.info("AGENT", "Agent initialized successfully", {
            toolCount: this.tools.length,
            hasContextSchema: !!this.contextSchema,
        });
        return Promise.resolve();
    }
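    // Runs a single query to completion and returns the final answer as a string;
    // the _repoPath parameter is unused.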
    async queryRepository(_repoPath, query, context) {
        logger.info("AGENT", "Query started", {
            queryLength: query.length,
            hasContext: !!context,
        });
        if (this.config.aiProvider.type === "claude-code") {
            let fullContent = "";
            for await (const chunk of this.streamClaudeCli(query, context)) {
                fullContent += chunk;
            }
            return fullContent;
        }
        if (this.config.aiProvider.type === "gemini-cli") {
            let fullContent = "";
            for await (const chunk of this.streamGeminiCli(query, context)) {
                fullContent += chunk;
            }
            return fullContent;
        }
        const timingId = logger.timingStart("agentQuery");
        if (!this.agent) {
            logger.error("AGENT", "Agent not initialized", new Error("Agent not initialized. Call initialize() first."));
            throw new Error("Agent not initialized. Call initialize() first.");
        }
        const messages = [new HumanMessage(query)];
        logger.debug("AGENT", "Invoking agent with messages", {
            messageCount: messages.length,
            hasContext: !!context,
        });
        const result = await this.agent.invoke({
            messages,
        }, context ? { context, recursionLimit: 100 } : { recursionLimit: 100 });
        const lastMessage = result.messages.at(-1);
        const content = typeof lastMessage.content === "string"
            ? lastMessage.content
            : JSON.stringify(lastMessage.content);
        logger.timingEnd(timingId, "AGENT", "Query completed");
        logger.info("AGENT", "Query result received", {
            responseLength: content.length,
        });
        return content;
    }
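    // Streaming variant of queryRepository. CLI providers stream incrementally;
    // for LangChain providers the agent is invoked once and the final message
    // content is yielded as a single chunk.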
    async *streamRepository(_repoPath, query, context) {
        logger.info("AGENT", "Stream started", {
            queryLength: query.length,
            hasContext: !!context,
        });
        if (this.config.aiProvider.type === "claude-code") {
            yield* this.streamClaudeCli(query, context);
            return;
        }
        if (this.config.aiProvider.type === "gemini-cli") {
            yield* this.streamGeminiCli(query, context);
            return;
        }
        const timingId = logger.timingStart("agentStream");
        if (!this.agent) {
            logger.error("AGENT", "Agent not initialized", new Error("Agent not initialized. Call initialize() first."));
            throw new Error("Agent not initialized. Call initialize() first.");
        }
        const messages = [new HumanMessage(query)];
        logger.debug("AGENT", "Invoking agent stream with messages", {
            messageCount: messages.length,
            hasContext: !!context,
        });
        const cleanup = () => {
        };
        logger.debug("AGENT", "Setting up interruption handlers for streaming");
        process.on("SIGINT", cleanup);
        process.on("SIGTERM", cleanup);
        try {
            const result = await this.agent.invoke({ messages }, context ? { context, recursionLimit: 100 } : { recursionLimit: 100 });
            const content = extractMessageContent(result);
            if (content) {
                yield content;
            }
            yield "\n";
        }
        catch (error) {
            const errorMessage = getStreamingErrorMessage(error);
            logger.error("AGENT", "Streaming error", error instanceof Error ? error : new Error(errorMessage));
            yield `\n\n[Error: ${errorMessage}]`;
            throw error;
        }
        finally {
            process.removeListener("SIGINT", cleanup);
            process.removeListener("SIGTERM", cleanup);
            logger.timingEnd(timingId, "AGENT", "Streaming completed");
        }
    }
}
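// Pulls the text out of the agent's final message, joining text blocks when the
// content is an array of message parts.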
function extractMessageContent(result) {
    if (!result.messages || result.messages.length === 0) {
        return null;
    }
    const lastMessage = result.messages.at(-1);
    if (!lastMessage?.content) {
        return null;
    }
    const content = lastMessage.content;
    if (typeof content === "string") {
        return content;
    }
    if (Array.isArray(content)) {
        const parts = [];
        for (const block of content) {
            if (block && typeof block === "object") {
                const blockObj = block;
                if (blockObj.type === "text" && typeof blockObj.text === "string") {
                    parts.push(blockObj.text);
                }
            }
        }
        return parts.length > 0 ? parts.join("") : null;
    }
    return null;
}
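// Translates common failure modes (timeout, network, rate limit, auth) into
// user-facing error messages.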
function getStreamingErrorMessage(error) {
    if (!(error instanceof Error)) {
        return "Unknown streaming error";
    }
    if (error.message.includes("timeout")) {
        return "Streaming timeout - request took too long to complete";
    }
    if (error.message.includes("network") || error.message.includes("ENOTFOUND")) {
        return "Network error - unable to connect to AI provider";
    }
    if (error.message.includes("rate limit")) {
        return "Rate limit exceeded - please try again later";
    }
    if (error.message.includes("authentication") || error.message.includes("unauthorized")) {
        return "Authentication error - check your API credentials";
    }
    return `Streaming error: ${error.message}`;
}
//# sourceMappingURL=react-agent.js.map
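
For reference, a minimal sketch of how a consumer might drive this class, based only on the shapes visible in this file. The config field names (aiProvider, workingDir, technology, contextSchema) come from this compiled output; whether ReactAgent is re-exported from the package root is an assumption, so adjust the import to match the published entry points.

// Hypothetical usage sketch; not taken from the package docs.
import { ReactAgent } from "@skroyc/librarian"; // assumption: re-exported from the root

const agent = new ReactAgent({
    aiProvider: {
        type: "anthropic",               // or "openai", "google", "claude-code", "gemini-cli", ...
        apiKey: process.env.ANTHROPIC_API_KEY,
        model: "claude-sonnet-4-5",
    },
    workingDir: "/abs/path/to/checkout", // directory the agent may read
    technology: {                        // optional; fills the <context_block> in the prompt
        name: "Example",
        repository: "https://example.com/repo",
    },
});

await agent.initialize();                // no-op for the CLI providers

// One-shot answer (the first argument is unused by the implementation):
const answer = await agent.queryRepository("", "How is authentication implemented?");

// Or stream chunks as they arrive:
for await (const chunk of agent.streamRepository("", "How is authentication implemented?")) {
    process.stdout.write(chunk);
}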