copilot-liku-cli 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +411 -0
- package/CONFIGURATION.md +302 -0
- package/CONTRIBUTING.md +225 -0
- package/ELECTRON_README.md +121 -0
- package/INSTALLATION.md +350 -0
- package/LICENSE.md +1 -0
- package/PROJECT_STATUS.md +229 -0
- package/QUICKSTART.md +255 -0
- package/README.md +167 -0
- package/TESTING.md +274 -0
- package/package.json +61 -0
- package/scripts/start.js +30 -0
- package/src/assets/tray-icon.png +0 -0
- package/src/cli/commands/agent.js +327 -0
- package/src/cli/commands/click.js +108 -0
- package/src/cli/commands/drag.js +85 -0
- package/src/cli/commands/find.js +109 -0
- package/src/cli/commands/keys.js +132 -0
- package/src/cli/commands/mouse.js +79 -0
- package/src/cli/commands/repl.js +290 -0
- package/src/cli/commands/screenshot.js +72 -0
- package/src/cli/commands/scroll.js +74 -0
- package/src/cli/commands/start.js +67 -0
- package/src/cli/commands/type.js +57 -0
- package/src/cli/commands/wait.js +84 -0
- package/src/cli/commands/window.js +104 -0
- package/src/cli/liku.js +249 -0
- package/src/cli/util/output.js +174 -0
- package/src/main/agents/base-agent.js +410 -0
- package/src/main/agents/builder.js +484 -0
- package/src/main/agents/index.js +62 -0
- package/src/main/agents/orchestrator.js +362 -0
- package/src/main/agents/researcher.js +511 -0
- package/src/main/agents/state-manager.js +344 -0
- package/src/main/agents/supervisor.js +365 -0
- package/src/main/agents/verifier.js +452 -0
- package/src/main/ai-service.js +1633 -0
- package/src/main/index.js +2208 -0
- package/src/main/inspect-service.js +467 -0
- package/src/main/system-automation.js +1186 -0
- package/src/main/ui-automation/config.js +76 -0
- package/src/main/ui-automation/core/helpers.js +41 -0
- package/src/main/ui-automation/core/index.js +15 -0
- package/src/main/ui-automation/core/powershell.js +82 -0
- package/src/main/ui-automation/elements/finder.js +274 -0
- package/src/main/ui-automation/elements/index.js +14 -0
- package/src/main/ui-automation/elements/wait.js +66 -0
- package/src/main/ui-automation/index.js +164 -0
- package/src/main/ui-automation/interactions/element-click.js +211 -0
- package/src/main/ui-automation/interactions/high-level.js +230 -0
- package/src/main/ui-automation/interactions/index.js +47 -0
- package/src/main/ui-automation/keyboard/index.js +15 -0
- package/src/main/ui-automation/keyboard/input.js +179 -0
- package/src/main/ui-automation/mouse/click.js +186 -0
- package/src/main/ui-automation/mouse/drag.js +88 -0
- package/src/main/ui-automation/mouse/index.js +30 -0
- package/src/main/ui-automation/mouse/movement.js +51 -0
- package/src/main/ui-automation/mouse/scroll.js +116 -0
- package/src/main/ui-automation/screenshot.js +183 -0
- package/src/main/ui-automation/window/index.js +23 -0
- package/src/main/ui-automation/window/manager.js +305 -0
- package/src/main/utils/time.js +62 -0
- package/src/main/visual-awareness.js +597 -0
- package/src/renderer/chat/chat.js +671 -0
- package/src/renderer/chat/index.html +725 -0
- package/src/renderer/chat/preload.js +112 -0
- package/src/renderer/overlay/index.html +648 -0
- package/src/renderer/overlay/overlay.js +782 -0
- package/src/renderer/overlay/preload.js +90 -0
- package/src/shared/grid-math.js +82 -0
- package/src/shared/inspect-types.js +230 -0
|
@@ -0,0 +1,511 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Researcher Agent
|
|
3
|
+
*
|
|
4
|
+
* Gathers context and information for complex tasks.
|
|
5
|
+
* Supports Recursive Long-Context (RLC) patterns for massive inputs.
|
|
6
|
+
*
|
|
7
|
+
* Operating Rules:
|
|
8
|
+
* - Probe and filter large contexts without full loading
|
|
9
|
+
* - Recursive decomposition for massive inputs
|
|
10
|
+
* - Aggregation patterns for coherent results
|
|
11
|
+
* - READ-ONLY operations
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
const { BaseAgent, AgentRole, AgentCapabilities } = require('./base-agent');
|
|
15
|
+
const fs = require('fs');
|
|
16
|
+
const path = require('path');
|
|
17
|
+
|
|
18
|
+
class ResearcherAgent extends BaseAgent {
  /**
   * Read-only research agent with Recursive Long-Context (RLC) support.
   *
   * Probes and filters large contexts without fully loading them, recursively
   * decomposes massive inputs into bounded chunks, and aggregates chunk
   * findings into a coherent report. Deliberately granted no EDIT capability.
   *
   * @param {object} [options] - BaseAgent options plus RLC tuning knobs.
   * @param {string} [options.name='researcher'] - Agent instance name.
   * @param {number} [options.chunkSize=4000] - Approximate token budget per chunk.
   * @param {number} [options.maxChunks=10] - Hard cap on chunks per decomposition.
   * @param {number} [options.cacheMaxAge=3600000] - Research-cache TTL in ms (1 hour).
   */
  constructor(options = {}) {
    super({
      ...options,
      role: AgentRole.RESEARCHER,
      name: options.name || 'researcher',
      description: 'Gathers context and information with RLC support',
      capabilities: [
        AgentCapabilities.SEARCH,
        AgentCapabilities.READ,
        AgentCapabilities.WEB_FETCH,
        AgentCapabilities.TODO,
        AgentCapabilities.HANDOFF
      ]
      // NOTE: No EDIT capability - Researcher is read-only
    });

    // RLC-specific configuration
    this.chunkSize = options.chunkSize || 4000; // tokens per chunk
    this.maxChunks = options.maxChunks || 10;
    this.researchResults = [];

    // Caching and credibility tracking
    this.researchCache = new Map();
    this.cacheMaxAge = options.cacheMaxAge || 3600000; // 1 hour
    this.sourceCredibility = new Map();
  }

  /**
   * System prompt establishing the Researcher's read-only, citation-grounded
   * operating contract and the RLC workflow it must follow.
   * @returns {string} The full system prompt.
   */
  getSystemPrompt() {
    return `You are the RESEARCHER agent in a multi-agent coding system.

# OPERATING CONTRACT (NON-NEGOTIABLE)
- **No guessing**: Ground all findings with sources.
- **Read-only**: Do not modify any files.
- **Efficiency**: Filter before full load; sample massive contexts.
- **Recursion limits**: Depth ≤3; chunk count ≤10.
- **Citations**: Always provide file paths, URLs, or line numbers.

# CAPABILITIES
You have access to the Recursive Long-Context (RLC) Skill:
- **Probe and Filter**: Sample large contexts efficiently
- **Decomposition**: Break massive inputs into chunks
- **Aggregation**: Merge findings coherently
- **Verification**: Validate intermediate results

# WORKFLOW
1. Receive research query from Supervisor
2. Probe: Sample the target context (first 1000 chars, etc.)
3. Filter: Use regex/keywords to identify relevant sections
4. Decompose: If >50K tokens, chunk and process recursively
5. Aggregate: Merge findings with deduplication
6. Report: Structured findings with citations

# OUTPUT FORMAT
Always structure your response as:
1. Query: [what was researched]
2. Sources: [files/URLs examined]
3. Findings: [key discoveries]
4. Evidence: [citations with file:line]
5. Gaps: [what couldn't be found]
6. Suggestions: [next research steps]`;
  }

  /**
   * Main entry point: run a research task, decomposing via RLC when the
   * probed context exceeds ~50K estimated tokens.
   *
   * @param {string|{description: string}} task - Query string or task object.
   * @param {object} [context] - May carry `workspace`/`cwd` (root to probe)
   *   and `urls` (extra sources).
   * @returns {Promise<object>} Result object; `success: false` with `error`
   *   (and any `partialResults`) on failure or recursion-limit breach.
   */
  async process(task, context = {}) {
    this.log('info', 'Researcher processing query', { task: task.description || task });

    // Check recursion limits before doing any work
    const limits = this.checkRecursionLimits();
    if (!limits.allowed) {
      return {
        success: false,
        error: limits.reason
      };
    }

    try {
      this.enterRecursion();

      const query = typeof task === 'string' ? task : task.description;

      // Step 1: Probe the context
      const probeResult = await this.probe(query, context);

      // Step 2: Determine if decomposition is needed
      if (probeResult.estimatedTokens > 50000) {
        // Use RLC decomposition. Pass the probed workspace root and the
        // original query through so downstream stages stay consistent
        // (BUGFIX: chunks were previously re-resolved against process.cwd()
        // and the aggregated report lost the query).
        const chunks = await this.decompose(probeResult);
        const chunkResults = await this.processChunks(chunks, query, probeResult.cwd);
        const aggregated = await this.aggregate(chunkResults, query);

        this.exitRecursion();
        return aggregated;
      }

      // Step 3: Direct research for smaller contexts
      const findings = await this.research(query, probeResult);

      this.exitRecursion();
      return findings;

    } catch (error) {
      this.exitRecursion();
      return {
        success: false,
        error: error.message,
        partialResults: this.researchResults
      };
    }
  }

  // ===== RLC Core Functions =====

  /**
   * Sample the workspace (and any provided URLs) to estimate size and
   * relevance without loading everything.
   *
   * @param {string} query - The research query.
   * @param {object} context - May carry `workspace`/`cwd` and `urls`.
   * @returns {Promise<{query: string, cwd: string, sources: object[],
   *   estimatedTokens: number, relevantSources: object[]}>}
   */
  async probe(query, context) {
    this.log('info', 'Probing context for query', { query });

    const sources = [];
    let estimatedTokens = 0;

    // Probe workspace files - always default to process.cwd()
    const cwd = context.workspace || context.cwd || process.cwd();
    const files = await this.findRelevantFiles(query, cwd);

    // Only sample the first 20 candidates to bound probe cost
    for (const file of files.slice(0, 20)) {
      const filePath = path.join(cwd, file);
      if (fs.existsSync(filePath)) {
        const stat = fs.statSync(filePath);
        // First 1KB is enough to judge relevance without a full read
        const sample = fs.readFileSync(filePath, 'utf-8').slice(0, 1000);

        sources.push({
          type: 'file',
          path: file,
          size: stat.size,
          sample,
          relevant: this.isRelevant(sample, query)
        });

        estimatedTokens += Math.ceil(stat.size / 4); // ~4 chars per token
      }
    }

    // Probe URLs if provided
    if (context.urls) {
      for (const url of context.urls) {
        sources.push({
          type: 'url',
          url,
          sample: null, // Would fetch here
          relevant: true
        });
      }
    }

    return {
      query,
      // Expose the resolved root so later stages read files from the same
      // workspace they were probed in (BUGFIX: processChunks/research used
      // process.cwd() and could silently drop files from other workspaces).
      cwd,
      sources,
      estimatedTokens,
      relevantSources: sources.filter(s => s.relevant)
    };
  }

  /**
   * Walk the workspace (≤3 directory levels, skipping dotfiles and
   * node_modules) collecting candidate source files.
   *
   * Files whose names match a query keyword are always included; otherwise
   * files are included only until 50 have been collected (cheap fallback so
   * a keyword-less query still yields candidates).
   *
   * @param {string} query - Query whose ≥3-char words become keywords.
   * @param {string} cwd - Workspace root to walk.
   * @returns {Promise<string[]>} Paths relative to `cwd`.
   */
  async findRelevantFiles(query, cwd) {
    const extensions = ['.js', '.ts', '.jsx', '.tsx', '.md', '.json', '.py'];
    const files = [];

    // Extract potential file patterns from query (min 3 chars for keywords)
    const keywords = query.toLowerCase().split(/\s+/)
      .filter(w => w.length >= 3);

    const walkDir = (dir, depth = 0) => {
      if (depth > 3) return; // Max depth

      try {
        const entries = fs.readdirSync(dir, { withFileTypes: true });

        for (const entry of entries) {
          if (entry.name.startsWith('.') || entry.name === 'node_modules') continue;

          const fullPath = path.join(dir, entry.name);
          const relativePath = path.relative(cwd, fullPath);

          if (entry.isDirectory()) {
            walkDir(fullPath, depth + 1);
          } else if (extensions.some(ext => entry.name.endsWith(ext))) {
            // Check if filename matches any keyword
            const nameMatch = keywords.some(k =>
              entry.name.toLowerCase().includes(k)
            );

            if (nameMatch || files.length < 50) {
              files.push(relativePath);
            }
          }
        }
      } catch (error) {
        // Skip inaccessible directories (permissions, races) — best-effort walk
      }
    };

    walkDir(cwd);

    return files;
  }

  /**
   * Cheap keyword-overlap relevance test: true if any ≥3-char query word
   * appears (case-insensitively) in the content sample.
   *
   * @param {string} content - Text sample to test.
   * @param {string} query - The research query.
   * @returns {boolean}
   */
  isRelevant(content, query) {
    const keywords = query.toLowerCase().split(/\s+/)
      .filter(w => w.length >= 3);

    const contentLower = content.toLowerCase();
    return keywords.some(k => contentLower.includes(k));
  }

  /**
   * Group relevant sources into chunks of roughly `chunkSize` estimated
   * tokens, capped at `maxChunks` chunks total.
   *
   * BUGFIX: the cap is now enforced *before* filling a chunk and on the
   * trailing push; previously the check ran after appending and the final
   * partial chunk was pushed unconditionally, so up to maxChunks + 1 chunks
   * could be produced.
   *
   * @param {object} probeResult - Result of `probe()`.
   * @returns {Promise<Array<{id: string, sources: object[], estimatedTokens: number}>>}
   */
  async decompose(probeResult) {
    this.log('info', 'Decomposing large context into chunks');

    const chunks = [];
    const relevantSources = probeResult.relevantSources;

    // Group files into chunks
    let currentChunk = {
      id: `chunk-${chunks.length}`,
      sources: [],
      estimatedTokens: 0
    };

    for (const source of relevantSources) {
      if (chunks.length >= this.maxChunks) {
        this.log('warn', `Reached max chunks (${this.maxChunks})`);
        break;
      }

      const sourceTokens = source.type === 'file'
        ? Math.ceil(source.size / 4)
        : 1000; // Estimate for URLs

      // Current chunk is full — seal it and start a fresh one
      if (currentChunk.estimatedTokens + sourceTokens > this.chunkSize) {
        if (currentChunk.sources.length > 0) {
          chunks.push(currentChunk);
        }

        currentChunk = {
          id: `chunk-${chunks.length}`,
          sources: [],
          estimatedTokens: 0
        };
      }

      currentChunk.sources.push(source);
      currentChunk.estimatedTokens += sourceTokens;
    }

    // Flush the trailing partial chunk, still honoring the cap
    if (currentChunk.sources.length > 0 && chunks.length < this.maxChunks) {
      chunks.push(currentChunk);
    }

    return chunks;
  }

  /**
   * Analyze each chunk with the LLM, reading file contents from the probed
   * workspace root.
   *
   * @param {object[]} chunks - Output of `decompose()`.
   * @param {string} query - The research query.
   * @param {string} [cwd=process.cwd()] - Workspace root the sources were
   *   probed in (defaults to process.cwd() for backward compatibility).
   * @returns {Promise<object[]>} One findings record per chunk.
   */
  async processChunks(chunks, query, cwd = process.cwd()) {
    const results = [];

    for (const chunk of chunks) {
      this.log('info', `Processing chunk ${chunk.id}`);

      // Read chunk contents from the same root they were probed in
      const contents = [];
      for (const source of chunk.sources) {
        if (source.type === 'file') {
          const filePath = path.join(cwd, source.path);
          if (fs.existsSync(filePath)) {
            contents.push({
              path: source.path,
              content: fs.readFileSync(filePath, 'utf-8')
            });
          }
        }
      }

      // Ask LLM to analyze this chunk (each file truncated to 3000 chars)
      const prompt = `Analyze these files for information about: ${query}

Files:
${contents.map(c => `--- ${c.path} ---\n${c.content.slice(0, 3000)}`).join('\n\n')}

Extract:
1. Key findings related to the query
2. Important code patterns or structures
3. Dependencies and relationships
4. Potential issues or concerns`;

      const response = await this.chat(prompt);

      results.push({
        chunkId: chunk.id,
        sources: chunk.sources.map(s => s.path),
        findings: response.text,
        timestamp: new Date().toISOString()
      });
    }

    return results;
  }

  /**
   * Merge chunk findings into a single synthesized report via the LLM.
   *
   * BUGFIX: the report previously read `chunkResults[0]?.query`, but chunk
   * records never carry a `query` property, so the field was always
   * undefined. The query is now accepted as a parameter (with the old
   * expression kept as fallback for callers that omit it).
   *
   * @param {object[]} chunkResults - Output of `processChunks()`.
   * @param {string} [query] - The original research query.
   * @returns {Promise<object>} Aggregated report, also appended to `researchResults`.
   */
  async aggregate(chunkResults, query) {
    this.log('info', 'Aggregating chunk results');

    // Merge findings
    const allFindings = chunkResults.map(r => r.findings).join('\n\n---\n\n');
    const allSources = [...new Set(chunkResults.flatMap(r => r.sources))];

    // Ask LLM to synthesize
    const prompt = `Synthesize these research findings into a coherent report.

Findings from ${chunkResults.length} chunks:
${allFindings}

Provide:
1. Summary: Key discoveries (deduplicated)
2. Evidence: Citations with file paths
3. Patterns: Common themes
4. Gaps: What's missing
5. Recommendations: Next steps`;

    const response = await this.chat(prompt);

    const result = {
      success: true,
      query: query ?? chunkResults[0]?.query,
      sources: allSources,
      findings: response.text,
      chunksProcessed: chunkResults.length,
      synthesis: true,
      timestamp: new Date().toISOString()
    };

    this.researchResults.push(result);
    return result;
  }

  /**
   * Direct (non-decomposed) research over the probed relevant sources, with
   * a TTL cache keyed on query + source set.
   *
   * @param {string} query - The research query.
   * @param {object} probeResult - Result of `probe()`.
   * @returns {Promise<object>} Findings record (possibly `fromCache: true`).
   */
  async research(query, probeResult) {
    const cacheKey = this.getCacheKey(query, probeResult);
    const cached = this.researchCache.get(cacheKey);

    // Serve fresh-enough cached results
    if (cached && (Date.now() - cached.timestamp) < this.cacheMaxAge) {
      this.log('info', 'Returning cached research result');
      return {
        ...cached.result,
        fromCache: true,
        cacheAge: Date.now() - cached.timestamp
      };
    }

    this.log('info', 'Conducting direct research');

    // Read relevant files from the probed workspace root
    // (BUGFIX: previously hard-coded process.cwd())
    const cwd = probeResult.cwd || process.cwd();
    const contents = [];
    for (const source of probeResult.relevantSources) {
      if (source.type === 'file') {
        const filePath = path.join(cwd, source.path);
        if (fs.existsSync(filePath)) {
          contents.push({
            path: source.path,
            content: fs.readFileSync(filePath, 'utf-8')
          });
        }
      }
    }

    // Ask LLM for research findings (each file truncated to 4000 chars)
    const prompt = `Research query: ${query}

Relevant files:
${contents.map(c => `--- ${c.path} ---\n${c.content.slice(0, 4000)}`).join('\n\n')}

Provide comprehensive findings with:
1. Direct answers to the query
2. Relevant code examples (with file:line citations)
3. Related concepts or patterns
4. Potential gaps in the codebase
5. Recommendations`;

    const response = await this.chat(prompt);

    const result = {
      success: true,
      query,
      sources: contents.map(c => c.path),
      findings: response.text,
      synthesis: false,
      timestamp: new Date().toISOString()
    };

    this.researchResults.push(result);

    // Cache the result
    this.researchCache.set(cacheKey, {
      result,
      timestamp: Date.now(),
      query,
      // NOTE(review): modelMetadata is presumably set by BaseAgent — not
      // visible in this file; undefined is harmless here.
      modelMetadata: this.modelMetadata
    });

    return result;
  }

  /**
   * Deterministic cache key: the query plus the sorted, pipe-joined set of
   * relevant source paths/URLs.
   *
   * @param {string} query
   * @param {object} probeResult - Result of `probe()`.
   * @returns {string}
   */
  getCacheKey(query, probeResult) {
    const sources = probeResult.relevantSources.map(s => s.path || s.url).sort().join('|');
    return `${query}::${sources}`;
  }

  /**
   * Record whether a source turned out to be helpful, accumulating simple
   * helpful/unhelpful counters plus a last-accessed timestamp.
   *
   * @param {string} sourcePath - File path or URL being rated.
   * @param {boolean} wasHelpful
   */
  updateSourceCredibility(sourcePath, wasHelpful) {
    const current = this.sourceCredibility.get(sourcePath) || {
      helpful: 0,
      unhelpful: 0,
      lastAccessed: null
    };

    if (wasHelpful) {
      current.helpful++;
    } else {
      current.unhelpful++;
    }
    current.lastAccessed = new Date().toISOString();

    this.sourceCredibility.set(sourcePath, current);
  }

  /** Drop all cached research results. */
  clearCache() {
    this.researchCache.clear();
  }

  /**
   * Inspect cache state for diagnostics.
   * @returns {{size: number, maxAge: number, entries: string[]}}
   */
  getCacheStats() {
    return {
      size: this.researchCache.size,
      maxAge: this.cacheMaxAge,
      entries: Array.from(this.researchCache.keys())
    };
  }

  // ===== Utility Methods =====

  /**
   * Grep-like regex search over the workspace (≤4 directory levels, skipping
   * dotfiles and node_modules), returning at most 50 file hits.
   *
   * @param {string} pattern - Regex source; compiled with the 'gi' flags.
   *   Throws if the pattern is invalid.
   * @param {object} [options] - `cwd` overrides the search root.
   * @returns {Promise<Array<{file: string, matchCount: number, sample: string[]}>>}
   */
  async searchCodebase(pattern, options = {}) {
    const results = [];
    const cwd = options.cwd || process.cwd();

    // Compile once, outside the walk (previously rebuilt per file).
    // String.prototype.match with a /g regex scans the whole string and
    // does not leave stateful lastIndex behind, so reuse is safe.
    const regex = new RegExp(pattern, 'gi');

    const walkDir = (dir, depth = 0) => {
      if (depth > 4) return;

      try {
        const entries = fs.readdirSync(dir, { withFileTypes: true });

        for (const entry of entries) {
          if (entry.name.startsWith('.') || entry.name === 'node_modules') continue;

          const fullPath = path.join(dir, entry.name);

          if (entry.isDirectory()) {
            walkDir(fullPath, depth + 1);
          } else if (/\.(js|ts|jsx|tsx|md|json)$/.test(entry.name)) {
            try {
              const content = fs.readFileSync(fullPath, 'utf-8');
              const matches = content.match(regex);

              if (matches) {
                results.push({
                  file: path.relative(cwd, fullPath),
                  matchCount: matches.length,
                  sample: matches.slice(0, 3)
                });
              }
            } catch (e) {
              // Skip unreadable files (binary, permissions) — best-effort search
            }
          }
        }
      } catch (error) {
        // Skip inaccessible directories
      }
    };

    walkDir(cwd);

    return results.slice(0, 50);
  }

  /** Reset agent state, findings, cache, and credibility scores. */
  reset() {
    super.reset();
    this.researchResults = [];
    this.researchCache.clear();
    this.sourceCredibility.clear();
  }
}
|
|
510
|
+
|
|
511
|
+
// Named export so consumers destructure: const { ResearcherAgent } = require('./researcher')
module.exports = { ResearcherAgent };
|