@codexa/cli 9.0.24 → 9.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/commands/discover.ts +14 -1
- package/commands/patterns.ts +207 -0
- package/commands/task.ts +28 -0
- package/context/assembly.ts +9 -1
- package/context/file-writer.ts +12 -1
- package/context/generator.ts +39 -18
- package/context/sections.ts +45 -15
- package/db/schema.ts +40 -0
- package/gates/validator.ts +37 -1
- package/package.json +1 -1
- package/protocol/process-return.ts +12 -1
- package/templates/subagent-prompt-lean.md +14 -6
- package/workflow.ts +26 -0
package/commands/discover.ts
CHANGED
|
@@ -12,7 +12,7 @@ import {
|
|
|
12
12
|
type UnifiedDetectionResult,
|
|
13
13
|
} from "../detectors/loader";
|
|
14
14
|
import { CodexaError } from "../errors";
|
|
15
|
-
import { getGrepaiWorkspace } from "./patterns";
|
|
15
|
+
import { getGrepaiWorkspace, detectSemanticPatterns, saveSemanticPatterns, isGrepaiAvailable } from "./patterns";
|
|
16
16
|
|
|
17
17
|
interface StackDetection {
|
|
18
18
|
frontend?: string;
|
|
@@ -274,8 +274,21 @@ export function discoverConfirm(): void {
|
|
|
274
274
|
// Auto-setup: deep-explore agent
|
|
275
275
|
ensureDeepExploreAgent();
|
|
276
276
|
|
|
277
|
+
// v10.2: Detect semantic patterns via grepai (if available)
|
|
278
|
+
let semanticCount = 0;
|
|
279
|
+
if (isGrepaiAvailable()) {
|
|
280
|
+
const workspace = getGrepaiWorkspace() || undefined;
|
|
281
|
+
const semanticPatterns = detectSemanticPatterns(workspace);
|
|
282
|
+
if (semanticPatterns.length > 0) {
|
|
283
|
+
semanticCount = saveSemanticPatterns(semanticPatterns);
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
|
|
277
287
|
console.log("\nProjeto descoberto e configurado!");
|
|
278
288
|
console.log(`Standards criados: ${standardsCount.c}`);
|
|
289
|
+
if (semanticCount > 0) {
|
|
290
|
+
console.log(`Patterns semanticos detectados: ${semanticCount}`);
|
|
291
|
+
}
|
|
279
292
|
console.log("\nArquivo gerado: .codexa/standards.md");
|
|
280
293
|
console.log("\nProximo passo: /codexa:feature para iniciar uma feature\n");
|
|
281
294
|
}
|
package/commands/patterns.ts
CHANGED
|
@@ -372,6 +372,213 @@ export function updatePatternsIncremental(files: string[], taskNumber: number):
|
|
|
372
372
|
}
|
|
373
373
|
}
|
|
374
374
|
|
|
375
|
+
// ═══════════════════════════════════════════════════════════════
|
|
376
|
+
// v10.2: DETECÇÃO SEMÂNTICA DE PATTERNS VIA GREPAI
|
|
377
|
+
// Identifica patterns reais do projeto (error handling, data fetching,
|
|
378
|
+
// component composition, etc.) usando busca semântica.
|
|
379
|
+
// ═══════════════════════════════════════════════════════════════
|
|
380
|
+
|
|
381
|
+
const SEMANTIC_PATTERN_QUERIES = [
|
|
382
|
+
{ query: "error handling pattern", category: "error-handling", scope: "shared" },
|
|
383
|
+
{ query: "data fetching and API call pattern", category: "data-fetching", scope: "frontend" },
|
|
384
|
+
{ query: "component composition and reuse pattern", category: "component", scope: "frontend" },
|
|
385
|
+
{ query: "service layer and dependency injection", category: "service", scope: "backend" },
|
|
386
|
+
{ query: "state management pattern", category: "state", scope: "frontend" },
|
|
387
|
+
{ query: "testing patterns and test utilities", category: "test", scope: "testing" },
|
|
388
|
+
];
|
|
389
|
+
|
|
390
|
+
export interface SemanticPattern {
|
|
391
|
+
name: string;
|
|
392
|
+
category: string;
|
|
393
|
+
scope: string;
|
|
394
|
+
files: string[];
|
|
395
|
+
confidence: number;
|
|
396
|
+
description: string;
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
export function detectSemanticPatterns(workspace?: string): SemanticPattern[] {
|
|
400
|
+
if (!isGrepaiAvailable()) return [];
|
|
401
|
+
|
|
402
|
+
const ws = workspace || getGrepaiWorkspace() || undefined;
|
|
403
|
+
const detected: SemanticPattern[] = [];
|
|
404
|
+
|
|
405
|
+
for (const { query, category, scope } of SEMANTIC_PATTERN_QUERIES) {
|
|
406
|
+
const results = searchWithGrepai(query, 5, ws);
|
|
407
|
+
if (results.length < 2) continue;
|
|
408
|
+
|
|
409
|
+
const avgScore = results.reduce((sum, r) => sum + r.score, 0) / results.length;
|
|
410
|
+
if (avgScore < 0.3) continue;
|
|
411
|
+
|
|
412
|
+
detected.push({
|
|
413
|
+
name: `${category}-pattern`,
|
|
414
|
+
category,
|
|
415
|
+
scope,
|
|
416
|
+
files: results.map(r => r.path),
|
|
417
|
+
confidence: Math.min(avgScore, 1),
|
|
418
|
+
description: `Semantic pattern detected: ${query} (${results.length} files, avg score ${avgScore.toFixed(2)})`,
|
|
419
|
+
});
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
return detected;
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
export function saveSemanticPatterns(patterns: SemanticPattern[]): number {
|
|
426
|
+
initSchema();
|
|
427
|
+
const db = getDb();
|
|
428
|
+
const now = new Date().toISOString();
|
|
429
|
+
let saved = 0;
|
|
430
|
+
|
|
431
|
+
for (const p of patterns) {
|
|
432
|
+
try {
|
|
433
|
+
const existing = db.query(
|
|
434
|
+
"SELECT id FROM implementation_patterns WHERE name = ?"
|
|
435
|
+
).get(p.name) as any;
|
|
436
|
+
|
|
437
|
+
if (existing) {
|
|
438
|
+
db.run(
|
|
439
|
+
`UPDATE implementation_patterns SET
|
|
440
|
+
confidence = ?, examples = ?, extracted_from = ?, updated_at = ?
|
|
441
|
+
WHERE id = ?`,
|
|
442
|
+
[p.confidence, JSON.stringify(p.files.map(f => ({ path: f, relevance: p.confidence }))), p.files.length, now, existing.id]
|
|
443
|
+
);
|
|
444
|
+
} else {
|
|
445
|
+
db.run(
|
|
446
|
+
`INSERT INTO implementation_patterns
|
|
447
|
+
(category, name, scope, applies_to, structure, template, examples, confidence, extracted_from, created_at, updated_at)
|
|
448
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
|
449
|
+
[
|
|
450
|
+
p.category, p.name, p.scope,
|
|
451
|
+
`**/*`,
|
|
452
|
+
JSON.stringify({ source: "semantic", description: p.description }),
|
|
453
|
+
`[Semantic pattern - see examples for reference files]`,
|
|
454
|
+
JSON.stringify(p.files.map(f => ({ path: f, relevance: p.confidence }))),
|
|
455
|
+
p.confidence, p.files.length, now, now,
|
|
456
|
+
]
|
|
457
|
+
);
|
|
458
|
+
}
|
|
459
|
+
saved++;
|
|
460
|
+
} catch { /* skip duplicates */ }
|
|
461
|
+
}
|
|
462
|
+
|
|
463
|
+
return saved;
|
|
464
|
+
}
|
|
465
|
+
|
|
466
|
+
// ═══════════════════════════════════════════════════════════════
|
|
467
|
+
// v10.2: PATTERN COMPLIANCE VALIDATION (used by Gate 4.7)
|
|
468
|
+
// ═══════════════════════════════════════════════════════════════
|
|
469
|
+
|
|
470
|
+
export interface PatternComplianceResult {
|
|
471
|
+
passed: boolean;
|
|
472
|
+
violations: Array<{ file: string; pattern: string; reason: string }>;
|
|
473
|
+
checked: number;
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
export function validatePatternCompliance(
|
|
477
|
+
files: string[],
|
|
478
|
+
minConfidence: number = 0.7
|
|
479
|
+
): PatternComplianceResult {
|
|
480
|
+
initSchema();
|
|
481
|
+
const db = getDb();
|
|
482
|
+
const result: PatternComplianceResult = { passed: true, violations: [], checked: 0 };
|
|
483
|
+
|
|
484
|
+
if (files.length === 0) return result;
|
|
485
|
+
|
|
486
|
+
const allPatterns = db.query(
|
|
487
|
+
"SELECT * FROM implementation_patterns WHERE confidence >= ?"
|
|
488
|
+
).all(minConfidence) as any[];
|
|
489
|
+
|
|
490
|
+
if (allPatterns.length === 0) return result;
|
|
491
|
+
|
|
492
|
+
for (const file of files) {
|
|
493
|
+
if (!existsSync(file)) continue;
|
|
494
|
+
|
|
495
|
+
const matchingPatterns = allPatterns.filter(p => fileMatchesGlob(file, p.applies_to));
|
|
496
|
+
if (matchingPatterns.length === 0) continue;
|
|
497
|
+
|
|
498
|
+
const analysis = analyzeFile(file);
|
|
499
|
+
if (!analysis) continue;
|
|
500
|
+
|
|
501
|
+
result.checked++;
|
|
502
|
+
|
|
503
|
+
for (const pattern of matchingPatterns) {
|
|
504
|
+
let structure: any;
|
|
505
|
+
try { structure = JSON.parse(pattern.structure); } catch { continue; }
|
|
506
|
+
|
|
507
|
+
// Semantic patterns (source: "semantic") — use grepai if available
|
|
508
|
+
if (structure.source === "semantic") {
|
|
509
|
+
if (isGrepaiAvailable()) {
|
|
510
|
+
const ws = getGrepaiWorkspace() || undefined;
|
|
511
|
+
const grepaiResults = searchWithGrepai(
|
|
512
|
+
`file:${file} follows ${pattern.category} pattern`, 1, ws
|
|
513
|
+
);
|
|
514
|
+
if (grepaiResults.length > 0 && grepaiResults[0].score < 0.4) {
|
|
515
|
+
result.violations.push({
|
|
516
|
+
file,
|
|
517
|
+
pattern: pattern.name,
|
|
518
|
+
reason: `Low semantic match (${grepaiResults[0].score.toFixed(2)}) for ${pattern.category} pattern`,
|
|
519
|
+
});
|
|
520
|
+
}
|
|
521
|
+
}
|
|
522
|
+
continue;
|
|
523
|
+
}
|
|
524
|
+
|
|
525
|
+
// Structural patterns — check imports, hooks, conventions
|
|
526
|
+
if (structure.requiredImports) {
|
|
527
|
+
for (const imp of structure.requiredImports) {
|
|
528
|
+
if (!analysis.imports.includes(imp)) {
|
|
529
|
+
result.violations.push({
|
|
530
|
+
file,
|
|
531
|
+
pattern: pattern.name,
|
|
532
|
+
reason: `Missing required import: ${imp}`,
|
|
533
|
+
});
|
|
534
|
+
}
|
|
535
|
+
}
|
|
536
|
+
}
|
|
537
|
+
|
|
538
|
+
if (structure.requiredExports) {
|
|
539
|
+
for (const exp of structure.requiredExports) {
|
|
540
|
+
if (!analysis.exports.includes(exp)) {
|
|
541
|
+
result.violations.push({
|
|
542
|
+
file,
|
|
543
|
+
pattern: pattern.name,
|
|
544
|
+
reason: `Missing required export: ${exp}`,
|
|
545
|
+
});
|
|
546
|
+
}
|
|
547
|
+
}
|
|
548
|
+
}
|
|
549
|
+
|
|
550
|
+
if (structure.requiredDirectives) {
|
|
551
|
+
for (const dir of structure.requiredDirectives) {
|
|
552
|
+
if (!analysis.directives.includes(dir)) {
|
|
553
|
+
result.violations.push({
|
|
554
|
+
file,
|
|
555
|
+
pattern: pattern.name,
|
|
556
|
+
reason: `Missing required directive: "${dir}"`,
|
|
557
|
+
});
|
|
558
|
+
}
|
|
559
|
+
}
|
|
560
|
+
}
|
|
561
|
+
|
|
562
|
+
if (structure.commonHooks) {
|
|
563
|
+
const requiredHookCount = Math.ceil(structure.commonHooks.length * 0.3);
|
|
564
|
+
const matchedHooks = structure.commonHooks.filter(
|
|
565
|
+
(h: string) => analysis.hooksUsed.includes(h)
|
|
566
|
+
);
|
|
567
|
+
if (matchedHooks.length < requiredHookCount && analysis.hooksUsed.length > 0) {
|
|
568
|
+
result.violations.push({
|
|
569
|
+
file,
|
|
570
|
+
pattern: pattern.name,
|
|
571
|
+
reason: `Hook pattern divergence: uses ${analysis.hooksUsed.join(",")} but pattern expects ${structure.commonHooks.join(",")}`,
|
|
572
|
+
});
|
|
573
|
+
}
|
|
574
|
+
}
|
|
575
|
+
}
|
|
576
|
+
}
|
|
577
|
+
|
|
578
|
+
result.passed = result.violations.length === 0;
|
|
579
|
+
return result;
|
|
580
|
+
}
|
|
581
|
+
|
|
375
582
|
function fileMatchesGlob(filePath: string, glob: string): boolean {
|
|
376
583
|
const normalized = filePath.replace(/\\/g, "/");
|
|
377
584
|
const regexStr = glob
|
package/commands/task.ts
CHANGED
|
@@ -12,6 +12,7 @@ import { getAgentDomain, domainToScope } from "../context/domains";
|
|
|
12
12
|
import { resolveAgent } from "../context/agent-registry";
|
|
13
13
|
import { loadAgentExpertise, getAgentDescription } from "../context/agent-expertise";
|
|
14
14
|
import { generateContextFile } from "../context/generator";
|
|
15
|
+
import { getFullContextFilePath } from "../context/file-writer";
|
|
15
16
|
import { getModelForTask } from "../context/model-profiles";
|
|
16
17
|
import { getContextBudget, formatContextWarning, estimateTokens } from "../context/monitor";
|
|
17
18
|
import { invalidateCache } from "../context/cache";
|
|
@@ -252,6 +253,19 @@ export function taskStart(ids: string, json: boolean = false, minimalContext: bo
|
|
|
252
253
|
contextSummary = getMinimalContextForSubagent(task.id);
|
|
253
254
|
}
|
|
254
255
|
|
|
256
|
+
// v10.2: Full context file path (generated alongside lean file)
|
|
257
|
+
const fullContextFile = useFileContext ? getFullContextFilePath(task.id) : "";
|
|
258
|
+
|
|
259
|
+
// v10.2: Build pending knowledge section for template
|
|
260
|
+
const pendingKnowledgeForPrompt = useFileContext
|
|
261
|
+
? (() => {
|
|
262
|
+
const unread = getUnreadKnowledgeForTask(spec.id, task.id);
|
|
263
|
+
const critical = unread.filter((k: any) => k.severity === 'critical' && k.task_origin !== task.id);
|
|
264
|
+
if (critical.length === 0) return "";
|
|
265
|
+
return `## KNOWLEDGE PENDENTE (OBRIGATORIO)\n\nVoce DEVE reconhecer estes items antes de completar a task:\n${critical.map((k: any) => `- [ID ${k.id}] ${k.content}`).join("\n")}\n\nUse: \`codexa knowledge ack <id>\` para cada item.`;
|
|
266
|
+
})()
|
|
267
|
+
: "";
|
|
268
|
+
|
|
255
269
|
// v10.0: Pre-built lean prompt for subagent (orchestrator just passes it)
|
|
256
270
|
const subagentPrompt = useFileContext
|
|
257
271
|
? loadTemplate("subagent-prompt-lean", {
|
|
@@ -260,6 +274,8 @@ export function taskStart(ids: string, json: boolean = false, minimalContext: bo
|
|
|
260
274
|
taskDescription: task.checkpoint || task.name,
|
|
261
275
|
contextSummary: contextSummary!,
|
|
262
276
|
contextFile: contextFile || "",
|
|
277
|
+
fullContextFile,
|
|
278
|
+
pendingKnowledge: pendingKnowledgeForPrompt,
|
|
263
279
|
agentIdentity,
|
|
264
280
|
agentExpertise: agentExpertise
|
|
265
281
|
? `\n## EXPERTISE DO AGENTE\n\n${agentExpertise}\n`
|
|
@@ -304,9 +320,21 @@ export function taskStart(ids: string, json: boolean = false, minimalContext: bo
|
|
|
304
320
|
if (useFileContext) {
|
|
305
321
|
// v10.0: File-based context (new default)
|
|
306
322
|
output.contextFile = contextFile;
|
|
323
|
+
output.fullContextFile = fullContextFile;
|
|
307
324
|
output.contextSummary = contextSummary!;
|
|
308
325
|
output.contextMode = "file";
|
|
309
326
|
output.subagentPrompt = subagentPrompt;
|
|
327
|
+
|
|
328
|
+
// v10.2: Fix — unreadKnowledge must be available in ALL modes
|
|
329
|
+
// Without this, file-mode subagents can't acknowledge critical knowledge,
|
|
330
|
+
// causing taskDone() to block with no way for the subagent to comply
|
|
331
|
+
const unreadKnowledge = getUnreadKnowledgeForTask(spec.id, task.id);
|
|
332
|
+
if (unreadKnowledge.length > 0) {
|
|
333
|
+
output.unreadKnowledge = unreadKnowledge.map((k: any) => ({
|
|
334
|
+
id: k.id, category: k.category, content: k.content,
|
|
335
|
+
severity: k.severity, origin_task: k.task_origin,
|
|
336
|
+
}));
|
|
337
|
+
}
|
|
310
338
|
} else if (inlineContext) {
|
|
311
339
|
// Backward compat: full inline
|
|
312
340
|
output.context = contextText;
|
package/context/assembly.ts
CHANGED
|
@@ -29,6 +29,7 @@ export interface ContextData {
|
|
|
29
29
|
libContexts: any[];
|
|
30
30
|
graphDecisions: any[];
|
|
31
31
|
discoveredPatterns: any[];
|
|
32
|
+
fullMode?: boolean;
|
|
32
33
|
}
|
|
33
34
|
|
|
34
35
|
const RETURN_PROTOCOL = `
|
|
@@ -38,7 +39,9 @@ const RETURN_PROTOCOL = `
|
|
|
38
39
|
\`\`\`
|
|
39
40
|
`;
|
|
40
41
|
|
|
41
|
-
export
|
|
42
|
+
export type AssemblyMode = "lean" | "full";
|
|
43
|
+
|
|
44
|
+
export function assembleSections(header: string, sections: ContextSection[], mode: AssemblyMode = "lean"): string {
|
|
42
45
|
// Sort by priority (lower = higher priority, kept during truncation)
|
|
43
46
|
const sorted = [...sections].sort((a, b) => a.priority - b.priority);
|
|
44
47
|
|
|
@@ -49,6 +52,11 @@ export function assembleSections(header: string, sections: ContextSection[]): st
|
|
|
49
52
|
|
|
50
53
|
output += RETURN_PROTOCOL;
|
|
51
54
|
|
|
55
|
+
// v10.2: Full mode — no truncation, no caps. Used for the full reference file.
|
|
56
|
+
if (mode === "full") {
|
|
57
|
+
return output;
|
|
58
|
+
}
|
|
59
|
+
|
|
52
60
|
// v10.0: Progressive truncation — halve sections before dropping them entirely
|
|
53
61
|
if (output.length > MAX_CONTEXT_SIZE) {
|
|
54
62
|
return truncateWithBudget(header, sorted, MAX_CONTEXT_SIZE);
|
package/context/file-writer.ts
CHANGED
|
@@ -25,10 +25,21 @@ export function writeContextFile(taskId: number, content: string): string {
|
|
|
25
25
|
return resolve(filePath);
|
|
26
26
|
}
|
|
27
27
|
|
|
28
|
+
export function writeFullContextFile(taskId: number, content: string): string {
|
|
29
|
+
const dir = ensureContextDir();
|
|
30
|
+
const filePath = join(dir, `task-${taskId}-full.md`);
|
|
31
|
+
Bun.write(filePath, content);
|
|
32
|
+
return resolve(filePath);
|
|
33
|
+
}
|
|
34
|
+
|
|
28
35
|
export function getContextFilePath(taskId: number): string {
|
|
29
36
|
return resolve(process.cwd(), CONTEXT_DIR, `task-${taskId}.md`);
|
|
30
37
|
}
|
|
31
38
|
|
|
39
|
+
export function getFullContextFilePath(taskId: number): string {
|
|
40
|
+
return resolve(process.cwd(), CONTEXT_DIR, `task-${taskId}-full.md`);
|
|
41
|
+
}
|
|
42
|
+
|
|
32
43
|
export function cleanupContextFiles(specId: string): void {
|
|
33
44
|
const dir = resolve(process.cwd(), CONTEXT_DIR);
|
|
34
45
|
if (!existsSync(dir)) return;
|
|
@@ -37,7 +48,7 @@ export function cleanupContextFiles(specId: string): void {
|
|
|
37
48
|
const { readdirSync, unlinkSync } = require("fs");
|
|
38
49
|
const files = readdirSync(dir) as string[];
|
|
39
50
|
for (const file of files) {
|
|
40
|
-
if (file.startsWith("task-") && file.endsWith(".md")) {
|
|
51
|
+
if ((file.startsWith("task-") || file.startsWith("simplify-")) && file.endsWith(".md")) {
|
|
41
52
|
unlinkSync(join(dir, file));
|
|
42
53
|
}
|
|
43
54
|
}
|
package/context/generator.ts
CHANGED
|
@@ -2,11 +2,11 @@ import { getDb } from "../db/connection";
|
|
|
2
2
|
import { initSchema, getPatternsForFiles, getRelatedDecisions, getArchitecturalAnalysisForSpec } from "../db/schema";
|
|
3
3
|
import { getKnowledgeForTask } from "../commands/knowledge";
|
|
4
4
|
import type { ContextSection, ContextData } from "./assembly";
|
|
5
|
-
import { assembleSections } from "./assembly";
|
|
5
|
+
import { assembleSections, type AssemblyMode } from "./assembly";
|
|
6
6
|
import { getAgentDomain, adjustSectionPriorities, domainToScope } from "./domains";
|
|
7
7
|
import { filterRelevantDecisions, filterRelevantStandards } from "./scoring";
|
|
8
8
|
import { computeContextHash, getCachedContextPath, setCachedContext } from "./cache";
|
|
9
|
-
import { writeContextFile } from "./file-writer";
|
|
9
|
+
import { writeContextFile, writeFullContextFile } from "./file-writer";
|
|
10
10
|
import {
|
|
11
11
|
buildProductSection,
|
|
12
12
|
buildArchitectureSection,
|
|
@@ -114,7 +114,7 @@ export function getMinimalContextForSubagent(taskId: number): string {
|
|
|
114
114
|
// CONTEXT BUILDER (v9.0 — decomposed from v8.1 monolith)
|
|
115
115
|
// ═══════════════════════════════════════════════════════════════
|
|
116
116
|
|
|
117
|
-
function fetchContextData(taskId: number): ContextData | null {
|
|
117
|
+
function fetchContextData(taskId: number, fullMode: boolean = false): ContextData | null {
|
|
118
118
|
const db = getDb();
|
|
119
119
|
|
|
120
120
|
const task = db.query("SELECT * FROM tasks WHERE id = ?").get(taskId) as any;
|
|
@@ -131,11 +131,12 @@ function fetchContextData(taskId: number): ContextData | null {
|
|
|
131
131
|
const taskFiles = task.files ? JSON.parse(task.files) : [];
|
|
132
132
|
const domain = domainToScope(getAgentDomain(task.agent));
|
|
133
133
|
|
|
134
|
-
// Decisoes relevantes (
|
|
134
|
+
// Decisoes relevantes (v10.2: fullMode removes limits)
|
|
135
|
+
const decisionLimit = fullMode ? 200 : 30;
|
|
135
136
|
const allDecisions = db
|
|
136
|
-
.query(
|
|
137
|
-
.all(task.spec_id) as any[];
|
|
138
|
-
const decisions = filterRelevantDecisions(allDecisions, taskFiles, 8);
|
|
137
|
+
.query(`SELECT * FROM decisions WHERE spec_id = ? AND status = 'active' ORDER BY created_at DESC LIMIT ?`)
|
|
138
|
+
.all(task.spec_id, decisionLimit) as any[];
|
|
139
|
+
const decisions = fullMode ? allDecisions : filterRelevantDecisions(allDecisions, taskFiles, 8);
|
|
139
140
|
|
|
140
141
|
// Standards required + recommended que se aplicam aos arquivos
|
|
141
142
|
const standards = db
|
|
@@ -147,13 +148,13 @@ function fetchContextData(taskId: number): ContextData | null {
|
|
|
147
148
|
.all(domain) as any[];
|
|
148
149
|
const relevantStandards = filterRelevantStandards(standards, taskFiles);
|
|
149
150
|
|
|
150
|
-
// Knowledge com caps e indicadores de truncamento
|
|
151
|
+
// Knowledge com caps e indicadores de truncamento (v10.2: fullMode removes caps)
|
|
151
152
|
const allKnowledge = getKnowledgeForTask(task.spec_id, taskId);
|
|
152
153
|
const allCriticalKnowledge = allKnowledge.filter((k: any) => k.severity === 'critical' || k.severity === 'warning');
|
|
153
|
-
const criticalKnowledge = allCriticalKnowledge.slice(0, 20);
|
|
154
|
+
const criticalKnowledge = fullMode ? allCriticalKnowledge : allCriticalKnowledge.slice(0, 20);
|
|
154
155
|
const truncatedCritical = allCriticalKnowledge.length - criticalKnowledge.length;
|
|
155
156
|
const allInfoKnowledge = allKnowledge.filter((k: any) => k.severity === 'info');
|
|
156
|
-
const infoKnowledge = allInfoKnowledge.slice(0, 10);
|
|
157
|
+
const infoKnowledge = fullMode ? allInfoKnowledge : allInfoKnowledge.slice(0, 10);
|
|
157
158
|
const truncatedInfo = allInfoKnowledge.length - infoKnowledge.length;
|
|
158
159
|
|
|
159
160
|
const productContext = db.query("SELECT * FROM product_context WHERE id = 'default'").get() as any;
|
|
@@ -239,10 +240,14 @@ function fetchContextData(taskId: number): ContextData | null {
|
|
|
239
240
|
|
|
240
241
|
// ── Main Entry Point ──────────────────────────────────────────
|
|
241
242
|
|
|
242
|
-
function
|
|
243
|
-
const
|
|
243
|
+
function buildContext(taskId: number, mode: AssemblyMode = "lean"): { content: string; data: ContextData } | null {
|
|
244
|
+
const isFullMode = mode === "full";
|
|
245
|
+
const data = fetchContextData(taskId, isFullMode);
|
|
244
246
|
if (!data) return null;
|
|
245
247
|
|
|
248
|
+
// Pass fullMode flag so section builders remove caps
|
|
249
|
+
data.fullMode = isFullMode;
|
|
250
|
+
|
|
246
251
|
const header = `## CONTEXTO (Task #${data.task.number})
|
|
247
252
|
|
|
248
253
|
**Feature:** ${data.spec.name}
|
|
@@ -269,7 +274,12 @@ function buildFullContext(taskId: number): { content: string; data: ContextData
|
|
|
269
274
|
const agentDomain = getAgentDomain(data.task.agent);
|
|
270
275
|
const sections = adjustSectionPriorities(allSections, agentDomain);
|
|
271
276
|
|
|
272
|
-
return { content: assembleSections(header, sections), data };
|
|
277
|
+
return { content: assembleSections(header, sections, mode), data };
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// Backward compat wrapper
|
|
281
|
+
function buildFullContext(taskId: number): { content: string; data: ContextData } | null {
|
|
282
|
+
return buildContext(taskId, "lean");
|
|
273
283
|
}
|
|
274
284
|
|
|
275
285
|
export function getContextForSubagent(taskId: number): string {
|
|
@@ -280,6 +290,11 @@ export function getContextForSubagent(taskId: number): string {
|
|
|
280
290
|
|
|
281
291
|
// ── v10.0: File-Based Context ─────────────────────────────────
|
|
282
292
|
|
|
293
|
+
export interface GeneratedContextFiles {
|
|
294
|
+
leanPath: string;
|
|
295
|
+
fullPath: string;
|
|
296
|
+
}
|
|
297
|
+
|
|
283
298
|
export function generateContextFile(taskId: number): string {
|
|
284
299
|
initSchema();
|
|
285
300
|
|
|
@@ -292,12 +307,18 @@ export function generateContextFile(taskId: number): string {
|
|
|
292
307
|
const cached = getCachedContextPath(hash);
|
|
293
308
|
if (cached) return cached;
|
|
294
309
|
|
|
295
|
-
// Generate
|
|
296
|
-
const
|
|
297
|
-
if (!
|
|
310
|
+
// Generate lean context (16KB max, truncated)
|
|
311
|
+
const leanResult = buildFullContext(taskId);
|
|
312
|
+
if (!leanResult) return "";
|
|
313
|
+
|
|
314
|
+
// v10.2: Generate full context (no truncation, no caps)
|
|
315
|
+
const fullResult = buildContext(taskId, "full");
|
|
316
|
+
if (fullResult) {
|
|
317
|
+
writeFullContextFile(taskId, fullResult.content);
|
|
318
|
+
}
|
|
298
319
|
|
|
299
|
-
// Write
|
|
300
|
-
const filePath = writeContextFile(taskId,
|
|
320
|
+
// Write lean file and cache
|
|
321
|
+
const filePath = writeContextFile(taskId, leanResult.content);
|
|
301
322
|
setCachedContext(hash, filePath, task.spec_id);
|
|
302
323
|
|
|
303
324
|
return filePath;
|
package/context/sections.ts
CHANGED
|
@@ -8,21 +8,43 @@ import { findReferenceFiles } from "./references";
|
|
|
8
8
|
export function buildProductSection(data: ContextData): ContextSection | null {
|
|
9
9
|
if (!data.productContext) return null;
|
|
10
10
|
|
|
11
|
+
const p = data.productContext;
|
|
11
12
|
let content = `
|
|
12
13
|
### PRODUTO
|
|
13
|
-
- **Problema:** ${
|
|
14
|
-
|
|
15
|
-
if (
|
|
14
|
+
- **Problema:** ${p.problem || "N/A"}`;
|
|
15
|
+
|
|
16
|
+
if (p.solution) {
|
|
17
|
+
content += `\n- **Solucao:** ${p.solution}`;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
content += `\n- **Usuarios:** ${p.target_users || "N/A"}`;
|
|
21
|
+
|
|
22
|
+
if (p.value_proposition) {
|
|
23
|
+
content += `\n- **Proposta de Valor:** ${p.value_proposition}`;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
if (p.success_metrics) {
|
|
27
|
+
content += `\n- **Metricas de Sucesso:** ${p.success_metrics}`;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
if (p.constraints) {
|
|
16
31
|
try {
|
|
17
|
-
const constraints = JSON.parse(
|
|
32
|
+
const constraints = JSON.parse(p.constraints);
|
|
18
33
|
if (constraints.length > 0) {
|
|
19
|
-
content += `\n- **Restricoes:** ${constraints.
|
|
34
|
+
content += `\n- **Restricoes:** ${constraints.join("; ")}`;
|
|
20
35
|
}
|
|
21
36
|
} catch { /* ignore parse errors */ }
|
|
22
37
|
}
|
|
38
|
+
|
|
39
|
+
if (p.out_of_scope) {
|
|
40
|
+
content += `\n- **Fora de Escopo:** ${p.out_of_scope}`;
|
|
41
|
+
}
|
|
42
|
+
|
|
23
43
|
content += "\n";
|
|
24
44
|
|
|
25
|
-
|
|
45
|
+
// v10.2: Product context is small (~500-2000 bytes) but high-impact.
|
|
46
|
+
// Priority 1 ensures it is NEVER truncated.
|
|
47
|
+
return { name: "PRODUTO", content, priority: 1 };
|
|
26
48
|
}
|
|
27
49
|
|
|
28
50
|
export function buildArchitectureSection(data: ContextData): ContextSection | null {
|
|
@@ -92,10 +114,12 @@ export function buildStandardsSection(data: ContextData): ContextSection {
|
|
|
92
114
|
}
|
|
93
115
|
|
|
94
116
|
export function buildDecisionsSection(data: ContextData): ContextSection {
|
|
95
|
-
|
|
117
|
+
// v10.2: In full mode, show ALL decisions (no cap at 8)
|
|
118
|
+
const decisionsToShow = data.fullMode ? data.allDecisions : data.decisions;
|
|
119
|
+
const truncatedDecisions = data.allDecisions.length - decisionsToShow.length;
|
|
96
120
|
const content = `
|
|
97
|
-
### DECISOES (${
|
|
98
|
-
${
|
|
121
|
+
### DECISOES (${decisionsToShow.length}${truncatedDecisions > 0 ? ` [+${truncatedDecisions} mais - use: decisions list]` : ''})
|
|
122
|
+
${decisionsToShow.length > 0 ? decisionsToShow.map((d: any) => `- **${d.title}**: ${d.decision}`).join("\n") : "Nenhuma"}
|
|
99
123
|
`;
|
|
100
124
|
return { name: "DECISOES", content, priority: 4 };
|
|
101
125
|
}
|
|
@@ -146,10 +170,12 @@ ${data.patterns.map((p: any) => {
|
|
|
146
170
|
}
|
|
147
171
|
|
|
148
172
|
if (data.discoveredPatterns.length > 0) {
|
|
149
|
-
|
|
173
|
+
// v10.2: Full mode shows ALL discovered patterns
|
|
174
|
+
const patternsToShow = data.fullMode ? data.discoveredPatterns : data.discoveredPatterns.slice(-10);
|
|
175
|
+
const truncated = data.discoveredPatterns.length - patternsToShow.length;
|
|
150
176
|
content += `
|
|
151
177
|
### PATTERNS DESCOBERTOS (${data.discoveredPatterns.length})
|
|
152
|
-
${
|
|
178
|
+
${truncated > 0 ? `[mostrando ultimos 10 de ${data.discoveredPatterns.length}]\n` : ''}${patternsToShow.map((p: any) => `- ${p.pattern}${p.source_task ? ` (Task #${p.source_task})` : ""}`).join("\n")}
|
|
153
179
|
`;
|
|
154
180
|
}
|
|
155
181
|
|
|
@@ -165,7 +191,9 @@ export function buildUtilitiesSection(data: ContextData): ContextSection | null
|
|
|
165
191
|
}).filter(Boolean))];
|
|
166
192
|
|
|
167
193
|
const agentScope = domainToScope(getAgentDomain(data.task.agent)) || undefined;
|
|
168
|
-
|
|
194
|
+
// v10.2: Full mode fetches all utilities
|
|
195
|
+
const utilLimit = data.fullMode ? 200 : 15;
|
|
196
|
+
let relevantUtilities = getUtilitiesForContext(taskDirs, undefined, utilLimit);
|
|
169
197
|
|
|
170
198
|
if (relevantUtilities.length < 5 && agentScope) {
|
|
171
199
|
const scopeUtils = getUtilitiesForContext([], agentScope, 15);
|
|
@@ -174,7 +202,7 @@ export function buildUtilitiesSection(data: ContextData): ContextSection | null
|
|
|
174
202
|
if (!existingKeys.has(`${u.file_path}:${u.utility_name}`)) {
|
|
175
203
|
relevantUtilities.push(u);
|
|
176
204
|
}
|
|
177
|
-
if (relevantUtilities.length >=
|
|
205
|
+
if (relevantUtilities.length >= utilLimit) break;
|
|
178
206
|
}
|
|
179
207
|
}
|
|
180
208
|
|
|
@@ -215,10 +243,12 @@ export function buildStackSection(data: ContextData): ContextSection | null {
|
|
|
215
243
|
if (data.project) {
|
|
216
244
|
const stack = JSON.parse(data.project.stack);
|
|
217
245
|
const allStackEntries = Object.entries(stack);
|
|
218
|
-
|
|
246
|
+
// v10.2: Full mode shows all stack entries
|
|
247
|
+
const mainStack = data.fullMode ? allStackEntries : allStackEntries.slice(0, 6);
|
|
248
|
+
const truncatedStack = allStackEntries.length - mainStack.length;
|
|
219
249
|
content += `
|
|
220
250
|
### STACK
|
|
221
|
-
${mainStack.map(([k, v]) => `${k}: ${v}`).join(" | ")}${
|
|
251
|
+
${mainStack.map(([k, v]) => `${k}: ${v}`).join(" | ")}${truncatedStack > 0 ? ` [+${truncatedStack} mais]` : ''}
|
|
222
252
|
`;
|
|
223
253
|
}
|
|
224
254
|
|
package/db/schema.ts
CHANGED
|
@@ -544,6 +544,46 @@ export function runMigrations(): void {
|
|
|
544
544
|
// Exportar MIGRATIONS para testes
|
|
545
545
|
export { MIGRATIONS };
|
|
546
546
|
|
|
547
|
+
// ═══════════════════════════════════════════════════════════════
|
|
548
|
+
// v10.2: Transacao Atomica Helper
|
|
549
|
+
// Wrapa qualquer operacao em BEGIN IMMEDIATE / COMMIT com
|
|
550
|
+
// rollback automatico em caso de erro.
|
|
551
|
+
// BEGIN IMMEDIATE garante lock exclusivo para escrita desde o
|
|
552
|
+
// inicio, evitando deadlocks em WAL mode.
|
|
553
|
+
// ═══════════════════════════════════════════════════════════════
|
|
554
|
+
|
|
555
|
+
const MAX_BUSY_RETRIES = 3;
|
|
556
|
+
const BUSY_RETRY_BASE_MS = 50;
|
|
557
|
+
|
|
558
|
+
export function runInTransaction<T>(fn: () => T): T {
|
|
559
|
+
const db = getDb();
|
|
560
|
+
let retries = MAX_BUSY_RETRIES;
|
|
561
|
+
|
|
562
|
+
while (retries > 0) {
|
|
563
|
+
try {
|
|
564
|
+
db.exec("BEGIN IMMEDIATE");
|
|
565
|
+
break;
|
|
566
|
+
} catch (e: any) {
|
|
567
|
+
if (e.message?.includes("SQLITE_BUSY") && retries > 1) {
|
|
568
|
+
retries--;
|
|
569
|
+
const delay = BUSY_RETRY_BASE_MS * (MAX_BUSY_RETRIES - retries);
|
|
570
|
+
Bun.sleepSync(delay);
|
|
571
|
+
continue;
|
|
572
|
+
}
|
|
573
|
+
throw e;
|
|
574
|
+
}
|
|
575
|
+
}
|
|
576
|
+
|
|
577
|
+
try {
|
|
578
|
+
const result = fn();
|
|
579
|
+
db.exec("COMMIT");
|
|
580
|
+
return result;
|
|
581
|
+
} catch (e) {
|
|
582
|
+
try { db.exec("ROLLBACK"); } catch { /* already rolled back */ }
|
|
583
|
+
throw e;
|
|
584
|
+
}
|
|
585
|
+
}
|
|
586
|
+
|
|
547
587
|
// Gera proximo ID de decisao para um spec
|
|
548
588
|
// Usa timestamp + random hash para eliminar race conditions entre tasks paralelas
|
|
549
589
|
export function getNextDecisionId(specId: string): string {
|
package/gates/validator.ts
CHANGED
|
@@ -3,7 +3,7 @@ import { existsSync, readFileSync, statSync } from "fs";
|
|
|
3
3
|
import { extname } from "path";
|
|
4
4
|
import { validateAgainstStandards, printValidationResult } from "./standards-validator";
|
|
5
5
|
import { runTypecheck, printTypecheckResult } from "./typecheck-validator";
|
|
6
|
-
import { extractUtilitiesFromFile, inferScopeFromPath } from "../commands/patterns";
|
|
6
|
+
import { extractUtilitiesFromFile, inferScopeFromPath, validatePatternCompliance } from "../commands/patterns";
|
|
7
7
|
import { findDuplicateUtilities } from "../db/schema";
|
|
8
8
|
import { GateError, RecoverySuggestion } from "../errors";
|
|
9
9
|
import { resolveSpecOrNull } from "../commands/spec-resolver";
|
|
@@ -86,6 +86,11 @@ const GATES: Record<string, GateCheck[]> = {
|
|
|
86
86
|
message: "Duplicacao de utilities detectada (DRY)",
|
|
87
87
|
resolution: "Importe do arquivo existente ou use --force --force-reason para bypass",
|
|
88
88
|
},
|
|
89
|
+
{
|
|
90
|
+
check: "pattern-compliance",
|
|
91
|
+
message: "Codigo diverge dos patterns do projeto",
|
|
92
|
+
resolution: "Siga os patterns detectados ou use --force --force-reason para bypass",
|
|
93
|
+
},
|
|
89
94
|
{
|
|
90
95
|
check: "reasoning-provided",
|
|
91
96
|
message: "Reasoning obrigatorio ausente no retorno do subagent",
|
|
@@ -171,6 +176,15 @@ const RECOVERY_STRATEGIES: Record<string, (details?: string) => RecoverySuggesti
|
|
|
171
176
|
],
|
|
172
177
|
command: "codexa decisions",
|
|
173
178
|
}),
|
|
179
|
+
"pattern-compliance": (details) => ({
|
|
180
|
+
diagnostic: `Divergencias de pattern detectadas:\n${details || "Detalhes nao disponiveis"}`,
|
|
181
|
+
steps: [
|
|
182
|
+
"Revise os patterns com: codexa context detail patterns",
|
|
183
|
+
"Ajuste o codigo para seguir os patterns do projeto",
|
|
184
|
+
"Ou use --force --force-reason 'motivo' para bypass (auditado no review)",
|
|
185
|
+
],
|
|
186
|
+
command: "codexa context detail patterns",
|
|
187
|
+
}),
|
|
174
188
|
"reasoning-provided": () => ({
|
|
175
189
|
diagnostic: "Subagent retornou sem reasoning.approach adequado",
|
|
176
190
|
steps: [
|
|
@@ -519,6 +533,28 @@ function executeCheck(check: string, context: any): { passed: boolean; details?:
|
|
|
519
533
|
return { passed: true };
|
|
520
534
|
}
|
|
521
535
|
|
|
536
|
+
case "pattern-compliance": {
|
|
537
|
+
// v10.2: Gate 4.7 — validate code follows project patterns
|
|
538
|
+
if (context.force) {
|
|
539
|
+
logGateBypass(context.taskId, "pattern-compliance", context.forceReason);
|
|
540
|
+
return { passed: true };
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
if (!context.files || context.files.length === 0) return { passed: true };
|
|
544
|
+
|
|
545
|
+
const patternResult = validatePatternCompliance(context.files);
|
|
546
|
+
if (!patternResult.passed && patternResult.violations.length > 0) {
|
|
547
|
+
// v10.2: Start in warning mode (don't block, just warn)
|
|
548
|
+
// Change to `return { passed: false, ... }` after validation in real projects
|
|
549
|
+
console.warn("\n[PATTERN] Divergencias de pattern detectadas (aviso, nao bloqueante):");
|
|
550
|
+
for (const v of patternResult.violations) {
|
|
551
|
+
console.warn(` - ${v.file}: ${v.reason} (pattern: ${v.pattern})`);
|
|
552
|
+
}
|
|
553
|
+
console.warn();
|
|
554
|
+
}
|
|
555
|
+
return { passed: true };
|
|
556
|
+
}
|
|
557
|
+
|
|
522
558
|
case "reasoning-provided": {
|
|
523
559
|
if (!context.subagentData) return { passed: true };
|
|
524
560
|
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@codexa/cli",
|
|
3
|
-
"version": "9.0.24",
|
|
3
|
+
"version": "9.0.26",
|
|
4
4
|
"description": "Orchestrated workflow system for Claude Code - manages feature development through parallel subagents with structured phases, gates, and quality enforcement.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -11,7 +11,7 @@
|
|
|
11
11
|
|
|
12
12
|
import { getDb } from "../db/connection";
|
|
13
13
|
import { SubagentReturn, Knowledge } from "./subagent-protocol";
|
|
14
|
-
import { addReasoning, addGraphRelation, upsertUtility, getNextDecisionId } from "../db/schema";
|
|
14
|
+
import { addReasoning, addGraphRelation, upsertUtility, getNextDecisionId, runInTransaction } from "../db/schema";
|
|
15
15
|
import { extractUtilitiesFromFile, inferScopeFromPath } from "../commands/patterns";
|
|
16
16
|
import { detectConflicts } from "../commands/decide";
|
|
17
17
|
|
|
@@ -37,6 +37,17 @@ export function processSubagentReturn(
|
|
|
37
37
|
taskId: number,
|
|
38
38
|
taskNumber: number,
|
|
39
39
|
data: SubagentReturn
|
|
40
|
+
): ProcessResult {
|
|
41
|
+
// v10.2: Wrap entire processing in atomic transaction
|
|
42
|
+
// Prevents race conditions in parallel tasks (pattern storage, knowledge dedup)
|
|
43
|
+
return runInTransaction(() => processSubagentReturnInner(specId, taskId, taskNumber, data));
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
function processSubagentReturnInner(
|
|
47
|
+
specId: string,
|
|
48
|
+
taskId: number,
|
|
49
|
+
taskNumber: number,
|
|
50
|
+
data: SubagentReturn
|
|
40
51
|
): ProcessResult {
|
|
41
52
|
const db = getDb();
|
|
42
53
|
const now = new Date().toISOString();
|
|
@@ -7,17 +7,25 @@
|
|
|
7
7
|
|
|
8
8
|
{{contextSummary}}
|
|
9
9
|
|
|
10
|
-
## CONTEXTO
|
|
10
|
+
## CONTEXTO
|
|
11
11
|
|
|
12
|
-
|
|
12
|
+
**Contexto resumido (standards, alertas, decisoes):**
|
|
13
13
|
{{contextFile}}
|
|
14
14
|
|
|
15
|
+
**Contexto COMPLETO (todos os patterns, utilities, decisions, knowledge SEM truncacao):**
|
|
16
|
+
{{fullContextFile}}
|
|
17
|
+
|
|
18
|
+
Leia o arquivo de contexto completo quando precisar de detalhes sobre patterns, utilities, stack ou discoveries que nao aparecem no resumido.
|
|
19
|
+
|
|
20
|
+
{{pendingKnowledge}}
|
|
21
|
+
|
|
15
22
|
## EXECUTE AGORA
|
|
16
23
|
|
|
17
|
-
1. Read o arquivo de contexto
|
|
18
|
-
2. Read
|
|
19
|
-
3.
|
|
20
|
-
4.
|
|
24
|
+
1. Read o arquivo de contexto resumido
|
|
25
|
+
2. Se precisar de mais detalhes, Read o arquivo de contexto completo (secoes relevantes)
|
|
26
|
+
3. Read arquivos existentes que precisa modificar
|
|
27
|
+
4. Edit/Write para criar/modificar os arquivos listados
|
|
28
|
+
5. Retorne JSON:
|
|
21
29
|
|
|
22
30
|
```json
|
|
23
31
|
{"status": "completed", "summary": "...", "files_created": [], "files_modified": [], "reasoning": {"approach": "como abordou (min 20 chars)", "challenges": [], "recommendations": "para proximas tasks"}, "knowledge_to_broadcast": [], "decisions_made": []}
|
package/workflow.ts
CHANGED
|
@@ -787,6 +787,32 @@ discoverCmd
|
|
|
787
787
|
discoverRefreshPatterns();
|
|
788
788
|
});
|
|
789
789
|
|
|
790
|
+
discoverCmd
|
|
791
|
+
.command("detect-semantic")
|
|
792
|
+
.description("Detecta patterns semanticos via grepai (error handling, data fetching, etc.)")
|
|
793
|
+
.option("--json", "Saida em JSON")
|
|
794
|
+
.action((options) => {
|
|
795
|
+
const { detectSemanticPatterns, saveSemanticPatterns, isGrepaiAvailable } = require("./commands/patterns");
|
|
796
|
+
if (!isGrepaiAvailable()) {
|
|
797
|
+
console.error("[!] grepai nao disponivel. Instale com: pip install grepai");
|
|
798
|
+
process.exit(1);
|
|
799
|
+
}
|
|
800
|
+
const patterns = detectSemanticPatterns();
|
|
801
|
+
if (patterns.length === 0) {
|
|
802
|
+
console.log("\nNenhum pattern semantico detectado.");
|
|
803
|
+
return;
|
|
804
|
+
}
|
|
805
|
+
const saved = saveSemanticPatterns(patterns);
|
|
806
|
+
if (options.json) {
|
|
807
|
+
console.log(JSON.stringify({ detected: patterns.length, saved }));
|
|
808
|
+
} else {
|
|
809
|
+
console.log(`\nPatterns semanticos detectados: ${patterns.length}, salvos: ${saved}`);
|
|
810
|
+
for (const p of patterns) {
|
|
811
|
+
console.log(` - ${p.name} (${p.scope}, confidence: ${p.confidence.toFixed(2)}, ${p.files.length} files)`);
|
|
812
|
+
}
|
|
813
|
+
}
|
|
814
|
+
});
|
|
815
|
+
|
|
790
816
|
discoverCmd
|
|
791
817
|
.command("export-patterns", { hidden: true })
|
|
792
818
|
.description("Regenera arquivo patterns.md")
|