@papyruslabsai/seshat-mcp 0.4.2 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/graph.js +89 -37
- package/dist/index.d.ts +2 -1
- package/dist/index.js +198 -3
- package/dist/tools/diff.d.ts +23 -0
- package/dist/tools/diff.js +491 -0
- package/dist/tools/functors.d.ts +32 -0
- package/dist/tools/functors.js +345 -1
- package/dist/types.d.ts +4 -0
- package/package.json +8 -2
package/dist/graph.js
CHANGED
|
@@ -31,59 +31,111 @@ export function buildCallGraph(entities) {
|
|
|
31
31
|
}
|
|
32
32
|
// Build call edges from dependency data
|
|
33
33
|
for (const caller of entities) {
|
|
34
|
-
if (!caller.id || !caller.edges
|
|
34
|
+
if (!caller.id || !caller.edges)
|
|
35
35
|
continue;
|
|
36
|
+
// Process direct function/method calls
|
|
36
37
|
const callsArray = caller.edges.calls;
|
|
37
|
-
if (
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
// Strategy 1: Exact match
|
|
45
|
-
if (entityById.has(target)) {
|
|
46
|
-
calleeId = target;
|
|
47
|
-
}
|
|
48
|
-
// Strategy 2: Method match (target = "module.method")
|
|
49
|
-
else if (target.includes('.')) {
|
|
50
|
-
const dotIdx = target.indexOf('.');
|
|
51
|
-
const modulePart = target.substring(0, dotIdx);
|
|
52
|
-
const methodPart = target.substring(dotIdx + 1);
|
|
53
|
-
// First try "module.method" as a full entity ID
|
|
38
|
+
if (Array.isArray(callsArray)) {
|
|
39
|
+
for (const call of callsArray) {
|
|
40
|
+
if (!call.target)
|
|
41
|
+
continue;
|
|
42
|
+
const target = call.target;
|
|
43
|
+
let calleeId = null;
|
|
44
|
+
// Strategy 1: Exact match
|
|
54
45
|
if (entityById.has(target)) {
|
|
55
46
|
calleeId = target;
|
|
56
47
|
}
|
|
57
|
-
//
|
|
48
|
+
// Strategy 2: Method match (target = "module.method")
|
|
49
|
+
else if (target.includes('.')) {
|
|
50
|
+
const dotIdx = target.indexOf('.');
|
|
51
|
+
const modulePart = target.substring(0, dotIdx);
|
|
52
|
+
const methodPart = target.substring(dotIdx + 1);
|
|
53
|
+
// First try "module.method" as a full entity ID
|
|
54
|
+
if (entityById.has(target)) {
|
|
55
|
+
calleeId = target;
|
|
56
|
+
}
|
|
57
|
+
// Search within the module's entities for one named methodPart
|
|
58
|
+
if (!calleeId) {
|
|
59
|
+
const moduleEntities = entityByModule.get(modulePart);
|
|
60
|
+
if (moduleEntities) {
|
|
61
|
+
const match = moduleEntities.find(e => {
|
|
62
|
+
const name = typeof e.struct === 'string' ? e.struct : e.struct?.name;
|
|
63
|
+
return e.id === methodPart || name === methodPart;
|
|
64
|
+
});
|
|
65
|
+
if (match) {
|
|
66
|
+
calleeId = match.id;
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
// Also try "module.method" concatenated as an ID
|
|
71
|
+
if (!calleeId && entityById.has(`${modulePart}.${methodPart}`)) {
|
|
72
|
+
calleeId = `${modulePart}.${methodPart}`;
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
// Strategy 3: Prefix match
|
|
58
76
|
if (!calleeId) {
|
|
59
|
-
const
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
return e.id === methodPart || name === methodPart;
|
|
64
|
-
});
|
|
65
|
-
if (match) {
|
|
66
|
-
calleeId = match.id;
|
|
77
|
+
for (const [id] of entityById) {
|
|
78
|
+
if (id.endsWith(`.${target}`) || id === target) {
|
|
79
|
+
calleeId = id;
|
|
80
|
+
break;
|
|
67
81
|
}
|
|
68
82
|
}
|
|
69
83
|
}
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
calleeId
|
|
84
|
+
if (calleeId && calleeId !== caller.id) {
|
|
85
|
+
callees.get(caller.id)?.add(calleeId);
|
|
86
|
+
callers.get(calleeId)?.add(caller.id);
|
|
73
87
|
}
|
|
74
88
|
}
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
89
|
+
}
|
|
90
|
+
// Process lexical imports (wiring file-level dependencies into the graph)
|
|
91
|
+
const importsArray = caller.edges.imports;
|
|
92
|
+
if (Array.isArray(importsArray)) {
|
|
93
|
+
for (const imp of importsArray) {
|
|
94
|
+
// If it's a module-level entity, we link to the module
|
|
95
|
+
let calleeId = null;
|
|
96
|
+
const source = imp.source || imp.module;
|
|
97
|
+
if (!source)
|
|
98
|
+
continue;
|
|
99
|
+
// Attempt to find the imported module or entity
|
|
100
|
+
// Here we do a basic substring match against source files/modules
|
|
101
|
+
for (const [id, entity] of entityById) {
|
|
102
|
+
const eSource = entity._sourceFile || entity.context?.module || '';
|
|
103
|
+
if (eSource && (eSource.includes(source) || source.includes(eSource))) {
|
|
79
104
|
calleeId = id;
|
|
80
105
|
break;
|
|
81
106
|
}
|
|
82
107
|
}
|
|
108
|
+
if (calleeId && calleeId !== caller.id) {
|
|
109
|
+
callees.get(caller.id)?.add(calleeId);
|
|
110
|
+
callers.get(calleeId)?.add(caller.id);
|
|
111
|
+
}
|
|
83
112
|
}
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
113
|
+
}
|
|
114
|
+
// Process IoC / Dynamic Dispatch (calledBy)
|
|
115
|
+
const calledByArray = caller.edges.calledBy;
|
|
116
|
+
if (Array.isArray(calledByArray)) {
|
|
117
|
+
for (const cb of calledByArray) {
|
|
118
|
+
const source = cb.source;
|
|
119
|
+
if (!source)
|
|
120
|
+
continue;
|
|
121
|
+
let callerId = null;
|
|
122
|
+
if (entityById.has(source)) {
|
|
123
|
+
callerId = source;
|
|
124
|
+
}
|
|
125
|
+
else {
|
|
126
|
+
for (const [id] of entityById) {
|
|
127
|
+
if (id.endsWith(`.${source}`) || id === source) {
|
|
128
|
+
callerId = id;
|
|
129
|
+
break;
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
// In this relationship, the current entity (`caller` in this loop context, despite the name)
|
|
134
|
+
// is actually the CALLEE, and the `source` is the true CALLER.
|
|
135
|
+
if (callerId && callerId !== caller.id) {
|
|
136
|
+
callees.get(callerId)?.add(caller.id); // true caller calls this entity
|
|
137
|
+
callers.get(caller.id)?.add(callerId); // this entity is called by true caller
|
|
138
|
+
}
|
|
87
139
|
}
|
|
88
140
|
}
|
|
89
141
|
}
|
package/dist/index.d.ts
CHANGED
|
@@ -13,12 +13,13 @@
|
|
|
13
13
|
* Single-project mode (default):
|
|
14
14
|
* When SESHAT_PROJECTS is not set, loads from CWD. No `project` param needed.
|
|
15
15
|
*
|
|
16
|
-
*
|
|
16
|
+
* 20 tools across 4 categories:
|
|
17
17
|
* Discovery: list_projects, query_entities, get_entity, list_modules, get_topology
|
|
18
18
|
* Graph: get_dependencies, get_data_flow, find_by_constraint, get_blast_radius
|
|
19
19
|
* Analysis: find_dead_code, find_layer_violations, get_coupling_metrics,
|
|
20
20
|
* get_auth_matrix, find_error_gaps, get_test_coverage,
|
|
21
21
|
* get_optimal_context, estimate_task_cost, report_actual_burn
|
|
22
|
+
* Diff: diff_bundle, conflict_matrix
|
|
22
23
|
*
|
|
23
24
|
* Usage:
|
|
24
25
|
* npx @papyruslabs/seshat-mcp # single project (CWD)
|
package/dist/index.js
CHANGED
|
@@ -13,12 +13,13 @@
|
|
|
13
13
|
* Single-project mode (default):
|
|
14
14
|
* When SESHAT_PROJECTS is not set, loads from CWD. No `project` param needed.
|
|
15
15
|
*
|
|
16
|
-
*
|
|
16
|
+
* 20 tools across 4 categories:
|
|
17
17
|
* Discovery: list_projects, query_entities, get_entity, list_modules, get_topology
|
|
18
18
|
* Graph: get_dependencies, get_data_flow, find_by_constraint, get_blast_radius
|
|
19
19
|
* Analysis: find_dead_code, find_layer_violations, get_coupling_metrics,
|
|
20
20
|
* get_auth_matrix, find_error_gaps, get_test_coverage,
|
|
21
21
|
* get_optimal_context, estimate_task_cost, report_actual_burn
|
|
22
|
+
* Diff: diff_bundle, conflict_matrix
|
|
22
23
|
*
|
|
23
24
|
* Usage:
|
|
24
25
|
* npx @papyruslabs/seshat-mcp # single project (CWD)
|
|
@@ -33,7 +34,8 @@ import { CallToolRequestSchema, ListToolsRequestSchema, } from '@modelcontextpro
|
|
|
33
34
|
import { MultiLoader } from './loader.js';
|
|
34
35
|
import { bootstrap } from './bootstrap.js';
|
|
35
36
|
import { initTools, queryEntities, getEntity, getDependencies, getDataFlow, findByConstraint, getBlastRadius, listModules, getTopology, } from './tools/index.js';
|
|
36
|
-
import { findDeadCode, findLayerViolations, getCouplingMetrics, getAuthMatrix, findErrorGaps, getTestCoverage, getOptimalContext, estimateTaskCost, reportActualBurn, } from './tools/functors.js';
|
|
37
|
+
import { findDeadCode, findLayerViolations, getCouplingMetrics, getAuthMatrix, findErrorGaps, getTestCoverage, getOptimalContext, estimateTaskCost, reportActualBurn, find_runtime_violations, find_ownership_violations, query_traits, simulate_mutation, query_data_targets, find_exposure_leaks, find_semantic_clones, } from './tools/functors.js';
|
|
38
|
+
import { diffBundle, conflictMatrix, } from './tools/diff.js';
|
|
37
39
|
// ─── Project Discovery ───────────────────────────────────────────
|
|
38
40
|
/**
|
|
39
41
|
* Discover project directories from SESHAT_PROJECTS env var.
|
|
@@ -386,6 +388,170 @@ const TOOLS = [
|
|
|
386
388
|
},
|
|
387
389
|
},
|
|
388
390
|
},
|
|
391
|
+
// ─── Semantic (9D) JSTF-T Tools ─────────────────────────────────
|
|
392
|
+
{
|
|
393
|
+
name: 'find_runtime_violations',
|
|
394
|
+
description: 'Analyze the call graph across the ρ (Runtime) dimension. Finds architectural leaks where framework-agnostic code improperly imports framework-specific code (e.g. pure logic calling React hooks) or where incompatible frameworks mix directly.',
|
|
395
|
+
inputSchema: {
|
|
396
|
+
type: 'object',
|
|
397
|
+
properties: {
|
|
398
|
+
project: projectParam,
|
|
399
|
+
},
|
|
400
|
+
},
|
|
401
|
+
},
|
|
402
|
+
{
|
|
403
|
+
name: 'find_ownership_violations',
|
|
404
|
+
description: 'Analyze the codebase across the λ (Ownership/Lifetimes) dimension. Flags entities with complex memory management constraints, unsafe blocks, escaping boundaries, or illegal mutability patterns on borrowed references.',
|
|
405
|
+
inputSchema: {
|
|
406
|
+
type: 'object',
|
|
407
|
+
properties: {
|
|
408
|
+
project: projectParam,
|
|
409
|
+
},
|
|
410
|
+
},
|
|
411
|
+
},
|
|
412
|
+
{
|
|
413
|
+
name: 'query_traits',
|
|
414
|
+
description: 'Search the codebase across the τ (Traits/Capabilities) dimension. Allows you to find entities by their abstract capabilities (e.g., "fallible", "asyncContext", "generator") regardless of their structural syntax.',
|
|
415
|
+
inputSchema: {
|
|
416
|
+
type: 'object',
|
|
417
|
+
properties: {
|
|
418
|
+
project: projectParam,
|
|
419
|
+
trait: {
|
|
420
|
+
type: 'string',
|
|
421
|
+
description: 'The trait or capability to search for (e.g., "fallible", "asyncContext")',
|
|
422
|
+
},
|
|
423
|
+
},
|
|
424
|
+
required: ['trait'],
|
|
425
|
+
},
|
|
426
|
+
},
|
|
427
|
+
{
|
|
428
|
+
name: 'simulate_mutation',
|
|
429
|
+
description: 'The Semantic Physics Engine. Proposes a hypothetical change to an entity\'s dimensions (like adding a "fallible" or "auth" trait) and simulates the topological fallout upstream and downstream. Returns a blueprint of exactly which other entities will break and what fixes they require.',
|
|
430
|
+
inputSchema: {
|
|
431
|
+
type: 'object',
|
|
432
|
+
properties: {
|
|
433
|
+
project: projectParam,
|
|
434
|
+
entity_id: {
|
|
435
|
+
type: 'string',
|
|
436
|
+
description: 'The target entity to mutate.',
|
|
437
|
+
},
|
|
438
|
+
mutation: {
|
|
439
|
+
type: 'object',
|
|
440
|
+
description: 'The hypothetical change to apply.',
|
|
441
|
+
properties: {
|
|
442
|
+
dimension: { type: 'string', enum: ['constraints', 'traits'] },
|
|
443
|
+
change: {
|
|
444
|
+
type: 'object',
|
|
445
|
+
properties: {
|
|
446
|
+
add: { type: 'array', items: { type: 'string' } },
|
|
447
|
+
remove: { type: 'array', items: { type: 'string' } },
|
|
448
|
+
},
|
|
449
|
+
},
|
|
450
|
+
},
|
|
451
|
+
required: ['dimension', 'change'],
|
|
452
|
+
},
|
|
453
|
+
},
|
|
454
|
+
required: ['entity_id', 'mutation'],
|
|
455
|
+
},
|
|
456
|
+
},
|
|
457
|
+
{
|
|
458
|
+
name: 'query_data_targets',
|
|
459
|
+
description: 'Search the codebase across the δ (Data) dimension to find all entities that read or write to a specific database table, state object, or data source. This acts as a reverse-index for data flow, essential for planning migrations or state refactors.',
|
|
460
|
+
inputSchema: {
|
|
461
|
+
type: 'object',
|
|
462
|
+
properties: {
|
|
463
|
+
project: projectParam,
|
|
464
|
+
target_name: {
|
|
465
|
+
type: 'string',
|
|
466
|
+
description: 'The name of the database table, state object, or data source to query (e.g., "users", "auth_token").',
|
|
467
|
+
},
|
|
468
|
+
},
|
|
469
|
+
required: ['target_name'],
|
|
470
|
+
},
|
|
471
|
+
},
|
|
472
|
+
{
|
|
473
|
+
name: 'find_exposure_leaks',
|
|
474
|
+
description: 'Analyze the call graph across the χ (Context) dimension to find architectural visibility leaks. Flags paths where a "public" or "api" entity directly accesses a "private" entity, potentially leaking sensitive data or bypassing internal service boundaries.',
|
|
475
|
+
inputSchema: {
|
|
476
|
+
type: 'object',
|
|
477
|
+
properties: {
|
|
478
|
+
project: projectParam,
|
|
479
|
+
},
|
|
480
|
+
},
|
|
481
|
+
},
|
|
482
|
+
{
|
|
483
|
+
name: 'find_semantic_clones',
|
|
484
|
+
description: 'Analyze the codebase across the Σ (Semantics) dimension to find duplicated logic blocks. Normalizes variables and compares abstract syntax tree shapes to identify identical algorithms written across different files or even different languages.',
|
|
485
|
+
inputSchema: {
|
|
486
|
+
type: 'object',
|
|
487
|
+
properties: {
|
|
488
|
+
project: projectParam,
|
|
489
|
+
min_complexity: {
|
|
490
|
+
type: 'number',
|
|
491
|
+
description: 'Minimum number of logic expressions required to constitute a match (default: 5).',
|
|
492
|
+
},
|
|
493
|
+
},
|
|
494
|
+
},
|
|
495
|
+
},
|
|
496
|
+
// ─── Diff Tools ─────────────────────────────────────────────────
|
|
497
|
+
{
|
|
498
|
+
name: 'diff_bundle',
|
|
499
|
+
description: 'Compare entities between a worktree and the loaded project. Shows which entities were added, removed, or modified at the symbol level — not a line diff, but a structural diff showing changed signatures, call graphs, constraints, and logic. Extracts the worktree automatically if no bundle exists.',
|
|
500
|
+
inputSchema: {
|
|
501
|
+
type: 'object',
|
|
502
|
+
properties: {
|
|
503
|
+
project: projectParam,
|
|
504
|
+
worktree_path: {
|
|
505
|
+
type: 'string',
|
|
506
|
+
description: 'Absolute path to the worktree or branch checkout to compare against the loaded project',
|
|
507
|
+
},
|
|
508
|
+
include_unchanged: {
|
|
509
|
+
type: 'boolean',
|
|
510
|
+
description: 'Include unchanged entities in the output (default: false)',
|
|
511
|
+
},
|
|
512
|
+
},
|
|
513
|
+
required: ['worktree_path'],
|
|
514
|
+
},
|
|
515
|
+
},
|
|
516
|
+
{
|
|
517
|
+
name: 'conflict_matrix',
|
|
518
|
+
description: 'Given multiple tasks, classify every task pair into conflict tiers bridging JSTF-T theory and Git reality. Tier 1 (different files, safe), Tier 2 (same file, different entities, safe), Tier 3 (same entity, orthogonal Spatial Zones like imports vs logic, risky but parallelizable), Tier 4 (same entity, same Spatial Zone, MUST sequence). Passing "dimensions" per task enables Tier 3 downgrades.',
|
|
519
|
+
inputSchema: {
|
|
520
|
+
type: 'object',
|
|
521
|
+
properties: {
|
|
522
|
+
project: projectParam,
|
|
523
|
+
tasks: {
|
|
524
|
+
type: 'array',
|
|
525
|
+
items: {
|
|
526
|
+
type: 'object',
|
|
527
|
+
properties: {
|
|
528
|
+
id: {
|
|
529
|
+
type: 'string',
|
|
530
|
+
description: 'Unique task identifier (e.g. "add-dark-mode")',
|
|
531
|
+
},
|
|
532
|
+
entity_ids: {
|
|
533
|
+
type: 'array',
|
|
534
|
+
items: { type: 'string' },
|
|
535
|
+
description: 'Entity IDs or names that this task will modify',
|
|
536
|
+
},
|
|
537
|
+
dimensions: {
|
|
538
|
+
type: 'array',
|
|
539
|
+
items: { type: 'string' },
|
|
540
|
+
description: 'Optional: JSTF-T dimensions this task will modify (e.g., "edges", "struct", "semantics", "constraints"). Used to downgrade conflicts via Spatial Zones.',
|
|
541
|
+
},
|
|
542
|
+
expand_blast_radius: {
|
|
543
|
+
type: 'boolean',
|
|
544
|
+
description: 'Include transitively affected entities in the conflict check (default: false)',
|
|
545
|
+
},
|
|
546
|
+
},
|
|
547
|
+
required: ['id', 'entity_ids'],
|
|
548
|
+
},
|
|
549
|
+
description: 'Array of tasks to check for conflicts. Each task specifies which entities it will modify.',
|
|
550
|
+
},
|
|
551
|
+
},
|
|
552
|
+
required: ['tasks'],
|
|
553
|
+
},
|
|
554
|
+
},
|
|
389
555
|
];
|
|
390
556
|
// ─── Server Setup ─────────────────────────────────────────────────
|
|
391
557
|
async function main() {
|
|
@@ -441,7 +607,7 @@ async function main() {
|
|
|
441
607
|
}
|
|
442
608
|
const server = new Server({
|
|
443
609
|
name: serverLabel,
|
|
444
|
-
version: '0.
|
|
610
|
+
version: '0.5.0',
|
|
445
611
|
}, {
|
|
446
612
|
capabilities: {
|
|
447
613
|
tools: {},
|
|
@@ -531,6 +697,35 @@ async function main() {
|
|
|
531
697
|
case 'report_actual_burn':
|
|
532
698
|
result = await reportActualBurn(args);
|
|
533
699
|
break;
|
|
700
|
+
// Semantic (9D) JSTF-T Tools
|
|
701
|
+
case 'find_runtime_violations':
|
|
702
|
+
result = find_runtime_violations(args);
|
|
703
|
+
break;
|
|
704
|
+
case 'find_ownership_violations':
|
|
705
|
+
result = find_ownership_violations(args);
|
|
706
|
+
break;
|
|
707
|
+
case 'query_traits':
|
|
708
|
+
result = query_traits(args);
|
|
709
|
+
break;
|
|
710
|
+
case 'simulate_mutation':
|
|
711
|
+
result = simulate_mutation(args);
|
|
712
|
+
break;
|
|
713
|
+
case 'query_data_targets':
|
|
714
|
+
result = query_data_targets(args);
|
|
715
|
+
break;
|
|
716
|
+
case 'find_exposure_leaks':
|
|
717
|
+
result = find_exposure_leaks(args);
|
|
718
|
+
break;
|
|
719
|
+
case 'find_semantic_clones':
|
|
720
|
+
result = find_semantic_clones(args);
|
|
721
|
+
break;
|
|
722
|
+
// Diff Tools
|
|
723
|
+
case 'diff_bundle':
|
|
724
|
+
result = await diffBundle(args);
|
|
725
|
+
break;
|
|
726
|
+
case 'conflict_matrix':
|
|
727
|
+
result = conflictMatrix(args);
|
|
728
|
+
break;
|
|
534
729
|
default:
|
|
535
730
|
result = { error: `Unknown tool: ${name}` };
|
|
536
731
|
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cross-Bundle Analysis Tools: diff_bundle + conflict_matrix
|
|
3
|
+
*
|
|
4
|
+
* diff_bundle: Compare entities between a worktree and the loaded project.
|
|
5
|
+
* conflict_matrix: Classify conflict tiers for parallel task scheduling.
|
|
6
|
+
*
|
|
7
|
+
* These tools compare TWO entity sets (base vs branch, or task vs task),
|
|
8
|
+
* unlike the single-bundle queries in index.ts and functors.ts.
|
|
9
|
+
*/
|
|
10
|
+
export declare function diffBundle(args: {
|
|
11
|
+
worktree_path: string;
|
|
12
|
+
project?: string;
|
|
13
|
+
include_unchanged?: boolean;
|
|
14
|
+
}): Promise<unknown>;
|
|
15
|
+
export declare function conflictMatrix(args: {
|
|
16
|
+
tasks: Array<{
|
|
17
|
+
id: string;
|
|
18
|
+
entity_ids: string[];
|
|
19
|
+
dimensions?: string[];
|
|
20
|
+
expand_blast_radius?: boolean;
|
|
21
|
+
}>;
|
|
22
|
+
project?: string;
|
|
23
|
+
}): unknown;
|
|
@@ -0,0 +1,491 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cross-Bundle Analysis Tools: diff_bundle + conflict_matrix
|
|
3
|
+
*
|
|
4
|
+
* diff_bundle: Compare entities between a worktree and the loaded project.
|
|
5
|
+
* conflict_matrix: Classify conflict tiers for parallel task scheduling.
|
|
6
|
+
*
|
|
7
|
+
* These tools compare TWO entity sets (base vs branch, or task vs task),
|
|
8
|
+
* unlike the single-bundle queries in index.ts and functors.ts.
|
|
9
|
+
*/
|
|
10
|
+
import fs from 'fs';
|
|
11
|
+
import path from 'path';
|
|
12
|
+
import { bootstrap } from '../bootstrap.js';
|
|
13
|
+
import { computeBlastRadius } from '../graph.js';
|
|
14
|
+
import { getLoader, getGraph, validateProject, entityName, entityLayer, } from './index.js';
|
|
15
|
+
// ─── Entity Identity ─────────────────────────────────────────────
|
|
16
|
+
// Ported from api-v2/translator/seshat-pipeline/src/incremental/diff-engine.mjs
|
|
17
|
+
/**
|
|
18
|
+
* Generate a unique key for an entity: <relative_path>::<id>
|
|
19
|
+
*
|
|
20
|
+
* Uses _sourceFile (already relative in bundles) or derives from
|
|
21
|
+
* sourceFile (absolute path) by stripping the repo root.
|
|
22
|
+
*/
|
|
23
|
+
function entityKey(entity, repoRoot) {
|
|
24
|
+
let relativePath = entity._sourceFile || null;
|
|
25
|
+
// If _sourceFile not set, try deriving from sourceFile (absolute)
|
|
26
|
+
if (!relativePath) {
|
|
27
|
+
const raw = entity;
|
|
28
|
+
const sourceFile = (raw.sourceFile || '').replace(/\\/g, '/');
|
|
29
|
+
if (repoRoot && sourceFile) {
|
|
30
|
+
const normalizedRoot = repoRoot.replace(/\\/g, '/').replace(/\/+$/, '') + '/';
|
|
31
|
+
if (sourceFile.startsWith(normalizedRoot)) {
|
|
32
|
+
relativePath = sourceFile.substring(normalizedRoot.length);
|
|
33
|
+
}
|
|
34
|
+
else {
|
|
35
|
+
// Case-insensitive match (Windows paths)
|
|
36
|
+
const lowerFile = sourceFile.toLowerCase();
|
|
37
|
+
const lowerRoot = normalizedRoot.toLowerCase();
|
|
38
|
+
if (lowerFile.startsWith(lowerRoot)) {
|
|
39
|
+
relativePath = sourceFile.substring(normalizedRoot.length);
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
if (!relativePath) {
|
|
44
|
+
relativePath = sourceFile || 'unknown';
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
// Normalize path separators
|
|
48
|
+
relativePath = (relativePath || 'unknown').replace(/\\/g, '/');
|
|
49
|
+
const id = entity.id || (typeof entity.struct === 'string' ? entity.struct : entity.struct?.name) || 'anonymous';
|
|
50
|
+
return `${relativePath}::${id}`;
|
|
51
|
+
}
|
|
52
|
+
// ─── Bundle Loading ───────────────────────────────────────────────
|
|
53
|
+
/**
|
|
54
|
+
* Load a branch bundle from a worktree path.
|
|
55
|
+
* Does NOT add to the global MultiLoader — this is a transient comparison target.
|
|
56
|
+
*
|
|
57
|
+
* 1. Check <worktree_path>/.seshat/_bundle.json — parse if exists
|
|
58
|
+
* 2. If not, run bootstrap() to extract on-the-fly
|
|
59
|
+
* 3. Apply the same field remapping as BundleLoader (sourceFile → _sourceFile, etc.)
|
|
60
|
+
*/
|
|
61
|
+
async function loadBranchBundle(worktreePath) {
|
|
62
|
+
const absPath = path.resolve(worktreePath);
|
|
63
|
+
const bundlePath = path.join(absPath, '.seshat', '_bundle.json');
|
|
64
|
+
let bundle;
|
|
65
|
+
if (fs.existsSync(bundlePath)) {
|
|
66
|
+
const raw = fs.readFileSync(bundlePath, 'utf-8');
|
|
67
|
+
bundle = JSON.parse(raw);
|
|
68
|
+
}
|
|
69
|
+
else {
|
|
70
|
+
// Auto-extract via bootstrap
|
|
71
|
+
const result = await bootstrap(absPath);
|
|
72
|
+
if (!result.success) {
|
|
73
|
+
throw new Error(`Failed to extract bundle from ${absPath}: ${result.error}`);
|
|
74
|
+
}
|
|
75
|
+
// Read the freshly generated bundle
|
|
76
|
+
if (!fs.existsSync(bundlePath)) {
|
|
77
|
+
throw new Error(`Bootstrap succeeded but no bundle found at ${bundlePath}`);
|
|
78
|
+
}
|
|
79
|
+
const raw = fs.readFileSync(bundlePath, 'utf-8');
|
|
80
|
+
bundle = JSON.parse(raw);
|
|
81
|
+
}
|
|
82
|
+
const entities = bundle.entities || [];
|
|
83
|
+
// Apply the same field remapping as BundleLoader.load()
|
|
84
|
+
for (const e of entities) {
|
|
85
|
+
const raw = e;
|
|
86
|
+
if (raw.sourceFile && !e._sourceFile) {
|
|
87
|
+
e._sourceFile = raw.sourceFile;
|
|
88
|
+
}
|
|
89
|
+
if (raw.sourceLanguage && !e._sourceLanguage) {
|
|
90
|
+
e._sourceLanguage = raw.sourceLanguage;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
// Try to get commit SHA from manifest
|
|
94
|
+
let commitSha = '';
|
|
95
|
+
const manifestPath = path.join(absPath, '.seshat', 'manifest.json');
|
|
96
|
+
if (fs.existsSync(manifestPath)) {
|
|
97
|
+
try {
|
|
98
|
+
const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
|
|
99
|
+
commitSha = manifest.commitSha || '';
|
|
100
|
+
}
|
|
101
|
+
catch { /* ignore */ }
|
|
102
|
+
}
|
|
103
|
+
return {
|
|
104
|
+
entities,
|
|
105
|
+
source: bundle.source || absPath,
|
|
106
|
+
commitSha,
|
|
107
|
+
entityCount: entities.length,
|
|
108
|
+
};
|
|
109
|
+
}
|
|
110
|
+
// ─── Field Comparison ─────────────────────────────────────────────
|
|
111
|
+
/** The entity fields to compare, mapped to user-facing names (v0.4.2 convention) */
|
|
112
|
+
const FIELD_MAP = [
|
|
113
|
+
['struct', 'structure'],
|
|
114
|
+
['edges', 'call_graph'],
|
|
115
|
+
['data', 'data_flow'],
|
|
116
|
+
['constraints', 'constraints'],
|
|
117
|
+
['context', 'context'],
|
|
118
|
+
['ownership', 'ownership'],
|
|
119
|
+
['traits', 'type_info'],
|
|
120
|
+
['runtime', 'runtime'],
|
|
121
|
+
['semantics', 'logic'],
|
|
122
|
+
];
|
|
123
|
+
/**
|
|
124
|
+
* Compare two entities field-by-field. Returns which fields changed
|
|
125
|
+
* using user-facing names.
|
|
126
|
+
*/
|
|
127
|
+
function compareEntityFields(baseEntity, branchEntity) {
|
|
128
|
+
const changed = [];
|
|
129
|
+
for (const [field, displayName] of FIELD_MAP) {
|
|
130
|
+
const baseVal = baseEntity[field];
|
|
131
|
+
const branchVal = branchEntity[field];
|
|
132
|
+
// Both undefined/null → no change
|
|
133
|
+
if (baseVal == null && branchVal == null)
|
|
134
|
+
continue;
|
|
135
|
+
// One null, other not → changed
|
|
136
|
+
if (baseVal == null || branchVal == null) {
|
|
137
|
+
changed.push(displayName);
|
|
138
|
+
continue;
|
|
139
|
+
}
|
|
140
|
+
// Deep compare via JSON
|
|
141
|
+
if (JSON.stringify(baseVal) !== JSON.stringify(branchVal)) {
|
|
142
|
+
changed.push(displayName);
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
return changed;
|
|
146
|
+
}
|
|
147
|
+
// ─── Tool: diff_bundle ───────────────────────────────────────────
|
|
148
|
+
export async function diffBundle(args) {
|
|
149
|
+
const projErr = validateProject(args.project);
|
|
150
|
+
if (projErr)
|
|
151
|
+
return { error: projErr };
|
|
152
|
+
const loader = getLoader();
|
|
153
|
+
const baseEntities = loader.getEntities(args.project);
|
|
154
|
+
const baseManifest = loader.getManifest(args.project);
|
|
155
|
+
if (baseEntities.length === 0) {
|
|
156
|
+
return { error: 'No base entities loaded. Ensure the project has been extracted.' };
|
|
157
|
+
}
|
|
158
|
+
// Load branch bundle
|
|
159
|
+
let branchData;
|
|
160
|
+
try {
|
|
161
|
+
branchData = await loadBranchBundle(args.worktree_path);
|
|
162
|
+
}
|
|
163
|
+
catch (err) {
|
|
164
|
+
return { error: `Failed to load branch bundle: ${err.message}` };
|
|
165
|
+
}
|
|
166
|
+
const branchEntities = branchData.entities;
|
|
167
|
+
// Build entity maps by key
|
|
168
|
+
const baseSource = baseManifest?.commitSha ? '' : ''; // base entities have relative paths already
|
|
169
|
+
const baseMap = new Map();
|
|
170
|
+
for (const entity of baseEntities) {
|
|
171
|
+
const key = entityKey(entity);
|
|
172
|
+
baseMap.set(key, entity);
|
|
173
|
+
}
|
|
174
|
+
const branchMap = new Map();
|
|
175
|
+
for (const entity of branchEntities) {
|
|
176
|
+
const key = entityKey(entity);
|
|
177
|
+
branchMap.set(key, entity);
|
|
178
|
+
}
|
|
179
|
+
// Classify entities
|
|
180
|
+
const added = [];
|
|
181
|
+
const removed = [];
|
|
182
|
+
const modified = [];
|
|
183
|
+
const unchanged = [];
|
|
184
|
+
// Added: in branch but not base
|
|
185
|
+
for (const [key, entity] of branchMap) {
|
|
186
|
+
if (!baseMap.has(key)) {
|
|
187
|
+
added.push({
|
|
188
|
+
id: entity.id,
|
|
189
|
+
name: entityName(entity),
|
|
190
|
+
sourceFile: entity._sourceFile || null,
|
|
191
|
+
layer: entityLayer(entity),
|
|
192
|
+
});
|
|
193
|
+
}
|
|
194
|
+
}
|
|
195
|
+
// Removed: in base but not branch
|
|
196
|
+
for (const [key, entity] of baseMap) {
|
|
197
|
+
if (!branchMap.has(key)) {
|
|
198
|
+
removed.push({
|
|
199
|
+
id: entity.id,
|
|
200
|
+
name: entityName(entity),
|
|
201
|
+
sourceFile: entity._sourceFile || null,
|
|
202
|
+
layer: entityLayer(entity),
|
|
203
|
+
});
|
|
204
|
+
}
|
|
205
|
+
}
|
|
206
|
+
// Modified / Unchanged: in both
|
|
207
|
+
for (const [key, branchEntity] of branchMap) {
|
|
208
|
+
const baseEntity = baseMap.get(key);
|
|
209
|
+
if (!baseEntity)
|
|
210
|
+
continue; // already in added
|
|
211
|
+
const changedFields = compareEntityFields(baseEntity, branchEntity);
|
|
212
|
+
if (changedFields.length > 0) {
|
|
213
|
+
modified.push({
|
|
214
|
+
id: branchEntity.id,
|
|
215
|
+
name: entityName(branchEntity),
|
|
216
|
+
sourceFile: branchEntity._sourceFile || null,
|
|
217
|
+
layer: entityLayer(branchEntity),
|
|
218
|
+
changedFields,
|
|
219
|
+
});
|
|
220
|
+
}
|
|
221
|
+
else if (args.include_unchanged) {
|
|
222
|
+
unchanged.push({
|
|
223
|
+
id: branchEntity.id,
|
|
224
|
+
name: entityName(branchEntity),
|
|
225
|
+
sourceFile: branchEntity._sourceFile || null,
|
|
226
|
+
layer: entityLayer(branchEntity),
|
|
227
|
+
});
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
// Count unique files touched
|
|
231
|
+
const touchedFiles = new Set();
|
|
232
|
+
for (const e of [...added, ...removed, ...modified]) {
|
|
233
|
+
if (e.sourceFile)
|
|
234
|
+
touchedFiles.add(e.sourceFile);
|
|
235
|
+
}
|
|
236
|
+
const totalChanges = added.length + removed.length + modified.length;
|
|
237
|
+
const result = {
|
|
238
|
+
base: {
|
|
239
|
+
project: baseManifest?.projectName || args.project || 'default',
|
|
240
|
+
commitSha: baseManifest?.commitSha || '',
|
|
241
|
+
entityCount: baseEntities.length,
|
|
242
|
+
},
|
|
243
|
+
branch: {
|
|
244
|
+
path: args.worktree_path,
|
|
245
|
+
commitSha: branchData.commitSha,
|
|
246
|
+
entityCount: branchData.entityCount,
|
|
247
|
+
},
|
|
248
|
+
summary: {
|
|
249
|
+
added: added.length,
|
|
250
|
+
removed: removed.length,
|
|
251
|
+
modified: modified.length,
|
|
252
|
+
unchanged: baseEntities.length - removed.length - modified.length,
|
|
253
|
+
},
|
|
254
|
+
added,
|
|
255
|
+
removed,
|
|
256
|
+
modified,
|
|
257
|
+
_summary: `Branch modifies ${totalChanges} entities (${added.length} added, ${removed.length} removed, ${modified.length} modified) across ${touchedFiles.size} files`,
|
|
258
|
+
};
|
|
259
|
+
if (args.include_unchanged) {
|
|
260
|
+
result.unchanged = unchanged;
|
|
261
|
+
}
|
|
262
|
+
return result;
|
|
263
|
+
}
|
|
264
|
+
// ─── Conflict Tier Classification ─────────────────────────────────
|
|
265
|
+
const ZONE_1 = new Set(['edges', 'imports', 'ε']); // Header / Far from body
|
|
266
|
+
const ZONE_2 = new Set(['struct', 'σ', 'constraints', 'κ', 'traits', 'τ', 'ownership', 'λ']); // Signature / Decorators (High collision risk)
|
|
267
|
+
const ZONE_3 = new Set(['semantics', 'Σ', 'data', 'δ', 'runtime', 'ρ']); // Body / Implementation
|
|
268
|
+
function getZones(dimensions) {
|
|
269
|
+
const zones = new Set();
|
|
270
|
+
for (const d of dimensions) {
|
|
271
|
+
const lower = d.toLowerCase();
|
|
272
|
+
if (ZONE_1.has(lower))
|
|
273
|
+
zones.add(1);
|
|
274
|
+
if (ZONE_2.has(lower))
|
|
275
|
+
zones.add(2);
|
|
276
|
+
if (ZONE_3.has(lower))
|
|
277
|
+
zones.add(3);
|
|
278
|
+
}
|
|
279
|
+
return zones;
|
|
280
|
+
}
|
|
281
|
+
/**
 * Classify the conflict tier between two resolved tasks.
 *
 * Tier 1: no shared files — fully parallel.
 * Tier 2: shared files but disjoint entities — git auto-merge handles it.
 * Tier 3: shared entities but orthogonal spatial zones — parallelizable.
 * Tier 4: shared entities in the same/unknown zone — must run sequentially.
 *
 * @param {{id: string, entityIds: Set, files: Set, dimensions: Set}} taskA
 * @param {{id: string, entityIds: Set, files: Set, dimensions: Set}} taskB
 * @returns {{tier: number, reason: string, sharedFiles: string[], sharedEntities: string[]}}
 */
function classifyConflictTier(taskA, taskB) {
    // Entity-level overlap is checked first; it dominates file-level overlap.
    const sharedEntities = [...taskA.entityIds].filter((id) => taskB.entityIds.has(id));
    if (sharedEntities.length > 0) {
        // Same entity touched by both tasks. If both declare dimensions and
        // their spatial zones are disjoint, downgrade Tier 4 → Tier 3.
        if (taskA.dimensions.size > 0 && taskB.dimensions.size > 0) {
            const zonesA = getZones(taskA.dimensions);
            const zonesB = getZones(taskB.dimensions);
            const spatialCollision = [...zonesA].some((z) => zonesB.has(z));
            if (!spatialCollision) {
                return {
                    tier: 3,
                    reason: `${sharedEntities.length} shared entities, but orthogonal Spatial Zones (Tier 3) — safe to parallelize`,
                    sharedFiles: [],
                    sharedEntities,
                };
            }
        }
        return {
            tier: 4,
            reason: `${sharedEntities.length} shared entities in same/unknown Spatial Zone (Tier 4) — MUST sequence to prevent git conflict`,
            sharedFiles: [],
            sharedEntities,
        };
    }
    // No entity overlap: tier is decided purely by file overlap.
    const sharedFiles = [...taskA.files].filter((file) => taskB.files.has(file));
    if (sharedFiles.length === 0) {
        return {
            tier: 1,
            reason: 'No shared files — safe to parallelize',
            sharedFiles: [],
            sharedEntities: [],
        };
    }
    return {
        tier: 2,
        reason: `${sharedFiles.length} shared files but different entities — git auto-merge handles it`,
        sharedFiles,
        sharedEntities: [],
    };
}
|
|
337
|
+
// ─── Execution Plan Builder ───────────────────────────────────────
/**
 * Build execution plan from tier-4 conflict graph using connected components.
 * Tasks with no tier-4 edges to each other run in parallel.
 * Tasks in the same tier-4 component run sequentially.
 *
 * @param {string[]} taskIds - all task ids in the plan
 * @param {[string, string][]} tier4Edges - pairs of task ids that conflict at tier 4
 * @returns {{parallelGroups: number, groups: object[], _summary: string}}
 */
function buildExecutionPlan(taskIds, tier4Edges) {
    // Undirected adjacency over tier-4 conflict edges.
    const neighbors = new Map(taskIds.map((id) => [id, new Set()]));
    for (const [a, b] of tier4Edges) {
        neighbors.get(a)?.add(b);
        neighbors.get(b)?.add(a);
    }
    // Connected components via iterative BFS.
    const seen = new Set();
    const components = [];
    for (const start of taskIds) {
        if (seen.has(start))
            continue;
        seen.add(start);
        const members = [];
        const frontier = [start];
        while (frontier.length > 0) {
            const node = frontier.shift();
            members.push(node);
            for (const next of neighbors.get(node) ?? []) {
                if (!seen.has(next)) {
                    seen.add(next);
                    frontier.push(next);
                }
            }
        }
        components.push(members);
    }
    // Singleton components run freely; multi-task components are sequenced
    // in BFS discovery order.
    const groups = components.map((tasks) =>
        tasks.length === 1
            ? { tasks, sequential: false }
            : { tasks, sequential: true, order: tasks });
    const sequentialGroups = groups.filter((g) => g.sequential);
    const parallelCount = groups.length - sequentialGroups.length;
    const parts = [`${taskIds.length} tasks`];
    if (parallelCount > 0) {
        parts.push(`${parallelCount === taskIds.length ? 'all' : parallelCount} can run in parallel`);
    }
    if (sequentialGroups.length > 0) {
        const seqTasks = sequentialGroups.reduce((sum, g) => sum + g.tasks.length, 0);
        parts.push(`${seqTasks} must be sequenced (${sequentialGroups.length} sequential group${sequentialGroups.length > 1 ? 's' : ''})`);
    }
    return {
        parallelGroups: groups.length,
        groups,
        _summary: parts.join(': '),
    };
}
|
|
402
|
+
// ─── Tool: conflict_matrix ────────────────────────────────────────
/**
 * Compute the pairwise conflict matrix for a set of planned tasks and
 * derive an execution plan from the tier-4 (must-sequence) conflicts.
 *
 * Each task's entity footprint is resolved by id or name; when
 * `expand_blast_radius` is set, the footprint is widened to everything
 * the task's entities can affect. Requires at least 2 tasks.
 *
 * @param {{project?: string, tasks: object[]}} args
 * @returns {object} matrix + execution plan, or `{ error }`
 */
export function conflictMatrix(args) {
    const projErr = validateProject(args.project);
    if (projErr)
        return { error: projErr };
    const loader = getLoader();
    const { tasks } = args;
    if (!tasks || tasks.length < 2) {
        return { error: 'At least 2 tasks are required to compute a conflict matrix.' };
    }
    const warnings = [];
    // Resolve each task's entity/file footprint.
    const resolvedTasks = tasks.map((task) => {
        const entityIds = new Set();
        const files = new Set();
        const entities = [];
        const notFound = [];
        const dimensions = new Set(task.dimensions || []);
        for (const nameOrId of task.entity_ids) {
            const entity = loader.getEntityById(nameOrId, args.project)
                || loader.getEntityByName(nameOrId, args.project);
            if (!entity) {
                notFound.push(nameOrId);
                continue;
            }
            entityIds.add(entity.id);
            entities.push(entity);
            if (entity._sourceFile)
                files.add(entity._sourceFile);
        }
        if (notFound.length > 0) {
            warnings.push(`Task "${task.id}": entities not found: ${notFound.join(', ')}`);
        }
        // Optionally widen the footprint to the task's full blast radius.
        if (task.expand_blast_radius && entityIds.size > 0) {
            const g = getGraph(args.project);
            const blastResult = computeBlastRadius(g, entityIds);
            for (const affectedId of blastResult.affected) {
                if (entityIds.has(affectedId))
                    continue;
                entityIds.add(affectedId);
                const affectedEntity = g.entityById.get(affectedId);
                if (affectedEntity) {
                    entities.push(affectedEntity);
                    if (affectedEntity._sourceFile)
                        files.add(affectedEntity._sourceFile);
                }
            }
        }
        return { id: task.id, entityIds, files, entities, dimensions };
    });
    // Pairwise tier classification; tier-4 pairs feed the execution plan.
    const matrix = [];
    const tier4Edges = [];
    for (let i = 0; i < resolvedTasks.length; i++) {
        for (let j = i + 1; j < resolvedTasks.length; j++) {
            const taskA = resolvedTasks[i];
            const taskB = resolvedTasks[j];
            const verdict = classifyConflictTier(taskA, taskB);
            const entry = {
                taskA: taskA.id,
                taskB: taskB.id,
                tier: verdict.tier,
                reason: verdict.reason,
            };
            if (verdict.sharedFiles.length > 0)
                entry.sharedFiles = verdict.sharedFiles;
            if (verdict.sharedEntities.length > 0)
                entry.sharedEntities = verdict.sharedEntities;
            matrix.push(entry);
            if (verdict.tier === 4)
                tier4Edges.push([taskA.id, taskB.id]);
        }
    }
    const executionPlan = buildExecutionPlan(resolvedTasks.map((t) => t.id), tier4Edges);
    const result = {
        taskCount: tasks.length,
        matrix,
        executionPlan,
    };
    if (warnings.length > 0)
        result.warnings = warnings;
    return result;
}
|
package/dist/tools/functors.d.ts
CHANGED
|
@@ -66,3 +66,35 @@ export declare function reportActualBurn(args: {
|
|
|
66
66
|
project?: string;
|
|
67
67
|
notes?: string;
|
|
68
68
|
}): Promise<unknown>;
|
|
69
|
+
export declare function find_runtime_violations(args?: {
|
|
70
|
+
project?: string;
|
|
71
|
+
}): unknown;
|
|
72
|
+
export declare function find_ownership_violations(args?: {
|
|
73
|
+
project?: string;
|
|
74
|
+
}): unknown;
|
|
75
|
+
export declare function query_traits(args: {
|
|
76
|
+
trait: string;
|
|
77
|
+
project?: string;
|
|
78
|
+
}): unknown;
|
|
79
|
+
export declare function simulate_mutation(args: {
|
|
80
|
+
entity_id: string;
|
|
81
|
+
mutation: {
|
|
82
|
+
dimension: 'constraints' | 'traits';
|
|
83
|
+
change: {
|
|
84
|
+
add?: string[];
|
|
85
|
+
remove?: string[];
|
|
86
|
+
};
|
|
87
|
+
};
|
|
88
|
+
project?: string;
|
|
89
|
+
}): unknown;
|
|
90
|
+
export declare function query_data_targets(args: {
|
|
91
|
+
target_name: string;
|
|
92
|
+
project?: string;
|
|
93
|
+
}): unknown;
|
|
94
|
+
export declare function find_exposure_leaks(args?: {
|
|
95
|
+
project?: string;
|
|
96
|
+
}): unknown;
|
|
97
|
+
export declare function find_semantic_clones(args?: {
|
|
98
|
+
project?: string;
|
|
99
|
+
min_complexity?: number;
|
|
100
|
+
}): unknown;
|
package/dist/tools/functors.js
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
* coupling metrics, auth coverage, etc.) from the entity data.
|
|
7
7
|
*/
|
|
8
8
|
import { computeBlastRadius } from '../graph.js';
|
|
9
|
-
import { getLoader, getGraph, validateProject, entityLayer, entitySummary, } from './index.js';
|
|
9
|
+
import { getLoader, getGraph, validateProject, entityName, entityLayer, entitySummary, } from './index.js';
|
|
10
10
|
import { isSupabaseConfigured, insertPrediction, updateActualBurn, abandonPrediction, listPredictions, } from '../supabase.js';
|
|
11
11
|
// ─── Layer ordering for violation detection ──────────────────────
|
|
12
12
|
const LAYER_ORDER = {
|
|
@@ -866,3 +866,347 @@ export async function reportActualBurn(args) {
|
|
|
866
866
|
_summary: `Prediction ${updated.id.slice(0, 8)}… closed. Predicted ${updated.predicted_total} tokens, actual ${updated.actual_total_tokens} tokens. Drift: ${updated.drift_ratio != null ? `${(updated.drift_ratio * 100).toFixed(1)}%` : 'N/A'}.`,
|
|
867
867
|
};
|
|
868
868
|
}
|
|
869
|
+
// ─── Tool: find_runtime_violations (ρ Dimension) ─────────────────
/**
 * Scan the call graph for runtime/framework (ρ) boundary violations:
 * framework-agnostic code depending on framework-specific code, and
 * direct calls between two different concrete frameworks.
 *
 * @param {{project?: string}} [args]
 * @returns {object} violation report (capped at 100 entries) or `{ error }`
 */
export function find_runtime_violations(args) {
    const projErr = validateProject(args?.project);
    if (projErr)
        return { error: projErr };
    const g = getGraph(args?.project);
    const violations = [];
    for (const [callerId, calleeIds] of g.callees) {
        const caller = g.entityById.get(callerId);
        if (!caller)
            continue;
        // Entities without a framework tag are treated as agnostic callers.
        const callerFw = caller.runtime?.framework || 'agnostic';
        for (const calleeId of calleeIds) {
            const callee = g.entityById.get(calleeId);
            if (!callee)
                continue;
            const calleeFw = callee.runtime?.framework;
            // Only framework-specific callees can constitute a violation.
            if (!calleeFw || calleeFw === 'agnostic')
                continue;
            if (callerFw === 'agnostic') {
                // Pure/agnostic logic reaching into framework-specific code.
                violations.push({
                    caller: { ...entitySummary(caller), framework: callerFw },
                    callee: { ...entitySummary(callee), framework: calleeFw },
                    issue: `Framework leak: Agnostic caller depends on ${calleeFw}-specific callee`,
                });
            }
            else if (callerFw !== calleeFw) {
                // Two different concrete frameworks talking directly.
                violations.push({
                    caller: { ...entitySummary(caller), framework: callerFw },
                    callee: { ...entitySummary(callee), framework: calleeFw },
                    issue: `Cross-framework boundary: ${callerFw} calling ${calleeFw}`,
                });
            }
        }
    }
    return {
        totalViolations: violations.length,
        _summary: `Found ${violations.length} runtime/framework boundary violations (ρ dimension)`,
        violations: violations.slice(0, 100),
    };
}
|
|
910
|
+
// ─── Tool: find_ownership_violations (λ Dimension) ───────────────
/**
 * Flag entities whose ownership (λ) metadata marks a memory-safety risk:
 * unsafe access, lifetime escapes, or mutation of borrowed references.
 * Only the highest-priority issue per entity is reported
 * (unsafe > escapes > mutates_borrowed).
 *
 * @param {{project?: string}} [args]
 * @returns {object} violation report (capped at 100 entries) or `{ error }`
 */
export function find_ownership_violations(args) {
    const projErr = validateProject(args?.project);
    if (projErr)
        return { error: projErr };
    const loader = getLoader();
    const violations = [];
    for (const e of loader.getEntities(args?.project)) {
        const own = e.ownership;
        if (!own || Object.keys(own).length === 0)
            continue;
        // Pick the first matching issue in priority order.
        let issue = null;
        if (own.unsafe) {
            issue = 'Unsafe memory access or pointer manipulation flagged';
        }
        else if (own.escapes) {
            issue = 'Value potentially escapes its lifetime boundary';
        }
        else if (own.mutates_borrowed) {
            issue = 'Attempts to mutate an immutably borrowed reference';
        }
        if (issue) {
            violations.push({
                entity: entitySummary(e),
                ownership_details: own,
                issue,
            });
        }
    }
    return {
        totalViolations: violations.length,
        _summary: `Found ${violations.length} entities with strict/violating ownership constraints (λ dimension)`,
        violations: violations.slice(0, 100),
    };
}
|
|
951
|
+
// ─── Tool: query_traits (τ Dimension) ────────────────────────────
/**
 * Find entities whose traits (τ) match the given trait name via a
 * case-insensitive substring test. Supports both the flat array form
 * (e.g. ['asyncContext', 'generator']) and the structured form
 * ({ self: { fallible: true }, params: { x: { bounds, markers } } }).
 *
 * @param {{trait: string, project?: string}} args
 * @returns {object} matching entities (capped at 100) or `{ error }`
 */
export function query_traits(args) {
    const projErr = validateProject(args.project);
    if (projErr)
        return { error: projErr };
    const needle = args.trait.toLowerCase();
    const loader = getLoader();
    const entities = loader.getEntities(args.project);
    const matchesTraits = (traits) => {
        if (!traits)
            return false;
        // Flat form: list of trait names.
        if (Array.isArray(traits)) {
            return traits.some((t) => t.toLowerCase().includes(needle));
        }
        if (typeof traits === 'object') {
            // Structured form: boolean flags on `self`...
            if (traits.self && typeof traits.self === 'object') {
                for (const [key, val] of Object.entries(traits.self)) {
                    if (key.toLowerCase().includes(needle) && val === true)
                        return true;
                }
            }
            // ...and bounds/markers on each parameter.
            if (traits.params && typeof traits.params === 'object') {
                for (const p of Object.values(traits.params)) {
                    if (Array.isArray(p.bounds) && p.bounds.some((b) => b.toLowerCase().includes(needle)))
                        return true;
                    if (Array.isArray(p.markers) && p.markers.some((m) => m.toLowerCase().includes(needle)))
                        return true;
                }
            }
        }
        return false;
    };
    const results = entities.filter((e) => matchesTraits(e.traits));
    return {
        trait: args.trait,
        total: results.length,
        _summary: `Found ${results.length} entities implementing the '${args.trait}' trait (τ dimension)`,
        entities: results.slice(0, 100).map(entitySummary),
    };
}
|
|
995
|
+
// ─── Tool: simulate_mutation (The Physics Engine) ────────────────
/**
 * Simulate adding constraint/trait tags to an entity and walk the call
 * graph upstream (BFS, bounded at depth 5) to find callers that would
 * structurally break.
 *
 * Rules applied per newly-added tag set:
 * - fallible/throws: callers without try/catch error handling break, and
 *   the uncaught error keeps propagating to their callers.
 * - auth/secure: callers with no auth context that are not middleware break.
 * Other tags just propagate the walk without recording fallout.
 *
 * Note: only ADDED tags are simulated; `change.remove` is currently ignored.
 *
 * @param {{entity_id: string, mutation: {dimension: string, change: {add?: string[], remove?: string[]}}, project?: string}} args
 * @returns {object} fallout report or `{ error }`
 */
export function simulate_mutation(args) {
    const projErr = validateProject(args.project);
    if (projErr)
        return { error: projErr };
    const loader = getLoader();
    const g = getGraph(args.project);
    const target = loader.getEntityById(args.entity_id, args.project)
        || loader.getEntityByName(args.entity_id, args.project);
    if (!target) {
        return { error: `Entity not found: ${args.entity_id}` };
    }
    const { change } = args.mutation;
    const addedTags = (change.add || []).map((t) => t.toLowerCase());
    const simulatesFallible = addedTags.includes('fallible') || addedTags.includes('throws');
    const simulatesAuth = addedTags.includes('auth') || addedTags.includes('secure');
    const fallout = [];
    // BFS upstream through callers, starting at the mutated entity.
    const queue = [[target.id, 0]];
    const visited = new Set([target.id]);
    while (queue.length > 0) {
        const [currentId, depth] = queue.shift();
        if (depth > 5)
            continue; // bound simulation depth
        const callerSet = g.callers.get(currentId);
        if (!callerSet)
            continue;
        for (const callerId of callerSet) {
            if (visited.has(callerId))
                continue;
            visited.add(callerId);
            const caller = g.entityById.get(callerId);
            if (!caller)
                continue;
            const constraints = caller.constraints;
            if (simulatesFallible) {
                // Rule 1: a newly-throwing callee breaks callers with no
                // try/catch; the error then propagates further upstream.
                const handlesErrors = constraints?.errorHandling &&
                    (constraints.errorHandling.tryCatch || constraints.errorHandling.catchClause);
                if (!handlesErrors) {
                    fallout.push({
                        entity: entitySummary(caller),
                        distance: depth + 1,
                        reason: `Calls a newly fallible pipeline but lacks error handling.`,
                        requiredFix: `Wrap call to ${currentId} in try/catch or propagate error.`,
                    });
                    queue.push([callerId, depth + 1]);
                }
            }
            else if (simulatesAuth) {
                // Rule 2: a newly auth-gated callee breaks unauthenticated
                // callers, unless the caller provides auth or is middleware.
                const providesAuth = constraints?.auth || caller.traits?.self?.authContext;
                const isMiddleware = entityLayer(caller) === 'middleware';
                if (!providesAuth && !isMiddleware) {
                    fallout.push({
                        entity: entitySummary(caller),
                        distance: depth + 1,
                        reason: `Upstream path is unauthenticated but downstream requires auth.`,
                        requiredFix: `Add authentication context to ${entityName(caller)} or its router.`,
                    });
                    queue.push([callerId, depth + 1]);
                }
            }
            else {
                // No specific rule matched: keep walking upstream.
                queue.push([callerId, depth + 1]);
            }
        }
    }
    return {
        target: entitySummary(target),
        mutation: args.mutation,
        affectedEntitiesCount: fallout.length,
        _summary: `Simulating adding [${addedTags.join(', ')}] to ${entityName(target)}. This structurally breaks ${fallout.length} upstream entities.`,
        fallout,
    };
}
|
|
1076
|
+
// ─── Tool: query_data_targets (δ Dimension Reverse-Index) ────────
/**
 * Reverse-index into the data (δ) dimension: list which entities read
 * from and which mutate a named data target (table / input / source),
 * matched case-insensitively. An entity that both reads and writes is
 * reported as a writer only.
 *
 * @param {{target_name: string, project?: string}} args
 * @returns {object} readers/writers report (each capped at 50) or `{ error }`
 */
export function query_data_targets(args) {
    const projErr = validateProject(args.project);
    if (projErr)
        return { error: projErr };
    const loader = getLoader();
    const needle = args.target_name.toLowerCase();
    const readers = [];
    const writers = [];
    for (const e of loader.getEntities(args.project)) {
        const data = e.data;
        if (!data)
            continue;
        let reads = false;
        let writes = false;
        // Touching a table without an explicit mutation counts as a read.
        if (Array.isArray(data.tables) && data.tables.some((t) => String(t).toLowerCase() === needle)) {
            reads = true;
        }
        // `inputs` (structured) takes precedence over the legacy `sources` form.
        if (Array.isArray(data.inputs)) {
            if (data.inputs.some((i) => i && typeof i === 'object' && String(i.name || i.source).toLowerCase() === needle)) {
                reads = true;
            }
        }
        else if (Array.isArray(data.sources)) {
            if (data.sources.some((s) => String(s).toLowerCase() === needle)) {
                reads = true;
            }
        }
        // Mutations targeting the name mark the entity as a writer.
        if (Array.isArray(data.mutations)
            && data.mutations.some((m) => m && typeof m === 'object' && String(m.target).toLowerCase() === needle)) {
            writes = true;
        }
        if (writes) {
            writers.push(entitySummary(e));
        }
        else if (reads) {
            readers.push(entitySummary(e));
        }
    }
    return {
        target: args.target_name,
        totalInteractions: readers.length + writers.length,
        writersCount: writers.length,
        readersCount: readers.length,
        _summary: `Found ${writers.length} entities mutating and ${readers.length} entities reading data target '${args.target_name}' (δ dimension reverse-index)`,
        writers: writers.slice(0, 50),
        readers: readers.slice(0, 50),
    };
}
|
|
1130
|
+
// ─── Tool: find_exposure_leaks (χ Context Dimension) ─────────────
/**
 * Find visibility (χ) leaks: public/API-exposed entities that call
 * directly into entities marked private, bypassing internal boundaries.
 *
 * @param {{project?: string}} [args]
 * @returns {object} leak report (capped at 100 entries) or `{ error }`
 */
export function find_exposure_leaks(args) {
    const projErr = validateProject(args?.project);
    if (projErr)
        return { error: projErr };
    const g = getGraph(args?.project);
    const leaks = [];
    for (const [callerId, calleeIds] of g.callees) {
        const caller = g.entityById.get(callerId);
        if (!caller)
            continue;
        // Only public/API surfaces can "leak" into private internals.
        const visibility = caller.context?.visibility || caller.context?.exposure;
        if (visibility !== 'public' && visibility !== 'api')
            continue;
        for (const calleeId of calleeIds) {
            const callee = g.entityById.get(calleeId);
            if (callee && callee.context?.visibility === 'private') {
                leaks.push({
                    publicCaller: entitySummary(caller),
                    privateCallee: entitySummary(callee),
                    issue: `Exposure Leak: Public/API entity directly calls deeply private entity.`,
                });
            }
        }
    }
    return {
        totalLeaks: leaks.length,
        _summary: `Found ${leaks.length} architectural visibility leaks where public edges bypass internal boundaries to reach private entities (χ dimension)`,
        leaks: leaks.slice(0, 100),
    };
}
|
|
1164
|
+
// ─── Tool: find_semantic_clones (Σ Logic Dimension) ──────────────
import { createHash } from 'crypto';
/**
 * Detect groups of entities whose semantics (Σ) AST shape is structurally
 * identical, ignoring source locations, identifiers, literal values, and
 * ids. Entities with fewer than `min_complexity` semantic nodes are
 * skipped to reduce noise.
 *
 * @param {{project?: string, min_complexity?: number}} [args]
 * @returns {object} clone-group report (capped at 50 groups) or `{ error }`
 */
export function find_semantic_clones(args) {
    const projErr = validateProject(args?.project);
    if (projErr)
        return { error: projErr };
    const loader = getLoader();
    const entities = loader.getEntities(args?.project);
    const { min_complexity = 5 } = args || {};
    // sha256 of the normalized semantics shape → entities sharing that shape
    const byShape = new Map();
    for (const e of entities) {
        if (!Array.isArray(e.semantics) || e.semantics.length < min_complexity) {
            continue;
        }
        // Strip locations, names, values, and ids so only the structural
        // AST shape contributes to the hash. A production version would
        // also canonicalize variable identifiers ($1, $2, ...).
        const shape = JSON.stringify(e.semantics, (key, value) =>
            (key === 'loc' || key === 'name' || key === 'value' || key === 'id')
                ? undefined
                : value);
        const digest = createHash('sha256').update(shape).digest('hex');
        const bucket = byShape.get(digest);
        if (bucket) {
            bucket.push(e);
        }
        else {
            byShape.set(digest, [e]);
        }
    }
    // Any bucket with more than one member is a clone group.
    const clones = [];
    for (const group of byShape.values()) {
        if (group.length > 1) {
            clones.push({
                count: group.length,
                entities: group.map((e) => entitySummary(e)),
            });
        }
    }
    // Largest duplication groups first.
    clones.sort((a, b) => b.count - a.count);
    const totalDuplicatedEntities = clones.reduce((sum, c) => sum + c.count, 0);
    return {
        cloneGroupsFound: clones.length,
        totalDuplicatedEntities,
        _summary: `Found ${clones.length} semantic clone groups involving ${totalDuplicatedEntities} total entities (Σ dimension analysis)`,
        cloneGroups: clones.slice(0, 50),
    };
}
|
package/dist/types.d.ts
CHANGED
|
@@ -11,6 +11,10 @@ export interface JstfEntity {
|
|
|
11
11
|
_sourceFile?: string | null;
|
|
12
12
|
_sourceLanguage?: string;
|
|
13
13
|
_project?: string;
|
|
14
|
+
_sourceLocation?: {
|
|
15
|
+
startLine: number;
|
|
16
|
+
endLine: number;
|
|
17
|
+
};
|
|
14
18
|
/** Structure: function shape, signature, modifiers */
|
|
15
19
|
struct?: {
|
|
16
20
|
name?: string;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@papyruslabsai/seshat-mcp",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.7.0",
|
|
4
4
|
"description": "Semantic MCP server — exposes a codebase's structure, dependencies, and constraints as queryable tools",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -30,6 +30,12 @@
|
|
|
30
30
|
"url": "https://github.com/papyruslabs-ai/seshat.git",
|
|
31
31
|
"directory": "packages/seshat-mcp"
|
|
32
32
|
},
|
|
33
|
-
"keywords": [
|
|
33
|
+
"keywords": [
|
|
34
|
+
"mcp",
|
|
35
|
+
"semantic",
|
|
36
|
+
"code-analysis",
|
|
37
|
+
"seshat",
|
|
38
|
+
"static-analysis"
|
|
39
|
+
],
|
|
34
40
|
"license": "MIT"
|
|
35
41
|
}
|