@danielblomma/cortex-mcp 2.0.2 → 2.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -255,30 +255,9 @@ Input:
  - `depth` (int, 1-3, default `1`)
  - `include_edges` (bool, default `true`)

- ### `context.find_callers`
+ ### `context.impact`

- Return chunk callers for a chunk or file entity using the indexed call graph.
-
- Input:
-
- - `entity_id` (string, required)
- - `depth` (int, 1-4, default `1`)
- - `include_edges` (bool, default `true`)
-
- ### `context.trace_calls`
-
- Trace call graph neighbors from a chunk or file entity in the requested direction.
-
- Input:
-
- - `entity_id` (string, required)
- - `depth` (int, 1-4, default `2`)
- - `direction` (`"outgoing"` | `"incoming"` | `"both"`, default `"outgoing"`)
- - `include_edges` (bool, default `true`)
-
- ### `context.impact_analysis`
-
- Analyze likely impacted call-graph entities starting from an entity id or search query.
+ Traverse likely impact paths across config, code and SQL starting from an entity id or query.

  Input:

@@ -286,8 +265,9 @@ Input:
  - `query` (string, optional)
  - `depth` (int, 1-4, default `2`)
  - `top_k` (int, 1-20, default `8`)
- - `direction` (`"incoming"` | `"outgoing"` | `"both"`, default `"incoming"`)
  - `include_edges` (bool, default `true`)
+ - `profile` (`"all"` | `"config_only"` | `"config_to_sql"` | `"code_only"` | `"sql_only"`, default `"all"`)
+ - `sort_by` (`"impact_score"` | `"shortest_path"` | `"semantic_score"` | `"graph_score"` | `"trust_score"`, default `"impact_score"`)

  ### `context.get_rules`

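For illustration, a `context.impact` call using the renamed tool and the new `profile`/`sort_by` parameters might look like the sketch below. Only the parameter names, ranges, and defaults come from the README diff above; the surrounding request shape is an assumption, since the exact envelope depends on the MCP client:

```ts
// Hypothetical tools/call arguments for context.impact. The parameter
// names and defaults are from the README above; the wrapper object is
// an assumed MCP-client shape, not part of this package's contract.
const impactCall = {
  name: "context.impact",
  arguments: {
    query: "change the invoice tax calculation", // or entity_id
    depth: 2,                 // int, 1-4, default 2
    top_k: 8,                 // int, 1-20, default 8
    include_edges: true,      // default true
    profile: "config_to_sql", // new in 2.0.4; default "all"
    sort_by: "impact_score",  // new in 2.0.4; default "impact_score"
  },
};
```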
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "@danielblomma/cortex-mcp",
    "mcpName": "io.github.DanielBlomma/cortex",
-   "version": "2.0.2",
+   "version": "2.0.4",
    "description": "Local, repo-scoped context platform for coding assistants. Semantic search, graph relationships, and architectural rule context.",
    "type": "module",
    "author": "Daniel Blomma",
@@ -95,6 +95,43 @@ export type TelemetryEvent = {
    duration_ms?: number;
  };

+ function subtractCounter(current: number, pushed: number): number {
+   return Math.max(0, current - pushed);
+ }
+
+ function hasUsage(metrics: TelemetryMetrics): boolean {
+   if (
+     metrics.total_tool_calls > 0 ||
+     metrics.successful_tool_calls > 0 ||
+     metrics.failed_tool_calls > 0 ||
+     metrics.total_duration_ms > 0 ||
+     metrics.session_starts > 0 ||
+     metrics.session_ends > 0 ||
+     metrics.session_duration_ms_total > 0 ||
+     metrics.searches > 0 ||
+     metrics.related_lookups > 0 ||
+     metrics.caller_lookups > 0 ||
+     metrics.trace_lookups > 0 ||
+     metrics.impact_analyses > 0 ||
+     metrics.rule_lookups > 0 ||
+     metrics.reloads > 0 ||
+     metrics.total_results_returned > 0 ||
+     metrics.estimated_tokens_saved > 0 ||
+     metrics.estimated_tokens_total > 0
+   ) {
+     return true;
+   }
+
+   return Object.values(metrics.tool_metrics).some(
+     (bucket) =>
+       bucket.calls > 0 ||
+       bucket.failures > 0 ||
+       bucket.total_duration_ms > 0 ||
+       bucket.total_results_returned > 0 ||
+       bucket.estimated_tokens_saved > 0,
+   );
+ }
+
  export class TelemetryCollector {
    private metrics: TelemetryMetrics;
    private readonly metricsPath: string;
@@ -167,6 +204,7 @@ export class TelemetryCollector {
        case "context.trace_calls":
          this.metrics.trace_lookups++;
          break;
+       case "context.impact":
        case "context.impact_analysis":
          this.metrics.impact_analyses++;
          break;
@@ -211,7 +249,143 @@ export class TelemetryCollector {
    }

    getMetrics(): TelemetryMetrics {
-     return { ...this.metrics };
+     return {
+       ...this.metrics,
+       tool_metrics: Object.fromEntries(
+         Object.entries(this.metrics.tool_metrics).map(([toolName, bucket]) => [
+           toolName,
+           { ...bucket },
+         ]),
+       ),
+     };
+   }
+
+   acknowledgePush(pushed: TelemetryMetrics): void {
+     const nextToolMetrics: TelemetryMetrics["tool_metrics"] = {};
+     const toolNames = new Set([
+       ...Object.keys(this.metrics.tool_metrics),
+       ...Object.keys(pushed.tool_metrics ?? {}),
+     ]);
+
+     for (const toolName of toolNames) {
+       const currentBucket = this.metrics.tool_metrics[toolName] ?? {
+         calls: 0,
+         failures: 0,
+         total_duration_ms: 0,
+         total_results_returned: 0,
+         estimated_tokens_saved: 0,
+       };
+       const pushedBucket = pushed.tool_metrics?.[toolName] ?? {
+         calls: 0,
+         failures: 0,
+         total_duration_ms: 0,
+         total_results_returned: 0,
+         estimated_tokens_saved: 0,
+       };
+
+       const nextBucket = {
+         calls: subtractCounter(currentBucket.calls, pushedBucket.calls),
+         failures: subtractCounter(currentBucket.failures, pushedBucket.failures),
+         total_duration_ms: subtractCounter(
+           currentBucket.total_duration_ms,
+           pushedBucket.total_duration_ms,
+         ),
+         total_results_returned: subtractCounter(
+           currentBucket.total_results_returned,
+           pushedBucket.total_results_returned,
+         ),
+         estimated_tokens_saved: subtractCounter(
+           currentBucket.estimated_tokens_saved,
+           pushedBucket.estimated_tokens_saved,
+         ),
+       };
+
+       if (
+         nextBucket.calls > 0 ||
+         nextBucket.failures > 0 ||
+         nextBucket.total_duration_ms > 0 ||
+         nextBucket.total_results_returned > 0 ||
+         nextBucket.estimated_tokens_saved > 0
+       ) {
+         nextToolMetrics[toolName] = nextBucket;
+       }
+     }
+
+     const nextMetrics: TelemetryMetrics = {
+       ...this.metrics,
+       period_start: pushed.period_end,
+       total_tool_calls: subtractCounter(
+         this.metrics.total_tool_calls,
+         pushed.total_tool_calls,
+       ),
+       successful_tool_calls: subtractCounter(
+         this.metrics.successful_tool_calls,
+         pushed.successful_tool_calls,
+       ),
+       failed_tool_calls: subtractCounter(
+         this.metrics.failed_tool_calls,
+         pushed.failed_tool_calls,
+       ),
+       total_duration_ms: subtractCounter(
+         this.metrics.total_duration_ms,
+         pushed.total_duration_ms,
+       ),
+       session_starts: subtractCounter(
+         this.metrics.session_starts,
+         pushed.session_starts,
+       ),
+       session_ends: subtractCounter(this.metrics.session_ends, pushed.session_ends),
+       session_duration_ms_total: subtractCounter(
+         this.metrics.session_duration_ms_total,
+         pushed.session_duration_ms_total,
+       ),
+       searches: subtractCounter(this.metrics.searches, pushed.searches),
+       related_lookups: subtractCounter(
+         this.metrics.related_lookups,
+         pushed.related_lookups,
+       ),
+       caller_lookups: subtractCounter(
+         this.metrics.caller_lookups,
+         pushed.caller_lookups,
+       ),
+       trace_lookups: subtractCounter(
+         this.metrics.trace_lookups,
+         pushed.trace_lookups,
+       ),
+       impact_analyses: subtractCounter(
+         this.metrics.impact_analyses,
+         pushed.impact_analyses,
+       ),
+       rule_lookups: subtractCounter(
+         this.metrics.rule_lookups,
+         pushed.rule_lookups,
+       ),
+       reloads: subtractCounter(this.metrics.reloads, pushed.reloads),
+       total_results_returned: subtractCounter(
+         this.metrics.total_results_returned,
+         pushed.total_results_returned,
+       ),
+       estimated_tokens_saved: subtractCounter(
+         this.metrics.estimated_tokens_saved,
+         pushed.estimated_tokens_saved,
+       ),
+       estimated_tokens_total: subtractCounter(
+         this.metrics.estimated_tokens_total,
+         pushed.estimated_tokens_total,
+       ),
+       client_version: this.clientVersion,
+       instance_id: this.instanceId,
+       tool_metrics: nextToolMetrics,
+     };
+
+     if (hasUsage(nextMetrics)) {
+       this.metrics = nextMetrics;
+       this.metrics.period_end = new Date().toISOString();
+     } else {
+       this.metrics = emptyMetrics(this.clientVersion, this.instanceId);
+     }
+
+     this.dirty = true;
    }

    flush(): void {
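The subtract-on-acknowledge design above lets the collector keep recording while a push is in flight: the daemon snapshots metrics, pushes the snapshot, then subtracts exactly the pushed amounts, so events that arrived mid-push are not lost. A minimal sketch of the intended cycle (the `sendSnapshot` call is hypothetical; the method names come from the hunk above):

```ts
// Sketch of the push/acknowledge cycle, assuming a collector built for
// some contextDir. sendSnapshot is a placeholder for the real network
// push; it is not an API of this package.
const collector = new TelemetryCollector(contextDir, "2.0.4");

const snapshot = collector.getMetrics(); // now deep-copies tool_metrics too
await sendSnapshot(snapshot);            // hypothetical network call
collector.acknowledgePush(snapshot);     // subtracts only what was pushed;
                                         // anything recorded meanwhile stays
```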
@@ -1,5 +1,6 @@
- import { readFileSync, existsSync } from "node:fs";
+ import { readFileSync, existsSync, writeFileSync, mkdirSync, rmSync } from "node:fs";
  import { basename, join } from "node:path";
+ import { randomUUID } from "node:crypto";
  import { CortexDaemon } from "./server.js";
  import type {
    PolicyCheckPayload,
@@ -11,7 +12,7 @@ import type {
  } from "./protocol.js";
  import { loadEnterpriseConfig, resolveEnterpriseActivation } from "../core/config.js";
  import { pushMetrics } from "../enterprise/telemetry/sync.js";
- import type { TelemetryMetrics } from "../core/telemetry/collector.js";
+ import { TelemetryCollector, type TelemetryMetrics } from "../core/telemetry/collector.js";
  import { AuditWriter, type AuditEntry } from "../core/audit/writer.js";
  import { PolicyStore } from "../core/policy/store.js";
  import {
@@ -26,6 +27,7 @@ import {
    emitTamperAudit,
  } from "./heartbeat-tracker.js";
  import { startSyncTimer } from "./sync-checker.js";
+ import { startSkillSyncTimer } from "./skill-sync-checker.js";
  import { startHostEventsPusher } from "./host-events-pusher.js";
  import { startEgressProxy } from "./egress-proxy.js";
  import { startHeartbeatPusher } from "./heartbeat-pusher.js";
@@ -89,6 +91,76 @@ function readMetrics(contextDir: string): TelemetryMetrics | null {
    }
  }

+ // Pending-push state: snapshot + push_id are written to disk before the
+ // network call. If the daemon crashes mid-push, the next tick replays the
+ // same push_id so the server can deduplicate.
+ type PendingPush = {
+   snapshot: TelemetryMetrics;
+   push_id: string;
+   written_at: string;
+ };
+
+ function pendingPushPath(contextDir: string): string {
+   return join(contextDir, "telemetry", "pending-push.json");
+ }
+
+ function readPendingPush(contextDir: string): PendingPush | null {
+   const path = pendingPushPath(contextDir);
+   if (!existsSync(path)) return null;
+   try {
+     return JSON.parse(readFileSync(path, "utf8")) as PendingPush;
+   } catch {
+     return null;
+   }
+ }
+
+ function writePendingPush(contextDir: string, pending: PendingPush): void {
+   const path = pendingPushPath(contextDir);
+   mkdirSync(join(contextDir, "telemetry"), { recursive: true });
+   writeFileSync(path, JSON.stringify(pending, null, 2), "utf8");
+ }
+
+ function deletePendingPush(contextDir: string): void {
+   const path = pendingPushPath(contextDir);
+   try {
+     rmSync(path, { force: true });
+   } catch {
+     // best effort
+   }
+ }
+
+ function ackOnDisk(contextDir: string, pushed: TelemetryMetrics): void {
+   const collector = new TelemetryCollector(contextDir, pushed.client_version || "unknown");
+   collector.acknowledgePush(pushed);
+   collector.flush();
+ }
+
+ // Per-cwd exponential backoff so a flapping endpoint doesn't get hammered.
+ // 1m, 2m, 4m, 8m, 16m, cap 30m. Reset on success.
+ type TelemetryBackoffState = { nextPushAt: number; consecutiveFailures: number };
+ const telemetryBackoff = new Map<string, TelemetryBackoffState>();
+ const TELEMETRY_BACKOFF_BASE_MS = 60_000;
+ const TELEMETRY_BACKOFF_CAP_MS = 30 * 60_000;
+
+ function shouldSkipTelemetryPush(cwd: string, now = Date.now()): boolean {
+   const state = telemetryBackoff.get(cwd);
+   return state ? now < state.nextPushAt : false;
+ }
+
+ function recordTelemetryPushOutcome(cwd: string, success: boolean, now = Date.now()): void {
+   if (success) {
+     telemetryBackoff.delete(cwd);
+     return;
+   }
+   const prev = telemetryBackoff.get(cwd) ?? { nextPushAt: 0, consecutiveFailures: 0 };
+   const failures = prev.consecutiveFailures + 1;
+   const delay = Math.min(TELEMETRY_BACKOFF_BASE_MS * 2 ** (failures - 1), TELEMETRY_BACKOFF_CAP_MS);
+   telemetryBackoff.set(cwd, {
+     consecutiveFailures: failures,
+     nextPushAt: now + delay,
+   });
+ }
+
  async function telemetryFlush(
    payload: TelemetryFlushPayload,
  ): Promise<TelemetryFlushResult> {
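The backoff comment above can be checked directly: with a 60-second base, the delay doubles per consecutive failure and clamps at 30 minutes, giving 1m, 2m, 4m, 8m, 16m, then 30m from the sixth failure on. A quick verification of the same formula:

```ts
// delay = min(BASE * 2^(failures - 1), CAP) with BASE = 1m, CAP = 30m.
const BASE_MS = 60_000;
const CAP_MS = 30 * 60_000;
for (let failures = 1; failures <= 7; failures++) {
  const delayMs = Math.min(BASE_MS * 2 ** (failures - 1), CAP_MS);
  console.log(`failure ${failures}: retry in ${delayMs / 60_000}m`);
  // prints 1, 2, 4, 8, 16, 30, 30
}
```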
@@ -111,31 +183,68 @@ async function telemetryFlush(
      return { flushed: false, events_pushed: 0 };
    }

+   if (shouldSkipTelemetryPush(cwd)) {
+     return { flushed: false, events_pushed: 0 };
+   }
+
+   const repo = basename(cwd);
+   const endpoint = config.telemetry.endpoint;
+   const apiKey = config.telemetry.api_key;
+
+   // Recovery: if a pending push exists, retry it first with the same
+   // push_id so the server can deduplicate against an earlier in-flight
+   // attempt that may have crashed before delete.
+   const pending = readPendingPush(contextDir);
+   if (pending) {
+     const result = await pushMetrics(pending.snapshot, endpoint, apiKey, {
+       repo,
+       session_id: payload.session_id,
+       push_id: pending.push_id,
+     });
+     recordTelemetryPushOutcome(cwd, result.success);
+     if (!result.success) {
+       process.stderr.write(
+         `[cortex-daemon] pending telemetry push retry failed: ${result.error ?? "unknown"}\n`,
+       );
+       return { flushed: false, events_pushed: 0 };
+     }
+     ackOnDisk(contextDir, pending.snapshot);
+     deletePendingPush(contextDir);
+     return { flushed: true, events_pushed: pending.snapshot.total_tool_calls };
+   }
+
    const metrics = readMetrics(contextDir);
    if (!metrics) {
-     // No metrics on disk yet — MCP probably hasn't flushed once. Nothing
-     // to push from disk. (MCP's interval flush + session-end push handle
-     // the in-memory case.)
+     // No metrics on disk yet — MCP hasn't flushed. Nothing to push.
      return { flushed: false, events_pushed: 0 };
    }

-   const result = await pushMetrics(
-     metrics,
-     config.telemetry.endpoint,
-     config.telemetry.api_key,
-     {
-       repo: basename(cwd),
-       session_id: payload.session_id,
-     },
-   );
+   const push_id = randomUUID();
+   writePendingPush(contextDir, {
+     snapshot: metrics,
+     push_id,
+     written_at: new Date().toISOString(),
+   });
+
+   const result = await pushMetrics(metrics, endpoint, apiKey, {
+     repo,
+     session_id: payload.session_id,
+     push_id,
+   });
+
+   recordTelemetryPushOutcome(cwd, result.success);

    if (!result.success) {
      process.stderr.write(
        `[cortex-daemon] telemetry push failed: ${result.error ?? "unknown"}\n`,
      );
+     // Pending stays on disk; next tick (after backoff) will retry.
      return { flushed: false, events_pushed: 0 };
    }

+   ackOnDisk(contextDir, metrics);
+   deletePendingPush(contextDir);
+
    return {
      flushed: true,
      events_pushed: metrics.total_tool_calls,
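Because the pending file makes delivery at-least-once, a crashed push can be replayed with the same `push_id`, and the receiving endpoint is expected to deduplicate. The server is not part of this package; purely as a hypothetical illustration of why the replay is safe:

```ts
// Hypothetical server-side dedupe keyed by push_id. Nothing in this diff
// defines the server; this only illustrates the idempotency assumption.
const seenPushIds = new Set<string>();

function ingestTelemetryPush(payload: { push_id?: string }): boolean {
  if (payload.push_id) {
    if (seenPushIds.has(payload.push_id)) return false; // replayed push: drop
    seenPushIds.add(payload.push_id);
  }
  // ...aggregate the payload into storage here...
  return true;
}
```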
@@ -237,6 +346,16 @@ async function main(): Promise<void> {
    if (process.env.CORTEX_DISABLE_HOST_EVENTS_PUSH !== "1") {
      startHostEventsPusher(process.cwd(), pushIntervalMs);
    }
+   // Skills v3: poll cortex-web for org-authored skills, write SKILL.md
+   // files into per-CLI user-scope directories. Runs at the same cadence
+   // as the govern-config sync check by default but is independently
+   // configurable.
+   const skillSyncRaw = parseInt(process.env.CORTEX_SKILL_SYNC_MS ?? "", 10);
+   const skillSyncMs =
+     Number.isFinite(skillSyncRaw) && skillSyncRaw > 0 ? skillSyncRaw : syncIntervalMs;
+   if (process.env.CORTEX_DISABLE_SKILL_SYNC !== "1") {
+     startSkillSyncTimer(process.cwd(), skillSyncMs);
+   }

    // Govern host heartbeat — fills host_enrollment on cortex-web so the
    // dashboard at /dashboard/govern actually shows this host.
@@ -267,6 +386,33 @@ async function main(): Promise<void> {
      });
    }

+   // Periodic telemetry push. Daemon owns the network call so MCP doesn't
+   // race with itself or with this loop. Walks active sessions, dedupes
+   // cwds, and runs the existing per-cwd flush handler.
+   const telemetryPushRaw = parseInt(process.env.CORTEX_TELEMETRY_PUSH_MS ?? "", 10);
+   const telemetryPushMs =
+     Number.isFinite(telemetryPushRaw) && telemetryPushRaw > 0
+       ? telemetryPushRaw
+       : 5 * 60 * 1000;
+   if (process.env.CORTEX_DISABLE_TELEMETRY_PUSH !== "1") {
+     const telemetryTimer = setInterval(async () => {
+       const cwds = new Set<string>();
+       for (const [, state] of tracker.getActiveSessions()) {
+         if (state.cwd) cwds.add(state.cwd);
+       }
+       for (const cwd of cwds) {
+         try {
+           await telemetryFlush({ reason: "interval", cwd });
+         } catch (err) {
+           process.stderr.write(
+             `[cortex-daemon] telemetry push failed for ${cwd}: ${err instanceof Error ? err.message : String(err)}\n`,
+           );
+         }
+       }
+     }, telemetryPushMs);
+     if (typeof telemetryTimer.unref === "function") telemetryTimer.unref();
+   }
+
    if (process.env.CORTEX_DISABLE_TAMPER_CHECK !== "1") {
      const checkTimer = setInterval(() => {
        const detected = tracker.detectTamper({
@@ -0,0 +1,375 @@
+ import {
+   existsSync,
+   mkdirSync,
+   readFileSync,
+   rmSync,
+   writeFileSync,
+ } from "node:fs";
+ import { homedir, hostname } from "node:os";
+ import { join } from "node:path";
+ import { loadEnterpriseConfig } from "../core/config.js";
+ import { writeHostAuditEvent } from "./ungoverned-scanner.js";
+ import { daemonDir } from "./paths.js";
+
+ /**
+  * Skills v3 sync flow — daemon side.
+  *
+  * The daemon polls cortex-web /api/v1/govern/skills/manifest each tick to
+  * learn what skills the org has authored. It diffs against a local state
+  * file, then for each new/changed skill it fetches the assembled SKILL.md
+  * and writes it to the appropriate per-CLI skills directory. Removed
+  * skills are unlinked. Unlike govern-config sync, this does NOT need
+  * root: SKILL.md files live in user-owned directories the daemon can
+  * write to directly.
+  *
+  * Three audit outcomes per tick:
+  *   - skills_unchanged — manifest matches local state
+  *   - skills_synced — at least one skill was written or removed
+  *     (metadata: added/changed/removed counts)
+  *   - skills_sync_failed — network / auth / disk error
+  *
+  * When something changes, a notification file is written so
+  * 'cortex enterprise status' can prompt the user to restart Claude
+  * Code / Codex CLI to pick up the new skills.
+  */
+
+ const STATE_FILENAME = "skills.local.json";
+ const NOTIFICATION_FILENAME = ".skills-update-applied.json";
+
+ const SUPPORTED_CLIS = ["claude", "codex"] as const;
+ type SkillCli = (typeof SUPPORTED_CLIS)[number];
+
+ type ManifestEntry = {
+   name: string;
+   scope: string;
+   updated_at: string;
+ };
+
+ type LocalSkillRecord = {
+   scope: string;
+   updated_at: string;
+   path: string;
+ };
+
+ type LocalSkillsState = {
+   skills: Record<string, LocalSkillRecord>;
+   last_synced_at?: string;
+ };
+
+ export type SkillSyncOutcome =
+   | {
+       kind: "unchanged";
+       cli: SkillCli;
+       count: number;
+     }
+   | {
+       kind: "synced";
+       cli: SkillCli;
+       added: string[];
+       changed: string[];
+       removed: string[];
+     }
+   | {
+       kind: "failed";
+       cli: SkillCli;
+       error: string;
+     };
+
+ function stateFilePath(): string {
+   return join(daemonDir(), STATE_FILENAME);
+ }
+
+ function notificationFilePath(): string {
+   return join(daemonDir(), NOTIFICATION_FILENAME);
+ }
+
+ function readState(): LocalSkillsState {
+   const path = stateFilePath();
+   if (!existsSync(path)) return { skills: {} };
+   try {
+     const parsed = JSON.parse(readFileSync(path, "utf8")) as LocalSkillsState;
+     return { skills: parsed.skills ?? {}, last_synced_at: parsed.last_synced_at };
+   } catch {
+     return { skills: {} };
+   }
+ }
+
+ function writeState(state: LocalSkillsState): void {
+   writeFileSync(
+     stateFilePath(),
+     JSON.stringify(state, null, 2) + "\n",
+     "utf8",
+   );
+ }
+
+ /**
+  * Resolve the on-disk SKILL.md path for a skill. Global skills live under
+  * ~/.claude/skills (Claude Code's user-scope skills directory); cli:codex
+  * skills live under ~/.codex/skills. cli:claude scope is treated as
+  * Claude-only and lands in ~/.claude/skills.
+  */
+ function skillFilePath(scope: string, name: string): string {
+   const root =
+     scope === "cli:codex"
+       ? join(homedir(), ".codex", "skills")
+       : join(homedir(), ".claude", "skills");
+   return join(root, name, "SKILL.md");
+ }
+
+ function shouldSyncForCli(scope: string, cli: SkillCli): boolean {
+   if (scope === "global") return true;
+   return scope === `cli:${cli}`;
+ }
+
+ async function fetchManifest(
+   baseUrl: string,
+   apiKey: string,
+   cli: SkillCli,
+ ): Promise<ManifestEntry[]> {
+   const url = new URL(
+     baseUrl.replace(/\/$/, "") + "/api/v1/govern/skills/manifest",
+   );
+   url.searchParams.set("cli", cli);
+   const res = await fetch(url, {
+     headers: { Authorization: `Bearer ${apiKey}` },
+   });
+   if (!res.ok) {
+     throw new Error(`HTTP ${res.status} ${res.statusText}`);
+   }
+   const body = (await res.json()) as { skills?: ManifestEntry[] };
+   return body.skills ?? [];
+ }
+
+ async function fetchSkillBody(
+   baseUrl: string,
+   apiKey: string,
+   name: string,
+ ): Promise<string> {
+   const url = new URL(
+     baseUrl.replace(/\/$/, "") +
+       "/api/v1/govern/skills/" +
+       encodeURIComponent(name),
+   );
+   const res = await fetch(url, {
+     headers: { Authorization: `Bearer ${apiKey}` },
+   });
+   if (!res.ok) {
+     throw new Error(`HTTP ${res.status} ${res.statusText}`);
+   }
+   return res.text();
+ }
+
+ function writeSkillFile(path: string, content: string): void {
+   const dir = path.replace(/\/SKILL\.md$/, "");
+   mkdirSync(dir, { recursive: true });
+   writeFileSync(path, content, "utf8");
+ }
+
+ function removeSkillFile(path: string): void {
+   if (!existsSync(path)) return;
+   // Remove the per-skill directory (parent of SKILL.md). The skills root
+   // is shared with non-Cortex skills so we never recurse beyond the
+   // skill's own directory.
+   const dir = path.replace(/\/SKILL\.md$/, "");
+   rmSync(dir, { recursive: true, force: true });
+ }
+
+ function writeNotification(data: {
+   added: number;
+   changed: number;
+   removed: number;
+   cli: SkillCli;
+   detected_at: string;
+ }): void {
+   writeFileSync(
+     notificationFilePath(),
+     JSON.stringify(data, null, 2) + "\n",
+     "utf8",
+   );
+ }
+
+ export async function runSkillSyncForCli(
+   cwd: string,
+   cli: SkillCli,
+ ): Promise<SkillSyncOutcome> {
+   const config = loadEnterpriseConfig(join(cwd, ".context"));
+   const apiKey = config.enterprise.api_key.trim();
+   const baseUrl = (config.enterprise.base_url || config.enterprise.endpoint).trim();
+   if (!apiKey || !baseUrl) {
+     return { kind: "failed", cli, error: "enterprise not configured" };
+   }
+
+   let manifest: ManifestEntry[];
+   try {
+     manifest = await fetchManifest(baseUrl, apiKey, cli);
+   } catch (err) {
+     return {
+       kind: "failed",
+       cli,
+       error: err instanceof Error ? err.message : String(err),
+     };
+   }
+
+   const state = readState();
+   const relevantManifest = manifest.filter((entry) =>
+     shouldSyncForCli(entry.scope, cli),
+   );
+   const remoteByName = new Map(relevantManifest.map((e) => [e.name, e]));
+
+   const added: string[] = [];
+   const changed: string[] = [];
+   const removed: string[] = [];
+
+   // Detect adds + changes
+   for (const entry of relevantManifest) {
+     const local = state.skills[entry.name];
+     const isNew = !local;
+     const isChanged =
+       Boolean(local) &&
+       (local.updated_at !== entry.updated_at || local.scope !== entry.scope);
+     if (!isNew && !isChanged) continue;
+
+     let body: string;
+     try {
+       body = await fetchSkillBody(baseUrl, apiKey, entry.name);
+     } catch (err) {
+       return {
+         kind: "failed",
+         cli,
+         error:
+           err instanceof Error
+             ? `fetch ${entry.name}: ${err.message}`
+             : `fetch ${entry.name}: ${String(err)}`,
+       };
+     }
+
+     const path = skillFilePath(entry.scope, entry.name);
+     try {
+       writeSkillFile(path, body);
+     } catch (err) {
+       return {
+         kind: "failed",
+         cli,
+         error:
+           err instanceof Error
+             ? `write ${entry.name}: ${err.message}`
+             : `write ${entry.name}: ${String(err)}`,
+       };
+     }
+
+     state.skills[entry.name] = {
+       scope: entry.scope,
+       updated_at: entry.updated_at,
+       path,
+     };
+     (isNew ? added : changed).push(entry.name);
+   }
+
+   // Detect removes — entries we have locally for this cli but the manifest
+   // dropped (or disabled). We only consider state entries whose scope
+   // matches this cli, so we don't accidentally remove the other CLI's
+   // skills when running a per-cli tick.
+   for (const [name, record] of Object.entries(state.skills)) {
+     if (!shouldSyncForCli(record.scope, cli)) continue;
+     if (remoteByName.has(name)) continue;
+     try {
+       removeSkillFile(record.path);
+     } catch {
+       // best-effort; if unlink fails the next tick will retry
+     }
+     delete state.skills[name];
+     removed.push(name);
+   }
+
+   const totalChanged = added.length + changed.length + removed.length;
+   if (totalChanged === 0) {
+     return { kind: "unchanged", cli, count: relevantManifest.length };
+   }
+
+   state.last_synced_at = new Date().toISOString();
+   writeState(state);
+   return { kind: "synced", cli, added, changed, removed };
+ }
+
+ export async function runSkillSyncOnce(
+   cwd: string,
+   clis: ReadonlyArray<SkillCli> = SUPPORTED_CLIS,
+ ): Promise<SkillSyncOutcome[]> {
+   const outcomes: SkillSyncOutcome[] = [];
+   const now = new Date().toISOString();
+
+   for (const cli of clis) {
+     const outcome = await runSkillSyncForCli(cwd, cli);
+     outcomes.push(outcome);
+
+     const eventBase = {
+       timestamp: now,
+       host_id: hostname(),
+       cli,
+     };
+
+     if (outcome.kind === "unchanged") {
+       await writeHostAuditEvent(cwd, {
+         ...eventBase,
+         event_type: "skills_unchanged",
+         count: outcome.count,
+       }).catch(() => undefined);
+     } else if (outcome.kind === "synced") {
+       await writeHostAuditEvent(cwd, {
+         ...eventBase,
+         event_type: "skills_synced",
+         added: outcome.added,
+         changed: outcome.changed,
+         removed: outcome.removed,
+       }).catch(() => undefined);
+       writeNotification({
+         added: outcome.added.length,
+         changed: outcome.changed.length,
+         removed: outcome.removed.length,
+         cli,
+         detected_at: now,
+       });
+     } else {
+       await writeHostAuditEvent(cwd, {
+         ...eventBase,
+         event_type: "skills_sync_failed",
+         error: outcome.error,
+       }).catch(() => undefined);
+     }
+   }
+
+   // We deliberately leave the notification file in place when this tick
+   // had no changes — it represents "restart pending" from a prior sync,
+   // not current drift. `cortex enterprise status --acknowledge-skills`
+   // (future CLI) will be the explicit clear path.
+
+   return outcomes;
+ }
+
+ export type SkillSyncTimerHandle = {
+   stop(): void;
+ };
+
+ export function startSkillSyncTimer(
+   cwd: string,
+   intervalMs: number,
+ ): SkillSyncTimerHandle {
+   const tick = () => {
+     void runSkillSyncOnce(cwd).catch((err) => {
+       process.stderr.write(
+         `[cortex-daemon] skill sync failed: ${
+           err instanceof Error ? err.message : String(err)
+         }\n`,
+       );
+     });
+   };
+
+   void Promise.resolve().then(tick);
+   const handle = setInterval(tick, intervalMs);
+   if (typeof handle.unref === "function") handle.unref();
+   return {
+     stop() {
+       clearInterval(handle);
+     },
+   };
+ }
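A minimal consumer of the exported skill-sync API might look like this. The function and type names come from the new file above; the import path is an assumption:

```ts
// Usage sketch for the new skill-sync module (import path assumed).
import { runSkillSyncOnce, startSkillSyncTimer } from "./skill-sync-checker.js";

// One-shot sync, e.g. from a CLI command:
const outcomes = await runSkillSyncOnce(process.cwd());
for (const outcome of outcomes) {
  if (outcome.kind === "failed") {
    console.error(`${outcome.cli}: ${outcome.error}`);
  }
}

// Periodic sync, as the daemon wires it up; stop() clears the interval.
const timer = startSkillSyncTimer(process.cwd(), 5 * 60_000);
// ...later, on shutdown:
timer.stop();
```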
@@ -9,7 +9,6 @@ import {
  } from "../core/config.js";
  import { deployBundledModel } from "./model/deploy.js";
  import { TelemetryCollector } from "../core/telemetry/collector.js";
- import { pushMetrics } from "./telemetry/sync.js";
  import { AuditWriter, type AuditEntry } from "../core/audit/writer.js";
  import { pushAuditEvents, queueAuditEvent, setAuditPushContext } from "./audit/push.js";
  import { PolicyStore } from "../core/policy/store.js";
@@ -203,24 +202,10 @@ export function recordToolActivity(activity: ToolActivity): void {
  export async function onSessionEnd(): Promise<void> {
    if (!activeConfig) return;
    const config = activeConfig;
-   if (config.telemetry.enabled && config.telemetry.endpoint && activeCollector) {
+   // Telemetry push is owned by the daemon. MCP only persists in-memory
+   // metrics to disk so the daemon can pick them up on its next push tick.
+   if (config.telemetry.enabled && activeCollector) {
      activeCollector.flush();
-     try {
-       const result = await pushMetrics(
-         activeCollector.getMetrics(),
-         config.telemetry.endpoint,
-         config.telemetry.api_key,
-         {
-           repo: activeRepo ?? undefined,
-           session_id: activeSessionId ?? undefined,
-         },
-       );
-       if (!result.success) {
-         process.stderr.write(`[cortex-enterprise] Shutdown telemetry push failed: ${result.error}\n`);
-       }
-     } catch (err) {
-       process.stderr.write(`[cortex-enterprise] Shutdown telemetry push error: ${err}\n`);
-     }
    }

    await flushComplianceQueues(config, "shutdown");
@@ -351,28 +336,14 @@ export async function register(server: McpServer): Promise<void> {
      process.stderr.write(`[cortex-enterprise] Active: ${features.join(", ")}\n`);
    }

-   // Schedule telemetry flush + push
+   // Telemetry push is owned by the daemon (single network writer).
+   // MCP only persists in-memory metrics to disk on a tick so the daemon
+   // can read and push them.
    if (config.telemetry.enabled) {
-     // Push any accumulated metrics from previous sessions on startup
-     if (config.telemetry.endpoint) {
-       pushMetrics(collector.getMetrics(), config.telemetry.endpoint, config.telemetry.api_key, {
-         repo: activeRepo ?? undefined,
-         session_id: activeSessionId ?? undefined,
-       })
-         .then((r) => { if (!r.success) process.stderr.write(`[cortex-enterprise] Startup telemetry push failed: ${r.error}\n`); })
-         .catch((err) => { process.stderr.write(`[cortex-enterprise] Startup telemetry push error: ${err}\n`); });
-     }
-
      const intervalMs = config.telemetry.interval_minutes * 60000;
-     const timer = setInterval(async () => {
+     const timer = setInterval(() => {
        try {
          collector.flush();
-         if (config.telemetry.endpoint) {
-           await pushMetrics(collector.getMetrics(), config.telemetry.endpoint, config.telemetry.api_key, {
-             repo: activeRepo ?? undefined,
-             session_id: activeSessionId ?? undefined,
-           });
-         }
        } catch (err) {
          process.stderr.write(`[cortex-enterprise] Telemetry flush error: ${err}\n`);
        }
@@ -67,6 +67,7 @@ export const OUTBOUND_DATA_BOUNDARY = {
  type TelemetryPushContext = {
    repo?: string;
    session_id?: string;
+   push_id?: string;
  };

  const MAX_OBJECT_KEYS = 12;
@@ -207,6 +208,7 @@ export function buildTelemetryPushPayload(
      repo: context.repo,
      instance_id: metrics.instance_id,
      session_id: context.session_id,
+     push_id: context.push_id,
      tool_metrics: metrics.tool_metrics,
    };
  }
@@ -11,6 +11,7 @@ export type PushResult = {
  export type PushContext = {
    repo?: string;
    session_id?: string;
+   push_id?: string;
  };

  let lastPush: PushResult | null = null;
@@ -443,6 +443,8 @@ async function main(): Promise<void> {

    if (reset) {
      fs.rmSync(DB_PATH, { recursive: true, force: true });
+     fs.rmSync(`${DB_PATH}.wal`, { force: true });
+     fs.rmSync(`${DB_PATH}.shm`, { force: true });
    }
    fs.mkdirSync(path.dirname(DB_PATH), { recursive: true });
@@ -0,0 +1,30 @@
+ import test from "node:test";
+ import assert from "node:assert/strict";
+ import { mkdtempSync } from "node:fs";
+ import { tmpdir } from "node:os";
+ import path from "node:path";
+
+ import { TelemetryCollector } from "../dist/core/telemetry/collector.js";
+
+ function createContextDir(prefix) {
+   return mkdtempSync(path.join(tmpdir(), prefix));
+ }
+
+ test("TelemetryCollector counts context.impact as an impact analysis", () => {
+   const contextDir = createContextDir("cortex-telemetry-");
+   const collector = new TelemetryCollector(contextDir, "test-version");
+
+   collector.recordEvent({
+     tool: "context.impact",
+     phase: "success",
+     result_count: 2,
+     estimated_tokens_saved: 800,
+     duration_ms: 15,
+   });
+
+   const metrics = collector.getMetrics();
+   assert.equal(metrics.total_tool_calls, 1);
+   assert.equal(metrics.successful_tool_calls, 1);
+   assert.equal(metrics.impact_analyses, 1);
+   assert.equal(metrics.tool_metrics["context.impact"].calls, 1);
+ });
@@ -1,160 +0,0 @@
- # MCP Marketplace Submission
-
- ## Package Information
-
- **Name:** `@danielblomma/cortex-mcp`
- **Description:** Local, repo-scoped context platform for coding assistants. Semantic search, graph relationships, and architectural rule context.
- **Author:** Daniel Blomma
- **License:** MIT
- **Repository:** https://github.com/DanielBlomma/cortex
-
- ## MCP Server Details
-
- ### Tools Provided
-
- 1. **context.search**
-    - Semantic search across indexed entities (files, rules, ADRs)
-    - Hybrid ranking (semantic + graph + trust + recency)
-    - Optional content return for high-signal snippets
-
- 2. **context.get_related**
-    - Graph-based entity relationships
-    - Finds connected rules/files/ADRs with optional edge details
-
- 3. **context.get_rules**
-    - Active rules and architectural decisions
-    - Scope-based filtering
-
- 4. **context.reload**
-    - Hot-reload graph after code changes
-
- ### Advanced Features (Experimental)
-
- Cortex can extract function-level chunks and build call graphs in experimental builds:
-
- - `context.find_callers` - What calls this function?
- - `context.trace_calls` - What does this function call?
- - `context.impact_analysis` - What is impacted if this function changes?
- - Requires JavaScript/TypeScript codebase and semantic chunking/call graph indexing enabled.
-
- Note: these APIs are experimental and are not part of the stable tool contract in this submission.
-
- ### Installation
-
- #### For MCP Marketplace Users
-
- ```bash
- # Install CLI globally
- npm i -g @danielblomma/cortex-mcp
-
- # Navigate to your project
- cd ~/my-project
-
- # Initialize Cortex in your project
- cortex init --bootstrap
- ```
-
- This will:
- - Create `.context/` directory with graph schema
- - Set up MCP server for Claude Desktop/Code
- - Start background sync for automatic updates
- - Build a local context graph for indexed files/rules/ADRs
-
- #### Manual MCP Configuration
-
- If `cortex init` doesn't auto-register, add to Claude's MCP config:
-
- **Claude Desktop** (`~/Library/Application Support/Claude/claude_desktop_config.json`):
- ```json
- {
-   "mcpServers": {
-     "cortex": {
-       "command": "cortex",
-       "args": ["mcp"],
-       "env": {
-         "CORTEX_PROJECT_ROOT": "/absolute/path/to/your-project"
-       }
-     }
-   }
- }
- ```
-
- **Codex** (`~/.config/codex/mcp-config.json`):
- ```json
- {
-   "mcpServers": {
-     "cortex-myproject": {
-       "command": "cortex",
-       "args": ["mcp"],
-       "cwd": "/absolute/path/to/your-project"
-     }
-   }
- }
- ```
-
- ### Usage
-
- Once installed and initialized, Cortex tools are available in Claude:
-
- ```
- "Find files that handle authentication"
- "Show related files for this ADR"
- "What are the active architectural rules for this API?"
- ```
-
- ### Key Features
-
- - **Semantic search**: ranked retrieval across source files, rules and ADRs
- - **Graph relationships**: quickly discover related entities and constraints
- - **Experimental call graph APIs**: function caller/callee and impact traversal in semantic chunking builds
- - **Local & private**: All data stays on your machine
- - **Incremental updates**: Background sync keeps context fresh
- - **Flexible ingestion**: configurable source paths and ranking signals
-
- ### Requirements
-
- - Node.js 18+
- - Git repository (for change tracking)
- - ~50MB disk space per project
-
- ### Unique Value Proposition
-
- Unlike other MCP servers that provide external data (GitHub, web search), Cortex provides **deep, structured knowledge of YOUR codebase**:
-
- - Search with semantic ranking across files, rules, and ADRs
- - Understand rule and ADR dependencies in your repo
- - Enforce architectural rules and ADRs
- - Context that evolves with your code
-
- Perfect for:
- - Large codebases where plain keyword search is not enough
- - Refactoring guided by rule and ADR context
- - Onboarding (architectural rules, design decisions)
- - Code review (what constraints and related entities apply?)
-
- ### Limitations
-
- - **Setup required**: Not instant plug-and-play (needs `cortex init`)
- - **Per-project**: Each repo needs its own Cortex instance
- - **Local only**: No cloud sync (by design - your code stays private)
-
- ### Support
-
- - Issues: https://github.com/DanielBlomma/cortex/issues
- - Docs: https://github.com/DanielBlomma/cortex/blob/main/README.md
-
- ## Submission Checklist
-
- - [x] MCP SDK integration (JSON-RPC over stdio)
- - [x] Tools documented with schemas
- - [ ] npm package published (@danielblomma/cortex-mcp)
- - [x] Marketplace-ready README
- - [ ] Example usage screenshots/GIFs
- - [ ] Submit PR to modelcontextprotocol/servers
-
- ## Next Steps
-
- 1. Publish to npm as `@danielblomma/cortex-mcp`
- 2. Test installation from marketplace perspective
- 3. Submit to https://github.com/modelcontextprotocol/servers
- 4. Add to Anthropic's community registry