@danielblomma/cortex-mcp 1.7.2 → 2.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -24
- package/bin/cortex.mjs +679 -32
- package/bin/style.mjs +349 -0
- package/package.json +4 -3
- package/scaffold/mcp/src/cli/enterprise-setup.ts +124 -0
- package/scaffold/mcp/src/cli/govern.ts +987 -0
- package/scaffold/mcp/src/cli/run.ts +306 -0
- package/scaffold/mcp/src/cli/telemetry-test.ts +158 -0
- package/scaffold/mcp/src/cli/ungoverned-detector.ts +168 -0
- package/scaffold/mcp/src/core/audit/query.ts +81 -0
- package/scaffold/mcp/src/core/audit/writer.ts +68 -0
- package/scaffold/mcp/src/core/config.ts +329 -0
- package/scaffold/mcp/src/core/index.ts +34 -0
- package/scaffold/mcp/src/core/license.ts +202 -0
- package/scaffold/mcp/src/core/policy/enforce.ts +98 -0
- package/scaffold/mcp/src/core/policy/injection.ts +229 -0
- package/scaffold/mcp/src/core/policy/store.ts +197 -0
- package/scaffold/mcp/src/core/rbac/check.ts +40 -0
- package/scaffold/mcp/src/core/telemetry/collector.ts +408 -0
- package/scaffold/mcp/src/core/validators/builtins.ts +711 -0
- package/scaffold/mcp/src/core/validators/config.ts +47 -0
- package/scaffold/mcp/src/core/validators/engine.ts +199 -0
- package/scaffold/mcp/src/core/validators/evaluators/code_comments.ts +294 -0
- package/scaffold/mcp/src/core/validators/evaluators/regex.ts +144 -0
- package/scaffold/mcp/src/daemon/client.ts +155 -0
- package/scaffold/mcp/src/daemon/egress-proxy.ts +331 -0
- package/scaffold/mcp/src/daemon/heartbeat-pusher.ts +147 -0
- package/scaffold/mcp/src/daemon/heartbeat-tracker.ts +223 -0
- package/scaffold/mcp/src/daemon/host-events-pusher.ts +285 -0
- package/scaffold/mcp/src/daemon/main.ts +435 -0
- package/scaffold/mcp/src/daemon/paths.ts +41 -0
- package/scaffold/mcp/src/daemon/protocol.ts +101 -0
- package/scaffold/mcp/src/daemon/server.ts +227 -0
- package/scaffold/mcp/src/daemon/sync-checker.ts +213 -0
- package/scaffold/mcp/src/daemon/ungoverned-scanner.ts +149 -0
- package/scaffold/mcp/src/enterprise/audit/push.ts +84 -0
- package/scaffold/mcp/src/enterprise/index.ts +386 -0
- package/scaffold/mcp/src/enterprise/model/deploy.ts +33 -0
- package/scaffold/mcp/src/enterprise/policy/sync.ts +146 -0
- package/scaffold/mcp/src/enterprise/privacy/boundary.ts +214 -0
- package/scaffold/mcp/src/enterprise/reviews/push.ts +79 -0
- package/scaffold/mcp/src/enterprise/telemetry/sync.ts +73 -0
- package/scaffold/mcp/src/enterprise/tools/enterprise.ts +1031 -0
- package/scaffold/mcp/src/enterprise/tools/walk.ts +79 -0
- package/scaffold/mcp/src/enterprise/violations/push.ts +102 -0
- package/scaffold/mcp/src/enterprise/workflow/push.ts +60 -0
- package/scaffold/mcp/src/enterprise/workflow/state.ts +535 -0
- package/scaffold/mcp/src/hooks/pre-compact.ts +54 -0
- package/scaffold/mcp/src/hooks/pre-tool-use.ts +96 -0
- package/scaffold/mcp/src/hooks/session-end.ts +73 -0
- package/scaffold/mcp/src/hooks/session-start.ts +78 -0
- package/scaffold/mcp/src/hooks/shared.ts +134 -0
- package/scaffold/mcp/src/hooks/stop.ts +60 -0
- package/scaffold/mcp/src/hooks/user-prompt-submit.ts +64 -0
- package/scaffold/mcp/src/loadGraph.ts +2 -0
- package/scaffold/mcp/src/plugin.ts +150 -0
- package/scaffold/mcp/src/server.ts +218 -7
- package/scaffold/mcp/tests/copilot-shim.test.mjs +146 -0
- package/scaffold/mcp/tests/daemon-client.test.mjs +32 -0
- package/scaffold/mcp/tests/egress-proxy.test.mjs +239 -0
- package/scaffold/mcp/tests/enterprise-config.test.mjs +154 -0
- package/scaffold/mcp/tests/govern-install.test.mjs +320 -0
- package/scaffold/mcp/tests/govern-repair.test.mjs +157 -0
- package/scaffold/mcp/tests/govern-status.test.mjs +538 -0
- package/scaffold/mcp/tests/govern.test.mjs +74 -0
- package/scaffold/mcp/tests/heartbeat-pusher.test.mjs +154 -0
- package/scaffold/mcp/tests/heartbeat-tracker.test.mjs +237 -0
- package/scaffold/mcp/tests/host-events-pusher.test.mjs +347 -0
- package/scaffold/mcp/tests/policy-check.test.mjs +220 -0
- package/scaffold/mcp/tests/repo-name.test.mjs +134 -0
- package/scaffold/mcp/tests/run.test.mjs +109 -0
- package/scaffold/mcp/tests/sync-checker.test.mjs +188 -0
- package/scaffold/mcp/tests/telemetry-collector.test.mjs +30 -0
- package/scaffold/mcp/tests/ungoverned-detector.test.mjs +191 -0
- package/scaffold/mcp/tests/ungoverned-scanner.test.mjs +198 -0
- package/scaffold/scripts/bootstrap.sh +0 -11
- package/scaffold/scripts/doctor.sh +24 -4
- package/types.js +5 -0
- package/docs/MCP_MARKETPLACE.md +0 -160
|
@@ -0,0 +1,435 @@
|
|
|
1
|
+
import { readFileSync, existsSync, writeFileSync, mkdirSync, rmSync } from "node:fs";
|
|
2
|
+
import { basename, join } from "node:path";
|
|
3
|
+
import { randomUUID } from "node:crypto";
|
|
4
|
+
import { CortexDaemon } from "./server.js";
|
|
5
|
+
import type {
|
|
6
|
+
PolicyCheckPayload,
|
|
7
|
+
PolicyCheckResult,
|
|
8
|
+
TelemetryFlushPayload,
|
|
9
|
+
TelemetryFlushResult,
|
|
10
|
+
AuditLogPayload,
|
|
11
|
+
AuditLogResult,
|
|
12
|
+
} from "./protocol.js";
|
|
13
|
+
import { loadEnterpriseConfig, resolveEnterpriseActivation } from "../core/config.js";
|
|
14
|
+
import { pushMetrics } from "../enterprise/telemetry/sync.js";
|
|
15
|
+
import { TelemetryCollector, type TelemetryMetrics } from "../core/telemetry/collector.js";
|
|
16
|
+
import { AuditWriter, type AuditEntry } from "../core/audit/writer.js";
|
|
17
|
+
import { PolicyStore } from "../core/policy/store.js";
|
|
18
|
+
import {
|
|
19
|
+
enforceInjectionPolicy,
|
|
20
|
+
isInjectionDefenseActive,
|
|
21
|
+
} from "../core/policy/enforce.js";
|
|
22
|
+
import { syncFromCloud } from "../enterprise/policy/sync.js";
|
|
23
|
+
import { startUngovernedScanner } from "./ungoverned-scanner.js";
|
|
24
|
+
import {
|
|
25
|
+
HeartbeatTracker,
|
|
26
|
+
writeTamperLock,
|
|
27
|
+
emitTamperAudit,
|
|
28
|
+
} from "./heartbeat-tracker.js";
|
|
29
|
+
import { startSyncTimer } from "./sync-checker.js";
|
|
30
|
+
import { startHostEventsPusher } from "./host-events-pusher.js";
|
|
31
|
+
import { startEgressProxy } from "./egress-proxy.js";
|
|
32
|
+
import { startHeartbeatPusher } from "./heartbeat-pusher.js";
|
|
33
|
+
import type { HeartbeatPayload, HeartbeatResult } from "./protocol.js";
|
|
34
|
+
|
|
35
|
+
/**
 * Daemon entry point. Run by `cortex daemon start` (or auto-spawned by
 * the first hook that needs it).
 *
 * v2.0.0: policy.check enforces the prompt-injection defense when the
 * project has it active (see policyCheck below). telemetry.flush is fully
 * wired — the Stop hook now reliably pushes metrics.json even if MCP died
 * abruptly.
 */
|
44
|
+
function extractStringFields(value: unknown, out: string[] = []): string[] {
|
|
45
|
+
if (typeof value === "string") {
|
|
46
|
+
out.push(value);
|
|
47
|
+
} else if (Array.isArray(value)) {
|
|
48
|
+
for (const v of value) extractStringFields(v, out);
|
|
49
|
+
} else if (value && typeof value === "object") {
|
|
50
|
+
for (const v of Object.values(value as Record<string, unknown>)) {
|
|
51
|
+
extractStringFields(v, out);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
return out;
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
async function policyCheck(
|
|
58
|
+
payload: PolicyCheckPayload,
|
|
59
|
+
): Promise<PolicyCheckResult> {
|
|
60
|
+
if (!payload.cwd) return { allow: true };
|
|
61
|
+
const contextDir = join(payload.cwd, ".context");
|
|
62
|
+
if (!existsSync(contextDir)) return { allow: true };
|
|
63
|
+
|
|
64
|
+
const store = new PolicyStore(contextDir);
|
|
65
|
+
const policies = store.getMergedPolicies();
|
|
66
|
+
if (!isInjectionDefenseActive(policies)) {
|
|
67
|
+
return { allow: true };
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
const haystack = extractStringFields(payload.input).join("\n");
|
|
71
|
+
if (!haystack) return { allow: true };
|
|
72
|
+
|
|
73
|
+
const result = enforceInjectionPolicy(haystack, policies);
|
|
74
|
+
if (result.allowed) return { allow: true };
|
|
75
|
+
|
|
76
|
+
const topMatch = result.scan.matches[0];
|
|
77
|
+
const reason = topMatch
|
|
78
|
+
? `prompt-injection-defense: ${topMatch.category} (${topMatch.matched.slice(0, 80)})`
|
|
79
|
+
: "prompt-injection-defense: flagged";
|
|
80
|
+
return { allow: false, reason };
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
function readMetrics(contextDir: string): TelemetryMetrics | null {
|
|
84
|
+
const path = join(contextDir, "telemetry", "metrics.json");
|
|
85
|
+
if (!existsSync(path)) return null;
|
|
86
|
+
try {
|
|
87
|
+
return JSON.parse(readFileSync(path, "utf8")) as TelemetryMetrics;
|
|
88
|
+
} catch {
|
|
89
|
+
return null;
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
// Pending-push state: snapshot + push_id are written to disk before the
|
|
94
|
+
// network call. If the daemon crashes mid-push, the next tick replays the
|
|
95
|
+
// same push_id so the server can deduplicate.
|
|
96
|
+
type PendingPush = {
|
|
97
|
+
snapshot: TelemetryMetrics;
|
|
98
|
+
push_id: string;
|
|
99
|
+
written_at: string;
|
|
100
|
+
};
|
|
101
|
+
|
|
102
|
+
function pendingPushPath(contextDir: string): string {
|
|
103
|
+
return join(contextDir, "telemetry", "pending-push.json");
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
function readPendingPush(contextDir: string): PendingPush | null {
|
|
107
|
+
const path = pendingPushPath(contextDir);
|
|
108
|
+
if (!existsSync(path)) return null;
|
|
109
|
+
try {
|
|
110
|
+
return JSON.parse(readFileSync(path, "utf8")) as PendingPush;
|
|
111
|
+
} catch {
|
|
112
|
+
return null;
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
function writePendingPush(contextDir: string, pending: PendingPush): void {
|
|
117
|
+
const path = pendingPushPath(contextDir);
|
|
118
|
+
mkdirSync(join(contextDir, "telemetry"), { recursive: true });
|
|
119
|
+
writeFileSync(path, JSON.stringify(pending, null, 2), "utf8");
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
function deletePendingPush(contextDir: string): void {
|
|
123
|
+
const path = pendingPushPath(contextDir);
|
|
124
|
+
try {
|
|
125
|
+
rmSync(path, { force: true });
|
|
126
|
+
} catch {
|
|
127
|
+
// best effort
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
function ackOnDisk(contextDir: string, pushed: TelemetryMetrics): void {
|
|
132
|
+
const collector = new TelemetryCollector(contextDir, pushed.client_version || "unknown");
|
|
133
|
+
collector.acknowledgePush(pushed);
|
|
134
|
+
collector.flush();
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
// Per-cwd exponential backoff so a flapping endpoint doesn't get hammered.
|
|
138
|
+
// 1m, 2m, 4m, 8m, 16m, cap 30m. Reset on success.
|
|
139
|
+
type TelemetryBackoffState = { nextPushAt: number; consecutiveFailures: number };
|
|
140
|
+
const telemetryBackoff = new Map<string, TelemetryBackoffState>();
|
|
141
|
+
const TELEMETRY_BACKOFF_BASE_MS = 60_000;
|
|
142
|
+
const TELEMETRY_BACKOFF_CAP_MS = 30 * 60_000;
|
|
143
|
+
|
|
144
|
+
function shouldSkipTelemetryPush(cwd: string, now = Date.now()): boolean {
|
|
145
|
+
const state = telemetryBackoff.get(cwd);
|
|
146
|
+
return state ? now < state.nextPushAt : false;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
function recordTelemetryPushOutcome(cwd: string, success: boolean, now = Date.now()): void {
|
|
150
|
+
if (success) {
|
|
151
|
+
telemetryBackoff.delete(cwd);
|
|
152
|
+
return;
|
|
153
|
+
}
|
|
154
|
+
const prev = telemetryBackoff.get(cwd) ?? { nextPushAt: 0, consecutiveFailures: 0 };
|
|
155
|
+
const failures = prev.consecutiveFailures + 1;
|
|
156
|
+
const delay = Math.min(TELEMETRY_BACKOFF_BASE_MS * 2 ** (failures - 1), TELEMETRY_BACKOFF_CAP_MS);
|
|
157
|
+
telemetryBackoff.set(cwd, {
|
|
158
|
+
consecutiveFailures: failures,
|
|
159
|
+
nextPushAt: now + delay,
|
|
160
|
+
});
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/**
 * Pushes the project's accumulated telemetry metrics to the enterprise
 * endpoint. Returns { flushed: false } without side effects when: the
 * project has no `.context` dir, enterprise mode is inactive, telemetry is
 * disabled, endpoint/api_key are missing, or `cwd` is still inside its
 * failure-backoff window. The push is crash-safe: a pending-push marker is
 * written to disk before the network call, and a leftover marker is
 * retried first with its original push_id so the server can deduplicate.
 */
async function telemetryFlush(
  payload: TelemetryFlushPayload,
): Promise<TelemetryFlushResult> {
  const cwd = payload.cwd ?? process.cwd();
  const contextDir = join(cwd, ".context");

  if (!existsSync(contextDir)) {
    return { flushed: false, events_pushed: 0 };
  }

  const config = loadEnterpriseConfig(contextDir);
  const activation = resolveEnterpriseActivation(config);

  if (!activation.active || !config.telemetry.enabled) {
    // Community mode or telemetry disabled → nothing to push.
    return { flushed: false, events_pushed: 0 };
  }

  if (!config.telemetry.endpoint || !config.telemetry.api_key) {
    // Enterprise active but no destination configured — silently skip.
    return { flushed: false, events_pushed: 0 };
  }

  if (shouldSkipTelemetryPush(cwd)) {
    // Still inside the exponential-backoff window after earlier failures.
    return { flushed: false, events_pushed: 0 };
  }

  const repo = basename(cwd);
  const endpoint = config.telemetry.endpoint;
  const apiKey = config.telemetry.api_key;

  // Recovery: if a pending push exists, retry it first with the same
  // push_id so the server can deduplicate against an earlier in-flight
  // attempt that may have crashed before delete.
  const pending = readPendingPush(contextDir);
  if (pending) {
    const result = await pushMetrics(pending.snapshot, endpoint, apiKey, {
      repo,
      session_id: payload.session_id,
      push_id: pending.push_id,
    });
    recordTelemetryPushOutcome(cwd, result.success);
    if (!result.success) {
      process.stderr.write(
        `[cortex-daemon] pending telemetry push retry failed: ${result.error ?? "unknown"}\n`,
      );
      return { flushed: false, events_pushed: 0 };
    }
    // Ack before delete: if we crash between the two, the replay is a
    // harmless duplicate the server drops by push_id.
    ackOnDisk(contextDir, pending.snapshot);
    deletePendingPush(contextDir);
    return { flushed: true, events_pushed: pending.snapshot.total_tool_calls };
  }

  const metrics = readMetrics(contextDir);
  if (!metrics) {
    // No metrics on disk yet — MCP hasn't flushed. Nothing to push.
    return { flushed: false, events_pushed: 0 };
  }

  // Marker goes to disk BEFORE the network call so a crash mid-push is
  // replayable with the same push_id on the next tick.
  const push_id = randomUUID();
  writePendingPush(contextDir, {
    snapshot: metrics,
    push_id,
    written_at: new Date().toISOString(),
  });

  const result = await pushMetrics(metrics, endpoint, apiKey, {
    repo,
    session_id: payload.session_id,
    push_id,
  });

  recordTelemetryPushOutcome(cwd, result.success);

  if (!result.success) {
    process.stderr.write(
      `[cortex-daemon] telemetry push failed: ${result.error ?? "unknown"}\n`,
    );
    // Pending stays on disk; next tick (after backoff) will retry.
    return { flushed: false, events_pushed: 0 };
  }

  ackOnDisk(contextDir, metrics);
  deletePendingPush(contextDir);

  return {
    flushed: true,
    events_pushed: metrics.total_tool_calls,
  };
}
|
|
252
|
+
|
|
253
|
+
// Per-cwd AuditWriter cache. Daemon serves multiple projects so we don't
|
|
254
|
+
// want to instantiate (and lose buffered state) on every audit.log call.
|
|
255
|
+
const auditWriters = new Map<string, AuditWriter>();
|
|
256
|
+
|
|
257
|
+
function getAuditWriter(cwd: string): AuditWriter {
|
|
258
|
+
const contextDir = join(cwd, ".context");
|
|
259
|
+
let writer = auditWriters.get(contextDir);
|
|
260
|
+
if (!writer) {
|
|
261
|
+
writer = new AuditWriter(contextDir);
|
|
262
|
+
auditWriters.set(contextDir, writer);
|
|
263
|
+
}
|
|
264
|
+
return writer;
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
/**
 * Writes one audit entry for the project at `payload.cwd`. Returns
 * { written: false } when cwd/entry is missing or the project has no
 * `.context` directory. Missing optional counters default to 0;
 * entities_returned/rules_applied are always empty here because the daemon
 * records only the event-shaped fields the hook sent.
 */
async function auditLog(payload: AuditLogPayload): Promise<AuditLogResult> {
  if (!payload.cwd || !payload.entry) {
    return { written: false };
  }

  const contextDir = join(payload.cwd, ".context");
  if (!existsSync(contextDir)) {
    return { written: false };
  }

  const writer = getAuditWriter(payload.cwd);
  const entry: AuditEntry = {
    timestamp: payload.entry.timestamp,
    tool: payload.entry.tool,
    input: payload.entry.input,
    result_count: payload.entry.result_count ?? 0,
    entities_returned: [],
    rules_applied: [],
    duration_ms: payload.entry.duration_ms ?? 0,
    status: payload.entry.status,
    // Narrowing from the looser wire type (plain string) — assumes callers
    // send a valid event_type; NOTE(review): confirm AuditWriter tolerates
    // unknown values.
    event_type: payload.entry.event_type as AuditEntry["event_type"],
    evidence_level: payload.entry.evidence_level,
    resource_type: payload.entry.resource_type,
    session_id: payload.entry.session_id,
    metadata: payload.entry.metadata,
  };

  writer.log(entry);
  return { written: true };
}
|
|
297
|
+
|
|
298
|
+
/**
 * Daemon startup. Registers the four request handlers (policy.check,
 * telemetry.flush, audit.log, heartbeat) with CortexDaemon, then starts
 * the background subsystems: ungoverned-session scanner, sync checker,
 * host-events pusher, heartbeat pusher, egress proxy, periodic telemetry
 * push, and tamper checker. Each interval is overridable via its
 * CORTEX_*_MS / CORTEX_*_S env var and each subsystem can be disabled by
 * setting its CORTEX_DISABLE_* flag to "1". Timers are unref'd so they
 * never keep the process alive on their own.
 */
async function main(): Promise<void> {
  // Phase 6: hook heartbeat tracker (per-session activity record + tamper detect).
  const tracker = new HeartbeatTracker();
  async function heartbeat(payload: HeartbeatPayload): Promise<HeartbeatResult> {
    return tracker.recordHeartbeat(payload);
  }

  const daemon = new CortexDaemon({
    onPolicyCheck: policyCheck,
    onTelemetryFlush: telemetryFlush,
    onAuditLog: auditLog,
    onHeartbeat: heartbeat,
  });
  await daemon.start();

  // Phase 5: Tier 3 ungoverned-session detection. Periodic process scan, audit
  // emit per finding, optional SIGTERM in enforced mode (same-user only).
  // Non-numeric/negative env values fall back to the 60s default.
  const scanInterval = parseInt(process.env.CORTEX_UNGOVERNED_SCAN_MS ?? "", 10);
  const intervalMs = Number.isFinite(scanInterval) && scanInterval > 0 ? scanInterval : 60_000;
  if (process.env.CORTEX_DISABLE_UNGOVERNED_SCAN !== "1") {
    startUngovernedScanner({ cwd: process.cwd(), intervalMs });
  }

  // Phase 6: periodic tamper-checker. For each active session that had at
  // least one tool-fired hook then went silent past missing_threshold_seconds,
  // write .cortex-tamper.lock + audit event. The next SessionStart in
  // enforced mode will refuse to register tools until 'cortex enterprise
  // repair' clears the lock.
  const tamperThreshold = parseInt(process.env.CORTEX_TAMPER_MISSING_THRESHOLD_S ?? "", 10);
  const missingThresholdSeconds =
    Number.isFinite(tamperThreshold) && tamperThreshold > 0 ? tamperThreshold : 300;
  const tamperCheckInterval = parseInt(process.env.CORTEX_TAMPER_CHECK_MS ?? "", 10);
  const tamperCheckMs =
    Number.isFinite(tamperCheckInterval) && tamperCheckInterval > 0 ? tamperCheckInterval : 60_000;
  // Phase 7: periodic sync-version-check + host-events push to cortex-web.
  // Daemon runs as the user post-Fas-3 privilege drop, so sync only checks
  // version availability (writes a notification + audit). Re-applying
  // managed-settings still requires 'sudo cortex enterprise sync'.
  const syncIntervalRaw = parseInt(process.env.CORTEX_SYNC_CHECK_MS ?? "", 10);
  const syncIntervalMs =
    Number.isFinite(syncIntervalRaw) && syncIntervalRaw > 0 ? syncIntervalRaw : 60 * 60 * 1000;
  const pushIntervalRaw = parseInt(process.env.CORTEX_HOST_EVENTS_PUSH_MS ?? "", 10);
  const pushIntervalMs =
    Number.isFinite(pushIntervalRaw) && pushIntervalRaw > 0 ? pushIntervalRaw : 5 * 60 * 1000;
  if (process.env.CORTEX_DISABLE_SYNC_CHECK !== "1") {
    startSyncTimer(process.cwd(), syncIntervalMs);
  }
  if (process.env.CORTEX_DISABLE_HOST_EVENTS_PUSH !== "1") {
    startHostEventsPusher(process.cwd(), pushIntervalMs);
  }

  // Govern host heartbeat — fills host_enrollment on cortex-web so the
  // dashboard at /dashboard/govern actually shows this host.
  const heartbeatRaw = parseInt(process.env.CORTEX_HEARTBEAT_PUSH_MS ?? "", 10);
  const heartbeatMs =
    Number.isFinite(heartbeatRaw) && heartbeatRaw > 0 ? heartbeatRaw : 5 * 60 * 1000;
  if (process.env.CORTEX_DISABLE_HEARTBEAT_PUSH !== "1") {
    startHeartbeatPusher(process.cwd(), heartbeatMs);
  }

  // Phase 4 task 19: cortex egress proxy. Logs SNI + destination per
  // outbound connection (no TLS termination). cortex run sets
  // HTTPS_PROXY/HTTP_PROXY for the Copilot wrap; other AI CLIs respect
  // these env vars too if a developer wires them in.
  const proxyPortRaw = parseInt(process.env.CORTEX_EGRESS_PROXY_PORT ?? "", 10);
  const proxyPort = Number.isFinite(proxyPortRaw) && proxyPortRaw > 0 ? proxyPortRaw : 18888;
  if (process.env.CORTEX_DISABLE_EGRESS_PROXY !== "1") {
    // Fire-and-forget: proxy failure is logged but never blocks startup.
    startEgressProxy({ cwd: process.cwd(), port: proxyPort })
      .then((handle) => {
        process.stderr.write(
          `[cortex-daemon] egress proxy listening on 127.0.0.1:${handle.port}\n`,
        );
      })
      .catch((err) => {
        process.stderr.write(
          `[cortex-daemon] egress proxy failed to start: ${err instanceof Error ? err.message : String(err)}\n`,
        );
      });
  }

  // Periodic telemetry push. Daemon owns the network call so MCP doesn't
  // race with itself or with this loop. Walks active sessions, dedupes
  // cwds, and runs the existing per-cwd flush handler.
  const telemetryPushRaw = parseInt(process.env.CORTEX_TELEMETRY_PUSH_MS ?? "", 10);
  const telemetryPushMs =
    Number.isFinite(telemetryPushRaw) && telemetryPushRaw > 0
      ? telemetryPushRaw
      : 5 * 60 * 1000;
  if (process.env.CORTEX_DISABLE_TELEMETRY_PUSH !== "1") {
    const telemetryTimer = setInterval(async () => {
      const cwds = new Set<string>();
      for (const [, state] of tracker.getActiveSessions()) {
        if (state.cwd) cwds.add(state.cwd);
      }
      for (const cwd of cwds) {
        try {
          await telemetryFlush({ reason: "interval", cwd });
        } catch (err) {
          process.stderr.write(
            `[cortex-daemon] telemetry push failed for ${cwd}: ${err instanceof Error ? err.message : String(err)}\n`,
          );
        }
      }
    }, telemetryPushMs);
    if (typeof telemetryTimer.unref === "function") telemetryTimer.unref();
  }

  if (process.env.CORTEX_DISABLE_TAMPER_CHECK !== "1") {
    const checkTimer = setInterval(() => {
      const detected = tracker.detectTamper({
        cwds: [process.cwd()],
        missingThresholdSeconds,
      });
      for (const entry of detected) {
        try {
          writeTamperLock(entry.cwd, entry);
        } catch (err) {
          process.stderr.write(
            `[cortex-daemon] failed to write tamper lock: ${err instanceof Error ? err.message : String(err)}\n`,
          );
        }
        // Audit emit is async and best-effort; failures only log.
        void emitTamperAudit(entry.cwd, entry).catch((err) => {
          process.stderr.write(
            `[cortex-daemon] failed to emit tamper audit: ${err instanceof Error ? err.message : String(err)}\n`,
          );
        });
      }
    }, tamperCheckMs);
    if (typeof checkTimer.unref === "function") checkTimer.unref();
  }
}

// Fatal startup errors are reported on stderr and exit with code 1.
main().catch((err) => {
  process.stderr.write(
    `[cortex-daemon] fatal: ${err instanceof Error ? err.message : String(err)}\n`,
  );
  process.exit(1);
});
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import { join } from "node:path";
|
|
2
|
+
import { homedir, tmpdir, userInfo } from "node:os";
|
|
3
|
+
import { mkdirSync } from "node:fs";
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Resolves filesystem locations the daemon and hooks share.
|
|
7
|
+
* Per-user, not per-project — one daemon serves all projects so warm graph
|
|
8
|
+
* + embeddings stay loaded across switches.
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
function safeUid(): string {
|
|
12
|
+
try {
|
|
13
|
+
const info = userInfo();
|
|
14
|
+
if (typeof info.uid === "number" && info.uid >= 0) {
|
|
15
|
+
return String(info.uid);
|
|
16
|
+
}
|
|
17
|
+
return info.username || "anon";
|
|
18
|
+
} catch {
|
|
19
|
+
return "anon";
|
|
20
|
+
}
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
export function daemonDir(): string {
|
|
24
|
+
const dir = join(homedir(), ".cortex");
|
|
25
|
+
mkdirSync(dir, { recursive: true });
|
|
26
|
+
return dir;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
/** Path of the PID file recording the running daemon's process id. */
export function pidFilePath(): string {
  return join(daemonDir(), "daemon.pid");
}
|
|
32
|
+
|
|
33
|
+
/** Path of the daemon's append-only log file. */
export function logFilePath(): string {
  return join(daemonDir(), "daemon.log");
}
|
|
36
|
+
|
|
37
|
+
/** Unix socket the daemon listens on; shared by all of this user's hooks. */
export function socketPath(): string {
  // Keep socket in tmpdir per-user — Linux has 108-char path limit on
  // sockaddr_un.sun_path so we avoid putting it under $HOME.
  return join(tmpdir(), `cortex-${safeUid()}.sock`);
}
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Wire protocol between cortex hooks and the cortex daemon.
|
|
3
|
+
* Newline-delimited JSON over a Unix socket.
|
|
4
|
+
*
|
|
5
|
+
* Each request: { id, type, payload }
|
|
6
|
+
* Each response: { id, ok, result?, error? }
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
/** Every request verb a hook may send over the socket. */
export type RequestType =
  | "ping"
  | "policy.check"
  | "telemetry.flush"
  | "audit.log"
  | "heartbeat"
  | "shutdown";

/** One newline-delimited JSON request frame. `id` correlates the response. */
export type Request<T extends RequestType = RequestType> = {
  id: string;
  type: T;
  payload: unknown;
};

/** Response frame: `result` on ok, `error` message otherwise. */
export type Response = {
  id: string;
  ok: boolean;
  result?: unknown;
  error?: string;
};

/** PreToolUse check request: which tool, in which project, with what input. */
export type PolicyCheckPayload = {
  tool: string;
  cwd: string;
  // Tool-specific input — Claude Code sends this verbatim from PreToolUse
  input: Record<string, unknown>;
};

/** Verdict for a policy check; `reason` is set when `allow` is false. */
export type PolicyCheckResult = {
  allow: boolean;
  reason?: string;
  // Optional context to inject when allowing (rules, ADRs)
  inject?: string[];
};

/** Asks the daemon to push a project's telemetry; `reason` records the trigger. */
export type TelemetryFlushPayload = {
  reason: "stop" | "session_end" | "interval";
  session_id?: string;
  // Working directory of the project whose telemetry should flush.
  // Hook scripts pass Claude Code's cwd through here.
  cwd?: string;
};

/** Flush outcome; `events_pushed` is 0 whenever `flushed` is false. */
export type TelemetryFlushResult = {
  flushed: boolean;
  events_pushed: number;
};

/** One audit event to append to the project's per-day audit log. */
export type AuditLogPayload = {
  cwd: string;
  // Subset of AuditEntry — daemon fills in date-based file routing.
  // Caller passes only the event-shaped fields; daemon writes them
  // as-is to the per-day audit log.
  entry: {
    timestamp: string;
    tool: string;
    input: Record<string, unknown>;
    result_count?: number;
    duration_ms?: number;
    status?: "success" | "error";
    event_type?: string;
    evidence_level?: "required" | "diagnostic";
    resource_type?: string;
    session_id?: string;
    metadata?: Record<string, unknown>;
  };
};

/** Whether the audit entry was appended. */
export type AuditLogResult = {
  written: boolean;
};

/** Liveness ping sent by every hook invocation, keyed by session. */
export type HeartbeatPayload = {
  cli: "claude" | "codex" | "copilot";
  hook:
    | "PreToolUse"
    | "UserPromptSubmit"
    | "SessionStart"
    | "SessionEnd"
    | "Stop"
    | "PreCompact";
  session_id: string;
  instance_id?: string;
  cwd: string;
  ts: string;
};

/** Heartbeat ack; `tamper_lock_active` warns the hook a lock is in place. */
export type HeartbeatResult = {
  recorded: boolean;
  tamper_lock_active?: boolean;
};

/** Client-side timeout for a single request/response round trip. */
export const DEFAULT_REQUEST_TIMEOUT_MS = 5000;
|