@oculisecurity/cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +201 -0
- package/README.md +67 -0
- package/dist/cli.d.ts +18 -0
- package/dist/cli.js +565 -0
- package/dist/commands/init.d.ts +14 -0
- package/dist/commands/init.js +135 -0
- package/dist/commands/report.d.ts +33 -0
- package/dist/commands/report.js +145 -0
- package/dist/commands/serve.d.ts +27 -0
- package/dist/commands/serve.js +163 -0
- package/dist/commands/tail.d.ts +7 -0
- package/dist/commands/tail.js +211 -0
- package/dist/commands/uninstall.d.ts +13 -0
- package/dist/commands/uninstall.js +111 -0
- package/dist/config.d.ts +17 -0
- package/dist/config.js +90 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +35 -0
- package/dist/init.d.ts +9 -0
- package/dist/init.js +50 -0
- package/dist/install/claude-code.d.ts +13 -0
- package/dist/install/claude-code.js +118 -0
- package/dist/install/cursor.d.ts +13 -0
- package/dist/install/cursor.js +119 -0
- package/dist/install/detect.d.ts +5 -0
- package/dist/install/detect.js +64 -0
- package/dist/middleware/auth.d.ts +15 -0
- package/dist/middleware/auth.js +116 -0
- package/dist/routes/adapters/claude-code.d.ts +38 -0
- package/dist/routes/adapters/claude-code.js +125 -0
- package/dist/routes/adapters/cursor.d.ts +21 -0
- package/dist/routes/adapters/cursor.js +139 -0
- package/dist/routes/adapters/index.d.ts +16 -0
- package/dist/routes/adapters/index.js +56 -0
- package/dist/routes/adapters/router.d.ts +31 -0
- package/dist/routes/adapters/router.js +97 -0
- package/dist/routes/adapters/schema.d.ts +141 -0
- package/dist/routes/adapters/schema.js +83 -0
- package/dist/routes/adapters/windsurf.d.ts +6 -0
- package/dist/routes/adapters/windsurf.js +48 -0
- package/dist/routes/admin.d.ts +15 -0
- package/dist/routes/admin.js +399 -0
- package/dist/routes/call.d.ts +13 -0
- package/dist/routes/call.js +68 -0
- package/dist/routes/events.d.ts +7 -0
- package/dist/routes/events.js +125 -0
- package/dist/routes/health.d.ts +2 -0
- package/dist/routes/health.js +12 -0
- package/dist/routes/hooks.d.ts +11 -0
- package/dist/routes/hooks.js +166 -0
- package/dist/routes/mcp.d.ts +10 -0
- package/dist/routes/mcp.js +170 -0
- package/dist/routes/openai-tools.d.ts +9 -0
- package/dist/routes/openai-tools.js +121 -0
- package/dist/server.d.ts +11 -0
- package/dist/server.js +118 -0
- package/dist/services/audit.d.ts +92 -0
- package/dist/services/audit.js +388 -0
- package/dist/services/data-dir.d.ts +7 -0
- package/dist/services/data-dir.js +61 -0
- package/dist/services/local-policy-templates.d.ts +9 -0
- package/dist/services/local-policy-templates.js +47 -0
- package/dist/services/local-policy.d.ts +39 -0
- package/dist/services/local-policy.js +172 -0
- package/dist/services/policy-store.d.ts +82 -0
- package/dist/services/policy-store.js +331 -0
- package/dist/services/policy.d.ts +8 -0
- package/dist/services/policy.js +126 -0
- package/dist/services/ratelimit.d.ts +26 -0
- package/dist/services/ratelimit.js +60 -0
- package/dist/services/sanitizer.d.ts +9 -0
- package/dist/services/sanitizer.js +73 -0
- package/dist/services/sqlite-loader.d.ts +4 -0
- package/dist/services/sqlite-loader.js +16 -0
- package/dist/services/telemetry-log.d.ts +76 -0
- package/dist/services/telemetry-log.js +260 -0
- package/dist/services/tool-executor.d.ts +46 -0
- package/dist/services/tool-executor.js +167 -0
- package/dist/services/upstream.d.ts +18 -0
- package/dist/services/upstream.js +72 -0
- package/dist/types.d.ts +112 -0
- package/dist/types.js +3 -0
- package/package.json +72 -0
- package/public/favicon.svg +4 -0
- package/public/index.html +3893 -0
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.PolicyService = void 0;
|
|
7
|
+
const axios_1 = __importDefault(require("axios"));
|
|
8
|
+
// ---------------------------------------------------------------------------
|
|
9
|
+
// OPA Client
|
|
10
|
+
// ---------------------------------------------------------------------------
|
|
11
|
+
/**
 * Query the OPA `gateway/authz` policy document for a decision.
 *
 * @param {string} opaUrl base URL of the OPA server
 * @param {object} input policy input (tool, upstreamId, args, roles, …)
 * @returns {Promise<object>} normalized decision; every field has a
 *   deny-by-default fallback when OPA returns no/partial result
 */
async function queryOPA(opaUrl, input) {
    const endpoint = `${opaUrl}/v1/data/gateway/authz`;
    // 3s timeout: a hung OPA must not stall gateway calls — the caller
    // catches the failure and uses the built-in fallback policy instead.
    const response = await axios_1.default.post(endpoint, { input }, { timeout: 3000 });
    const verdict = response.data.result ?? {};
    return {
        allow: verdict.allow ?? false,
        reason: verdict.reason ?? 'denied by policy',
        redactions: verdict.redactions ?? {},
        maxArgsSize: verdict.max_args_size ?? 102400,
    };
}
|
|
22
|
+
// ---------------------------------------------------------------------------
|
|
23
|
+
// Built-in fallback policy (mirrors gateway.rego logic in TypeScript)
|
|
24
|
+
// Used when OPA_ENABLED=false or OPA is unreachable.
|
|
25
|
+
// ---------------------------------------------------------------------------
|
|
26
|
+
// Per-upstream tool allowlists: an MCP upstream may only invoke these tools.
const TOOL_ALLOWLISTS = {
    'fs-server': new Set(['readFile', 'listDir', 'writeFile']),
    'http-server': new Set(['fetchUrl', 'postUrl']),
};
// Substrings indicating path traversal, shell injection, template injection,
// or (double-)URL-encoded traversal attempts. Matched case-insensitively.
const DANGEROUS_PATTERNS = [
    '../',
    '..\\',
    ';',
    '&&',
    '||',
    '`',
    '$(',
    '${',
    '%2e%2e',
    '%252e',
];
// Tools with side effects; callers need the 'admin' or 'editor' role.
const WRITE_TOOLS = new Set(['writeFile', 'postUrl']);
// Response fields redacted for non-admin callers.
const SENSITIVE_FIELDS = {
    secretKey: true,
    password: true,
    token: true,
    apiKey: true,
    secret: true,
    privateKey: true,
};
/**
 * Built-in fallback policy (mirrors gateway.rego logic in TypeScript).
 * Used when OPA_ENABLED=false or OPA is unreachable.
 *
 * @param {object} input policy input: `tool`, `upstreamId`, `args`, `roles`
 * @returns {object} decision: `{allow, reason}` plus `redactions`/`maxArgsSize`
 *   on the allow paths
 */
function fallbackPolicy(input) {
    // 1. Tool must be specified
    if (!input.tool) {
        return { allow: false, reason: 'tool not specified' };
    }
    // 2. Hook sources (IDE events) → always allow at gateway level.
    // Enforcement is handled by local policy on the client side.
    // The gateway's role for hooks is audit + centralized policy overlay.
    const isHookSource = input.upstreamId.startsWith('ext:');
    if (isHookSource) {
        return { allow: true, reason: 'allowed', redactions: {}, maxArgsSize: 102400 };
    }
    // ── Everything below applies only to MCP upstream proxy calls ──
    // 3. Tool must be in allowlist for this upstream
    const allowed = TOOL_ALLOWLISTS[input.upstreamId];
    if (!allowed || !allowed.has(input.tool)) {
        return { allow: false, reason: 'tool not in allowlist for this upstream' };
    }
    // 4. Dangerous argument patterns (string args only). The match is
    // case-insensitive; the previous extra case-sensitive `includes` check was
    // redundant — a case-insensitive match is a strict superset of it.
    // `?? {}` guards against a malformed input with no args object.
    for (const [key, val] of Object.entries(input.args ?? {})) {
        if (typeof val !== 'string')
            continue;
        const lowered = val.toLowerCase();
        for (const pattern of DANGEROUS_PATTERNS) {
            if (lowered.includes(pattern.toLowerCase())) {
                return {
                    allow: false,
                    reason: `dangerous argument pattern detected in '${key}'`,
                };
            }
        }
    }
    // 5. Role restriction for write tools (`?? []` guards missing roles —
    // a caller with no roles simply has no write permission).
    const roles = input.roles ?? [];
    if (WRITE_TOOLS.has(input.tool)) {
        const hasWritePermission = roles.includes('admin') || roles.includes('editor');
        if (!hasWritePermission) {
            return {
                allow: false,
                reason: 'role restriction: insufficient permissions for write operations',
            };
        }
    }
    // Allow — non-admins get the sensitive-field redaction map
    const redactions = roles.includes('admin') ? {} : SENSITIVE_FIELDS;
    return {
        allow: true,
        reason: 'allowed',
        redactions,
        maxArgsSize: 102400,
    };
}
|
|
101
|
+
// ---------------------------------------------------------------------------
|
|
102
|
+
// PolicyService — chooses OPA or fallback
|
|
103
|
+
// ---------------------------------------------------------------------------
|
|
104
|
+
/**
 * Policy evaluation entry point — delegates to OPA when enabled, otherwise
 * (or whenever OPA is unreachable) falls back to the built-in policy.
 */
class PolicyService {
    opaUrl;
    opaEnabled;
    /**
     * @param {{opaUrl: string, opaEnabled: boolean}} config policy configuration
     */
    constructor(config) {
        this.opaUrl = config.opaUrl;
        this.opaEnabled = config.opaEnabled;
    }
    /**
     * Evaluate a policy input and return the decision.
     * Never rejects because of OPA connectivity: failures are logged and the
     * built-in fallback policy answers instead.
     */
    async evaluate(input) {
        if (this.opaEnabled) {
            try {
                return await queryOPA(this.opaUrl, input);
            }
            catch (err) {
                // OPA unreachable — warn and fall through to the fallback.
                const detail = err instanceof Error ? err.message : String(err);
                console.warn(`[policy] OPA unreachable (${detail}), using built-in fallback`);
            }
        }
        return fallbackPolicy(input);
    }
}
|
|
126
|
+
exports.PolicyService = PolicyService;
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Token-bucket rate limiter (in-memory, per actor:tool key).
|
|
3
|
+
*
|
|
4
|
+
* Each bucket starts at `capacity` tokens and refills at `refillPerMs` tokens/ms.
|
|
5
|
+
* A successful check consumes 1 token; if the bucket is empty the call is denied.
|
|
6
|
+
*
|
|
7
|
+
* NOTE: This is single-process only. For multi-replica deployments, swap to a
|
|
8
|
+
* shared store (Redis INCR + sliding-window, for example).
|
|
9
|
+
*/
|
|
10
|
+
/** Outcome of a single rate-limit check for one key. */
export interface RateLimitResult {
    /** True when a token was available and has been consumed. */
    allowed: boolean;
    /** Whole tokens left in the bucket after this check (0 when denied). */
    remaining: number;
    /** Approximate ms until at least one token is available */
    retryAfterMs: number;
}
|
|
16
|
+
/**
 * In-memory token-bucket rate limiter keyed by caller-supplied strings
 * (per actor:tool key). Single-process only — see the module header.
 */
export declare class RateLimiter {
    /** key → bucket state (tokens + last refill timestamp). */
    private readonly buckets;
    /** Bucket size in tokens; 0 disables limiting entirely. */
    private readonly capacity;
    /** Refill rate, stored as tokens per millisecond. */
    private readonly refillPerMs;
    /**
     * @param capacity bucket size in tokens (0 = limiting disabled)
     * @param refillPerSecond tokens restored per second
     */
    constructor(capacity: number, refillPerSecond: number);
    /** Consume one token for `key` and report the allow/deny outcome. */
    check(key: string): RateLimitResult;
    /** Prune stale buckets (call periodically to avoid unbounded memory growth) */
    cleanup(maxIdleMs?: number): void;
    /** For testing: reset all buckets */
    reset(): void;
}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Token-bucket rate limiter (in-memory, per actor:tool key).
|
|
4
|
+
*
|
|
5
|
+
* Each bucket starts at `capacity` tokens and refills at `refillPerMs` tokens/ms.
|
|
6
|
+
* A successful check consumes 1 token; if the bucket is empty the call is denied.
|
|
7
|
+
*
|
|
8
|
+
* NOTE: This is single-process only. For multi-replica deployments, swap to a
|
|
9
|
+
* shared store (Redis INCR + sliding-window, for example).
|
|
10
|
+
*/
|
|
11
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
|
+
exports.RateLimiter = void 0;
|
|
13
|
+
class RateLimiter {
|
|
14
|
+
buckets = new Map();
|
|
15
|
+
capacity;
|
|
16
|
+
refillPerMs;
|
|
17
|
+
constructor(capacity, refillPerSecond) {
|
|
18
|
+
this.capacity = capacity;
|
|
19
|
+
this.refillPerMs = refillPerSecond / 1000;
|
|
20
|
+
}
|
|
21
|
+
check(key) {
|
|
22
|
+
// capacity === 0 disables rate limiting (every request allowed). Used by
|
|
23
|
+
// `oculi serve` on localhost so IDE hook bursts don't trip 429s.
|
|
24
|
+
if (this.capacity === 0) {
|
|
25
|
+
return { allowed: true, remaining: Number.POSITIVE_INFINITY, retryAfterMs: 0 };
|
|
26
|
+
}
|
|
27
|
+
const now = Date.now();
|
|
28
|
+
let bucket = this.buckets.get(key);
|
|
29
|
+
if (!bucket) {
|
|
30
|
+
bucket = { tokens: this.capacity, lastRefillMs: now };
|
|
31
|
+
this.buckets.set(key, bucket);
|
|
32
|
+
}
|
|
33
|
+
// Refill based on elapsed time
|
|
34
|
+
const elapsedMs = now - bucket.lastRefillMs;
|
|
35
|
+
bucket.tokens = Math.min(this.capacity, bucket.tokens + elapsedMs * this.refillPerMs);
|
|
36
|
+
bucket.lastRefillMs = now;
|
|
37
|
+
if (bucket.tokens >= 1) {
|
|
38
|
+
bucket.tokens -= 1;
|
|
39
|
+
return { allowed: true, remaining: Math.floor(bucket.tokens), retryAfterMs: 0 };
|
|
40
|
+
}
|
|
41
|
+
// Denied: calculate wait time
|
|
42
|
+
const tokensNeeded = 1 - bucket.tokens;
|
|
43
|
+
const retryAfterMs = Math.ceil(tokensNeeded / this.refillPerMs);
|
|
44
|
+
return { allowed: false, remaining: 0, retryAfterMs };
|
|
45
|
+
}
|
|
46
|
+
/** Prune stale buckets (call periodically to avoid unbounded memory growth) */
|
|
47
|
+
cleanup(maxIdleMs = 3_600_000 /* 1 hour */) {
|
|
48
|
+
const cutoff = Date.now() - maxIdleMs;
|
|
49
|
+
for (const [key, bucket] of this.buckets) {
|
|
50
|
+
if (bucket.lastRefillMs < cutoff) {
|
|
51
|
+
this.buckets.delete(key);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
/** For testing: reset all buckets */
|
|
56
|
+
reset() {
|
|
57
|
+
this.buckets.clear();
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
exports.RateLimiter = RateLimiter;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import { PolicyDecision } from '../types';
/**
 * Apply policy redactions and transforms to an upstream response payload.
 *
 * - redactions: replace matching keys with a `'[REDACTED]'` placeholder
 *   (applied recursively to nested objects and array elements)
 * - transforms: apply truncation, masking, or removal to specific fields
 *
 * Primitives and null/undefined are returned unchanged; objects and arrays
 * are copied, never mutated in place.
 *
 * @param payload - upstream response body
 * @param decision - policy decision carrying optional redaction/transform maps
 * @returns the sanitized copy of `payload`
 */
export declare function sanitizeResponse(payload: unknown, decision: PolicyDecision): unknown;
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.sanitizeResponse = sanitizeResponse;
|
|
4
|
+
/**
|
|
5
|
+
* Apply policy redactions and transforms to an upstream response payload.
|
|
6
|
+
*
|
|
7
|
+
* - redactions: remove keys from the top-level response object (and recursively
|
|
8
|
+
* from nested objects)
|
|
9
|
+
* - transforms: apply truncation, masking, or removal to specific fields
|
|
10
|
+
*/
|
|
11
|
+
// Sentinel returned by applyTransform to signal "drop this key entirely".
const REMOVED = Symbol('REMOVED');
/**
 * Apply policy redactions and transforms to an upstream response payload.
 *
 * - transforms (checked first, they take precedence): truncate, mask, or
 *   remove specific fields
 * - redactions: replace matching keys with '[REDACTED]' so callers can tell
 *   a redaction occurred
 * - nested objects and arrays are sanitized recursively; primitives pass through
 *
 * The input is never mutated; a sanitized copy is returned.
 *
 * @param {unknown} payload upstream response body
 * @param {object} decision policy decision with optional `redactions`/`transforms` maps
 * @returns {unknown} sanitized copy of `payload`
 */
function sanitizeResponse(payload, decision) {
    if (payload === null || payload === undefined)
        return payload;
    if (typeof payload !== 'object')
        return payload;
    if (Array.isArray(payload)) {
        return payload.map((item) => sanitizeResponse(item, decision));
    }
    const obj = payload;
    const result = {};
    const redactions = decision.redactions ?? {};
    const transforms = decision.transforms ?? {};
    for (const [key, value] of Object.entries(obj)) {
        // BUG FIX: use own-property lookups. The previous bare truthiness checks
        // (`transforms[key]`, `redactions[key]`) walked the prototype chain, so a
        // payload key like "toString" or "constructor" matched Object.prototype
        // members: it was routed through applyTransform with a *function* as the
        // transform spec (a no-op) and escaped both redaction and recursive
        // sanitization — leaking nested sensitive fields.
        const transform = Object.hasOwn(transforms, key) ? transforms[key] : undefined;
        if (transform) {
            const transformed = applyTransform(value, transform);
            if (transformed !== REMOVED) {
                result[key] = transformed;
            }
            continue;
        }
        if (Object.hasOwn(redactions, key) && redactions[key]) {
            // Replace value with placeholder rather than deleting the key,
            // so callers know a redaction occurred.
            result[key] = '[REDACTED]';
            continue;
        }
        // Recurse into nested containers; copy primitives as-is.
        if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
            result[key] = sanitizeResponse(value, decision);
        }
        else if (Array.isArray(value)) {
            result[key] = value.map((item) => sanitizeResponse(item, decision));
        }
        else {
            result[key] = value;
        }
    }
    return result;
}
/**
 * Apply a single field transform.
 *
 * @param {unknown} value original field value
 * @param {{type: string, mask?: string, maxLength?: number}} transform transform spec
 * @returns {unknown} transformed value, or the REMOVED sentinel for `type: 'remove'`
 */
function applyTransform(value, transform) {
    switch (transform.type) {
        case 'remove':
            return REMOVED;
        case 'mask':
            // String fields may carry a custom mask; everything else gets '***'.
            if (typeof value === 'string') {
                return transform.mask ?? '***';
            }
            return '***';
        case 'truncate':
            if (typeof value === 'string') {
                const max = transform.maxLength ?? 1000;
                if (value.length > max) {
                    return value.slice(0, max) + `...[truncated ${value.length - max} chars]`;
                }
            }
            return value;
        default:
            // Unknown transform types are a deliberate no-op.
            return value;
    }
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.loadSqlite = loadSqlite;
|
|
4
|
+
// Module-level cache: the native module is loaded at most once.
let cached = null;
/**
 * Lazily load the optional `better-sqlite3` native module.
 *
 * @returns the better-sqlite3 module (cached after the first call)
 * @throws {Error} with install guidance when the native module is unavailable
 */
function loadSqlite() {
    if (cached)
        return cached;
    try {
        // eslint-disable-next-line @typescript-eslint/no-var-requires
        cached = require('better-sqlite3');
    }
    catch {
        throw new Error('better-sqlite3 is not installed. The Oculi gateway server requires a C++ toolchain to build. See https://oculisecurity.com/docs/gateway-install for setup, or use the CLI in local-policy-only mode.');
    }
    return cached;
}
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import { OculiEvent } from '../routes/adapters/schema';
|
|
2
|
+
/**
 * One line of `.oculi/telemetry.jsonl`: a normalized IDE hook event plus the
 * policy verdict recorded for it (the event's raw payload is omitted).
 */
export interface TelemetryLogEntry {
    /** Version of the event schema this entry was written with. */
    schema_version: string;
    /** Which IDE produced the event (copied from OculiEvent.ide_source). */
    ide_source: string;
    actor: string;
    org_id: string;
    /** Name of the IDE hook that fired. */
    hook_event_name: string;
    phase: string;
    session_id: string;
    trace_id?: string;
    /** Tool invoked, when the hook concerns a tool call. */
    tool?: string;
    tool_args?: Record<string, unknown>;
    file_path?: string;
    shell_command?: string;
    mcp_server?: string;
    /** Event timestamp as a string (format per the event schema). */
    timestamp: string;
    duration_ms?: number;
    error?: string;
    context?: {
        workspace?: string;
        conversation_id?: string;
    };
    action?: {
        command?: string;
        content_hash?: string;
    };
    /** Verdict the policy engine returned for this event. */
    policy_decision: 'allow' | 'warn' | 'deny';
    /** Ids of the policy rules that produced the decision. */
    policy_rule_ids: string[];
}
|
|
30
|
+
/**
 * Find the nearest `.oculi/` directory by walking up from `startDir`.
 * If none is found, creates `.oculi/` in startDir (or cwd).
 *
 * @param startDir - directory to begin the upward search from (defaults to cwd)
 * @returns path of the `.oculi/` directory found or created
 */
export declare function findOrCreateOculiDir(startDir?: string): string;
/**
 * Find existing `.oculi/telemetry.jsonl` by walking up. Returns null if none exists.
 *
 * @param startDir - directory to begin the upward search from (defaults to cwd)
 */
export declare function findTelemetryLog(startDir?: string): string | null;
/**
 * Convert an OculiEvent + policy decision into a log entry (omitting raw_payload).
 *
 * @param event - normalized IDE hook event
 * @param decision - policy verdict to record alongside the event
 * @param ruleIds - ids of the policy rules behind the decision (defaults to [])
 */
export declare function eventToLogEntry(event: OculiEvent, decision: 'allow' | 'warn' | 'deny', ruleIds?: string[]): TelemetryLogEntry;
/**
 * Append a telemetry log entry to `.oculi/telemetry.jsonl`.
 * Handles rotation automatically.
 *
 * @param entry - entry to serialize as one JSONL line
 * @param oculiDir - target `.oculi/` directory (discovered/created when omitted)
 * @returns the path of the log file written to
 */
export declare function appendTelemetry(entry: TelemetryLogEntry, oculiDir?: string): string;
/** Filters accepted by readTelemetryLines. */
export interface ReadOptions {
    /** Keep only entries whose timestamp is at or after this instant. */
    since?: Date;
    /** Keep only entries with this policy decision. */
    filter?: 'allow' | 'warn' | 'deny';
}
/**
 * Read telemetry log entries from a file with optional filtering.
 * Lines that fail to parse as JSON are skipped silently.
 */
export declare function readTelemetryLines(logPath: string, opts?: ReadOptions): TelemetryLogEntry[];
/**
 * Get the path to the telemetry log file (creating .oculi/ dir if needed).
 */
export declare function getTelemetryLogPath(oculiDir?: string): string;
60
|
+
/** Result of an incremental, offset-based read of the telemetry log. */
export interface OffsetReadResult {
    /** Entries parsed from complete lines in the read window. */
    entries: TelemetryLogEntry[];
    /** End offset after the last fully-read line (suitable as the next checkpoint). */
    newOffset: number;
    /** Number of lines that failed to parse as JSON. */
    skipped: number;
    /** True iff the file was smaller than the requested offset — likely rotated. */
    rotated: boolean;
}
/**
 * Read telemetry entries starting at a byte offset. Used for incremental
 * replay into sqlite — caller persists `newOffset` to resume next time.
 *
 * If the file is shorter than `offset` (rotation happened since last read),
 * starts from byte 0 and reports `rotated: true`.
 *
 * @param filePath - path to the active telemetry.jsonl file
 * @param offset - byte offset saved from the previous read (0 on first read)
 */
export declare function readTelemetryLinesFromOffset(filePath: string, offset: number): OffsetReadResult;
|
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
36
|
+
exports.findOrCreateOculiDir = findOrCreateOculiDir;
|
|
37
|
+
exports.findTelemetryLog = findTelemetryLog;
|
|
38
|
+
exports.eventToLogEntry = eventToLogEntry;
|
|
39
|
+
exports.appendTelemetry = appendTelemetry;
|
|
40
|
+
exports.readTelemetryLines = readTelemetryLines;
|
|
41
|
+
exports.getTelemetryLogPath = getTelemetryLogPath;
|
|
42
|
+
exports.readTelemetryLinesFromOffset = readTelemetryLinesFromOffset;
|
|
43
|
+
const fs = __importStar(require("fs"));
|
|
44
|
+
const path = __importStar(require("path"));
|
|
45
|
+
const os = __importStar(require("os"));
|
|
46
|
+
// ---------------------------------------------------------------------------
|
|
47
|
+
// Directory discovery
|
|
48
|
+
// ---------------------------------------------------------------------------
|
|
49
|
+
// Base name of the active log file inside `.oculi/`.
const LOG_FILENAME = 'telemetry.jsonl';
// Rotate the active log once it reaches this size.
const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10 MB
// Keep at most this many rotated files (telemetry.1.jsonl … telemetry.5.jsonl).
const MAX_ROTATIONS = 5;
|
|
52
|
+
/**
 * Find the nearest `.oculi/` directory by walking up from `startDir`.
 * Falls back to `~/.oculi` if present; otherwise creates `.oculi/` in
 * `startDir` (or cwd).
 *
 * @param {string} [startDir] directory to begin the upward search from
 * @returns {string} path of the `.oculi/` directory found or created
 */
function findOrCreateOculiDir(startDir) {
    const base = startDir ?? process.cwd();
    // Walk upward looking for an existing .oculi directory.
    for (let current = base; ;) {
        const candidate = path.join(current, '.oculi');
        if (fs.existsSync(candidate) && fs.statSync(candidate).isDirectory()) {
            return candidate;
        }
        const parent = path.dirname(current);
        if (parent === current)
            break; // reached the filesystem root
        current = parent;
    }
    // Fall back to a global ~/.oculi if one exists.
    const globalDir = path.join(os.homedir(), '.oculi');
    if (fs.existsSync(globalDir) && fs.statSync(globalDir).isDirectory()) {
        return globalDir;
    }
    // Nothing found anywhere — create .oculi in the starting directory.
    const created = path.join(base, '.oculi');
    fs.mkdirSync(created, { recursive: true });
    return created;
}
|
|
80
|
+
/**
 * Find an existing `.oculi/telemetry.jsonl` by walking up from `startDir`,
 * then checking `~/.oculi`. Returns null if none exists anywhere.
 *
 * @param {string} [startDir] directory to begin the upward search from
 * @returns {string|null} path to the log file, or null
 */
function findTelemetryLog(startDir) {
    for (let current = startDir ?? process.cwd(); ;) {
        const candidate = path.join(current, '.oculi', LOG_FILENAME);
        if (fs.existsSync(candidate))
            return candidate;
        const parent = path.dirname(current);
        if (parent === current)
            break; // reached the filesystem root
        current = parent;
    }
    const globalCandidate = path.join(os.homedir(), '.oculi', LOG_FILENAME);
    return fs.existsSync(globalCandidate) ? globalCandidate : null;
}
|
|
99
|
+
// ---------------------------------------------------------------------------
|
|
100
|
+
// Rotation
|
|
101
|
+
// ---------------------------------------------------------------------------
|
|
102
|
+
/** Path of the n-th rotated log file (`telemetry.N.jsonl`) in `dir`. */
function rotatedName(dir, n) {
    return path.join(dir, `telemetry.${n}.jsonl`);
}
/**
 * Rotate `logPath` if it has reached MAX_FILE_SIZE: drop the oldest rotation,
 * shift each `telemetry.N.jsonl` to N+1, then rename the active file to `.1`.
 * Missing/unstat-able files are a silent no-op.
 */
function rotateIfNeeded(logPath) {
    let size;
    try {
        size = fs.statSync(logPath).size;
    }
    catch {
        return; // file missing or unreadable — nothing to rotate
    }
    if (size < MAX_FILE_SIZE)
        return;
    const dir = path.dirname(logPath);
    // Delete the oldest rotation if we're at the cap.
    const oldest = rotatedName(dir, MAX_ROTATIONS);
    if (fs.existsSync(oldest)) {
        fs.unlinkSync(oldest);
    }
    // Shift existing rotated files up one slot: N → N+1.
    for (let slot = MAX_ROTATIONS - 1; slot >= 1; slot--) {
        const src = rotatedName(dir, slot);
        if (fs.existsSync(src)) {
            fs.renameSync(src, rotatedName(dir, slot + 1));
        }
    }
    // Finally, the active file becomes rotation .1.
    fs.renameSync(logPath, rotatedName(dir, 1));
}
|
|
134
|
+
// ---------------------------------------------------------------------------
|
|
135
|
+
// Write
|
|
136
|
+
// ---------------------------------------------------------------------------
|
|
137
|
+
/**
 * Convert an OculiEvent + policy decision into a telemetry log entry.
 * Copies the event fields one-to-one but deliberately leaves out the
 * raw payload; the decision and rule ids are attached alongside.
 *
 * @param {object} event normalized IDE hook event
 * @param {'allow'|'warn'|'deny'} decision policy verdict for this event
 * @param {string[]} [ruleIds] ids of the rules behind the decision
 * @returns {object} the log entry to append
 */
function eventToLogEntry(event, decision, ruleIds = []) {
    const { schema_version, ide_source, actor, org_id, hook_event_name, phase, session_id, trace_id, tool, tool_args, file_path, shell_command, mcp_server, timestamp, duration_ms, error, context, action, } = event;
    return {
        schema_version,
        ide_source,
        actor,
        org_id,
        hook_event_name,
        phase,
        session_id,
        trace_id,
        tool,
        tool_args,
        file_path,
        shell_command,
        mcp_server,
        timestamp,
        duration_ms,
        error,
        context,
        action,
        policy_decision: decision,
        policy_rule_ids: ruleIds,
    };
}
|
|
164
|
+
/**
 * Append one telemetry entry as a JSONL line to `.oculi/telemetry.jsonl`,
 * rotating the file first when it has grown past the size cap.
 *
 * @param {object} entry log entry to serialize
 * @param {string} [oculiDir] target `.oculi/` directory (discovered/created when omitted)
 * @returns {string} the path of the log file written to
 */
function appendTelemetry(entry, oculiDir) {
    const targetDir = oculiDir ?? findOrCreateOculiDir();
    const logPath = path.join(targetDir, LOG_FILENAME);
    rotateIfNeeded(logPath);
    fs.appendFileSync(logPath, `${JSON.stringify(entry)}\n`, 'utf8');
    return logPath;
}
|
|
176
|
+
/**
 * Read telemetry log entries from a JSONL file with optional filtering.
 * Lines that are blank or fail to parse as JSON are skipped silently.
 *
 * @param {string} logPath path to the telemetry JSONL file
 * @param {{since?: Date, filter?: string}} [opts] keep entries at/after
 *   `since` and/or with policy_decision === `filter`
 * @returns {object[]} matching entries in file order
 */
function readTelemetryLines(logPath, opts = {}) {
    if (!fs.existsSync(logPath))
        return [];
    const entries = [];
    for (const line of fs.readFileSync(logPath, 'utf8').split('\n')) {
        if (line.trim().length === 0)
            continue;
        let entry;
        try {
            entry = JSON.parse(line);
        }
        catch {
            continue; // skip malformed lines
        }
        if (opts.since && new Date(entry.timestamp) < opts.since)
            continue;
        if (opts.filter && entry.policy_decision !== opts.filter)
            continue;
        entries.push(entry);
    }
    return entries;
}
|
|
200
|
+
/**
 * Resolve the telemetry log file path, discovering (or creating) the
 * `.oculi/` directory when one is not supplied.
 *
 * @param {string} [oculiDir] `.oculi/` directory to use
 * @returns {string} full path of the telemetry log file
 */
function getTelemetryLogPath(oculiDir) {
    return path.join(oculiDir ?? findOrCreateOculiDir(), LOG_FILENAME);
}
|
|
207
|
+
/**
 * Read telemetry entries starting at a byte offset. Used for incremental
 * replay into sqlite — the caller persists `newOffset` to resume next time.
 *
 * If the file is shorter than `offset` (rotation happened since the last
 * read), reading restarts from byte 0 and the result reports `rotated: true`.
 * A trailing fragment without a final newline is treated as a half-written
 * line: it is not consumed, and the returned offset leaves it for next time.
 *
 * @param {string} filePath path to the telemetry JSONL file
 * @param {number} offset byte offset saved from the previous read
 * @returns {{entries: object[], newOffset: number, skipped: number, rotated: boolean}}
 */
function readTelemetryLinesFromOffset(filePath, offset) {
    if (!fs.existsSync(filePath)) {
        return { entries: [], newOffset: offset, skipped: 0, rotated: false };
    }
    const fileSize = fs.statSync(filePath).size;
    // A file smaller than the checkpoint means it was rotated underneath us.
    const rotated = fileSize < offset;
    const start = rotated ? 0 : offset;
    if (fileSize === start) {
        return { entries: [], newOffset: start, skipped: 0, rotated };
    }
    const byteCount = fileSize - start;
    const chunk = Buffer.alloc(byteCount);
    const fd = fs.openSync(filePath, 'r');
    try {
        fs.readSync(fd, chunk, 0, byteCount, start);
    }
    finally {
        fs.closeSync(fd);
    }
    const text = chunk.toString('utf8');
    const lastNewline = text.lastIndexOf('\n');
    if (lastNewline === -1) {
        // Only a half-written fragment so far — leave the offset unchanged.
        return { entries: [], newOffset: offset, skipped: 0, rotated };
    }
    // Consume everything up to and including the last complete line.
    const consumed = text.slice(0, lastNewline + 1);
    const entries = [];
    let skipped = 0;
    for (const line of consumed.split('\n')) {
        if (line.trim().length === 0)
            continue;
        try {
            entries.push(JSON.parse(line));
        }
        catch {
            skipped += 1;
        }
    }
    return {
        entries,
        newOffset: start + Buffer.byteLength(consumed, 'utf8'),
        skipped,
        rotated,
    };
}
|