@arkheia/mcp-server 0.1.5 → 0.1.7

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -0,0 +1,320 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
18
+ var __importStar = (this && this.__importStar) || (function () {
19
+ var ownKeys = function(o) {
20
+ ownKeys = Object.getOwnPropertyNames || function (o) {
21
+ var ar = [];
22
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
23
+ return ar;
24
+ };
25
+ return ownKeys(o);
26
+ };
27
+ return function (mod) {
28
+ if (mod && mod.__esModule) return mod;
29
+ var result = {};
30
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
31
+ __setModuleDefault(result, mod);
32
+ return result;
33
+ };
34
+ })();
35
+ Object.defineProperty(exports, "__esModule", { value: true });
36
+ exports.call_grok = call_grok;
37
+ exports.call_gemini = call_gemini;
38
+ exports.call_together = call_together;
39
+ exports.call_ollama = call_ollama;
40
+ const crypto = __importStar(require("crypto"));
41
+ // Node.js native fetch doesn't have a global logger, use console
42
+ const logger = console;
43
+ const _DEFAULT_TIMEOUT = 60 * 1000; // 60 seconds in milliseconds
44
+ const _OLLAMA_TIMEOUT = 120 * 1000; // 120 seconds in milliseconds
45
+ // ---------------------------------------------------------------------------
46
+ // Internal helpers
47
+ // ---------------------------------------------------------------------------
48
function _prompt_hash(prompt) {
    /** Return the SHA-256 hex digest of the prompt string. */
    const hasher = crypto.createHash('sha256');
    hasher.update(prompt);
    return hasher.digest('hex');
}
51
function _err_response(model, prompt, error) {
    /**
     * Build the standard provider-error payload.
     * Mirrors the success shape: {response, model, prompt_hash, error}.
     */
    const payload = {
        response: `[provider_error: ${error}]`,
        model,
        prompt_hash: _prompt_hash(prompt),
        error,
    };
    return payload;
}
59
+ // ---------------------------------------------------------------------------
60
+ // Grok (xAI) — OpenAI-compatible /v1/chat/completions
61
+ // ---------------------------------------------------------------------------
62
async function call_grok(prompt, model = "grok-4-fast-non-reasoning", kwargs = {}) {
    /**
     * Call the xAI Grok chat-completions API (OpenAI-compatible endpoint).
     *
     * @param {string} prompt - Sent as a single user message.
     * @param {string} model - Grok model id.
     * @param {object} kwargs - Extra request-body fields; spread last, so
     *     callers may override model/messages.
     * @returns {Promise<object>} {response, model, prompt_hash, usage?, error}
     *     Never throws: every failure is mapped to an error payload.
     */
    const api_key = process.env.XAI_API_KEY;
    if (!api_key) {
        return _err_response(model, prompt, "XAI_API_KEY not set");
    }
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), _DEFAULT_TIMEOUT);
    try {
        const resp = await fetch("https://api.x.ai/v1/chat/completions", {
            method: "POST",
            headers: {
                "Authorization": `Bearer ${api_key}`,
                "Content-Type": "application/json",
            },
            body: JSON.stringify({
                model: model,
                messages: [{ role: "user", content: prompt }],
                ...kwargs,
            }),
            signal: controller.signal,
        });
        if (!resp.ok) {
            logger.error(`call_grok: HTTP ${resp.status} for model=${model}`);
            return _err_response(model, prompt, `http_${resp.status}`);
        }
        const data = await resp.json();
        const response_text = data.choices?.[0]?.message?.content;
        if (typeof response_text !== 'string') {
            throw new Error("Invalid response format from Grok API");
        }
        return {
            response: response_text,
            model: model,
            prompt_hash: _prompt_hash(prompt),
            usage: data.usage || {},
            error: null,
        };
    }
    catch (e) {
        // Non-Error throwables may lack .message; normalize before inspecting,
        // otherwise the catch block itself could throw a TypeError.
        const message = e?.message ?? String(e);
        if (e?.name === 'AbortError') {
            logger.error(`call_grok: request timed out for model=${model}`);
            return _err_response(model, prompt, "timeout");
        }
        else if (e?.name === 'TypeError' && message.includes('fetch failed')) {
            logger.error(`call_grok: network error for model=${model}: ${message}`);
            return _err_response(model, prompt, "network_error");
        }
        else {
            logger.error(`call_grok: unexpected error: ${message}`);
            return _err_response(model, prompt, message);
        }
    }
    finally {
        // Single cleanup point replaces the clearTimeout duplicated in both
        // the try and catch paths of the original.
        clearTimeout(timeoutId);
    }
}
122
+ // ---------------------------------------------------------------------------
123
+ // Gemini (Google) — generateContent REST API
124
+ // ---------------------------------------------------------------------------
125
async function call_gemini(prompt, model = "gemini-2.5-flash", max_output_tokens = 1000, kwargs = {}) {
    /**
     * Call the Google Gemini generateContent REST API.
     *
     * Note: gemini-2.5-flash and -pro are thinking models — they need
     * max_output_tokens >= 1000 to produce content after thinking tokens.
     *
     * @param {string} prompt
     * @param {string} model - Gemini model id used in the endpoint path.
     * @param {number} max_output_tokens
     * @param {object} kwargs - Extra generationConfig fields; spread last, so
     *     callers may override maxOutputTokens.
     * @returns {Promise<object>} {response, model, prompt_hash, usage?, error}
     *     Never throws: every failure is mapped to an error payload.
     */
    const api_key = process.env.GOOGLE_API_KEY;
    if (!api_key) {
        return _err_response(model, prompt, "GOOGLE_API_KEY not set");
    }
    const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent`;
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), _DEFAULT_TIMEOUT);
    try {
        const resp = await fetch(url, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                // Send the key as a header instead of a `?key=` query parameter
                // so it cannot leak into access logs, proxies, or error traces.
                "x-goog-api-key": api_key,
            },
            body: JSON.stringify({
                contents: [{ parts: [{ text: prompt }] }],
                generationConfig: {
                    maxOutputTokens: max_output_tokens,
                    ...kwargs,
                },
            }),
            signal: controller.signal,
        });
        if (!resp.ok) {
            logger.error(`call_gemini: HTTP ${resp.status} for model=${model}`);
            return _err_response(model, prompt, `http_${resp.status}`);
        }
        const data = await resp.json();
        const response_text = data.candidates?.[0]?.content?.parts?.[0]?.text;
        if (typeof response_text !== 'string') {
            throw new Error("Invalid response format from Gemini API");
        }
        return {
            response: response_text,
            model: model,
            prompt_hash: _prompt_hash(prompt),
            usage: data.usageMetadata || {},
            error: null,
        };
    }
    catch (e) {
        // Non-Error throwables may lack .message; normalize before inspecting.
        const message = e?.message ?? String(e);
        if (e?.name === 'AbortError') {
            logger.error(`call_gemini: request timed out for model=${model}`);
            return _err_response(model, prompt, "timeout");
        }
        else if (e?.name === 'TypeError' && message.includes('fetch failed')) {
            logger.error(`call_gemini: network error for model=${model}: ${message}`);
            return _err_response(model, prompt, "network_error");
        }
        else {
            logger.error(`call_gemini: unexpected error: ${message}`);
            return _err_response(model, prompt, message);
        }
    }
    finally {
        // Guarantees the abort timer is cleared on every exit path.
        clearTimeout(timeoutId);
    }
}
190
+ // ---------------------------------------------------------------------------
191
+ // Together AI — OpenAI-compatible, cloud inference
192
+ // ---------------------------------------------------------------------------
193
async function call_together(prompt, model = "moonshotai/Kimi-K2.5", max_tokens = 2048, kwargs = {}) {
    /**
     * Call the Together AI chat-completions API (OpenAI-compatible).
     *
     * The default model, Kimi K2.5, is a thinking model that consumes
     * 100-500 tokens internally before producing output, so max_tokens
     * must be >= 2048 to reliably get a response.
     *
     * Returns: {response, model, prompt_hash, usage, error}
     */
    const apiKey = process.env.TOGETHER_API_KEY;
    if (!apiKey) {
        return _err_response(model, prompt, "TOGETHER_API_KEY not set");
    }
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), _DEFAULT_TIMEOUT);
    try {
        // kwargs is spread last so callers can override any request field.
        const requestBody = {
            model: model,
            max_tokens: max_tokens,
            messages: [{ role: "user", content: prompt }],
            ...kwargs,
        };
        const res = await fetch("https://api.together.xyz/v1/chat/completions", {
            method: "POST",
            headers: {
                "Authorization": `Bearer ${apiKey}`,
                "Content-Type": "application/json",
            },
            body: JSON.stringify(requestBody),
            signal: controller.signal,
        });
        clearTimeout(timer);
        if (!res.ok) {
            logger.error(`call_together: HTTP ${res.status} for model=${model}`);
            return _err_response(model, prompt, `http_${res.status}`);
        }
        const payload = await res.json();
        const text = payload.choices?.[0]?.message?.content;
        if (typeof text !== 'string') {
            throw new Error("Invalid response format from Together AI API");
        }
        return {
            response: text,
            model: model,
            prompt_hash: _prompt_hash(prompt),
            usage: payload.usage || {},
            error: null,
        };
    }
    catch (e) {
        clearTimeout(timer);
        if (e.name === 'AbortError') {
            logger.error(`call_together: request timed out for model=${model}`);
            return _err_response(model, prompt, "timeout");
        }
        if (e.name === 'TypeError' && e.message.includes('fetch failed')) {
            logger.error(`call_together: network error for model=${model}: ${e.message}`);
            return _err_response(model, prompt, "network_error");
        }
        logger.error(`call_together: unexpected error: ${e.message}`);
        return _err_response(model, prompt, e.message);
    }
}
258
+ // ---------------------------------------------------------------------------
259
+ // Ollama — local inference, no network egress
260
+ // ---------------------------------------------------------------------------
261
async function call_ollama(prompt, model = "phi4:14b", kwargs = {}) {
    /**
     * Call a local Ollama model via /api/generate (non-streaming).
     *
     * OLLAMA_BASE_URL defaults to http://localhost:11434.
     * No network egress — local eval only.
     *
     * Returns: {response, model, prompt_hash, eval_count, error}
     */
    const base_url = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), _OLLAMA_TIMEOUT);
    try {
        // kwargs is spread last so callers can override any request field.
        const requestBody = {
            model: model,
            prompt: prompt,
            stream: false,
            ...kwargs,
        };
        const res = await fetch(`${base_url}/api/generate`, {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify(requestBody),
            signal: controller.signal,
        });
        clearTimeout(timer);
        if (!res.ok) {
            logger.error(`call_ollama: HTTP ${res.status} for model=${model}`);
            return _err_response(model, prompt, `http_${res.status}`);
        }
        const payload = await res.json();
        const text = payload.response;
        if (typeof text !== 'string') {
            throw new Error("Invalid response format from Ollama API");
        }
        return {
            response: text,
            model: model,
            prompt_hash: _prompt_hash(prompt),
            eval_count: payload.eval_count,
            error: null,
        };
    }
    catch (e) {
        clearTimeout(timer);
        if (e.name === 'AbortError') {
            logger.error(`call_ollama: request timed out for model=${model}`);
            return _err_response(model, prompt, "timeout");
        }
        if (e.name === 'TypeError' && e.message.includes('fetch failed')) {
            logger.error(`call_ollama: cannot connect to Ollama at ${base_url}`);
            return _err_response(model, prompt, "ollama_unavailable");
        }
        logger.error(`call_ollama: unexpected error: ${e.message}`);
        return _err_response(model, prompt, e.message);
    }
}
@@ -0,0 +1,219 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ProxyClient = void 0;
4
+ const url_1 = require("url");
5
+ // Node.js native fetch doesn't have a global logger, use console
6
+ const logger = console;
7
+ // Hosted API defaults
8
+ const HOSTED_API_URL = "https://arkheia-proxy-production.up.railway.app";
9
class ProxyClient {
    base_url; // local Enterprise Proxy base URL, no trailing slash
    timeout; // in milliseconds (constructor takes seconds)
    hosted_url; // hosted fallback API base URL, no trailing slash
    api_key; // hosted API key; falls back to ARKHEIA_API_KEY env var
    _local_available; // optimistic; flips to false after a local connect failure
    /**
     * @param {string} base_url - Local proxy base URL.
     * @param {number} timeout - Request timeout in SECONDS (converted to ms).
     * @param {string} [hosted_url] - Hosted API base URL override.
     * @param {string} [api_key] - Hosted API key override.
     */
    constructor(base_url, timeout = 10.0, // in seconds
    hosted_url, api_key) {
        this.base_url = base_url.endsWith("/") ? base_url.slice(0, -1) : base_url;
        this.timeout = timeout * 1000; // fetch/AbortController wants milliseconds
        // Resolve the fallback once instead of re-evaluating it three times.
        const hosted = hosted_url || HOSTED_API_URL;
        this.hosted_url = hosted.endsWith("/") ? hosted.slice(0, -1) : hosted;
        this.api_key = api_key || process.env.ARKHEIA_API_KEY;
        this._local_available = true; // optimistic; flips on connect errors
    }
    async verify(prompt, response, model_id, session_id) {
        /**
         * Detect fabrication in a model response.
         *
         * Tries the local proxy first. If unavailable, falls back to the
         * hosted API. Never throws -- returns UNKNOWN on any error.
         */
        // Try local proxy first (skipped after a previous connect failure)
        if (this._local_available) {
            const result = await this._verify_local(prompt, response, model_id, session_id);
            if (result.error !== "proxy_unavailable" && result.error !== "proxy_timeout") {
                return result;
            }
            // Local proxy down -- fall through to hosted
            this._local_available = false;
            logger.info(`Local proxy unavailable, falling back to hosted API at ${this.hosted_url}`);
        }
        // Fallback: hosted API
        if (this.api_key) {
            const result = await this._verify_hosted(prompt, response, model_id);
            if (result.error !== "hosted_unavailable") {
                return result;
            }
            // Hosted also failed -- reset so the NEXT call retries local
            this._local_available = true;
        }
        // No hosted API key and local is down
        if (!this.api_key) {
            logger.warn("Local proxy unavailable and no ARKHEIA_API_KEY set for hosted fallback");
            return _unavailable("no_detection_available");
        }
        return _unavailable("all_detection_paths_failed");
    }
    async _verify_local(prompt, response, model_id, session_id) {
        /** POST /detect/verify on the local Enterprise Proxy. Never throws. */
        const payload = {
            prompt: prompt,
            response: response,
            model_id: model_id,
        };
        if (session_id) {
            payload["session_id"] = session_id;
        }
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
        try {
            const resp = await fetch(`${this.base_url}/detect/verify`, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                },
                body: JSON.stringify(payload),
                signal: controller.signal,
            });
            if (!resp.ok) {
                logger.error(`ProxyClient: /detect/verify HTTP error: ${resp.status} ${resp.statusText}`);
                return _unavailable(`proxy_http_error_${resp.status}`);
            }
            return await resp.json();
        }
        catch (e) {
            // Non-Error throwables may lack .message; normalize first.
            const message = e?.message ?? String(e);
            if (e?.name === 'AbortError') {
                logger.warn(`ProxyClient: /detect/verify timed out for model=${model_id}`);
                return _unavailable("proxy_timeout");
            }
            else if (e?.name === 'TypeError' && message.includes('fetch failed')) { // connection refused etc.
                logger.warn(`ProxyClient: cannot connect to proxy at ${this.base_url}`);
                return _unavailable("proxy_unavailable");
            }
            else {
                logger.error(`ProxyClient: /detect/verify unexpected error: ${message}`);
                return _unavailable("proxy_error");
            }
        }
        finally {
            // Single cleanup point; the original duplicated clearTimeout in
            // both the try and catch paths.
            clearTimeout(timeoutId);
        }
    }
    async _verify_hosted(prompt, response, model_id) {
        /** POST /v1/detect on the hosted Arkheia API. Never throws. */
        const payload = {
            model: model_id,
            response: response,
            prompt: prompt,
        };
        const headers = {
            "X-Arkheia-Key": this.api_key || "",
            // The body is JSON; without this header fetch defaults string
            // bodies to text/plain, which JSON endpoints may reject.
            "Content-Type": "application/json",
        };
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
        try {
            const resp = await fetch(`${this.hosted_url}/v1/detect`, {
                method: "POST",
                headers: headers,
                body: JSON.stringify(payload),
                signal: controller.signal,
            });
            if (!resp.ok) {
                const status = resp.status;
                if (status === 401) {
                    logger.error("ProxyClient: hosted API rejected API key (401)");
                    return _unavailable("hosted_auth_failed");
                }
                if (status === 429) {
                    logger.warn("ProxyClient: hosted API rate/quota limit (429)");
                    return _unavailable("hosted_quota_exceeded");
                }
                logger.error(`ProxyClient: hosted /v1/detect HTTP error: ${status} ${resp.statusText}`);
                return _unavailable(`hosted_http_error_${status}`);
            }
            const data = await resp.json();
            // Map hosted response format to local format
            return {
                risk_level: data.risk || "UNKNOWN",
                confidence: data.confidence || 0.0,
                features_triggered: data.features_triggered || [],
                detection_id: data.detection_id,
                detection_method: data.detection_method,
                evidence_depth_limited: data.evidence_depth_limited ?? true,
                source: "hosted",
            };
        }
        catch (e) {
            const message = e?.message ?? String(e);
            if (e?.name === 'AbortError') {
                logger.warn(`ProxyClient: hosted /v1/detect timed out for model=${model_id}`);
                return _unavailable("hosted_timeout");
            }
            else if (e?.name === 'TypeError' && message.includes('fetch failed')) {
                logger.warn(`ProxyClient: cannot connect to hosted API at ${this.hosted_url}`);
                return _unavailable("hosted_unavailable");
            }
            else {
                logger.error(`ProxyClient: hosted /v1/detect unexpected error: ${message}`);
                return _unavailable("hosted_error");
            }
        }
        finally {
            clearTimeout(timeoutId);
        }
    }
    async get_audit_log(session_id, limit = 50) {
        /**
         * GET /audit/log
         *
         * Returns audit log dict. Never throws -- returns empty log on any error.
         * Note: audit log is only available from the local proxy, not the
         * hosted API. `limit` is capped at 500 server-side-friendly.
         */
        const params = new url_1.URLSearchParams({ limit: String(Math.min(limit, 500)) });
        if (session_id) {
            params.append("session_id", session_id);
        }
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
        try {
            const resp = await fetch(`${this.base_url}/audit/log?${params.toString()}`, {
                method: "GET",
                signal: controller.signal,
            });
            if (!resp.ok) {
                logger.error(`ProxyClient: /audit/log HTTP error: ${resp.status} ${resp.statusText}`);
                return _empty_log(`proxy_http_error_${resp.status}`);
            }
            return await resp.json();
        }
        catch (e) {
            const message = e?.message ?? String(e);
            if (e?.name === 'AbortError') {
                logger.warn("ProxyClient: /audit/log timed out");
                return _empty_log("proxy_timeout");
            }
            else if (e?.name === 'TypeError' && message.includes('fetch failed')) {
                logger.warn(`ProxyClient: cannot connect to proxy at ${this.base_url}`);
                return _empty_log("proxy_unavailable");
            }
            else {
                logger.error(`ProxyClient: /audit/log unexpected error: ${message}`);
                return _empty_log("proxy_error");
            }
        }
        finally {
            clearTimeout(timeoutId);
        }
    }
}
203
+ exports.ProxyClient = ProxyClient;
204
function _unavailable(error) {
    /** Standard UNKNOWN verdict returned when detection is unreachable. */
    const verdict = {
        risk_level: "UNKNOWN",
        confidence: 0.0,
        features_triggered: [],
        error,
    };
    return verdict;
}
213
function _empty_log(error) {
    /** Empty audit-log payload used when the log cannot be fetched. */
    const summary = { LOW: 0, MEDIUM: 0, HIGH: 0, UNKNOWN: 0 };
    return { events: [], summary, error };
}
@@ -0,0 +1,90 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.PolicyViolation = exports.REGISTRY = exports.Permission = void 0;
4
+ exports.check = check;
5
// Tool permission levels (compiled-TypeScript string-enum pattern).
var Permission;
(function (P) {
    P.READ = "read";
    P.EXECUTE = "execute";
    P.WRITE = "write";
    P.DEPLOY = "deploy";
})(Permission || (exports.Permission = Permission = {}));
12
+ // ---------------------------------------------------------------------------
13
+ // The allowlist
14
+ // ---------------------------------------------------------------------------
15
+ exports.REGISTRY = {
16
+ arkheia_verify: {
17
+ name: "arkheia_verify",
18
+ permissions: [Permission.READ],
19
+ network_egress: true,
20
+ description: "Screen an AI response for fabrication risk",
21
+ },
22
+ arkheia_audit_log: {
23
+ name: "arkheia_audit_log",
24
+ permissions: [Permission.READ],
25
+ network_egress: false,
26
+ description: "Retrieve structured audit evidence",
27
+ },
28
+ run_grok: {
29
+ name: "run_grok",
30
+ permissions: [Permission.READ, Permission.EXECUTE],
31
+ network_egress: true,
32
+ description: "Call xAI Grok API and screen response through Arkheia",
33
+ },
34
+ run_gemini: {
35
+ name: "run_gemini",
36
+ permissions: [Permission.READ, Permission.EXECUTE],
37
+ network_egress: true,
38
+ description: "Call Google Gemini API and screen response through Arkheia",
39
+ },
40
+ run_together: {
41
+ name: "run_together",
42
+ permissions: [Permission.READ, Permission.EXECUTE],
43
+ network_egress: true,
44
+ description: "Call Together AI API and screen response through Arkheia",
45
+ },
46
+ run_ollama: {
47
+ name: "run_ollama",
48
+ permissions: [Permission.READ, Permission.EXECUTE],
49
+ network_egress: false,
50
+ description: "Call local Ollama model and screen response through Arkheia",
51
+ },
52
+ memory_store: {
53
+ name: "memory_store",
54
+ permissions: [Permission.READ, Permission.WRITE],
55
+ network_egress: false,
56
+ description: "Store an entity and observations in the persistent knowledge graph",
57
+ },
58
+ memory_retrieve: {
59
+ name: "memory_retrieve",
60
+ permissions: [Permission.READ],
61
+ network_egress: false,
62
+ description: "Retrieve entities and their observations from the knowledge graph",
63
+ },
64
+ memory_relate: {
65
+ name: "memory_relate",
66
+ permissions: [Permission.READ, Permission.WRITE],
67
+ network_egress: false,
68
+ description: "Store a named relationship between two entities in the knowledge graph",
69
+ },
70
+ };
71
+ // ---------------------------------------------------------------------------
72
+ // Policy gate
73
+ // ---------------------------------------------------------------------------
74
class PolicyViolation extends Error {
    tool_name; // tool that failed the policy check
    reason; // human-readable denial reason
    /**
     * Raised when a tool fails the policy gate.
     * @param {string} tool_name
     * @param {string} reason
     */
    constructor(tool_name, reason) {
        super(`Policy violation for '${tool_name}': ${reason}`);
        // Without this, the subclass reports as plain "Error" in logs/stacks.
        this.name = "PolicyViolation";
        this.tool_name = tool_name;
        this.reason = reason;
    }
}
83
+ exports.PolicyViolation = PolicyViolation;
84
function check(tool_name) {
    /**
     * Policy gate: look up tool_name in the allowlist.
     * Returns the tool's policy entry; throws PolicyViolation for unknown
     * tools (default deny).
     */
    const policy = exports.REGISTRY[tool_name];
    if (policy) {
        return policy;
    }
    const known = Object.keys(exports.REGISTRY).sort().join(", ");
    throw new PolicyViolation(tool_name, `not in allowlist — default deny. Known tools: ${known}`);
}