@aris-mcp/server 1.0.2 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +72 -643
  2. package/package.json +1 -2
package/dist/index.js CHANGED
@@ -5,500 +5,14 @@ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
5
5
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
6
6
  import { z as z2 } from "zod";
7
7
 
8
- // ../core/dist/redact.js
9
- var SENSITIVE_KEYS = /* @__PURE__ */ new Set([
10
- "ssn",
11
- "password",
12
- "api_key",
13
- "email",
14
- "credit",
15
- "secret",
16
- "token",
17
- "authorization"
18
- ]);
19
- var EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g;
20
- var SSN_REGEX = /\b\d{3}-\d{2}-\d{4}\b/g;
21
- var PHONE_REGEX = /\b(\+?1[-.\s]?)?\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}\b/g;
22
- function isSensitiveKey(key) {
23
- const lower = key.toLowerCase();
24
- for (const sensitive of SENSITIVE_KEYS) {
25
- if (lower.includes(sensitive)) {
26
- return true;
27
- }
28
- }
29
- return false;
30
- }
31
- function redactStringValue(value) {
32
- let result = value;
33
- result = result.replace(EMAIL_REGEX, "[REDACTED_EMAIL]");
34
- result = result.replace(SSN_REGEX, "[REDACTED_SSN]");
35
- result = result.replace(PHONE_REGEX, "[REDACTED_PHONE]");
36
- return result;
37
- }
38
- function walkAndRedact(obj) {
39
- if (obj === null || obj === void 0) {
40
- return obj;
41
- }
42
- if (Array.isArray(obj)) {
43
- return obj.map((item) => walkAndRedact(item));
44
- }
45
- if (typeof obj === "object") {
46
- const result = {};
47
- for (const [key, value] of Object.entries(obj)) {
48
- if (isSensitiveKey(key)) {
49
- result[key] = "[REDACTED]";
50
- } else if (typeof value === "string") {
51
- result[key] = redactStringValue(value);
52
- } else {
53
- result[key] = walkAndRedact(value);
54
- }
55
- }
56
- return result;
57
- }
58
- if (typeof obj === "string") {
59
- return redactStringValue(obj);
60
- }
61
- return obj;
62
- }
63
- function redactPII(input) {
64
- try {
65
- const parsed = JSON.parse(input);
66
- const redacted = walkAndRedact(parsed);
67
- return JSON.stringify(redacted);
68
- } catch {
69
- return input;
70
- }
71
- }
72
-
73
- // ../core/dist/compress.js
74
- var STOP_WORDS = /* @__PURE__ */ new Set([
75
- "the",
76
- "is",
77
- "at",
78
- "of",
79
- "on",
80
- "and",
81
- "a",
82
- "to",
83
- "in",
84
- "for",
85
- "it",
86
- "this",
87
- "that",
88
- "with",
89
- "from",
90
- "by",
91
- "an",
92
- "be",
93
- "as",
94
- "are",
95
- "was",
96
- "were",
97
- "been",
98
- "has",
99
- "have",
100
- "had",
101
- "do",
102
- "does",
103
- "did",
104
- "will",
105
- "would",
106
- "could",
107
- "should",
108
- "can",
109
- "may",
110
- "might"
111
- ]);
112
- function estimateTokens(text) {
113
- return Math.ceil(text.length / 4);
114
- }
115
- function extractKeywords(query) {
116
- const words = query.toLowerCase().replace(/[^\w\s]/g, "").split(/\s+/).filter((w) => w.length > 0 && !STOP_WORDS.has(w));
117
- return new Set(words);
118
- }
119
- function splitSentences(text) {
120
- return text.split(/(?<=[.?!])\s+/).map((s) => s.trim()).filter((s) => s.length > 0);
121
- }
122
- function scoreSentence(sentence, keywords) {
123
- const words = sentence.toLowerCase().replace(/[^\w\s]/g, "").split(/\s+/);
124
- let score = 0;
125
- for (const word of words) {
126
- if (keywords.has(word)) {
127
- score++;
128
- }
129
- }
130
- return score;
131
- }
132
- function compressContext(query, context) {
133
- const originalTokens = estimateTokens(context);
134
- const keywords = extractKeywords(query);
135
- let compressed;
136
- if (keywords.size === 0) {
137
- compressed = context.length > 500 ? context.slice(0, 500) + "..." : context;
138
- } else {
139
- const sentences = splitSentences(context);
140
- const relevant = sentences.filter((sentence) => scoreSentence(sentence, keywords) > 0);
141
- if (relevant.length === 0) {
142
- compressed = context.length > 500 ? context.slice(0, 500) + "..." : context;
143
- } else {
144
- compressed = relevant.join(" ");
145
- }
146
- }
147
- const compressedTokens = estimateTokens(compressed);
148
- const savedTokens = originalTokens - compressedTokens;
149
- const savingsPercent = originalTokens > 0 ? Math.round(savedTokens / originalTokens * 100) : 0;
150
- return {
151
- original: context,
152
- compressed,
153
- originalTokens,
154
- compressedTokens,
155
- savedTokens,
156
- savingsPercent
157
- };
158
- }
159
-
160
- // ../core/dist/pipeline.js
161
- var MAX_PROMPT_LENGTH = 1e5;
162
- var MAX_CONTEXT_LENGTH = 5e5;
163
- var DEFAULT_TIMEOUT_MS = 3e4;
164
- function sanitizeContext(context) {
165
- let sanitized = context.replace(/<[^>]*>/g, "");
166
- const injectionPatterns = [
167
- /\[SYSTEM\]/gi,
168
- /\[INST\]/gi,
169
- /\[\/INST\]/gi,
170
- /<<SYS>>/gi,
171
- /<\/SYS>/gi,
172
- /\[IMPORTANT\]/gi,
173
- /\{system\}/gi,
174
- /###\s*(?:system|instruction|admin)\s*:/gi,
175
- /(?:^|\n)system\s*:\s*/gi,
176
- /BEGIN SYSTEM PROMPT/gi,
177
- /END SYSTEM PROMPT/gi,
178
- /IGNORE PREVIOUS INSTRUCTIONS/gi,
179
- /IGNORE ALL PREVIOUS/gi,
180
- /YOU ARE NOW/gi,
181
- /NEW INSTRUCTIONS:/gi,
182
- /OVERRIDE:/gi
183
- ];
184
- for (const pattern of injectionPatterns) {
185
- sanitized = sanitized.replace(pattern, "");
186
- }
187
- return sanitized;
188
- }
189
- var ArisPipeline = class {
190
- llm;
191
- cache;
192
- enableRedaction;
193
- enableCompression;
194
- debug;
195
- constructor(config) {
196
- this.llm = config.llmProvider;
197
- this.cache = config.cache;
198
- this.enableRedaction = config.enableRedaction ?? true;
199
- this.enableCompression = config.enableCompression ?? true;
200
- this.debug = config.debug ?? false;
201
- }
202
- async process(query, context, options) {
203
- if (query.length > MAX_PROMPT_LENGTH) {
204
- throw new Error(`Prompt exceeds maximum length of ${MAX_PROMPT_LENGTH} characters (got ${query.length})`);
205
- }
206
- if (context && context.length > MAX_CONTEXT_LENGTH) {
207
- throw new Error(`Context exceeds maximum length of ${MAX_CONTEXT_LENGTH} characters (got ${context.length})`);
208
- }
209
- const sanitizedContext = context ? sanitizeContext(context) : "";
210
- const originalTokens = estimateTokens(query + sanitizedContext);
211
- const externalSignal = options?.signal;
212
- const timeoutController = new AbortController();
213
- let timeoutId;
214
- if (!externalSignal) {
215
- timeoutId = setTimeout(() => timeoutController.abort(), DEFAULT_TIMEOUT_MS);
216
- }
217
- const signal = externalSignal ?? timeoutController.signal;
218
- try {
219
- if (signal.aborted) {
220
- throw new Error("Operation aborted");
221
- }
222
- const cacheKey = this.cache ? `aris:pipeline:${this.cache.hashRequest(query + sanitizedContext)}` : "";
223
- if (this.cache && cacheKey) {
224
- const cached = await this.cache.get(cacheKey);
225
- if (cached) {
226
- try {
227
- const parsed = JSON.parse(cached);
228
- this.log("Cache hit");
229
- return { ...parsed, cached: true };
230
- } catch {
231
- }
232
- }
233
- }
234
- let processedQuery = query;
235
- let processedContext = sanitizedContext;
236
- let redacted = false;
237
- if (this.enableRedaction) {
238
- processedQuery = redactPII(processedQuery);
239
- if (processedContext) {
240
- processedContext = redactPII(processedContext);
241
- }
242
- redacted = true;
243
- this.log("PII redaction applied");
244
- }
245
- let compressed = false;
246
- if (this.enableCompression && processedContext) {
247
- const compression = compressContext(processedQuery, processedContext);
248
- processedContext = compression.compressed;
249
- compressed = true;
250
- this.log(`Compression: ${compression.savedTokens} tokens saved (${compression.savingsPercent}%)`);
251
- }
252
- const prompt = processedContext ? `Context:
253
- ${processedContext}
254
-
255
- Query: ${processedQuery}` : processedQuery;
256
- const processedTokens = estimateTokens(prompt);
257
- if (signal.aborted) {
258
- throw new Error("Operation aborted");
259
- }
260
- this.log(`Sending to ${this.llm.name}`);
261
- const llmResponse = await this.llm.send(prompt, { signal });
262
- const savedTokens = originalTokens - processedTokens;
263
- const savingsPercent = originalTokens > 0 ? Math.round(savedTokens / originalTokens * 100) : 0;
264
- const result = {
265
- response: llmResponse.content,
266
- model: llmResponse.model,
267
- originalTokens,
268
- processedTokens,
269
- savedTokens,
270
- savingsPercent,
271
- cached: false,
272
- redacted,
273
- compressed,
274
- usage: llmResponse.usage
275
- };
276
- if (this.cache && cacheKey) {
277
- await this.cache.set(cacheKey, JSON.stringify(result));
278
- this.log("Response cached");
279
- }
280
- return result;
281
- } finally {
282
- if (timeoutId !== void 0) {
283
- clearTimeout(timeoutId);
284
- }
285
- }
286
- }
287
- log(message) {
288
- if (this.debug) {
289
- process.stderr.write(`[aris:pipeline] ${message}
290
- `);
291
- }
292
- }
293
- };
294
-
295
- // ../core/dist/providers/openrouter.js
296
- var DEFAULT_MODEL = "minimax/minimax-m2.5";
297
- var DEFAULT_BASE_URL = "https://openrouter.ai/api/v1";
298
- var OpenRouterProvider = class {
299
- name = "openrouter";
300
- apiKey;
301
- model;
302
- baseUrl;
303
- constructor(config) {
304
- this.apiKey = config.apiKey;
305
- this.model = config.model ?? DEFAULT_MODEL;
306
- this.baseUrl = config.baseUrl ?? DEFAULT_BASE_URL;
307
- }
308
- async send(prompt, options) {
309
- const model = options?.model ?? this.model;
310
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
311
- method: "POST",
312
- headers: {
313
- Authorization: `Bearer ${this.apiKey}`,
314
- "Content-Type": "application/json",
315
- "HTTP-Referer": "https://aris.dev",
316
- "X-Title": "Aris MCP"
317
- },
318
- body: JSON.stringify({
319
- model,
320
- messages: [{ role: "user", content: prompt }],
321
- max_tokens: options?.maxTokens ?? 4096,
322
- temperature: options?.temperature ?? 0.7
323
- }),
324
- signal: options?.signal
325
- });
326
- if (!response.ok) {
327
- const errorText = await response.text();
328
- throw new Error(`OpenRouter API error (${response.status}): ${errorText}`);
329
- }
330
- const data = await response.json();
331
- const content = data.choices?.[0]?.message?.content ?? "";
332
- return {
333
- content,
334
- model: data.model ?? model,
335
- usage: data.usage ? {
336
- promptTokens: data.usage.prompt_tokens,
337
- completionTokens: data.usage.completion_tokens,
338
- totalTokens: data.usage.total_tokens
339
- } : void 0
340
- };
341
- }
342
- async healthCheck() {
343
- try {
344
- const response = await fetch(`${this.baseUrl}/models`, {
345
- headers: { Authorization: `Bearer ${this.apiKey}` }
346
- });
347
- return response.ok;
348
- } catch {
349
- return false;
350
- }
351
- }
352
- };
353
-
354
- // ../core/dist/providers/ollama.js
355
- var DEFAULT_MODEL2 = "llama3";
356
- var DEFAULT_BASE_URL2 = "http://localhost:11434";
357
- var OllamaProvider = class {
358
- name = "ollama";
359
- model;
360
- baseUrl;
361
- constructor(config = {}) {
362
- this.model = config.model ?? DEFAULT_MODEL2;
363
- this.baseUrl = config.baseUrl ?? DEFAULT_BASE_URL2;
364
- }
365
- async send(prompt, options) {
366
- const model = options?.model ?? this.model;
367
- const response = await fetch(`${this.baseUrl}/api/chat`, {
368
- method: "POST",
369
- headers: { "Content-Type": "application/json" },
370
- body: JSON.stringify({
371
- model,
372
- messages: [{ role: "user", content: prompt }],
373
- stream: false,
374
- options: {
375
- num_predict: options?.maxTokens ?? 4096,
376
- temperature: options?.temperature ?? 0.7
377
- }
378
- }),
379
- signal: options?.signal
380
- });
381
- if (!response.ok) {
382
- const errorText = await response.text();
383
- throw new Error(`Ollama API error (${response.status}): ${errorText}`);
384
- }
385
- const data = await response.json();
386
- const promptTokens = data.prompt_eval_count ?? 0;
387
- const completionTokens = data.eval_count ?? 0;
388
- return {
389
- content: data.message?.content ?? "",
390
- model: data.model ?? model,
391
- usage: {
392
- promptTokens,
393
- completionTokens,
394
- totalTokens: promptTokens + completionTokens
395
- }
396
- };
397
- }
398
- async healthCheck() {
399
- try {
400
- const response = await fetch(`${this.baseUrl}/api/tags`);
401
- return response.ok;
402
- } catch {
403
- return false;
404
- }
405
- }
406
- };
407
-
408
- // ../core/dist/cache.js
409
- import { createHash } from "crypto";
410
- import { Redis } from "ioredis";
411
- var ArisCache = class {
412
- redis;
413
- constructor(redisUrl) {
414
- this.redis = new Redis(redisUrl ?? "redis://localhost:6379", {
415
- lazyConnect: true,
416
- retryStrategy: (times) => {
417
- if (times > 3)
418
- return null;
419
- return Math.min(times * 200, 2e3);
420
- }
421
- });
422
- this.redis.on("error", (err) => {
423
- process.stderr.write(`[ArisCache] Redis connection error: ${err.message}
424
- `);
425
- });
426
- this.redis.connect().catch((err) => {
427
- process.stderr.write(`[ArisCache] Failed to connect to Redis: ${err.message}
428
- `);
429
- });
430
- }
431
- async get(key) {
432
- try {
433
- return await this.redis.get(key);
434
- } catch (err) {
435
- console.warn(`[ArisCache] get failed: ${err.message}`);
436
- return null;
437
- }
438
- }
439
- async set(key, value, ttlSeconds = 600) {
440
- try {
441
- await this.redis.set(key, value, "EX", ttlSeconds);
442
- } catch (err) {
443
- console.warn(`[ArisCache] set failed: ${err.message}`);
444
- }
445
- }
446
- hashRequest(body) {
447
- return createHash("sha256").update(body).digest("hex");
448
- }
449
- async ping() {
450
- try {
451
- const result = await this.redis.ping();
452
- return result === "PONG";
453
- } catch {
454
- return false;
455
- }
456
- }
457
- async checkThrottle(maxJoules = 5e3) {
458
- try {
459
- const value = await this.redis.get("aris_system_joules");
460
- if (value === null)
461
- return false;
462
- const joules = parseFloat(value);
463
- return joules > maxJoules;
464
- } catch (err) {
465
- console.warn(`[ArisCache] checkThrottle failed: ${err.message}`);
466
- return false;
467
- }
468
- }
469
- async disconnect() {
470
- try {
471
- await this.redis.quit();
472
- } catch {
473
- }
474
- }
475
- };
476
-
477
8
  // src/config.ts
478
- function envBool(value, defaultValue) {
479
- if (value === void 0) return defaultValue;
480
- return value.toLowerCase() === "true";
481
- }
482
9
  function loadConfig() {
483
- const llmProvider = process.env.ARIS_LLM_PROVIDER ?? "openrouter";
484
- const llmApiKey = process.env.ARIS_API_KEY;
485
- const ollamaUrl = process.env.ARIS_OLLAMA_URL ?? "http://localhost:11434";
486
- if (llmProvider === "openrouter" && !llmApiKey) {
487
- throw new Error(
488
- "ARIS_API_KEY is required when ARIS_LLM_PROVIDER is 'openrouter'"
489
- );
10
+ const apiKey = process.env.ARIS_API_KEY;
11
+ if (!apiKey) {
12
+ throw new Error("ARIS_API_KEY is required");
490
13
  }
491
- return {
492
- endpoint: process.env.ARIS_ENDPOINT,
493
- llmProvider,
494
- llmApiKey,
495
- llmModel: process.env.ARIS_LLM_MODEL,
496
- ollamaUrl,
497
- redisUrl: process.env.ARIS_REDIS_URL,
498
- enableRedaction: envBool(process.env.ARIS_ENABLE_REDACTION, true),
499
- enableCompression: envBool(process.env.ARIS_ENABLE_COMPRESSION, true),
500
- debug: envBool(process.env.ARIS_DEBUG, false)
501
- };
14
+ const apiUrl = process.env.ARIS_API_URL ?? "https://energetic-light-production.up.railway.app";
15
+ return { apiKey, apiUrl };
502
16
  }
503
17
 
504
18
  // src/tools.ts
@@ -586,18 +100,6 @@ function log(message) {
586
100
  process.stderr.write(`[aris-mcp] ${message}
587
101
  `);
588
102
  }
589
- function createLLMProvider(config) {
590
- if (config.llmProvider === "ollama") {
591
- return new OllamaProvider({
592
- baseUrl: config.ollamaUrl,
593
- model: config.llmModel
594
- });
595
- }
596
- return new OpenRouterProvider({
597
- apiKey: config.llmApiKey,
598
- model: config.llmModel
599
- });
600
- }
601
103
  async function main() {
602
104
  let config;
603
105
  try {
@@ -606,111 +108,67 @@ async function main() {
606
108
  log(`Configuration error: ${err.message}`);
607
109
  process.exit(1);
608
110
  }
609
- log(`Provider: ${config.llmProvider}`);
610
- log(`Redaction: ${config.enableRedaction ? "enabled" : "disabled"}`);
611
- log(`Compression: ${config.enableCompression ? "enabled" : "disabled"}`);
612
- log(`Cache: ${config.redisUrl ? "enabled" : "disabled"}`);
613
- const llmProvider = createLLMProvider(config);
614
- let cache;
615
- if (config.redisUrl) {
616
- cache = new ArisCache(config.redisUrl);
617
- log("Redis cache initialized");
111
+ log(`API: ${config.apiUrl}`);
112
+ async function apiCall(path, body) {
113
+ const res = await fetch(`${config.apiUrl}${path}`, {
114
+ method: body ? "POST" : "GET",
115
+ headers: {
116
+ Authorization: `Bearer ${config.apiKey}`,
117
+ "Content-Type": "application/json"
118
+ },
119
+ body: body ? JSON.stringify(body) : void 0,
120
+ signal: AbortSignal.timeout(35e3)
121
+ });
122
+ const data = await res.json();
123
+ return { status: res.status, data };
618
124
  }
619
- const pipeline = new ArisPipeline({
620
- llmProvider,
621
- cache,
622
- enableRedaction: config.enableRedaction,
623
- enableCompression: config.enableCompression,
624
- debug: config.debug
625
- });
626
125
  const server = new McpServer(
627
- {
628
- name: "aris-mcp",
629
- version: "1.0.0"
630
- },
631
- {
632
- capabilities: {
633
- tools: {}
634
- }
635
- }
126
+ { name: "aris-mcp", version: "2.0.0" },
127
+ { capabilities: { tools: {} } }
636
128
  );
637
129
  server.tool(
638
130
  ToolName.OPTIMIZE,
639
131
  TOOL_DESCRIPTIONS[ToolName.OPTIMIZE],
640
132
  {
641
- prompt: z2.string().min(1, "Prompt must not be empty").max(1e5, "Prompt must be under 100,000 characters").describe(
642
- "The user prompt to optimize and send through the Aris pipeline. This is the main instruction or question for the LLM."
133
+ prompt: z2.string().min(1).max(1e5).describe(
134
+ "The user prompt to optimize and send through the Aris pipeline."
643
135
  ),
644
- context: z2.string().max(5e5, "Context must be under 500,000 characters").optional().describe(
645
- "Optional background context to compress before including with the prompt. Long documents, code snippets, or reference material that will be filtered to only the sentences relevant to the prompt."
136
+ context: z2.string().max(5e5).optional().describe(
137
+ "Optional background context to compress before including with the prompt."
646
138
  )
647
139
  },
648
- async ({ prompt, context }, { sendNotification }) => {
140
+ async ({ prompt, context }) => {
649
141
  try {
650
- await sendNotification({
651
- method: "notifications/progress",
652
- params: {
653
- progressToken: "aris_optimize",
654
- progress: 0,
655
- total: 100,
656
- message: "Starting Aris optimization pipeline..."
657
- }
658
- });
659
- await sendNotification({
660
- method: "notifications/progress",
661
- params: {
662
- progressToken: "aris_optimize",
663
- progress: 20,
664
- total: 100,
665
- message: "Applying PII redaction and context compression..."
666
- }
667
- });
668
- const controller = new AbortController();
669
- const timeout = setTimeout(() => controller.abort(), 3e4);
670
- let result;
671
- try {
672
- result = await pipeline.process(prompt, context, {
673
- signal: controller.signal
674
- });
675
- } finally {
676
- clearTimeout(timeout);
142
+ const { status, data } = await apiCall("/optimize", { prompt, context });
143
+ if (status !== 200) {
144
+ return {
145
+ content: [{ type: "text", text: `Pipeline error: ${data.error ?? "Unknown error"}` }],
146
+ isError: true
147
+ };
677
148
  }
678
- await sendNotification({
679
- method: "notifications/progress",
680
- params: {
681
- progressToken: "aris_optimize",
682
- progress: 100,
683
- total: 100,
684
- message: "Pipeline complete."
685
- }
686
- });
687
149
  const summary = [
688
- result.response,
150
+ data.response,
689
151
  "",
690
152
  "--- Aris Pipeline Stats ---",
691
- `Model: ${result.model}`,
692
- `Original tokens: ${result.originalTokens}`,
693
- `Processed tokens: ${result.processedTokens}`,
694
- `Tokens saved: ${result.savedTokens} (${result.savingsPercent}%)`,
695
- `Cached: ${result.cached}`,
696
- `Redacted: ${result.redacted}`,
697
- `Compressed: ${result.compressed}`
153
+ `Model: ${data.model}`,
154
+ `Original tokens: ${data.originalTokens}`,
155
+ `Processed tokens: ${data.processedTokens}`,
156
+ `Tokens saved: ${data.savedTokens} (${data.savingsPercent}%)`,
157
+ `Cached: ${data.cached}`,
158
+ `Redacted: ${data.redacted}`,
159
+ `Compressed: ${data.compressed}`
698
160
  ];
699
- if (result.usage) {
161
+ const usage = data.usage;
162
+ if (usage) {
700
163
  summary.push(
701
- `LLM usage - prompt: ${result.usage.promptTokens}, completion: ${result.usage.completionTokens}, total: ${result.usage.totalTokens}`
164
+ `LLM usage - prompt: ${usage.promptTokens}, completion: ${usage.completionTokens}, total: ${usage.totalTokens}`
702
165
  );
703
166
  }
704
- return {
705
- content: [{ type: "text", text: summary.join("\n") }]
706
- };
167
+ return { content: [{ type: "text", text: summary.join("\n") }] };
707
168
  } catch (err) {
708
169
  const message = err instanceof Error ? err.message : String(err);
709
- const errorText = message.includes("aborted") ? "Pipeline timed out after 30 seconds" : `Pipeline error: ${message}`;
710
- return {
711
- content: [{ type: "text", text: errorText }],
712
- isError: true
713
- };
170
+ const errorText = message.includes("TimeoutError") || message.includes("aborted") ? "Pipeline timed out" : message.includes("fetch") ? "Aris API unavailable" : `Pipeline error: ${message}`;
171
+ return { content: [{ type: "text", text: errorText }], isError: true };
714
172
  }
715
173
  }
716
174
  );
@@ -720,42 +178,18 @@ async function main() {
720
178
  {},
721
179
  async () => {
722
180
  try {
723
- const llmHealthy = await llmProvider.healthCheck();
724
- const cacheHealthy = cache ? await cache.ping() : null;
725
- const status = {
726
- llm: {
727
- provider: config.llmProvider,
728
- model: config.llmModel ?? "(default)",
729
- healthy: llmHealthy
730
- },
731
- cache: {
732
- enabled: !!cache,
733
- healthy: cacheHealthy,
734
- url: config.redisUrl ? "(configured)" : "(not configured)"
735
- },
736
- features: {
737
- redaction: config.enableRedaction,
738
- compression: config.enableCompression
739
- },
740
- debug: config.debug
741
- };
742
- return {
743
- content: [
744
- {
745
- type: "text",
746
- text: JSON.stringify(status, null, 2)
747
- }
748
- ]
749
- };
181
+ const { status, data } = await apiCall("/health");
182
+ if (status !== 200) {
183
+ return {
184
+ content: [{ type: "text", text: `Health check error: ${data.error ?? "Unknown error"}` }],
185
+ isError: true
186
+ };
187
+ }
188
+ return { content: [{ type: "text", text: JSON.stringify(data, null, 2) }] };
750
189
  } catch (err) {
751
190
  const message = err instanceof Error ? err.message : String(err);
752
191
  return {
753
- content: [
754
- {
755
- type: "text",
756
- text: `Health check error: ${message}`
757
- }
758
- ],
192
+ content: [{ type: "text", text: `Health check error: ${message}` }],
759
193
  isError: true
760
194
  };
761
195
  }
@@ -765,37 +199,36 @@ async function main() {
765
199
  ToolName.COMPRESS,
766
200
  TOOL_DESCRIPTIONS[ToolName.COMPRESS],
767
201
  {
768
- query: z2.string().min(1, "Query must not be empty").max(1e5, "Query must be under 100,000 characters").describe(
769
- "The query used to determine which sentences in the context are relevant. Only sentences matching keywords from this query are retained."
202
+ query: z2.string().min(1).max(1e5).describe(
203
+ "The query used to determine which sentences in the context are relevant."
770
204
  ),
771
- context: z2.string().min(1, "Context must not be empty").max(5e5, "Context must be under 500,000 characters").describe(
772
- "The context text to compress. Sentences not relevant to the query will be removed to reduce token usage."
205
+ context: z2.string().min(1).max(5e5).describe(
206
+ "The context text to compress."
773
207
  )
774
208
  },
775
209
  async ({ query, context }) => {
776
210
  try {
777
- const result = compressContext(query, context);
211
+ const { status, data } = await apiCall("/compress", { query, context });
212
+ if (status !== 200) {
213
+ return {
214
+ content: [{ type: "text", text: `Compression error: ${data.error ?? "Unknown error"}` }],
215
+ isError: true
216
+ };
217
+ }
778
218
  const summary = [
779
219
  "Compressed context:",
780
- result.compressed,
220
+ data.compressed,
781
221
  "",
782
222
  "--- Compression Stats ---",
783
- `Original tokens: ${result.originalTokens}`,
784
- `Compressed tokens: ${result.compressedTokens}`,
785
- `Tokens saved: ${result.savedTokens} (${result.savingsPercent}%)`
223
+ `Original tokens: ${data.originalTokens}`,
224
+ `Compressed tokens: ${data.compressedTokens}`,
225
+ `Tokens saved: ${data.savedTokens} (${data.savingsPercent}%)`
786
226
  ];
787
- return {
788
- content: [{ type: "text", text: summary.join("\n") }]
789
- };
227
+ return { content: [{ type: "text", text: summary.join("\n") }] };
790
228
  } catch (err) {
791
229
  const message = err instanceof Error ? err.message : String(err);
792
230
  return {
793
- content: [
794
- {
795
- type: "text",
796
- text: `Compression error: ${message}`
797
- }
798
- ],
231
+ content: [{ type: "text", text: `Compression error: ${message}` }],
799
232
  isError: true
800
233
  };
801
234
  }
@@ -807,12 +240,8 @@ async function main() {
807
240
  const shutdown = async () => {
808
241
  log("Shutting down...");
809
242
  try {
810
- if (cache) {
811
- await cache.disconnect();
812
- }
813
243
  await server.close();
814
- } catch (err) {
815
- log(`Error during shutdown: ${err.message}`);
244
+ } catch {
816
245
  }
817
246
  process.exit(0);
818
247
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aris-mcp/server",
3
- "version": "1.0.2",
3
+ "version": "2.0.0",
4
4
  "type": "module",
5
5
  "bin": {
6
6
  "aris-mcp": "dist/index.js"
@@ -31,7 +31,6 @@
31
31
  },
32
32
  "dependencies": {
33
33
  "@modelcontextprotocol/sdk": "^1.12.1",
34
- "ioredis": "^5.6.1",
35
34
  "zod": "^3.24.4"
36
35
  },
37
36
  "devDependencies": {