@anyshift/mcp-proxy 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,90 @@
1
+ import { spawn } from 'child_process';
2
+ import { existsSync } from 'fs';
3
+ import path from 'path';
4
+ import { validatePathWithinAllowedDirs } from '../utils/pathValidation.js';
5
// Default timeout for jq execution (milliseconds).
const DEFAULT_TIMEOUT_MS = 30000;

// Query patterns that could let a jq program read environment variables or
// leak host information; matching queries are rejected before execution.
const DANGEROUS_PATTERNS = [
    /\$ENV/i, // $ENV variable access
    /env\./i, // env.VARIABLE access
    /@env/i, // @env function
    /\.env\[/i, // .env["VARIABLE"] access
    /getenv/i, // getenv function
    /\$__loc__/i, // location info that might leak paths
    /input_filename/i, // input filename access
];

/**
 * Execute a JQ query on a JSON file
 * @param config - JQ configuration ({ allowedPaths, timeoutMs? })
 * @param jqQuery - The JQ query to execute
 * @param filePath - Absolute path to the JSON file
 * @returns Promise with the query result (MCP-style { content: [...] })
 * @throws Error on missing input, disallowed query patterns, invalid paths,
 *         jq failure, or timeout
 */
export async function executeJqQuery(config, jqQuery, filePath) {
    // Input validation
    if (!jqQuery || !filePath) {
        throw new Error('jq_query and file_path are required');
    }
    // Sanitize jq query to prevent environment variable access
    if (DANGEROUS_PATTERNS.some((pattern) => pattern.test(jqQuery))) {
        throw new Error('The jq query contains patterns that could access environment variables or system information. Please use a different query.');
    }
    // Validate file path
    if (!path.isAbsolute(filePath)) {
        throw new Error(`File path must be an absolute path starting with "/": ${filePath}`);
    }
    if (!existsSync(filePath)) {
        throw new Error(`File not found: ${filePath}`);
    }
    // Validate file extension
    if (!filePath.toLowerCase().endsWith('.json')) {
        throw new Error(`Only JSON files (.json) are supported for jq processing: ${filePath}`);
    }
    // Validate path is within allowed directories. Use the returned real path
    // (symlinks resolved) so jq operates on the vetted target, not the alias.
    const realPath = validatePathWithinAllowedDirs(filePath, config.allowedPaths);
    // Execute jq query
    const timeoutMs = config.timeoutMs ?? DEFAULT_TIMEOUT_MS;
    return new Promise((resolve, reject) => {
        const jqProcess = spawn('jq', [jqQuery, realPath], {
            stdio: ['pipe', 'pipe', 'pipe'],
            timeout: timeoutMs,
        });
        let stdout = '';
        let stderr = '';
        jqProcess.stdout.on('data', (data) => {
            stdout += data.toString();
        });
        jqProcess.stderr.on('data', (data) => {
            stderr += data.toString();
        });
        // Fallback timeout: spawn's own `timeout` option sends SIGTERM, but we
        // also reject explicitly so callers get a clear error message. The
        // timer MUST be cleared once the process ends — otherwise it keeps the
        // event loop alive for up to `timeoutMs` and fires after the promise
        // has already settled.
        const timer = setTimeout(() => {
            if (!jqProcess.killed) {
                jqProcess.kill('SIGTERM');
            }
            reject(new Error(`jq command timed out after ${timeoutMs}ms`));
        }, timeoutMs);
        jqProcess.on('close', (code) => {
            clearTimeout(timer);
            if (code === 0) {
                // Success - return clean response directly to AI
                resolve({
                    content: [
                        {
                            type: 'text',
                            text: stdout.trim(),
                        },
                    ],
                });
            }
            else {
                // Error
                reject(new Error(`jq command failed with exit code ${code}: ${stderr.trim()}`));
            }
        });
        jqProcess.on('error', (error) => {
            clearTimeout(timer);
            reject(new Error(`Failed to execute jq command: ${error.message}`));
        });
    });
}
@@ -0,0 +1,51 @@
1
+ import { JqConfig } from './types.js';
2
/**
 * Create a JQ tool instance with the given configuration
 * @param config - JQ configuration
 * @returns Object with handler and toolDefinition
 */
export declare function createJqTool(config: JqConfig): {
    /**
     * Tool definition for MCP server registration
     */
    toolDefinition: {
        name: string;
        description: string;
        // JSON-Schema-shaped input description mirroring ExecuteJqQuerySchema.
        inputSchema: {
            type: string;
            properties: {
                jq_query: {
                    type: string;
                    description: string;
                };
                file_path: {
                    type: string;
                    description: string;
                };
                description: {
                    type: string;
                    description: string;
                };
            };
            required: string[];
        };
    };
    /**
     * Handler for JQ query execution requests
     * @param request - MCP request containing jq_query and file_path
     * @returns Promise with the query result
     */
    handler: (request: {
        params: {
            arguments: Record<string, unknown>;
        };
    }) => Promise<{
        content: Array<{
            type: "text";
            text: string;
        }>;
    }>;
};
// Convenience re-exports so consumers can import the whole jq surface
// (config type, schema, tool definition, and executor) from one module.
export type { JqConfig } from './types.js';
export { ExecuteJqQuerySchema, JQ_TOOL_DEFINITION } from './tool.js';
export { executeJqQuery } from './handler.js';
@@ -0,0 +1,26 @@
1
+ import { executeJqQuery } from './handler.js';
2
+ import { ExecuteJqQuerySchema, JQ_TOOL_DEFINITION } from './tool.js';
3
/**
 * Create a JQ tool instance with the given configuration
 * @param config - JQ configuration
 * @returns Object with handler and toolDefinition
 */
export function createJqTool(config) {
    /**
     * Handler for JQ query execution requests: validates the MCP arguments
     * against ExecuteJqQuerySchema, then delegates to executeJqQuery.
     * @param request - MCP request containing jq_query and file_path
     * @returns Promise with the query result
     */
    const handler = async (request) => {
        const args = ExecuteJqQuerySchema.parse(request.params.arguments);
        return executeJqQuery(config, args.jq_query, args.file_path);
    };
    return {
        // Tool definition for MCP server registration
        toolDefinition: JQ_TOOL_DEFINITION,
        handler,
    };
}
export { ExecuteJqQuerySchema, JQ_TOOL_DEFINITION } from './tool.js';
export { executeJqQuery } from './handler.js';
@@ -0,0 +1,43 @@
1
+ import { z } from 'zod';
2
/**
 * Zod schema for JQ query execution
 *
 * Parsed shape: { jq_query: string; file_path: string; description?: string }.
 */
export declare const ExecuteJqQuerySchema: z.ZodObject<{
    jq_query: z.ZodString;
    file_path: z.ZodString;
    description: z.ZodOptional<z.ZodString>;
}, "strip", z.ZodTypeAny, {
    jq_query: string;
    file_path: string;
    description?: string | undefined;
}, {
    jq_query: string;
    file_path: string;
    description?: string | undefined;
}>;
/**
 * Tool definition for JQ query execution with enhanced prompts
 * This includes schema-first development hints for better LLM usage
 */
export declare const JQ_TOOL_DEFINITION: {
    name: string;
    description: string;
    // JSON-Schema-shaped input description (mirrors ExecuteJqQuerySchema).
    inputSchema: {
        type: string;
        properties: {
            jq_query: {
                type: string;
                description: string;
            };
            file_path: {
                type: string;
                description: string;
            };
            description: {
                type: string;
                description: string;
            };
        };
        required: string[];
    };
};
@@ -0,0 +1,106 @@
1
+ import { z } from 'zod';
2
/**
 * Zod schema for JQ query execution
 *
 * Validates MCP tool arguments before they reach the jq handler:
 * `jq_query` and `file_path` are required strings; `description` is optional.
 */
export const ExecuteJqQuerySchema = z.object({
    jq_query: z
        .string()
        .describe('The jq query to execute on the JSON file. Query will be sanitized to prevent environment variable access.'),
    file_path: z
        .string()
        .describe('Absolute path starting with "/" pointing to the JSON file to process. Must be a valid, existing file with .json extension. The file will be validated for existence and readability before processing.'),
    description: z
        .string()
        .optional()
        .describe('Optional description; a short explanation of the purpose of the query'),
});
17
/**
 * Tool definition for JQ query execution with enhanced prompts
 * This includes schema-first development hints for better LLM usage
 *
 * The long `description` below is LLM-facing prompt text (syntax reference,
 * debugging workflow, retry strategies) sent verbatim to MCP clients; the
 * embedded backslash-escape examples are intentional and must not be
 * "fixed".
 * NOTE(review): the first two description fragments concatenate as
 * "strategies.This tool" (missing space) — confirm and fix upstream.
 */
export const JQ_TOOL_DEFINITION = {
    name: 'execute_jq_query',
    description: 'Execute a jq query on a JSON file with comprehensive debugging and retry strategies.' +
        'This tool processes JSON files using jq operations for data extraction, transformation, and filtering. ' +
        '\n\n⚠️ **CRITICAL SYNTAX RULE**: The jq_query parameter is a STRING. Use PLAIN quotes like .["0"] - DO NOT ESCAPE them as .[\"0\"] or .[\\"0\\"]. The MCP framework handles escaping automatically.' +
        '\n\n📋 **SCHEMA-FIRST WORKFLOW**: READ the schema from the file write response. For nullable fields use: select(.field != null) or .field // "default"' +
        '\n\nCRITICAL: When queries fail or return null/empty results, DO NOT abandon the jq approach immediately. Instead, follow the debugging workflow below.' +
        '\n\n## JQ SYNTAX REFERENCE (Fix Common Errors):' +
        '\n**QUOTE SYNTAX (MOST COMMON ERROR):**' +
        '\n- The jq_query is passed as a STRING to the jq binary' +
        '\n- Use PLAIN quotes: .["key"] NOT .[\"key\"] or .[\\"key\\"]' +
        '\n- DO NOT escape quotes - the parameter is already a string' +
        '\n- ✅ CORRECT: .["0"], .["field"], .field' +
        '\n- ❌ WRONG: .[\"0\"], .[\\"field\\"], .\\\"field\\\"' +
        '\n\n**ARRAY SLICING (Not Shell Commands):**' +
        '\n- First N items: .[:5] NOT head -5 or head(5)' +
        '\n- Last N items: .[-5:] NOT tail -5' +
        '\n- Skip first N: .[5:] NOT tail +6' +
        '\n- With limit: limit(10; .) NOT head(10)' +
        '\n- ✅ CORRECT: .[:10], limit(20; .), .[-5:]' +
        '\n- ❌ WRONG: head -10, head(20), tail -5' +
        '\n\n**COMMON FUNCTION MISTAKES:**' +
        '\n- ❌ head, tail, wc, cut, grep, awk, sed → These are SHELL commands, not jq!' +
        '\n- ✅ .[:N] → Take first N (array slice)' +
        '\n- ✅ limit(N; .) → Limit to N items' +
        '\n- ✅ length → Count items' +
        '\n- ✅ sort_by(.field) → Sort by field' +
        '\n- ✅ group_by(.field) → Group by field' +
        '\n\n**STRING OPERATIONS:**' +
        '\n- Regex: test("pattern") NOT test(\'pattern\')' +
        '\n- Contains: contains("text") or test("text")' +
        '\n- Split: split(",")' +
        '\n- Join: join(",")' +
        '\n\n**OBJECT CONSTRUCTION:** Arithmetic needs extra parens: {count: ((.items | length) + 1)} or use variables' +
        '\n**TYPE SAFETY:** Check schema types. Error "Cannot index X with Y" = wrong type, use: type == "object"' +
        '\n\n## DEBUGGING WORKFLOW (Use when queries fail or return unexpected results):' +
        '\n1. **Structure Inspection**: Start with `keys`, `type`, `length` to understand the data structure' +
        '\n2. **Single Record Access**: Test `.["0"]`, `.["1"]` to access specific items (for object with numeric string keys)' +
        '\n3. **Field Exploration**: Check `.["0"].Values`, `.["0"].Keys` to understand nested structures' +
        '\n4. **Build Incrementally**: Start with simple queries, add complexity step by step' +
        '\n5. **Test Without Filters**: Remove `select()` clauses to see if data exists' +
        '\n\n## CYPHER RESULT STRUCTURES (Common patterns from Neo4j/graph queries):' +
        '\n- **Objects with numeric string keys**: {"0": {...}, "1": {...}, "2": {...}} - Use `.["0"]`, NOT `.[0]`' +
        '\n- **Values/Keys arrays**: Each result has `.Values` and `.Keys` arrays with corresponding data' +
        '\n- **Null handling**: Many Values arrays contain null - always check for null before processing' +
        '\n\n## OBJECT vs ARRAY ACCESS PATTERNS:' +
        '\n✅ **For objects with numeric keys**: `.["0"].Values`, `to_entries[] | .value`, `.[keys[0]]`' +
        '\n❌ **Wrong**: `.[0].Values` (treats object as array - will fail)' +
        '\n✅ **Iteration over objects**: `to_entries[] | .value | ...` or `.[] | ...`' +
        '\n✅ **Safe null checking**: `select(.Values[0] != null)` or `select(.Values // [] | length > 0)`' +
        '\n\n## RETRY STRATEGIES (Try multiple approaches):' +
        '\n1. **Different iteration methods**: `.[]`, `to_entries[] | .value`, `.[keys[]]`' +
        '\n2. **Incremental filtering**: Start with no filters, add conditions one by one' +
        '\n3. **Alternative null handling**: Use `// empty`, `select(. != null)`, or `try ... catch`' +
        '\n4. **Simplified queries**: Break complex queries into smaller, testable parts' +
        '\n\n## COMPREHENSIVE EXAMPLES:' +
        '\n**Debugging sequence for Cypher results**:' +
        '\n- `keys` → ["0", "1", "2", ...] (shows object structure)' +
        '\n- `.["0"]` → {"Values": [...], "Keys": [...]} (shows single result format)' +
        '\n- `.["0"].Values` → [value1, value2, null, ...] (shows actual data)' +
        '\n- `to_entries[] | .value | select(.Values[0] != null)` → final query' +
        '\n\n**Common transformations**:' +
        '\n- Extract non-null records: `to_entries[] | .value | select(.Values[0] != null)`' +
        '\n- Build objects from Values/Keys: `.["0"] | {(.Keys[0]): .Values[0], (.Keys[1]): .Values[1]}`' +
        '\n- Count results: `to_entries | length` or `keys | length`' +
        '\n- Filter by position: `to_entries[] | select(.key | tonumber < 5) | .value`' +
        '\n\n**IMPORTANT**: If a query returns null/empty, try simpler versions first to verify data exists before assuming the approach is wrong.',
    // JSON-Schema-shaped argument description advertised to MCP clients;
    // kept in sync with ExecuteJqQuerySchema above.
    inputSchema: {
        type: 'object',
        properties: {
            jq_query: {
                type: 'string',
                description: 'The jq query to execute on the JSON file. Query will be sanitized to prevent environment variable access.',
            },
            file_path: {
                type: 'string',
                description: 'Absolute path starting with "/" pointing to the JSON file to process. Must be a valid, existing file with .json extension. The file will be validated for existence and readability before processing.',
            },
            description: {
                type: 'string',
                description: 'Optional description; a short explanation of the purpose of the query',
            },
        },
        required: ['jq_query', 'file_path'],
    },
};
@@ -0,0 +1,4 @@
1
/**
 * Re-export JqConfig from the main types module
 * (keeps the jq tool's configuration type importable from this package path)
 */
export type { JqConfig } from '../types/index.js';
@@ -0,0 +1 @@
1
// Compiled output of a type-only source file; the empty export keeps this
// emitted file an ES module.
export {};
@@ -0,0 +1,8 @@
1
/**
 * Truncation module for MCP response size limiting
 *
 * Provides token-based truncation to prevent context overflow in LLM responses.
 * Supports configurable limits, optional detailed logging, and helper functions.
 */
export { truncateResponseIfNeeded, estimateTokens, wouldBeTruncated, } from './truncate.js';
// Type-only export: limits, logging flag, message prefix, chars/token ratio.
export type { TruncationConfig } from './types.js';
@@ -0,0 +1,7 @@
1
/**
 * Truncation module for MCP response size limiting
 *
 * Provides token-based truncation to prevent context overflow in LLM responses.
 * Supports configurable limits, optional detailed logging, and helper functions.
 * (Runtime counterpart of index.d.ts; re-exports the truncate.js helpers.)
 */
export { truncateResponseIfNeeded, estimateTokens, wouldBeTruncated, } from './truncate.js';
@@ -0,0 +1,28 @@
1
+ import { TruncationConfig } from './types.js';
2
/**
 * Estimate token count using chars/token ratio
 * Uses a simple approximation where 1 token ≈ 4 characters
 * (heuristic, not a real tokenizer — counts are approximate)
 *
 * @param text - The text to estimate tokens for
 * @param charsPerToken - Characters per token ratio (default: 4)
 * @returns Estimated token count
 */
export declare function estimateTokens(text: string, charsPerToken?: number): number;
/**
 * Check if content would be truncated without actually truncating
 *
 * @param content - The content to check
 * @param maxTokens - Maximum token limit
 * @param charsPerToken - Characters per token ratio (default: 4)
 * @returns True if content exceeds token limit
 */
export declare function wouldBeTruncated(content: string, maxTokens: number, charsPerToken?: number): boolean;
/**
 * Truncate response content if it exceeds token limit
 * Optionally logs detailed metrics to stderr for monitoring
 *
 * @param config - Truncation configuration
 * @param content - The content to potentially truncate
 * @returns Original content if under limit, or truncated content with notice
 */
export declare function truncateResponseIfNeeded(config: TruncationConfig, content: string): string;
@@ -0,0 +1,80 @@
1
/** Approximate characters-per-token ratio used when none is configured. */
const DEFAULT_CHARS_PER_TOKEN = 4;

/**
 * Estimate token count using chars/token ratio
 * Uses a simple approximation where 1 token ≈ 4 characters
 *
 * @param text - The text to estimate tokens for
 * @param charsPerToken - Characters per token ratio (default: 4)
 * @returns Estimated token count
 */
export function estimateTokens(text, charsPerToken = DEFAULT_CHARS_PER_TOKEN) {
    return Math.ceil(text.length / charsPerToken);
}

/**
 * Check if content would be truncated without actually truncating
 *
 * @param content - The content to check
 * @param maxTokens - Maximum token limit
 * @param charsPerToken - Characters per token ratio (default: 4)
 * @returns True if content exceeds token limit
 */
export function wouldBeTruncated(content, maxTokens, charsPerToken = DEFAULT_CHARS_PER_TOKEN) {
    const estimated = estimateTokens(content, charsPerToken);
    return estimated > maxTokens;
}

/**
 * Truncate response content if it exceeds token limit
 * Optionally logs detailed metrics to stderr for monitoring
 *
 * @param config - Truncation configuration
 * @param content - The content to potentially truncate
 * @returns Original content if under limit, or truncated content with notice
 */
export function truncateResponseIfNeeded(config, content) {
    const charsPerToken = config.charsPerToken || DEFAULT_CHARS_PER_TOKEN;
    const estimatedTokens = estimateTokens(content, charsPerToken);
    const maxChars = config.maxTokens * charsPerToken;
    // Structured logging goes to stderr so it never interferes with the
    // stdio MCP protocol on stdout; it is a no-op unless enableLogging is set.
    const log = (record) => {
        if (config.enableLogging) {
            process.stderr.write(`${JSON.stringify(record)}\n`);
        }
    };
    log({
        event: 'truncation_check',
        content_length: content.length,
        estimated_tokens: estimatedTokens,
        max_token_limit: config.maxTokens,
        max_chars: maxChars,
        will_truncate: estimatedTokens > config.maxTokens,
    });
    // Under the limit: pass the content through unchanged.
    if (estimatedTokens <= config.maxTokens) {
        log({
            event: 'no_truncation_needed',
            estimated_tokens: estimatedTokens,
            limit: config.maxTokens,
        });
        return content;
    }
    // Over the limit: keep only the first maxChars characters.
    const truncated = content.substring(0, maxChars);
    log({
        event: 'content_truncated',
        original_length: content.length,
        original_tokens: estimatedTokens,
        truncated_length: truncated.length,
        truncated_tokens: estimateTokens(truncated, charsPerToken),
        limit: config.maxTokens,
    });
    const messagePrefix = config.messagePrefix || 'RESPONSE TRUNCATED';
    // Prepend a human/LLM-readable notice explaining why the payload shrank.
    return [
        `=== ${messagePrefix} ===`,
        `Estimated tokens: ${estimatedTokens} (limit: ${config.maxTokens})`,
        'Response truncated to prevent context overflow.',
        'Please refine your query to be more specific and fetch less data.',
        '=== END TRUNCATION NOTICE ===',
        '',
        truncated,
    ].join('\n');
}
@@ -0,0 +1,26 @@
1
/**
 * Configuration for response truncation
 *
 * Effective character budget is maxTokens * charsPerToken (see truncate.js).
 */
export interface TruncationConfig {
    /**
     * Maximum number of tokens allowed in response
     * Common values: 10000 (Datadog), 15000 (Anyshift)
     */
    maxTokens: number;
    /**
     * Enable detailed JSON logging to stderr
     * Useful for debugging and monitoring truncation behavior
     * Default: false
     */
    enableLogging?: boolean;
    /**
     * Custom prefix for truncation notice message
     * Default: "RESPONSE TRUNCATED"
     */
    messagePrefix?: string;
    /**
     * Characters per token ratio for estimation
     * Default: 4 (standard approximation)
     */
    charsPerToken?: number;
}
@@ -0,0 +1 @@
1
// Compiled output of a type-only source file; the empty export keeps this
// emitted file an ES module.
export {};
@@ -0,0 +1,52 @@
1
/**
 * Configuration for the file writer module
 */
export interface FileWriterConfig {
    /** Whether file writing is enabled */
    enabled: boolean;
    /** Directory where files should be written */
    outputPath: string;
    /** Minimum character count to trigger file writing (default: 1000) */
    minCharsForWrite?: number;
    /** Custom abbreviations for tool names in filenames */
    toolAbbreviations?: Record<string, string>;
}
/**
 * Configuration for the JQ tool
 */
export interface JqConfig {
    /** Paths where JQ is allowed to read files */
    allowedPaths: string[];
    /** Timeout for JQ query execution in milliseconds (default: 30000) */
    timeoutMs?: number;
}
/**
 * Result from file writer operations
 * (same { content: [{ type, text }] } shape the MCP tool handlers return)
 */
export interface FileWriterResult {
    content: Array<{
        type: string;
        text: string;
    }>;
}
/**
 * Schema analysis result for a JSON structure
 *
 * NOTE(review): the _-prefixed fields look like analyzer metadata (nulls
 * present, numeric-string keys, suggested jq access pattern) — confirm
 * against the schema analyzer implementation before relying on them.
 */
export interface JsonSchema {
    type: string;
    properties?: Record<string, unknown>;
    items?: unknown;
    length?: number;
    _hasNulls?: boolean;
    _keysAreNumeric?: boolean;
    _accessPattern?: string;
}
/**
 * Nullable fields extracted from schema
 */
export interface NullableFields {
    /** Fields that are always null */
    alwaysNull: string[];
    /** Fields that can be null (mixed types) */
    nullable: string[];
}
@@ -0,0 +1 @@
1
// Compiled output of a type-only source file; the empty export keeps this
// emitted file an ES module.
export {};
@@ -0,0 +1,8 @@
1
/**
 * Generate LLM-friendly compact filename
 *
 * Format: `<epochSeconds><millis>_<toolAbbrev>_<argsHash>.json`
 * (abbrev falls back to the first 6 chars of toolName; hash is 6 hex chars).
 *
 * @param toolName - Name of the tool that generated the data
 * @param args - Arguments passed to the tool
 * @param toolAbbreviations - Optional custom abbreviations for tool names
 * @returns Compact filename like "1697834567123_met_qry_a3b4c5.json"
 */
export declare const generateCompactFilename: (toolName: string, args: Record<string, unknown>, toolAbbreviations?: Record<string, string>) => string;
@@ -0,0 +1,42 @@
1
+ import crypto from 'crypto';
2
/** Argument keys never folded into the filename hash (credential-like). */
const EXCLUDED_HASH_KEYS = ['api_key', 'app_key', 'token', 'api_token'];

/**
 * Generate compact timestamp: unix epoch seconds followed by a
 * zero-padded millisecond component, e.g. "1697834567123".
 */
const generateCompactTimestamp = () => {
    const now = Date.now();
    const seconds = Math.floor(now / 1000);
    const millis = String(now % 1000).padStart(3, '0');
    return `${seconds}${millis}`;
};

/**
 * Generate short hash from arguments for collision resistance.
 * Credential-like keys and undefined values are excluded; returns "noargs"
 * when nothing remains to hash, otherwise the first 6 hex chars of an MD5
 * digest (non-cryptographic use — just filename uniqueness).
 */
const hashArgs = (args) => {
    const parts = [];
    for (const [key, value] of Object.entries(args)) {
        if (EXCLUDED_HASH_KEYS.includes(key.toLowerCase()) || value === undefined) {
            continue;
        }
        parts.push(`${key}=${value}`);
    }
    if (parts.length === 0) {
        return 'noargs';
    }
    const digest = crypto.createHash('md5').update(parts.join('_')).digest('hex');
    return digest.substring(0, 6);
};

/**
 * Generate LLM-friendly compact filename
 * @param toolName - Name of the tool that generated the data
 * @param args - Arguments passed to the tool
 * @param toolAbbreviations - Optional custom abbreviations for tool names
 * @returns Compact filename like "1697834567123_met_qry_a3b4c5.json"
 */
export const generateCompactFilename = (toolName, args, toolAbbreviations) => {
    const abbrev = toolAbbreviations?.[toolName] || toolName.substring(0, 6);
    return `${generateCompactTimestamp()}_${abbrev}_${hashArgs(args)}.json`;
};
@@ -0,0 +1,8 @@
1
/**
 * Validate that a file path is within allowed directories
 *
 * Symlinks are resolved before the containment check, so a link inside an
 * allowed directory pointing outside of it is rejected.
 *
 * @param filePath - The file path to validate
 * @param allowedPaths - Array of allowed directory paths
 * @returns The validated real path (with symlinks resolved)
 * @throws Error if path is invalid, doesn't exist, or is outside allowed directories
 */
export declare const validatePathWithinAllowedDirs: (filePath: string, allowedPaths: string[]) => string;
@@ -0,0 +1,42 @@
1
+ import { existsSync, realpathSync } from 'fs';
2
+ import path from 'path';
3
/**
 * Validate that a file path is within allowed directories
 * @param filePath - The file path to validate (must be absolute)
 * @param allowedPaths - Array of allowed directory paths
 * @returns The validated real path (with symlinks resolved)
 * @throws Error if the path is relative, doesn't exist, cannot be resolved,
 *         or is outside every allowed directory
 */
export const validatePathWithinAllowedDirs = (filePath, allowedPaths) => {
    // Require absolute paths for security and clarity
    if (!path.isAbsolute(filePath)) {
        throw new Error(`Absolute path required. Received: ${filePath}. Paths must start with "/" to prevent ambiguity.`);
    }
    // Resolve the absolute path (for normalization and symlink handling)
    const absolutePath = path.resolve(filePath);
    // Check if file exists before resolving real path (to handle symlinks)
    if (!existsSync(absolutePath)) {
        throw new Error(`File does not exist: ${filePath}`);
    }
    // Get the real path (resolves symlinks)
    let realPath;
    try {
        realPath = realpathSync(absolutePath);
    }
    catch {
        throw new Error(`Cannot resolve path: ${filePath}`);
    }
    // Check if path is within any of the allowed directories
    for (const allowedPath of allowedPaths) {
        // Resolve and normalize the allowed path. A configured allowed
        // directory that does not exist is skipped instead of letting
        // realpathSync throw a raw ENOENT and abort the whole check.
        let allowedPathReal;
        try {
            allowedPathReal = realpathSync(path.resolve(allowedPath));
        }
        catch {
            continue;
        }
        // Check if the real path is within this allowed directory
        // (exact match, or a descendant — the path.sep suffix prevents
        // "/allowed-extra" matching "/allowed").
        if (realPath === allowedPathReal || realPath.startsWith(allowedPathReal + path.sep)) {
            console.error(`[validatePathWithinAllowedDirs] Path allowed (within ${allowedPathReal}): ${realPath}`);
            return realPath;
        }
    }
    // Path is not within any allowed directories
    const allowedPathsList = allowedPaths.join(', ');
    throw new Error(`Access denied: File path must be within allowed directories (${allowedPathsList}). Attempted path: ${realPath}`);
};