@anyshift/mcp-proxy 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/unit/dynatrace.test.d.ts +1 -0
- package/dist/__tests__/unit/dynatrace.test.js +204 -0
- package/dist/fileWriter/dynatrace.d.ts +43 -0
- package/dist/fileWriter/dynatrace.js +173 -0
- package/dist/fileWriter/index.d.ts +4 -4
- package/dist/fileWriter/index.js +1 -1
- package/dist/fileWriter/types.d.ts +1 -1
- package/dist/fileWriter/writer.d.ts +3 -3
- package/dist/fileWriter/writer.js +145 -39
- package/dist/index.js +134 -56
- package/dist/jq/tool.js +2 -2
- package/dist/types/index.d.ts +18 -0
- package/dist/utils/filename.d.ts +8 -0
- package/dist/utils/filename.js +14 -4
- package/package.json +2 -2

package/dist/__tests__/unit/dynatrace.test.d.ts
ADDED
@@ -0,0 +1 @@
+export {};

package/dist/__tests__/unit/dynatrace.test.js
ADDED
@@ -0,0 +1,204 @@
+import { describe, it, expect } from '@jest/globals';
+import { parseDynatraceDqlResponse, isDynatraceDqlTool, } from '../../fileWriter/dynatrace.js';
+// Type guards for test assertions
+function isDqlSuccess(result) {
+    return (typeof result === 'object' &&
+        result !== null &&
+        'isDynatraceDql' in result &&
+        result.isDynatraceDql === true &&
+        'isError' in result &&
+        result.isError === false);
+}
+function isDqlError(result) {
+    return (typeof result === 'object' &&
+        result !== null &&
+        'isDynatraceDql' in result &&
+        result.isDynatraceDql === true &&
+        'isError' in result &&
+        result.isError === true);
+}
+describe('Dynatrace DQL Parser', () => {
+    describe('isDynatraceDqlTool', () => {
+        it('should return true for execute_dql tool', () => {
+            expect(isDynatraceDqlTool('execute_dql')).toBe(true);
+        });
+        it('should return false for other tools', () => {
+            expect(isDynatraceDqlTool('execute_jq_query')).toBe(false);
+            expect(isDynatraceDqlTool('get_metrics')).toBe(false);
+            expect(isDynatraceDqlTool('list_entities')).toBe(false);
+        });
+    });
+    describe('parseDynatraceDqlResponse - Success Responses', () => {
+        const successResponse = `📊 **DQL Query Results**
+
+- **Scanned Records:** 3,988,787
+- **Scanned Bytes:** 10.00 GB (Session total: 10.00 GB / 100 GB budget, 10.0% used)
+💡 **Moderate Data Usage:** This query scanned 10.00 GB of data.
+- **⚠️ Sampling Used:** Yes (results may be approximate)
+
+📋 **Query Results**: (50 records):
+
+\`\`\`json
+[
+{
+"faas.name": "compassdigital-order-v1-root",
+"logCount": "861021"
+},
+{
+"faas.name": "cdl-shoppingcart-v1-root",
+"logCount": "421993"
+}
+]
+\`\`\``;
+        it('should parse a success response correctly', () => {
+            const result = parseDynatraceDqlResponse(successResponse);
+            expect(result.isDynatraceDql).toBe(true);
+            expect(isDqlSuccess(result)).toBe(true);
+            if (isDqlSuccess(result)) {
+                expect(result.metadata.scannedRecords).toBe(3988787);
+                expect(result.metadata.scannedBytes).toBe('10.00 GB');
+                expect(result.metadata.sessionTotal).toBe('10.00 GB');
+                expect(result.metadata.sessionBudget).toBe('100 GB');
+                expect(result.metadata.budgetUsedPercent).toBe('10.0%');
+                expect(result.metadata.samplingUsed).toBe(true);
+                expect(result.metadata.recordCount).toBe(50);
+                // dataUsageNote is optional, may or may not be present depending on formatting
+                expect(result.data).toBeInstanceOf(Array);
+                expect(result.data.length).toBe(2);
+                expect(result.data[0]['faas.name']).toBe('compassdigital-order-v1-root');
+            }
+        });
+        it('should parse response without sampling info', () => {
+            const responseNoSampling = `📊 **DQL Query Results**
+
+- **Scanned Records:** 1,000
+- **Scanned Bytes:** 100 MB (Session total: 100 MB / 100 GB budget, 0.1% used)
+
+📋 **Query Results**: (10 records):
+
+\`\`\`json
+[{"id": 1}]
+\`\`\``;
+            const result = parseDynatraceDqlResponse(responseNoSampling);
+            expect(result.isDynatraceDql).toBe(true);
+            if (isDqlSuccess(result)) {
+                expect(result.metadata.samplingUsed).toBeUndefined();
+                expect(result.metadata.recordCount).toBe(10);
+            }
+        });
+        it('should handle single record count', () => {
+            const responseSingleRecord = `📊 **DQL Query Results**
+
+- **Scanned Records:** 100
+- **Scanned Bytes:** 1 MB
+
+📋 **Query Results**: (1 record):
+
+\`\`\`json
+{"id": 1}
+\`\`\``;
+            const result = parseDynatraceDqlResponse(responseSingleRecord);
+            expect(result.isDynatraceDql).toBe(true);
+            if (isDqlSuccess(result)) {
+                expect(result.metadata.recordCount).toBe(1);
+            }
+        });
+    });
+    describe('parseDynatraceDqlResponse - Error Responses', () => {
+        const errorResponse = `Client Request Error: PARAMETER_MUST_NOT_BE_AN_AGGREGATION. exceptionType: "DQL-SYNTAX-ERROR". syntaxErrorPosition: {"start":{"column":113,"index":112,"line":1},"end":{"column":119,"index":118,"line":1}}. errorType: "PARAMETER_MUST_NOT_BE_AN_AGGREGATION". errorMessage: "Aggregations aren't allowed here, but \`count()\` is an aggregation function.". arguments: ["count()"]. queryString: "fetch logs, scanLimitGBytes: 10, samplingRatio: 100, from: now()-7d | summarize count(), by: {faas.name} | sort count() desc | limit 50". errorMessageFormatSpecifierTypes: ["INPUT_QUERY_PART"]. errorMessageFormat: "Aggregations aren't allowed here, but \`%1$s\` is an aggregation function.". queryId: "9be6a579-545c-4157-957f-5d8d39129a78" with HTTP status: 400. (body: {"error":{"message":"PARAMETER_MUST_NOT_BE_AN_AGGREGATION","details":{"exceptionType":"DQL-SYNTAX-ERROR"}}})`;
+        it('should parse an error response correctly', () => {
+            const result = parseDynatraceDqlResponse(errorResponse);
+            expect(result.isDynatraceDql).toBe(true);
+            expect(isDqlError(result)).toBe(true);
+            if (isDqlError(result)) {
+                expect(result.errorMessage).toContain("Aggregations aren't allowed here");
+                expect(result.errorMessage).toContain('count()');
+                expect(result.errorMessage).toContain('Query:');
+                expect(result.errorMessage).toContain('HTTP 400');
+            }
+        });
+        it('should extract error type when no errorMessage field', () => {
+            const simpleError = `Client Request Error: INVALID_QUERY. exceptionType: "DQL-ERROR". with HTTP status: 400.`;
+            const result = parseDynatraceDqlResponse(simpleError);
+            expect(result.isDynatraceDql).toBe(true);
+            expect(isDqlError(result)).toBe(true);
+            if (isDqlError(result)) {
+                expect(result.errorMessage).toContain('INVALID_QUERY');
+                expect(result.errorMessage).toContain('HTTP 400');
+            }
+        });
+    });
+    describe('parseDynatraceDqlResponse - Non-Dynatrace Responses', () => {
+        it('should return isDynatraceDql: false for regular JSON', () => {
+            const regularJson = '{"status": "success", "data": [1, 2, 3]}';
+            const result = parseDynatraceDqlResponse(regularJson);
+            expect(result.isDynatraceDql).toBe(false);
+        });
+        it('should return isDynatraceDql: false for plain text', () => {
+            const plainText = 'This is just some plain text response';
+            const result = parseDynatraceDqlResponse(plainText);
+            expect(result.isDynatraceDql).toBe(false);
+        });
+        it('should return isDynatraceDql: false for other MCP tool outputs', () => {
+            const otherToolOutput = `Listed metrics:
+- metric.cpu.usage
+- metric.memory.used`;
+            const result = parseDynatraceDqlResponse(otherToolOutput);
+            expect(result.isDynatraceDql).toBe(false);
+        });
+    });
+    describe('Edge Cases', () => {
+        it('should handle malformed JSON in success response', () => {
+            const malformedJson = `📊 **DQL Query Results**
+
+- **Scanned Records:** 100
+
+📋 **Query Results**: (1 record):
+
+\`\`\`json
+{this is not valid json}
+\`\`\``;
+            const result = parseDynatraceDqlResponse(malformedJson);
+            expect(result.isDynatraceDql).toBe(true);
+            if (isDqlSuccess(result)) {
+                // Should still parse metadata
+                expect(result.metadata.scannedRecords).toBe(100);
+                // Data should be the raw text from code block since JSON parsing failed
+                expect(result.data).toBe('{this is not valid json}');
+            }
+        });
+        it('should handle empty JSON array', () => {
+            const emptyResults = `📊 **DQL Query Results**
+
+- **Scanned Records:** 0
+
+📋 **Query Results**: (0 records):
+
+\`\`\`json
+[]
+\`\`\``;
+            const result = parseDynatraceDqlResponse(emptyResults);
+            expect(result.isDynatraceDql).toBe(true);
+            if (isDqlSuccess(result)) {
+                expect(result.metadata.scannedRecords).toBe(0);
+                expect(result.metadata.recordCount).toBe(0);
+                expect(result.data).toEqual([]);
+            }
+        });
+        it('should handle response with no code block', () => {
+            const noCodeBlock = `📊 **DQL Query Results**
+
+- **Scanned Records:** 100
+
+📋 **Query Results**: (0 records):
+
+No data found.`;
+            const result = parseDynatraceDqlResponse(noCodeBlock);
+            expect(result.isDynatraceDql).toBe(true);
+            if (isDqlSuccess(result)) {
+                expect(result.metadata.scannedRecords).toBe(100);
+                expect(result.data).toBeNull();
+            }
+        });
+    });
+});

package/dist/fileWriter/dynatrace.d.ts
ADDED
@@ -0,0 +1,43 @@
+/**
+ * Dynatrace DQL Response Parser
+ *
+ * Parses Dynatrace DQL query responses to extract:
+ * - Metadata (scanned records, bytes, sampling info)
+ * - Data (the actual query results)
+ * - Errors (if the query failed)
+ */
+export interface DynatraceDqlMetadata {
+    scannedRecords?: number;
+    scannedBytes?: string;
+    sessionTotal?: string;
+    sessionBudget?: string;
+    budgetUsedPercent?: string;
+    dataUsageNote?: string;
+    samplingUsed?: boolean;
+    recordCount?: number;
+}
+export interface DynatraceDqlParsedResponse {
+    isDynatraceDql: true;
+    isError: false;
+    metadata: DynatraceDqlMetadata;
+    data: unknown;
+}
+export interface DynatraceDqlErrorResponse {
+    isDynatraceDql: true;
+    isError: true;
+    errorMessage: string;
+}
+export interface NotDynatraceDqlResponse {
+    isDynatraceDql: false;
+}
+export type DynatraceParseResult = DynatraceDqlParsedResponse | DynatraceDqlErrorResponse | NotDynatraceDqlResponse;
+/**
+ * Parse a Dynatrace DQL response
+ * @param text - The raw text response from Dynatrace
+ * @returns Parsed response with metadata/data or error, or indication that it's not a Dynatrace response
+ */
+export declare function parseDynatraceDqlResponse(text: string): DynatraceParseResult;
+/**
+ * Check if a tool name is a Dynatrace DQL tool
+ */
+export declare function isDynatraceDqlTool(toolName: string): boolean;

package/dist/fileWriter/dynatrace.js
ADDED
@@ -0,0 +1,173 @@
+/**
+ * Dynatrace DQL Response Parser
+ *
+ * Parses Dynatrace DQL query responses to extract:
+ * - Metadata (scanned records, bytes, sampling info)
+ * - Data (the actual query results)
+ * - Errors (if the query failed)
+ */
+/**
+ * Detect if a text response is a Dynatrace DQL error
+ */
+function isDynatraceError(text) {
+    return text.includes('Client Request Error:') && text.includes('HTTP status:');
+}
+/**
+ * Detect if a text response is a Dynatrace DQL success response
+ */
+function isDynatraceSuccess(text) {
+    return text.includes('📊 **DQL Query Results**');
+}
+/**
+ * Parse a Dynatrace error response - keeps more context for debugging
+ */
+function parseErrorResponse(text) {
+    // The error format is:
+    // Client Request Error: ERROR_TYPE. exceptionType: "...". ... errorMessage: "...". ...
+    const parts = [];
+    // Extract error type
+    const errorTypeMatch = text.match(/Client Request Error:\s*([^.]+)/);
+    if (errorTypeMatch) {
+        parts.push(errorTypeMatch[1].trim());
+    }
+    // Extract the human-readable error message
+    const errorMessageMatch = text.match(/errorMessage:\s*"([^"]+)"/);
+    if (errorMessageMatch) {
+        parts.push(errorMessageMatch[1]);
+    }
+    // Extract the query string for context
+    const queryStringMatch = text.match(/queryString:\s*"([^"]+)"/);
+    if (queryStringMatch) {
+        parts.push(`Query: ${queryStringMatch[1]}`);
+    }
+    // Extract syntax error position if available
+    const positionMatch = text.match(/syntaxErrorPosition:\s*(\{[^}]+\})/);
+    if (positionMatch) {
+        try {
+            const pos = JSON.parse(positionMatch[1]);
+            if (pos.start) {
+                parts.push(`Position: line ${pos.start.line}, column ${pos.start.column}`);
+            }
+        }
+        catch {
+            // Ignore JSON parse errors
+        }
+    }
+    // Extract HTTP status
+    const httpStatusMatch = text.match(/HTTP status:\s*(\d+)/);
+    if (httpStatusMatch) {
+        parts.push(`HTTP ${httpStatusMatch[1]}`);
+    }
+    if (parts.length > 0) {
+        return parts.join('. ');
+    }
+    // Last fallback: return a truncated version of the raw error
+    const firstLine = text.split('\n')[0];
+    if (firstLine.length > 500) {
+        return firstLine.substring(0, 500) + '...';
+    }
+    return firstLine;
+}
+/**
+ * Parse metadata from a Dynatrace DQL success response
+ */
+function parseMetadata(text) {
+    const metadata = {};
+    // Parse scanned records: "- **Scanned Records:** 3,988,787"
+    const scannedRecordsMatch = text.match(/\*\*Scanned Records:\*\*\s*([\d,]+)/);
+    if (scannedRecordsMatch) {
+        metadata.scannedRecords = parseInt(scannedRecordsMatch[1].replace(/,/g, ''), 10);
+    }
+    // Parse scanned bytes: "- **Scanned Bytes:** 10.00 GB (Session total: 10.00 GB / 100 GB budget, 10.0% used)"
+    const scannedBytesMatch = text.match(/\*\*Scanned Bytes:\*\*\s*([^\n(]+)/);
+    if (scannedBytesMatch) {
+        metadata.scannedBytes = scannedBytesMatch[1].trim();
+    }
+    // Parse session total and budget from the parenthetical
+    const sessionMatch = text.match(/Session total:\s*([^/]+)\/\s*([^,]+)\s*budget,\s*([^)%]+)%\s*used/);
+    if (sessionMatch) {
+        metadata.sessionTotal = sessionMatch[1].trim();
+        metadata.sessionBudget = sessionMatch[2].trim();
+        metadata.budgetUsedPercent = sessionMatch[3].trim() + '%';
+    }
+    // Parse data usage note: " 💡 **Moderate Data Usage:** This query scanned 10.00 GB of data."
+    const dataUsageMatch = text.match(/\s*💡\s*\*\*([^*]+)\*\*:\s*([^\n]+)/);
+    if (dataUsageMatch) {
+        metadata.dataUsageNote = `${dataUsageMatch[1]}: ${dataUsageMatch[2].trim()}`;
+    }
+    // Parse sampling info: "- **⚠️ Sampling Used:** Yes"
+    const samplingMatch = text.match(/\*\*⚠️?\s*Sampling Used:\*\*\s*(Yes|No)/i);
+    if (samplingMatch) {
+        metadata.samplingUsed = samplingMatch[1].toLowerCase() === 'yes';
+    }
+    // Parse record count: "📋 **Query Results**: (50 records):"
+    const recordCountMatch = text.match(/\*\*Query Results\*\*[^(]*\((\d+)\s*records?\)/);
+    if (recordCountMatch) {
+        metadata.recordCount = parseInt(recordCountMatch[1], 10);
+    }
+    return metadata;
+}
+/**
+ * Extract JSON data from a Dynatrace DQL success response
+ */
+function extractData(text) {
+    // The JSON is in a code block: ```json\n[...]\n```
+    const jsonBlockMatch = text.match(/```json\s*\n([\s\S]*?)\n```/);
+    if (jsonBlockMatch) {
+        try {
+            return JSON.parse(jsonBlockMatch[1]);
+        }
+        catch {
+            // If JSON parsing fails, return the raw text from the block
+            return jsonBlockMatch[1];
+        }
+    }
+    // Fallback: try to find a JSON array or object anywhere after the header
+    const jsonMatch = text.match(/(\[[\s\S]*\]|\{[\s\S]*\})/);
+    if (jsonMatch) {
+        try {
+            return JSON.parse(jsonMatch[1]);
+        }
+        catch {
+            return null;
+        }
+    }
+    return null;
+}
+/**
+ * Parse a Dynatrace DQL response
+ * @param text - The raw text response from Dynatrace
+ * @returns Parsed response with metadata/data or error, or indication that it's not a Dynatrace response
+ */
+export function parseDynatraceDqlResponse(text) {
+    // Check if it's an error response
+    if (isDynatraceError(text)) {
+        return {
+            isDynatraceDql: true,
+            isError: true,
+            errorMessage: parseErrorResponse(text),
+        };
+    }
+    // Check if it's a success response
+    if (isDynatraceSuccess(text)) {
+        const metadata = parseMetadata(text);
+        const data = extractData(text);
+        return {
+            isDynatraceDql: true,
+            isError: false,
+            metadata,
+            data,
+        };
+    }
+    // Not a Dynatrace DQL response
+    return {
+        isDynatraceDql: false,
+    };
+}
+/**
+ * Check if a tool name is a Dynatrace DQL tool
+ */
+export function isDynatraceDqlTool(toolName) {
+    // The tool that executes DQL queries
+    return toolName === 'execute_dql';
+}
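
The parser above is self-contained, so its behavior can be sanity-checked outside the proxy. A minimal sketch, assuming a relative import from the package's dist output; the execute_dql response text, service name, and counts below are illustrative, not taken from a real Dynatrace tenant:

// Sketch only: relative import path assumed from within the package's dist output.
import { parseDynatraceDqlResponse, isDynatraceDqlTool } from './fileWriter/dynatrace.js';

// Hypothetical raw text in the format the execute_dql tool returns.
const raw = [
    '📊 **DQL Query Results**',
    '',
    '- **Scanned Records:** 1,234',
    '',
    '📋 **Query Results**: (1 record):',
    '',
    '```json',
    '[{"faas.name": "example-service"}]',
    '```',
].join('\n');

if (isDynatraceDqlTool('execute_dql')) {
    const parsed = parseDynatraceDqlResponse(raw);
    if (parsed.isDynatraceDql && !parsed.isError) {
        // metadata.scannedRecords === 1234; data is the parsed JSON array from the code block.
        console.log(parsed.metadata.scannedRecords, parsed.data);
    }
}
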
package/dist/fileWriter/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { FileWriterConfig,
+import { FileWriterConfig, UnifiedToolResponse } from './types.js';
 /**
  * Create a file writer instance with the given configuration
  * @param config - File writer configuration
@@ -10,9 +10,9 @@ export declare function createFileWriter(config: FileWriterConfig): {
      * @param toolName - Name of the tool that generated the response
      * @param args - Arguments passed to the tool
      * @param responseData - The response data to potentially write to file
-     * @returns
+     * @returns UnifiedToolResponse with consistent structure
      */
-    handleResponse: (toolName: string, args: Record<string, unknown>, responseData: unknown) => Promise<
+    handleResponse: (toolName: string, args: Record<string, unknown>, responseData: unknown) => Promise<UnifiedToolResponse>;
 };
-export type { FileWriterConfig, FileWriterResult } from './types.js';
+export type { FileWriterConfig, FileWriterResult, UnifiedToolResponse } from './types.js';
 export { generateQueryAssistSchema } from './schema.js';

package/dist/fileWriter/index.js
CHANGED
@@ -11,7 +11,7 @@ export function createFileWriter(config) {
      * @param toolName - Name of the tool that generated the response
      * @param args - Arguments passed to the tool
      * @param responseData - The response data to potentially write to file
-     * @returns
+     * @returns UnifiedToolResponse with consistent structure
      */
     handleResponse: async (toolName, args, responseData) => {
         return handleToolResponse(config, toolName, args, responseData);

package/dist/fileWriter/writer.d.ts
CHANGED
@@ -1,10 +1,10 @@
-import { FileWriterConfig,
+import { FileWriterConfig, UnifiedToolResponse } from '../types/index.js';
 /**
  * Centralized response handler with file writing capability
  * @param config - File writer configuration
  * @param toolName - Name of the tool that generated the response
  * @param args - Arguments passed to the tool
  * @param responseData - The response data to potentially write to file
- * @returns
+ * @returns UnifiedToolResponse with consistent structure
  */
-export declare function handleToolResponse(config: FileWriterConfig, toolName: string, args: Record<string, unknown>, responseData: unknown): Promise<
+export declare function handleToolResponse(config: FileWriterConfig, toolName: string, args: Record<string, unknown>, responseData: unknown): Promise<UnifiedToolResponse>;

package/dist/fileWriter/writer.js
CHANGED
@@ -1,7 +1,8 @@
 import fs from 'fs/promises';
 import path from 'path';
-import {
+import { generateToolId } from '../utils/filename.js';
 import { generateQueryAssistSchema } from './schema.js';
+import { parseDynatraceDqlResponse, isDynatraceDqlTool, } from './dynatrace.js';
 // Default minimum character count to trigger file writing
 const DEFAULT_MIN_CHARS = 1000;
 /**
@@ -115,14 +116,23 @@ const extractContentForFile = (responseData) => {
     let parsedForSchema = null;
     if (rawText) {
         try {
-            // Try to parse the raw text as JSON
-            let
-
-
-
-
+            // Try to parse the raw text directly as JSON first
+            let parsed;
+            try {
+                parsed = JSON.parse(rawText);
+            }
+            catch {
+                // If direct parsing fails, try extracting JSON from prefixed patterns
+                // like "Listed incidents: {...}" or "Queried metrics data: [...]"
+                // Only match colon at the START, before any JSON content
+                const prefixMatch = rawText.match(/^[^[{]*?:\s*(\{.*\}|\[.*\])$/s);
+                if (prefixMatch) {
+                    parsed = JSON.parse(prefixMatch[1]);
+                }
+                else {
+                    throw new Error('Could not parse as JSON');
+                }
             }
-            const parsed = JSON.parse(jsonText);
             parsedForSchema = parsed;
             // Remove pagination-related fields before writing
             const { pagination, has_more, next_page, previous_page, page, page_size, total_pages, ...cleanData } = parsed;
@@ -151,83 +161,179 @@ const extractContentForFile = (responseData) => {
     }
     return { contentToWrite, parsedForSchema };
 };
+/**
+ * Handle Dynatrace DQL response - extracts metadata and data, writes structured file
+ * @param config - File writer configuration
+ * @param tool_id - Generated tool ID
+ * @param parsedDql - Parsed DQL response with metadata and data
+ * @returns UnifiedToolResponse or null if should fall through to default handling
+ */
+async function handleDynatraceDqlResponse(config, tool_id, parsedDql) {
+    // Build the structured content to write
+    const structuredContent = {
+        metadata: parsedDql.metadata,
+        data: parsedDql.data,
+    };
+    const contentToWrite = JSON.stringify(structuredContent, null, 2);
+    const contentLength = contentToWrite.length;
+    // If file writing is disabled, return null to fall through to default handling
+    if (!config.enabled || !config.outputPath) {
+        return null;
+    }
+    // Check character count threshold
+    const minChars = config.minCharsForWrite ?? DEFAULT_MIN_CHARS;
+    if (contentLength < minChars) {
+        return null;
+    }
+    // Write structured file
+    try {
+        const filename = `${tool_id}.json`;
+        const filepath = path.join(config.outputPath, filename);
+        await fs.mkdir(config.outputPath, { recursive: true });
+        await fs.writeFile(filepath, contentToWrite);
+        // Generate schema from the full structured content (metadata + data)
+        const fileSchema = generateQueryAssistSchema(structuredContent, {
+            maxDepth: config.schemaMaxDepth ?? 2,
+            maxPaths: config.schemaMaxPaths ?? 20,
+            maxKeys: config.schemaMaxKeys ?? 50,
+            dataSize: contentLength,
+        });
+        return {
+            tool_id,
+            wroteToFile: true,
+            filePath: filepath,
+            fileSchema,
+        };
+    }
+    catch (error) {
+        console.error(`[handleDynatraceDqlResponse] Error writing file:`, error);
+        return {
+            tool_id,
+            wroteToFile: false,
+            outputContent: structuredContent,
+            error: `File write failed: ${error instanceof Error ? error.message : String(error)}`,
+        };
+    }
+}
 /**
  * Centralized response handler with file writing capability
  * @param config - File writer configuration
  * @param toolName - Name of the tool that generated the response
  * @param args - Arguments passed to the tool
  * @param responseData - The response data to potentially write to file
- * @returns
+ * @returns UnifiedToolResponse with consistent structure
  */
 export async function handleToolResponse(config, toolName, args, responseData) {
+    // Generate tool_id for all responses
+    const tool_id = generateToolId(toolName, args, config.toolAbbreviations);
     // Some tools should always return directly to AI (never write to file)
     if (toolName === 'execute_jq_query' || toolName === 'get_label_schema') {
-
+        const { contentToWrite, parsedForSchema } = extractContentForFile(responseData);
+        return {
+            tool_id,
+            wroteToFile: false,
+            outputContent: parsedForSchema ?? contentToWrite,
+        };
+    }
+    // Handle Dynatrace DQL responses specially
+    if (isDynatraceDqlTool(toolName)) {
+        const rawText = extractRawText(responseData);
+        if (rawText) {
+            const parsedDql = parseDynatraceDqlResponse(rawText);
+            if (parsedDql.isDynatraceDql) {
+                // If it's an error, return error directly
+                if (parsedDql.isError) {
+                    return {
+                        tool_id,
+                        wroteToFile: false,
+                        error: parsedDql.errorMessage,
+                    };
+                }
+                // Try to handle as structured Dynatrace response
+                const dqlResult = await handleDynatraceDqlResponse(config, tool_id, parsedDql);
+                if (dqlResult) {
+                    return dqlResult;
+                }
+                // If dqlResult is null, fall through to return the structured content directly
+                return {
+                    tool_id,
+                    wroteToFile: false,
+                    outputContent: {
+                        metadata: parsedDql.metadata,
+                        data: parsedDql.data,
+                    },
+                };
+            }
+        }
+        // If not a recognized Dynatrace DQL format, fall through to default handling
     }
-    // If there's an error, return
+    // If there's an error, return error in unified format
     if (isErrorResponse(responseData)) {
         const errorMessage = extractErrorMessage(responseData);
         return {
-
-
-
-                    text: `Error: ${errorMessage}`,
-                },
-            ],
-            isError: true,
+            tool_id,
+            wroteToFile: false,
+            error: errorMessage,
         };
     }
-    // If file writing is disabled, just return the response
-    if (!config.enabled || !config.outputPath) {
-        return responseData;
-    }
     // Extract the content that will be written to file
     // This ensures we count the EXACT same content that will be written
     const { contentToWrite, parsedForSchema } = extractContentForFile(responseData);
+    // If file writing is disabled, return content directly
+    if (!config.enabled || !config.outputPath) {
+        return {
+            tool_id,
+            wroteToFile: false,
+            outputContent: parsedForSchema ?? contentToWrite,
+        };
+    }
     // Check character count threshold - if response is too short, return directly
     const contentLength = contentToWrite.length;
     const minChars = config.minCharsForWrite ?? DEFAULT_MIN_CHARS;
     if (contentLength < minChars) {
-        return
+        return {
+            tool_id,
+            wroteToFile: false,
+            outputContent: parsedForSchema ?? contentToWrite,
+        };
     }
     // Success case: write to file
     try {
-
-        const filename = generateCompactFilename(toolName, args, config.toolAbbreviations);
+        const filename = `${tool_id}.json`;
        const filepath = path.join(config.outputPath, filename);
         // Ensure output directory exists
         await fs.mkdir(config.outputPath, { recursive: true });
         // Write the exact content we counted
         await fs.writeFile(filepath, contentToWrite);
         // Generate query-assist schema if we have valid JSON
-        let
+        let fileSchema;
         if (parsedForSchema) {
             // Use the clean data (without pagination) for schema analysis
             const { pagination, has_more, next_page, previous_page, page, page_size, total_pages, ...cleanData } = parsedForSchema;
             // Generate compact query-assist schema using config values
             // Pass contentLength to avoid re-stringifying large payloads
-
+            fileSchema = generateQueryAssistSchema(cleanData, {
                 maxDepth: config.schemaMaxDepth ?? 2,
                 maxPaths: config.schemaMaxPaths ?? 20,
                 maxKeys: config.schemaMaxKeys ?? 50,
                 dataSize: contentLength
-            })
+            });
         }
-        // Count lines in the content
-        const lineCount = contentToWrite.split('\n').length;
-        // Return success message with file path, size, lines, and schema
         return {
-
-
-
-
-            },
-        ],
+            tool_id,
+            wroteToFile: true,
+            filePath: filepath,
+            fileSchema,
         };
     }
     catch (error) {
-        // If file writing fails, return the
+        // If file writing fails, return the content directly with error note
         console.error(`[handleToolResponse] Error writing file:`, error);
-        return
+        return {
+            tool_id,
+            wroteToFile: false,
+            outputContent: parsedForSchema ?? contentToWrite,
+            error: `File write failed: ${error instanceof Error ? error.message : String(error)}`,
+        };
     }
 }

package/dist/index.js
CHANGED
@@ -22,6 +22,36 @@ import { ListToolsRequestSchema, CallToolRequestSchema } from '@modelcontextprot
 import { createJqTool } from './jq/index.js';
 import { truncateResponseIfNeeded } from './truncation/index.js';
 import { createFileWriter } from './fileWriter/index.js';
+import { generateToolId } from './utils/filename.js';
+// ============================================================================
+// HELPER FUNCTIONS
+// ============================================================================
+/**
+ * Inject 'description' parameter into a tool's inputSchema
+ * This ensures LLMs explain why they're calling each tool
+ */
+function injectDescriptionParam(tool) {
+    // Clone the tool to avoid mutating the original
+    const modifiedTool = { ...tool };
+    if (!modifiedTool.inputSchema) {
+        modifiedTool.inputSchema = { type: 'object', properties: {} };
+    }
+    // Clone inputSchema
+    modifiedTool.inputSchema = { ...modifiedTool.inputSchema };
+    if (!modifiedTool.inputSchema.properties) {
+        modifiedTool.inputSchema.properties = {};
+    }
+    // Clone properties
+    modifiedTool.inputSchema.properties = { ...modifiedTool.inputSchema.properties };
+    // Only add if not already present
+    if (!modifiedTool.inputSchema.properties.description) {
+        modifiedTool.inputSchema.properties.description = {
+            type: 'string',
+            description: 'Brief explanation of why you are calling this tool and what you expect to learn/achieve'
+        };
+    }
+    return modifiedTool;
+}
 /**
  * ENVIRONMENT VARIABLE CONTRACT
  * =============================
@@ -267,11 +297,13 @@ async function main() {
        });
    }
    // ------------------------------------------------------------------------
-    // 5. REGISTER ALL TOOLS (CHILD + PROXY)
+    // 5. REGISTER ALL TOOLS (CHILD + PROXY) WITH DESCRIPTION INJECTION
    // ------------------------------------------------------------------------
+    // Inject 'description' parameter into all child tools
+    const enhancedChildTools = childToolsResponse.tools.map(injectDescriptionParam);
    const allTools = [
-        ...
-        ...(jqTool ? [jqTool.toolDefinition] : [])
+        ...enhancedChildTools,
+        ...(jqTool ? [jqTool.toolDefinition] : []) // JQ tool already has description param
    ];
    console.debug(`[mcp-proxy] Exposing ${allTools.length} tools total (${childToolsResponse.tools.length} from child${jqTool ? ' + 1 JQ' : ''})`);
    // ------------------------------------------------------------------------
@@ -281,13 +313,16 @@ async function main() {
        return { tools: allTools };
    });
    // ------------------------------------------------------------------------
-    // 7. HANDLE TOOL CALL REQUESTS (WITH
+    // 7. HANDLE TOOL CALL REQUESTS (WITH UNIFIED RESPONSE FORMAT)
    // ------------------------------------------------------------------------
    server.setRequestHandler(CallToolRequestSchema, async (request) => {
        const toolName = request.params.name;
        const toolArgs = request.params.arguments || {};
        if (ENABLE_LOGGING) {
            console.debug(`[mcp-proxy] Tool call: ${toolName}`);
+            if (toolArgs.description) {
+                console.debug(`[mcp-proxy] Description: ${toolArgs.description}`);
+            }
        }
        try {
            let result;
@@ -299,73 +334,116 @@ async function main() {
                result = await jqTool.handler({
                    params: { arguments: toolArgs }
                });
+                // JQ tool returns directly, wrap in unified format
+                const tool_id = generateToolId(toolName, toolArgs, fileWriterConfig.toolAbbreviations);
+                const unifiedResponse = {
+                    tool_id,
+                    wroteToFile: false,
+                    outputContent: result.content?.[0]?.text
+                };
+                return {
+                    content: [{
+                        type: 'text',
+                        text: JSON.stringify(unifiedResponse, null, 2)
+                    }],
+                    isError: result.isError
+                };
            }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            });
+            // Forward all other tools to child MCP (if child exists)
+            if (!childClient) {
+                const tool_id = generateToolId(toolName, toolArgs, fileWriterConfig.toolAbbreviations);
+                const errorResponse = {
+                    tool_id,
+                    wroteToFile: false,
+                    error: `Tool ${toolName} not available in standalone mode (no child MCP)`
+                };
+                return {
+                    content: [{
+                        type: 'text',
+                        text: JSON.stringify(errorResponse, null, 2)
+                    }],
+                    isError: true
+                };
+            }
+            if (ENABLE_LOGGING) {
+                console.debug(`[mcp-proxy] Forwarding to child MCP: ${toolName}`);
            }
-
+            result = await childClient.callTool({
+                name: toolName,
+                arguments: toolArgs
+            });
+            // Process result through file writer to get unified response
            if (result.content && Array.isArray(result.content) && result.content.length > 0) {
-
-
-
-
-
-
-
-
-                if (
-
-                    content: [{ type: 'text', text: item.text }]
-                });
-                // Check if file was actually written (file reference returned)
-                if (fileResult && fileResult.content && Array.isArray(fileResult.content) &&
-                    fileResult.content.length > 0 && fileResult.content[0].type === 'text') {
-                    const resultText = fileResult.content[0].text;
-                    // File reference contains "📄 File:" - this means file was written
-                    if (resultText.includes('📄 File:')) {
-                        item.text = resultText;
-                        fileWasWritten = true;
-                        if (ENABLE_LOGGING) {
-                            console.debug(`[mcp-proxy] File writing applied for ${toolName} (${originalLength} chars written to file)`);
-                        }
-                    }
-                }
+                const item = result.content[0];
+                if (item.type === 'text' && typeof item.text === 'string') {
+                    const originalLength = item.text.length;
+                    // Get unified response from file writer
+                    const unifiedResponse = await fileWriter.handleResponse(toolName, toolArgs, {
+                        content: [{ type: 'text', text: item.text }]
+                    });
+                    if (ENABLE_LOGGING) {
+                        if (unifiedResponse.wroteToFile) {
+                            console.debug(`[mcp-proxy] File written for ${toolName} (${originalLength} chars) → ${unifiedResponse.filePath}`);
                        }
-
-
-
-
-
-
+                        else {
+                            console.debug(`[mcp-proxy] Response for ${toolName} (${originalLength} chars) returned directly`);
+                        }
+                    }
+                    // If not written to file, apply truncation to outputContent
+                    if (!unifiedResponse.wroteToFile && unifiedResponse.outputContent) {
+                        const contentStr = typeof unifiedResponse.outputContent === 'string'
+                            ? unifiedResponse.outputContent
+                            : JSON.stringify(unifiedResponse.outputContent);
+                        const truncated = truncateResponseIfNeeded(truncationConfig, contentStr);
+                        if (truncated.length < contentStr.length) {
+                            if (ENABLE_LOGGING) {
+                                console.debug(`[mcp-proxy] Truncated response: ${contentStr.length} → ${truncated.length} chars`);
+                            }
+                            // Re-parse if it was JSON, otherwise keep as string
+                            try {
+                                unifiedResponse.outputContent = JSON.parse(truncated);
+                            }
+                            catch {
+                                unifiedResponse.outputContent = truncated;
                            }
                        }
                    }
+                    // Return unified response as JSON
+                    return {
+                        content: [{
+                            type: 'text',
+                            text: JSON.stringify(unifiedResponse, null, 2)
+                        }],
+                        isError: !!unifiedResponse.error
+                    };
                }
            }
-            return result
+            // Fallback: return result with generated tool_id
+            const tool_id = generateToolId(toolName, toolArgs, fileWriterConfig.toolAbbreviations);
+            const fallbackResponse = {
+                tool_id,
+                wroteToFile: false,
+                outputContent: result
+            };
+            return {
+                content: [{
+                    type: 'text',
+                    text: JSON.stringify(fallbackResponse, null, 2)
+                }]
+            };
        }
        catch (error) {
            console.error(`[mcp-proxy] Error executing tool ${toolName}:`, error);
+            const tool_id = generateToolId(toolName, toolArgs, fileWriterConfig.toolAbbreviations);
+            const errorResponse = {
+                tool_id,
+                wroteToFile: false,
+                error: `Error executing ${toolName}: ${error.message || String(error)}`
+            };
            return {
                content: [{
                    type: 'text',
-                    text:
+                    text: JSON.stringify(errorResponse, null, 2)
                }],
                isError: true
            };

package/dist/jq/tool.js
CHANGED
@@ -12,7 +12,7 @@ export const ExecuteJqQuerySchema = z.object({
     description: z
         .string()
         .optional()
-        .describe('
+        .describe('Brief explanation of why you are calling this tool and what you expect to learn/achieve'),
 });
 /**
  * Tool definition for JQ query execution with enhanced prompts
@@ -113,7 +113,7 @@ export const JQ_TOOL_DEFINITION = {
         },
         description: {
             type: 'string',
-            description: '
+            description: 'Brief explanation of why you are calling this tool and what you expect to learn/achieve',
         },
     },
     required: ['jq_query', 'file_path'],

package/dist/types/index.d.ts
CHANGED
@@ -56,3 +56,21 @@ export interface NullableFields {
     /** Fields that can be null (mixed types) */
     nullable: string[];
 }
+/**
+ * Unified response format for all tool calls
+ * Provides a consistent structure for LLM consumption
+ */
+export interface UnifiedToolResponse {
+    /** LLM-friendly unique identifier for this tool call */
+    tool_id: string;
+    /** Whether the response was written to a file */
+    wroteToFile: boolean;
+    /** Path to the file (only present if wroteToFile is true) */
+    filePath?: string;
+    /** Schema/structure guide for the data (only present if wroteToFile is true) */
+    fileSchema?: string;
+    /** The actual response content (only present if wroteToFile is false) */
+    outputContent?: unknown;
+    /** Error message if the tool call failed */
+    error?: string;
+}
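
Every proxied tool call now serializes this shape into the returned text content (see the index.js changes above). A minimal consumer-side sketch: the field names come from the interface in this diff, the interface is restated locally to keep the snippet self-contained, and the handleUnifiedResult helper is hypothetical:

// Sketch only: branch on the unified response after parsing the tool's text payload.
interface UnifiedToolResponse {
    tool_id: string;
    wroteToFile: boolean;
    filePath?: string;
    fileSchema?: string;
    outputContent?: unknown;
    error?: string;
}

function handleUnifiedResult(rawText: string): void {
    const res = JSON.parse(rawText) as UnifiedToolResponse;
    if (res.error) {
        console.error(`Tool ${res.tool_id} failed: ${res.error}`);
    }
    else if (res.wroteToFile) {
        // Large payloads land on disk; fileSchema summarizes their structure.
        console.log(`Tool ${res.tool_id} wrote ${res.filePath}`, res.fileSchema);
    }
    else {
        // Small payloads are returned inline.
        console.log(`Tool ${res.tool_id} returned`, res.outputContent);
    }
}
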
package/dist/utils/filename.d.ts
CHANGED
@@ -1,3 +1,11 @@
+/**
+ * Generate LLM-friendly tool ID (without file extension)
+ * @param toolName - Name of the tool that generated the data
+ * @param args - Arguments passed to the tool
+ * @param toolAbbreviations - Optional custom abbreviations for tool names
+ * @returns Tool ID like "1697834567123_met_qry_a3b4c5"
+ */
+export declare const generateToolId: (toolName: string, args: Record<string, unknown>, toolAbbreviations?: Record<string, string>) => string;
 /**
  * Generate LLM-friendly compact filename
  * @param toolName - Name of the tool that generated the data

package/dist/utils/filename.js
CHANGED
@@ -28,15 +28,25 @@ const hashArgs = (args) => {
         .substring(0, 6);
 };
 /**
- * Generate LLM-friendly
+ * Generate LLM-friendly tool ID (without file extension)
  * @param toolName - Name of the tool that generated the data
  * @param args - Arguments passed to the tool
  * @param toolAbbreviations - Optional custom abbreviations for tool names
- * @returns
+ * @returns Tool ID like "1697834567123_met_qry_a3b4c5"
  */
-export const
+export const generateToolId = (toolName, args, toolAbbreviations) => {
     const timestamp = generateCompactTimestamp();
     const toolAbbrev = toolAbbreviations?.[toolName] || toolName.substring(0, 6);
     const argsHash = hashArgs(args);
-    return `${timestamp}_${toolAbbrev}_${argsHash}
+    return `${timestamp}_${toolAbbrev}_${argsHash}`;
+};
+/**
+ * Generate LLM-friendly compact filename
+ * @param toolName - Name of the tool that generated the data
+ * @param args - Arguments passed to the tool
+ * @param toolAbbreviations - Optional custom abbreviations for tool names
+ * @returns Compact filename like "1697834567123_met_qry_a3b4c5.json"
+ */
+export const generateCompactFilename = (toolName, args, toolAbbreviations) => {
+    return `${generateToolId(toolName, args, toolAbbreviations)}.json`;
 };
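
After this refactor, generateCompactFilename is a thin wrapper over generateToolId, so the identifier that names the on-disk file is the same tool_id surfaced in the unified response. A small usage sketch, assuming a relative import from the package's dist output; the example ID in the comment is illustrative, since the timestamp and hash depend on when and with what arguments the call is made:

// Sketch only: relative import path assumed from within the package's dist output.
import { generateToolId, generateCompactFilename } from './utils/filename.js';

const args = { query: 'fetch logs | limit 50' };

// Something like "1697834567123_execut_a3b4c5": timestamp, tool abbreviation, args hash.
// 'execute_dql' has no custom abbreviation here, so its first 6 characters are used.
const toolId = generateToolId('execute_dql', args);

// Same kind of ID with a ".json" suffix; this is what the file writer uses
// when a large response is written to disk.
const filename = generateCompactFilename('execute_dql', args);

console.log(toolId, filename);
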
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@anyshift/mcp-proxy",
-  "version": "0.3.
+  "version": "0.3.2",
   "description": "Generic MCP proxy that adds truncation, file writing, and JQ capabilities to any MCP server",
   "type": "module",
   "main": "dist/index.js",
@@ -13,7 +13,7 @@
     "README.md"
   ],
   "dependencies": {
-    "@modelcontextprotocol/sdk": "^1.
+    "@modelcontextprotocol/sdk": "^1.24.0",
     "zod": "^3.24.2"
   },
   "devDependencies": {