@anyshift/mcp-proxy 0.3.1 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ export {};
@@ -0,0 +1,204 @@
1
+ import { describe, it, expect } from '@jest/globals';
2
+ import { parseDynatraceDqlResponse, isDynatraceDqlTool, } from '../../fileWriter/dynatrace.js';
3
+ // Type guards for test assertions
4
/**
 * Narrowing type guard for test assertions: true only when `result` is a
 * parsed Dynatrace DQL success (isDynatraceDql === true, isError === false).
 */
function isDqlSuccess(result) {
    if (typeof result !== 'object' || result === null) {
        return false;
    }
    const hasFlags = 'isDynatraceDql' in result && 'isError' in result;
    return hasFlags && result.isDynatraceDql === true && result.isError === false;
}
12
/**
 * Narrowing type guard for test assertions: true only when `result` is a
 * parsed Dynatrace DQL error (isDynatraceDql === true, isError === true).
 */
function isDqlError(result) {
    if (typeof result !== 'object' || result === null) {
        return false;
    }
    const hasFlags = 'isDynatraceDql' in result && 'isError' in result;
    return hasFlags && result.isDynatraceDql === true && result.isError === true;
}
20
+ describe('Dynatrace DQL Parser', () => {
21
+ describe('isDynatraceDqlTool', () => {
22
+ it('should return true for execute_dql tool', () => {
23
+ expect(isDynatraceDqlTool('execute_dql')).toBe(true);
24
+ });
25
+ it('should return false for other tools', () => {
26
+ expect(isDynatraceDqlTool('execute_jq_query')).toBe(false);
27
+ expect(isDynatraceDqlTool('get_metrics')).toBe(false);
28
+ expect(isDynatraceDqlTool('list_entities')).toBe(false);
29
+ });
30
+ });
31
+ describe('parseDynatraceDqlResponse - Success Responses', () => {
32
+ const successResponse = `📊 **DQL Query Results**
33
+
34
+ - **Scanned Records:** 3,988,787
35
+ - **Scanned Bytes:** 10.00 GB (Session total: 10.00 GB / 100 GB budget, 10.0% used)
36
+ 💡 **Moderate Data Usage:** This query scanned 10.00 GB of data.
37
+ - **⚠️ Sampling Used:** Yes (results may be approximate)
38
+
39
+ 📋 **Query Results**: (50 records):
40
+
41
+ \`\`\`json
42
+ [
43
+ {
44
+ "faas.name": "compassdigital-order-v1-root",
45
+ "logCount": "861021"
46
+ },
47
+ {
48
+ "faas.name": "cdl-shoppingcart-v1-root",
49
+ "logCount": "421993"
50
+ }
51
+ ]
52
+ \`\`\``;
53
+ it('should parse a success response correctly', () => {
54
+ const result = parseDynatraceDqlResponse(successResponse);
55
+ expect(result.isDynatraceDql).toBe(true);
56
+ expect(isDqlSuccess(result)).toBe(true);
57
+ if (isDqlSuccess(result)) {
58
+ expect(result.metadata.scannedRecords).toBe(3988787);
59
+ expect(result.metadata.scannedBytes).toBe('10.00 GB');
60
+ expect(result.metadata.sessionTotal).toBe('10.00 GB');
61
+ expect(result.metadata.sessionBudget).toBe('100 GB');
62
+ expect(result.metadata.budgetUsedPercent).toBe('10.0%');
63
+ expect(result.metadata.samplingUsed).toBe(true);
64
+ expect(result.metadata.recordCount).toBe(50);
65
+ // dataUsageNote is optional, may or may not be present depending on formatting
66
+ expect(result.data).toBeInstanceOf(Array);
67
+ expect(result.data.length).toBe(2);
68
+ expect(result.data[0]['faas.name']).toBe('compassdigital-order-v1-root');
69
+ }
70
+ });
71
+ it('should parse response without sampling info', () => {
72
+ const responseNoSampling = `📊 **DQL Query Results**
73
+
74
+ - **Scanned Records:** 1,000
75
+ - **Scanned Bytes:** 100 MB (Session total: 100 MB / 100 GB budget, 0.1% used)
76
+
77
+ 📋 **Query Results**: (10 records):
78
+
79
+ \`\`\`json
80
+ [{"id": 1}]
81
+ \`\`\``;
82
+ const result = parseDynatraceDqlResponse(responseNoSampling);
83
+ expect(result.isDynatraceDql).toBe(true);
84
+ if (isDqlSuccess(result)) {
85
+ expect(result.metadata.samplingUsed).toBeUndefined();
86
+ expect(result.metadata.recordCount).toBe(10);
87
+ }
88
+ });
89
+ it('should handle single record count', () => {
90
+ const responseSingleRecord = `📊 **DQL Query Results**
91
+
92
+ - **Scanned Records:** 100
93
+ - **Scanned Bytes:** 1 MB
94
+
95
+ 📋 **Query Results**: (1 record):
96
+
97
+ \`\`\`json
98
+ {"id": 1}
99
+ \`\`\``;
100
+ const result = parseDynatraceDqlResponse(responseSingleRecord);
101
+ expect(result.isDynatraceDql).toBe(true);
102
+ if (isDqlSuccess(result)) {
103
+ expect(result.metadata.recordCount).toBe(1);
104
+ }
105
+ });
106
+ });
107
+ describe('parseDynatraceDqlResponse - Error Responses', () => {
108
+ const errorResponse = `Client Request Error: PARAMETER_MUST_NOT_BE_AN_AGGREGATION. exceptionType: "DQL-SYNTAX-ERROR". syntaxErrorPosition: {"start":{"column":113,"index":112,"line":1},"end":{"column":119,"index":118,"line":1}}. errorType: "PARAMETER_MUST_NOT_BE_AN_AGGREGATION". errorMessage: "Aggregations aren't allowed here, but \`count()\` is an aggregation function.". arguments: ["count()"]. queryString: "fetch logs, scanLimitGBytes: 10, samplingRatio: 100, from: now()-7d | summarize count(), by: {faas.name} | sort count() desc | limit 50". errorMessageFormatSpecifierTypes: ["INPUT_QUERY_PART"]. errorMessageFormat: "Aggregations aren't allowed here, but \`%1$s\` is an aggregation function.". queryId: "9be6a579-545c-4157-957f-5d8d39129a78" with HTTP status: 400. (body: {"error":{"message":"PARAMETER_MUST_NOT_BE_AN_AGGREGATION","details":{"exceptionType":"DQL-SYNTAX-ERROR"}}})`;
109
+ it('should parse an error response correctly', () => {
110
+ const result = parseDynatraceDqlResponse(errorResponse);
111
+ expect(result.isDynatraceDql).toBe(true);
112
+ expect(isDqlError(result)).toBe(true);
113
+ if (isDqlError(result)) {
114
+ expect(result.errorMessage).toContain("Aggregations aren't allowed here");
115
+ expect(result.errorMessage).toContain('count()');
116
+ expect(result.errorMessage).toContain('Query:');
117
+ expect(result.errorMessage).toContain('HTTP 400');
118
+ }
119
+ });
120
+ it('should extract error type when no errorMessage field', () => {
121
+ const simpleError = `Client Request Error: INVALID_QUERY. exceptionType: "DQL-ERROR". with HTTP status: 400.`;
122
+ const result = parseDynatraceDqlResponse(simpleError);
123
+ expect(result.isDynatraceDql).toBe(true);
124
+ expect(isDqlError(result)).toBe(true);
125
+ if (isDqlError(result)) {
126
+ expect(result.errorMessage).toContain('INVALID_QUERY');
127
+ expect(result.errorMessage).toContain('HTTP 400');
128
+ }
129
+ });
130
+ });
131
+ describe('parseDynatraceDqlResponse - Non-Dynatrace Responses', () => {
132
+ it('should return isDynatraceDql: false for regular JSON', () => {
133
+ const regularJson = '{"status": "success", "data": [1, 2, 3]}';
134
+ const result = parseDynatraceDqlResponse(regularJson);
135
+ expect(result.isDynatraceDql).toBe(false);
136
+ });
137
+ it('should return isDynatraceDql: false for plain text', () => {
138
+ const plainText = 'This is just some plain text response';
139
+ const result = parseDynatraceDqlResponse(plainText);
140
+ expect(result.isDynatraceDql).toBe(false);
141
+ });
142
+ it('should return isDynatraceDql: false for other MCP tool outputs', () => {
143
+ const otherToolOutput = `Listed metrics:
144
+ - metric.cpu.usage
145
+ - metric.memory.used`;
146
+ const result = parseDynatraceDqlResponse(otherToolOutput);
147
+ expect(result.isDynatraceDql).toBe(false);
148
+ });
149
+ });
150
+ describe('Edge Cases', () => {
151
+ it('should handle malformed JSON in success response', () => {
152
+ const malformedJson = `📊 **DQL Query Results**
153
+
154
+ - **Scanned Records:** 100
155
+
156
+ 📋 **Query Results**: (1 record):
157
+
158
+ \`\`\`json
159
+ {this is not valid json}
160
+ \`\`\``;
161
+ const result = parseDynatraceDqlResponse(malformedJson);
162
+ expect(result.isDynatraceDql).toBe(true);
163
+ if (isDqlSuccess(result)) {
164
+ // Should still parse metadata
165
+ expect(result.metadata.scannedRecords).toBe(100);
166
+ // Data should be the raw text from code block since JSON parsing failed
167
+ expect(result.data).toBe('{this is not valid json}');
168
+ }
169
+ });
170
+ it('should handle empty JSON array', () => {
171
+ const emptyResults = `📊 **DQL Query Results**
172
+
173
+ - **Scanned Records:** 0
174
+
175
+ 📋 **Query Results**: (0 records):
176
+
177
+ \`\`\`json
178
+ []
179
+ \`\`\``;
180
+ const result = parseDynatraceDqlResponse(emptyResults);
181
+ expect(result.isDynatraceDql).toBe(true);
182
+ if (isDqlSuccess(result)) {
183
+ expect(result.metadata.scannedRecords).toBe(0);
184
+ expect(result.metadata.recordCount).toBe(0);
185
+ expect(result.data).toEqual([]);
186
+ }
187
+ });
188
+ it('should handle response with no code block', () => {
189
+ const noCodeBlock = `📊 **DQL Query Results**
190
+
191
+ - **Scanned Records:** 100
192
+
193
+ 📋 **Query Results**: (0 records):
194
+
195
+ No data found.`;
196
+ const result = parseDynatraceDqlResponse(noCodeBlock);
197
+ expect(result.isDynatraceDql).toBe(true);
198
+ if (isDqlSuccess(result)) {
199
+ expect(result.metadata.scannedRecords).toBe(100);
200
+ expect(result.data).toBeNull();
201
+ }
202
+ });
203
+ });
204
+ });
@@ -0,0 +1,43 @@
1
+ /**
2
+ * Dynatrace DQL Response Parser
3
+ *
4
+ * Parses Dynatrace DQL query responses to extract:
5
+ * - Metadata (scanned records, bytes, sampling info)
6
+ * - Data (the actual query results)
7
+ * - Errors (if the query failed)
8
+ */
9
+ export interface DynatraceDqlMetadata {
10
+ scannedRecords?: number;
11
+ scannedBytes?: string;
12
+ sessionTotal?: string;
13
+ sessionBudget?: string;
14
+ budgetUsedPercent?: string;
15
+ dataUsageNote?: string;
16
+ samplingUsed?: boolean;
17
+ recordCount?: number;
18
+ }
19
+ export interface DynatraceDqlParsedResponse {
20
+ isDynatraceDql: true;
21
+ isError: false;
22
+ metadata: DynatraceDqlMetadata;
23
+ data: unknown;
24
+ }
25
+ export interface DynatraceDqlErrorResponse {
26
+ isDynatraceDql: true;
27
+ isError: true;
28
+ errorMessage: string;
29
+ }
30
+ export interface NotDynatraceDqlResponse {
31
+ isDynatraceDql: false;
32
+ }
33
+ export type DynatraceParseResult = DynatraceDqlParsedResponse | DynatraceDqlErrorResponse | NotDynatraceDqlResponse;
34
+ /**
35
+ * Parse a Dynatrace DQL response
36
+ * @param text - The raw text response from Dynatrace
37
+ * @returns Parsed response with metadata/data or error, or indication that it's not a Dynatrace response
38
+ */
39
+ export declare function parseDynatraceDqlResponse(text: string): DynatraceParseResult;
40
+ /**
41
+ * Check if a tool name is a Dynatrace DQL tool
42
+ */
43
+ export declare function isDynatraceDqlTool(toolName: string): boolean;
@@ -0,0 +1,173 @@
1
+ /**
2
+ * Dynatrace DQL Response Parser
3
+ *
4
+ * Parses Dynatrace DQL query responses to extract:
5
+ * - Metadata (scanned records, bytes, sampling info)
6
+ * - Data (the actual query results)
7
+ * - Errors (if the query failed)
8
+ */
9
/**
 * Detect if a text response is a Dynatrace DQL error.
 * Error payloads carry both the "Client Request Error:" prefix and an
 * "HTTP status:" marker.
 */
function isDynatraceError(text) {
    const requiredMarkers = ['Client Request Error:', 'HTTP status:'];
    return requiredMarkers.every((marker) => text.includes(marker));
}
15
/**
 * Detect if a text response is a Dynatrace DQL success response
 * (identified by its fixed markdown header line).
 */
function isDynatraceSuccess(text) {
    const successHeader = '📊 **DQL Query Results**';
    return text.includes(successHeader);
}
21
/**
 * Parse a Dynatrace error response into a compact human-readable message,
 * keeping context useful for debugging.
 *
 * Extracts, when present: the error type following "Client Request Error:",
 * the quoted errorMessage, the original queryString, the syntax error
 * position, and the HTTP status code. Falls back to the (truncated) first
 * line of the raw text when none of the structured fields match.
 *
 * @param {string} text - Raw error text from Dynatrace
 * @returns {string} Period-joined summary of the extracted parts
 */
function parseErrorResponse(text) {
    // The error format is:
    // Client Request Error: ERROR_TYPE. exceptionType: "...". ... errorMessage: "...". ...
    const parts = [];
    // Extract error type
    const errorTypeMatch = text.match(/Client Request Error:\s*([^.]+)/);
    if (errorTypeMatch) {
        parts.push(errorTypeMatch[1].trim());
    }
    // Extract the human-readable error message
    const errorMessageMatch = text.match(/errorMessage:\s*"([^"]+)"/);
    if (errorMessageMatch) {
        parts.push(errorMessageMatch[1]);
    }
    // Extract the query string for context
    const queryStringMatch = text.match(/queryString:\s*"([^"]+)"/);
    if (queryStringMatch) {
        parts.push(`Query: ${queryStringMatch[1]}`);
    }
    // Extract syntax error position if available. The payload is a NESTED
    // JSON object, e.g. {"start":{"column":1,...},"end":{...}} — the previous
    // pattern \{[^}]+\} stopped at the first inner "}" and produced an
    // unparseable fragment, so the position was always silently dropped.
    // This pattern allows one level of nested braces.
    const positionMatch = text.match(/syntaxErrorPosition:\s*(\{(?:[^{}]|\{[^{}]*\})*\})/);
    if (positionMatch) {
        try {
            const pos = JSON.parse(positionMatch[1]);
            if (pos.start) {
                parts.push(`Position: line ${pos.start.line}, column ${pos.start.column}`);
            }
        }
        catch {
            // Position is best-effort; ignore JSON parse errors
        }
    }
    // Extract HTTP status
    const httpStatusMatch = text.match(/HTTP status:\s*(\d+)/);
    if (httpStatusMatch) {
        parts.push(`HTTP ${httpStatusMatch[1]}`);
    }
    if (parts.length > 0) {
        return parts.join('. ');
    }
    // Last fallback: return a truncated version of the raw error
    const firstLine = text.split('\n')[0];
    if (firstLine.length > 500) {
        return firstLine.substring(0, 500) + '...';
    }
    return firstLine;
}
71
/**
 * Extract the metadata block (scan stats, session budget, sampling flag,
 * record count) from a Dynatrace DQL success response.
 *
 * Every field is optional: a pattern that does not match simply leaves the
 * corresponding property unset.
 *
 * @param {string} text - Raw success response text
 * @returns {object} Partially-filled metadata object
 */
function parseMetadata(text) {
    const meta = {};
    // "- **Scanned Records:** 3,988,787" — thousands separators stripped
    const recordsMatch = text.match(/\*\*Scanned Records:\*\*\s*([\d,]+)/);
    if (recordsMatch) {
        meta.scannedRecords = parseInt(recordsMatch[1].replace(/,/g, ''), 10);
    }
    // "- **Scanned Bytes:** 10.00 GB (...)" — stop before the parenthetical
    const bytesMatch = text.match(/\*\*Scanned Bytes:\*\*\s*([^\n(]+)/);
    if (bytesMatch) {
        meta.scannedBytes = bytesMatch[1].trim();
    }
    // "(Session total: 10.00 GB / 100 GB budget, 10.0% used)"
    const sessionMatch = text.match(/Session total:\s*([^/]+)\/\s*([^,]+)\s*budget,\s*([^)%]+)%\s*used/);
    if (sessionMatch) {
        const [, total, budget, usedPercent] = sessionMatch;
        meta.sessionTotal = total.trim();
        meta.sessionBudget = budget.trim();
        meta.budgetUsedPercent = usedPercent.trim() + '%';
    }
    // Data usage note, e.g. " 💡 **Moderate Data Usage**: ..." — only matches
    // when the colon follows the closing "**"
    const usageMatch = text.match(/\s*💡\s*\*\*([^*]+)\*\*:\s*([^\n]+)/);
    if (usageMatch) {
        meta.dataUsageNote = `${usageMatch[1]}: ${usageMatch[2].trim()}`;
    }
    // "- **⚠️ Sampling Used:** Yes" → boolean flag
    const samplingMatch = text.match(/\*\*⚠️?\s*Sampling Used:\*\*\s*(Yes|No)/i);
    if (samplingMatch) {
        meta.samplingUsed = samplingMatch[1].toLowerCase() === 'yes';
    }
    // "📋 **Query Results**: (50 records):" — singular "record" also accepted
    const countMatch = text.match(/\*\*Query Results\*\*[^(]*\((\d+)\s*records?\)/);
    if (countMatch) {
        meta.recordCount = parseInt(countMatch[1], 10);
    }
    return meta;
}
110
/**
 * Pull the result payload out of a Dynatrace DQL success response.
 *
 * A ```json fenced block is tried first; when the fence contents are not
 * valid JSON the raw fence text is returned so callers still see the data.
 * Without a fence, the first bracketed array/object found anywhere in the
 * text is attempted. Returns null when nothing parseable is found.
 *
 * @param {string} text - Raw success response text
 * @returns {unknown} Parsed JSON, raw fence text, or null
 */
function extractData(text) {
    const fenced = text.match(/```json\s*\n([\s\S]*?)\n```/);
    if (fenced) {
        try {
            return JSON.parse(fenced[1]);
        }
        catch {
            // Not valid JSON — hand back the fence contents untouched
            return fenced[1];
        }
    }
    // Fallback: greedily grab anything array/object-shaped after the header
    const bare = text.match(/(\[[\s\S]*\]|\{[\s\S]*\})/);
    if (!bare) {
        return null;
    }
    try {
        return JSON.parse(bare[1]);
    }
    catch {
        return null;
    }
}
137
/**
 * Parse a Dynatrace DQL response
 * @param text - The raw text response from Dynatrace
 * @returns Parsed response with metadata/data or error, or indication that it's not a Dynatrace response
 */
export function parseDynatraceDqlResponse(text) {
    // Error responses are checked first
    if (isDynatraceError(text)) {
        return {
            isDynatraceDql: true,
            isError: true,
            errorMessage: parseErrorResponse(text),
        };
    }
    // Anything that is neither an error nor a success header is not ours
    if (!isDynatraceSuccess(text)) {
        return {
            isDynatraceDql: false,
        };
    }
    // Success: split into metadata header and data payload
    return {
        isDynatraceDql: true,
        isError: false,
        metadata: parseMetadata(text),
        data: extractData(text),
    };
}
167
/**
 * Check if a tool name is a Dynatrace DQL tool
 */
export function isDynatraceDqlTool(toolName) {
    // Only the DQL execution tool gets the special parsing treatment
    const DQL_TOOL_NAME = 'execute_dql';
    return toolName === DQL_TOOL_NAME;
}
@@ -32,6 +32,10 @@ export interface QueryAssistOptions {
32
32
  maxKeys?: number;
33
33
  dataSize?: number;
34
34
  }
35
+ export interface JsonlSchemaOptions extends QueryAssistOptions {
36
+ totalLines: number;
37
+ sampleSize?: number;
38
+ }
35
39
  /**
36
40
  * Generate query-assist schema for JSON data
37
41
  * Main entry point for schema generation
@@ -40,3 +44,10 @@ export interface QueryAssistOptions {
40
44
  * @returns Compact text schema optimized for JQ queries
41
45
  */
42
46
  export declare function generateQueryAssistSchema(data: unknown, options?: QueryAssistOptions): string;
47
+ /**
48
+ * Generate schema for JSONL data by sampling multiple records
49
+ * @param lines - Array of raw JSON lines
50
+ * @param options - Configuration options including totalLines and sampleSize
51
+ * @returns Formatted text schema optimized for JQ queries on JSONL files
52
+ */
53
+ export declare function generateJsonlQueryAssistSchema(lines: string[], options: JsonlSchemaOptions): string;
@@ -268,3 +268,117 @@ export function generateQueryAssistSchema(data, options = {}) {
268
268
  // Format as text
269
269
  return formatQueryAssist(selectedPaths, limits, dataSize);
270
270
  }
271
/**
 * Select up to `sampleSize` line indices spread evenly through the data.
 *
 * Always includes the first and last index (when more than one sample is
 * requested) so both ends of the file are represented.
 *
 * @param {number} totalLines - Total number of lines available
 * @param {number} sampleSize - Maximum number of indices to return
 * @returns {number[]} Sorted, duplicate-free indices in [0, totalLines)
 */
function selectSampleIndices(totalLines, sampleSize) {
    if (totalLines <= 0) {
        return [];
    }
    // Degenerate request: previously sampleSize <= 1 divided by zero below
    // (step became Infinity / negative) yet still returned TWO indices,
    // exceeding the requested count. Clamp to a single sample.
    if (sampleSize <= 1) {
        return [0];
    }
    if (totalLines <= sampleSize) {
        // Fewer lines than requested samples: take every line
        return Array.from({ length: totalLines }, (_, i) => i);
    }
    const indices = [0]; // Always include first
    const step = Math.floor(totalLines / (sampleSize - 1));
    for (let i = 1; i < sampleSize - 1; i++) {
        indices.push(i * step);
    }
    indices.push(totalLines - 1); // Always include last
    // Guard against rounding collisions between stepped and final indices
    return [...new Set(indices)];
}
286
/**
 * Compare the key sets of the sampled records to determine whether they all
 * share the same shape.
 *
 * Non-object records (arrays, primitives, null) contribute no keys and are
 * effectively ignored by the comparison.
 *
 * @param {unknown[]} records - Parsed sample records
 * @returns {{ isConsistent: boolean, optionalFields: string[] }}
 *   `optionalFields` lists keys missing from at least one record.
 */
function analyzeSchemaConsistency(records) {
    // With fewer than two samples there is nothing to compare
    if (records.length < 2) {
        return { isConsistent: true, optionalFields: [] };
    }
    // Count how many records carry each key
    const keyCounts = new Map();
    for (const record of records) {
        if (record === null || typeof record !== 'object' || Array.isArray(record)) {
            continue;
        }
        for (const key of Object.keys(record)) {
            keyCounts.set(key, (keyCounts.get(key) ?? 0) + 1);
        }
    }
    // A key present in fewer records than we sampled is optional
    const optionalFields = [...keyCounts.entries()]
        .filter(([, count]) => count !== records.length)
        .map(([key]) => key);
    return {
        isConsistent: optionalFields.length === 0,
        optionalFields
    };
}
317
/**
 * Generate schema for JSONL data by sampling multiple records
 * @param lines - Array of raw JSON lines
 * @param options - Configuration options including totalLines and sampleSize
 * @returns Formatted text schema optimized for JQ queries on JSONL files
 */
export function generateJsonlQueryAssistSchema(lines, options) {
    const { totalLines, sampleSize = 5 } = options;
    const out = [];
    const emit = (line = '') => out.push(line);
    emit('📊 JSONL STRUCTURE GUIDE (for JQ queries)');
    emit();
    emit(`Format: JSON Lines (${totalLines.toLocaleString()} records)`);
    if (options.dataSize) {
        emit(`Size: ${options.dataSize.toLocaleString()} characters`);
    }
    emit();
    // Cheat-sheet of common jq invocations for line-delimited JSON
    emit('JQ USAGE (JSONL files):');
    emit(' • Stream records: jq -c "." file.jsonl');
    emit(' • Filter: jq -c "select(.field == value)" file.jsonl');
    emit(' • Extract field: jq -r ".fieldName" file.jsonl');
    emit(' • Count records: jq -s "length" file.jsonl');
    emit(' • First N records: jq -s ".[:N]" file.jsonl');
    emit(' • Unique values: jq -s "[.[].field] | unique" file.jsonl');
    emit();
    // Parse a handful of records spread evenly through the file; lines that
    // fail to parse are simply skipped
    const sampledRecords = [];
    for (const idx of selectSampleIndices(totalLines, sampleSize)) {
        if (!lines[idx]) {
            continue;
        }
        try {
            sampledRecords.push(JSON.parse(lines[idx].trim()));
        }
        catch {
            // Skip unparseable lines
        }
    }
    if (sampledRecords.length === 0) {
        emit('⚠️ Could not parse any records');
        return out.join('\n');
    }
    // Report whether the sampled records agree on their key set
    const { isConsistent, optionalFields } = analyzeSchemaConsistency(sampledRecords);
    if (isConsistent) {
        emit(`RECORD STRUCTURE (consistent across ${sampledRecords.length} sampled records):`);
    }
    else {
        emit(`RECORD STRUCTURE (⚠️ variations detected in ${sampledRecords.length} samples):`);
        if (optionalFields.length > 0) {
            emit(` Optional fields: ${optionalFields.join(', ')}`);
        }
    }
    emit();
    // The first sample stands in for the record shape; indent its schema one
    // space so it reads as nested under the structure header
    const recordSchema = generateQueryAssistSchema(sampledRecords[0], {
        maxDepth: options.maxDepth,
        maxPaths: options.maxPaths,
        maxKeys: options.maxKeys,
    });
    emit(recordSchema
        .split('\n')
        .map(l => ' ' + l)
        .join('\n'));
    return out.join('\n');
}
@@ -1,9 +1,58 @@
1
1
  import fs from 'fs/promises';
2
2
  import path from 'path';
3
3
  import { generateToolId } from '../utils/filename.js';
4
- import { generateQueryAssistSchema } from './schema.js';
4
+ import { generateQueryAssistSchema, generateJsonlQueryAssistSchema } from './schema.js';
5
+ import { parseDynatraceDqlResponse, isDynatraceDqlTool, } from './dynatrace.js';
5
6
  // Default minimum character count to trigger file writing
6
7
  const DEFAULT_MIN_CHARS = 1000;
8
/**
 * Detect whether content is JSON, JSONL, or plain text
 * @param {string} content - Raw content to classify
 * @returns {'json'|'jsonl'|'text'} Detected format
 */
function detectContentFormat(content) {
    const trimmed = content.trim();
    // Nothing to classify
    if (!trimmed) {
        return 'text';
    }
    // A single parseable object/array is plain JSON
    const looksBracketed = trimmed.startsWith('{') || trimmed.startsWith('[');
    if (looksBracketed) {
        try {
            JSON.parse(trimmed);
            return 'json';
        }
        catch {
            // May still be JSONL whose first line starts with "{"
        }
    }
    // JSONL: several lines, each a standalone JSON document. Only probe the
    // first few lines so huge files stay cheap to classify.
    const nonEmptyLines = trimmed.split('\n').filter((l) => l.trim());
    if (nonEmptyLines.length > 1) {
        const probeCount = Math.min(nonEmptyLines.length, 10);
        const isJsonLine = (line) => {
            try {
                JSON.parse(line.trim());
                return true;
            }
            catch {
                return false;
            }
        };
        if (nonEmptyLines.slice(0, probeCount).every(isJsonLine)) {
            return 'jsonl';
        }
    }
    return 'text';
}
46
/**
 * Get file extension based on content format
 * @param {'json'|'jsonl'|'text'} format - Detected content format
 * @returns {string} Extension including the leading dot
 */
function getFileExtension(format) {
    // Map lookup (prototype-free) with JSON as the default for anything else
    const extensionByFormat = new Map([
        ['jsonl', '.jsonl'],
        ['text', '.txt'],
    ]);
    return extensionByFormat.get(format) ?? '.json';
}
7
56
  /**
8
57
  * Helper function to detect if a response contains an error
9
58
  */
@@ -160,6 +209,60 @@ const extractContentForFile = (responseData) => {
160
209
  }
161
210
  return { contentToWrite, parsedForSchema };
162
211
  };
212
/**
 * Handle Dynatrace DQL response - extracts metadata and data, writes structured file
 * @param config - File writer configuration
 * @param tool_id - Generated tool ID
 * @param parsedDql - Parsed DQL response with metadata and data
 * @returns UnifiedToolResponse or null if should fall through to default handling
 */
async function handleDynatraceDqlResponse(config, tool_id, parsedDql) {
    // If file writing is disabled, fall through to default handling BEFORE
    // serializing — previously the (potentially large) payload was stringified
    // even when the result was immediately discarded.
    if (!config.enabled || !config.outputPath) {
        return null;
    }
    // Build the structured content to write: metadata header + query data
    const structuredContent = {
        metadata: parsedDql.metadata,
        data: parsedDql.data,
    };
    const contentToWrite = JSON.stringify(structuredContent, null, 2);
    const contentLength = contentToWrite.length;
    // Small responses fall through so the caller returns them inline
    const minChars = config.minCharsForWrite ?? DEFAULT_MIN_CHARS;
    if (contentLength < minChars) {
        return null;
    }
    // Write structured file
    try {
        const filename = `${tool_id}.json`;
        const filepath = path.join(config.outputPath, filename);
        await fs.mkdir(config.outputPath, { recursive: true });
        await fs.writeFile(filepath, contentToWrite);
        // Generate schema from the full structured content (metadata + data)
        const fileSchema = generateQueryAssistSchema(structuredContent, {
            maxDepth: config.schemaMaxDepth ?? 2,
            maxPaths: config.schemaMaxPaths ?? 20,
            maxKeys: config.schemaMaxKeys ?? 50,
            dataSize: contentLength,
        });
        return {
            tool_id,
            wroteToFile: true,
            filePath: filepath,
            fileSchema,
        };
    }
    catch (error) {
        // Fall back to inline content so the response is never lost
        console.error(`[handleDynatraceDqlResponse] Error writing file:`, error);
        return {
            tool_id,
            wroteToFile: false,
            outputContent: structuredContent,
            error: `File write failed: ${error instanceof Error ? error.message : String(error)}`,
        };
    }
}
163
266
  /**
164
267
  * Centralized response handler with file writing capability
165
268
  * @param config - File writer configuration
@@ -180,6 +283,38 @@ export async function handleToolResponse(config, toolName, args, responseData) {
180
283
  outputContent: parsedForSchema ?? contentToWrite,
181
284
  };
182
285
  }
286
+ // Handle Dynatrace DQL responses specially
287
+ if (isDynatraceDqlTool(toolName)) {
288
+ const rawText = extractRawText(responseData);
289
+ if (rawText) {
290
+ const parsedDql = parseDynatraceDqlResponse(rawText);
291
+ if (parsedDql.isDynatraceDql) {
292
+ // If it's an error, return error directly
293
+ if (parsedDql.isError) {
294
+ return {
295
+ tool_id,
296
+ wroteToFile: false,
297
+ error: parsedDql.errorMessage,
298
+ };
299
+ }
300
+ // Try to handle as structured Dynatrace response
301
+ const dqlResult = await handleDynatraceDqlResponse(config, tool_id, parsedDql);
302
+ if (dqlResult) {
303
+ return dqlResult;
304
+ }
305
+ // If dqlResult is null, fall through to return the structured content directly
306
+ return {
307
+ tool_id,
308
+ wroteToFile: false,
309
+ outputContent: {
310
+ metadata: parsedDql.metadata,
311
+ data: parsedDql.data,
312
+ },
313
+ };
314
+ }
315
+ }
316
+ // If not a recognized Dynatrace DQL format, fall through to default handling
317
+ }
183
318
  // If there's an error, return error in unified format
184
319
  if (isErrorResponse(responseData)) {
185
320
  const errorMessage = extractErrorMessage(responseData);
@@ -212,15 +347,31 @@ export async function handleToolResponse(config, toolName, args, responseData) {
212
347
  }
213
348
  // Success case: write to file
214
349
  try {
215
- const filename = `${tool_id}.json`;
350
+ // Detect content format for appropriate extension and schema
351
+ const format = detectContentFormat(contentToWrite);
352
+ const extension = getFileExtension(format);
353
+ const filename = `${tool_id}${extension}`;
216
354
  const filepath = path.join(config.outputPath, filename);
217
355
  // Ensure output directory exists
218
356
  await fs.mkdir(config.outputPath, { recursive: true });
219
357
  // Write the exact content we counted
220
358
  await fs.writeFile(filepath, contentToWrite);
221
- // Generate query-assist schema if we have valid JSON
359
+ // Generate query-assist schema based on format
222
360
  let fileSchema;
223
- if (parsedForSchema) {
361
+ if (format === 'jsonl') {
362
+ // JSONL format: sample multiple records for schema
363
+ const lines = contentToWrite.trim().split('\n').filter(l => l.trim());
364
+ fileSchema = generateJsonlQueryAssistSchema(lines, {
365
+ maxDepth: config.schemaMaxDepth ?? 2,
366
+ maxPaths: config.schemaMaxPaths ?? 20,
367
+ maxKeys: config.schemaMaxKeys ?? 50,
368
+ dataSize: contentLength,
369
+ totalLines: lines.length,
370
+ sampleSize: 5
371
+ });
372
+ }
373
+ else if (parsedForSchema) {
374
+ // Standard JSON format: use existing schema generator
224
375
  // Use the clean data (without pagination) for schema analysis
225
376
  const { pagination, has_more, next_page, previous_page, page, page_size, total_pages, ...cleanData } = parsedForSchema;
226
377
  // Generate compact query-assist schema using config values
@@ -37,9 +37,10 @@ export async function executeJqQuery(config, jqQuery, filePath) {
37
37
  if (!existsSync(filePath)) {
38
38
  throw new Error(`File not found: ${filePath}`);
39
39
  }
40
- // Validate file extension
41
- if (!filePath.toLowerCase().endsWith('.json')) {
42
- throw new Error(`Only JSON files (.json) are supported for jq processing: ${filePath}`);
40
+ // Validate file extension (support both .json and .jsonl)
41
+ const lowerPath = filePath.toLowerCase();
42
+ if (!lowerPath.endsWith('.json') && !lowerPath.endsWith('.jsonl')) {
43
+ throw new Error(`Only JSON (.json) and JSONL (.jsonl) files are supported for jq processing: ${filePath}`);
43
44
  }
44
45
  // Validate path is within allowed directories
45
46
  validatePathWithinAllowedDirs(filePath, config.allowedPaths);
package/dist/jq/tool.js CHANGED
@@ -8,7 +8,7 @@ export const ExecuteJqQuerySchema = z.object({
8
8
  .describe('The jq query to execute on the JSON file. Query will be sanitized to prevent environment variable access.'),
9
9
  file_path: z
10
10
  .string()
11
- .describe('Absolute path starting with "/" pointing to the JSON file to process. Must be a valid, existing file with .json extension. The file will be validated for existence and readability before processing.'),
11
+ .describe('Absolute path starting with "/" pointing to the JSON or JSONL file to process. Must be a valid, existing file with .json or .jsonl extension. The file will be validated for existence and readability before processing.'),
12
12
  description: z
13
13
  .string()
14
14
  .optional()
@@ -109,7 +109,7 @@ export const JQ_TOOL_DEFINITION = {
109
109
  },
110
110
  file_path: {
111
111
  type: 'string',
112
- description: 'Absolute path starting with "/" pointing to the JSON file to process. Must be a valid, existing file with .json extension. The file will be validated for existence and readability before processing.',
112
+ description: 'Absolute path starting with "/" pointing to the JSON or JSONL file to process. Must be a valid, existing file with .json or .jsonl extension. The file will be validated for existence and readability before processing.',
113
113
  },
114
114
  description: {
115
115
  type: 'string',
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@anyshift/mcp-proxy",
3
- "version": "0.3.1",
3
+ "version": "0.3.3",
4
4
  "description": "Generic MCP proxy that adds truncation, file writing, and JQ capabilities to any MCP server",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",