har-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +261 -0
- package/package.json +38 -0
- package/src/index.ts +33 -0
- package/src/server.ts +28 -0
- package/src/tools/export-curl.ts +134 -0
- package/src/tools/index.ts +17 -0
- package/src/tools/read-har/filters.ts +109 -0
- package/src/tools/read-har/formatters/content.ts +90 -0
- package/src/tools/read-har/formatters/cookies.ts +164 -0
- package/src/tools/read-har/formatters/detail.ts +148 -0
- package/src/tools/read-har/formatters/index.ts +12 -0
- package/src/tools/read-har/formatters/list.ts +89 -0
- package/src/tools/read-har/formatters/size.ts +150 -0
- package/src/tools/read-har/formatters/stats.ts +129 -0
- package/src/tools/read-har/formatters/summary.ts +174 -0
- package/src/tools/read-har/formatters/timeline.ts +95 -0
- package/src/tools/read-har/helpers.ts +237 -0
- package/src/tools/read-har/index.ts +393 -0
- package/src/tools/read-har/repair.ts +277 -0
- package/src/tools/read-har/schema.ts +82 -0
- package/src/tools/read-har.ts +15 -0
- package/src/types/har-to-curl.d.ts +11 -0
- package/src/types/har.ts +386 -0
package/README.md
ADDED
@@ -0,0 +1,261 @@
# HAR MCP Server

An MCP (Model Context Protocol) server for analyzing HAR (HTTP Archive) files with LLM-friendly output.

**Runtime Requirement:** This server requires **Bun >= 1.0.0**.

## Features

- **Multiple analysis modes**: summary, list, detail, content, stats, timeline, size, cookies
- **Powerful filtering**: URL patterns, HTTP methods, status codes, content types, duration, body content
- **Sorting & pagination**: Sort by various fields, paginate large result sets
- **Export capabilities**: Export entries as cURL commands
- **JSON output**: Programmatic JSON output for integration
- **Chrome DevTools support**: Full support for Chrome-exported HAR files with extension fields
- **Truncated HAR repair**: Automatic recovery of truncated HAR files (common with large exports)

## Installation

```bash
bun install
```

**This server only supports the Bun runtime.**

## Usage

### With Claude Desktop

After installing from npm, use `bunx`:

```json
{
  "mcpServers": {
    "har": {
      "command": "bunx",
      "args": ["har-mcp"]
    }
  }
}
```

For local development:

```json
{
  "mcpServers": {
    "har": {
      "command": "bun",
      "args": ["run", "/path/to/har-mcp/src/index.ts"]
    }
  }
}
```

### With OpenCode

After installing from npm, use `bunx`:

```json
{
  "mcp": {
    "servers": {
      "har": {
        "type": "local",
        "command": ["bunx", "har-mcp"]
      }
    }
  }
}
```

For local development:

```json
{
  "mcp": {
    "servers": {
      "har": {
        "type": "local",
        "command": ["bun", "run", "/path/to/har-mcp/src/index.ts"]
      }
    }
  }
}
```

### With MCP Inspector

```bash
npx @modelcontextprotocol/inspector bun run src/index.ts
```

## Tools

### read_har

Reads and analyzes HAR files with multiple output modes.

**Parameters:**

| Parameter  | Type     | Description                                                 |
| ---------- | -------- | ----------------------------------------------------------- |
| `path`     | string   | **Required.** Path to the HAR file                          |
| `mode`     | string   | Output mode (default: "summary")                            |
| `filter`   | object   | Filter criteria                                             |
| `sort`     | object   | Sorting options                                             |
| `page`     | number   | Page number for pagination (default: 1)                     |
| `pageSize` | number   | Entries per page (default: 20, max: 100)                    |
| `entries`  | number[] | Specific entry indexes (required for content/detail modes)  |
| `format`   | string   | Output format: "markdown" or "json"                         |

**Output Modes:**

| Mode       | Description                                                         |
| ---------- | ------------------------------------------------------------------- |
| `summary`  | Overview statistics (total entries, methods, status codes, timing)  |
| `list`     | Paginated entry list with basic info                                |
| `detail`   | Full metadata for specific entries (headers, timing, query params)  |
| `content`  | Response/request bodies for specific entries                        |
| `stats`    | Aggregate statistics grouped by endpoint                            |
| `timeline` | Text-based waterfall visualization                                  |
| `size`     | Payload size analysis                                               |
| `cookies`  | Cookie propagation tracking                                         |

**Filter Options:**

| Filter         | Type                       | Description                                                |
| -------------- | -------------------------- | ---------------------------------------------------------- |
| `url`          | string                     | URL pattern (substring, glob with `*`, or `~regex`)        |
| `method`       | string \| string[]         | HTTP method(s) to filter                                   |
| `status`       | number \| string \| object | Status code, pattern (e.g., "4xx"), or range `{min, max}`  |
| `contentType`  | string                     | Response content type filter                               |
| `minDuration`  | number                     | Minimum request duration in ms                             |
| `hasError`     | boolean                    | Filter to errors only (4xx/5xx)                            |
| `bodyContains` | string                     | Filter by response body content                            |
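
For reference, the three `url` pattern styles behave roughly like the sketch below. The package's real matcher is `matchUrl` in `src/tools/read-har/helpers.ts`; the function shown here is only an illustration of the documented semantics, not the actual implementation.

```typescript
// Illustrative only — mirrors the substring / "*" glob / "~regex" semantics above.
function matchUrlSketch(url: string, pattern: string): boolean {
  if (pattern.startsWith("~")) {
    // "~" prefix: the remainder is a regular expression
    return new RegExp(pattern.slice(1)).test(url)
  }
  if (pattern.includes("*")) {
    // Glob: escape regex metacharacters, then turn "*" into ".*"
    const escaped = pattern.replace(/[.+?^${}()|[\]\\]/g, "\\$&").replace(/\*/g, ".*")
    return new RegExp(escaped).test(url)
  }
  // Plain substring match
  return url.includes(pattern)
}

matchUrlSketch("https://example.com/api/users?id=1", "*/api/*") // true
matchUrlSketch("https://example.com/data.json", "~\\.json$")    // true
```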

**Sort Options:**

| Field      | Description                |
| ---------- | -------------------------- |
| `index`    | Original order in HAR file |
| `time`     | Request start time         |
| `duration` | Request duration           |
| `size`     | Response size              |
| `status`   | HTTP status code           |

**Examples:**

```javascript
// Get summary of a HAR file
read_har(path: "/path/to/file.har")

// List all POST requests
read_har(path: "...", mode: "list", filter: { method: "POST" })

// Find slow requests (over 1 second)
read_har(path: "...", mode: "list", filter: { minDuration: 1000 }, sort: { by: "duration", order: "desc" })

// Get response body for first two entries
read_har(path: "...", mode: "content", entries: [0, 1])

// Find requests matching URL pattern
read_har(path: "...", mode: "list", filter: { url: "*/api/*" })

// Filter by regex
read_har(path: "...", mode: "list", filter: { url: "~\\.json$" })

// Find errors
read_har(path: "...", mode: "list", filter: { hasError: true })

// Filter by status range
read_har(path: "...", mode: "list", filter: { status: { min: 400, max: 499 } })

// Analyze request sizes
read_har(path: "...", mode: "size")

// View timeline waterfall
read_har(path: "...", mode: "timeline")

// Track cookies
read_har(path: "...", mode: "cookies")

// Get JSON output
read_har(path: "...", mode: "summary", format: "json")
```
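
Beyond the tool-call shorthand above, a client can invoke `read_har` programmatically over stdio. The sketch below uses the client API from `@modelcontextprotocol/sdk`; the client name and argument values are placeholders, not part of this package.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js"
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"

// Spawn the server via bunx and connect over stdio
const transport = new StdioClientTransport({ command: "bunx", args: ["har-mcp"] })
const client = new Client({ name: "har-example-client", version: "0.0.1" }, { capabilities: {} })
await client.connect(transport)

// Call the read_har tool with a filter, mirroring the examples above
const result = await client.callTool({
  name: "read_har",
  arguments: { path: "/path/to/file.har", mode: "list", filter: { method: "POST" } },
})
console.log(result.content)

await client.close()
```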

### export_har_curl

Export HAR entries as cURL commands for replay or debugging.

**Parameters:**

| Parameter        | Type     | Description                              |
| ---------------- | -------- | ---------------------------------------- |
| `path`           | string   | **Required.** Path to the HAR file       |
| `entries`        | number[] | **Required.** Entry indexes to export    |
| `includeHeaders` | boolean  | Include request headers (default: true)  |
| `includeCookies` | boolean  | Include cookies (default: false)         |
| `compressed`     | boolean  | Add `--compressed` flag                  |
| `verbose`        | boolean  | Add `-v` flag                            |

**Example:**

```javascript
// Export first request as cURL
export_har_curl(path: "/path/to/file.har", entries: [0])

// Export with cookies and verbose output
export_har_curl(path: "...", entries: [0, 1], includeCookies: true, verbose: true)
```

## Browser Compatibility

This MCP server fully supports HAR files exported from:

- **Chrome DevTools** - Including all Chrome-specific extension fields (`_initiator`, `_priority`, `_resourceType`, `_transferSize`, `_workerRespondWithSettled`, etc.)
- **Firefox DevTools** - Standard HAR 1.2 format
- **Safari Web Inspector** - Standard HAR format
- **Other tools** - Any HAR 1.2 compliant export

### Truncated HAR Repair

Large HAR files exported from browsers can sometimes be truncated (incomplete JSON). This server automatically detects and repairs truncated HAR files by:

1. Finding the last complete HTTP entry
2. Reconstructing a valid JSON structure
3. Displaying a warning with the number of recovered entries

When a truncated file is repaired, you'll see a warning like:

```
⚠️ **Warning**: HAR file was truncated and has been repaired.
Recovered 182 entries using chrome-timings repair method.
Some entries at the end of the file may be missing.
```
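
The repair logic itself lives in `src/tools/read-har/repair.ts` (not reproduced here). As a rough sketch of the idea — assuming the truncation happens somewhere inside `log.entries` — recovery amounts to cutting back to the last entry boundary that still parses and closing the enclosing structures:

```typescript
// Simplified illustration only; the actual repair.ts is more careful and reports
// which repair method (e.g. "chrome-timings") recovered the entries.
function repairTruncatedHarSketch(raw: string): unknown {
  // Walk entry boundaries ("}," separators) from the end of the file backwards,
  // appending closers for the entries array, the log object, and the root object.
  for (let cut = raw.lastIndexOf("},"); cut > 0; cut = raw.lastIndexOf("},", cut - 1)) {
    const candidate = `${raw.slice(0, cut + 1)}]}}`
    try {
      return JSON.parse(candidate)
    } catch {
      // Not a clean entry boundary yet; keep searching earlier in the file
    }
  }
  throw new Error("Unable to repair truncated HAR file")
}
```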

## Development

```bash
bun start    # Run the server
bun test     # Run tests
bun lint     # Check code style
bun format   # Format code
```

## Project Structure

```
src/
├── index.ts              # Entry point - stdio transport setup
├── server.ts             # MCP server configuration and tool registration
├── types/                # HAR type definitions and Zod schemas
└── tools/
    ├── index.ts          # Tool registration
    ├── export-curl.ts    # export_har_curl tool
    ├── read-har.ts       # Registers the read_har tool
    └── read-har/
        ├── index.ts      # read_har implementation
        ├── schema.ts     # Zod schemas for input validation
        ├── filters.ts    # Filtering and sorting
        ├── helpers.ts    # Shared helpers (URL/status matching, sizes)
        ├── repair.ts     # Truncated HAR repair
        └── formatters/   # Output mode formatters
```

## License

MIT
package/package.json
ADDED
@@ -0,0 +1,38 @@
{
  "name": "har-mcp",
  "version": "0.1.0",
  "description": "MCP server for analyzing HAR (HTTP Archive) files with LLM-friendly output",
  "type": "module",
  "main": "src/index.ts",
  "bin": {
    "har-mcp": "./src/index.ts"
  },
  "engines": {
    "bun": ">=1.0.0"
  },
  "files": ["src", "package.json", "README.md", "LICENSE"],
  "scripts": {
    "start": "bun run src/index.ts",
    "dev": "bun --watch src/index.ts",
    "test": "bun test",
    "lint": "bunx @biomejs/biome check .",
    "lint:fix": "bunx @biomejs/biome check --write .",
    "format": "bunx @biomejs/biome format --write .",
    "format:check": "bunx @biomejs/biome format .",
    "check": "bunx @biomejs/biome check --write . && bunx @biomejs/biome format --write .",
    "check:ci": "bunx @biomejs/biome check . && bunx @biomejs/biome format .",
    "typecheck": "bunx tsc --noEmit"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.26.0",
    "har-to-curl": "^0.5.0",
    "zod": "^3.25.76"
  },
  "devDependencies": {
    "@biomejs/biome": "^1.9.4",
    "@types/bun": "^1.1.15",
    "typescript": "^5.9.3"
  },
  "keywords": ["mcp", "model-context-protocol", "ai", "llm", "har", "http-archive", "network-analysis"],
  "license": "MIT"
}
package/src/index.ts
ADDED
@@ -0,0 +1,33 @@
#!/usr/bin/env node
/**
 * MCP Server Entry Point
 *
 * This is the main entry point that sets up the stdio transport
 * and connects it to the MCP server.
 */

import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"
import { createServer } from "./server.ts"

async function main(): Promise<void> {
  const server = createServer()
  const transport = new StdioServerTransport()

  await server.connect(transport)

  // Handle graceful shutdown
  process.on("SIGINT", async () => {
    await server.close()
    process.exit(0)
  })

  process.on("SIGTERM", async () => {
    await server.close()
    process.exit(0)
  })
}

main().catch(error => {
  console.error("Fatal error:", error)
  process.exit(1)
})
package/src/server.ts
ADDED
@@ -0,0 +1,28 @@
/**
 * MCP Server Configuration
 *
 * This file contains the server setup and tool registration.
 * Add your tools by importing them and registering with the server.
 */

import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"
import { registerTools } from "./tools/index.ts"

// Server metadata
export const SERVER_NAME = "har-mcp"
export const SERVER_VERSION = "0.1.0"

/**
 * Creates and configures the MCP server with all tools registered.
 */
export function createServer(): McpServer {
  const server = new McpServer({
    name: SERVER_NAME,
    version: SERVER_VERSION,
  })

  // Register all tools
  registerTools(server)

  return server
}
package/src/tools/export-curl.ts
ADDED
@@ -0,0 +1,134 @@
/**
 * Export HAR entries as cURL commands
 */

import * as fs from "node:fs/promises"
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"
import harToCurl from "har-to-curl"
import { z } from "zod"
import { type HarEntry, HarFileSchema } from "../types/har.ts"

export const exportCurlSchema = z.object({
  path: z.string().describe("Path to the HAR file"),
  entries: z.array(z.number().int().min(0)).describe("Entry indexes to export"),
  includeHeaders: z.boolean().default(true).describe("Include request headers"),
  includeCookies: z.boolean().default(false).describe("Include cookies"),
  compressed: z.boolean().default(false).describe("Add --compressed flag"),
  verbose: z.boolean().default(false).describe("Add -v flag for verbose output"),
})

export type ExportCurlInput = z.infer<typeof exportCurlSchema>

// Headers that contain sensitive information and should be redacted
const SENSITIVE_HEADERS = /^(authorization|x-api-key|x-auth-token|api-key|bearer)/i

/**
 * Prepares a HAR entry for conversion by applying options like header filtering,
 * cookie removal, and sensitive header redaction
 */
function prepareEntryForConversion(entry: HarEntry, options: ExportCurlInput): HarEntry {
  // Deep clone the entry to avoid mutating the original
  const preparedEntry = JSON.parse(JSON.stringify(entry)) as HarEntry

  // Filter headers if not including all headers
  if (!options.includeHeaders) {
    preparedEntry.request.headers = []
  } else {
    // Redact sensitive headers
    preparedEntry.request.headers = preparedEntry.request.headers.map(header => {
      if (SENSITIVE_HEADERS.test(header.name)) {
        return { ...header, value: "[REDACTED]" }
      }
      return header
    })
  }

  // Remove cookies if not included
  if (!options.includeCookies) {
    preparedEntry.request.cookies = []
    // Also remove Cookie header if present
    preparedEntry.request.headers = preparedEntry.request.headers.filter(h => h.name.toLowerCase() !== "cookie")
  }

  return preparedEntry
}

/**
 * Appends additional curl flags based on options
 */
function appendFlags(curlCommand: string, options: ExportCurlInput): string {
  const flags: string[] = []

  if (options.compressed) {
    flags.push("--compressed")
  }
  if (options.verbose) {
    flags.push("-v")
  }

  if (flags.length > 0) {
    return `${curlCommand} ${flags.join(" ")}`
  }

  return curlCommand
}

export async function exportCurlHandler(input: ExportCurlInput): Promise<string> {
  const fileContent = await fs.readFile(input.path, "utf-8")
  const harData = JSON.parse(fileContent)
  const parseResult = HarFileSchema.safeParse(harData)

  if (!parseResult.success) {
    throw new Error("Invalid HAR file format")
  }

  const harFile = parseResult.data
  const results: string[] = []

  for (const idx of input.entries) {
    const entry = harFile.log.entries[idx]
    if (!entry) {
      results.push(`# Entry ${idx}: Not found`)
      continue
    }

    results.push(`# Entry ${idx}: ${entry.request.method} ${entry.request.url}`)

    // Prepare entry with options applied
    const preparedEntry = prepareEntryForConversion(entry, input)

    // Use har-to-curl library for conversion
    const curlCommand = harToCurl(preparedEntry)

    // Append additional flags
    const finalCommand = appendFlags(curlCommand, input)

    results.push(finalCommand)
    results.push("")
  }

  return results.join("\n")
}

export function registerExportCurlTool(server: McpServer): void {
  server.tool(
    "export_har_curl",
    "Export HAR entries as cURL commands for replay or debugging",
    {
      path: z.string().describe("Path to the HAR file"),
      entries: z.array(z.number().int().min(0)).describe("Entry indexes to export"),
      includeHeaders: z.boolean().default(true).describe("Include request headers"),
      includeCookies: z.boolean().default(false).describe("Include cookies"),
      compressed: z.boolean().default(false).describe("Add --compressed flag"),
      verbose: z.boolean().default(false).describe("Add -v flag"),
    },
    async args => {
      try {
        const result = await exportCurlHandler(args as ExportCurlInput)
        return { content: [{ type: "text" as const, text: result }] }
      } catch (err) {
        return { content: [{ type: "text" as const, text: `Error: ${(err as Error).message}` }], isError: true }
      }
    },
  )
}
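For completeness, `exportCurlHandler` above can also be driven directly (outside the MCP tool wrapper), for example from a test script. This is a usage sketch only — the relative import path is an assumption, and the output comments reflect the handler's behavior as shown above.

```typescript
import { exportCurlHandler } from "./export-curl.ts"

// Export the first two entries; sensitive headers are redacted and, because
// includeCookies is false, the Cookie header is stripped before conversion.
const commands = await exportCurlHandler({
  path: "/path/to/file.har",
  entries: [0, 1],
  includeHeaders: true,
  includeCookies: false,
  compressed: true, // appends --compressed to each generated command
  verbose: false,
})

// Each entry yields a "# Entry <n>: <METHOD> <URL>" comment followed by its curl command
console.log(commands)
```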
package/src/tools/index.ts
ADDED
@@ -0,0 +1,17 @@
/**
 * Tool Registration
 *
 * This file exports a function to register all tools with the MCP server.
 */

import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"
import { registerExportCurlTool } from "./export-curl.ts"
import { registerReadHarTool } from "./read-har.ts"

/**
 * Registers all tools with the MCP server.
 */
export function registerTools(server: McpServer): void {
  registerReadHarTool(server)
  registerExportCurlTool(server)
}
package/src/tools/read-har/filters.ts
ADDED
@@ -0,0 +1,109 @@
/**
 * Filter and Sort Functions for Read HAR Tool
 */

import type { HarEntry } from "../../types/har.ts"
import { getResponseSize, isError, matchStatus, matchUrl } from "./helpers.ts"
import type { ReadHarInput } from "./schema.ts"

/**
 * Entry with its original index
 */
export interface IndexedEntry {
  index: number
  entry: HarEntry
}

/**
 * Filters entries based on filter criteria
 */
export function filterEntries(entries: IndexedEntry[], filter: ReadHarInput["filter"]): IndexedEntry[] {
  if (!filter) return entries

  return entries.filter(({ entry }) => {
    // URL filter
    if (filter.url && !matchUrl(entry.request.url, filter.url)) {
      return false
    }

    // Method filter
    if (filter.method) {
      const methods = Array.isArray(filter.method) ? filter.method : [filter.method]
      if (!methods.some(m => m.toUpperCase() === entry.request.method.toUpperCase())) {
        return false
      }
    }

    // Status filter
    if (filter.status !== undefined && !matchStatus(entry.response.status, filter.status)) {
      return false
    }

    // Content type filter
    if (filter.contentType) {
      const responseType = entry.response.content.mimeType.toLowerCase()
      if (!responseType.includes(filter.contentType.toLowerCase())) {
        return false
      }
    }

    // Min duration filter
    if (filter.minDuration !== undefined && entry.time < filter.minDuration) {
      return false
    }

    // Has error filter
    if (filter.hasError !== undefined) {
      const entryHasError = isError(entry)
      if (filter.hasError !== entryHasError) {
        return false
      }
    }

    // Body contains filter
    if (filter.bodyContains) {
      const responseText = entry.response.content.text || ""
      if (!responseText.toLowerCase().includes(filter.bodyContains.toLowerCase())) {
        return false
      }
    }

    return true
  })
}

/**
 * Sorts entries based on sort criteria
 */
export function sortEntries(entries: IndexedEntry[], sort: ReadHarInput["sort"]): IndexedEntry[] {
  if (!sort) return entries

  const sorted = [...entries]
  const multiplier = sort.order === "desc" ? -1 : 1

  sorted.sort((a, b) => {
    let comparison = 0

    switch (sort.by) {
      case "index":
        comparison = a.index - b.index
        break
      case "time":
        comparison = new Date(a.entry.startedDateTime).getTime() - new Date(b.entry.startedDateTime).getTime()
        break
      case "duration":
        comparison = a.entry.time - b.entry.time
        break
      case "size":
        comparison = getResponseSize(a.entry) - getResponseSize(b.entry)
        break
      case "status":
        comparison = a.entry.response.status - b.entry.response.status
        break
    }

    return comparison * multiplier
  })

  return sorted
}