@testledger/mcp 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +126 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +348 -0
- package/package.json +45 -0
package/README.md
ADDED
@@ -0,0 +1,126 @@

# Test Reporter MCP Server

MCP (Model Context Protocol) server for [Test Ledger](https://testledger.dev) that enables Claude Code to analyze flaky tests, find failure patterns, and suggest fixes.

## Installation

No installation required! Just add the configuration to Claude Code.
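
If you want a quick sanity check first, you can run the exact command the config below registers. The server starts, prints `Test Results MCP Server running on stdio` to stderr (see `dist/index.js`), and then waits for an MCP client:

```bash
# Runs the published package directly; Ctrl-C to exit.
TEST_REPORTER_API_KEY=your-api-key-here npx -y @testledger/mcp
```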

## Quick Start

### 1. Get your API key

Log into [testledger.dev](https://testledger.dev) and go to Settings → API Keys to generate a key.

### 2. Configure Claude Code

Add this to your Claude Code MCP config:

**Location:** `~/.claude.json` (global) or `.mcp.json` (project)

```json
{
  "mcpServers": {
    "test-reporter": {
      "command": "npx",
      "args": ["-y", "@testledger/mcp"],
      "env": {
        "TEST_REPORTER_API_KEY": "your-api-key-here"
      }
    }
  }
}
```
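
Recent Claude Code builds can also register the server from the shell instead of editing JSON by hand. Flags vary by version, so treat this as a sketch and check `claude mcp add --help` first:

```bash
# Hypothetical CLI registration equivalent to the JSON config above.
claude mcp add test-reporter -e TEST_REPORTER_API_KEY=your-api-key-here -- npx -y @testledger/mcp
```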

### 3. Restart Claude Code

That's it! Claude Code now has access to your test results.

## Usage

Once configured, you can ask Claude Code things like:

- "Why is `checkout.spec.js` flaky?"
- "What tests have been failing the most this week?"
- "Find all tests with timeout errors"
- "Are there any tests that always fail together?"

### With the /fix-flaky-test command

For the best experience, add the [fix-flaky-test slash command](https://github.com/your-company/test-reporter-mcp/blob/main/commands/fix-flaky-test.md) to your project:

```bash
mkdir -p .claude/commands
curl -o .claude/commands/fix-flaky-test.md https://raw.githubusercontent.com/your-company/test-reporter-mcp/main/commands/fix-flaky-test.md
```

Then use it:

```
/fix-flaky-test

Test: LoginPage.should allow user to login with valid credentials
Error: element ("#submit-btn") still not clickable after 3000ms
    at login.spec.js:42:24
```

## Available Tools

The MCP server provides these tools to Claude:

| Tool | Description |
|------|-------------|
| `get_test_history` | Pass/fail/flaky statistics for a test |
| `get_test_errors` | Error messages and stacktraces, grouped by frequency |
| `get_failure_patterns` | Time-of-day, browser, and version patterns |
| `get_correlated_failures` | Tests that fail together (shared setup issues) |
| `get_flaky_tests` | Project-wide flaky test leaderboard |
| `get_recent_failures` | Recent failures for quick triage |
| `get_test_trend` | Failure rate over time |
| `search_errors` | Full-text search across all errors |
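
Claude invokes these over MCP's stdio transport (newline-delimited JSON-RPC), so you can also list them by hand. The following is an untested sketch; the message shapes follow the MCP spec, and the dummy key is fine because `tools/list` never hits the API:

```bash
# Pipe an initialize handshake plus a tools/list request into the server.
printf '%s\n' \
  '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"probe","version":"0.0.0"}}}' \
  '{"jsonrpc":"2.0","method":"notifications/initialized"}' \
  '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \
  | TEST_REPORTER_API_KEY=dummy npx -y @testledger/mcp
```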

## Configuration Options

| Environment Variable | Required | Description |
|---------------------|----------|-------------|
| `TEST_REPORTER_API_KEY` | Yes | Your API key from the dashboard |
| `TEST_REPORTER_API_URL` | No | Custom API URL (default: `https://app-api.testledger.dev`) |
| `TEST_REPORTER_PROJECT_ID` | No | Default project ID to use for queries |

### Example with all options

```json
{
  "mcpServers": {
    "test-reporter": {
      "command": "npx",
      "args": ["-y", "@testledger/mcp"],
      "env": {
        "TEST_REPORTER_API_KEY": "tr_live_abc123",
        "TEST_REPORTER_PROJECT_ID": "42"
      }
    }
  }
}
```

## Troubleshooting

### "Tool not found" errors

1. Restart Claude Code after updating config
2. Check for JSON syntax errors in your config file (a quick check is shown below)
3. Verify your API key is valid
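
For step 2, any JSON validator works; since running the server already requires Node >= 18, a Node one-liner is handy (swap in `.mcp.json` for a project-level config):

```bash
# Exits non-zero and prints the parse error if the file is not valid JSON.
node -e 'JSON.parse(require("fs").readFileSync(process.argv[1], "utf8"))' ~/.claude.json
```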

### "API error 401"

Your API key is invalid or expired. Generate a new one from the dashboard.
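
To separate a bad key from a misconfigured server, you can hit the backing API directly. The base URL, endpoint, and Bearer scheme below are taken from `dist/index.js`; the response body format is not documented here:

```bash
# A 401 here confirms the key itself is rejected; 200 means the key is fine.
curl -sS -H "Authorization: Bearer $TEST_REPORTER_API_KEY" \
  "https://app-api.testledger.dev/api/tests/recent-failures?hours=24"
```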

### "API error 403"

Your API key doesn't have access to the requested project. Check project permissions.

## Support

- Documentation: [testledger.dev](https://testledger.dev)
- Issues: [GitHub Issues](https://github.com/your-company/test-reporter-mcp/issues)
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
@@ -0,0 +1,348 @@

```js
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { CallToolRequestSchema, ListToolsRequestSchema } from "@modelcontextprotocol/sdk/types.js";

// Configuration from environment. The URL falls back to the documented
// default so TEST_REPORTER_API_URL stays optional, as the README states;
// without the fallback, `new URL(endpoint, undefined)` would throw.
const API_BASE_URL = process.env.TEST_REPORTER_API_URL || "https://app-api.testledger.dev";
const API_KEY = process.env.TEST_REPORTER_API_KEY || "";
const DEFAULT_PROJECT_ID = process.env.TEST_REPORTER_PROJECT_ID;

// Helper to make API calls
async function apiCall(endpoint, params = {}) {
    const url = new URL(endpoint, API_BASE_URL);
    // Inject default project ID if not provided
    if (DEFAULT_PROJECT_ID && !params.project_id) {
        params.project_id = DEFAULT_PROJECT_ID;
    }
    // Add query params
    Object.entries(params).forEach(([key, value]) => {
        if (value !== undefined && value !== null) {
            url.searchParams.append(key, String(value));
        }
    });
    const response = await fetch(url.toString(), {
        headers: {
            "Authorization": `Bearer ${API_KEY}`,
            "Content-Type": "application/json",
        },
    });
    if (!response.ok) {
        const error = await response.text();
        throw new Error(`API error ${response.status}: ${error}`);
    }
    return response.json();
}

// Define the tools
const tools = [
    {
        name: "get_test_history",
        description: "Get historical pass/fail/flaky statistics for a specific test. Use this to understand how often a test fails and its overall reliability.",
        inputSchema: {
            type: "object",
            properties: {
                spec_file: {
                    type: "string",
                    description: "The spec file path (e.g., 'login.spec.js' or 'tests/checkout.spec.ts')",
                },
                test_title: {
                    type: "string",
                    description: "Specific test title to filter by (optional - omit to get all tests in the spec)",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Number of days to look back (default: 30)",
                    default: 30,
                },
            },
            required: ["spec_file"],
        },
    },
    {
        name: "get_test_errors",
        description: "Get error messages and stacktraces for a test's failures, grouped by unique error. Use this to see what errors are occurring and how often.",
        inputSchema: {
            type: "object",
            properties: {
                spec_file: {
                    type: "string",
                    description: "The spec file path",
                },
                test_title: {
                    type: "string",
                    description: "Specific test title (optional)",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
                limit: {
                    type: "number",
                    description: "Maximum number of unique errors to return (default: 20)",
                    default: 20,
                },
            },
            required: ["spec_file"],
        },
    },
    {
        name: "get_failure_patterns",
        description: "Analyze when and how tests fail to identify patterns. Returns failure rates by hour, day of week, version, browser/site, and duration analysis.",
        inputSchema: {
            type: "object",
            properties: {
                spec_file: {
                    type: "string",
                    description: "The spec file path",
                },
                test_title: {
                    type: "string",
                    description: "Specific test title (optional)",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
            },
            required: ["spec_file"],
        },
    },
    {
        name: "get_correlated_failures",
        description: "Find tests that tend to fail together with a given test. High correlation suggests shared setup issues, test pollution, or dependencies.",
        inputSchema: {
            type: "object",
            properties: {
                spec_file: {
                    type: "string",
                    description: "The spec file to find correlations for",
                },
                test_title: {
                    type: "string",
                    description: "Specific test title (optional)",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
                min_correlation: {
                    type: "number",
                    description: "Minimum correlation threshold 0-1 (default: 0.5)",
                    default: 0.5,
                },
            },
            required: ["spec_file"],
        },
    },
    {
        name: "get_flaky_tests",
        description: "Get a list of flaky tests (tests that fail then pass on retry) across the project, sorted by flakiness rate.",
        inputSchema: {
            type: "object",
            properties: {
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
                min_flaky_rate: {
                    type: "number",
                    description: "Minimum flaky rate percentage to include (default: 5)",
                    default: 5,
                },
                limit: {
                    type: "number",
                    description: "Maximum results to return (default: 50)",
                    default: 50,
                },
            },
        },
    },
    {
        name: "get_recent_failures",
        description: "Get the most recent test failures for quick triage. Useful for seeing what's currently broken.",
        inputSchema: {
            type: "object",
            properties: {
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                spec_file: {
                    type: "string",
                    description: "Filter by spec file (optional)",
                },
                hours: {
                    type: "number",
                    description: "Hours to look back (default: 24)",
                    default: 24,
                },
                limit: {
                    type: "number",
                    description: "Maximum results (default: 50)",
                    default: 50,
                },
            },
        },
    },
    {
        name: "get_test_trend",
        description: "Get trend data for a test over time, useful for seeing if a test is getting more or less reliable.",
        inputSchema: {
            type: "object",
            properties: {
                spec_file: {
                    type: "string",
                    description: "The spec file path",
                },
                test_title: {
                    type: "string",
                    description: "Specific test title (optional)",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
                granularity: {
                    type: "string",
                    enum: ["day", "week"],
                    description: "Time granularity for trend data (default: 'day')",
                    default: "day",
                },
            },
            required: ["spec_file"],
        },
    },
    {
        name: "search_errors",
        description: "Full-text search across error messages and stacktraces. Use this to find all tests affected by a specific type of error.",
        inputSchema: {
            type: "object",
            properties: {
                query: {
                    type: "string",
                    description: "Search term (e.g., 'timeout', 'element not found', 'ECONNREFUSED')",
                },
                project_id: {
                    type: "number",
                    description: "Project ID to filter by (optional)",
                },
                days: {
                    type: "number",
                    description: "Days to look back (default: 30)",
                    default: 30,
                },
                limit: {
                    type: "number",
                    description: "Maximum results (default: 50)",
                    default: 50,
                },
            },
            required: ["query"],
        },
    },
];

// Create the server
const server = new Server({
    name: "test-results-mcp",
    version: "1.0.0",
}, {
    capabilities: {
        tools: {},
    },
});

// Handle tool listing
server.setRequestHandler(ListToolsRequestSchema, async () => {
    return { tools };
});

// Handle tool calls
server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;
    try {
        let result;
        switch (name) {
            case "get_test_history":
                result = await apiCall("/api/tests/history", args);
                break;
            case "get_test_errors":
                result = await apiCall("/api/tests/errors", args);
                break;
            case "get_failure_patterns":
                result = await apiCall("/api/tests/patterns", args);
                break;
            case "get_correlated_failures":
                result = await apiCall("/api/tests/correlations", args);
                break;
            case "get_flaky_tests":
                result = await apiCall("/api/tests/flaky", args);
                break;
            case "get_recent_failures":
                result = await apiCall("/api/tests/recent-failures", args);
                break;
            case "get_test_trend":
                result = await apiCall("/api/tests/trend", args);
                break;
            case "search_errors":
                result = await apiCall("/api/tests/search-errors", args);
                break;
            default:
                throw new Error(`Unknown tool: ${name}`);
        }
        return {
            content: [
                {
                    type: "text",
                    text: JSON.stringify(result, null, 2),
                },
            ],
        };
    } catch (error) {
        const message = error instanceof Error ? error.message : String(error);
        return {
            content: [
                {
                    type: "text",
                    text: `Error: ${message}`,
                },
            ],
            isError: true,
        };
    }
});

// Start the server
async function main() {
    const transport = new StdioServerTransport();
    await server.connect(transport);
    // Log to stderr: stdout is reserved for the MCP stdio protocol.
    console.error("Test Results MCP Server running on stdio");
}
main().catch((error) => {
    console.error("Fatal error:", error);
    process.exit(1);
});
```
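Because the server above is plain stdio JSON-RPC, it can be exercised without Claude Code at all. One option is the MCP Inspector, a separate debugging tool (not part of this package) that spawns the command and gives you a UI for calling the tools:

```bash
# The inspector inherits the shell environment, so the API key passes through.
TEST_REPORTER_API_KEY=tr_live_abc123 npx @modelcontextprotocol/inspector npx -y @testledger/mcp
```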
package/package.json
ADDED
@@ -0,0 +1,45 @@

```json
{
  "name": "@testledger/mcp",
  "version": "0.0.1",
  "description": "MCP server for Test Ledger - analyze flaky tests with Claude Code",
  "type": "module",
  "main": "dist/index.js",
  "bin": {
    "test-reporter-mcp": "./dist/index.js"
  },
  "files": [
    "dist"
  ],
  "scripts": {
    "build": "tsc",
    "dev": "tsc --watch",
    "start": "node dist/index.js",
    "prepublishOnly": "npm run build"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.0.0"
  },
  "devDependencies": {
    "@types/node": "^20.10.0",
    "typescript": "^5.3.0"
  },
  "engines": {
    "node": ">=18"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/willbrock/test-ledger-mcp"
  },
  "keywords": [
    "mcp",
    "model-context-protocol",
    "claude",
    "testing",
    "flaky-tests",
    "webdriverio",
    "e2e-testing"
  ],
  "author": "Will Brock",
  "license": "MIT",
  "homepage": "https://testledger.dev"
}
```