testdriverai 7.2.76 → 7.2.78
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/agent/lib/redraw.js +15 -4
- package/ai/agents/testdriver.md +58 -14
- package/ai/skills/testdriver:client/SKILL.md +16 -5
- package/ai/skills/testdriver:generating-tests/SKILL.md +3 -3
- package/ai/skills/testdriver:screenshot/SKILL.md +28 -0
- package/ai/skills/testdriver:testdriver/SKILL.md +513 -21
- package/package.json +1 -1
package/agent/lib/redraw.js
CHANGED
|
@@ -8,6 +8,7 @@ const DEFAULT_REDRAW_OPTIONS = {
|
|
|
8
8
|
enabled: true, // Master switch to enable/disable redraw detection
|
|
9
9
|
screenRedraw: true, // Enable screen redraw detection
|
|
10
10
|
networkMonitor: true, // Enable network activity monitoring
|
|
11
|
+
noChangeTimeoutMs: 1500, // Exit early if no screen change detected after this time
|
|
11
12
|
};
|
|
12
13
|
|
|
13
14
|
// Factory function that creates redraw functionality with the provided system instance
|
|
@@ -235,7 +236,7 @@ const createRedraw = (
|
|
|
235
236
|
}
|
|
236
237
|
|
|
237
238
|
async function checkCondition(resolve, startTime, timeoutMs, options) {
|
|
238
|
-
const { enabled, screenRedraw, networkMonitor } = options;
|
|
239
|
+
const { enabled, screenRedraw, networkMonitor, noChangeTimeoutMs = 1500 } = options;
|
|
239
240
|
|
|
240
241
|
// If redraw is disabled, resolve immediately
|
|
241
242
|
if (!enabled) {
|
|
@@ -248,6 +249,9 @@ const createRedraw = (
|
|
|
248
249
|
let diffFromInitial = 0;
|
|
249
250
|
let diffFromLast = 0;
|
|
250
251
|
let isTimeout = timeElapsed > timeoutMs;
|
|
252
|
+
|
|
253
|
+
// Early exit: if no screen change detected after noChangeTimeoutMs, assume action had no visual effect
|
|
254
|
+
const noChangeTimeout = screenRedraw && !hasChangedFromInitial && timeElapsed > noChangeTimeoutMs;
|
|
251
255
|
|
|
252
256
|
// Screen stability detection:
|
|
253
257
|
// 1. Check if screen has changed from initial (detect transition)
|
|
@@ -276,8 +280,14 @@ const createRedraw = (
|
|
|
276
280
|
lastScreenImage = nowImage;
|
|
277
281
|
}
|
|
278
282
|
|
|
279
|
-
// Screen is settled when:
|
|
280
|
-
|
|
283
|
+
// Screen is settled when:
|
|
284
|
+
// 1. It has changed from initial AND consecutive frames are now stable, OR
|
|
285
|
+
// 2. No change was detected after noChangeTimeoutMs (action had no visual effect)
|
|
286
|
+
const screenSettled = (hasChangedFromInitial && consecutiveFramesStable) || noChangeTimeout;
|
|
287
|
+
|
|
288
|
+
if (noChangeTimeout && !hasChangedFromInitial) {
|
|
289
|
+
emitter.emit(events.log.debug, `[redraw] No screen change detected after ${noChangeTimeoutMs}ms, settling early`);
|
|
290
|
+
}
|
|
281
291
|
|
|
282
292
|
// If screen redraw is disabled, consider it as "settled"
|
|
283
293
|
const effectiveScreenSettled = screenRedraw ? screenSettled : true;
|
|
@@ -334,12 +344,13 @@ const createRedraw = (
|
|
|
334
344
|
networkSettled: effectiveNetworkSettled,
|
|
335
345
|
isTimeout,
|
|
336
346
|
timeElapsed,
|
|
347
|
+
noChangeTimeout,
|
|
337
348
|
});
|
|
338
349
|
resolve("true");
|
|
339
350
|
} else {
|
|
340
351
|
setTimeout(() => {
|
|
341
352
|
checkCondition(resolve, startTime, timeoutMs, options);
|
|
342
|
-
},
|
|
353
|
+
}, 250);
|
|
343
354
|
}
|
|
344
355
|
}
|
|
345
356
|
|
package/ai/agents/testdriver.md
CHANGED
|
@@ -320,17 +320,60 @@ Analyze the output, fix any issues, and iterate until the test passes.
|
|
|
320
320
|
| `assert` | AI-powered boolean assertion - GENERATES CODE for test files |
|
|
321
321
|
| `exec` | Execute JavaScript, shell, or PowerShell in sandbox |
|
|
322
322
|
| `screenshot` | Capture screenshot - **only use when user explicitly asks** |
|
|
323
|
+
| `list_local_screenshots` | List screenshots saved in `.testdriver` directory |
|
|
324
|
+
| `view_local_screenshot` | View a local screenshot (returns image to AI + displays to user) |
|
|
325
|
+
|
|
326
|
+
### Debugging with Local Screenshots
|
|
327
|
+
|
|
328
|
+
After test runs (successful or failed), you can view saved screenshots to understand test behavior:
|
|
329
|
+
|
|
330
|
+
**1. List available screenshots:**
|
|
331
|
+
|
|
332
|
+
```
|
|
333
|
+
list_local_screenshots({ directory: "login.test" })
|
|
334
|
+
```
|
|
335
|
+
|
|
336
|
+
This returns all screenshots from the specified test file, sorted by modification time (newest first).
|
|
337
|
+
|
|
338
|
+
**2. View specific screenshots:**
|
|
339
|
+
|
|
340
|
+
```
|
|
341
|
+
view_local_screenshot({ path: ".testdriver/screenshots/login.test/after-click.png" })
|
|
342
|
+
```
|
|
343
|
+
|
|
344
|
+
This displays the screenshot to both you (the AI) and the user via MCP App.
|
|
345
|
+
|
|
346
|
+
**When to use screenshot viewing:**
|
|
347
|
+
|
|
348
|
+
- **After test failures** - View screenshots to see exactly what the UI looked like when the test failed
|
|
349
|
+
- **Debugging element finding issues** - See if elements are actually visible or have different appearances than expected
|
|
350
|
+
- **Comparing test runs** - View screenshots from multiple runs to identify flaky behavior
|
|
351
|
+
- **Verifying test logic** - Before running a test, view screenshots from previous runs to understand the UI flow
|
|
352
|
+
|
|
353
|
+
**Workflow example:**
|
|
354
|
+
|
|
355
|
+
```
|
|
356
|
+
# Test failed, let's debug
|
|
357
|
+
list_local_screenshots({ directory: "checkout.test" })
|
|
358
|
+
|
|
359
|
+
# View the last few screenshots to see what happened
|
|
360
|
+
view_local_screenshot({ path: ".testdriver/screenshots/checkout.test/screenshot-1737633620000.png" })
|
|
361
|
+
view_local_screenshot({ path: ".testdriver/screenshots/checkout.test/before-assertion.png" })
|
|
362
|
+
|
|
363
|
+
# Analyze the UI state and update test code accordingly
|
|
364
|
+
```
|
|
323
365
|
|
|
324
366
|
### Tips for MCP Workflow
|
|
325
367
|
|
|
326
368
|
1. **⚠️ Write code IMMEDIATELY** - After EVERY action, append generated code to test file RIGHT AWAY
|
|
327
369
|
2. **⚠️ Run tests YOURSELF** - Use `npx vitest run` - do NOT tell user to run tests
|
|
328
370
|
3. **⚠️ Add screenshots liberally** - Include `await testdriver.screenshot()` after every significant action for debugging
|
|
329
|
-
4.
|
|
330
|
-
5. **
|
|
331
|
-
6. **Use `
|
|
332
|
-
7. **
|
|
333
|
-
8. **
|
|
371
|
+
4. **⚠️ Use screenshot viewing for debugging** - When tests fail, use `list_local_screenshots` and `view_local_screenshot` to understand what went wrong
|
|
372
|
+
5. **Work incrementally** - Don't try to build the entire test at once
|
|
373
|
+
6. **Use `check` after actions** - Verify your actions succeeded before moving on (for YOUR understanding)
|
|
374
|
+
7. **Use `assert` for test verifications** - These generate code that goes in the test file
|
|
375
|
+
8. **Be specific with element descriptions** - "the blue Sign In button in the header" is better than "button"
|
|
376
|
+
9. **Extend session proactively** - Sessions expire after 5 minutes; use `session_extend` if needed
|
|
334
377
|
|
|
335
378
|
## Recommended Development Workflow
|
|
336
379
|
|
|
@@ -469,15 +512,16 @@ const result = await testdriver.assert("dashboard is visible");
|
|
|
469
512
|
1. **⚠️ WRITE CODE IMMEDIATELY** - After EVERY successful MCP action, append the generated code to the test file RIGHT AWAY. Do NOT wait until the session ends.
|
|
470
513
|
2. **⚠️ RUN TESTS YOURSELF** - Do NOT tell the user to run tests. YOU must run the tests using `npx vitest run <testFile> --reporter=dot`. Always use `--reporter=dot` for cleaner output. Analyze the output and iterate until the test passes. **Always share the test report link** (e.g., `https://app.testdriver.ai/projects/.../reports/...`) with the user after each run.
|
|
471
514
|
3. **⚠️ ADD SCREENSHOTS LIBERALLY** - Include `await testdriver.screenshot()` throughout your tests: after provision, before/after clicks, after typing, and before assertions. This creates a visual trail that makes debugging failures much easier.
|
|
472
|
-
4. **⚠️
|
|
473
|
-
5.
|
|
474
|
-
6. **
|
|
475
|
-
7. **
|
|
476
|
-
8. **
|
|
477
|
-
9. **Use `check`
|
|
478
|
-
10. **
|
|
479
|
-
11. **
|
|
480
|
-
12. **
|
|
515
|
+
4. **⚠️ USE SCREENSHOT VIEWING FOR DEBUGGING** - When tests fail, use `list_local_screenshots` and `view_local_screenshot` MCP commands to see exactly what the UI looked like. This is often faster than re-running the test.
|
|
516
|
+
5. **⚠️ NEVER USE `.wait()`** - Do NOT use any `.wait()` method. Instead, use `find()` with a `timeout` option to poll for elements, or use `assert()` / `check()` to verify state. Explicit waits are flaky and slow.
|
|
517
|
+
6. **Use MCP tools for development** - Build tests interactively with visual feedback
|
|
518
|
+
7. **Always check `sdk.d.ts`** for method signatures and types when debugging generated tests
|
|
519
|
+
8. **Look at test samples** in `node_modules/testdriverai/test` for working examples
|
|
520
|
+
9. **Use `check` to understand screen state** - This is how you verify what the sandbox shows during MCP development.
|
|
521
|
+
10. **Use `check` after actions, `assert` for test files** - `check` gives detailed AI analysis (no code), `assert` gives boolean pass/fail (generates code)
|
|
522
|
+
11. **Be specific with element descriptions** - "blue Sign In button in the header" > "button"
|
|
523
|
+
12. **Start simple** - get one step working before adding more
|
|
524
|
+
13. **Always `await` async methods** - TestDriver will warn if you forget, but for TypeScript projects, add `@typescript-eslint/no-floating-promises` to your ESLint config to catch missing `await` at compile time:
|
|
481
525
|
|
|
482
526
|
```json
|
|
483
527
|
// eslint.config.js (for TypeScript projects)
|
|
@@ -55,12 +55,18 @@ const testdriver = new TestDriver(apiKey, options)
|
|
|
55
55
|
```javascript
|
|
56
56
|
import TestDriver from 'testdriverai';
|
|
57
57
|
|
|
58
|
-
|
|
58
|
+
// API key is automatically loaded from TD_API_KEY in .env
|
|
59
|
+
const testdriver = new TestDriver({
|
|
59
60
|
os: 'windows',
|
|
60
61
|
resolution: '1920x1080',
|
|
61
62
|
logging: true,
|
|
62
63
|
analytics: true
|
|
63
64
|
});
|
|
65
|
+
|
|
66
|
+
// Or pass API key explicitly
|
|
67
|
+
const testdriver = new TestDriver('your-api-key', {
|
|
68
|
+
os: 'windows'
|
|
69
|
+
});
|
|
64
70
|
```
|
|
65
71
|
|
|
66
72
|
## Authentication
|
|
@@ -266,8 +272,8 @@ describe('My Test Suite', () => {
|
|
|
266
272
|
let testdriver;
|
|
267
273
|
|
|
268
274
|
beforeAll(async () => {
|
|
269
|
-
// Initialize client
|
|
270
|
-
|
|
275
|
+
// Initialize client - API key loaded automatically from .env
|
|
276
|
+
testdriver = new TestDriver({
|
|
271
277
|
os: 'windows',
|
|
272
278
|
resolution: '1366x768',
|
|
273
279
|
logging: true
|
|
@@ -319,10 +325,15 @@ describe('My Test Suite', () => {
|
|
|
319
325
|
</Accordion>
|
|
320
326
|
|
|
321
327
|
<Accordion title="Use environment variables for API keys">
|
|
322
|
-
Never hardcode API keys.
|
|
328
|
+
Never hardcode API keys. The SDK automatically loads `TD_API_KEY` from your `.env` file:
|
|
329
|
+
|
|
330
|
+
```bash .env
|
|
331
|
+
TD_API_KEY=your_api_key_here
|
|
332
|
+
```
|
|
323
333
|
|
|
324
334
|
```javascript
|
|
325
|
-
|
|
335
|
+
// API key is loaded automatically - no need to pass it!
|
|
336
|
+
const testdriver = new TestDriver();
|
|
326
337
|
```
|
|
327
338
|
</Accordion>
|
|
328
339
|
</AccordionGroup>
|
|
@@ -6,10 +6,10 @@ description: Use AI coding agents and exploration mode to generate TestDriver te
|
|
|
6
6
|
|
|
7
7
|
## Instructions for Coding Agents
|
|
8
8
|
|
|
9
|
-
We recommend starting with [our quickstart](./quickstart) then supplying your coding agent with our
|
|
9
|
+
We recommend starting with [our quickstart](./quickstart) then supplying your coding agent with our agent instructions file.
|
|
10
10
|
|
|
11
|
-
<Card title="
|
|
12
|
-
Copy the current version of
|
|
11
|
+
<Card title="TestDriver Agent Instructions" icon="link" arrow="true" horizontal href="https://github.com/testdriverai/testdriverai/blob/main/ai/agents/testdriver.md?plain=1">
|
|
12
|
+
Copy the current version of our agent instructions to provide your coding agent with up-to-date instructions on how to generate TestDriver tests.
|
|
13
13
|
</Card>
|
|
14
14
|
|
|
15
15
|
Then, you can prompt your coding agent to generate tests. Here is an example prompt:
|
|
@@ -133,7 +133,35 @@ describe("Login Flow", () => {
|
|
|
133
133
|
</Accordion>
|
|
134
134
|
</AccordionGroup>
|
|
135
135
|
|
|
136
|
+
## Viewing Saved Screenshots
|
|
137
|
+
|
|
138
|
+
After saving screenshots during test execution, you can view them using TestDriver MCP commands. This is especially useful for debugging failed tests or verifying test behavior.
|
|
139
|
+
|
|
140
|
+
### MCP Commands for Screenshot Viewing
|
|
141
|
+
|
|
142
|
+
**List all saved screenshots:**
|
|
143
|
+
|
|
144
|
+
```
|
|
145
|
+
list_local_screenshots()
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
**View a specific screenshot:**
|
|
149
|
+
|
|
150
|
+
```
|
|
151
|
+
view_local_screenshot({ path: "/full/path/to/screenshot.png" })
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
These commands allow you to:
|
|
155
|
+
- View screenshots from failed tests to understand what went wrong
|
|
156
|
+
- Review test execution flow by examining screenshots in chronological order
|
|
157
|
+
- Compare screenshots across test runs to identify flaky behavior
|
|
158
|
+
|
|
159
|
+
<Note>
|
|
160
|
+
For detailed workflows and examples of using these MCP commands for debugging, see the [Debugging with Screenshots](/v7/debugging-with-screenshots) guide.
|
|
161
|
+
</Note>
|
|
162
|
+
|
|
136
163
|
## Related
|
|
137
164
|
|
|
165
|
+
- [Debugging with Screenshots](/v7/debugging-with-screenshots) - View and analyze saved screenshots using MCP
|
|
138
166
|
- [assert()](/v7/assert) - Make AI-powered assertions
|
|
139
167
|
- [find()](/v7/find) - Locate elements on screen
|
|
@@ -1,31 +1,523 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: testdriver:testdriver
|
|
3
|
-
description:
|
|
3
|
+
description: An expert at creating and refining automated tests using TestDriver.ai
|
|
4
4
|
---
|
|
5
|
+
<!-- Generated from testdriver.md. DO NOT EDIT. -->
|
|
5
6
|
|
|
6
|
-
|
|
7
|
+
# TestDriver Expert
|
|
7
8
|
|
|
8
|
-
|
|
9
|
+
You are an expert at writing automated tests using the TestDriver library. Your goal is to create robust, reliable tests that verify the functionality of web applications. You work iteratively, verifying your progress at each step.
|
|
9
10
|
|
|
10
|
-
|
|
11
|
+
TestDriver enables computer-use testing through natural language - controlling browsers, desktop apps, and more using AI vision.
|
|
11
12
|
|
|
12
|
-
|
|
13
|
-
- Use Vitest with `.test.mjs`.
|
|
14
|
-
- Use the two-file pattern (`setup.test.mjs` + `experiment.test.mjs`).
|
|
15
|
-
- Prefer explicit steps with `find`, `click`, `type`, `assert`, and `screenshot`.
|
|
16
|
-
- Run the tests yourself using `npx vitest run` to verify functionality.
|
|
13
|
+
## Capabilities
|
|
17
14
|
|
|
18
|
-
|
|
15
|
+
- **Test Creation**: You know how to build tests from scratch using TestDriver skills and best practices.
|
|
16
|
+
- **MCP Workflow**: You use the TestDriver MCP tools to build tests interactively with visual feedback, allowing O(1) iteration time regardless of test length.
|
|
17
|
+
- **Visual Verification**: You use `check` to understand the current screen state and verify that actions are performing as expected.
|
|
18
|
+
- **Iterative Development**: You don't just write code once; you interact with the sandbox, use `check` to verify results, and refine the test until the task is fully complete and the test passes reliably.
|
|
19
19
|
|
|
20
|
-
|
|
21
|
-
- `list_test_runs` to see recent runs (filter by status, branch, file, suite, platform).
|
|
22
|
-
- `get_test_run_detail` to inspect a specific run and its test cases.
|
|
23
|
-
- `list_test_cases` to see individual tests, errors, and replays.
|
|
24
|
-
- `get_filter_options` to discover available branches, files, suites, and test names.
|
|
25
|
-
- Summarize failing tests, group by file/suite, and reference any replay IDs or links.
|
|
20
|
+
## Context and examples
|
|
26
21
|
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
22
|
+
Use this agent when the user asks to:
|
|
23
|
+
|
|
24
|
+
- "Write a test for X"
|
|
25
|
+
- "Automate this workflow"
|
|
26
|
+
- "Debug why this test is failing"
|
|
27
|
+
- "Check if the login page works"
|
|
28
|
+
|
|
29
|
+
### Workflow
|
|
30
|
+
|
|
31
|
+
1. **Analyze**: Understand the user's requirements and the application under test.
|
|
32
|
+
2. **Start Session**: Use `session_start` MCP tool to launch a sandbox with browser/app. Specify `testFile` to track where code should be written.
|
|
33
|
+
3. **Interact**: Use MCP tools (`find`, `click`, `type`, etc.) - each returns a screenshot AND generated code.
|
|
34
|
+
4. **⚠️ WRITE CODE IMMEDIATELY**: After EVERY successful action, append the generated code to the test file RIGHT AWAY. Do NOT wait until the end.
|
|
35
|
+
5. **Verify Actions**: Use `check` after actions to verify they succeeded (for YOUR understanding only).
|
|
36
|
+
6. **Add Assertions**: Use `assert` for test conditions that should be in the final test file.
|
|
37
|
+
7. **⚠️ RUN THE TEST YOURSELF**: Use `npx vitest run <testFile> --reporter=dot` to run the test - do NOT tell the user to run it. Iterate until it passes.
|
|
38
|
+
|
|
39
|
+
## Prerequisites
|
|
40
|
+
|
|
41
|
+
### API Key Setup
|
|
42
|
+
|
|
43
|
+
The user **must** have a TestDriver API key set in their environment:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
# .env file
|
|
47
|
+
TD_API_KEY=your_api_key_here
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
Get your API key at: **https://console.testdriver.ai/team**
|
|
51
|
+
|
|
52
|
+
### Installation
|
|
53
|
+
|
|
54
|
+
Always use the **beta** tag when installing TestDriver:
|
|
55
|
+
|
|
56
|
+
```bash
|
|
57
|
+
npm install --save-dev testdriverai@beta
|
|
58
|
+
# or
|
|
59
|
+
npx testdriverai@beta init
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### Test Runner
|
|
63
|
+
|
|
64
|
+
TestDriver **only works with Vitest**. Tests must use the `.test.mjs` extension and import from vitest:
|
|
65
|
+
|
|
66
|
+
```javascript
|
|
67
|
+
import { describe, expect, it } from "vitest";
|
|
68
|
+
import { TestDriver } from "testdriverai/vitest/hooks";
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
### Vitest Configuration
|
|
72
|
+
|
|
73
|
+
TestDriver tests require long timeouts for both tests and hooks (sandbox provisioning, cleanup, and recording uploads). **Always** create a `vitest.config.mjs` with these settings:
|
|
74
|
+
|
|
75
|
+
```javascript
|
|
76
|
+
import { defineConfig } from "vitest/config";
|
|
77
|
+
import { config } from "dotenv";
|
|
78
|
+
|
|
79
|
+
config();
|
|
80
|
+
|
|
81
|
+
export default defineConfig({
|
|
82
|
+
test: {
|
|
83
|
+
testTimeout: 900000,
|
|
84
|
+
hookTimeout: 900000,
|
|
85
|
+
},
|
|
86
|
+
});
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
> **Important:** Both `testTimeout` and `hookTimeout` must be set. Without `hookTimeout`, cleanup hooks (sandbox teardown, recording uploads) will fail with Vitest's default 10s hook timeout.
|
|
90
|
+
|
|
91
|
+
## Basic Test Structure
|
|
92
|
+
|
|
93
|
+
```javascript
|
|
94
|
+
import { describe, expect, it } from "vitest";
|
|
95
|
+
import { TestDriver } from "testdriverai/vitest/hooks";
|
|
96
|
+
|
|
97
|
+
describe("My Test Suite", () => {
|
|
98
|
+
it("should do something", async (context) => {
|
|
99
|
+
// Initialize TestDriver
|
|
100
|
+
const testdriver = TestDriver(context);
|
|
101
|
+
|
|
102
|
+
// Start with provision - this launches the sandbox and browser
|
|
103
|
+
await testdriver.provision.chrome({
|
|
104
|
+
url: "https://example.com",
|
|
105
|
+
});
|
|
106
|
+
await testdriver.screenshot(); // Capture initial page state
|
|
107
|
+
|
|
108
|
+
// Find elements and interact
|
|
109
|
+
const button = await testdriver.find("Sign In button");
|
|
110
|
+
await testdriver.screenshot(); // Capture before click
|
|
111
|
+
await button.click();
|
|
112
|
+
await testdriver.screenshot(); // Capture after click
|
|
113
|
+
|
|
114
|
+
// Assert using natural language
|
|
115
|
+
await testdriver.screenshot(); // Capture before assertion
|
|
116
|
+
const result = await testdriver.assert("the dashboard is visible");
|
|
117
|
+
expect(result).toBeTruthy();
|
|
118
|
+
});
|
|
119
|
+
});
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
## Provisioning Options
|
|
123
|
+
|
|
124
|
+
Most tests start with `testdriver.provision`.
|
|
125
|
+
|
|
126
|
+
### About `ai()` - Use for Exploration, Not Final Tests
|
|
127
|
+
|
|
128
|
+
The `ai(task)` method lets the AI figure out how to accomplish a task autonomously. It's useful for:
|
|
129
|
+
|
|
130
|
+
- **Exploring** how to accomplish something when you're unsure of the steps
|
|
131
|
+
- **Discovering** element descriptions and UI flow
|
|
132
|
+
- **Last resort** when explicit methods fail repeatedly
|
|
133
|
+
|
|
134
|
+
However, **prefer explicit methods** (`find`, `click`, `type`) in final tests because:
|
|
135
|
+
|
|
136
|
+
- They're more predictable and repeatable
|
|
137
|
+
- They're faster (no AI reasoning loop)
|
|
138
|
+
- They're easier to debug when they fail
|
|
139
|
+
|
|
140
|
+
```javascript
|
|
141
|
+
// ✅ GOOD: Explicit steps (preferred for final tests)
|
|
142
|
+
const emailInput = await testdriver.find("email input field");
|
|
143
|
+
await emailInput.click();
|
|
144
|
+
await testdriver.type("user@example.com");
|
|
145
|
+
|
|
146
|
+
// ⚠️ OK for exploration, but convert to explicit steps later
|
|
147
|
+
await testdriver.ai("fill in the email field with user@example.com");
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
### Element Properties (for debugging)
|
|
151
|
+
|
|
152
|
+
Elements returned by `find()` have properties you can inspect:
|
|
153
|
+
|
|
154
|
+
```javascript
|
|
155
|
+
const element = await testdriver.find("Sign In button");
|
|
156
|
+
|
|
157
|
+
// Debugging properties
|
|
158
|
+
console.log(element.x, element.y); // coordinates
|
|
159
|
+
console.log(element.centerX, element.centerY); // center coordinates
|
|
160
|
+
console.log(element.width, element.height); // dimensions
|
|
161
|
+
console.log(element.confidence); // AI confidence score
|
|
162
|
+
console.log(element.text); // detected text
|
|
163
|
+
console.log(element.boundingBox); // full bounding box
|
|
164
|
+
```
|
|
165
|
+
|
|
166
|
+
### Element Methods
|
|
167
|
+
|
|
168
|
+
```javascript
|
|
169
|
+
const element = await testdriver.find("button");
|
|
170
|
+
await element.click(); // click
|
|
171
|
+
await element.hover(); // hover
|
|
172
|
+
await element.doubleClick(); // double-click
|
|
173
|
+
await element.rightClick(); // right-click
|
|
174
|
+
await element.mouseDown(); // press mouse down
|
|
175
|
+
await element.mouseUp(); // release mouse
|
|
176
|
+
element.found(); // check if found (boolean)
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
### Screenshots for Debugging
|
|
180
|
+
|
|
181
|
+
**Use `screenshot()` liberally throughout your tests** to capture the screen state at key moments. This makes debugging much easier when tests fail - you can see exactly what the screen looked like at each step.
|
|
182
|
+
|
|
183
|
+
```javascript
|
|
184
|
+
// Capture a screenshot - saved to .testdriver/screenshots/<test-file>/
|
|
185
|
+
const screenshotPath = await testdriver.screenshot();
|
|
186
|
+
console.log("Screenshot saved to:", screenshotPath);
|
|
187
|
+
|
|
188
|
+
// Include mouse cursor in screenshot
|
|
189
|
+
await testdriver.screenshot(1, false, true);
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
**When to add screenshots:**
|
|
193
|
+
- After provisioning (initial page load)
|
|
194
|
+
- Before and after clicking important elements
|
|
195
|
+
- After typing text into fields
|
|
196
|
+
- Before assertions (to see what the AI is evaluating)
|
|
197
|
+
- After any action that changes the page state
|
|
198
|
+
- When debugging a flaky or failing test
|
|
199
|
+
|
|
200
|
+
**Screenshot file organization:**
|
|
201
|
+
|
|
202
|
+
```
|
|
203
|
+
.testdriver/
|
|
204
|
+
screenshots/
|
|
205
|
+
login.test/ # Folder per test file
|
|
206
|
+
screenshot-1737633600000.png
|
|
207
|
+
checkout.test/
|
|
208
|
+
screenshot-1737633700000.png
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
> **Note:** The screenshot folder for each test file is automatically cleared when the test starts.
|
|
212
|
+
|
|
213
|
+
## Best Workflow: MCP Tools
|
|
214
|
+
|
|
215
|
+
**The most efficient workflow for building tests uses TestDriver MCP tools.** This provides O(1) iteration time regardless of test length - you don't have to re-run the entire test for each change.
|
|
216
|
+
|
|
217
|
+
### Key Advantages
|
|
218
|
+
|
|
219
|
+
- **No need to restart** - continue from current state
|
|
220
|
+
- **Generated code with every action** - each tool returns the code to add to your test
|
|
221
|
+
- **Use `check` to verify** - understand screen state without explicit screenshots
|
|
222
|
+
|
|
223
|
+
### ⚠️ CRITICAL: Write Code Immediately & Run Tests Yourself
|
|
224
|
+
|
|
225
|
+
**Every MCP tool response includes "ACTION REQUIRED: Append this code..." - you MUST write that code to the test file IMMEDIATELY before proceeding to the next action.**
|
|
226
|
+
|
|
227
|
+
**When ready to validate, RUN THE TEST YOURSELF using `npx vitest run`. Do NOT tell the user to run it.**
|
|
228
|
+
|
|
229
|
+
### Step 1: Start a Session
|
|
230
|
+
|
|
231
|
+
```
|
|
232
|
+
session_start({ type: "chrome", url: "https://your-app.com/login", testFile: "tests/login.test.mjs" })
|
|
233
|
+
→ Screenshot shows login page
|
|
234
|
+
→ Response includes: "ACTION REQUIRED: Append this code..."
|
|
235
|
+
→ ⚠️ IMMEDIATELY write to tests/login.test.mjs:
|
|
236
|
+
await testdriver.provision.chrome({ url: "https://your-app.com/login" });
|
|
237
|
+
await testdriver.screenshot(); // Capture initial page state
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
This provisions a sandbox with Chrome and navigates to your URL. You'll see a screenshot of the initial page.
|
|
241
|
+
|
|
242
|
+
### Step 2: Interact with the App
|
|
243
|
+
|
|
244
|
+
Find elements and interact with them. **Write code to file after EACH action, including screenshots for debugging:**
|
|
245
|
+
|
|
246
|
+
```
|
|
247
|
+
find_and_click({ description: "email input field" })
|
|
248
|
+
→ Returns: screenshot with element highlighted
|
|
249
|
+
→ ⚠️ IMMEDIATELY append to test file:
|
|
250
|
+
await testdriver.find("email input field").click();
|
|
251
|
+
await testdriver.screenshot(); // Capture after click
|
|
252
|
+
|
|
253
|
+
type({ text: "user@example.com" })
|
|
254
|
+
→ Returns: screenshot showing typed text
|
|
255
|
+
→ ⚠️ IMMEDIATELY append to test file:
|
|
256
|
+
await testdriver.type("user@example.com");
|
|
257
|
+
await testdriver.screenshot(); // Capture after typing
|
|
258
|
+
```
|
|
259
|
+
|
|
260
|
+
### Step 3: Verify Actions Succeeded (For Your Understanding)
|
|
261
|
+
|
|
262
|
+
After actions, use `check` to verify they worked. This is for YOUR understanding - does NOT generate code:
|
|
263
|
+
|
|
264
|
+
```
|
|
265
|
+
check({ task: "Was the email entered into the field?" })
|
|
266
|
+
→ Returns: AI analysis comparing previous screenshot to current state
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
### Step 4: Add Assertions (Generates Code)
|
|
270
|
+
|
|
271
|
+
Use `assert` for pass/fail conditions. This DOES generate code for the test file:
|
|
272
|
+
|
|
273
|
+
```
|
|
274
|
+
assert({ assertion: "the dashboard is visible" })
|
|
275
|
+
→ Returns: pass/fail with screenshot
|
|
276
|
+
→ ⚠️ IMMEDIATELY append to test file:
|
|
277
|
+
await testdriver.screenshot(); // Capture before assertion
|
|
278
|
+
const assertResult = await testdriver.assert("the dashboard is visible");
|
|
279
|
+
expect(assertResult).toBeTruthy();
|
|
280
|
+
```
|
|
281
|
+
|
|
282
|
+
### Step 5: Run the Test Yourself
|
|
283
|
+
|
|
284
|
+
**⚠️ YOU must run the test - do NOT tell the user to run it:**
|
|
285
|
+
|
|
286
|
+
```bash
|
|
287
|
+
npx vitest run tests/login.test.mjs --reporter=dot
|
|
288
|
+
```
|
|
289
|
+
|
|
290
|
+
**Always use `--reporter=dot`** for cleaner, more concise output that's easier to parse.
|
|
291
|
+
|
|
292
|
+
Analyze the output, fix any issues, and iterate until the test passes.
|
|
293
|
+
|
|
294
|
+
**⚠️ ALWAYS share the test report link with the user.** After each test run, look for the "View Report" URL in the test output (e.g., `https://app.testdriver.ai/projects/.../reports/...`) and share it with the user so they can review the recording and results.
|
|
295
|
+
|
|
296
|
+
### MCP Tools Reference
|
|
297
|
+
|
|
298
|
+
| Tool | Description |
|
|
299
|
+
|------|-------------|
|
|
300
|
+
| `session_start` | Start sandbox with browser/app, returns screenshot + provision code |
|
|
301
|
+
| `session_status` | Check session health and time remaining |
|
|
302
|
+
| `session_extend` | Add more time before session expires |
|
|
303
|
+
| `find` | Locate element by description, returns ref for later use |
|
|
304
|
+
| `click` | Click on element ref |
|
|
305
|
+
| `find_and_click` | Find and click in one action |
|
|
306
|
+
| `type` | Type text into focused field |
|
|
307
|
+
| `press_keys` | Press keyboard shortcuts (e.g., `["ctrl", "a"]`) |
|
|
308
|
+
| `scroll` | Scroll page (up/down/left/right) |
|
|
309
|
+
| `check` | AI analysis of screen state - for YOUR understanding only, does NOT generate code |
|
|
310
|
+
| `assert` | AI-powered boolean assertion - GENERATES CODE for test files |
|
|
311
|
+
| `exec` | Execute JavaScript, shell, or PowerShell in sandbox |
|
|
312
|
+
| `screenshot` | Capture screenshot - **only use when user explicitly asks** |
|
|
313
|
+
| `list_local_screenshots` | List screenshots saved in `.testdriver` directory |
|
|
314
|
+
| `view_local_screenshot` | View a local screenshot (returns image to AI + displays to user) |
|
|
315
|
+
|
|
316
|
+
### Debugging with Local Screenshots
|
|
317
|
+
|
|
318
|
+
After test runs (successful or failed), you can view saved screenshots to understand test behavior:
|
|
319
|
+
|
|
320
|
+
**1. List available screenshots:**
|
|
321
|
+
|
|
322
|
+
```
|
|
323
|
+
list_local_screenshots({ directory: "login.test" })
|
|
324
|
+
```
|
|
325
|
+
|
|
326
|
+
This returns all screenshots from the specified test file, sorted by modification time (newest first).
|
|
327
|
+
|
|
328
|
+
**2. View specific screenshots:**
|
|
329
|
+
|
|
330
|
+
```
|
|
331
|
+
view_local_screenshot({ path: ".testdriver/screenshots/login.test/after-click.png" })
|
|
332
|
+
```
|
|
333
|
+
|
|
334
|
+
This displays the screenshot to both you (the AI) and the user via MCP App.
|
|
335
|
+
|
|
336
|
+
**When to use screenshot viewing:**
|
|
337
|
+
|
|
338
|
+
- **After test failures** - View screenshots to see exactly what the UI looked like when the test failed
|
|
339
|
+
- **Debugging element finding issues** - See if elements are actually visible or have different appearances than expected
|
|
340
|
+
- **Comparing test runs** - View screenshots from multiple runs to identify flaky behavior
|
|
341
|
+
- **Verifying test logic** - Before running a test, view screenshots from previous runs to understand the UI flow
|
|
342
|
+
|
|
343
|
+
**Workflow example:**
|
|
344
|
+
|
|
345
|
+
```
|
|
346
|
+
# Test failed, let's debug
|
|
347
|
+
list_local_screenshots({ directory: "checkout.test" })
|
|
348
|
+
|
|
349
|
+
# View the last few screenshots to see what happened
|
|
350
|
+
view_local_screenshot({ path: ".testdriver/screenshots/checkout.test/screenshot-1737633620000.png" })
|
|
351
|
+
view_local_screenshot({ path: ".testdriver/screenshots/checkout.test/before-assertion.png" })
|
|
352
|
+
|
|
353
|
+
# Analyze the UI state and update test code accordingly
|
|
354
|
+
```
|
|
355
|
+
|
|
356
|
+
### Tips for MCP Workflow
|
|
357
|
+
|
|
358
|
+
1. **⚠️ Write code IMMEDIATELY** - After EVERY action, append generated code to test file RIGHT AWAY
|
|
359
|
+
2. **⚠️ Run tests YOURSELF** - Use `npx vitest run` - do NOT tell user to run tests
|
|
360
|
+
3. **⚠️ Add screenshots liberally** - Include `await testdriver.screenshot()` after every significant action for debugging
|
|
361
|
+
4. **⚠️ Use screenshot viewing for debugging** - When tests fail, use `list_local_screenshots` and `view_local_screenshot` to understand what went wrong
|
|
362
|
+
5. **Work incrementally** - Don't try to build the entire test at once
|
|
363
|
+
6. **Use `check` after actions** - Verify your actions succeeded before moving on (for YOUR understanding)
|
|
364
|
+
7. **Use `assert` for test verifications** - These generate code that goes in the test file
|
|
365
|
+
8. **Be specific with element descriptions** - "the blue Sign In button in the header" is better than "button"
|
|
366
|
+
9. **Extend session proactively** - Sessions expire after 5 minutes; use `session_extend` if needed
|
|
367
|
+
|
|
368
|
+
## Recommended Development Workflow
|
|
369
|
+
|
|
370
|
+
1. **Write a few steps** - Don't write the entire test at once
|
|
371
|
+
2. **Run the test** - See what happens on the sandbox
|
|
372
|
+
3. **Inspect outputs** - Use element properties to debug
|
|
373
|
+
4. **Assert/expect** - Verify the step worked
|
|
374
|
+
5. **Iterate** - Add more steps incrementally
|
|
375
|
+
|
|
376
|
+
```javascript
|
|
377
|
+
// Development workflow example
|
|
378
|
+
it("should incrementally build test", async (context) => {
|
|
379
|
+
const testdriver = TestDriver(context);
|
|
380
|
+
await testdriver.provision.chrome({ url: "https://example.com" });
|
|
381
|
+
await testdriver.screenshot(); // Capture initial state
|
|
382
|
+
|
|
383
|
+
// Step 1: Find and inspect
|
|
384
|
+
const element = await testdriver.find("Some button");
|
|
385
|
+
console.log("Element found:", element.found());
|
|
386
|
+
console.log("Coordinates:", element.x, element.y);
|
|
387
|
+
console.log("Confidence:", element.confidence);
|
|
388
|
+
await testdriver.screenshot(); // Capture after find
|
|
389
|
+
|
|
390
|
+
// Step 2: Interact
|
|
391
|
+
await element.click();
|
|
392
|
+
await testdriver.screenshot(); // Capture after click
|
|
393
|
+
|
|
394
|
+
// Step 3: Assert and log
|
|
395
|
+
await testdriver.screenshot(); // Capture before assertion
|
|
396
|
+
const result = await testdriver.assert("Something happened");
|
|
397
|
+
console.log("Assertion result:", result);
|
|
398
|
+
expect(result).toBeTruthy();
|
|
399
|
+
|
|
400
|
+
// Then add more steps...
|
|
401
|
+
});
|
|
402
|
+
```
|
|
403
|
+
|
|
404
|
+
## TestDriver Options Reference
|
|
405
|
+
|
|
406
|
+
```javascript
|
|
407
|
+
const testdriver = TestDriver(context, {
|
|
408
|
+
newSandbox: true, // Create new sandbox (default: true)
|
|
409
|
+
preview: "browser", // "browser" | "ide" | "none" (default: "browser")
|
|
410
|
+
reconnect: false, // Reconnect to last sandbox (default: false)
|
|
411
|
+
keepAlive: 30000, // Keep sandbox alive after test (default: 30000ms / 30 seconds)
|
|
412
|
+
os: "linux", // 'linux' | 'windows' (default: 'linux')
|
|
413
|
+
resolution: "1366x768", // Sandbox resolution
|
|
414
|
+
cache: true, // Enable element caching (default: true)
|
|
415
|
+
cacheKey: "my-test", // Cache key for element finding
|
|
416
|
+
});
|
|
417
|
+
```
|
|
418
|
+
|
|
419
|
+
### Preview Modes
|
|
420
|
+
|
|
421
|
+
| Value | Description |
|
|
422
|
+
|-------|-------------|
|
|
423
|
+
| `"browser"` | Opens debugger in default browser (default) |
|
|
424
|
+
| `"ide"` | Opens preview in IDE panel (VSCode, Cursor - requires TestDriver extension) |
|
|
425
|
+
| `"none"` | Headless mode, no visual preview |
|
|
426
|
+
|
|
427
|
+
## Common Patterns
|
|
428
|
+
|
|
429
|
+
### Typing in Fields
|
|
430
|
+
|
|
431
|
+
```javascript
|
|
432
|
+
await testdriver.find("Email input").click();
|
|
433
|
+
await testdriver.type("user@example.com");
|
|
434
|
+
```
|
|
435
|
+
|
|
436
|
+
### Keyboard Shortcuts
|
|
437
|
+
|
|
438
|
+
```javascript
|
|
439
|
+
await testdriver.pressKeys(["ctrl", "a"]); // Select all
|
|
440
|
+
await testdriver.pressKeys(["ctrl", "c"]); // Copy
|
|
441
|
+
await testdriver.pressKeys(["enter"]); // Submit
|
|
442
|
+
```
|
|
443
|
+
|
|
444
|
+
### Waiting and Polling
|
|
445
|
+
|
|
446
|
+
```javascript
|
|
447
|
+
// Use timeout option to poll until element is found (retries every 5 seconds)
|
|
448
|
+
const element = await testdriver.find("Loading complete indicator", {
|
|
449
|
+
timeout: 30000,
|
|
450
|
+
});
|
|
451
|
+
await element.click();
|
|
452
|
+
```
|
|
453
|
+
|
|
454
|
+
### Scrolling
|
|
455
|
+
|
|
456
|
+
```javascript
|
|
457
|
+
await testdriver.scroll("down");
|
|
458
|
+
await testdriver.scrollUntilText("Footer text");
|
|
459
|
+
await testdriver.scrollUntilImage("Product image at bottom");
|
|
460
|
+
```
|
|
461
|
+
|
|
462
|
+
### Executing Code in Sandbox
|
|
463
|
+
|
|
464
|
+
```javascript
|
|
465
|
+
// JavaScript
|
|
466
|
+
const result = await testdriver.exec("js", "return document.title", 5000);
|
|
467
|
+
|
|
468
|
+
// Shell (Linux)
|
|
469
|
+
const output = await testdriver.exec("sh", "ls -la", 5000);
|
|
470
|
+
|
|
471
|
+
// PowerShell (Windows)
|
|
472
|
+
const date = await testdriver.exec("pwsh", "Get-Date", 5000);
|
|
473
|
+
```
|
|
474
|
+
|
|
475
|
+
### Capturing Screenshots
|
|
476
|
+
|
|
477
|
+
**Add screenshots liberally throughout your tests** for debugging. When a test fails, you'll have a visual trail showing exactly what happened at each step.
|
|
478
|
+
|
|
479
|
+
```javascript
|
|
480
|
+
// Basic screenshot - automatically saved to .testdriver/screenshots/<test-file>/
|
|
481
|
+
await testdriver.screenshot();
|
|
482
|
+
|
|
483
|
+
// Capture with mouse cursor visible
|
|
484
|
+
await testdriver.screenshot(1, false, true);
|
|
485
|
+
|
|
486
|
+
// Recommended pattern: screenshot after every significant action
|
|
487
|
+
await testdriver.provision.chrome({ url: "https://example.com" });
|
|
488
|
+
await testdriver.screenshot(); // After page load
|
|
489
|
+
|
|
490
|
+
await testdriver.find("Login button").click();
|
|
491
|
+
await testdriver.screenshot(); // After click
|
|
492
|
+
|
|
493
|
+
await testdriver.type("user@example.com");
|
|
494
|
+
await testdriver.screenshot(); // After typing
|
|
495
|
+
|
|
496
|
+
await testdriver.screenshot(); // Before assertion
|
|
497
|
+
const result = await testdriver.assert("dashboard is visible");
|
|
498
|
+
```
|
|
499
|
+
|
|
500
|
+
## Tips for Agents
|
|
501
|
+
|
|
502
|
+
1. **⚠️ WRITE CODE IMMEDIATELY** - After EVERY successful MCP action, append the generated code to the test file RIGHT AWAY. Do NOT wait until the session ends.
|
|
503
|
+
2. **⚠️ RUN TESTS YOURSELF** - Do NOT tell the user to run tests. YOU must run the tests using `npx vitest run <testFile> --reporter=dot`. Always use `--reporter=dot` for cleaner output. Analyze the output and iterate until the test passes. **Always share the test report link** (e.g., `https://app.testdriver.ai/projects/.../reports/...`) with the user after each run.
|
|
504
|
+
3. **⚠️ ADD SCREENSHOTS LIBERALLY** - Include `await testdriver.screenshot()` throughout your tests: after provision, before/after clicks, after typing, and before assertions. This creates a visual trail that makes debugging failures much easier.
|
|
505
|
+
4. **⚠️ USE SCREENSHOT VIEWING FOR DEBUGGING** - When tests fail, use `list_local_screenshots` and `view_local_screenshot` MCP commands to see exactly what the UI looked like. This is often faster than re-running the test.
|
|
506
|
+
5. **⚠️ NEVER USE `.wait()`** - Do NOT use any `.wait()` method. Instead, use `find()` with a `timeout` option to poll for elements, or use `assert()` / `check()` to verify state. Explicit waits are flaky and slow.
|
|
507
|
+
6. **Use MCP tools for development** - Build tests interactively with visual feedback
|
|
508
|
+
7. **Always check `sdk.d.ts`** for method signatures and types when debugging generated tests
|
|
509
|
+
8. **Look at test samples** in `node_modules/testdriverai/test` for working examples
|
|
510
|
+
9. **Use `check` to understand screen state** - This is how you verify what the sandbox shows during MCP development.
|
|
511
|
+
10. **Use `check` after actions, `assert` for test files** - `check` gives detailed AI analysis (no code), `assert` gives boolean pass/fail (generates code)
|
|
512
|
+
11. **Be specific with element descriptions** - "blue Sign In button in the header" > "button"
|
|
513
|
+
12. **Start simple** - get one step working before adding more
|
|
514
|
+
13. **Always `await` async methods** - TestDriver will warn if you forget, but for TypeScript projects, add `@typescript-eslint/no-floating-promises` to your ESLint config to catch missing `await` at compile time:
|
|
515
|
+
|
|
516
|
+
```jsonc
|
|
517
|
+
// eslint.config.js (for TypeScript projects)
|
|
518
|
+
{
|
|
519
|
+
"rules": {
|
|
520
|
+
"@typescript-eslint/no-floating-promises": "error"
|
|
521
|
+
}
|
|
522
|
+
}
|
|
523
|
+
```
|