testdriverai 7.2.9 → 7.2.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/testdriver.yml +127 -0
- package/.testdriver/last-sandbox +7 -0
- package/agent/events.js +1 -0
- package/agent/index.js +71 -54
- package/agent/lib/sandbox.js +11 -1
- package/agents.md +393 -0
- package/debug/01-table-initial.png +0 -0
- package/debug/02-after-ai-explore.png +0 -0
- package/debug/02-after-scroll.png +0 -0
- package/docs/docs.json +87 -126
- package/docs/v7/_drafts/caching.mdx +2 -2
- package/docs/v7/{getting-started → _drafts}/installation.mdx +0 -66
- package/docs/v7/{features/coverage.mdx → _drafts/powerful.mdx} +1 -90
- package/docs/v7/{features → _drafts}/scalable.mdx +126 -4
- package/docs/v7/_drafts/screenshot.mdx +155 -0
- package/docs/v7/_drafts/writing-tests.mdx +25 -0
- package/docs/v7/{api/act.mdx → ai.mdx} +27 -27
- package/docs/v7/{api/assert.mdx → assert.mdx} +3 -3
- package/docs/v7/aws-setup.mdx +338 -0
- package/docs/v7/caching.mdx +128 -0
- package/docs/v7/ci-cd.mdx +605 -0
- package/docs/v7/{api/click.mdx → click.mdx} +4 -4
- package/docs/v7/cloud.mdx +120 -0
- package/docs/v7/customizing-devices.mdx +129 -0
- package/docs/v7/{api/dashcam.mdx → dashcam.mdx} +0 -78
- package/docs/v7/{api/doubleClick.mdx → double-click.mdx} +5 -5
- package/docs/v7/{api/elements.mdx → elements.mdx} +1 -54
- package/docs/v7/enterprise.mdx +116 -0
- package/docs/v7/examples.mdx +5 -0
- package/docs/v7/{api/exec.mdx → exec.mdx} +3 -3
- package/docs/v7/{api/find.mdx → find.mdx} +17 -21
- package/docs/v7/{api/focusApplication.mdx → focus-application.mdx} +3 -3
- package/docs/v7/generating-tests.mdx +36 -0
- package/docs/v7/{api/hover.mdx → hover.mdx} +3 -3
- package/docs/v7/locating-elements.mdx +71 -0
- package/docs/v7/making-assertions.mdx +32 -0
- package/docs/v7/{api/mouseDown.mdx → mouse-down.mdx} +7 -7
- package/docs/v7/{api/mouseUp.mdx → mouse-up.mdx} +8 -8
- package/docs/v7/performing-actions.mdx +51 -0
- package/docs/v7/{api/pressKeys.mdx → press-keys.mdx} +3 -3
- package/docs/v7/quickstart.mdx +162 -0
- package/docs/v7/reusable-code.mdx +240 -0
- package/docs/v7/{api/rightClick.mdx → right-click.mdx} +5 -5
- package/docs/v7/running-tests.mdx +181 -0
- package/docs/v7/{api/scroll.mdx → scroll.mdx} +3 -3
- package/docs/v7/secrets.mdx +115 -0
- package/docs/v7/self-hosted.mdx +66 -0
- package/docs/v7/{api/type.mdx → type.mdx} +3 -3
- package/docs/v7/variables.mdx +111 -0
- package/docs/v7/waiting-for-elements.mdx +66 -0
- package/docs/v7/what-is-testdriver.mdx +54 -0
- package/lib/vitest/hooks.mjs +80 -68
- package/package.json +1 -1
- package/sdk.d.ts +22 -9
- package/sdk.js +177 -44
- package/test/manual/reconnect-provision.test.mjs +49 -0
- package/test/manual/reconnect-signin.test.mjs +41 -0
- package/test/testdriver/ai.test.mjs +30 -0
- package/test/testdriver/setup/testHelpers.mjs +0 -1
- package/test/testdriver/windows-installer.test.mjs +61 -0
- package/tests/table-sort-enrollments.test.mjs +72 -0
- package/tests/table-sort-experiment.test.mjs +42 -0
- package/tests/table-sort-setup.test.mjs +59 -0
- package/vitest.config.mjs +1 -0
- package/docs/v7/api/assertions.mdx +0 -403
- package/docs/v7/api/sandbox.mdx +0 -404
- package/docs/v7/features/ai-native.mdx +0 -413
- package/docs/v7/features/application-logs.mdx +0 -353
- package/docs/v7/features/browser-logs.mdx +0 -414
- package/docs/v7/features/cache-management.mdx +0 -402
- package/docs/v7/features/continuous-testing.mdx +0 -346
- package/docs/v7/features/data-driven-testing.mdx +0 -441
- package/docs/v7/features/easy-to-write.mdx +0 -280
- package/docs/v7/features/enterprise.mdx +0 -656
- package/docs/v7/features/fast.mdx +0 -406
- package/docs/v7/features/managed-sandboxes.mdx +0 -384
- package/docs/v7/features/network-monitoring.mdx +0 -568
- package/docs/v7/features/parallel-execution.mdx +0 -381
- package/docs/v7/features/powerful.mdx +0 -531
- package/docs/v7/features/sandbox-customization.mdx +0 -229
- package/docs/v7/features/stable.mdx +0 -473
- package/docs/v7/features/system-performance.mdx +0 -616
- package/docs/v7/features/test-analytics.mdx +0 -373
- package/docs/v7/features/test-cases.mdx +0 -393
- package/docs/v7/features/test-replays.mdx +0 -408
- package/docs/v7/features/test-reports.mdx +0 -308
- package/docs/v7/getting-started/debugging-tests.mdx +0 -382
- package/docs/v7/getting-started/quickstart.mdx +0 -90
- package/docs/v7/getting-started/running-tests.mdx +0 -173
- package/docs/v7/getting-started/setting-up-in-ci.mdx +0 -612
- package/docs/v7/getting-started/writing-tests.mdx +0 -534
- package/docs/v7/overview/what-is-testdriver.mdx +0 -386
- package/docs/v7/presets/chrome-extension.mdx +0 -248
- package/docs/v7/presets/chrome.mdx +0 -300
- package/docs/v7/presets/electron.mdx +0 -460
- package/docs/v7/presets/vscode.mdx +0 -417
- package/docs/v7/presets/webapp.mdx +0 -393
- package/vitest.config.js +0 -18
- /package/docs/v7/{commands → _drafts/commands}/assert.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/exec.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/focus-application.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/hover-image.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/hover-text.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/if.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/match-image.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/press-keys.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/remember.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/run.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/scroll-until-image.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/scroll-until-text.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/scroll.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/type.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/wait-for-image.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/wait-for-text.mdx +0 -0
- /package/docs/v7/{commands → _drafts/commands}/wait.mdx +0 -0
- /package/docs/v7/{getting-started → _drafts}/configuration.mdx +0 -0
- /package/docs/v7/{features → _drafts}/observable.mdx +0 -0
- /package/docs/v7/{platforms → _drafts/platforms}/linux.mdx +0 -0
- /package/docs/v7/{platforms → _drafts/platforms}/macos.mdx +0 -0
- /package/docs/v7/{platforms → _drafts/platforms}/windows.mdx +0 -0
- /package/docs/v7/{playwright.mdx → _drafts/playwright.mdx} +0 -0
- /package/docs/v7/{overview → _drafts}/readme.mdx +0 -0
- /package/docs/v7/{features → _drafts}/reports.mdx +0 -0
- /package/docs/v7/{api/client.mdx → client.mdx} +0 -0

package/docs/v7/running-tests.mdx
@@ -0,0 +1,181 @@
+---
+title: "Running Tests"
+description: "Run TestDriver tests with Vitest test runner"
+icon: "play"
+---
+
+Learn how to run TestDriver tests efficiently with Vitest's powerful test runner.
+
+## Running Tests
+
+TestDriver works with Vitest's powerful test runner.
+
+### Run All Tests
+
+```bash
+npx vitest run
+```
+
+Executes all test files in your project once and exits. Vitest automatically discovers files matching patterns like `*.test.js`, `*.test.mjs`, or `*.spec.js`.
+
+### Run with Coverage
+
+```bash
+npx vitest run --coverage
+```
+
+Generates a code coverage report showing which lines of your source code were executed during tests. Coverage helps identify untested code paths. Results are displayed in the terminal and saved to a `coverage/` directory.
+
+<Info>
+Coverage requires the `@vitest/coverage-v8` package. Install it with `npm install -D @vitest/coverage-v8`.
+</Info>
+
+### Run Specific Tests
+
+```bash
+npx vitest run login.test.js
+```
+
+Runs only the specified test file. Useful when debugging a single test or working on a specific feature.
+
+```bash
+npx vitest run login.test.js checkout.test.js
+```
+
+Runs multiple specific test files. List as many files as needed, separated by spaces.
+
+### Filter Tests by Name
+
+```bash
+npx vitest run --grep "login"
+```
+
+The `--grep` flag filters tests by their name (the string passed to `it()` or `test()`). Only tests whose names match the pattern will run. Supports regex patterns for complex matching.
+
+### Run Tests in a Folder
+
+```bash
+npx vitest run tests/e2e/
+```
+
+Runs all test files within a specific directory. Great for organizing tests by type (unit, integration, e2e) and running them separately.
+
+## Parallel Execution
+
+TestDriver runs each test in its own cloud sandbox, enabling true parallel execution. Run your entire test suite in minutes instead of hours.
+
+### Control Concurrency
+
+```bash
+npx vitest run --maxConcurrency=5
+```
+
+The `--maxConcurrency` flag limits how many tests run simultaneously. This should match your TestDriver license slots to avoid failures from exhausted slots.
+
+### Thread Configuration
+
+```bash
+npx vitest run --pool=threads --minThreads=2 --maxThreads=8
+```
+
+Fine-tune thread allocation for optimal performance:
+- `--pool=threads` — Uses worker threads for test isolation
+- `--minThreads` — Minimum number of threads to keep alive (reduces startup overhead)
+- `--maxThreads` — Maximum threads to spawn (limits resource usage)
+
+### License Slots
+
+Your TestDriver plan includes a set number of **license slots** that determine how many tests can run simultaneously. Each running test occupies one slot—when the test completes and the sandbox is destroyed, the slot is immediately available for the next test.
+
+<Info>
+View your available slots at [console.testdriver.ai](https://console.testdriver.ai). Upgrade anytime to increase parallelization.
+</Info>
+
+### Configuring Concurrency
+
+Set `maxConcurrency` in your Vitest config to match your license slot limit:
+
+```javascript vitest.config.mjs
+import { defineConfig } from 'vitest/config';
+import { TestDriver } from 'testdriverai/vitest';
+
+export default defineConfig({
+  test: {
+    testTimeout: 900000,
+    hookTimeout: 900000,
+    maxConcurrency: 5, // Match your license slot limit
+    reporters: ['default', TestDriver()],
+    setupFiles: ['testdriverai/vitest/setup'],
+  },
+});
+```
+
+<Warning>
+Setting `maxConcurrency` higher than your license slots will cause tests to fail when slots are exhausted. Always match this value to your plan's limit.
+</Warning>
+
+### Why Parallelization Matters
+
+| Test Suite | Sequential (1 slot) | Parallel (5 slots) | Parallel (10 slots) |
+|------------|--------------------|--------------------|---------------------|
+| 10 tests @ 2min each | 20 min | 4 min | 2 min |
+| 50 tests @ 2min each | 100 min | 20 min | 10 min |
+| 100 tests @ 2min each | 200 min | 40 min | 20 min |
+
+<Tip>
+**Pro tip:** Upgrading your plan doesn't just increase speed—it enables faster CI/CD feedback loops, letting your team ship with confidence.
+</Tip>
+
+<Card
+  title="View Plans & Pricing"
+  icon="credit-card"
+  href="/v7/cloud"
+>
+  Compare plans and find the right level of parallelization for your team.
+</Card>
+
+## Vitest UI
+
+Use Vitest UI for interactive debugging:
+
+```bash
+npx vitest --ui
+```
+
+The `--ui` flag launches a web-based interface for managing your test suite. Unlike `vitest run`, this starts in watch mode by default.
+
+Open http://localhost:51204 to see:
+- **Test file tree** — Browse and navigate your test structure
+- **Test status and duration** — See pass/fail states and timing at a glance
+- **Console output** — View logs and errors inline with each test
+- **Re-run individual tests** — Click to re-execute specific tests without restarting
+- **Filter and search** — Quickly find tests by name or status
+
+<Tip>
+Combine with `--open` to automatically open the UI in your browser: `npx vitest --ui --open`
+</Tip>
+
+
+## Test Reports
+
+After running tests, view detailed reports and video replays at [console.testdriver.ai](https://console.testdriver.ai).
+
+Reports include:
+- **Video replays** - Watch exactly what happened during each test
+- **Screenshots** - See the state at each step
+- **Timing breakdown** - Identify slow operations
+- **Error details** - Debug failures with full context
+
+```bash
+$ npx vitest run
+
+ ✓ login.test.js (2) 18.4s
+   ✓ user can login 12.3s
+   ✓ shows error for invalid credentials 6.1s
+
+📹 View reports at: https://console.testdriver.ai
+```
+
+<Tip>
+Bookmark your team's dashboard at [console.testdriver.ai](https://console.testdriver.ai) for quick access to test history and analytics.
+</Tip>
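
The running-tests page above relies on Vitest's default file discovery (`*.test.js`-style globs) and on `maxConcurrency` matching your license slots. A minimal `vitest.config.mjs` sketch that pins both down follows; the `include` glob is illustrative, not a TestDriver default.

```javascript
// vitest.config.mjs (sketch): narrow test discovery and cap parallelism.
// The glob below is an assumption for a project that keeps TestDriver specs in tests/e2e/.
import { defineConfig } from 'vitest/config';
import { TestDriver } from 'testdriverai/vitest';

export default defineConfig({
  test: {
    include: ['tests/e2e/**/*.test.mjs'], // only run the e2e suite
    maxConcurrency: 5,                    // keep at or below your license slots
    testTimeout: 900000,
    hookTimeout: 900000,
    reporters: ['default', TestDriver()],
    setupFiles: ['testdriverai/vitest/setup'],
  },
});
```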

package/docs/v7/{api/scroll.mdx → scroll.mdx}
@@ -295,6 +295,6 @@ describe('Scrolling', () => {
 
 ## Related Methods
 
-- [`find()`](/v7/
-- [`pressKeys()`](/v7/
-- [`wait()`](/v7/
+- [`find()`](/v7/find) - Locate elements after scrolling
+- [`pressKeys()`](/v7/press-keys) - Use Page Down/Up keys
+- [`wait()`](/v7/wait) - Wait after scrolling

package/docs/v7/secrets.mdx
@@ -0,0 +1,115 @@
+---
+title: "Using Secrets"
+description: "Securely manage passwords and sensitive data in your tests"
+icon: "key"
+---
+
+Protect sensitive information like passwords, API keys, and tokens in your TestDriver tests.
+
+## Typing Secrets Securely
+
+When typing sensitive information like passwords, use the `secret: true` option to prevent the value from being logged or stored:
+
+```javascript
+import { test } from 'vitest';
+import { chrome } from 'testdriverai/presets';
+
+test('login with secure password', async (context) => {
+  const { testdriver } = await chrome(context, {
+    url: 'https://myapp.com/login'
+  });
+
+  await testdriver.find('email input').click();
+  await testdriver.type(process.env.TD_USERNAME);
+
+  await testdriver.find('password input').click();
+  // Password is masked in logs and recordings
+  await testdriver.type(process.env.TD_PASSWORD, { secret: true });
+
+  await testdriver.find('login button').click();
+  await testdriver.assert('dashboard is visible');
+});
+```
+
+<Note>
+When `secret: true` is set, the typed text appears as `****` in all logs, recordings, and dashcam output.
+</Note>
+
+## Storing Secrets in GitHub
+
+Store sensitive credentials as GitHub repository secrets so they're never exposed in your code:
+
+<Steps>
+  <Step title="Navigate to Repository Settings">
+    Go to your GitHub repository → **Settings** → **Secrets and variables** → **Actions**
+  </Step>
+  <Step title="Add Repository Secrets">
+    Click **New repository secret** and add your secrets:
+    - `TD_API_KEY` - Your TestDriver API key
+    - `TD_USERNAME` - Test account username
+    - `TD_PASSWORD` - Test account password
+  </Step>
+  <Step title="Use in GitHub Actions">
+    Reference secrets in your workflow file:
+    ```yaml .github/workflows/test.yml
+    - name: Run TestDriver tests
+      env:
+        TD_API_KEY: ${{ secrets.TD_API_KEY }}
+        TD_USERNAME: ${{ secrets.TD_USERNAME }}
+        TD_PASSWORD: ${{ secrets.TD_PASSWORD }}
+      run: npx vitest run
+    ```
+  </Step>
+</Steps>
+
+## Local Development
+
+For local development, store secrets in a `.env` file:
+
+```bash .env
+TD_API_KEY=your_api_key_here
+TD_USERNAME=testuser@example.com
+TD_PASSWORD=your_secure_password
+```
+
+<Warning>
+Never commit `.env` files to version control. Add `.env` to your `.gitignore` file.
+</Warning>
+
+## Complete Example
+
+Here's a full login test with proper secrets handling:
+
+```javascript tests/login.test.js
+import { test, expect } from 'vitest';
+import { chrome } from 'testdriverai/presets';
+
+test('secure login flow', async (context) => {
+  const { testdriver } = await chrome(context, {
+    url: process.env.TD_WEBSITE || 'https://staging.myapp.com'
+  });
+
+  // Enter username (not sensitive)
+  await testdriver.find('email input').click();
+  await testdriver.type(process.env.TD_USERNAME);
+
+  // Enter password securely
+  await testdriver.find('password input').click();
+  await testdriver.type(process.env.TD_PASSWORD, { secret: true });
+
+  // Submit login
+  await testdriver.find('login button').click();
+
+  // Verify successful login
+  const loggedIn = await testdriver.assert('user is logged in');
+  expect(loggedIn).toBeTruthy();
+});
+```
+
+<Card title="Secrets Best Practices" icon="shield-check">
+  - **Always use `secret: true`** when typing passwords, tokens, or sensitive data
+  - **Use environment variables** to keep secrets out of code
+  - **Store secrets in your CI provider** (GitHub Actions, GitLab CI, etc.)
+  - **Never commit secrets** to version control
+  - **Rotate secrets regularly** to maintain security
+</Card>
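
The secrets page above shows a local `.env` file but does not show how it gets loaded. One common approach, assuming the `dotenv` package and that nothing else loads the file for you, is a small setup file registered via `setupFiles` in `vitest.config.mjs`; the file path and the fail-fast check are illustrative, not part of the TestDriver SDK.

```javascript
// tests/setup/load-env.mjs (sketch): load .env into process.env before any test runs.
import 'dotenv/config';

// Fail fast if required secrets are missing (variable names taken from the docs above).
for (const name of ['TD_API_KEY', 'TD_USERNAME', 'TD_PASSWORD']) {
  if (!process.env[name]) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
}
```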

package/docs/v7/self-hosted.mdx
@@ -0,0 +1,66 @@
+---
+title: "Self-Hosted"
+sidebarTitle: "Self-Hosted"
+description: "Unlimited test execution, complete privacy, and the ability to customize everything — all for a predictable flat license fee."
+icon: "server"
+---
+
+Self-hosted pricing is based on **parallel test capacity**: the number of tests you can run simultaneously on **your infrastructure**.
+
+With self-hosting, you get:
+
+- **Flat license fee** per parallel test slot
+- **Unlimited test execution** — run as many tests as you want
+- **No device-second metering** — predictable monthly costs
+- **Use your own AI keys** — control data usage with your own OpenAI, Anthropic, or other AI provider keys
+- **Custom hardware & software** — choose instance types, resolution, install specific software, and configure networking as needed
+- **Debug & customize** — RDP into test machines, install custom software, modify the AMI, and debug issues directly. No black boxes.
+
+## Get Started
+
+Ready to self-host? Follow our comprehensive AWS setup guide:
+
+<Card
+  title="AWS Setup Guide"
+  icon="aws"
+  href="/v7/aws-setup"
+>
+  Step-by-step instructions for deploying TestDriver on your AWS infrastructure using CloudFormation.
+</Card>
+
+
+## Who Should Self-Host?
+
+Self-hosting is ideal for teams that:
+
+- **Run high test volumes** — Flat pricing becomes more economical at scale
+- **Want infrastructure control** — Custom hardware, specific software dependencies, or network configurations
+- **Prefer predictable costs** — Budget with confidence using flat monthly fees
+
+
+## How It Works
+
+With self-hosting, you run test sandboxes on your own AWS infrastructure. TestDriver still provides:
+
+- **Dashboard** — View test results, analytics, and reports at [console.testdriver.ai](https://console.testdriver.ai)
+- **API** — Orchestration and AI-powered test execution
+- **License Management** — Your parallel test capacity
+
+You provide:
+
+- **AWS Infrastructure** — EC2 instances running in your account
+- **AI API Keys** — Use your own OpenAI, Anthropic, or other AI provider keys
+- **Custom Configuration** — Hardware specs, networking, installed software
+
+## Cloud vs. Self-Hosted
+
+| Feature | Cloud | Self-Hosted |
+|---------|-------|-------------|
+| **Setup Time** | Minutes | Hours |
+| **Pricing Model** | Device-seconds | Flat license fee |
+| **Infrastructure Management** | TestDriver | You |
+| **Device Location** | TestDriver cloud | Your AWS account |
+| **AI API Keys** | TestDriver's | Your own |
+| **Custom Software** | Limited | Full control |
+| **Hardware Selection** | Standard | Your choice |
+| **Debugging Access** | Replays only | Full RDP access |

package/docs/v7/{api/type.mdx → type.mdx}
@@ -353,6 +353,6 @@ describe('Form Filling with Type', () => {
 
 ## Related Methods
 
-- [`pressKeys()`](/v7/
-- [`find()`](/v7/
-- [`click()`](/v7/
+- [`pressKeys()`](/v7/press-keys) - Press keyboard keys and shortcuts
+- [`find()`](/v7/find) - Locate input fields
+- [`click()`](/v7/click) - Focus input fields

package/docs/v7/variables.mdx
@@ -0,0 +1,111 @@
+---
+title: "Using Variables"
+description: "Use dynamic data and secure secrets in your tests"
+icon: "square-root-variable"
+---
+
+Scale your testing with dynamic data and secure secrets management. Choose the right approach based on your testing needs.
+
+## Environment Variables
+
+Environment variables are ideal for **configuration that changes between environments** (dev, staging, production) or for **secrets that shouldn't be committed to code**. Use this approach when you need to run the same tests against different servers or with different credentials.
+
+```javascript
+import { test } from 'vitest';
+import { chrome } from 'testdriverai/presets';
+
+test('multi-environment testing', async (context) => {
+  const env = process.env.TEST_ENV || 'staging';
+  const urls = {
+    dev: 'https://dev.myapp.com',
+    staging: 'https://staging.myapp.com',
+    production: 'https://myapp.com'
+  };
+
+  const { testdriver } = await chrome(context, {
+    url: urls[env]
+  });
+
+  await testdriver.assert('app is running');
+});
+```
+
+```bash
+# Run against different environments
+TEST_ENV=dev npx vitest run
+TEST_ENV=staging npx vitest run
+TEST_ENV=production npx vitest run
+```
+
+## Test Fixtures
+
+Test fixtures work best when you have **structured, reusable test data** that needs to be shared across multiple tests. Use fixtures when testing different user roles, product catalogs, or any scenario where you want to parameterize tests with a known set of data.
+
+```javascript test/fixtures/users.js
+export const testUsers = [
+  { email: 'admin@test.com', role: 'admin' },
+  { email: 'user@test.com', role: 'user' },
+  { email: 'guest@test.com', role: 'guest' }
+];
+
+export const products = [
+  { name: 'Laptop', price: 999 },
+  { name: 'Mouse', price: 29 },
+  { name: 'Keyboard', price: 89 }
+];
+```
+
+```javascript test/permissions.test.js
+import { test } from 'vitest';
+import { chrome } from 'testdriverai/presets';
+import { testUsers } from './fixtures/users.js';
+
+test.each(testUsers)('$role can access dashboard', async ({ email, role }, context) => {
+  const { testdriver } = await chrome(context, { url: 'https://myapp.com/login' });
+
+  await testdriver.find('email input').type(email);
+  await testdriver.find('password input').type('password123');
+  await testdriver.find('login button').click();
+
+  if (role === 'admin') {
+    await testdriver.assert('admin panel is visible');
+  } else {
+    await testdriver.assert('user dashboard is visible');
+  }
+});
+```
+
+## Dynamic Data Generation
+
+Dynamic data generation is perfect for **creating unique test data on each run**, avoiding conflicts with existing records, and **testing edge cases with realistic data**. Use libraries like Faker when you need fresh emails, names, or other data that won't collide with previous test runs.
+
+```javascript
+import { test } from 'vitest';
+import { chrome } from 'testdriverai/presets';
+import { faker } from '@faker-js/faker';
+
+test('user registration with dynamic data', async (context) => {
+  const { testdriver } = await chrome(context, { url: 'https://myapp.com/register' });
+
+  // Generate unique test data for each run
+  const userData = {
+    firstName: faker.person.firstName(),
+    lastName: faker.person.lastName(),
+    email: faker.internet.email(),
+    password: faker.internet.password({ length: 12 })
+  };
+
+  await testdriver.find('first name input').type(userData.firstName);
+  await testdriver.find('last name input').type(userData.lastName);
+  await testdriver.find('email input').type(userData.email);
+  await testdriver.find('password input').type(userData.password);
+  await testdriver.find('register button').click();
+
+  await testdriver.assert('registration successful');
+  console.log('Registered user:', userData.email);
+});
+```
+
+```bash
+npm install --save-dev @faker-js/faker
+```
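
The `TEST_ENV` example in the variables page above silently falls back to staging when the variable is unset or misspelled. A small helper using the same URL map can make an unknown environment fail loudly instead; this is a sketch only, and the module path is illustrative.

```javascript
// tests/helpers/env.mjs (sketch): resolve the base URL for the TEST_ENV pattern shown above.
const urls = {
  dev: 'https://dev.myapp.com',
  staging: 'https://staging.myapp.com',
  production: 'https://myapp.com',
};

export function resolveBaseUrl() {
  const env = process.env.TEST_ENV || 'staging';
  const url = urls[env];
  if (!url) {
    throw new Error(`Unknown TEST_ENV "${env}"; expected one of: ${Object.keys(urls).join(', ')}`);
  }
  return url;
}
```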

package/docs/v7/waiting-for-elements.mdx
@@ -0,0 +1,66 @@
+---
+title: "Waiting for Elements"
+description: "Handle async operations and prevent flaky tests"
+icon: "clock"
+---
+
+## Waiting for Elements
+
+Use the `timeout` option with `find()` to wait for elements that appear after async operations:
+
+```javascript
+// Wait up to 30 seconds for element to appear (polls every 5 seconds)
+const element = await testdriver.find('Loading complete indicator', { timeout: 30000 });
+await element.click();
+
+// Useful after actions that trigger loading states
+await testdriver.find('submit button').click();
+await testdriver.find('success message', { timeout: 15000 });
+
+// Short timeout for quick checks
+const toast = await testdriver.find('notification toast', { timeout: 5000 });
+```
+
+## Flake Prevention
+
+TestDriver automatically waits for the screen and network to stabilize after each action using **redraw detection**. This prevents flaky tests caused by animations, loading states, or dynamic content updates.
+
+<Note>
+Redraw detection adds a small delay after each action but significantly reduces test flakiness.
+</Note>
+
+For example, when clicking a submit button that navigates to a new page:
+
+```javascript
+// Click submit - TestDriver automatically waits for the new page to load
+await testdriver.find('submit button').click();
+
+// By the time this runs, the page has fully loaded and stabilized
+await testdriver.assert('dashboard is displayed');
+await testdriver.find('welcome message');
+```
+
+Without redraw detection, you'd need manual waits or retries to handle the page transition. TestDriver handles this automatically by detecting when the screen stops changing and network requests complete.
+
+You can disable redraw detection or customize its behavior:
+
+```javascript
+// Disable redraw detection for faster tests (less reliable)
+const testdriver = TestDriver(context, {
+  redraw: false
+});
+```
+
+Here is an example of customizing redraw detection:
+
+```javascript
+// Fine-tune redraw detection
+const testdriver = TestDriver(context, {
+  redraw: {
+    enabled: true,
+    diffThreshold: 0.1, // Pixel difference threshold (0-1)
+    screenRedraw: true, // Monitor screen changes
+    networkMonitor: true, // Wait for network idle
+  }
+});
+```

package/docs/v7/what-is-testdriver.mdx
@@ -0,0 +1,54 @@
+---
+title: "What is TestDriver?"
+description: "Reliably test your most difficult user flows"
+icon: "circle-info"
+---
+
+## The problem with modern testing tools
+
+Modern testing tools like Playwright are designed to test a single web application, running in a single browser tab, using selectors.
+
+However, selectors are often unreliable or unavailable in complex scenarios, leading to brittle and flaky tests:
+
+| Challenge | Problem | Examples |
+|-----------|---------|----------|
+| **Fast-moving teams** | Frequently change UI structure, breaking CSS/XPath selectors | Agile teams, startups, vibe-coders |
+| **Dynamic content** | Cannot be targeted with selectors | AI chatbots, PDFs, images, videos |
+| **Software you don't own** | May lack proper accessibility attributes | Other websites, extensions, third-party applications |
+| **Multi-application workflows** | Cannot be tested with web-only tools | Desktop apps, browser extensions, IDEs |
+| **Visual states** | Impossible to verify with code-based selectors | Charts, graphs, videos, images, spelling errors, UI layout |
+
+## The TestDriver Solution
+
+TestDriver is a complete testing platform built specifically for handling these scenarios. It consists of a JavaScript SDK, hosted infrastructure, and debugging tools that make it easy to write, run, and maintain tests for your most difficult user flows.
+
+### JavaScript SDK
+
+Here is an example of a TestDriver test that installs a production Chrome extension from the Chrome Web Store and verifies that it appears in the extensions menu:
+
+```javascript Installing Loom from the Chrome Web Store
+import { expect, it } from "vitest";
+import { TestDriver } from "testdriverai/vitest/hooks";
+
+it("Chrome Extension Test", async (context) => {
+  const testdriver = TestDriver(context);
+
+  // Launch Chrome with Loom loaded by its Chrome Web Store ID
+  await testdriver.provision.chromeExtension({
+    extensionId: 'liecbddmkiiihnedobmlmillhodjkdmb'
+  });
+
+  // Click on the extensions button (puzzle piece icon) in Chrome toolbar
+  const extensionsButton = await testdriver.find("The puzzle-shaped icon in the Chrome toolbar.");
+  await extensionsButton.click();
+
+  // Look for Loom in the extensions menu
+  const loomExtension = await testdriver.find("Loom extension in the extensions dropdown");
+  expect(loomExtension.found()).toBeTruthy();
+});
+```
+
+
+<Tip>[vitest](https://vitest.dev/) is the preferred test runner for TestDriver.</Tip>
+
+,