testdriverai 7.2.9 → 7.2.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. package/.github/workflows/testdriver.yml +127 -0
  2. package/.testdriver/last-sandbox +7 -0
  3. package/agent/events.js +1 -0
  4. package/agent/index.js +71 -54
  5. package/agent/lib/sandbox.js +11 -1
  6. package/agents.md +393 -0
  7. package/debug/01-table-initial.png +0 -0
  8. package/debug/02-after-ai-explore.png +0 -0
  9. package/debug/02-after-scroll.png +0 -0
  10. package/docs/docs.json +93 -125
  11. package/docs/v7/_drafts/caching.mdx +2 -2
  12. package/docs/v7/{getting-started → _drafts}/installation.mdx +0 -66
  13. package/docs/v7/{features/coverage.mdx → _drafts/powerful.mdx} +1 -90
  14. package/docs/v7/{features → _drafts}/scalable.mdx +126 -4
  15. package/docs/v7/_drafts/screenshot.mdx +155 -0
  16. package/docs/v7/_drafts/writing-tests.mdx +25 -0
  17. package/docs/v7/{api/act.mdx → ai.mdx} +27 -27
  18. package/docs/v7/{api/assert.mdx → assert.mdx} +3 -3
  19. package/docs/v7/aws-setup.mdx +338 -0
  20. package/docs/v7/caching.mdx +128 -0
  21. package/docs/v7/ci-cd.mdx +605 -0
  22. package/docs/v7/{api/click.mdx → click.mdx} +4 -4
  23. package/docs/v7/cloud.mdx +120 -0
  24. package/docs/v7/customizing-devices.mdx +129 -0
  25. package/docs/v7/{api/doubleClick.mdx → double-click.mdx} +5 -5
  26. package/docs/v7/enterprise.mdx +135 -0
  27. package/docs/v7/examples.mdx +5 -0
  28. package/docs/v7/{api/exec.mdx → exec.mdx} +3 -3
  29. package/docs/v7/{api/find.mdx → find.mdx} +17 -21
  30. package/docs/v7/{api/focusApplication.mdx → focus-application.mdx} +3 -3
  31. package/docs/v7/generating-tests.mdx +32 -0
  32. package/docs/v7/{api/hover.mdx → hover.mdx} +3 -3
  33. package/docs/v7/locating-elements.mdx +71 -0
  34. package/docs/v7/making-assertions.mdx +32 -0
  35. package/docs/v7/{api/mouseDown.mdx → mouse-down.mdx} +7 -7
  36. package/docs/v7/{api/mouseUp.mdx → mouse-up.mdx} +8 -8
  37. package/docs/v7/performing-actions.mdx +51 -0
  38. package/docs/v7/{api/pressKeys.mdx → press-keys.mdx} +3 -3
  39. package/docs/v7/quickstart.mdx +162 -0
  40. package/docs/v7/reusable-code.mdx +240 -0
  41. package/docs/v7/{api/rightClick.mdx → right-click.mdx} +5 -5
  42. package/docs/v7/running-tests.mdx +181 -0
  43. package/docs/v7/{api/scroll.mdx → scroll.mdx} +3 -3
  44. package/docs/v7/secrets.mdx +115 -0
  45. package/docs/v7/self-hosted.mdx +66 -0
  46. package/docs/v7/{api/type.mdx → type.mdx} +3 -3
  47. package/docs/v7/variables.mdx +111 -0
  48. package/docs/v7/waiting-for-elements.mdx +66 -0
  49. package/docs/v7/what-is-testdriver.mdx +54 -0
  50. package/lib/vitest/hooks.mjs +80 -68
  51. package/package.json +1 -1
  52. package/sdk.d.ts +22 -9
  53. package/sdk.js +177 -44
  54. package/test/manual/reconnect-provision.test.mjs +49 -0
  55. package/test/manual/reconnect-signin.test.mjs +41 -0
  56. package/test/testdriver/ai.test.mjs +30 -0
  57. package/test/testdriver/setup/testHelpers.mjs +0 -1
  58. package/test/testdriver/windows-installer.test.mjs +61 -0
  59. package/tests/table-sort-enrollments.test.mjs +72 -0
  60. package/tests/table-sort-experiment.test.mjs +42 -0
  61. package/tests/table-sort-setup.test.mjs +59 -0
  62. package/vitest.config.mjs +1 -0
  63. package/docs/v7/api/assertions.mdx +0 -403
  64. package/docs/v7/features/ai-native.mdx +0 -413
  65. package/docs/v7/features/application-logs.mdx +0 -353
  66. package/docs/v7/features/browser-logs.mdx +0 -414
  67. package/docs/v7/features/cache-management.mdx +0 -402
  68. package/docs/v7/features/continuous-testing.mdx +0 -346
  69. package/docs/v7/features/data-driven-testing.mdx +0 -441
  70. package/docs/v7/features/easy-to-write.mdx +0 -280
  71. package/docs/v7/features/enterprise.mdx +0 -656
  72. package/docs/v7/features/fast.mdx +0 -406
  73. package/docs/v7/features/managed-sandboxes.mdx +0 -384
  74. package/docs/v7/features/network-monitoring.mdx +0 -568
  75. package/docs/v7/features/parallel-execution.mdx +0 -381
  76. package/docs/v7/features/powerful.mdx +0 -531
  77. package/docs/v7/features/sandbox-customization.mdx +0 -229
  78. package/docs/v7/features/stable.mdx +0 -473
  79. package/docs/v7/features/system-performance.mdx +0 -616
  80. package/docs/v7/features/test-analytics.mdx +0 -373
  81. package/docs/v7/features/test-cases.mdx +0 -393
  82. package/docs/v7/features/test-replays.mdx +0 -408
  83. package/docs/v7/features/test-reports.mdx +0 -308
  84. package/docs/v7/getting-started/debugging-tests.mdx +0 -382
  85. package/docs/v7/getting-started/quickstart.mdx +0 -90
  86. package/docs/v7/getting-started/running-tests.mdx +0 -173
  87. package/docs/v7/getting-started/setting-up-in-ci.mdx +0 -612
  88. package/docs/v7/getting-started/writing-tests.mdx +0 -534
  89. package/docs/v7/overview/what-is-testdriver.mdx +0 -386
  90. package/docs/v7/presets/chrome-extension.mdx +0 -248
  91. package/docs/v7/presets/chrome.mdx +0 -300
  92. package/docs/v7/presets/electron.mdx +0 -460
  93. package/docs/v7/presets/vscode.mdx +0 -417
  94. package/docs/v7/presets/webapp.mdx +0 -393
  95. package/vitest.config.js +0 -18
  96. /package/docs/v7/{commands → _drafts/commands}/assert.mdx +0 -0
  97. /package/docs/v7/{commands → _drafts/commands}/exec.mdx +0 -0
  98. /package/docs/v7/{commands → _drafts/commands}/focus-application.mdx +0 -0
  99. /package/docs/v7/{commands → _drafts/commands}/hover-image.mdx +0 -0
  100. /package/docs/v7/{commands → _drafts/commands}/hover-text.mdx +0 -0
  101. /package/docs/v7/{commands → _drafts/commands}/if.mdx +0 -0
  102. /package/docs/v7/{commands → _drafts/commands}/match-image.mdx +0 -0
  103. /package/docs/v7/{commands → _drafts/commands}/press-keys.mdx +0 -0
  104. /package/docs/v7/{commands → _drafts/commands}/remember.mdx +0 -0
  105. /package/docs/v7/{commands → _drafts/commands}/run.mdx +0 -0
  106. /package/docs/v7/{commands → _drafts/commands}/scroll-until-image.mdx +0 -0
  107. /package/docs/v7/{commands → _drafts/commands}/scroll-until-text.mdx +0 -0
  108. /package/docs/v7/{commands → _drafts/commands}/scroll.mdx +0 -0
  109. /package/docs/v7/{commands → _drafts/commands}/type.mdx +0 -0
  110. /package/docs/v7/{commands → _drafts/commands}/wait-for-image.mdx +0 -0
  111. /package/docs/v7/{commands → _drafts/commands}/wait-for-text.mdx +0 -0
  112. /package/docs/v7/{commands → _drafts/commands}/wait.mdx +0 -0
  113. /package/docs/v7/{getting-started → _drafts}/configuration.mdx +0 -0
  114. /package/docs/v7/{features → _drafts}/observable.mdx +0 -0
  115. /package/docs/v7/{platforms → _drafts/platforms}/linux.mdx +0 -0
  116. /package/docs/v7/{platforms → _drafts/platforms}/macos.mdx +0 -0
  117. /package/docs/v7/{platforms → _drafts/platforms}/windows.mdx +0 -0
  118. /package/docs/v7/{playwright.mdx → _drafts/playwright.mdx} +0 -0
  119. /package/docs/v7/{overview → _drafts}/readme.mdx +0 -0
  120. /package/docs/v7/{features → _drafts}/reports.mdx +0 -0
  121. /package/docs/v7/{api/client.mdx → client.mdx} +0 -0
  122. /package/docs/v7/{api/dashcam.mdx → dashcam.mdx} +0 -0
  123. /package/docs/v7/{api/elements.mdx → elements.mdx} +0 -0
  124. /package/docs/v7/{api/sandbox.mdx → sandbox.mdx} +0 -0
package/docs/v7/scroll.mdx CHANGED
@@ -295,6 +295,6 @@ describe('Scrolling', () => {
 
 ## Related Methods
 
- - [`find()`](/v7/api/find) - Locate elements after scrolling
- - [`pressKeys()`](/v7/api/pressKeys) - Use Page Down/Up keys
- - [`wait()`](/v7/api/wait) - Wait after scrolling
+ - [`find()`](/v7/find) - Locate elements after scrolling
+ - [`pressKeys()`](/v7/press-keys) - Use Page Down/Up keys
+ - [`wait()`](/v7/wait) - Wait after scrolling
package/docs/v7/secrets.mdx ADDED
@@ -0,0 +1,115 @@
+ ---
+ title: "Using Secrets"
+ description: "Securely manage passwords and sensitive data in your tests"
+ icon: "key"
+ ---
+
+ Protect sensitive information like passwords, API keys, and tokens in your TestDriver tests.
+
+ ## Typing Secrets Securely
+
+ When typing sensitive information like passwords, use the `secret: true` option to prevent the value from being logged or stored:
+
+ ```javascript
+ import { test } from 'vitest';
+ import { chrome } from 'testdriverai/presets';
+
+ test('login with secure password', async (context) => {
+   const { testdriver } = await chrome(context, {
+     url: 'https://myapp.com/login'
+   });
+
+   await testdriver.find('email input').click();
+   await testdriver.type(process.env.TD_USERNAME);
+
+   await testdriver.find('password input').click();
+   // Password is masked in logs and recordings
+   await testdriver.type(process.env.TD_PASSWORD, { secret: true });
+
+   await testdriver.find('login button').click();
+   await testdriver.assert('dashboard is visible');
+ });
+ ```
+
+ <Note>
+ When `secret: true` is set, the typed text appears as `****` in all logs, recordings, and dashcam output.
+ </Note>
+
+ ## Storing Secrets in GitHub
+
+ Store sensitive credentials as GitHub repository secrets so they're never exposed in your code:
+
+ <Steps>
+   <Step title="Navigate to Repository Settings">
+     Go to your GitHub repository → **Settings** → **Secrets and variables** → **Actions**
+   </Step>
+   <Step title="Add Repository Secrets">
+     Click **New repository secret** and add your secrets:
+     - `TD_API_KEY` - Your TestDriver API key
+     - `TD_USERNAME` - Test account username
+     - `TD_PASSWORD` - Test account password
+   </Step>
+   <Step title="Use in GitHub Actions">
+     Reference secrets in your workflow file:
+     ```yaml .github/workflows/test.yml
+     - name: Run TestDriver tests
+       env:
+         TD_API_KEY: ${{ secrets.TD_API_KEY }}
+         TD_USERNAME: ${{ secrets.TD_USERNAME }}
+         TD_PASSWORD: ${{ secrets.TD_PASSWORD }}
+       run: npx vitest run
+     ```
+   </Step>
+ </Steps>
+
+ ## Local Development
+
+ For local development, store secrets in a `.env` file:
+
+ ```bash .env
+ TD_API_KEY=your_api_key_here
+ TD_USERNAME=testuser@example.com
+ TD_PASSWORD=your_secure_password
+ ```
+
+ <Warning>
+ Never commit `.env` files to version control. Add `.env` to your `.gitignore` file.
+ </Warning>
+
+ ## Complete Example
+
+ Here's a full login test with proper secrets handling:
+
+ ```javascript tests/login.test.js
+ import { test, expect } from 'vitest';
+ import { chrome } from 'testdriverai/presets';
+
+ test('secure login flow', async (context) => {
+   const { testdriver } = await chrome(context, {
+     url: process.env.TD_WEBSITE || 'https://staging.myapp.com'
+   });
+
+   // Enter username (not sensitive)
+   await testdriver.find('email input').click();
+   await testdriver.type(process.env.TD_USERNAME);
+
+   // Enter password securely
+   await testdriver.find('password input').click();
+   await testdriver.type(process.env.TD_PASSWORD, { secret: true });
+
+   // Submit login
+   await testdriver.find('login button').click();
+
+   // Verify successful login
+   const loggedIn = await testdriver.assert('user is logged in');
+   expect(loggedIn).toBeTruthy();
+ });
+ ```
+
+ <Card title="Secrets Best Practices" icon="shield-check">
+ - **Always use `secret: true`** when typing passwords, tokens, or sensitive data
+ - **Use environment variables** to keep secrets out of code
+ - **Store secrets in your CI provider** (GitHub Actions, GitLab CI, etc.)
+ - **Never commit secrets** to version control
+ - **Rotate secrets regularly** to maintain security
+ </Card>
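The Local Development section above stores credentials in `.env` but does not show how they reach `process.env`. A minimal sketch, assuming the `dotenv` package and Vitest's `setupFiles` option (this wiring is not confirmed by the diff itself):

```javascript
// vitest.config.mjs - minimal sketch; assumes dotenv is installed as a dev dependency
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // 'dotenv/config' reads .env and populates process.env before any test runs,
    // so the process.env.TD_* references in the examples above resolve locally.
    setupFiles: ['dotenv/config'],
  },
});
```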
package/docs/v7/self-hosted.mdx ADDED
@@ -0,0 +1,66 @@
+ ---
+ title: "Self-Hosted"
+ sidebarTitle: "Self-Hosted"
+ description: "Unlimited test execution, complete privacy, and the ability to customize everything — all for a predictable flat license fee."
+ icon: "server"
+ ---
+
+ Self-hosted pricing is based on **parallel test capacity**: the number of tests you can run simultaneously on **your infrastructure**.
+
+ With self-hosting, you get:
+
+ - **Flat license fee** per parallel test slot
+ - **Unlimited test execution** — run as many tests as you want
+ - **No device-second metering** — predictable monthly costs
+ - **Use your own AI keys** — control data usage with your own OpenAI, Anthropic, or other AI provider keys
+ - **Custom hardware & software** — choose instance types, resolution, install specific software, and configure networking as needed
+ - **Debug & customize** — RDP into test machines, install custom software, modify the AMI, and debug issues directly. No black boxes.
+
+ ## Get Started
+
+ Ready to self-host? Follow our comprehensive AWS setup guide:
+
+ <Card
+   title="AWS Setup Guide"
+   icon="aws"
+   href="/v7/aws-setup"
+ >
+   Step-by-step instructions for deploying TestDriver on your AWS infrastructure using CloudFormation.
+ </Card>
+
+ ## Who Should Self-Host?
+
+ Self-hosting is ideal for teams that:
+
+ - **Run high test volumes** — flat pricing becomes more economical at scale
+ - **Want infrastructure control** — custom hardware, specific software dependencies, or network configurations
+ - **Prefer predictable costs** — budget with confidence using flat monthly fees
+
+ ## How It Works
+
+ With self-hosting, you run test sandboxes on your own AWS infrastructure. TestDriver still provides:
+
+ - **Dashboard** — view test results, analytics, and reports at [console.testdriver.ai](https://console.testdriver.ai)
+ - **API** — orchestration and AI-powered test execution
+ - **License management** — your parallel test capacity
+
+ You provide:
+
+ - **AWS infrastructure** — EC2 instances running in your account
+ - **AI API keys** — your own OpenAI, Anthropic, or other AI provider keys
+ - **Custom configuration** — hardware specs, networking, installed software
+
+ ## Cloud vs. Self-Hosted
+
+ | Feature | Cloud | Self-Hosted |
+ |---------|-------|-------------|
+ | **Setup Time** | Minutes | Hours |
+ | **Pricing Model** | Device-seconds | Flat license fee |
+ | **Infrastructure Management** | TestDriver | You |
+ | **Device Location** | TestDriver cloud | Your AWS account |
+ | **AI API Keys** | TestDriver's | Your own |
+ | **Custom Software** | Limited | Full control |
+ | **Hardware Selection** | Standard | Your choice |
+ | **Debugging Access** | Replays only | Full RDP access |
package/docs/v7/type.mdx CHANGED
@@ -353,6 +353,6 @@ describe('Form Filling with Type', () => {
 
 ## Related Methods
 
- - [`pressKeys()`](/v7/api/pressKeys) - Press keyboard keys and shortcuts
- - [`find()`](/v7/api/find) - Locate input fields
- - [`click()`](/v7/api/click) - Focus input fields
+ - [`pressKeys()`](/v7/press-keys) - Press keyboard keys and shortcuts
+ - [`find()`](/v7/find) - Locate input fields
+ - [`click()`](/v7/click) - Focus input fields
package/docs/v7/variables.mdx ADDED
@@ -0,0 +1,111 @@
+ ---
+ title: "Using Variables"
+ description: "Use dynamic data and secure secrets in your tests"
+ icon: "square-root-variable"
+ ---
+
+ Scale your testing with dynamic data and secure secrets management. Choose the right approach based on your testing needs.
+
+ ## Environment Variables
+
+ Environment variables are ideal for **configuration that changes between environments** (dev, staging, production) or for **secrets that shouldn't be committed to code**. Use this approach when you need to run the same tests against different servers or with different credentials.
+
+ ```javascript
+ import { test } from 'vitest';
+ import { chrome } from 'testdriverai/presets';
+
+ test('multi-environment testing', async (context) => {
+   const env = process.env.TEST_ENV || 'staging';
+   const urls = {
+     dev: 'https://dev.myapp.com',
+     staging: 'https://staging.myapp.com',
+     production: 'https://myapp.com'
+   };
+
+   const { testdriver } = await chrome(context, {
+     url: urls[env]
+   });
+
+   await testdriver.assert('app is running');
+ });
+ ```
+
+ ```bash
+ # Run against different environments
+ TEST_ENV=dev npx vitest run
+ TEST_ENV=staging npx vitest run
+ TEST_ENV=production npx vitest run
+ ```
+
+ ## Test Fixtures
+
+ Test fixtures work best when you have **structured, reusable test data** that needs to be shared across multiple tests. Use fixtures when testing different user roles, product catalogs, or any scenario where you want to parameterize tests with a known set of data.
+
+ ```javascript test/fixtures/users.js
+ export const testUsers = [
+   { email: 'admin@test.com', role: 'admin' },
+   { email: 'user@test.com', role: 'user' },
+   { email: 'guest@test.com', role: 'guest' }
+ ];
+
+ export const products = [
+   { name: 'Laptop', price: 999 },
+   { name: 'Mouse', price: 29 },
+   { name: 'Keyboard', price: 89 }
+ ];
+ ```
+
+ ```javascript test/permissions.test.js
+ import { test } from 'vitest';
+ import { chrome } from 'testdriverai/presets';
+ import { testUsers } from './fixtures/users.js';
+
+ // test.for (unlike test.each) also passes the Vitest context as the second argument
+ test.for(testUsers)('$role can access dashboard', async ({ email, role }, context) => {
+   const { testdriver } = await chrome(context, { url: process.env.TD_WEBSITE });
+
+   await testdriver.find('email input').type(email);
+   await testdriver.find('password input').type('password123');
+   await testdriver.find('login button').click();
+
+   if (role === 'admin') {
+     await testdriver.assert('admin panel is visible');
+   } else {
+     await testdriver.assert('user dashboard is visible');
+   }
+ });
+ ```
+
+ ## Dynamic Data Generation
+
+ Dynamic data generation is perfect for **creating unique test data on each run**, avoiding conflicts with existing records, and **testing edge cases with realistic data**. Use libraries like Faker when you need fresh emails, names, or other data that won't collide with previous test runs.
+
+ ```javascript
+ import { test } from 'vitest';
+ import { chrome } from 'testdriverai/presets';
+ import { faker } from '@faker-js/faker';
+
+ test('user registration with dynamic data', async (context) => {
+   const { testdriver } = await chrome(context, { url: process.env.TD_WEBSITE });
+
+   // Generate unique test data for each run
+   const userData = {
+     firstName: faker.person.firstName(),
+     lastName: faker.person.lastName(),
+     email: faker.internet.email(),
+     password: faker.internet.password({ length: 12 })
+   };
+
+   await testdriver.find('first name input').type(userData.firstName);
+   await testdriver.find('last name input').type(userData.lastName);
+   await testdriver.find('email input').type(userData.email);
+   await testdriver.find('password input').type(userData.password);
+   await testdriver.find('register button').click();
+
+   await testdriver.assert('registration successful');
+   console.log('Registered user:', userData.email);
+ });
+ ```
+
+ ```bash
+ npm install --save-dev @faker-js/faker
+ ```
package/docs/v7/waiting-for-elements.mdx ADDED
@@ -0,0 +1,66 @@
+ ---
+ title: "Waiting for Elements"
+ description: "Handle async operations and prevent flaky tests"
+ icon: "clock"
+ ---
+
+ ## Waiting for Elements
+
+ Use the `timeout` option with `find()` to wait for elements that appear after async operations:
+
+ ```javascript
+ // Wait up to 30 seconds for element to appear (polls every 5 seconds)
+ const element = await testdriver.find('Loading complete indicator', { timeout: 30000 });
+ await element.click();
+
+ // Useful after actions that trigger loading states
+ await testdriver.find('submit button').click();
+ await testdriver.find('success message', { timeout: 15000 });
+
+ // Short timeout for quick checks
+ const toast = await testdriver.find('notification toast', { timeout: 5000 });
+ ```
+
+ ## Flake Prevention
+
+ TestDriver automatically waits for the screen and network to stabilize after each action using **redraw detection**. This prevents flaky tests caused by animations, loading states, or dynamic content updates.
+
+ <Note>
+ Redraw detection adds a small delay after each action but significantly reduces test flakiness.
+ </Note>
+
+ For example, when clicking a submit button that navigates to a new page:
+
+ ```javascript
+ // Click submit - TestDriver automatically waits for the new page to load
+ await testdriver.find('submit button').click();
+
+ // By the time this runs, the page has fully loaded and stabilized
+ await testdriver.assert('dashboard is displayed');
+ await testdriver.find('welcome message');
+ ```
+
+ Without redraw detection, you'd need manual waits or retries to handle the page transition. TestDriver handles this automatically by detecting when the screen stops changing and network requests complete.
+
+ You can disable redraw detection or customize its behavior:
+
+ ```javascript
+ // Disable redraw detection for faster tests (less reliable)
+ const testdriver = TestDriver(context, {
+   redraw: false
+ });
+ ```
+
+ Here is an example of customizing redraw detection:
+
+ ```javascript
+ // Fine-tune redraw detection
+ const testdriver = TestDriver(context, {
+   redraw: {
+     enabled: true,
+     diffThreshold: 0.1,   // Pixel difference threshold (0-1)
+     screenRedraw: true,   // Monitor screen changes
+     networkMonitor: true, // Wait for network idle
+   }
+ });
+ ```
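To make the flake-prevention point concrete: the page-transition example above would otherwise need a hand-rolled retry loop. A rough sketch of such a loop, assuming `find()` returns an element whose `found()` method reports whether it matched (usage taken from the SDK example later in this diff); the prompt string and polling numbers are illustrative, not SDK defaults:

```javascript
// Hand-rolled stability wait: the kind of code redraw detection replaces (illustrative).
async function waitForElement(testdriver, prompt, { retries = 6, intervalMs = 5000 } = {}) {
  for (let attempt = 0; attempt < retries; attempt++) {
    // Probe with the short find() timeout documented above
    const element = await testdriver.find(prompt, { timeout: intervalMs });
    if (element.found()) return element; // found() assumed from the SDK example
    // Not rendered yet: back off and try again
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
  throw new Error(`Gave up waiting for: ${prompt}`);
}

// Usage after a click that triggers navigation:
// await testdriver.find('submit button').click();
// const heading = await waitForElement(testdriver, 'dashboard heading');
```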
package/docs/v7/what-is-testdriver.mdx ADDED
@@ -0,0 +1,54 @@
+ ---
+ title: "What is TestDriver?"
+ description: "Reliably test your most difficult user flows"
+ icon: "circle-info"
+ ---
+
+ ## The problem with modern testing tools
+
+ Modern testing tools like Playwright are designed to test a single web application running in a single browser tab, using selectors.
+
+ However, selectors are often either unreliable or unavailable in complex scenarios, leading to brittle and flaky tests:
+
+ | Challenge | Problem | Examples |
+ |-----------|---------|----------|
+ | **Fast moving teams** | Frequently change UI structure, breaking CSS/XPath selectors | Agile teams, startups, vibe-coders |
+ | **Dynamic content** | Cannot be targeted with selectors | AI chatbots, PDFs, images, videos |
+ | **Software you don't own** | May lack proper accessibility attributes | Other websites, extensions, third-party applications |
+ | **Multi-application workflows** | Cannot be tested with web-only tools | Desktop apps, browser extensions, IDEs |
+ | **Visual states** | Impossible to verify with code-based selectors | Charts, graphs, videos, images, spelling errors, UI layout |
+
+ ## The TestDriver Solution
+
+ TestDriver is a complete testing platform built specifically for handling these scenarios. It consists of a JavaScript SDK, hosted infrastructure, and debugging tools that make it easy to write, run, and maintain tests for your most difficult user flows.
+
+ ### JavaScript SDK
+
+ Here is an example of a TestDriver test that installs a production Chrome extension from the Chrome Web Store and verifies that it appears in the extensions menu:
+
+ ```javascript Installing Loom from the Chrome Web Store
+ import { describe, expect, it } from "vitest";
+ import { TestDriver } from "testdriverai/vitest/hooks";
+
+ describe("Chrome Extension Test", () => {
+   it("shows Loom in the extensions menu", async (context) => {
+     const testdriver = TestDriver(context);
+
+     // Launch Chrome with Loom loaded by its Chrome Web Store ID
+     await testdriver.provision.chromeExtension({
+       extensionId: 'liecbddmkiiihnedobmlmillhodjkdmb'
+     });
+
+     // Click on the extensions button (puzzle piece icon) in the Chrome toolbar
+     const extensionsButton = await testdriver.find("The puzzle-shaped icon in the Chrome toolbar.");
+     await extensionsButton.click();
+
+     // Look for Loom in the extensions menu
+     const loomExtension = await testdriver.find("Loom extension in the extensions dropdown");
+     expect(loomExtension.found()).toBeTruthy();
+   });
+ });
+ ```
+
+ <Tip>[vitest](https://vitest.dev/) is the preferred test runner for TestDriver.</Tip>
package/lib/vitest/hooks.mjs CHANGED
@@ -154,9 +154,16 @@ export function TestDriver(context, options = {}) {
     throw new Error('TestDriver() requires Vitest context. Pass the context parameter from your test function: test("name", async (context) => { ... })');
   }
 
-  // Return existing instance if already created for this test
+  // Return existing instance if already created for this test AND it's still connected
+  // On retry, the previous instance will be disconnected, so we need to create a new one
   if (testDriverInstances.has(context.task)) {
-    return testDriverInstances.get(context.task);
+    const existingInstance = testDriverInstances.get(context.task);
+    if (existingInstance.connected) {
+      return existingInstance;
+    }
+    // Instance exists but is disconnected (likely a retry) - remove it and create fresh
+    testDriverInstances.delete(context.task);
+    lifecycleHandlers.delete(context.task);
   }
 
   // Get global plugin options if available
@@ -194,7 +201,6 @@ export function TestDriver(context, options = {}) {
 
   if (autoConnect) {
     testdriver.__connectionPromise = (async () => {
-      console.log('[testdriver] Connecting to sandbox...');
       if (debugConsoleSpy) {
         console.log('[DEBUG] Before auth - sandbox.instanceSocketConnected:', testdriver.sandbox?.instanceSocketConnected);
       }
@@ -231,74 +237,80 @@ export function TestDriver(context, options = {}) {
   }
 
   // Register cleanup handler with dashcam.stop()
-  if (!lifecycleHandlers.has(context.task)) {
-    const cleanup = async () => {
-      try {
-        // Stop dashcam if it was started - with timeout to prevent hanging
-        if (testdriver._dashcam && testdriver._dashcam.recording) {
-          try {
-            const dashcamUrl = await testdriver.dashcam.stop();
-            console.log('');
-            console.log('🎥' + chalk.yellow(` Dashcam URL`) + `: ${dashcamUrl}`);
-            console.log('');
-
-            // Set test metadata directly on the Vitest task context
-            // This is the proper way to pass data from test to reporter
-            const platform = testdriver.os || 'linux';
-            const absolutePath = context.task.file?.filepath || context.task.file?.name || 'unknown';
-            const projectRoot = process.cwd();
-            const testFile = absolutePath !== 'unknown'
-              ? path.relative(projectRoot, absolutePath)
-              : absolutePath;
-
-            // Set metadata on the task for the reporter to read
-            context.task.meta.dashcamUrl = dashcamUrl || null;
-            context.task.meta.platform = platform;
-            context.task.meta.testFile = testFile;
-            context.task.meta.testOrder = 0;
-            context.task.meta.sessionId = testdriver.getSessionId();
-
-            // Also register in memory if plugin is available (for cross-process scenarios)
-            if (globalThis.__testdriverPlugin?.registerDashcamUrl) {
-              globalThis.__testdriverPlugin.registerDashcamUrl(context.task.id, dashcamUrl, platform);
-            }
-          } catch (error) {
-            // Log more detailed error information for debugging
-            console.error('❌ Failed to stop dashcam:', error.name || error.constructor?.name || 'Error');
-            if (error.message) console.error('   Message:', error.message);
-            // NotFoundError during cleanup is expected if sandbox already terminated
-            if (error.name === 'NotFoundError' || error.responseData?.error === 'NotFoundError') {
-              console.log('   ℹ️ Sandbox session already terminated - dashcam stop skipped');
-            }
-            // Mark as not recording to prevent retries
-            if (testdriver._dashcam) {
-              testdriver._dashcam.recording = false;
-            }
-          }
-        }
-
-        // Clean up console spies
-        cleanupConsoleSpy(testdriver);
-
-        // Wait for connection to finish if it was initiated
-        if (testdriver.__connectionPromise) {
-          await testdriver.__connectionPromise.catch(() => {}); // Ignore connection errors during cleanup
-        }
-
-        // Disconnect with timeout
-        await Promise.race([
-          testdriver.disconnect(),
-          new Promise((resolve) => setTimeout(resolve, 5000)) // 5s timeout for disconnect
-        ]);
-      } catch (error) {
-        console.error('Error disconnecting client:', error);
-      }
-    };
-    lifecycleHandlers.set(context.task, cleanup);
-
-    // Vitest will call this automatically after the test
-    context.onTestFinished?.(cleanup);
-  }
+  // We always register a new cleanup handler because on retry we need to clean up the new instance
+  const cleanup = async () => {
+    // Get the current instance from the WeakMap (not from closure)
+    // This ensures we clean up the correct instance on retries
+    const currentInstance = testDriverInstances.get(context.task);
+    if (!currentInstance) {
+      return; // Already cleaned up
+    }
+
+    try {
+      // Stop dashcam if it was started - with timeout to prevent hanging
+      if (currentInstance._dashcam && currentInstance._dashcam.recording) {
+        try {
+          const dashcamUrl = await currentInstance.dashcam.stop();
+          console.log('');
+          console.log('🎥' + chalk.yellow(` Dashcam URL`) + `: ${dashcamUrl}`);
+          console.log('');
+
+          // Set test metadata directly on the Vitest task context
+          // This is the proper way to pass data from test to reporter
+          const platform = currentInstance.os || 'linux';
+          const absolutePath = context.task.file?.filepath || context.task.file?.name || 'unknown';
+          const projectRoot = process.cwd();
+          const testFile = absolutePath !== 'unknown'
+            ? path.relative(projectRoot, absolutePath)
+            : absolutePath;
+
+          // Set metadata on the task for the reporter to read
+          context.task.meta.dashcamUrl = dashcamUrl || null;
+          context.task.meta.platform = platform;
+          context.task.meta.testFile = testFile;
+          context.task.meta.testOrder = 0;
+          context.task.meta.sessionId = currentInstance.getSessionId();
+
+          // Also register in memory if plugin is available (for cross-process scenarios)
+          if (globalThis.__testdriverPlugin?.registerDashcamUrl) {
+            globalThis.__testdriverPlugin.registerDashcamUrl(context.task.id, dashcamUrl, platform);
+          }
+        } catch (error) {
+          // Log more detailed error information for debugging
+          console.error('❌ Failed to stop dashcam:', error.name || error.constructor?.name || 'Error');
+          if (error.message) console.error('   Message:', error.message);
+          // NotFoundError during cleanup is expected if sandbox already terminated
+          if (error.name === 'NotFoundError' || error.responseData?.error === 'NotFoundError') {
+            console.log('   ℹ️ Sandbox session already terminated - dashcam stop skipped');
+          }
+          // Mark as not recording to prevent retries
+          if (currentInstance._dashcam) {
+            currentInstance._dashcam.recording = false;
+          }
+        }
+      }
+
+      // Clean up console spies
+      cleanupConsoleSpy(currentInstance);
+
+      // Wait for connection to finish if it was initiated
+      if (currentInstance.__connectionPromise) {
+        await currentInstance.__connectionPromise.catch(() => {}); // Ignore connection errors during cleanup
+      }
+
+      // Disconnect with timeout
+      await Promise.race([
+        currentInstance.disconnect(),
+        new Promise((resolve) => setTimeout(resolve, 5000)) // 5s timeout for disconnect
+      ]);
+    } catch (error) {
+      console.error('Error disconnecting client:', error);
+    }
+  };
+  lifecycleHandlers.set(context.task, cleanup);
+
+  // Vitest will call this automatically after the test (each retry attempt)
+  context.onTestFinished?.(cleanup);
 
   return testdriver;
 }
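Distilled, the hunk above changes the per-task memoization so that a disconnected instance left behind by a retry is evicted and rebuilt, and cleanup re-resolves the instance from the map instead of closing over a stale one. A simplified sketch of that pattern (not the actual hooks.mjs code; names shortened):

```javascript
// Simplified sketch of the retry-aware memoization above.
const instances = new WeakMap(); // keyed by the Vitest task object

function getOrCreateInstance(task, createInstance) {
  const existing = instances.get(task);
  if (existing?.connected) return existing; // reuse the live instance
  instances.delete(task); // a retry left a disconnected instance behind: evict it
  const fresh = createInstance();
  instances.set(task, fresh);
  return fresh;
}

async function cleanup(task) {
  // Look the instance up again rather than capturing it in a closure,
  // so a retry's replacement instance is the one that gets cleaned up.
  const current = instances.get(task);
  if (!current) return; // already cleaned up
  await current.disconnect();
  instances.delete(task);
}
```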
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "testdriverai",
-  "version": "7.2.9",
+  "version": "7.2.10",
   "description": "Next generation autonomous AI agent for end-to-end testing of web & desktop",
   "main": "sdk.js",
   "exports": {