testdriverai 7.8.0-test.8 → 7.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/agent/index.js +6 -5
  2. package/agent/lib/commands.js +3 -2
  3. package/agent/lib/http.js +144 -0
  4. package/agent/lib/sandbox.js +117 -102
  5. package/agent/lib/sdk.js +4 -2
  6. package/agent/lib/system.js +25 -65
  7. package/ai/skills/testdriver-mcp/SKILL.md +7 -0
  8. package/ai/skills/testdriver-running-tests/SKILL.md +1 -1
  9. package/docs/changelog.mdx +148 -8
  10. package/docs/docs.json +44 -37
  11. package/docs/images/content/vscode/v7-chat.png +0 -0
  12. package/docs/images/content/vscode/v7-choose-agent.png +0 -0
  13. package/docs/images/content/vscode/v7-full.png +0 -0
  14. package/docs/images/content/vscode/v7-onboarding.png +0 -0
  15. package/docs/v7/cache.mdx +223 -0
  16. package/docs/v7/copilot/auto-healing.mdx +265 -0
  17. package/docs/v7/copilot/creating-tests.mdx +156 -0
  18. package/docs/v7/copilot/github.mdx +143 -0
  19. package/docs/v7/copilot/running-tests.mdx +149 -0
  20. package/docs/v7/copilot/setup.mdx +143 -0
  21. package/docs/v7/enterprise.mdx +3 -110
  22. package/docs/v7/errors.mdx +248 -0
  23. package/docs/v7/events.mdx +358 -0
  24. package/docs/v7/examples/exec-output.mdx +85 -0
  25. package/docs/v7/examples/exec-pwsh.mdx +83 -0
  26. package/docs/v7/examples/focus-window.mdx +62 -0
  27. package/docs/v7/{cloud.mdx → hosted.mdx} +43 -5
  28. package/docs/v7/mcp.mdx +9 -0
  29. package/docs/v7/provision.mdx +333 -0
  30. package/docs/v7/quickstart.mdx +30 -2
  31. package/docs/v7/redraw.mdx +216 -0
  32. package/docs/v7/running-tests.mdx +1 -1
  33. package/docs/v7/screenshots.mdx +186 -0
  34. package/docs/v7/self-hosted.mdx +127 -44
  35. package/interfaces/logger.js +0 -12
  36. package/interfaces/vitest-plugin.mjs +3 -0
  37. package/lib/core/Dashcam.js +13 -16
  38. package/lib/environments.json +18 -0
  39. package/lib/resolve-channel.js +4 -3
  40. package/{examples → manual}/drag-and-drop.test.mjs +1 -1
  41. package/package.json +3 -3
  42. package/sdk.js +3 -3
  43. package/vitest.config.mjs +20 -32
  44. /package/{examples → manual}/flake-diffthreshold-001.test.mjs +0 -0
  45. /package/{examples → manual}/flake-diffthreshold-01.test.mjs +0 -0
  46. /package/{examples → manual}/flake-diffthreshold-05.test.mjs +0 -0
  47. /package/{examples → manual}/flake-noredraw-cache.test.mjs +0 -0
  48. /package/{examples → manual}/flake-noredraw-nocache.test.mjs +0 -0
  49. /package/{examples → manual}/flake-redraw-cache.test.mjs +0 -0
  50. /package/{examples → manual}/flake-redraw-nocache.test.mjs +0 -0
  51. /package/{examples → manual}/flake-rocket-match.test.mjs +0 -0
  52. /package/{examples → manual}/flake-shared.mjs +0 -0
  53. /package/{examples → manual}/no-provision.test.mjs +0 -0
  54. /package/{examples → manual}/scroll-until-text.test.mjs +0 -0
@@ -0,0 +1,186 @@
1
+ ---
2
+ title: "Screenshots"
3
+ sidebarTitle: "Screenshots"
4
+ description: "Capture and manage screenshots during test execution"
5
+ icon: "camera"
6
+ mode: "wide"
7
+ ---
8
+
9
+ ## Overview
10
+
11
+ TestDriver can capture screenshots manually at any point during a test, or automatically before and after every command. Screenshots are saved to a structured directory for easy debugging.
12
+
13
+ ## Manual Screenshots
14
+
15
+ Use `testdriver.screenshot()` to capture the current screen:
16
+
17
+ ```javascript
18
+ const path = await testdriver.screenshot();
19
+ console.log('Saved to:', path);
20
+ // .testdriver/screenshots/my-test/screenshot-1719849312345.png
21
+ ```
22
+
23
+ ### Options
24
+
25
+ ```javascript
26
+ await testdriver.screenshot(filename?)
27
+ ```
28
+
29
+ <ParamField path="filename" type="string">
30
+ Custom filename for the screenshot. `.png` is appended automatically if missing. If omitted, defaults to `screenshot-<timestamp>.png`.
31
+ </ParamField>
32
+
33
+ **Returns:** `Promise<string>` — the absolute file path of the saved screenshot.
34
+
35
+ ```javascript
36
+ // Default filename
37
+ await testdriver.screenshot();
38
+ // → .testdriver/screenshots/my-test/screenshot-1719849312345.png
39
+
40
+ // Custom filename
41
+ await testdriver.screenshot('login-page');
42
+ // → .testdriver/screenshots/my-test/login-page.png
43
+
44
+ // With .png extension
45
+ await testdriver.screenshot('dashboard-loaded.png');
46
+ // → .testdriver/screenshots/my-test/dashboard-loaded.png
47
+ ```
48
+
49
+ ## Auto Screenshots
50
+
51
+ Enable automatic screenshots before and after every command:
52
+
53
+ ```javascript
54
+ const testdriver = new TestDriver({
55
+ autoScreenshots: true,
56
+ });
57
+ ```
58
+
59
+ <ParamField path="autoScreenshots" type="boolean" default={false}>
60
+ When `true`, captures a screenshot before and after every SDK command (`click`, `type`, `find`, `scroll`, `hover`, `pressKeys`, `assert`, `exec`, etc.). On error, an error-phase screenshot replaces the after-phase screenshot.
61
+ </ParamField>
62
+
63
+ ### Filename Format
64
+
65
+ Auto-screenshots follow this naming convention:
66
+
67
+ ```
68
+ <seq>-<action>-<phase>-L<line>-<description>.png
69
+ ```
70
+
71
+ | Part | Description | Example |
72
+ |---|---|---|
73
+ | `seq` | 3-digit zero-padded sequence number | `001` |
74
+ | `action` | Command name | `click`, `type`, `find` |
75
+ | `phase` | `before`, `after`, or `error` | `before` |
76
+ | `L<line>` | Source line number from your test file | `L42` |
77
+ | `description` | Sanitized from command arguments (max 30 chars) | `submit-button` |
78
+
79
+ **Examples:**
80
+ ```
81
+ 001-find-before-L15-login-button.png
82
+ 002-find-after-L15-login-button.png
83
+ 003-click-before-L16-login-button.png
84
+ 004-click-after-L16-login-button.png
85
+ 005-type-before-L18-username-field.png
86
+ 006-type-error-L18-username-field.png
87
+ ```
88
+
89
+ ### Phases
90
+
91
+ | Phase | When | Description |
92
+ |---|---|---|
93
+ | `before` | Before command executes | Captures the screen state before the action |
94
+ | `after` | After successful command | Captures the result of the action |
95
+ | `error` | After failed command | Captures the screen at the point of failure (replaces `after`) |
96
+
97
+ ## Screenshot Directory
98
+
99
+ Screenshots are saved to:
100
+
101
+ ```
102
+ <cwd>/.testdriver/screenshots/<testFileName>/
103
+ ```
104
+
105
+ Where `<testFileName>` is the test file name without its extension. For example, a test at `tests/login.test.mjs` saves screenshots to `.testdriver/screenshots/login.test/`.
106
+
107
+ ### Directory Cleanup
108
+
109
+ The screenshot directory for each test file is **automatically cleaned** at the start of a test run. This happens once per process per test file to prevent concurrent tests from the same file from interfering with each other.
110
+
111
+ ## Debug Screenshots
112
+
113
+ Elements have a `saveDebugScreenshot()` method for debugging element detection:
114
+
115
+ ```javascript
116
+ const el = await testdriver.find('submit button');
117
+
118
+ // Save the screenshot that was used to detect this element
119
+ const debugPath = await el.saveDebugScreenshot();
120
+ console.log('Debug screenshot:', debugPath);
121
+ // → ./debug-screenshot-1719849312345.png
122
+
123
+ // Custom path
124
+ await el.saveDebugScreenshot('./my-debug.png');
125
+ ```
126
+
127
+ This saves the screenshot that was captured during the `find()` call, which can be useful for understanding what the AI "saw" when locating the element.
128
+
129
+ ## Complete Example
130
+
131
+ ```javascript
132
+ import { describe, it, beforeAll, afterAll } from 'vitest';
133
+ import TestDriver from 'testdriverai';
134
+
135
+ describe('Screenshot Example', () => {
136
+ let testdriver;
137
+
138
+ beforeAll(async () => {
139
+ testdriver = new TestDriver({
140
+ autoScreenshots: true, // capture every step
141
+ });
142
+ await testdriver.ready();
143
+ await testdriver.provision.chrome({ url: 'https://example.com' });
144
+ });
145
+
146
+ afterAll(async () => {
147
+ await testdriver.disconnect();
148
+ });
149
+
150
+ it('captures the login flow', async () => {
151
+ // Auto-screenshots capture before/after each command
152
+
153
+ // Manual screenshot for a specific moment
154
+ await testdriver.screenshot('initial-page-load');
155
+
156
+ const username = await testdriver.find('username input');
157
+ await username.click();
158
+ await testdriver.type('testuser@example.com');
159
+
160
+ await testdriver.screenshot('after-username-entry');
161
+
162
+ const password = await testdriver.find('password input');
163
+ await password.click();
164
+ await testdriver.type('password123');
165
+
166
+ await testdriver.find('login button').click();
167
+
168
+ await testdriver.screenshot('after-login-click');
169
+ });
170
+ });
171
+ ```
172
+
173
+ After running, your screenshot directory will contain:
174
+ ```
175
+ .testdriver/screenshots/login-flow.test/
176
+ ├── initial-page-load.png
177
+ ├── 001-find-before-L18-username-input.png
178
+ ├── 002-find-after-L18-username-input.png
179
+ ├── 003-click-before-L19-username-input.png
180
+ ├── 004-click-after-L19-username-input.png
181
+ ├── 005-type-before-L20-testuser-example-com.png
182
+ ├── 006-type-after-L20-testuser-example-com.png
183
+ ├── after-username-entry.png
184
+ ├── 007-find-before-L24-password-input.png
185
+ ├── ...
186
+ ```
@@ -1,24 +1,50 @@
1
1
  ---
2
- title: "Self-Hosted"
2
+ title: "Self-Hosted (Enterprise)"
3
3
  sidebarTitle: "Self-Hosted"
4
- description: "Unlimited test execution, complete privacy, and the ability to customize everything — all for a predictable flat license fee."
4
+ description: "Our enterprise solution with unlimited test execution, assisted setup, and dedicated support."
5
5
  icon: "server"
6
+ mode: "wide"
6
7
  ---
7
8
 
8
- Self-hosted pricing is based on **parallel test capacity**: the number of tests you can run simultaneously on **your infrastructure**.
9
-
10
- With self-hosting, you get:.
11
-
12
- - **Flat license fee** per parallel test slot
13
- - **Unlimited test execution** — run as many tests as you want
14
- - **No device-second metering** predictable monthly costs
15
- - **Use your own AI keys** control data usage with your own OpenAI, Anthropic, or other AI provider keys
16
- - **Custom hardware & software** — choose instance types, resolution, install specific software, and configure networking as needed
17
- - **Debug & Customize** — RDP into test machines, install custom software, modify the AMI, and debug issues directly. No black boxes.
18
-
19
- ## Get Started
20
-
21
- Ready to self-host? Follow our comprehensive AWS setup guide:
9
+ Self-hosted is our enterprise solution for teams that need unlimited test execution, infrastructure control, and dedicated support. Pricing is based on **parallel test capacity** with a flat license fee — no per-second billing.
10
+
11
+ <CardGroup cols={2}>
12
+ <Card title="Unlimited Execution" icon="infinity">
13
+ Run as many tests as you want with no device-second metering. Predictable monthly costs.
14
+ </Card>
15
+ <Card title="Assisted Setup & Support" icon="headset">
16
+ Our team helps you deploy, configure, and optimize your infrastructure. Dedicated engineering support included.
17
+ </Card>
18
+ <Card title="Full Control" icon="gear">
19
+ Use your own AI keys, custom hardware, specific software, and network configurations. RDP into test machines for debugging.
20
+ </Card>
21
+ <Card title="Security & Compliance" icon="shield-check">
22
+ Keep data in your environment. Air-gapped deployment available for regulated industries.
23
+ </Card>
24
+ </CardGroup>
25
+
26
+ ## Deployment Options
27
+
28
+ Choose the level of control you need:
29
+
30
+ | Component | Standard | Air-Gapped |
31
+ |-----------|----------|------------|
32
+ | **Test Sandboxes** | Your AWS | Your infrastructure (any cloud or on-prem) |
33
+ | **Dashboard** | TestDriver hosted | Your infrastructure |
34
+ | **API** | TestDriver hosted | Your infrastructure |
35
+ | **AI Processing** | Your API keys | Your infrastructure |
36
+ | **Data Storage** | Your AWS account | 100% your infrastructure |
37
+ | **Network** | Internet access required | Fully air-gapped |
38
+ | **Cloud Providers** | AWS | AWS, Azure, GCP, on-prem |
39
+
40
+ ### Standard Deployment
41
+
42
+ Run test sandboxes on your AWS infrastructure while using TestDriver's hosted dashboard and API:
43
+
44
+ - **Quick setup** via CloudFormation — deploy in hours
45
+ - **Dashboard access** at [console.testdriver.ai](https://console.testdriver.ai)
46
+ - **Your AI keys** — control costs with your own OpenAI, Anthropic, or other provider
47
+ - **Custom AMIs** — install specific software, configure networking
22
48
 
23
49
  <Card
24
50
  title="AWS Setup Guide"
@@ -28,39 +54,96 @@ Ready to self-host? Follow our comprehensive AWS setup guide:
28
54
  Step-by-step instructions for deploying TestDriver on your AWS infrastructure using CloudFormation.
29
55
  </Card>
30
56
 
57
+ ### Air-Gapped Deployment
58
+
59
+ Deploy the entire TestDriver stack in your environment for complete isolation:
60
+
61
+ - **Full stack** — dashboard, API, and test infrastructure all in your environment
62
+ - **No external dependencies** — data never leaves your network perimeter
63
+ - **Any infrastructure** — AWS, Azure, GCP, or on-premises
64
+ - **Regulated industries** — government, defense, healthcare, finance
65
+
66
+ ## Custom VM Images
67
+
68
+ Build test environments with your applications, dependencies, and user data pre-installed. You get full access to:
69
+
70
+ - **Golden VM** — our pre-configured base image with TestDriver agent, drivers, and optimizations
71
+ - **Packer scripts** — build custom AMIs with your applications, user data, and configurations
72
+ - **Faster test startup** — skip installation steps by baking dependencies into your image
73
+ - **Consistent environments** — every test runs on an identical, reproducible machine
74
+
75
+ <AccordionGroup>
76
+ <Accordion title="What can you customize?">
77
+ - Install applications (browsers, desktop apps, dev tools)
78
+ - Configure user accounts and credentials
79
+ - Set up network proxies and certificates
80
+ - Install fonts, language packs, and locales
81
+ - Pre-seed databases or test fixtures
82
+ - Configure Windows/Linux settings
83
+ </Accordion>
84
+
85
+ <Accordion title="How it works">
86
+ 1. We provide our golden VM base image and Packer scripts
87
+ 2. You customize the scripts to install your software and configuration
88
+ 3. Run Packer to build your custom AMI
89
+ 4. Configure TestDriver to use your custom AMI for test sandboxes
90
+ 5. Tests spin up with everything pre-installed — no setup time wasted
91
+ </Accordion>
92
+ </AccordionGroup>
93
+
94
+ ## Implementation Process
95
+
96
+ <Steps>
97
+ <Step title="Discovery Call">
98
+ Discuss your requirements, security constraints, and integration needs with our team.
99
+ </Step>
100
+
101
+ <Step title="Architecture Review">
102
+ Our engineers design a deployment architecture that meets your security and compliance requirements.
103
+ </Step>
104
+
105
+ <Step title="Deployment">
106
+ We work with your team to deploy TestDriver, including assisted setup and configuration.
107
+ </Step>
108
+
109
+ <Step title="Integration">
110
+ Connect TestDriver to your CI/CD pipelines, internal tools, and workflows.
111
+ </Step>
112
+
113
+ <Step title="Training & Handoff">
114
+ Comprehensive training for your team on operating and maintaining the deployment.
115
+ </Step>
116
+ </Steps>
117
+
118
+ ## What's Included
31
119
 
32
- ## Who Should Self-Host?
33
-
34
- Self-hosting is ideal for teams that:
35
-
36
- - **Run high test volumes** — Flat pricing becomes more economical at scale
37
- - **Want infrastructure control** — Custom hardware, specific software dependencies, or network configurations
38
- - **Prefer predictable costs** — Budget with confidence using flat monthly fees
39
-
40
-
41
- ## How It Works
42
-
43
- With self-hosting, you run test sandboxes on your own AWS infrastructure. TestDriver still provides:
44
-
45
- - **Dashboard** — View test results, analytics, and reports at [console.testdriver.ai](https://console.testdriver.ai)
46
- - **API** — Orchestration and AI-powered test execution
47
- - **License Management** — Your parallel test capacity
48
-
49
- You provide:
50
-
51
- - **AWS Infrastructure** — EC2 instances running in your account
52
- - **AI API Keys** — Use your own OpenAI, Anthropic, or other AI provider keys
53
- - **Custom Configuration** — Hardware specs, networking, installed software
120
+ - **Flat license fee** per parallel test slot
121
+ - **Unlimited test execution** — no device-second charges
122
+ - **Assisted setup** — our team helps you deploy and configure
123
+ - **Dedicated support** — direct access to our engineering team
124
+ - **Custom contract terms** — volume-based pricing, custom SLAs
125
+ - **Professional services** — implementation assistance and training
54
126
 
55
- ## Comparison vs Cloud
127
+ ## Comparison: Hosted vs Self-Hosted
56
128
 
57
- | Feature | Cloud | Self-Hosted |
58
- |---------|-------|-------------|
59
- | **Setup Time** | Minutes | Hours |
129
+ | Feature | Hosted | Self-Hosted |
130
+ |---------|--------|-------------|
131
+ | **Setup Time** | Minutes | Hours (assisted) |
60
132
  | **Pricing Model** | Device-seconds | Flat license fee |
61
- | **Infrastructure Management** | TestDriver | You |
62
- | **Device Location** | TestDriver cloud | Your AWS account |
133
+ | **Infrastructure** | TestDriver | Your AWS or any cloud |
63
134
  | **AI API Keys** | TestDriver's | Your own |
64
135
  | **Custom Software** | Limited | Full control |
65
136
  | **Hardware Selection** | Standard | Your choice |
66
137
  | **Debugging Access** | Replays only | Full RDP access |
138
+ | **Support** | Community/Standard | Dedicated engineering |
139
+ | **Air-Gapped Option** | No | Yes |
140
+
141
+ ## Get Started
142
+
143
+ <Card
144
+ title="Schedule a Consultation"
145
+ icon="calendar"
146
+ href="https://testdriver.ai/demo"
147
+ >
148
+ Discuss your requirements with our team and get a custom proposal for your self-hosted deployment.
149
+ </Card>
@@ -24,18 +24,6 @@ class CustomTransport extends Transport {
24
24
  return;
25
25
  }
26
26
 
27
- if (!this.sandbox) {
28
- this.sandbox = require("../agent/lib/sandbox");
29
- }
30
-
31
- if (this.sandbox && this.sandbox.instanceSocketConnected) {
32
- this.sandbox.send({
33
- type: "output",
34
- output: Buffer.from(message).toString("base64"),
35
- }).catch((e) => {
36
- console.error("Error sending log:", e);
37
- });
38
- }
39
27
  } catch (e) {
40
28
  console.error("Error in CustomTransport log method:", e);
41
29
  }
@@ -840,6 +840,9 @@ class TestDriverReporter {
840
840
  logger.debug("API key present:", !!pluginState.apiKey);
841
841
  logger.debug("API root:", pluginState.apiRoot);
842
842
 
843
+ // Environment info is printed by the SDK when each test initializes,
844
+ // so we skip the duplicate banner here in the reporter.
845
+
843
846
  // Check if we should enable the reporter
844
847
  if (!pluginState.apiKey) {
845
848
  logger.warn("No API key provided, skipping test recording");
@@ -147,20 +147,13 @@ class Dashcam {
147
147
  * @private
148
148
  */
149
149
  async _getDashcamPath() {
150
- const shell = this._getShell();
151
150
 
152
151
  if (this.client.os === "windows") {
153
152
  return "C:\\Program Files\\nodejs\\dashcam.cmd";
153
+ } else {
154
+ return "/usr/bin/dashcam";
154
155
  }
155
156
 
156
- const npmPrefix = await this.client.exec(
157
- shell,
158
- "npm prefix -g",
159
- 40000,
160
- process.env.TD_DEBUG == "true" ? false : true,
161
- );
162
-
163
- return npmPrefix.trim() + "/bin/dashcam";
164
157
  }
165
158
 
166
159
  /**
@@ -289,13 +282,17 @@ class Dashcam {
289
282
  const apiRoot = this._getApiRoot();
290
283
 
291
284
  if (this.client.os === "windows") {
292
- const addLogOutput = await this.client.exec(
293
- shell,
294
- `$env:TD_API_ROOT="${apiRoot}"; & "${dashcamPath}" logs --add --type=web --pattern="${pattern}" --name="${name}"`,
295
- 120000,
296
- process.env.TD_DEBUG == "true" ? false : true,
297
- );
298
- this._log("debug", "Add web log tracking output:", addLogOutput);
285
+ try {
286
+ const addLogOutput = await this.client.exec(
287
+ shell,
288
+ `$env:TD_API_ROOT="${apiRoot}"; & "${dashcamPath}" logs --add --type=web --pattern="${pattern}" --name="${name}"`,
289
+ 120000,
290
+ process.env.TD_DEBUG == "true" ? false : true,
291
+ );
292
+ this._log("debug", "Add web log tracking output:", addLogOutput);
293
+ } catch (err) {
294
+ this._log("warn", "Add web log tracking failed:", err.message);
295
+ }
299
296
  } else {
300
297
  const addLogOutput = await this.client.exec(
301
298
  shell,
@@ -0,0 +1,18 @@
1
+ {
2
+ "dev": {
3
+ "apiRoot": "https://api-dev.testdriver.ai",
4
+ "consoleUrl": "https://console-dev.testdriver.ai"
5
+ },
6
+ "test": {
7
+ "apiRoot": "https://api-test.testdriver.ai",
8
+ "consoleUrl": "https://console-test.testdriver.ai"
9
+ },
10
+ "canary": {
11
+ "apiRoot": "https://api-canary.testdriver.ai",
12
+ "consoleUrl": "https://console-canary.testdriver.ai"
13
+ },
14
+ "stable": {
15
+ "apiRoot": "https://api.testdriver.ai",
16
+ "consoleUrl": "https://console.testdriver.ai"
17
+ }
18
+ }
@@ -9,12 +9,13 @@
9
9
  */
10
10
 
11
11
  const semver = require("semver");
12
+ const environments = require("./environments.json");
12
13
 
13
14
  const CHANNELS = {
14
15
  dev: "http://localhost:1337",
15
- test: "https://api-test.testdriver.ai",
16
- canary: "https://api-canary.testdriver.ai",
17
- latest: "https://api.testdriver.ai",
16
+ test: environments.test.apiRoot,
17
+ canary: environments.canary.apiRoot,
18
+ latest: environments.stable.apiRoot,
18
19
  };
19
20
 
20
21
  function resolveActiveChannel() {
@@ -46,7 +46,7 @@ describe("Drag and Drop Test", () => {
46
46
 
47
47
  const recycleBin = await testdriver.find(
48
48
  "Recycle Bin, recycle bin icon in the top left corner of the desktop",
49
- );
49
+ ).hover();
50
50
  await recycleBin.mouseUp();
51
51
 
52
52
  // Assert "New Text Document" icon is not on the Desktop
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "testdriverai",
3
- "version": "7.8.0-test.8",
3
+ "version": "7.8.0",
4
4
  "description": "Next generation autonomous AI agent for end-to-end testing of web & desktop",
5
5
  "main": "sdk.js",
6
6
  "types": "sdk.d.ts",
@@ -37,8 +37,8 @@
37
37
  "start": "node bin/testdriverai.js",
38
38
  "dev": "DEV=true node bin/testdriverai.js",
39
39
  "debug": "DEV=true VERBOSE=true node bin/testdriverai.js",
40
- "docs": "npm run docs:skills && cd docs && npx mint@latest dev",
41
- "docs:dev": "cd docs && npx mint dev",
40
+ "docs": "npm run docs:skills && cd docs && npx mint@latest dev --port 3002",
41
+ "docs:dev": "cd docs && npx mint dev --port 3002",
42
42
  "docs:build": "npm run docs:skills && cd docs && npx mint@latest build",
43
43
  "docs:links": "node docs/_scripts/link-replacer.js",
44
44
  "docs:skills": "node docs/_scripts/generate-skills.js",
package/sdk.js CHANGED
@@ -2749,6 +2749,9 @@ CAPTCHA_SOLVER_EOF`,
2749
2749
  }
2750
2750
  }
2751
2751
 
2752
+ // Log environment info immediately so it's visible even if auth fails
2753
+ this._logEnvironmentInfo();
2754
+
2752
2755
  // Authenticate first if not already authenticated
2753
2756
  if (!this.authenticated) {
2754
2757
  await this.auth();
@@ -2865,9 +2868,6 @@ CAPTCHA_SOLVER_EOF`,
2865
2868
  sandboxId: this.instance?.instanceId,
2866
2869
  });
2867
2870
 
2868
- // Log environment info (non-blocking, skip on stable)
2869
- this._logEnvironmentInfo();
2870
-
2871
2871
  return this.instance;
2872
2872
  }
2873
2873
 
package/vitest.config.mjs CHANGED
@@ -1,7 +1,9 @@
1
1
  import TestDriver from "testdriverai/vitest";
2
2
  import { defineConfig } from "vitest/config";
3
- import { readFileSync, existsSync } from "fs";
4
- import { resolve } from "path";
3
+ import { createRequire } from "module";
4
+
5
+ const require = createRequire(import.meta.url);
6
+ const { resolveEnv, getEnvironmentNames } = require("../shared/resolve-env");
5
7
 
6
8
  // Always include AWS setup - it will be a no-op unless TD_OS=windows
7
9
  // Note: dotenv is loaded automatically by the TestDriver SDK
@@ -25,45 +27,31 @@ const sharedTestConfig = {
25
27
  include: ["examples/**/*.test.mjs"],
26
28
  };
27
29
 
28
- // ── Parse a simple KEY=VALUE .env file ──────────────────────────────
29
- function parseEnvFile(filePath) {
30
- if (!existsSync(filePath)) return {};
31
- const env = {};
32
- for (const line of readFileSync(filePath, "utf-8").split("\n")) {
33
- const trimmed = line.trim();
34
- if (!trimmed || trimmed.startsWith("#")) continue;
35
- const idx = trimmed.indexOf("=");
36
- if (idx === -1) continue;
37
- env[trimmed.slice(0, idx)] = trimmed.slice(idx + 1);
38
- }
39
- return env;
40
- }
41
-
42
- // ── Load base .env + per-environment overlay ────────────────────────
43
- const monoRoot = resolve(import.meta.dirname, "..");
44
- const baseEnv = parseEnvFile(resolve(monoRoot, ".env"));
45
-
46
- const environments = ["dev", "test", "canary", "stable"];
30
+ // ── Resolve env vars via shared/resolve-env.js ──────────────────────
31
+ // Uses: environments.json (URLs) + envs/{env}.env (overlay) + fixtures (API keys)
32
+ // TD_PLAN selects which plan's API key to use (default: enterprise)
33
+ const plan = process.env.TD_PLAN || "enterprise";
34
+ const defaultEnv = process.env.TD_ENV || "dev";
35
+ const environments = getEnvironmentNames();
47
36
 
48
- function envForProject(envName) {
49
- const overlay = parseEnvFile(resolve(monoRoot, "envs", `${envName}.env`));
50
- return { ...baseEnv, ...overlay };
51
- }
37
+ // Apply default env to the main process so the reporter/plugin picks it up
38
+ // (vitest's test.env only propagates to worker processes, not the main process)
39
+ const defaultResolved = resolveEnv(defaultEnv, plan);
40
+ Object.assign(process.env, defaultResolved);
52
41
 
53
- // ── If TD_ENV is set (e.g. from CLI), only run that environment ─────
54
- // Usage: TD_ENV=dev vitest run
55
- // TD_ENV=canary vitest run examples/assert.test.mjs
56
- // vitest run --project dev
57
- // vitest run --project canary --project stable
42
+ // ── Usage ───────────────────────────────────────────────────────────
43
+ // TD_PLAN=enterprise vitest run --project dev
44
+ // TD_PLAN=free vitest run --project test examples/assert.test.mjs
45
+ // vitest run --project canary --project stable
58
46
  export default defineConfig({
59
47
  test: {
60
48
  ...sharedTestConfig,
61
- env: envForProject(process.env.TD_ENV || "dev"),
49
+ env: defaultResolved,
62
50
  projects: environments.map((envName) => ({
63
51
  extends: true,
64
52
  test: {
65
53
  name: envName,
66
- env: envForProject(envName),
54
+ env: resolveEnv(envName, plan),
67
55
  },
68
56
  })),
69
57
  },
File without changes
File without changes
File without changes