testdriverai 7.9.59-test → 7.9.60-test

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -771,14 +771,160 @@ const createSandbox = function (emitter, analytics, sessionInstance) {
771
771
  break; // slot approved and provisioned — exit the while loop
772
772
  }
773
773
 
774
+ // ─── Handle async provisioning status ─────────────────────────────
775
+ // API may return early with status: 'provisioning' while a background
776
+ // job is still creating/configuring the sandbox. Prefer Ably control
777
+ // events for completion and only fall back to authenticate polling when
778
+ // an event is not observed in time.
779
+ var provisioningPollStart = Date.now();
780
+ var provisioningTimeoutMs = 10 * 60 * 1000;
781
+ while (reply.status === 'provisioning') {
782
+ var provisionElapsed = Date.now() - provisioningPollStart;
783
+ if (provisionElapsed >= provisioningTimeoutMs) {
784
+ var provisioningTimeoutErr = new Error(
785
+ "Sandbox provisioning timed out after " +
786
+ Math.round(provisionElapsed / 1000) +
787
+ "s" +
788
+ (this._sandboxId || (reply && reply.sandboxId)
789
+ ? " for sandbox " + (this._sandboxId || (reply && reply.sandboxId))
790
+ : "") +
791
+ ". Last known status: provisioning"
792
+ );
793
+ provisioningTimeoutErr.responseData = reply;
794
+ throw provisioningTimeoutErr;
795
+ }
796
+ logger.log(
797
+ 'Waiting for sandbox to be ready...'
798
+ );
799
+
800
+ var self = this;
801
+ var provisioningEvent = null;
802
+
803
+ if (this._sessionChannel) {
804
+ provisioningEvent = await new Promise(function (resolve) {
805
+ var resolved = false;
806
+ var eventTimeout = 30000;
807
+
808
+ function finish(data) {
809
+ if (resolved) return;
810
+ resolved = true;
811
+ clearTimeout(timer);
812
+ try { self._sessionChannel.unsubscribe('control', onProvisionCtrl); } catch (_) {}
813
+ resolve(data || null);
814
+ }
815
+
816
+ function onProvisionCtrl(msg) {
817
+ var data = msg && msg.data;
818
+ if (!data) return;
819
+ if (data.type === 'provisioning.started') {
820
+ logger.log((data.message || 'Provisioning started') + (data.os ? ' (' + data.os + ')' : ''));
821
+ return;
822
+ }
823
+ if (data.type === 'provisioning.progress') {
824
+ var progress = data.message || ('Provisioning step: ' + (data.phase || 'in-progress'));
825
+ logger.log(progress);
826
+ return;
827
+ }
828
+ if (data.type === 'provisioning.completed' || data.type === 'provisioning.failed') {
829
+ finish(data);
830
+ }
831
+ }
832
+
833
+ var timer = setTimeout(function () {
834
+ finish(null);
835
+ }, eventTimeout);
836
+ if (timer.unref) timer.unref();
837
+
838
+ try {
839
+ self._sessionChannel.subscribe('control', onProvisionCtrl);
840
+
841
+ // Check recent history to close race window where event was
842
+ // published before this subscription was attached.
843
+ self._sessionChannel.history({ limit: 20 }).then(function (page) {
844
+ if (!page || !page.items || resolved) return;
845
+ for (var i = 0; i < page.items.length; i++) {
846
+ var item = page.items[i];
847
+ var data = item && item.data;
848
+ if (item && item.name === 'control' && data && (data.type === 'provisioning.completed' || data.type === 'provisioning.failed')) {
849
+ finish(data);
850
+ return;
851
+ }
852
+ }
853
+ }).catch(function (err) {
854
+ logger.warn('Provisioning history lookup failed (non-fatal): ' + (err.message || err));
855
+ });
856
+ } catch (subscribeErr) {
857
+ logger.warn('Provisioning event subscribe failed (non-fatal): ' + (subscribeErr.message || subscribeErr));
858
+ finish(null);
859
+ }
860
+ });
861
+ }
862
+
863
+ if (provisioningEvent && provisioningEvent.type === 'provisioning.failed') {
864
+ var eventErr = new Error(
865
+ provisioningEvent.errorMessage || 'Failed while waiting for sandbox provisioning',
866
+ );
867
+ eventErr.responseData = provisioningEvent;
868
+ throw eventErr;
869
+ }
870
+
871
+ if (provisioningEvent && provisioningEvent.type === 'provisioning.completed') {
872
+ // Event carries the final payload shape from the API, so we can stop
873
+ // polling authenticate in the common case.
874
+ reply = Object.assign({}, reply, provisioningEvent);
875
+ if (reply.status === 'provisioning') {
876
+ reply.status = 'ready';
877
+ }
878
+ if (reply.success !== true) {
879
+ reply.success = true;
880
+ }
881
+ break;
882
+ }
883
+
884
+ await new Promise(function (resolve) {
885
+ var t = setTimeout(resolve, 10000);
886
+ if (t.unref) t.unref();
887
+ });
888
+
889
+ var pollBody = {
890
+ apiKey: this.apiKey,
891
+ version: version,
892
+ os: message.os || this.os || 'linux',
893
+ session: sessionId,
894
+ apiRoot: this.apiRoot,
895
+ sandboxId: this._sandboxId || (reply && reply.sandboxId),
896
+ slotApproved: true,
897
+ };
898
+ if (message.resolution) pollBody.resolution = message.resolution;
899
+ if (message.ci) pollBody.ci = message.ci;
900
+ if (message.ami) pollBody.ami = message.ami;
901
+ if (message.instanceType) pollBody.instanceType = message.instanceType;
902
+ if (message.e2bTemplateId) pollBody.e2bTemplateId = message.e2bTemplateId;
903
+ if (message.keepAlive !== undefined) pollBody.keepAlive = message.keepAlive;
904
+
905
+ reply = await this._httpPostWithConcurrencyRetry(
906
+ "/api/v7/sandbox/authenticate",
907
+ pollBody,
908
+ timeout,
909
+ );
910
+
911
+ if (!reply.success && reply.status !== 'provisioning') {
912
+ var provisioningErr = new Error(
913
+ reply.errorMessage || "Failed while waiting for sandbox provisioning",
914
+ );
915
+ provisioningErr.responseData = reply;
916
+ throw provisioningErr;
917
+ }
918
+ }
919
+
774
920
  if (message.type === "create") {
775
921
  // E2B (Linux) sandboxes return a url directly.
776
922
  // We still need to wait for runner.ready since sandbox-agent.js runs inside E2B.
777
923
  const isE2B = !!reply.url;
778
-
779
- const runnerIp = reply.runner && reply.runner.ip;
780
- const noVncPort = reply.runner && reply.runner.noVncPort;
781
- const runnerVncUrl = reply.runner && reply.runner.vncUrl;
924
+
925
+ let runnerIp = reply.runner && reply.runner.ip;
926
+ let noVncPort = reply.runner && reply.runner.noVncPort;
927
+ let runnerVncUrl = reply.runner && reply.runner.vncUrl;
782
928
 
783
929
  // Log image version info (AMI for Windows, E2B template for Linux)
784
930
  if (reply.imageVersion) {
@@ -801,12 +947,12 @@ const createSandbox = function (emitter, analytics, sessionInstance) {
801
947
  // For presence-based Windows runners (reply.runner already set), the runner
802
948
  // is already listening so we can skip the wait.
803
949
  var self = this;
804
- const needsReadyWait = this._sessionChannel && (isE2B || !reply.runner);
950
+ const needsReadyWait = this._sessionChannel && (isE2B || !reply.runner || (reply.runner && reply.runner.os === 'windows'));
805
951
  if (needsReadyWait) {
806
952
  logger.log('Waiting for runner agent to signal readiness...');
807
- // E2B (Linux) sandboxes need extra time: S3 upload + npm install can add 60-120s on top of sandbox boot
808
- // EC2 (Windows) cold starts can be slow due to AV scanning and native module loading
809
- var readyTimeout = isE2B ? 300000 : 180000; // 5 min for E2B (S3+npm), 3 min for EC2
953
+ // E2B (Linux) sandboxes need extra time: S3 upload + npm install can add 60-120s on top of sandbox boot.
954
+ // Hosted EC2 (Windows) can also take several minutes when launching/provisioning in background.
955
+ var readyTimeout = isE2B ? 300000 : 300000; // 5 min for E2B and EC2
810
956
  await new Promise(function (resolve, reject) {
811
957
  var resolved = false;
812
958
  var waitStart = Date.now();
@@ -817,7 +963,7 @@ const createSandbox = function (emitter, analytics, sessionInstance) {
817
963
  clearInterval(progressTimer);
818
964
  self._sessionChannel.unsubscribe('control', onCtrl);
819
965
  // Update runner info if provided
820
- if (data && data.os) reply.runner = reply.runner || {};
966
+ if (data && (data.os || data.ip)) reply.runner = reply.runner || {};
821
967
  if (data && data.os && reply.runner) reply.runner.os = data.os;
822
968
  if (data && data.ip && reply.runner) reply.runner.ip = data.ip;
823
969
  if (data && data.runnerVersion && reply.runner) reply.runner.version = data.runnerVersion;
@@ -902,6 +1048,13 @@ const createSandbox = function (emitter, analytics, sessionInstance) {
902
1048
  }
903
1049
  });
904
1050
  }
1051
+
1052
+ // Refresh runner metadata after runner.ready wait because the wait handler
1053
+ // can populate reply.runner fields from control messages.
1054
+ runnerIp = reply.runner && reply.runner.ip;
1055
+ noVncPort = reply.runner && reply.runner.noVncPort;
1056
+ runnerVncUrl = reply.runner && reply.runner.vncUrl;
1057
+
905
1058
  // Prefer the full vncUrl reported by the runner (infrastructure-agnostic).
906
1059
  // For E2B sandboxes, use the url from the API reply.
907
1060
  // Fall back to constructing from ip + noVncPort for older runners.
@@ -192,7 +192,7 @@ Tests should use `context.ip || process.env.TD_IP` for the IP configuration:
192
192
 
193
193
  ```javascript
194
194
  import { describe, it } from "vitest";
195
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
195
+ import { TestDriver } from "testdriverai/vitest/hooks";
196
196
 
197
197
  describe("My Test", () => {
198
198
  it("should run on self-hosted instance", async (context) => {
@@ -571,7 +571,7 @@ When using multi-platform testing, read the `TD_OS` environment variable in your
571
571
 
572
572
  ```javascript tests/cross-platform.test.mjs
573
573
  import { describe, expect, it } from "vitest";
574
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
574
+ import { TestDriver } from "testdriverai/vitest/hooks";
575
575
 
576
576
  describe("Cross-platform tests", () => {
577
577
  it("should work on both Linux and Windows", async (context) => {
@@ -44,7 +44,7 @@ const testdriver = new TestDriver(apiKey, options)
44
44
  Enable or disable console logging
45
45
  </ParamField>
46
46
 
47
- <ParamField path="autoScreenshots" type="boolean" default="true">
47
+ <ParamField path="autoScreenshots" type="boolean" default="false">
48
48
  Automatically capture screenshots before and after each command. Screenshots are saved to `.testdriver/screenshots/<test>/` with descriptive filenames that include the line number and action name. Format: `<seq>-<action>-<phase>-L<line>-<description>.png`
49
49
  </ParamField>
50
50
 
@@ -56,10 +56,6 @@ const testdriver = new TestDriver(apiKey, options)
56
56
  Reconnect to the last used sandbox instead of creating a new one. When `true`, provision methods (`chrome`, `vscode`, `installer`, etc.) will be skipped since the application is already running. Throws error if no previous sandbox exists.
57
57
  </ParamField>
58
58
 
59
- <ParamField path="keepAlive" type="number" default="60000">
60
- Keep sandbox alive for the specified number of milliseconds after disconnect. Set to `0` to terminate immediately on disconnect. Useful for debugging or reconnecting to the same sandbox.
61
- </ParamField>
62
-
63
59
  <ParamField path="preview" type="string" default="browser">
64
60
  Preview mode for live test visualization:
65
61
  - `"browser"` — Opens debugger in default browser (default)
@@ -274,6 +270,10 @@ await testdriver.connect(options)
274
270
  <ParamField path="headless" type="boolean" default="false">
275
271
  **Deprecated**: Use `preview: "none"` instead. Run in headless mode without opening the debugger.
276
272
  </ParamField>
273
+
274
+ <ParamField path="keepAlive" type="number" default="60000">
275
+ Keep sandbox alive for the specified number of milliseconds after disconnect. Set to `0` to terminate immediately on disconnect. Useful for debugging or reconnecting to the same sandbox.
276
+ </ParamField>
277
277
  </Expandable>
278
278
  </ParamField>
279
279
 
@@ -39,7 +39,7 @@ const testdriver = TestDriver(context, {
39
39
 
40
40
  // === Recording & Screenshots ===
41
41
  dashcam: true, // Enable/disable Dashcam video recording (default: true)
42
- autoScreenshots: true, // Capture screenshots before/after each command (default: true)
42
+ autoScreenshots: true, // Capture screenshots before/after each command (default: false)
43
43
 
44
44
  // === AI Configuration ===
45
45
  ai: { // Global AI sampling configuration
@@ -328,6 +328,20 @@ Understanding the directory structure helps with efficient screenshot viewing:
328
328
  - All screenshots are PNG format
329
329
  - Disable automatic screenshots with `autoScreenshots: false` if needed
330
330
 
331
+ ## Interaction List Sidebar (Source of Truth)
332
+
333
+ When viewing a test run in the TestDriver console, the **interaction list sidebar** displays a screenshot for each interaction call (find, click, type, assert, etc.). These screenshots show exactly what was on the screen at the time each interaction was executed.
334
+
335
+ <Note>
336
+ **The sidebar screenshots are the source of truth.** If a test is behaving unexpectedly, check the screenshot attached to the specific interaction in the sidebar — it shows precisely what the AI saw when making its decision. This is more reliable than inferring screen state from test logs or local screenshots alone.
337
+ </Note>
338
+
339
+ Use the interaction list to:
340
+ - **Verify what the AI saw** — confirm the correct page/state was visible when `find()` or `assert()` ran
341
+ - **Debug misclicks** — see whether the target element was actually on screen
342
+ - **Identify timing issues** — spot cases where the UI hadn't finished loading before an interaction fired
343
+ - **Compare runs** — review interaction screenshots across multiple runs to catch flaky behavior
344
+
331
345
  ## Integration with Test Development
332
346
 
333
347
  ### During MCP Interactive Development
@@ -28,7 +28,7 @@ await testdriver.provision.chrome({
28
28
 
29
29
  ```javascript
30
30
  import { describe, expect, it } from "vitest";
31
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
31
+ import { TestDriver } from "testdriverai/vitest/hooks";
32
32
 
33
33
  describe("Login Flow", () => {
34
34
  it("should log in successfully", async (context) => {
@@ -113,7 +113,7 @@ await testdriver.provision.chromeExtension({
113
113
 
114
114
  ```javascript
115
115
  import { describe, expect, it } from "vitest";
116
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
116
+ import { TestDriver } from "testdriverai/vitest/hooks";
117
117
 
118
118
  describe("Chrome Extension Test", () => {
119
119
  it("should load and interact with extension", async (context) => {
@@ -187,7 +187,7 @@ const filePath = await testdriver.provision.installer({
187
187
 
188
188
  ```javascript
189
189
  import { describe, expect, it } from "vitest";
190
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
190
+ import { TestDriver } from "testdriverai/vitest/hooks";
191
191
 
192
192
  describe("Desktop App Test", () => {
193
193
  it("should install and launch app", async (context) => {
@@ -209,7 +209,7 @@ describe("Desktop App Test", () => {
209
209
 
210
210
  ```javascript
211
211
  import { describe, expect, it } from "vitest";
212
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
212
+ import { TestDriver } from "testdriverai/vitest/hooks";
213
213
 
214
214
  describe("Windows App Test", () => {
215
215
  it("should install on Windows", async (context) => {
@@ -276,7 +276,7 @@ await testdriver.provision.vscode({
276
276
 
277
277
  ```javascript
278
278
  import { describe, expect, it } from "vitest";
279
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
279
+ import { TestDriver } from "testdriverai/vitest/hooks";
280
280
 
281
281
  describe("VS Code Test", () => {
282
282
  it("should open workspace with extensions", async (context) => {
@@ -366,6 +366,39 @@ This two-phase approach gives the AI a higher-resolution view of the target area
366
366
  - You need extra precision for closely spaced UI elements
367
367
  </Tip>
368
368
 
369
+ ## Verify Mode
370
+
371
+ Verify mode is **disabled by default**. When enabled, a second AI call checks that the coordinates returned by `find()` actually correspond to the requested element, catching hallucinated or incorrect positions.
372
+
373
+ ```javascript
374
+ // Enable verification for critical interactions
375
+ const deleteBtn = await testdriver.find('delete account button', { verify: true });
376
+ await deleteBtn.click();
377
+ ```
378
+
379
+ ### How Verify Mode Works
380
+
381
+ 1. **Phase 1**: AI locates the element and returns coordinates
382
+ 2. **Phase 2**: A second AI call examines the screenshot at those coordinates to confirm the element matches the description
383
+ 3. **Result**: If verification fails, the find is retried or marked as not found
384
+
385
+ ### Combining Zoom and Verify
386
+
387
+ For maximum accuracy, enable both `zoom` and `verify` together. This is useful for critical interactions where clicking the wrong element could cause cascading failures:
388
+
389
+ ```javascript
390
+ // Maximum accuracy: zoom for precision + verify to catch hallucinations
391
+ const element = await testdriver.find('small cancel icon next to the subscription', {
392
+ zoom: true,
393
+ verify: true,
394
+ });
395
+ await element.click();
396
+ ```
397
+
398
+ <Warning>
399
+ Both `zoom` and `verify` add extra AI calls per `find()` invocation, which increases latency and API usage. When both are enabled, each find may make up to 3 AI calls. **Rate limiting may occur** if many find calls use these options in rapid succession. Use them selectively for critical interactions rather than on every find call.
400
+ </Warning>
401
+
369
402
  ## Cache Options
370
403
 
371
404
  Control caching behavior to optimize performance, especially when using dynamic variables in prompts.
@@ -75,7 +75,7 @@ To prevent tests from failing due to exceeding your license slot limit, we recom
75
75
 
76
76
  ```javascript vitest.config.mjs
77
77
  import { defineConfig } from 'vitest/config';
78
- import { TestDriver } from 'testdriverai/vitest';
78
+ import TestDriver from 'testdriverai/vitest';
79
79
 
80
80
  export default defineConfig({
81
81
  test: {
@@ -0,0 +1,262 @@
1
+ ---
2
+ name: testdriver:machine-setup
3
+ description: Configure Linux and Windows sandboxes, persist machines between runs, and install custom software
4
+ ---
5
+ <!-- Generated from machine-setup.mdx. DO NOT EDIT. -->
6
+
7
+ TestDriver provisions a fresh cloud VM for every test by default. This guide covers how to configure Linux and Windows machines, reduce startup time by keeping machines alive between runs, use provision scripts for repeatable setup, and install custom software on the fly.
8
+
9
+ ---
10
+
11
+ ## Linux Machines
12
+
13
+ Linux is the default operating system. No extra configuration is required.
14
+
15
+ ```javascript
16
+ import { describe, expect, it } from "vitest";
17
+ import { TestDriver } from "testdriverai/vitest/hooks";
18
+
19
+ describe("My Test", () => {
20
+ it("runs on Linux", async (context) => {
21
+ const testdriver = TestDriver(context);
22
+
23
+ await testdriver.provision.chrome({ url: "https://example.com" });
24
+
25
+ const result = await testdriver.assert("the page loaded successfully");
26
+ expect(result).toBeTruthy();
27
+ });
28
+ });
29
+ ```
30
+
31
+ ### Common Linux Options
32
+
33
+ | Option | Type | Default | Description |
34
+ |--------|------|---------|-------------|
35
+ | `os` | string | `"linux"` | Operating system |
36
+ | `resolution` | string | `"1366x768"` | Screen resolution |
37
+ | `e2bTemplateId` | string | — | Custom E2B template ID (see [Self-Hosted](/v7/self-hosted)) |
38
+ | `keepAlive` | number | `60000` | Ms to keep VM alive after disconnect |
39
+ | `reconnect` | boolean | `false` | Reconnect to last used sandbox |
40
+
41
+ ```javascript
42
+ const testdriver = TestDriver(context, {
43
+ os: "linux",
44
+ resolution: "1920x1080",
45
+ keepAlive: 5 * 60 * 1000, // keep alive 5 minutes
46
+ });
47
+ ```
48
+
49
+ ---
50
+
51
+ ## Windows Machines
52
+
53
+ Set `os: "windows"` to provision a Windows VM instead. Everything else works the same way.
54
+
55
+ ```javascript
56
+ const testdriver = TestDriver(context, {
57
+ os: "windows",
58
+ });
59
+
60
+ await testdriver.provision.chrome({ url: "https://example.com" });
61
+ ```
62
+
63
+ Windows sandboxes use EC2 instances and take longer to boot than Linux (E2B) sandboxes — typically 1–3 minutes for a cold start. See [Keeping Machines Alive](#keeping-machines-alive-between-runs) below to avoid this cost on repeated runs.
64
+
65
+ ### Common Windows Options
66
+
67
+ | Option | Type | Default | Description |
68
+ |--------|------|---------|-------------|
69
+ | `os` | string | — | Set to `"windows"` |
70
+ | `resolution` | string | `"1366x768"` | Screen resolution |
71
+ | `sandboxAmi` | string | — | Custom AMI ID (self-hosted) |
72
+ | `sandboxInstance` | string | — | EC2 instance type (self-hosted) |
73
+ | `keepAlive` | number | `60000` | Ms to keep VM alive after disconnect |
74
+ | `reconnect` | boolean | `false` | Reconnect to last used sandbox |
75
+
76
+ ```javascript
77
+ const testdriver = TestDriver(context, {
78
+ os: "windows",
79
+ resolution: "1920x1080",
80
+ keepAlive: 10 * 60 * 1000, // keep alive 10 minutes
81
+ });
82
+ ```
83
+
84
+ ---
85
+
86
+ ## Keeping Machines Alive Between Runs
87
+
88
+ Windows (and Linux) cold starts can be expensive if you're iterating quickly. Use `keepAlive` + `reconnect` to reuse the same VM across multiple test runs.
89
+
90
+ ### Step 1 — Start the machine with a long `keepAlive`
91
+
92
+ ```javascript
93
+ // first.test.mjs
94
+ const testdriver = TestDriver(context, {
95
+ os: "windows",
96
+ keepAlive: 30 * 60 * 1000, // keep alive 30 minutes after this test ends
97
+ });
98
+
99
+ await testdriver.provision.chrome({ url: "https://example.com" });
100
+ // ... your test steps
101
+ ```
102
+
103
+ When this test finishes, the sandbox stays running for 30 minutes instead of being terminated immediately.
104
+
105
+ ### Step 2 — Reconnect in subsequent runs
106
+
107
+ ```javascript
108
+ // second.test.mjs
109
+ const testdriver = TestDriver(context, {
110
+ os: "windows",
111
+ reconnect: true, // reads last sandbox ID from disk, skips provisioning
112
+ });
113
+
114
+ // provision.chrome() is automatically skipped — Chrome is already open
115
+ await testdriver.find("Sign In button").click();
116
+ ```
117
+
118
+ When `reconnect: true` is set:
119
+ - The SDK reads the last sandbox ID from a local file via `getLastSandboxId()`
120
+ - All `provision.*` calls are silently skipped since the application is already running
121
+ - An error is thrown if no previous sandbox ID is found
122
+
123
+ <Tip>
124
+ You can also supply a sandbox ID directly: `connect({ sandboxId: "sandbox-abc123" })`. Use `testdriver.getLastSandboxId()` to retrieve the ID of the last sandbox for scripting purposes.
125
+ </Tip>
126
+
127
+ ### How `keepAlive` works
128
+
129
+ `keepAlive` is a duration in milliseconds. After the SDK disconnects, the server keeps the VM running for that long before terminating it. The default is `60000` (1 minute). Set it to `0` to terminate immediately on disconnect.
130
+
131
+ ```javascript
132
+ const testdriver = TestDriver(context, {
133
+ keepAlive: 0, // terminate immediately
134
+ // keepAlive: 60000, // default — 1 minute
135
+ // keepAlive: 600000, // 10 minutes
136
+ // keepAlive: 3600000, // 1 hour
137
+ });
138
+ ```
139
+
140
+ <Warning>
141
+ Machines kept alive beyond your test session continue to consume credits. Always set a `keepAlive` value appropriate for your workflow.
142
+ </Warning>
143
+
144
+ ---
145
+
146
+ ## Using Provision Scripts
147
+
148
+ Provision scripts let you run arbitrary setup steps before your test starts — downloading fixtures, seeding a database, configuring environment variables, and more. Use `testdriver.exec()` to run shell or PowerShell commands directly in the sandbox.
149
+
150
+ <Card
151
+ title="exec() Reference"
152
+ icon="terminal"
153
+ href="/v7/exec"
154
+ >
155
+ Full reference for running shell and PowerShell commands in the sandbox.
156
+ </Card>
157
+
158
+ ### Linux setup script
159
+
160
+ ```javascript
161
+ await testdriver.provision.chrome({ url: "https://myapp.com" });
162
+
163
+ // Run a setup script from your repo
164
+ await testdriver.exec("sh", `
165
+ curl -s https://myapp.com/api/reset-test-db -X POST
166
+ echo "Test DB reset"
167
+ `, 30000);
168
+ ```
169
+
170
+ ### Windows setup script (PowerShell)
171
+
172
+ ```javascript
173
+ await testdriver.provision.chrome({ url: "https://myapp.com" });
174
+
175
+ await testdriver.exec("pwsh", `
176
+ $env:API_URL = "https://staging.myapp.com"
177
+ Write-Host "Environment configured"
178
+ `, 15000);
179
+ ```
180
+
181
+ ### Clone a repo and run a script
182
+
183
+ ```javascript
184
+ await testdriver.exec("sh", `
185
+ git clone https://github.com/myorg/test-fixtures.git /tmp/fixtures
186
+ bash /tmp/fixtures/seed.sh
187
+ `, 120000);
188
+ ```
189
+
190
+ ---
191
+
192
+ ## Installing Custom Software
193
+
194
+ You can install software at the start of a test using `exec()`. This works for any package available via `apt`, `brew`, `choco`, `winget`, npm, pip, or direct download.
195
+
196
+ ### Linux — apt packages
197
+
198
+ ```javascript
199
+ await testdriver.exec("sh", `
200
+ sudo apt-get update -qq
201
+ sudo apt-get install -y ffmpeg imagemagick
202
+ `, 120000);
203
+ ```
204
+
205
+ ### Linux — Node.js tools
206
+
207
+ ```javascript
208
+ await testdriver.exec("sh", "npm install -g @playwright/test", 60000);
209
+ ```
210
+
211
+ ### Windows — winget
212
+
213
+ ```javascript
214
+ await testdriver.exec("pwsh", `
215
+ winget install --id=7zip.7zip -e --silent
216
+ `, 120000);
217
+ ```
218
+
219
+ ### Windows — Chocolatey
220
+
221
+ ```javascript
222
+ await testdriver.exec("pwsh", `
223
+ choco install googlechrome --yes --no-progress
224
+ `, 180000);
225
+ ```
226
+
227
+ ### Download and run an installer
228
+
229
+ ```javascript
230
+ // Linux
231
+ await testdriver.exec("sh", `
232
+ curl -L https://example.com/installer.sh -o /tmp/installer.sh
233
+ chmod +x /tmp/installer.sh
234
+ /tmp/installer.sh --silent
235
+ `, 300000);
236
+
237
+ // Windows
238
+ await testdriver.exec("pwsh", `
239
+ Invoke-WebRequest -Uri "https://example.com/installer.exe" -OutFile "$env:TEMP\\installer.exe"
240
+ Start-Process "$env:TEMP\\installer.exe" -ArgumentList "/S" -Wait
241
+ `, 300000);
242
+ ```
243
+
244
+ <Note>
245
+ Installing software at test start adds to your test duration. For software you use in every test, consider preloading it into a custom VM image via the Enterprise self-hosted plan.
246
+ </Note>
247
+
248
+ ---
249
+
250
+ ## Want Software Pre-Installed on Every Machine?
251
+
252
+ Installing packages at runtime works well for occasional or lightweight dependencies. But if you're installing the same 5-minute setup on every test run, you're wasting time and credits.
253
+
254
+ With the **Self-Hosted Enterprise plan** you get access to our golden VM base image and Packer scripts, so you can bake your applications, dependencies, and configuration directly into a custom AMI. Tests spin up with everything already installed — zero setup time.
255
+
256
+ <Card
257
+ title="Self-Hosted Enterprise"
258
+ icon="server"
259
+ href="/v7/self-hosted"
260
+ >
261
+ Preload software, configure custom hardware, and run unlimited tests with a flat license fee. Our team assists with deployment and setup.
262
+ </Card>
@@ -120,7 +120,7 @@ console.log(`Found ${buttons.length} buttons`);
120
120
 
121
121
  ```javascript
122
122
  import { describe, expect, it } from "vitest";
123
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
123
+ import { TestDriver } from "testdriverai/vitest/hooks";
124
124
 
125
125
  describe("Login Page", () => {
126
126
  it("should have expected form elements", async (context) => {
@@ -49,7 +49,7 @@ Now import and use these helpers in any test:
49
49
 
50
50
  ```javascript test/checkout.test.mjs
51
51
  import { describe, expect, it } from "vitest";
52
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
52
+ import { TestDriver } from "testdriverai/vitest/hooks";
53
53
  import { login } from './helpers/auth.js';
54
54
 
55
55
  describe("Checkout", () => {
@@ -120,7 +120,7 @@ Use the page object in your tests:
120
120
 
121
121
  ```javascript test/auth.test.mjs
122
122
  import { describe, expect, it } from "vitest";
123
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
123
+ import { TestDriver } from "testdriverai/vitest/hooks";
124
124
  import { LoginPage } from './pages/LoginPage.js';
125
125
 
126
126
  describe("Authentication", () => {
@@ -190,7 +190,7 @@ export async function setupAuthenticatedSession(testdriver, user = testUsers.reg
190
190
 
191
191
  ```javascript test/admin.test.mjs
192
192
  import { describe, expect, it } from "vitest";
193
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
193
+ import { TestDriver } from "testdriverai/vitest/hooks";
194
194
  import { testUsers, testUrls, setupAuthenticatedSession } from './fixtures/index.js';
195
195
 
196
196
  describe("Admin Panel", () => {
@@ -101,7 +101,7 @@ Set `maxConcurrency` in your Vitest config to match your license slot limit:
101
101
 
102
102
  ```javascript vitest.config.mjs
103
103
  import { defineConfig } from 'vitest/config';
104
- import { TestDriver } from 'testdriverai/vitest';
104
+ import TestDriver from 'testdriverai/vitest';
105
105
 
106
106
  export default defineConfig({
107
107
  test: {
@@ -9,7 +9,7 @@ description: Capture and save screenshots during test execution
9
9
  Capture a screenshot of the current screen and automatically save it to a local file. Screenshots are organized by test file for easy debugging and review.
10
10
 
11
11
  <Note>
12
- **Automatic Screenshots (Default: Enabled)**: TestDriver automatically captures screenshots before and after every command (click, type, find, etc.). These are saved with descriptive filenames like `001-click-before-L42-submit-button.png` that include the line number from your test file. You can disable this with `autoScreenshots: false` in your TestDriver options.
12
+ **Automatic Screenshots**: TestDriver can automatically capture screenshots before and after every command (click, type, find, etc.). These are saved with descriptive filenames like `001-click-before-L42-submit-button.png` that include the line number from your test file. Enable this with `autoScreenshots: true` in your TestDriver options.
13
13
  </Note>
14
14
 
15
15
  ## Syntax
@@ -50,7 +50,7 @@ Screenshots are automatically saved to `.testdriver/screenshots/<test-file-name>
50
50
 
51
51
  ### Automatic Screenshot Naming
52
52
 
53
- When `autoScreenshots` is enabled (default), filenames follow this format:
53
+ When `autoScreenshots` is enabled, filenames follow this format:
54
54
 
55
55
  `<seq>-<action>-<phase>-L<line>-<description>.png`
56
56
 
@@ -91,7 +91,7 @@ await testdriver.screenshot("after-click");
91
91
 
92
92
  ```javascript
93
93
  import { describe, expect, it } from "vitest";
94
- import { TestDriver } from "testdriverai/lib/vitest/hooks.mjs";
94
+ import { TestDriver } from "testdriverai/vitest/hooks";
95
95
 
96
96
  describe("Login Flow", () => {
97
97
  it("should log in successfully", async (context) => {
@@ -188,7 +188,7 @@ If an error occurs, the phase will be `error` instead of `after`.
188
188
 
189
189
  <AccordionGroup>
190
190
  <Accordion title="Let automatic screenshots do the work">
191
- With `autoScreenshots: true` (default), you get comprehensive coverage without adding manual `screenshot()` calls. Only add manual screenshots for specific named checkpoints.
191
+ With `autoScreenshots: true`, you get comprehensive coverage without adding manual `screenshot()` calls. Only add manual screenshots for specific named checkpoints.
192
192
  </Accordion>
193
193
 
194
194
  <Accordion title="Use screenshots for debugging flaky tests">
@@ -26,7 +26,7 @@ Scroll the page or active element in any direction using mouse wheel or keyboard
26
26
  ## Syntax
27
27
 
28
28
  ```javascript
29
- await testdriver.scroll(direction, amount, method)
29
+ await testdriver.scroll(direction, options)
30
30
  ```
31
31
 
32
32
  ## Parameters
@@ -35,12 +35,12 @@ await testdriver.scroll(direction, amount, method)
35
35
  Direction to scroll: `'up'`, `'down'`, `'left'`, `'right'`
36
36
  </ParamField>
37
37
 
38
- <ParamField path="amount" type="number" default="3">
39
- Amount to scroll in clicks (scroll wheel units). Each click is roughly 100px in a browser.
40
- </ParamField>
41
-
42
- <ParamField path="method" type="string" default="mouse">
43
- Scroll method: `'mouse'` or `'keyboard'`
38
+ <ParamField path="options" type="object">
39
+ <Expandable title="properties">
40
+ <ParamField path="amount" type="number" default="300">
41
+ Amount to scroll in pixels
42
+ </ParamField>
43
+ </Expandable>
44
44
  </ParamField>
45
45
 
46
46
  ## Returns
@@ -56,33 +56,33 @@ await testdriver.scroll(direction, amount, method)
56
56
  await testdriver.scroll();
57
57
 
58
58
  // Scroll down by a specific amount
59
- await testdriver.scroll('down', 5);
59
+ await testdriver.scroll('down', { amount: 5 });
60
60
 
61
61
  // Scroll up
62
62
  await testdriver.scroll('up');
63
63
 
64
64
  // Scroll up by a specific amount
65
- await testdriver.scroll('up', 2);
65
+ await testdriver.scroll('up', { amount: 2 });
66
66
  ```
67
67
 
68
68
  ### Horizontal Scrolling
69
69
 
70
70
  ```javascript
71
71
  // Scroll right
72
- await testdriver.scroll('right', 3);
72
+ await testdriver.scroll('right', { amount: 3 });
73
73
 
74
74
  // Scroll left
75
- await testdriver.scroll('left', 3);
75
+ await testdriver.scroll('left', { amount: 3 });
76
76
  ```
77
77
 
78
78
  ### Scroll Methods
79
79
 
80
80
  ```javascript
81
- // Mouse wheel scroll (smooth)
82
- await testdriver.scroll('down', 3, 'mouse');
81
+ // Mouse wheel scroll (default)
82
+ await testdriver.scroll('down', { amount: 3 });
83
83
 
84
- // Keyboard scroll (uses Page Down/Up, more compatible)
85
- await testdriver.scroll('down', 3, 'keyboard');
84
+ // For keyboard-based scrolling, use pressKeys instead
85
+ await testdriver.pressKeys(['pagedown']);
86
86
  ```
87
87
 
88
88
  ## Best Practices
@@ -101,7 +101,7 @@ await testdriver.scroll('down', 3, 'keyboard');
101
101
  // await testdriver.find('page background').click();
102
102
 
103
103
  // Now scroll will work properly
104
- await testdriver.scroll('down', 3);
104
+ await testdriver.scroll('down');
105
105
 
106
106
  // If scroll still doesn't work, use Page Down directly
107
107
  // await testdriver.pressKeys(['pagedown']);
@@ -109,14 +109,14 @@ await testdriver.scroll('down', 3, 'keyboard');
109
109
  </Check>
110
110
 
111
111
  <Check>
112
- **Choose the right scroll method**
112
+ **Control scroll distance with the options object**
113
113
 
114
114
  ```javascript
115
- // For web pages, mouse scroll is usually smoother
116
- await testdriver.scroll('down', 3, 'mouse');
115
+ // For web pages, mouse scroll works well
116
+ await testdriver.scroll('down', { amount: 3 });
117
117
 
118
- // For desktop apps or when mouse doesn't work
119
- await testdriver.scroll('down', 3, 'keyboard');
118
+ // For desktop apps or when mouse doesn't work, use keyboard
119
+ await testdriver.pressKeys(['pagedown']);
120
120
  ```
121
121
  </Check>
122
122
 
@@ -133,7 +133,7 @@ await testdriver.scroll('down', 3, 'keyboard');
133
133
  ```javascript
134
134
  // Scroll multiple times for infinite scroll
135
135
  for (let i = 0; i < 5; i++) {
136
- await testdriver.scroll('down', 5);
136
+ await testdriver.scroll('down', { amount: 5 });
137
137
  await new Promise(r => setTimeout(r, 1000)); // Wait for load
138
138
  }
139
139
  ```
@@ -142,7 +142,7 @@ await testdriver.scroll('down', 3, 'keyboard');
142
142
  <Accordion title="Horizontal Gallery">
143
143
  ```javascript
144
144
  // Navigate horizontal carousel
145
- await testdriver.scroll('right', 3);
145
+ await testdriver.scroll('right', { amount: 3 });
146
146
  await new Promise(r => setTimeout(r, 500));
147
147
 
148
148
  const nextImage = await testdriver.find('next image in carousel');
@@ -174,7 +174,7 @@ describe('Scrolling', () => {
174
174
  await testdriver.focusApplication('Google Chrome');
175
175
 
176
176
  // Scroll down the page
177
- await testdriver.scroll('down', 5);
177
+ await testdriver.scroll('down', { amount: 5 });
178
178
 
179
179
  // Click footer link
180
180
  const privacyLink = await testdriver.find('Privacy Policy link');
@@ -188,7 +188,7 @@ describe('Scrolling', () => {
188
188
 
189
189
  // Scroll multiple times to load content
190
190
  for (let i = 0; i < 3; i++) {
191
- await testdriver.scroll('down', 5);
191
+ await testdriver.scroll('down', { amount: 5 });
192
192
  await new Promise(r => setTimeout(r, 1500)); // Wait for load
193
193
  }
194
194
 
package/docs/docs.json CHANGED
@@ -68,6 +68,7 @@
68
68
  "pages": [
69
69
  "/v7/generating-tests",
70
70
  "/v7/device-config",
71
+ "/v7/machine-setup",
71
72
  "/v7/locating-elements",
72
73
  "/v7/waiting-for-elements",
73
74
  "/v7/performing-actions",
@@ -0,0 +1,262 @@
1
+ ---
2
+ title: "Machine Setup"
3
+ description: "Configure Linux and Windows sandboxes, persist machines between runs, and install custom software"
4
+ icon: "desktop"
5
+ ---
6
+
7
+ TestDriver provisions a fresh cloud VM for every test by default. This guide covers how to configure Linux and Windows machines, reduce startup time by keeping machines alive between runs, use provision scripts for repeatable setup, and install custom software on the fly.
8
+
9
+ ---
10
+
11
+ ## Linux Machines
12
+
13
+ Linux is the default operating system. No extra configuration is required.
14
+
15
+ ```javascript
16
+ import { describe, expect, it } from "vitest";
17
+ import { TestDriver } from "testdriverai/vitest/hooks";
18
+
19
+ describe("My Test", () => {
20
+ it("runs on Linux", async (context) => {
21
+ const testdriver = TestDriver(context);
22
+
23
+ await testdriver.provision.chrome({ url: "https://example.com" });
24
+
25
+ const result = await testdriver.assert("the page loaded successfully");
26
+ expect(result).toBeTruthy();
27
+ });
28
+ });
29
+ ```
30
+
31
+ ### Common Linux Options
32
+
33
+ | Option | Type | Default | Description |
34
+ |--------|------|---------|-------------|
35
+ | `os` | string | `"linux"` | Operating system |
36
+ | `resolution` | string | `"1366x768"` | Screen resolution |
37
+ | `e2bTemplateId` | string | — | Custom E2B template ID (see [Self-Hosted](/v7/self-hosted)) |
38
+ | `keepAlive` | number | `60000` | Ms to keep VM alive after disconnect |
39
+ | `reconnect` | boolean | `false` | Reconnect to last used sandbox |
40
+
41
+ ```javascript
42
+ const testdriver = TestDriver(context, {
43
+ os: "linux",
44
+ resolution: "1920x1080",
45
+ keepAlive: 5 * 60 * 1000, // keep alive 5 minutes
46
+ });
47
+ ```
48
+
49
+ ---
50
+
51
+ ## Windows Machines
52
+
53
+ Set `os: "windows"` to provision a Windows VM instead. Everything else works the same way.
54
+
55
+ ```javascript
56
+ const testdriver = TestDriver(context, {
57
+ os: "windows",
58
+ });
59
+
60
+ await testdriver.provision.chrome({ url: "https://example.com" });
61
+ ```
62
+
63
+ Windows sandboxes use EC2 instances and take longer to boot than Linux (E2B) sandboxes — typically 1–3 minutes for a cold start. See [Keeping Machines Alive](#keeping-machines-alive-between-runs) below to avoid this cost on repeated runs.
64
+
65
+ ### Common Windows Options
66
+
67
+ | Option | Type | Default | Description |
68
+ |--------|------|---------|-------------|
69
+ | `os` | string | — | Set to `"windows"` |
70
+ | `resolution` | string | `"1366x768"` | Screen resolution |
71
+ | `sandboxAmi` | string | — | Custom AMI ID (self-hosted) |
72
+ | `sandboxInstance` | string | — | EC2 instance type (self-hosted) |
73
+ | `keepAlive` | number | `60000` | Ms to keep VM alive after disconnect |
74
+ | `reconnect` | boolean | `false` | Reconnect to last used sandbox |
75
+
76
+ ```javascript
77
+ const testdriver = TestDriver(context, {
78
+ os: "windows",
79
+ resolution: "1920x1080",
80
+ keepAlive: 10 * 60 * 1000, // keep alive 10 minutes
81
+ });
82
+ ```
83
+
84
+ ---
85
+
86
+ ## Keeping Machines Alive Between Runs
87
+
88
+ Windows (and Linux) cold starts can be expensive if you're iterating quickly. Use `keepAlive` + `reconnect` to reuse the same VM across multiple test runs.
89
+
90
+ ### Step 1 — Start the machine with a long `keepAlive`
91
+
92
+ ```javascript
93
+ // first.test.mjs
94
+ const testdriver = TestDriver(context, {
95
+ os: "windows",
96
+ keepAlive: 30 * 60 * 1000, // keep alive 30 minutes after this test ends
97
+ });
98
+
99
+ await testdriver.provision.chrome({ url: "https://example.com" });
100
+ // ... your test steps
101
+ ```
102
+
103
+ When this test finishes, the sandbox stays running for 30 minutes instead of being terminated immediately.
104
+
105
+ ### Step 2 — Reconnect in subsequent runs
106
+
107
+ ```javascript
108
+ // second.test.mjs
109
+ const testdriver = TestDriver(context, {
110
+ os: "windows",
111
+ reconnect: true, // reads last sandbox ID from disk, skips provisioning
112
+ });
113
+
114
+ // provision.chrome() is automatically skipped — Chrome is already open
115
+ await testdriver.find("Sign In button").click();
116
+ ```
117
+
118
+ When `reconnect: true` is set:
119
+ - The SDK reads the last sandbox ID from a local file via `getLastSandboxId()`
120
+ - All `provision.*` calls are silently skipped since the application is already running
121
+ - An error is thrown if no previous sandbox ID is found
122
+
123
+ <Tip>
124
+ You can also supply a sandbox ID directly: `await testdriver.connect({ sandboxId: "sandbox-abc123" })`. Use `testdriver.getLastSandboxId()` to retrieve the ID of the last sandbox for scripting purposes.
125
+ </Tip>
126
+
127
+ ### How `keepAlive` works
128
+
129
+ `keepAlive` is a duration in milliseconds. After the SDK disconnects, the server keeps the VM running for that long before terminating it. The default is `60000` (1 minute). Note: `keepAlive: 0` currently falls back to the default disconnect grace period rather than terminating immediately, so use a positive duration when you want to control the grace window explicitly.
130
+
131
+ ```javascript
132
+ const testdriver = TestDriver(context, {
133
+ keepAlive: 0, // currently uses the default 1 minute grace period
134
+ // keepAlive: 60000, // default — 1 minute
135
+ // keepAlive: 600000, // 10 minutes
136
+ // keepAlive: 3600000, // 1 hour
137
+ });
138
+ ```
139
+
140
+ <Warning>
141
+ Machines kept alive beyond your test session continue to consume credits. Always set a `keepAlive` value appropriate for your workflow.
142
+ </Warning>
143
+
144
+ ---
145
+
146
+ ## Using Provision Scripts
147
+
148
+ Provision scripts let you run arbitrary setup steps before your test starts — downloading fixtures, seeding a database, configuring environment variables, and more. Use `testdriver.exec()` to run shell or PowerShell commands directly in the sandbox.
149
+
150
+ <Card
151
+ title="exec() Reference"
152
+ icon="terminal"
153
+ href="/v7/exec"
154
+ >
155
+ Full reference for running shell and PowerShell commands in the sandbox.
156
+ </Card>
157
+
158
+ ### Linux setup script
159
+
160
+ ```javascript
161
+ await testdriver.provision.chrome({ url: "https://myapp.com" });
162
+
163
+ // Run a setup script from your repo
164
+ await testdriver.exec("sh", `
165
+ curl -s https://myapp.com/api/reset-test-db -X POST
166
+ echo "Test DB reset"
167
+ `, 30000);
168
+ ```
169
+
170
+ ### Windows setup script (PowerShell)
171
+
172
+ ```javascript
173
+ await testdriver.provision.chrome({ url: "https://myapp.com" });
174
+
175
+ await testdriver.exec("pwsh", `
176
+ $env:API_URL = "https://staging.myapp.com"
177
+ Write-Host "Environment configured"
178
+ `, 15000);
179
+ ```
180
+
181
+ ### Clone a repo and run a script
182
+
183
+ ```javascript
184
+ await testdriver.exec("sh", `
185
+ git clone https://github.com/myorg/test-fixtures.git /tmp/fixtures
186
+ bash /tmp/fixtures/seed.sh
187
+ `, 120000);
188
+ ```
189
+
190
+ ---
191
+
192
+ ## Installing Custom Software
193
+
194
+ You can install software at the start of a test using `exec()`. This works for any package available via `apt`, `brew`, `choco`, `winget`, npm, pip, or direct download.
195
+
196
+ ### Linux — apt packages
197
+
198
+ ```javascript
199
+ await testdriver.exec("sh", `
200
+ sudo apt-get update -qq
201
+ sudo apt-get install -y ffmpeg imagemagick
202
+ `, 120000);
203
+ ```
204
+
205
+ ### Linux — Node.js tools
206
+
207
+ ```javascript
208
+ await testdriver.exec("sh", "npm install -g @playwright/test", 60000);
209
+ ```
210
+
211
+ ### Windows — winget
212
+
213
+ ```javascript
214
+ await testdriver.exec("pwsh", `
215
+ winget install --id=7zip.7zip -e --silent
216
+ `, 120000);
217
+ ```
218
+
219
+ ### Windows — Chocolatey
220
+
221
+ ```javascript
222
+ await testdriver.exec("pwsh", `
223
+ choco install googlechrome --yes --no-progress
224
+ `, 180000);
225
+ ```
226
+
227
+ ### Download and run an installer
228
+
229
+ ```javascript
230
+ // Linux
231
+ await testdriver.exec("sh", `
232
+ curl -L https://example.com/installer.sh -o /tmp/installer.sh
233
+ chmod +x /tmp/installer.sh
234
+ /tmp/installer.sh --silent
235
+ `, 300000);
236
+
237
+ // Windows
238
+ await testdriver.exec("pwsh", `
239
+ Invoke-WebRequest -Uri "https://example.com/installer.exe" -OutFile "$env:TEMP\\installer.exe"
240
+ Start-Process "$env:TEMP\\installer.exe" -ArgumentList "/S" -Wait
241
+ `, 300000);
242
+ ```
243
+
244
+ <Note>
245
+ Installing software at test start adds to your test duration. For software you use in every test, consider preloading it into a custom VM image via the Enterprise self-hosted plan.
246
+ </Note>
247
+
248
+ ---
249
+
250
+ ## Want Software Pre-Installed on Every Machine?
251
+
252
+ Installing packages at runtime works well for occasional or lightweight dependencies. But if you're installing the same 5-minute setup on every test run, you're wasting time and credits.
253
+
254
+ With the **Self-Hosted Enterprise plan** you get access to our golden VM base image and Packer scripts, so you can bake your applications, dependencies, and configuration directly into a custom AMI. Tests spin up with everything already installed — zero setup time.
255
+
256
+ <Card
257
+ title="Self-Hosted Enterprise"
258
+ icon="server"
259
+ href="/v7/self-hosted"
260
+ >
261
+ Preload software, configure custom hardware, and run unlimited tests with a flat license fee. Our team assists with deployment and setup.
262
+ </Card>
@@ -1,5 +1,4 @@
1
1
  export const getDefaults = (context) => ({
2
2
  ip: context.ip || process.env.TD_IP,
3
- redraw: { enabled: false },
4
3
  preview: 'web',
5
4
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "testdriverai",
3
- "version": "7.9.59-test",
3
+ "version": "7.9.60-test",
4
4
  "description": "Next generation autonomous AI agent for end-to-end testing of web & desktop",
5
5
  "main": "sdk.js",
6
6
  "types": "sdk.d.ts",
package/sdk.js CHANGED
@@ -3850,7 +3850,7 @@ CAPTCHA_SOLVER_EOF`,
3850
3850
  const apiKey = this.config?.TD_API_KEY || '';
3851
3851
  const maskedKey = apiKey.length > 4 ? '***' + apiKey.slice(-4) : '(not set)';
3852
3852
  const env = process.env.TD_CHANNEL || process.env.TD_ENV || 'unknown';
3853
- const os = this.os || this.agent?.cliArgs?.options?.os || process.env.TD_OS || 'linux';
3853
+ const os = this.os || this.agent?.options?.os || process.env.TD_OS || 'linux';
3854
3854
  const sdkVersion = require('./package.json').version;
3855
3855
 
3856
3856
  // Always print local config immediately