@testnexus/locatai 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,83 @@
1
+ {
2
+ "name": "@testnexus/locatai",
3
+ "version": "1.7.0",
4
+ "description": "AI-powered self-healing locators for Playwright. When locators fail, AI automatically finds the correct element.",
5
+ "main": "dist/index.js",
6
+ "module": "dist/index.mjs",
7
+ "types": "dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.mjs",
12
+ "require": "./dist/index.js"
13
+ }
14
+ },
15
+ "files": [
16
+ "dist",
17
+ "README.md",
18
+ "LICENSE"
19
+ ],
20
+ "scripts": {
21
+ "build": "tsup src/index.ts --format cjs,esm --dts --clean",
22
+ "dev": "tsup src/index.ts --format cjs,esm --dts --watch",
23
+ "test": "SELF_HEAL=1 npx playwright test",
24
+ "test:headed": "SELF_HEAL=1 npx playwright test --headed",
25
+ "test:unit": "vitest run",
26
+ "test:heal": "SELF_HEAL=1 npx playwright test examples/element-zoo-healing.test.ts",
27
+ "test:ci": "npm run test:unit && npm run test:heal",
28
+ "lint": "eslint src/",
29
+ "prepublishOnly": "npm run build"
30
+ },
31
+ "keywords": [
32
+ "playwright",
33
+ "testing",
34
+ "automation",
35
+ "self-healing",
36
+ "ai",
37
+ "locators",
38
+ "e2e",
39
+ "end-to-end",
40
+ "web-testing",
41
+ "resilient-tests",
42
+ "gpt",
43
+ "openai",
44
+ "anthropic",
45
+ "claude",
46
+ "gemini",
47
+ "google",
48
+ "ollama",
49
+ "local-llm"
50
+ ],
51
+ "author": "Divyarajsinh Dodia",
52
+ "license": "MIT",
53
+ "repository": {
54
+ "type": "git",
55
+ "url": "git+https://github.com/Divyarajsinh-Dodia1617/LocatAi.git"
56
+ },
57
+ "bugs": {
58
+ "url": "https://github.com/Divyarajsinh-Dodia1617/LocatAi/issues"
59
+ },
60
+ "homepage": "https://github.com/Divyarajsinh-Dodia1617/LocatAi#readme",
61
+ "peerDependencies": {
62
+ "@playwright/test": ">=1.40.0"
63
+ },
64
+ "dependencies": {
65
+ "@anthropic-ai/sdk": "^0.74.0",
66
+ "@google/genai": "^1.41.0",
67
+ "ollama": "^0.6.3",
68
+ "openai": "^6.22.0",
69
+ "playwright-core": ">=1.40.0",
70
+ "zod": "^4.3.6"
71
+ },
72
+ "devDependencies": {
73
+ "@playwright/test": "^1.58.2",
74
+ "@types/node": "^25.2.3",
75
+ "dotenv": "^17.3.1",
76
+ "tsup": "^8.5.1",
77
+ "typescript": "^5.9.3",
78
+ "vitest": "^4.0.18"
79
+ },
80
+ "engines": {
81
+ "node": ">=18.0.0"
82
+ }
83
+ }
package/readme.md ADDED
@@ -0,0 +1,361 @@
1
+ <div align="center">
2
+ # LocatAI
3
+
4
+ AI-powered self-healing locators for Playwright. When your selectors break, LocatAI figures out what you meant and finds the element anyway.
5
+
6
+ [![npm version](https://badge.fury.io/js/@testnexus/locatai.svg)](https://www.npmjs.com/package/@testnexus/locatai)
7
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](./LICENSE)
8
+ </div>
9
+
10
+ ## Why?
11
+
12
+ We've all been there. You write a solid test suite, everything passes, then the frontend team refactors something and half your locators break. You spend hours updating selectors instead of writing actual tests.
13
+
14
+ LocatAI fixes this. It wraps your Playwright page with AI-powered healing that kicks in when locators fail. You can also skip selectors entirely and just describe what you're looking for in plain English.
15
+
16
+ ## Features
17
+
18
+ - **Self-healing locators** - When a selector breaks, AI analyzes the page and finds the right element
19
+ - **AI-only mode** - Just describe the element, no selector needed
20
+ - **Multi-provider** - OpenAI, Anthropic, Google, or run locally with Ollama — your choice
21
+ - **Local LLM support** - Run completely offline with Ollama. No API keys, no cloud, full privacy
22
+ - **Caching** - Healed selectors are saved so you don't burn API calls on every run
23
+ - **Drop-in** - Works with your existing Playwright setup
24
+
25
+ ## Installation
26
+
27
+ ```bash
28
+ npm install @testnexus/locatai
29
+ ```
30
+
31
+ ## Quick Start
32
+
33
+ Set your API key and enable healing:
34
+
35
+ ```bash
36
+ # OpenAI (default) - also accepts AI_PROVIDER=gpt
37
+ export AI_API_KEY="your-openai-key"
38
+ export SELF_HEAL=1
39
+
40
+ # Anthropic Claude - also accepts AI_PROVIDER=claude
41
+ export AI_PROVIDER=anthropic
42
+ export AI_API_KEY="your-anthropic-key"
43
+ export SELF_HEAL=1
44
+
45
+ # Google Gemini - also accepts AI_PROVIDER=gemini
46
+ export AI_PROVIDER=google
47
+ export AI_API_KEY="your-google-key"
48
+ export SELF_HEAL=1
49
+
50
+ # Local LLM via Ollama — no API key, no cloud, fully offline!
51
+ export AI_PROVIDER=local
52
+ export SELF_HEAL=1
53
+ # Optional: pick a different model (default: gemma3:4b)
54
+ # export AI_MODEL="mistral"
55
+ ```
56
+
57
+ Create a fixture:
58
+
59
+ ```typescript
60
+ // fixtures.ts
61
+ import { test as base } from '@playwright/test';
62
+ import { createLocatAIFixture, LocatAIPage } from '@testnexus/locatai';
63
+
64
+ export const test = base.extend<{ page: LocatAIPage }>(createLocatAIFixture());
65
+ export { expect } from '@playwright/test';
66
+ ```
67
+
68
+ Use it in your tests:
69
+
70
+ ```typescript
71
+ import { test, expect } from './fixtures';
72
+
73
+ test('login flow', async ({ page }) => {
74
+ await page.goto('https://example.com');
75
+
76
+ // If this selector breaks, AI will find the right element
77
+ await page.locatai.click(
78
+ page.locator('[data-testid="submit-btn"]'),
79
+ 'Submit button on form'
80
+ );
81
+
82
+ // Or skip the selector entirely - just describe what you want
83
+ await page.locatai.fill('', 'Email input field', 'user@example.com');
84
+ });
85
+ ```
86
+
87
+ ## API
88
+
89
+ ### `withLocatAI(page)`
90
+
91
+ Wraps a Playwright page with healing methods:
92
+
93
+ ```typescript
94
+ import { withLocatAI } from '@testnexus/locatai';
95
+
96
+ const locataiPage = withLocatAI(page);
97
+ await locataiPage.locatai.click(locator, 'Button description');
98
+ ```
99
+
100
+ ### `createLocatAIFixture()`
101
+
102
+ Creates a test fixture that automatically wraps the page:
103
+
104
+ ```typescript
105
+ import { test as base } from '@playwright/test';
106
+ import { createLocatAIFixture, LocatAIPage } from '@testnexus/locatai';
107
+
108
+ export const test = base.extend<{ page: LocatAIPage }>(createLocatAIFixture());
109
+ ```
110
+
111
+ ### Available Methods
112
+
113
+ All methods take a locator (or empty string for AI-only) and a description:
114
+
115
+ | Method | What it does |
116
+ |--------|-------------|
117
+ | `locatai.click(locator, desc, options?)` | Click an element |
118
+ | `locatai.fill(locator, desc, value)` | Fill an input |
119
+ | `locatai.selectOption(locator, desc, value)` | Select from dropdown |
120
+ | `locatai.check(locator, desc)` | Check a checkbox |
121
+ | `locatai.uncheck(locator, desc)` | Uncheck a checkbox |
122
+ | `locatai.dblclick(locator, desc)` | Double-click |
123
+ | `locatai.hover(locator, desc)` | Hover over element |
124
+ | `locatai.focus(locator, desc)` | Focus element |
125
+ | `locatai.locator(selector, desc)` | Create self-healing locator for chaining |
126
+
127
+ ### Self-Healing Locator
128
+
129
+ Create a locator that combines a CSS selector with a semantic description. If the selector fails, AI takes over:
130
+
131
+ ```typescript
132
+ // Combines selector with semantic fallback - best of both worlds
133
+ await page.locatai.locator('.new-todo', 'Input field for new todos').fill('Buy milk');
134
+ await page.locatai.locator('.submit-btn', 'Submit button').click();
135
+ await page.locatai.locator('.toggle', 'Checkbox').check();
136
+ ```
137
+
138
+ ### AI-Only Mode
139
+
140
+ Pass an empty string as the locator and let AI find the element:
141
+
142
+ ```typescript
143
+ await page.locatai.click('', 'Login button');
144
+ await page.locatai.fill('', 'Search input', 'my query');
145
+ ```
146
+
147
+ This is useful when you don't have good selectors or want to make tests more readable.
148
+
149
+ ### Force Click (Hidden Elements)
150
+
151
+ Some elements only appear on hover (like delete buttons). Use `{ force: true }` to click hidden elements:
152
+
153
+ ```typescript
154
+ // First hover to reveal the element
155
+ await page.locatai.hover('', 'Todo item in the list');
156
+
157
+ // Then force-click the hidden delete button
158
+ await page.locatai.click('', 'Delete button', { force: true });
159
+ ```
160
+
161
+ ### Mixing Healing with Regular Playwright
162
+
163
+ Just because you've added LocatAI doesn't mean you have to use `page.locatai.*` for everything. The wrapped page still works exactly like a normal Playwright page, so you can mix and match as needed:
164
+
165
+ ```typescript
166
+ // Use healing for elements that tend to break
167
+ await page.locatai.fill('', 'Input field for new todo items', 'Buy groceries');
168
+
169
+ // Use regular Playwright for stable selectors
170
+ await page.fill('#username', 'testuser');
171
+ await page.click('button[type="submit"]');
172
+ ```
173
+
174
+ Pick the approach that makes sense for each action. Maybe you use healing for that flaky third-party widget but stick with regular locators for your own well-structured components. It's your call.
175
+
176
+ ## Configuration
177
+
178
+ ### Environment Variables
179
+
180
+ | Variable | Description |
181
+ |----------|-------------|
182
+ | `SELF_HEAL` | Set to `1` to enable healing |
183
+ | `AI_API_KEY` | Your AI provider API key |
184
+ | `AI_PROVIDER` | `openai`/`gpt`, `anthropic`/`claude`, `google`/`gemini`, or `local`/`ollama` |
185
+ | `AI_MODEL` | Override the default model (optional) |
186
+ | `OLLAMA_HOST` | Ollama server URL (default: `http://127.0.0.1:11434`) |
187
+
188
+ **Default models by provider:**
189
+ - **OpenAI:** `gpt-5.2`
190
+ - **Anthropic:** `claude-sonnet-4-5`
191
+ - **Google:** `gemini-3-flash`
192
+ - **Local (Ollama):** `gemma3:4b`
193
+
194
+ ### Fixture Options
195
+
196
+ You can pass options when creating the healing fixture:
197
+
198
+ ```typescript
199
+ const test = base.extend<{ page: LocatAIPage }>(createLocatAIFixture({
200
+ maxCandidates: 30, // Max DOM elements sent to AI (default: 30)
201
+ maxAiTries: 4, // Max AI strategies to validate (default: 4)
202
+ timeout: 5000, // Locator timeout in ms (default: 5000)
203
+ provider: 'openai', // AI provider (default: 'openai')
204
+ model: 'gpt-5.2', // Override default model
205
+ }));
206
+ ```
207
+
208
+ ### Token Optimization
209
+
210
+ LocatAI is designed to minimize AI token consumption and keep costs low:
211
+
212
+ - **Compact candidates** — Only non-null attributes are sent. Null/empty fields are stripped, and short keys are used (`tid` instead of `data-testid`, `txt` instead of `text`, etc.)
213
+ - **Invisible elements filtered** — Hidden elements (`display: none`, `visibility: hidden`, zero-size) are excluded before sending to the AI
214
+ - **Smart pre-filtering** — Before sending candidates to the AI, `rankCandidates()` scores each element by keyword match, tag-type inference, ARIA role relevance, and test-ID presence. Only the top-ranked candidates are sent, typically reducing the set by 20–30% while preserving accuracy
215
+ - **Capped output** — The AI is asked to return a maximum of 3 strategies per request
216
+ - **Configurable limit** — Control how many DOM elements are collected with `maxCandidates`
217
+
218
+ Lower `maxCandidates` = fewer tokens = lower cost and faster responses. The default of 30 works well for most pages. For complex pages with many interactive elements, you can increase it:
219
+
220
+ ```typescript
221
+ // Simple pages — fewer candidates, faster & cheaper
222
+ createLocatAIFixture({ maxCandidates: 15 })
223
+
224
+ // Complex pages — more candidates, better accuracy
225
+ createLocatAIFixture({ maxCandidates: 50 })
226
+ ```
227
+
228
+ ### Token Usage Visibility
229
+
230
+ Each healing call logs the token consumption reported by the AI provider:
231
+
232
+ ```
233
+ ↑ 1350 input · 180 output · 1530 total tokens
234
+ ```
235
+
236
+ Token usage is also recorded in `.self-heal/heal_events.jsonl` for cost tracking and analysis across test runs.
237
+
238
+ ### Cache
239
+
240
+ Healed selectors get cached in `.self-heal/`. Add it to your `.gitignore`:
241
+
242
+ ```
243
+ .self-heal/
244
+ ```
245
+
246
+ ## How It Works
247
+
248
+ 1. Try the original locator
249
+ 2. If it fails, check the cache for a previously healed selector
250
+ 3. If not cached, send page context to the AI and ask it to find the element
251
+ 4. Cache the result for next time
252
+
253
+ ## Example
254
+
255
+ ```typescript
256
+ import { test as base, expect } from '@playwright/test';
257
+ import { createLocatAIFixture, LocatAIPage } from '@testnexus/locatai';
258
+
259
+ const test = base.extend<{ page: LocatAIPage }>(createLocatAIFixture());
260
+
261
+ test('checkout flow', async ({ page }) => {
262
+ await page.goto('/shop');
263
+
264
+ // These might break when the UI changes, but LocatAI will adapt
265
+ await page.locatai.click(
266
+ page.locator('[data-testid="add-to-cart"]'),
267
+ 'Add to cart button'
268
+ );
269
+
270
+ await page.locatai.click(
271
+ page.locator('.cart-icon'),
272
+ 'Shopping cart icon'
273
+ );
274
+
275
+ await page.locatai.fill(
276
+ page.locator('#email'),
277
+ 'Email field in checkout',
278
+ 'customer@example.com'
279
+ );
280
+
281
+ // No selector at all - just describe it
282
+ await page.locatai.click('', 'Place order button');
283
+ });
284
+ ```
285
+
286
+ ## Live Demo
287
+
288
+ Want to see LocatAI in action without touching your own project? Check out the **[`test-published`](https://github.com/Divyarajsinh-Dodia1617/LocatAi/tree/test-published)** branch — a standalone mini-project that imports the published npm package and runs tests with intentionally broken selectors.
289
+
290
+ ```bash
291
+ git clone -b test-published https://github.com/Divyarajsinh-Dodia1617/LocatAi.git locatai-demo
292
+ cd locatai-demo
293
+ npm install && npx playwright install chromium
294
+ cp example.env .env # add your API key
295
+ npm test
296
+ ```
297
+
298
+ It includes two smoke tests:
299
+ - **Broken locator** — uses a wrong selector (`#wrong-todo-input`), AI heals it to the real input
300
+ - **AI-only mode** — no selector at all, AI finds the element from a plain-English description
301
+
302
+ ## Privacy & Security
303
+
304
+ When healing is triggered, LocatAI sends a **DOM snapshot** of candidate elements (tag names, roles, aria labels, text content, test IDs, etc.) to the configured AI provider's API. **No screenshots or full page HTML are sent** — only a structured list of relevant elements.
305
+
306
+ Keep this in mind if your application contains sensitive data in the DOM (e.g., PII, financial data, internal URLs). You can:
307
+ - **Use a local LLM** with `AI_PROVIDER=local` — data never leaves your machine
308
+ - Use a self-hosted or on-premise LLM by configuring a custom `AI_MODEL` and API endpoint
309
+ - Limit healing to non-production environments
310
+ - Review the candidate data sent via the `.self-heal/heal_events.jsonl` log
311
+
312
+ ### Local LLM with Ollama
313
+
314
+ For maximum privacy and zero cost, run healing entirely on your machine with [Ollama](https://ollama.com):
315
+
316
+ ```bash
317
+ # 1. Install Ollama (https://ollama.com) and pull a model
318
+ ollama pull gemma3:4b
319
+
320
+ # 2. Configure LocatAI
321
+ export AI_PROVIDER=local
322
+ export SELF_HEAL=1
323
+ ```
324
+
325
+ That's it — no API keys, no cloud calls, no usage fees. Ollama runs the model locally and LocatAI talks to it directly.
326
+
327
+ **Recommended models for healing** (sorted by size):
328
+
329
+ | Model | Size | Context | Tested | Good for |
330
+ |-------|------|---------|--------|----------|
331
+ | `llama3.2:3b` ⭐ | 2.0 GB | 128K | 5/5 pass | Best overall choice — fast, accurate, and lightweight |
332
+ | `mistral` ⭐ | 4.1 GB | 128K | 5/5 pass | Most accurate responses, but slowest (~1.4 min per run) |
333
+ | `gemma3:4b` | 3.3 GB | 128K | 4/5 pass | Faster than mistral (~53s), but slightly less accurate |
334
+
335
+ > **Best results were achieved with `mistral`, `gemma3:4b`, and `llama3.2:3b`** — these three models consistently produced the most accurate healing across our test suite. `llama3.2:3b` stands out as the best overall choice, combining top accuracy with a small footprint. We recommend starting with one of them.
336
+
337
+ > **Tip:** Small models (1B–4B) may struggle with ambiguous elements (e.g. multiple checkboxes on the same page). If accuracy matters more than speed, use `mistral` or a larger model.
338
+
339
+ You can use **any model** from the [Ollama library](https://ollama.com/library) — just set `AI_MODEL`:
340
+
341
+ ```bash
342
+ export AI_MODEL=llama3.2:3b # or mistral, etc.
343
+ ```
344
+
345
+ Custom Ollama host (e.g., running on another machine):
346
+
347
+ ```bash
348
+ export OLLAMA_HOST=http://192.168.1.100:11434
349
+ ```
350
+
351
+ ## Development
352
+
353
+ ```bash
354
+ npm install
355
+ npm run build
356
+ SELF_HEAL=1 npm test
357
+ ```
358
+
359
+ ## License
360
+
361
+ MIT