testchimp-runner-core 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/auth-config.d.ts +33 -0
- package/dist/auth-config.d.ts.map +1 -0
- package/dist/auth-config.js +69 -0
- package/dist/auth-config.js.map +1 -0
- package/dist/env-loader.d.ts +20 -0
- package/dist/env-loader.d.ts.map +1 -0
- package/dist/env-loader.js +83 -0
- package/dist/env-loader.js.map +1 -0
- package/dist/execution-service.d.ts +61 -0
- package/dist/execution-service.d.ts.map +1 -0
- package/dist/execution-service.js +822 -0
- package/dist/execution-service.js.map +1 -0
- package/dist/file-handler.d.ts +59 -0
- package/dist/file-handler.d.ts.map +1 -0
- package/dist/file-handler.js +75 -0
- package/dist/file-handler.js.map +1 -0
- package/dist/index.d.ts +46 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +196 -0
- package/dist/index.js.map +1 -0
- package/dist/llm-facade.d.ts +101 -0
- package/dist/llm-facade.d.ts.map +1 -0
- package/dist/llm-facade.js +289 -0
- package/dist/llm-facade.js.map +1 -0
- package/dist/playwright-mcp-service.d.ts +42 -0
- package/dist/playwright-mcp-service.d.ts.map +1 -0
- package/dist/playwright-mcp-service.js +167 -0
- package/dist/playwright-mcp-service.js.map +1 -0
- package/dist/prompts.d.ts +34 -0
- package/dist/prompts.d.ts.map +1 -0
- package/dist/prompts.js +237 -0
- package/dist/prompts.js.map +1 -0
- package/dist/scenario-service.d.ts +25 -0
- package/dist/scenario-service.d.ts.map +1 -0
- package/dist/scenario-service.js +119 -0
- package/dist/scenario-service.js.map +1 -0
- package/dist/scenario-worker-class.d.ts +30 -0
- package/dist/scenario-worker-class.d.ts.map +1 -0
- package/dist/scenario-worker-class.js +263 -0
- package/dist/scenario-worker-class.js.map +1 -0
- package/dist/script-utils.d.ts +44 -0
- package/dist/script-utils.d.ts.map +1 -0
- package/dist/script-utils.js +100 -0
- package/dist/script-utils.js.map +1 -0
- package/dist/types.d.ts +171 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +28 -0
- package/dist/types.js.map +1 -0
- package/dist/utils/browser-utils.d.ts +13 -0
- package/dist/utils/browser-utils.d.ts.map +1 -0
- package/dist/utils/browser-utils.js +269 -0
- package/dist/utils/browser-utils.js.map +1 -0
- package/dist/utils/page-info-utils.d.ts +16 -0
- package/dist/utils/page-info-utils.d.ts.map +1 -0
- package/dist/utils/page-info-utils.js +77 -0
- package/dist/utils/page-info-utils.js.map +1 -0
- package/env.prod +1 -0
- package/env.staging +1 -0
- package/package.json +38 -0
- package/src/auth-config.ts +84 -0
- package/src/env-loader.ts +91 -0
- package/src/execution-service.ts +999 -0
- package/src/file-handler.ts +104 -0
- package/src/index.ts +205 -0
- package/src/llm-facade.ts +413 -0
- package/src/playwright-mcp-service.ts +203 -0
- package/src/prompts.ts +247 -0
- package/src/scenario-service.ts +138 -0
- package/src/scenario-worker-class.ts +330 -0
- package/src/script-utils.ts +109 -0
- package/src/types.ts +202 -0
- package/src/utils/browser-utils.ts +272 -0
- package/src/utils/page-info-utils.ts +93 -0
- package/tsconfig.json +19 -0

package/src/llm-facade.ts
@@ -0,0 +1,413 @@
import axios from 'axios';
import { PROMPTS } from './prompts';
import { PageInfo } from './utils/page-info-utils';
import { StepOperation } from './types';
import { AuthConfig, createAuthConfigFromEnv, getAuthHeaders } from './auth-config';
import { loadEnvConfig } from './env-loader';

// LLM Request/Response interfaces for backend proxy
interface CallLLMRequest {
  model?: string;
  system_prompt?: string;
  user_prompt?: string;
}

interface CallLLMResponse {
  answer?: string;
}

// LLM Response interfaces
export interface LLMScenarioBreakdownResponse {
  steps: string[];
}

export interface LLMPlaywrightCommandResponse {
  command: string;
  reasoning?: string;
}

export interface LLMTestNameResponse {
  testName: string;
}

export interface RepairSuggestionResponse {
  shouldContinue: boolean;
  reason: string;
  action: {
    operation: StepOperation;
    stepIndex?: number;
    newStep?: {
      description: string;
      code: string;
    };
    insertAfterIndex?: number;
  };
}

export interface RepairConfidenceResponse {
  confidence: number;
  advice: string;
}

export interface ScenarioStep {
  stepNumber: number;
  description: string;
  playwrightCommand?: string;
  success?: boolean;
  error?: string;
  retryCount?: number;
  attempts?: Array<{
    attemptNumber: number;
    command?: string;
    success: boolean;
    error?: string;
    timestamp: number;
  }>;
}

export class LLMFacade {
  private backendUrl: string;
  private authConfig: AuthConfig | null;

  constructor(authConfig?: AuthConfig, backendUrl?: string) {
    // Use provided backend URL or fall back to environment configuration
    if (backendUrl) {
      this.backendUrl = backendUrl;
      console.log(`LLMFacade initialized with provided backend URL: ${this.backendUrl}`);
    } else {
      // Fall back to environment configuration for backward compatibility
      const envConfig = loadEnvConfig();
      this.backendUrl = envConfig.TESTCHIMP_BACKEND_URL;
      console.log(`LLMFacade initialized with environment backend URL: ${this.backendUrl}`);
    }

    // Use provided auth config or try to create from environment
    this.authConfig = authConfig || createAuthConfigFromEnv();

    if (!this.authConfig) {
      console.warn('TestChimp authentication not configured. LLM calls may fail.');
    }
  }

  /**
   * Update authentication configuration
   */
  setAuthConfig(authConfig: AuthConfig): void {
    this.authConfig = authConfig;
  }

  /**
   * Get current authentication configuration
   */
  getAuthConfig(): AuthConfig | null {
    return this.authConfig;
  }

  private async callLLM(request: CallLLMRequest): Promise<string> {
    if (!this.authConfig) {
      throw new Error('Authentication not configured. Please set authentication credentials.');
    }

    try {
      const authHeaders = getAuthHeaders(this.authConfig);
      const url = `${this.backendUrl}/localagent/call_llm`;
      console.log(`repairing step`);

      const response = await axios.post(url, request, {
        headers: {
          ...authHeaders,
          'Content-Type': 'application/json'
        },
        timeout: 30000 // 30 second timeout for LLM calls
      });

      if (response.data && response.data.answer) {
        return response.data.answer;
      } else {
        throw new Error('Invalid response from LLM backend');
      }
    } catch (error: any) {
      console.error('LLM call failed:', error);
      throw new Error(`LLM call failed: ${error.message}`);
    }
  }

  /**
   * Generate a test name from scenario description
   */
  async generateTestName(scenario: string, model: string = 'gpt-4.1-mini'): Promise<string> {
    console.log('Generating test name with LLM...');

    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.TEST_NAME_GENERATION.SYSTEM,
      user_prompt: PROMPTS.TEST_NAME_GENERATION.USER(scenario)
    };

    try {
      const response = await this.callLLM(request);
      const testNameResponse = JSON.parse(response) as LLMTestNameResponse;
      return testNameResponse.testName;
    } catch (error) {
      console.error('Failed to generate test name:', error);
      // Fallback to a simple generated name
      return `Test: ${scenario.substring(0, 50)}...`;
    }
  }

  /**
   * Break down scenario into steps
   */
  async breakdownScenario(scenario: string, model: string = 'gpt-4.1-mini'): Promise<ScenarioStep[]> {
    console.log('Breaking down scenario with LLM...');

    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.SCENARIO_BREAKDOWN.SYSTEM,
      user_prompt: PROMPTS.SCENARIO_BREAKDOWN.USER(scenario)
    };

    try {
      const response = await this.callLLM(request);
      const breakdownResponse = JSON.parse(response) as LLMScenarioBreakdownResponse;

      // Validate and clean up steps
      const cleanedSteps = breakdownResponse.steps
        .map(step => step.trim())
        .filter(step => step.length > 0)
        .slice(0, 10); // Limit to 10 steps max

      return cleanedSteps.map((desc, index) => ({
        stepNumber: index + 1,
        description: desc,
      }));
    } catch (error) {
      console.error('Failed to breakdown scenario:', error);
      // Fallback to simple breakdown
      const stepDescriptions = scenario.split('.').map(s => s.trim()).filter(s => s.length > 0);
      return stepDescriptions.map((desc, index) => ({
        stepNumber: index + 1,
        description: desc,
      }));
    }
  }

  /**
   * Generate Playwright command for a step
   */
  async generatePlaywrightCommand(
    stepDescription: string,
    pageInfo: PageInfo,
    previousSteps: ScenarioStep[],
    lastError?: string,
    currentStep?: ScenarioStep,
    model: string = 'gpt-4.1-mini'
  ): Promise<string | null> {
    console.log('Generating Playwright command with LLM...');

    const previousCommands = previousSteps
      .filter(s => s.playwrightCommand && s.success)
      .map(s => `// Step ${s.stepNumber}: ${s.description}\n${s.playwrightCommand}`)
      .join('\n');

    // Build comprehensive attempt history for current step
    const attemptHistory = this.buildAttemptHistory(currentStep);

    // Provide raw error context for LLM analysis
    const errorContext = this.buildErrorContext(lastError, currentStep);

    const prompt = PROMPTS.PLAYWRIGHT_COMMAND.USER(
      stepDescription,
      pageInfo,
      previousCommands,
      attemptHistory,
      errorContext
    );

    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.PLAYWRIGHT_COMMAND.SYSTEM,
      user_prompt: prompt
    };

    try {
      const response = await this.callLLM(request);
      const commandResponse = JSON.parse(response) as LLMPlaywrightCommandResponse;
      return commandResponse.command;
    } catch (error) {
      console.error('Failed to generate Playwright command:', error);
      return null;
    }
  }

  /**
   * Parse script into steps for AI repair
   */
  async parseScriptIntoSteps(script: string, model: string = 'gpt-4o-mini'): Promise<Array<{ description: string; code: string; success?: boolean; error?: string }>> {
    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.SCRIPT_PARSING.SYSTEM,
      user_prompt: PROMPTS.SCRIPT_PARSING.USER(script)
    };

    try {
      const response = await this.callLLM(request);
      const parsed = JSON.parse(response);

      // Expect JSON object with steps array
      if (parsed.steps && Array.isArray(parsed.steps)) {
        return parsed.steps;
      } else {
        console.error('Unexpected LLM response format - expected {steps: [...]}:', parsed);
        return [];
      }
    } catch (error) {
      console.error('Failed to parse LLM response as JSON:', error);
      return [];
    }
  }

  /**
   * Get repair suggestion for a failing step
   */
  async getRepairSuggestion(
    stepDescription: string,
    stepCode: string,
    errorMessage: string,
    pageInfo: PageInfo,
    failureHistory: string,
    recentRepairs: string,
    model: string = 'gpt-4.1-mini'
  ): Promise<RepairSuggestionResponse> {
    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.REPAIR_SUGGESTION.SYSTEM,
      user_prompt: PROMPTS.REPAIR_SUGGESTION.USER(
        stepDescription,
        stepCode,
        errorMessage,
        pageInfo,
        failureHistory,
        recentRepairs,
      )
    };

    const response = await this.callLLM(request);
    console.log(`🤖 LLM Repair Response:`, response);
    const parsed = JSON.parse(response) as any;
    console.log(`🤖 Parsed Repair Action:`, parsed);

    // Convert string operation to enum
    if (parsed.action && parsed.action.operation) {
      switch (parsed.action.operation) {
        case 'MODIFY':
          parsed.action.operation = StepOperation.MODIFY;
          break;
        case 'INSERT':
          parsed.action.operation = StepOperation.INSERT;
          break;
        case 'REMOVE':
          parsed.action.operation = StepOperation.REMOVE;
          break;
        default:
          parsed.action.operation = StepOperation.MODIFY;
      }
    }

    return parsed as RepairSuggestionResponse;
  }

  /**
   * Assess repair confidence and generate advice
   */
  async assessRepairConfidence(
    originalScript: string,
    updatedScript: string,
    model: string = 'gpt-4.1-mini'
  ): Promise<RepairConfidenceResponse> {
    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.REPAIR_CONFIDENCE.SYSTEM,
      user_prompt: PROMPTS.REPAIR_CONFIDENCE.USER(originalScript, updatedScript)
    };

    const response = await this.callLLM(request);
    return JSON.parse(response) as RepairConfidenceResponse;
  }

  /**
   * Generate final script with repair advice
   */
  async generateFinalScript(
    originalScript: string,
    updatedScript: string,
    newRepairAdvice: string,
    model: string = 'gpt-4o-mini'
  ): Promise<string> {
    const request: CallLLMRequest = {
      model,
      system_prompt: PROMPTS.FINAL_SCRIPT.SYSTEM,
      user_prompt: PROMPTS.FINAL_SCRIPT.USER(originalScript, updatedScript, newRepairAdvice)
    };

    const response = await this.callLLM(request);
    try {
      const parsed = JSON.parse(response);
      return parsed.script || updatedScript;
    } catch (error) {
      console.error('Failed to parse final script response:', error);
      return updatedScript;
    }
  }

  /**
   * Build attempt history for current step
   */
  private buildAttemptHistory(currentStep?: ScenarioStep): string {
    if (!currentStep || !currentStep.attempts || currentStep.attempts.length === 0) {
      return 'This is the first attempt for this step.';
    }

    const attempts = currentStep.attempts.map((attempt, index) => {
      const status = attempt.success ? '✅ SUCCESS' : '❌ FAILED';
      return `Attempt ${attempt.attemptNumber} (${status}):
Command: ${attempt.command || 'No command generated'}
${attempt.error ? `Error: ${attempt.error}` : 'No error'}
Timestamp: ${new Date(attempt.timestamp).toISOString()}`;
    }).join('\n\n');

    return `Current step attempt history:
${attempts}

LEARNING FROM FAILURES:
- Analyze what went wrong in each attempt
- Try completely different approaches for failed attempts
- If a selector failed, try alternative selectors
- If timing failed, add proper waits
- If element not found, try different strategies`;
  }

  /**
   * Build error context for LLM analysis
   */
  private buildErrorContext(lastError?: string, currentStep?: ScenarioStep): string {
    if (!lastError && (!currentStep || !currentStep.error)) {
      return '';
    }

    const errors = [];
    if (lastError) errors.push(lastError);
    if (currentStep?.error) errors.push(currentStep.error);

    const errorText = errors.join(' | ');

    return `ERROR CONTEXT:
Last Error: ${errorText}

ANALYZE THE ERROR AND ADAPT:
- Study the error message to understand what went wrong
- Try a completely different approach than what failed
- Consider alternative selectors, timing, or interaction methods
- Never repeat the exact same command that failed`;
  }
}
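For orientation, here is a minimal usage sketch of the `LLMFacade` defined above. It is illustrative only: the import assumes `LLMFacade` is re-exported from the package entry point (`package/src/index.ts`, not shown in this diff), the backend URL is a placeholder, and TestChimp credentials are assumed to be available via environment variables so that `createAuthConfigFromEnv()` succeeds inside the constructor.

```ts
// Hypothetical usage sketch - not part of the published package source.
// Assumptions: LLMFacade is re-exported from the package entry point, and
// TestChimp auth env vars are set so createAuthConfigFromEnv() returns a config.
import { LLMFacade } from 'testchimp-runner-core';

async function planScenario(): Promise<void> {
  // The second constructor argument overrides TESTCHIMP_BACKEND_URL from env config.
  const llm = new LLMFacade(undefined, 'https://backend.example.invalid');

  // Proxied through `${backendUrl}/localagent/call_llm`; falls back to a
  // truncated scenario string if the call or JSON parsing fails.
  const testName = await llm.generateTestName('Log in and verify the dashboard loads');

  // Returns at most 10 ScenarioStep objects ({ stepNumber, description }).
  const steps = await llm.breakdownScenario('Log in. Open settings. Update the display name.');

  console.log(testName, steps.map(s => s.description));
}
```

Every public method routes through the private `callLLM` helper, so a reachable backend and valid authentication headers are required at call time; requests time out after 30 seconds.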

package/src/playwright-mcp-service.ts
@@ -0,0 +1,203 @@
import { ScriptResult, PlaywrightConfig } from './types';
import { chromium, firefox, webkit, Browser, Page, BrowserContext } from 'playwright';
import { initializeBrowser } from './utils/browser-utils';

/**
 * Service for executing Playwright scripts using worker pool
 */
export class PlaywrightMCPService {
  private isConnected = false;

  constructor() {
    // No initialization needed for direct Playwright execution
  }

  /**
   * Initialize the service
   */
  async initialize(): Promise<void> {
    try {
      console.log('Initializing Playwright service...');

      // No specific initialization needed for direct Playwright execution
      this.isConnected = true;
      console.log('Playwright service initialized successfully');
    } catch (error) {
      throw new Error(`Failed to initialize Playwright service: ${error}`);
    }
  }

  /**
   * Execute a complete job (prescript + script + postscript) using worker pool
   */
  async executeJob(prescript: string | undefined, script: string, postscript: string | undefined, config?: PlaywrightConfig): Promise<{
    success: boolean;
    results: {
      prescript?: ScriptResult;
      script: ScriptResult;
      postscript?: ScriptResult;
    };
    executionTime: number;
    error?: string;
  }> {
    if (!this.isConnected) {
      throw new Error('Service not initialized');
    }

    try {
      // Execute the job directly using Playwright
      return await this.executeScriptDirectly(prescript, script, postscript, config);
    } catch (error) {
      return {
        success: false,
        results: {
          script: { success: false, output: '', error: '', executionTime: 0 }
        },
        executionTime: 0,
        error: error instanceof Error ? error.message : 'Unknown error occurred'
      };
    }
  }



  /**
   * Prepare the script content for execution
   */
  private prepareScript(script: string, config?: PlaywrightConfig): string {
    // If the script looks like a test file, return as-is
    if (script.includes('test(') || script.includes('describe(')) {
      return script;
    }

    // If it's a list of Playwright commands, wrap in a test
    return `
test('executed script', async ({ page }) => {
  ${script}
});
`;
  }

  /**
   * Close the service
   */
  /**
   * Execute script directly using Playwright
   */
  private async executeScriptDirectly(
    prescript: string | undefined,
    script: string,
    postscript: string | undefined,
    config?: PlaywrightConfig
  ): Promise<{
    success: boolean;
    results: {
      prescript?: ScriptResult;
      script: ScriptResult;
      postscript?: ScriptResult;
    };
    executionTime: number;
    error?: string;
  }> {
    const startTime = Date.now();
    let browser: Browser | undefined;
    let context: BrowserContext | undefined;
    let page: Page | undefined;

    try {
      // Use the centralized browser initialization utility
      const browserInstance = await initializeBrowser(config);
      browser = browserInstance.browser;
      context = browserInstance.context;
      page = browserInstance.page;

      const results: {
        prescript?: ScriptResult;
        script: ScriptResult;
        postscript?: ScriptResult;
      } = {
        script: { success: false, output: '', error: '', executionTime: 0 }
      };

      // Execute prescript
      if (prescript) {
        try {
          const scriptFunction = new Function('page', 'browser', 'context', `
            return (async () => {
              ${prescript}
            })();
          `);
          await scriptFunction(page, browser, context);
          results.prescript = { success: true, output: 'Prescript executed successfully', error: '', executionTime: 0 };
        } catch (error: any) {
          results.prescript = { success: false, output: '', error: error.message, executionTime: 0 };
        }
      }

      // Execute main script
      try {
        const scriptFunction = new Function('page', 'browser', 'context', `
          return (async () => {
            ${script}
          })();
        `);
        await scriptFunction(page, browser, context);
        results.script = { success: true, output: 'Script executed successfully', error: '', executionTime: 0 };
      } catch (error: any) {
        results.script = { success: false, output: '', error: error.message, executionTime: 0 };
      }

      // Execute postscript
      if (postscript) {
        try {
          const scriptFunction = new Function('page', 'browser', 'context', `
            return (async () => {
              ${postscript}
            })();
          `);
          await scriptFunction(page, browser, context);
          results.postscript = { success: true, output: 'Postscript executed successfully', error: '', executionTime: 0 };
        } catch (error: any) {
          results.postscript = { success: false, output: '', error: error.message, executionTime: 0 };
        }
      }

      return {
        success: results.script.success,
        results,
        executionTime: Date.now() - startTime
      };

    } catch (error: any) {
      return {
        success: false,
        results: {
          script: { success: false, output: '', error: error.message, executionTime: 0 }
        },
        executionTime: Date.now() - startTime,
        error: error.message
      };
    } finally {
      if (browser) {
        await browser.close();
      }
    }
  }

  async close(): Promise<void> {
    try {
      // No cleanup needed for direct Playwright execution
      this.isConnected = false;
      console.log('Playwright service closed');
    } catch (error) {
      console.error('Error during shutdown:', error);
    }
  }

  /**
   * Check if the service is ready
   */
  isReady(): boolean {
    return this.isConnected;
  }
}
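A similar hedged sketch for `PlaywrightMCPService`: despite the "MCP" name, the class above runs script strings directly with Playwright by wrapping them in `new Function('page', 'browser', 'context', ...)` and executing them against a browser created by `initializeBrowser`. The import path is again an assumption about the package's entry-point exports.

```ts
// Hypothetical usage sketch - not part of the published package source.
// Assumes PlaywrightMCPService is re-exported from the package entry point.
import { PlaywrightMCPService } from 'testchimp-runner-core';

async function runJob(): Promise<void> {
  const service = new PlaywrightMCPService();
  await service.initialize(); // only flips the internal isConnected flag

  // The script body is evaluated with `page`, `browser`, and `context` in scope.
  const result = await service.executeJob(
    undefined, // prescript
    `await page.goto('https://example.com');
     await page.click('text=More information');`,
    undefined  // postscript
  );

  console.log(result.success, result.executionTime, result.results.script.error);
  await service.close();
}
```

Note that `executeJob` reports script failures through `results.*.error` rather than throwing, and the browser is always closed in the `finally` block, so no per-job cleanup is needed beyond `close()`.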