@midscene/core 0.26.5-beta-20250814095614.0 → 0.26.5-beta-20250814125155.0

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (132)
  1. package/dist/es/ai-model/action-executor.mjs +139 -0
  2. package/dist/es/ai-model/action-executor.mjs.map +1 -0
  3. package/dist/es/ai-model/common.mjs +219 -0
  4. package/dist/es/ai-model/common.mjs.map +1 -0
  5. package/dist/es/ai-model/index.mjs +10 -0
  6. package/dist/es/ai-model/inspect.mjs +317 -0
  7. package/dist/es/ai-model/inspect.mjs.map +1 -0
  8. package/dist/es/ai-model/llm-planning.mjs +85 -0
  9. package/dist/es/ai-model/llm-planning.mjs.map +1 -0
  10. package/dist/es/ai-model/prompt/assertion.mjs +55 -0
  11. package/dist/es/ai-model/prompt/assertion.mjs.map +1 -0
  12. package/dist/es/ai-model/prompt/common.mjs +7 -0
  13. package/dist/es/ai-model/prompt/common.mjs.map +1 -0
  14. package/dist/es/ai-model/prompt/describe.mjs +44 -0
  15. package/dist/es/ai-model/prompt/describe.mjs.map +1 -0
  16. package/dist/es/ai-model/prompt/extraction.mjs +137 -0
  17. package/dist/es/ai-model/prompt/extraction.mjs.map +1 -0
  18. package/dist/es/ai-model/prompt/llm-locator.mjs +275 -0
  19. package/dist/es/ai-model/prompt/llm-locator.mjs.map +1 -0
  20. package/dist/es/ai-model/prompt/llm-planning.mjs +359 -0
  21. package/dist/es/ai-model/prompt/llm-planning.mjs.map +1 -0
  22. package/dist/es/ai-model/prompt/llm-section-locator.mjs +47 -0
  23. package/dist/es/ai-model/prompt/llm-section-locator.mjs.map +1 -0
  24. package/dist/es/ai-model/prompt/playwright-generator.mjs +117 -0
  25. package/dist/es/ai-model/prompt/playwright-generator.mjs.map +1 -0
  26. package/dist/es/ai-model/prompt/ui-tars-locator.mjs +34 -0
  27. package/dist/es/ai-model/prompt/ui-tars-locator.mjs.map +1 -0
  28. package/dist/es/ai-model/prompt/ui-tars-planning.mjs +36 -0
  29. package/dist/es/ai-model/prompt/ui-tars-planning.mjs.map +1 -0
  30. package/dist/es/ai-model/prompt/util.mjs +123 -0
  31. package/dist/es/ai-model/prompt/util.mjs.map +1 -0
  32. package/dist/es/ai-model/prompt/yaml-generator.mjs +219 -0
  33. package/dist/es/ai-model/prompt/yaml-generator.mjs.map +1 -0
  34. package/dist/es/ai-model/service-caller/index.mjs +413 -0
  35. package/dist/es/ai-model/service-caller/index.mjs.map +1 -0
  36. package/dist/es/ai-model/ui-tars-planning.mjs +235 -0
  37. package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -0
  38. package/dist/es/image/index.mjs +2 -0
  39. package/dist/es/index.mjs +7 -2360
  40. package/dist/es/index.mjs.map +1 -1
  41. package/dist/es/insight/index.mjs +261 -0
  42. package/dist/es/insight/index.mjs.map +1 -0
  43. package/dist/es/insight/utils.mjs +19 -0
  44. package/dist/es/insight/utils.mjs.map +1 -0
  45. package/dist/es/types.mjs +11 -0
  46. package/dist/es/types.mjs.map +1 -0
  47. package/dist/es/utils.mjs +2 -2
  48. package/dist/es/yaml.mjs +0 -0
  49. package/dist/lib/ai-model/action-executor.js +173 -0
  50. package/dist/lib/ai-model/action-executor.js.map +1 -0
  51. package/dist/lib/ai-model/common.js +289 -0
  52. package/dist/lib/ai-model/common.js.map +1 -0
  53. package/dist/lib/ai-model/index.js +103 -0
  54. package/dist/lib/ai-model/index.js.map +1 -0
  55. package/dist/lib/ai-model/inspect.js +360 -0
  56. package/dist/lib/ai-model/inspect.js.map +1 -0
  57. package/dist/lib/ai-model/llm-planning.js +119 -0
  58. package/dist/lib/ai-model/llm-planning.js.map +1 -0
  59. package/dist/lib/ai-model/prompt/assertion.js +92 -0
  60. package/dist/lib/ai-model/prompt/assertion.js.map +1 -0
  61. package/dist/lib/ai-model/prompt/common.js +41 -0
  62. package/dist/lib/ai-model/prompt/common.js.map +1 -0
  63. package/dist/lib/ai-model/prompt/describe.js +78 -0
  64. package/dist/lib/ai-model/prompt/describe.js.map +1 -0
  65. package/dist/lib/ai-model/prompt/extraction.js +177 -0
  66. package/dist/lib/ai-model/prompt/extraction.js.map +1 -0
  67. package/dist/lib/ai-model/prompt/llm-locator.js +315 -0
  68. package/dist/lib/ai-model/prompt/llm-locator.js.map +1 -0
  69. package/dist/lib/ai-model/prompt/llm-planning.js +415 -0
  70. package/dist/lib/ai-model/prompt/llm-planning.js.map +1 -0
  71. package/dist/lib/ai-model/prompt/llm-section-locator.js +84 -0
  72. package/dist/lib/ai-model/prompt/llm-section-locator.js.map +1 -0
  73. package/dist/lib/ai-model/prompt/playwright-generator.js +178 -0
  74. package/dist/lib/ai-model/prompt/playwright-generator.js.map +1 -0
  75. package/dist/lib/ai-model/prompt/ui-tars-locator.js +68 -0
  76. package/dist/lib/ai-model/prompt/ui-tars-locator.js.map +1 -0
  77. package/dist/lib/ai-model/prompt/ui-tars-planning.js +73 -0
  78. package/dist/lib/ai-model/prompt/ui-tars-planning.js.map +1 -0
  79. package/dist/lib/ai-model/prompt/util.js +175 -0
  80. package/dist/lib/ai-model/prompt/util.js.map +1 -0
  81. package/dist/lib/ai-model/prompt/yaml-generator.js +280 -0
  82. package/dist/lib/ai-model/prompt/yaml-generator.js.map +1 -0
  83. package/dist/lib/ai-model/service-caller/index.js +496 -0
  84. package/dist/lib/ai-model/service-caller/index.js.map +1 -0
  85. package/dist/lib/ai-model/ui-tars-planning.js +272 -0
  86. package/dist/lib/ai-model/ui-tars-planning.js.map +1 -0
  87. package/dist/lib/image/index.js +56 -0
  88. package/dist/lib/image/index.js.map +1 -0
  89. package/dist/lib/index.js +21 -2393
  90. package/dist/lib/index.js.map +1 -1
  91. package/dist/lib/insight/index.js +295 -0
  92. package/dist/lib/insight/index.js.map +1 -0
  93. package/dist/lib/insight/utils.js +53 -0
  94. package/dist/lib/insight/utils.js.map +1 -0
  95. package/dist/lib/types.js +82 -0
  96. package/dist/lib/types.js.map +1 -0
  97. package/dist/lib/utils.js +2 -2
  98. package/dist/lib/yaml.js +20 -0
  99. package/dist/lib/yaml.js.map +1 -0
  100. package/dist/types/ai-model/action-executor.d.ts +19 -0
  101. package/dist/types/ai-model/common.d.ts +34 -0
  102. package/dist/types/ai-model/index.d.ts +11 -0
  103. package/dist/types/ai-model/inspect.d.ts +49 -0
  104. package/dist/types/ai-model/llm-planning.d.ts +10 -0
  105. package/dist/types/ai-model/prompt/assertion.d.ts +5 -0
  106. package/dist/types/ai-model/prompt/common.d.ts +2 -0
  107. package/dist/types/ai-model/prompt/describe.d.ts +1 -0
  108. package/dist/types/ai-model/prompt/extraction.d.ts +4 -0
  109. package/dist/types/ai-model/prompt/llm-locator.d.ts +9 -0
  110. package/dist/types/ai-model/prompt/llm-planning.d.ts +15 -0
  111. package/dist/types/ai-model/prompt/llm-section-locator.d.ts +6 -0
  112. package/dist/types/ai-model/prompt/playwright-generator.d.ts +25 -0
  113. package/dist/types/ai-model/prompt/ui-tars-locator.d.ts +1 -0
  114. package/dist/types/ai-model/prompt/ui-tars-planning.d.ts +2 -0
  115. package/dist/types/ai-model/prompt/util.d.ts +45 -0
  116. package/dist/types/ai-model/prompt/yaml-generator.d.ts +99 -0
  117. package/dist/types/ai-model/service-caller/index.d.ts +26 -0
  118. package/dist/types/ai-model/ui-tars-planning.d.ts +76 -0
  119. package/dist/types/image/index.d.ts +1 -0
  120. package/dist/types/index.d.ts +9 -1289
  121. package/dist/types/insight/index.d.ts +26 -0
  122. package/dist/types/insight/utils.d.ts +2 -0
  123. package/dist/types/tree.d.ts +1 -11
  124. package/dist/types/types.d.ts +399 -0
  125. package/dist/types/utils.d.ts +27 -47
  126. package/dist/types/yaml.d.ts +172 -0
  127. package/package.json +6 -6
  128. package/dist/es/ai-model.mjs +0 -2502
  129. package/dist/es/ai-model.mjs.map +0 -1
  130. package/dist/lib/ai-model.js +0 -2622
  131. package/dist/lib/ai-model.js.map +0 -1
  132. package/dist/types/ai-model.d.ts +0 -596
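
package/dist/lib/ai-model.js (file 130 above; 2,622 lines removed, diff truncated below)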
@@ -1,2622 +0,0 @@
1
- "use strict";
2
- var __webpack_modules__ = {
3
- "langsmith/wrappers": function(module) {
4
- module.exports = import("langsmith/wrappers").then(function(module) {
5
- return module;
6
- });
7
- }
8
- };
9
- var __webpack_module_cache__ = {};
10
- function __webpack_require__(moduleId) {
11
- var cachedModule = __webpack_module_cache__[moduleId];
12
- if (void 0 !== cachedModule) return cachedModule.exports;
13
- var module = __webpack_module_cache__[moduleId] = {
14
- exports: {}
15
- };
16
- __webpack_modules__[moduleId](module, module.exports, __webpack_require__);
17
- return module.exports;
18
- }
19
- (()=>{
20
- __webpack_require__.n = (module)=>{
21
- var getter = module && module.__esModule ? ()=>module['default'] : ()=>module;
22
- __webpack_require__.d(getter, {
23
- a: getter
24
- });
25
- return getter;
26
- };
27
- })();
28
- (()=>{
29
- __webpack_require__.d = (exports1, definition)=>{
30
- for(var key in definition)if (__webpack_require__.o(definition, key) && !__webpack_require__.o(exports1, key)) Object.defineProperty(exports1, key, {
31
- enumerable: true,
32
- get: definition[key]
33
- });
34
- };
35
- })();
36
- (()=>{
37
- __webpack_require__.o = (obj, prop)=>Object.prototype.hasOwnProperty.call(obj, prop);
38
- })();
39
- (()=>{
40
- __webpack_require__.r = (exports1)=>{
41
- if ('undefined' != typeof Symbol && Symbol.toStringTag) Object.defineProperty(exports1, Symbol.toStringTag, {
42
- value: 'Module'
43
- });
44
- Object.defineProperty(exports1, '__esModule', {
45
- value: true
46
- });
47
- };
48
- })();
49
- var __webpack_exports__ = {};
50
- (()=>{
51
- __webpack_require__.r(__webpack_exports__);
52
- __webpack_require__.d(__webpack_exports__, {
53
- plan: ()=>llm_planning_plan,
54
- callAiFnWithStringResponse: ()=>callAiFnWithStringResponse,
55
- generateYamlTest: ()=>generateYamlTest,
56
- elementByPositionWithElementInfo: ()=>elementByPositionWithElementInfo,
57
- AiLocateSection: ()=>AiLocateSection,
58
- callAi: ()=>service_caller_call,
59
- AIActionType: ()=>common_AIActionType,
60
- generatePlaywrightTest: ()=>generatePlaywrightTest,
61
- generateYamlTestStream: ()=>generateYamlTestStream,
62
- AiExtractElementInfo: ()=>AiExtractElementInfo,
63
- resizeImageForUiTars: ()=>resizeImageForUiTars,
64
- systemPromptToLocateElement: ()=>systemPromptToLocateElement,
65
- vlmPlanning: ()=>vlmPlanning,
66
- AiAssert: ()=>AiAssert,
67
- adaptBboxToRect: ()=>adaptBboxToRect,
68
- AiLocateElement: ()=>AiLocateElement,
69
- callAiFn: ()=>callAiFn,
70
- describeUserPage: ()=>describeUserPage,
71
- callToGetJSONObject: ()=>callToGetJSONObject,
72
- generatePlaywrightTestStream: ()=>generatePlaywrightTestStream
73
- });
74
- var types_AIResponseFormat = /*#__PURE__*/ function(AIResponseFormat) {
75
- AIResponseFormat["JSON"] = "json_object";
76
- AIResponseFormat["TEXT"] = "text";
77
- return AIResponseFormat;
78
- }({});
79
- const sdk_namespaceObject = require("@anthropic-ai/sdk");
80
- const identity_namespaceObject = require("@azure/identity");
81
- const env_namespaceObject = require("@midscene/shared/env");
82
- const logger_namespaceObject = require("@midscene/shared/logger");
83
- const utils_namespaceObject = require("@midscene/shared/utils");
84
- const external_https_proxy_agent_namespaceObject = require("https-proxy-agent");
85
- const external_jsonrepair_namespaceObject = require("jsonrepair");
86
- const external_openai_namespaceObject = require("openai");
87
- var external_openai_default = /*#__PURE__*/ __webpack_require__.n(external_openai_namespaceObject);
88
- const external_socks_proxy_agent_namespaceObject = require("socks-proxy-agent");
89
- const constants_namespaceObject = require("@midscene/shared/constants");
90
- const extractor_namespaceObject = require("@midscene/shared/extractor");
91
- const img_namespaceObject = require("@midscene/shared/img");
92
- var common_AIActionType = /*#__PURE__*/ function(AIActionType) {
93
- AIActionType[AIActionType["ASSERT"] = 0] = "ASSERT";
94
- AIActionType[AIActionType["INSPECT_ELEMENT"] = 1] = "INSPECT_ELEMENT";
95
- AIActionType[AIActionType["EXTRACT_DATA"] = 2] = "EXTRACT_DATA";
96
- AIActionType[AIActionType["PLAN"] = 3] = "PLAN";
97
- AIActionType[AIActionType["DESCRIBE_ELEMENT"] = 4] = "DESCRIBE_ELEMENT";
98
- return AIActionType;
99
- }({});
100
- async function callAiFn(msgs, AIActionTypeValue) {
101
- const jsonObject = await callToGetJSONObject(msgs, AIActionTypeValue);
102
- return {
103
- content: jsonObject.content,
104
- usage: jsonObject.usage
105
- };
106
- }
107
- const defaultBboxSize = 20;
108
- const debugInspectUtils = (0, logger_namespaceObject.getDebug)('ai:common');
109
- function fillBboxParam(locate, width, height) {
110
- if (locate.bbox_2d && !(null == locate ? void 0 : locate.bbox)) {
111
- locate.bbox = locate.bbox_2d;
112
- delete locate.bbox_2d;
113
- }
114
- if (null == locate ? void 0 : locate.bbox) locate.bbox = adaptBbox(locate.bbox, width, height);
115
- return locate;
116
- }
117
- function adaptQwenBbox(bbox) {
118
- if (bbox.length < 2) {
119
- const msg = `invalid bbox data for qwen-vl mode: ${JSON.stringify(bbox)} `;
120
- throw new Error(msg);
121
- }
122
- const result = [
123
- Math.round(bbox[0]),
124
- Math.round(bbox[1]),
125
- 'number' == typeof bbox[2] ? Math.round(bbox[2]) : Math.round(bbox[0] + defaultBboxSize),
126
- 'number' == typeof bbox[3] ? Math.round(bbox[3]) : Math.round(bbox[1] + defaultBboxSize)
127
- ];
128
- return result;
129
- }
130
- function adaptDoubaoBbox(bbox, width, height) {
131
- (0, utils_namespaceObject.assert)(width > 0 && height > 0, 'width and height must be greater than 0 in doubao mode');
132
- if ('string' == typeof bbox) {
133
- (0, utils_namespaceObject.assert)(/^(\d+)\s(\d+)\s(\d+)\s(\d+)$/.test(bbox.trim()), `invalid bbox data string for doubao-vision mode: ${bbox}`);
134
- const splitted = bbox.split(' ');
135
- if (4 === splitted.length) return [
136
- Math.round(Number(splitted[0]) * width / 1000),
137
- Math.round(Number(splitted[1]) * height / 1000),
138
- Math.round(Number(splitted[2]) * width / 1000),
139
- Math.round(Number(splitted[3]) * height / 1000)
140
- ];
141
- throw new Error(`invalid bbox data string for doubao-vision mode: ${bbox}`);
142
- }
143
- if (Array.isArray(bbox) && Array.isArray(bbox[0])) bbox = bbox[0];
144
- let bboxList = [];
145
- if (Array.isArray(bbox) && 'string' == typeof bbox[0]) bbox.forEach((item)=>{
146
- if ('string' == typeof item && item.includes(',')) {
147
- const [x, y] = item.split(',');
148
- bboxList.push(Number(x.trim()), Number(y.trim()));
149
- } else if ('string' == typeof item && item.includes(' ')) {
150
- const [x, y] = item.split(' ');
151
- bboxList.push(Number(x.trim()), Number(y.trim()));
152
- } else bboxList.push(Number(item));
153
- });
154
- else bboxList = bbox;
155
- if (4 === bboxList.length || 5 === bboxList.length) return [
156
- Math.round(bboxList[0] * width / 1000),
157
- Math.round(bboxList[1] * height / 1000),
158
- Math.round(bboxList[2] * width / 1000),
159
- Math.round(bboxList[3] * height / 1000)
160
- ];
161
- if (6 === bboxList.length || 2 === bboxList.length || 3 === bboxList.length || 7 === bboxList.length) return [
162
- Math.max(0, Math.round(bboxList[0] * width / 1000) - defaultBboxSize / 2),
163
- Math.max(0, Math.round(bboxList[1] * height / 1000) - defaultBboxSize / 2),
164
- Math.min(width, Math.round(bboxList[0] * width / 1000) + defaultBboxSize / 2),
165
- Math.min(height, Math.round(bboxList[1] * height / 1000) + defaultBboxSize / 2)
166
- ];
167
- if (8 === bbox.length) return [
168
- Math.round(bboxList[0] * width / 1000),
169
- Math.round(bboxList[1] * height / 1000),
170
- Math.round(bboxList[4] * width / 1000),
171
- Math.round(bboxList[5] * height / 1000)
172
- ];
173
- const msg = `invalid bbox data for doubao-vision mode: ${JSON.stringify(bbox)} `;
174
- throw new Error(msg);
175
- }
176
- function adaptBbox(bbox, width, height) {
177
- if ('doubao-vision' === (0, env_namespaceObject.vlLocateMode)() || 'vlm-ui-tars' === (0, env_namespaceObject.vlLocateMode)()) return adaptDoubaoBbox(bbox, width, height);
178
- if ('gemini' === (0, env_namespaceObject.vlLocateMode)()) return adaptGeminiBbox(bbox, width, height);
179
- return adaptQwenBbox(bbox);
180
- }
181
- function adaptGeminiBbox(bbox, width, height) {
182
- const left = Math.round(bbox[1] * width / 1000);
183
- const top = Math.round(bbox[0] * height / 1000);
184
- const right = Math.round(bbox[3] * width / 1000);
185
- const bottom = Math.round(bbox[2] * height / 1000);
186
- return [
187
- left,
188
- top,
189
- right,
190
- bottom
191
- ];
192
- }
193
- function adaptBboxToRect(bbox, width, height, offsetX = 0, offsetY = 0) {
194
- debugInspectUtils('adaptBboxToRect', bbox, width, height, offsetX, offsetY);
195
- const [left, top, right, bottom] = adaptBbox(bbox, width, height);
196
- const rect = {
197
- left: left + offsetX,
198
- top: top + offsetY,
199
- width: right - left,
200
- height: bottom - top
201
- };
202
- debugInspectUtils('adaptBboxToRect, result=', rect);
203
- return rect;
204
- }
205
- let warned = false;
206
- function warnGPT4oSizeLimit(size) {
207
- var _getModelName;
208
- if (warned) return;
209
- if (null == (_getModelName = getModelName()) ? void 0 : _getModelName.toLowerCase().includes('gpt-4o')) {
210
- const warningMsg = `GPT-4o has a maximum image input size of 2000x768 or 768x2000, but got ${size.width}x${size.height}. Please set your page to a smaller resolution. Otherwise, the result may be inaccurate.`;
211
- if (Math.max(size.width, size.height) > 2000 || Math.min(size.width, size.height) > 768) {
212
- console.warn(warningMsg);
213
- warned = true;
214
- }
215
- } else if (size.width > 1800 || size.height > 1800) {
216
- console.warn(`The image size seems too large (${size.width}x${size.height}). It may lead to more token usage, slower response, and inaccurate result.`);
217
- warned = true;
218
- }
219
- }
220
- function mergeRects(rects) {
221
- const minLeft = Math.min(...rects.map((r)=>r.left));
222
- const minTop = Math.min(...rects.map((r)=>r.top));
223
- const maxRight = Math.max(...rects.map((r)=>r.left + r.width));
224
- const maxBottom = Math.max(...rects.map((r)=>r.top + r.height));
225
- return {
226
- left: minLeft,
227
- top: minTop,
228
- width: maxRight - minLeft,
229
- height: maxBottom - minTop
230
- };
231
- }
232
- function expandSearchArea(rect, screenSize) {
233
- const minEdgeSize = 'doubao-vision' === (0, env_namespaceObject.vlLocateMode)() ? 500 : 300;
234
- const defaultPadding = 160;
235
- const paddingSizeHorizontal = rect.width < minEdgeSize ? Math.ceil((minEdgeSize - rect.width) / 2) : defaultPadding;
236
- const paddingSizeVertical = rect.height < minEdgeSize ? Math.ceil((minEdgeSize - rect.height) / 2) : defaultPadding;
237
- rect.left = Math.max(0, rect.left - paddingSizeHorizontal);
238
- rect.width = Math.min(rect.width + 2 * paddingSizeHorizontal, screenSize.width - rect.left);
239
- rect.top = Math.max(0, rect.top - paddingSizeVertical);
240
- rect.height = Math.min(rect.height + 2 * paddingSizeVertical, screenSize.height - rect.top);
241
- return rect;
242
- }
243
- async function markupImageForLLM(screenshotBase64, tree, size) {
244
- const elementsInfo = (0, extractor_namespaceObject.treeToList)(tree);
245
- const elementsPositionInfoWithoutText = elementsInfo.filter((elementInfo)=>{
246
- if (elementInfo.attributes.nodeType === constants_namespaceObject.NodeType.TEXT) return false;
247
- return true;
248
- });
249
- const imagePayload = await (0, img_namespaceObject.compositeElementInfoImg)({
250
- inputImgBase64: screenshotBase64,
251
- elementsPositionInfo: elementsPositionInfoWithoutText,
252
- size
253
- });
254
- return imagePayload;
255
- }
256
- function buildYamlFlowFromPlans(plans, sleep) {
257
- const flow = [];
258
- for (const plan of plans){
259
- var _plan_locate;
260
- const type = plan.type;
261
- const locate = null == (_plan_locate = plan.locate) ? void 0 : _plan_locate.prompt;
262
- if ('Tap' === type) flow.push({
263
- aiTap: locate
264
- });
265
- else if ('Hover' === type) flow.push({
266
- aiHover: locate
267
- });
268
- else if ('Input' === type) {
269
- const param = plan.param;
270
- flow.push({
271
- aiInput: param.value,
272
- locate
273
- });
274
- } else if ('KeyboardPress' === type) {
275
- const param = plan.param;
276
- flow.push({
277
- aiKeyboardPress: param.value,
278
- locate
279
- });
280
- } else if ('Scroll' === type) {
281
- const param = plan.param;
282
- flow.push({
283
- aiScroll: null,
284
- locate,
285
- direction: param.direction,
286
- scrollType: param.scrollType,
287
- distance: param.distance
288
- });
289
- } else if ('Sleep' === type) {
290
- const param = plan.param;
291
- flow.push({
292
- sleep: param.timeMs
293
- });
294
- } else 'AndroidBackButton' === type || 'AndroidHomeButton' === type || 'AndroidRecentAppsButton' === type || 'AndroidLongPress' === type || 'AndroidPull' === type || 'Error' === type || 'Assert' === type || 'AssertWithoutThrow' === type || 'Finished' === type || console.warn(`Cannot convert action ${type} to yaml flow. This should be a bug of Midscene.`);
295
- }
296
- if (sleep) flow.push({
297
- sleep: sleep
298
- });
299
- return flow;
300
- }
301
- const defaultAssertionPrompt = 'You are a senior testing engineer. User will give an assertion and a screenshot of a page. By carefully viewing the screenshot, please tell whether the assertion is truthy.';
302
- const defaultAssertionResponseJsonFormat = `Return in the following JSON format:
303
- {
304
- pass: boolean, // whether the assertion is truthy
305
- thought: string | null, // string, if the result is falsy, give the reason why it is falsy. Otherwise, put null.
306
- }`;
307
- const getUiTarsAssertionResponseJsonFormat = ()=>`## Output Json String Format
308
- \`\`\`
309
- "{
310
- "pass": <<is a boolean value from the enum [true, false], true means the assertion is truthy>>,
311
- "thought": "<<is a string, give the reason why the assertion is falsy or truthy. Otherwise.>>"
312
- }"
313
- \`\`\`
314
-
315
- ## Rules **MUST** follow
316
- - Make sure to return **only** the JSON, with **no additional** text or explanations.
317
- - Use ${(0, env_namespaceObject.getPreferredLanguage)()} in \`thought\` part.
318
- - You **MUST** strictly follow up the **Output Json String Format**.`;
319
- function systemPromptToAssert(model) {
320
- return `${defaultAssertionPrompt}
321
-
322
- ${model.isUITars ? getUiTarsAssertionResponseJsonFormat() : defaultAssertionResponseJsonFormat}`;
323
- }
324
- const assertSchema = {
325
- type: 'json_schema',
326
- json_schema: {
327
- name: 'assert',
328
- strict: true,
329
- schema: {
330
- type: 'object',
331
- properties: {
332
- pass: {
333
- type: 'boolean',
334
- description: 'Whether the assertion passed or failed'
335
- },
336
- thought: {
337
- type: [
338
- 'string',
339
- 'null'
340
- ],
341
- description: 'The thought process behind the assertion'
342
- }
343
- },
344
- required: [
345
- 'pass',
346
- 'thought'
347
- ],
348
- additionalProperties: false
349
- }
350
- }
351
- };
352
- const prompts_namespaceObject = require("@langchain/core/prompts");
353
- function bboxDescription(vlMode) {
354
- if ('gemini' === vlMode) return '2d bounding box as [ymin, xmin, ymax, xmax]';
355
- return '2d bounding box as [xmin, ymin, xmax, ymax]';
356
- }
357
- function systemPromptToLocateElement(vlMode) {
358
- if (vlMode) {
359
- const bboxComment = bboxDescription(vlMode);
360
- return `
361
- ## Role:
362
- You are an expert in software testing.
363
-
364
- ## Objective:
365
- - Identify elements in screenshots and text that match the user's description.
366
- - Give the coordinates of the element that matches the user's description best in the screenshot.
367
- - Determine whether the user's description is order-sensitive (e.g., contains phrases like 'the third item in the list', 'the last button', etc.).
368
-
369
- ## Output Format:
370
- \`\`\`json
371
- {
372
- "bbox": [number, number, number, number], // ${bboxComment}
373
- "errors"?: string[],
374
- "isOrderSensitive": boolean // Whether the targetElementDescription is order-sensitive (true/false)
375
- }
376
- \`\`\`
377
-
378
- Fields:
379
- * \`bbox\` is the bounding box of the element that matches the user's description best in the screenshot
380
- * \`isOrderSensitive\` is a boolean indicating whether the user's description is order-sensitive (true/false)
381
- * \`errors\` is an optional array of error messages (if any)
382
-
383
- Order-sensitive means the description contains phrases like:
384
- - "the third item in the list"
385
- - "the last button"
386
- - "the first input box"
387
- - "the second row"
388
-
389
- Not order-sensitive means the description is like:
390
- - "confirm button"
391
- - "search box"
392
- - "password input"
393
-
394
- For example, when an element is found and the description is order-sensitive:
395
- \`\`\`json
396
- {
397
- "bbox": [100, 100, 200, 200],
398
- "isOrderSensitive": true,
399
- "errors": []
400
- }
401
- \`\`\`
402
-
403
- When no element is found and the description is not order-sensitive:
404
- \`\`\`json
405
- {
406
- "bbox": [],
407
- "isOrderSensitive": false,
408
- "errors": ["I can see ..., but {some element} is not found"]
409
- }
410
- \`\`\`
411
- `;
412
- }
413
- return `
414
- ## Role:
415
- You are an expert in software page image (2D) and page element text analysis.
416
-
417
- ## Objective:
418
- - Identify elements in screenshots and text that match the user's description.
419
- - Return JSON data containing the selection reason and element ID.
420
- - Determine whether the user's description is order-sensitive (e.g., contains phrases like 'the third item in the list', 'the last button', etc.).
421
-
422
- ## Skills:
423
- - Image analysis and recognition
424
- - Multilingual text understanding
425
- - Software UI design and testing
426
-
427
- ## Workflow:
428
- 1. Receive the user's element description, screenshot, and element description information. Note that the text may contain non-English characters (e.g., Chinese), indicating that the application may be non-English.
429
- 2. Based on the user's description, locate the target element ID in the list of element descriptions and the screenshot.
430
- 3. Found the required number of elements
431
- 4. Return JSON data containing the selection reason and element ID.
432
- 5. Judge whether the user's description is order-sensitive (see below for definition and examples).
433
-
434
- ## Constraints:
435
- - Strictly adhere to the specified location when describing the required element; do not select elements from other locations.
436
- - Elements in the image with NodeType other than "TEXT Node" have been highlighted to identify the element among multiple non-text elements.
437
- - Accurately identify element information based on the user's description and return the corresponding element ID from the element description information, not extracted from the image.
438
- - If no elements are found, the "elements" array should be empty.
439
- - The returned data must conform to the specified JSON format.
440
- - The returned value id information must use the id from element info (important: **use id not indexId, id is hash content**)
441
-
442
- ## Order-Sensitive Definition:
443
- - If the description contains phrases like "the third item in the list", "the last button", "the first input box", "the second row", etc., it is order-sensitive (isOrderSensitive = true).
444
- - If the description is like "confirm button", "search box", "password input", etc., it is not order-sensitive (isOrderSensitive = false).
445
-
446
- ## Output Format:
447
-
448
- Please return the result in JSON format as follows:
449
-
450
- \`\`\`json
451
- {
452
- "elements": [
453
- // If no matching elements are found, return an empty array []
454
- {
455
- "reason": "PLACEHOLDER", // The thought process for finding the element, replace PLACEHOLDER with your thought process
456
- "text": "PLACEHOLDER", // Replace PLACEHOLDER with the text of elementInfo, if none, leave empty
457
- "id": "PLACEHOLDER" // Replace PLACEHOLDER with the ID (important: **use id not indexId, id is hash content**) of elementInfo
458
- }
459
- // More elements...
460
- ],
461
- "isOrderSensitive": true, // or false, depending on the user's description
462
- "errors": [] // Array of strings containing any error messages
463
- }
464
- \`\`\`
465
-
466
- ## Example:
467
- Example 1:
468
- Input Example:
469
- \`\`\`json
470
- // Description: "Shopping cart icon in the upper right corner"
471
- {
472
- "description": "PLACEHOLDER", // Description of the target element
473
- "screenshot": "path/screenshot.png",
474
- "text": '{
475
- "pageSize": {
476
- "width": 400, // Width of the page
477
- "height": 905 // Height of the page
478
- },
479
- "elementInfos": [
480
- {
481
- "id": "1231", // ID of the element
482
- "indexId": "0", // Index of the element\u{FF0C}The image is labeled to the left of the element
483
- "attributes": { // Attributes of the element
484
- "nodeType": "IMG Node", // Type of element, types include: TEXT Node, IMG Node, BUTTON Node, INPUT Node
485
- "src": "https://ap-southeast-3.m",
486
- "class": ".img"
487
- },
488
- "content": "", // Text content of the element
489
- "rect": {
490
- "left": 280, // Distance from the left side of the page
491
- "top": 8, // Distance from the top of the page
492
- "width": 44, // Width of the element
493
- "height": 44 // Height of the element
494
- }
495
- },
496
- {
497
- "id": "66551", // ID of the element
498
- "indexId": "1", // Index of the element,The image is labeled to the left of the element
499
- "attributes": { // Attributes of the element
500
- "nodeType": "IMG Node", // Type of element, types include: TEXT Node, IMG Node, BUTTON Node, INPUT Node
501
- "src": "data:image/png;base64,iVBORw0KGgoAAAANSU...",
502
- "class": ".icon"
503
- },
504
- "content": "", // Text content of the element
505
- "rect": {
506
- "left": 350, // Distance from the left side of the page
507
- "top": 16, // Distance from the top of the page
508
- "width": 25, // Width of the element
509
- "height": 25 // Height of the element
510
- }
511
- },
512
- ...
513
- {
514
- "id": "12344",
515
- "indexId": "2", // Index of the element\u{FF0C}The image is labeled to the left of the element
516
- "attributes": {
517
- "nodeType": "TEXT Node",
518
- "class": ".product-name"
519
- },
520
- "center": [
521
- 288,
522
- 834
523
- ],
524
- "content": "Mango Drink",
525
- "rect": {
526
- "left": 188,
527
- "top": 827,
528
- "width": 199,
529
- "height": 13
530
- }
531
- },
532
- ...
533
- ]
534
- }
535
- '
536
- }
537
- \`\`\`
538
- Output Example:
539
- \`\`\`json
540
- {
541
- "elements": [
542
- {
543
- // Describe the reason for finding this element, replace with actual value in practice
544
- "reason": "Reason for finding element 4: It is located in the upper right corner, is an image type, and according to the screenshot, it is a shopping cart icon button",
545
- "text": "",
546
- // ID(**use id not indexId**) of this element, replace with actual value in practice, **use id not indexId**
547
- "id": "1231"
548
- }
549
- ],
550
- "isOrderSensitive": true,
551
- "errors": []
552
- }
553
- \`\`\`
554
-
555
- `;
556
- }
557
- const locatorSchema = {
558
- type: 'json_schema',
559
- json_schema: {
560
- name: 'find_elements',
561
- strict: true,
562
- schema: {
563
- type: 'object',
564
- properties: {
565
- elements: {
566
- type: 'array',
567
- items: {
568
- type: 'object',
569
- properties: {
570
- reason: {
571
- type: 'string',
572
- description: 'Reason for finding this element'
573
- },
574
- text: {
575
- type: 'string',
576
- description: 'Text content of the element'
577
- },
578
- id: {
579
- type: 'string',
580
- description: 'ID of this element'
581
- }
582
- },
583
- required: [
584
- 'reason',
585
- 'text',
586
- 'id'
587
- ],
588
- additionalProperties: false
589
- },
590
- description: 'List of found elements'
591
- },
592
- isOrderSensitive: {
593
- type: 'boolean',
594
- description: "Whether the targetElementDescription is order-sensitive (true/false)"
595
- },
596
- errors: {
597
- type: 'array',
598
- items: {
599
- type: 'string'
600
- },
601
- description: 'List of error messages, if any'
602
- }
603
- },
604
- required: [
605
- 'elements',
606
- 'isOrderSensitive',
607
- 'errors'
608
- ],
609
- additionalProperties: false
610
- }
611
- }
612
- };
613
- const findElementPrompt = new prompts_namespaceObject.PromptTemplate({
614
- template: `
615
- Here is the item user want to find:
616
- =====================================
617
- {targetElementDescription}
618
- =====================================
619
-
620
- {pageDescription}
621
- `,
622
- inputVariables: [
623
- "pageDescription",
624
- "targetElementDescription"
625
- ]
626
- });
627
- const external_node_assert_namespaceObject = require("node:assert");
628
- var external_node_assert_default = /*#__PURE__*/ __webpack_require__.n(external_node_assert_namespaceObject);
629
- const vlCoTLog = '"what_the_user_wants_to_do_next_by_instruction": string, // What the user wants to do according to the instruction and previous logs. ';
630
- const vlCurrentLog = '"log": string, // Log what the next one action (ONLY ONE!) you can do according to the screenshot and the instruction. The typical log looks like "Now i want to use action \'{ action-type }\' to do .. first". If no action should be done, log the reason. ". Use the same language as the user\'s instruction.';
631
- const llmCurrentLog = '"log": string, // Log what the next actions you can do according to the screenshot and the instruction. The typical log looks like "Now i want to use action \'{ action-type }\' to do ..". If no action should be done, log the reason. ". Use the same language as the user\'s instruction.';
632
- const commonOutputFields = `"error"?: string, // Error messages about unexpected situations, if any. Only think it is an error when the situation is not foreseeable according to the instruction. Use the same language as the user's instruction.
633
- "more_actions_needed_by_instruction": boolean, // Consider if there is still more action(s) to do after the action in "Log" is done, according to the instruction. If so, set this field to true. Otherwise, set it to false.`;
634
- const vlLocateParam = (required)=>`locate${required ? '' : '?'}: {bbox: [number, number, number, number], prompt: string }`;
635
- const llmLocateParam = (required)=>`locate${required ? '' : '?'}: {"id": string, "prompt": string}`;
636
- const descriptionForAction = (action, locatorScheme)=>{
637
- const tab = ' ';
638
- let locateParam = '';
639
- if ('required' === action.location) locateParam = locatorScheme;
640
- else if ('optional' === action.location) locateParam = `${locatorScheme} | null`;
641
- else if (false === action.location) locateParam = '';
642
- const locatorParam = locateParam ? `- ${locateParam}` : '';
643
- if (action.whatToLocate) if (locateParam) locateParam += ` // ${action.whatToLocate}`;
644
- else console.warn(`whatToLocate is provided for action ${action.name}, but location is not required or optional. The whatToLocate will be ignored.`);
645
- let paramSchema = '';
646
- if (action.paramSchema) paramSchema = `- param: ${action.paramSchema}`;
647
- if (action.paramDescription) {
648
- external_node_assert_default()(paramSchema, `paramSchema is required when paramDescription is provided for action ${action.name}, but got ${action.paramSchema}`);
649
- paramSchema += ` // ${action.paramDescription}`;
650
- }
651
- const fields = [
652
- paramSchema,
653
- locatorParam
654
- ].filter(Boolean);
655
- return `- ${action.name}, ${action.description}
656
- ${tab}- type: "${action.name}"
657
- ${tab}${fields.join(`\n${tab}`)}
658
- `.trim();
659
- };
660
- const systemTemplateOfVLPlanning = ({ actionSpace, vlMode })=>{
661
- const actionNameList = actionSpace.map((action)=>action.name).join(', ');
662
- const actionDescriptionList = actionSpace.map((action)=>descriptionForAction(action, vlLocateParam('required' === action.location)));
663
- const actionList = actionDescriptionList.join('\n');
664
- return `
665
- Target: User will give you a screenshot, an instruction and some previous logs indicating what have been done. Please tell what the next one action is (or null if no action should be done) to do the tasks the instruction requires.
666
-
667
- Restriction:
668
- - Don't give extra actions or plans beyond the instruction. ONLY plan for what the instruction requires. For example, don't try to submit the form if the instruction is only to fill something.
669
- - Always give ONLY ONE action in \`log\` field (or null if no action should be done), instead of multiple actions. Supported actions are ${actionNameList}.
670
- - Don't repeat actions in the previous logs.
671
- - Bbox is the bounding box of the element to be located. It's an array of 4 numbers, representing ${bboxDescription(vlMode)}.
672
-
673
- Supporting actions:
674
- ${actionList}
675
-
676
- Field description:
677
- * The \`prompt\` field inside the \`locate\` field is a short description that could be used to locate the element.
678
-
679
- Return in JSON format:
680
- {
681
- ${vlCoTLog}
682
- ${vlCurrentLog}
683
- ${commonOutputFields}
684
- "action":
685
- {
686
- // one of the supporting actions
687
- } | null,
688
- ,
689
- "sleep"?: number, // The sleep time after the action, in milliseconds.
690
- }
691
-
692
- For example, when the instruction is "click 'Confirm' button, and click 'Yes' in popup" and the log is "I will use action Tap to click 'Confirm' button", by viewing the screenshot and previous logs, you should consider: We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup.
693
-
694
- this and output the JSON:
695
-
696
- {
697
- "what_the_user_wants_to_do_next_by_instruction": "We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup",
698
- "log": "I will use action Tap to click 'Yes' in popup",
699
- "more_actions_needed_by_instruction": false,
700
- "action": {
701
- "type": "Tap",
702
- "locate": {
703
- "bbox": [100, 100, 200, 200],
704
- "prompt": "The 'Yes' button in popup"
705
- }
706
- }
707
- }
708
- `;
709
- };
710
- const systemTemplateOfLLM = ({ actionSpace })=>{
711
- const actionNameList = actionSpace.map((action)=>action.name).join(' / ');
712
- const actionDescriptionList = actionSpace.map((action)=>descriptionForAction(action, llmLocateParam('required' === action.location)));
713
- const actionList = actionDescriptionList.join('\n');
714
- return `
715
- ## Role
716
-
717
- You are a versatile professional in software UI automation. Your outstanding contributions will impact the user experience of billions of users.
718
-
719
- ## Objective
720
-
721
- - Decompose the instruction user asked into a series of actions
722
- - Locate the target element if possible
723
- - If the instruction cannot be accomplished, give a further plan.
724
-
725
- ## Workflow
726
-
727
- 1. Receive the screenshot, element description of screenshot(if any), user's instruction and previous logs.
728
- 2. Decompose the user's task into a sequence of feasible actions, and place it in the \`actions\` field. There are different types of actions (${actionNameList}). The "About the action" section below will give you more details.
729
- 3. Consider whether the user's instruction will be accomplished after the actions you composed.
730
- - If the instruction is accomplished, set \`more_actions_needed_by_instruction\` to false.
731
- - If more actions are needed, set \`more_actions_needed_by_instruction\` to true. Get ready to hand over to the next talent people like you. Carefully log what have been done in the \`log\` field, he or she will continue the task according to your logs.
732
- 4. If the task is not feasible on this page, set \`error\` field to the reason.
733
-
734
- ## Constraints
735
-
736
- - All the actions you composed MUST be feasible, which means all the action fields can be filled with the page context information you get. If not, don't plan this action.
737
- - Trust the "What have been done" field about the task (if any), don't repeat actions in it.
738
- - Respond only with valid JSON. Do not write an introduction or summary or markdown prefix like \`\`\`json\`\`\`.
739
- - If the screenshot and the instruction are totally irrelevant, set reason in the \`error\` field.
740
-
741
- ## About the \`actions\` field
742
-
743
- The \`locate\` param is commonly used in the \`param\` field of the action, means to locate the target element to perform the action, it conforms to the following scheme:
744
-
745
- type LocateParam = {
746
- "id": string, // the id of the element found. It should either be the id marked with a rectangle in the screenshot or the id described in the description.
747
- "prompt"?: string // the description of the element to find. It can only be omitted when locate is null.
748
- } | null // If it's not on the page, the LocateParam should be null
749
-
750
- ## Supported actions
751
-
752
- Each action has a \`type\` and corresponding \`param\`. To be detailed:
753
- ${actionList}
754
-
755
- `.trim();
756
- };
757
- const outputTemplate = `
758
- ## Output JSON Format:
759
-
760
- The JSON format is as follows:
761
-
762
- {
763
- "actions": [
764
- // ... some actions
765
- ],
766
- ${llmCurrentLog}
767
- ${commonOutputFields}
768
- }
769
-
770
- ## Examples
771
-
772
- ### Example: Decompose a task
773
-
774
- When you received the following information:
775
-
776
- * Instruction: 'Click the language switch button, wait 1s, click "English"'
777
- * Logs: null
778
- * Page Context (screenshot and description) shows: There is a language switch button, and the "English" option is not shown in the screenshot now.
779
-
780
- By viewing the page screenshot and description, you should consider this and output the JSON:
781
-
782
- * The user intent is: tap the switch button, sleep, and tap the 'English' option
783
- * The language switch button is shown in the screenshot, and can be located by the page description or the id marked with a rectangle. So we can plan a Tap action to do this.
784
- * Plan a Sleep action to wait for 1 second to ensure the language options are displayed.
785
- * The "English" option button is not shown in the screenshot now, it means it may only show after the previous actions are finished. So don't plan any action to do this.
786
- * Log what these action do: Click the language switch button to open the language options. Wait for 1 second.
787
- * The task cannot be accomplished (because the last tapping action is not finished yet), so the \`more_actions_needed_by_instruction\` field is true. The \`error\` field is null.
788
-
789
- {
790
- "actions":[
791
- {
792
- "thought": "Click the language switch button to open the language options.",
793
- "type": "Tap",
794
- "param": null,
795
- "locate": { id: "c81c4e9a33", prompt: "The language switch button" }},
796
- },
797
- {
798
- "thought": "Wait for 1 second to ensure the language options are displayed.",
799
- "type": "Sleep",
800
- "param": { "timeMs": 1000 },
801
- }
802
- ],
803
- "error": null,
804
- "more_actions_needed_by_instruction": true,
805
- "log": "Click the language switch button to open the language options. Wait for 1 second",
806
- }
807
-
808
- ### Example: What NOT to do
809
- Wrong output:
810
- {
811
- "actions":[
812
- {
813
- "thought": "Click the language switch button to open the language options.",
814
- "type": "Tap",
815
- "param": null,
816
- "locate": {
817
- { "id": "c81c4e9a33" }, // WRONG: prompt is missing, this is not a valid LocateParam
818
- }
819
- },
820
- {
821
- "thought": "Click the English option",
822
- "type": "Tap",
823
- "param": null,
824
- "locate": null, // This means the 'English' option is not shown in the screenshot, the task cannot be accomplished
825
- }
826
- ],
827
- "more_actions_needed_by_instruction": false, // WRONG: should be true
828
- "log": "Click the language switch button to open the language options",
829
- }
830
- `;
831
- async function systemPromptToTaskPlanning({ actionSpace, vlMode }) {
832
- if (vlMode) return systemTemplateOfVLPlanning({
833
- actionSpace,
834
- vlMode
835
- });
836
- return `${systemTemplateOfLLM({
837
- actionSpace
838
- })}\n\n${outputTemplate}`;
839
- }
840
- const planSchema = {
841
- type: 'json_schema',
842
- json_schema: {
843
- name: 'action_items',
844
- strict: false,
845
- schema: {
846
- type: 'object',
847
- strict: false,
848
- properties: {
849
- actions: {
850
- type: 'array',
851
- items: {
852
- type: 'object',
853
- strict: false,
854
- properties: {
855
- thought: {
856
- type: 'string',
857
- description: 'Reasons for generating this task, and why this task is feasible on this page'
858
- },
859
- type: {
860
- type: 'string',
861
- description: 'Type of action'
862
- },
863
- param: {
864
- anyOf: [
865
- {
866
- type: 'null'
867
- },
868
- {
869
- type: 'object',
870
- additionalProperties: true
871
- }
872
- ],
873
- description: 'Parameter of the action'
874
- },
875
- locate: {
876
- type: [
877
- 'object',
878
- 'null'
879
- ],
880
- properties: {
881
- id: {
882
- type: 'string'
883
- },
884
- prompt: {
885
- type: 'string'
886
- }
887
- },
888
- required: [
889
- 'id',
890
- 'prompt'
891
- ],
892
- additionalProperties: false,
893
- description: 'Location information for the target element'
894
- }
895
- },
896
- required: [
897
- 'thought',
898
- 'type',
899
- 'param',
900
- 'locate'
901
- ],
902
- additionalProperties: false
903
- },
904
- description: 'List of actions to be performed'
905
- },
906
- more_actions_needed_by_instruction: {
907
- type: 'boolean',
908
- description: 'If all the actions described in the instruction have been covered by this action and logs, set this field to false.'
909
- },
910
- log: {
911
- type: 'string',
912
- description: 'Log what these planned actions do. Do not include further actions that have not been planned.'
913
- },
914
- error: {
915
- type: [
916
- 'string',
917
- 'null'
918
- ],
919
- description: 'Error messages about unexpected situations'
920
- }
921
- },
922
- required: [
923
- 'actions',
924
- 'more_actions_needed_by_instruction',
925
- 'log',
926
- 'error'
927
- ],
928
- additionalProperties: false
929
- }
930
- }
931
- };
932
- const generateTaskBackgroundContext = (userInstruction, log, userActionContext)=>{
933
- if (log) return `
934
- Here is the user's instruction:
935
-
936
- <instruction>
937
- <high_priority_knowledge>
938
- ${userActionContext}
939
- </high_priority_knowledge>
940
-
941
- ${userInstruction}
942
- </instruction>
943
-
944
- These are the logs from previous executions, which indicate what was done in the previous actions.
945
- Do NOT repeat these actions.
946
- <previous_logs>
947
- ${log}
948
- </previous_logs>
949
- `;
950
- return `
951
- Here is the user's instruction:
952
- <instruction>
953
- <high_priority_knowledge>
954
- ${userActionContext}
955
- </high_priority_knowledge>
956
-
957
- ${userInstruction}
958
- </instruction>
959
- `;
960
- };
961
- const automationUserPrompt = (vlMode)=>{
962
- if (vlMode) return new prompts_namespaceObject.PromptTemplate({
963
- template: '{taskBackgroundContext}',
964
- inputVariables: [
965
- 'taskBackgroundContext'
966
- ]
967
- });
968
- return new prompts_namespaceObject.PromptTemplate({
969
- template: `
970
- pageDescription:
971
- =====================================
972
- {pageDescription}
973
- =====================================
974
-
975
- {taskBackgroundContext}`,
976
- inputVariables: [
977
- "pageDescription",
978
- 'taskBackgroundContext'
979
- ]
980
- });
981
- };
982
- function checkAIConfig() {
983
- const openaiKey = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_API_KEY);
984
- const azureConfig = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_USE_AZURE_OPENAI);
985
- const anthropicKey = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.ANTHROPIC_API_KEY);
986
- const initConfigJson = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_OPENAI_INIT_CONFIG_JSON);
987
- if (openaiKey) return true;
988
- if (azureConfig) return true;
989
- if (anthropicKey) return true;
990
- return Boolean(initConfigJson);
991
- }
992
- let debugConfigInitialized = false;
993
- function initDebugConfig() {
994
- if (debugConfigInitialized) return;
995
- const shouldPrintTiming = (0, env_namespaceObject.getAIConfigInBoolean)(env_namespaceObject.MIDSCENE_DEBUG_AI_PROFILE);
996
- let debugConfig = '';
997
- if (shouldPrintTiming) {
998
- console.warn('MIDSCENE_DEBUG_AI_PROFILE is deprecated, use DEBUG=midscene:ai:profile instead');
999
- debugConfig = 'ai:profile';
1000
- }
1001
- const shouldPrintAIResponse = (0, env_namespaceObject.getAIConfigInBoolean)(env_namespaceObject.MIDSCENE_DEBUG_AI_RESPONSE);
1002
- if (shouldPrintAIResponse) {
1003
- console.warn('MIDSCENE_DEBUG_AI_RESPONSE is deprecated, use DEBUG=midscene:ai:response instead');
1004
- debugConfig = debugConfig ? 'ai:*' : 'ai:call';
1005
- }
1006
- if (debugConfig) (0, logger_namespaceObject.enableDebug)(debugConfig);
1007
- debugConfigInitialized = true;
1008
- }
1009
- const defaultModel = 'gpt-4o';
1010
- function getModelName() {
1011
- let modelName = defaultModel;
1012
- const nameInConfig = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_MODEL_NAME);
1013
- if (nameInConfig) modelName = nameInConfig;
1014
- return modelName;
1015
- }
1016
- async function createChatClient({ AIActionTypeValue }) {
1017
- initDebugConfig();
1018
- let openai;
1019
- const extraConfig = (0, env_namespaceObject.getAIConfigInJson)(env_namespaceObject.MIDSCENE_OPENAI_INIT_CONFIG_JSON);
1020
- const socksProxy = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_OPENAI_SOCKS_PROXY);
1021
- const httpProxy = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_OPENAI_HTTP_PROXY);
1022
- let proxyAgent;
1023
- const debugProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy');
1024
- if (httpProxy) {
1025
- debugProxy('using http proxy', httpProxy);
1026
- proxyAgent = new external_https_proxy_agent_namespaceObject.HttpsProxyAgent(httpProxy);
1027
- } else if (socksProxy) {
1028
- debugProxy('using socks proxy', socksProxy);
1029
- proxyAgent = new external_socks_proxy_agent_namespaceObject.SocksProxyAgent(socksProxy);
1030
- }
1031
- if ((0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_USE_AZURE)) openai = new external_openai_namespaceObject.AzureOpenAI({
1032
- baseURL: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_BASE_URL),
1033
- apiKey: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_API_KEY),
1034
- httpAgent: proxyAgent,
1035
- ...extraConfig,
1036
- dangerouslyAllowBrowser: true
1037
- });
1038
- else if ((0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_USE_AZURE_OPENAI)) {
1039
- const extraAzureConfig = (0, env_namespaceObject.getAIConfigInJson)(env_namespaceObject.MIDSCENE_AZURE_OPENAI_INIT_CONFIG_JSON);
1040
- const scope = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_AZURE_OPENAI_SCOPE);
1041
- let tokenProvider;
1042
- if (scope) {
1043
- (0, utils_namespaceObject.assert)(!utils_namespaceObject.ifInBrowser, 'Azure OpenAI is not supported in browser with Midscene.');
1044
- const credential = new identity_namespaceObject.DefaultAzureCredential();
1045
- (0, utils_namespaceObject.assert)(scope, 'MIDSCENE_AZURE_OPENAI_SCOPE is required');
1046
- tokenProvider = (0, identity_namespaceObject.getBearerTokenProvider)(credential, scope);
1047
- openai = new external_openai_namespaceObject.AzureOpenAI({
1048
- azureADTokenProvider: tokenProvider,
1049
- endpoint: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_ENDPOINT),
1050
- apiVersion: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_API_VERSION),
1051
- deployment: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_DEPLOYMENT),
1052
- ...extraConfig,
1053
- ...extraAzureConfig
1054
- });
1055
- } else openai = new external_openai_namespaceObject.AzureOpenAI({
1056
- apiKey: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_KEY),
1057
- endpoint: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_ENDPOINT),
1058
- apiVersion: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_API_VERSION),
1059
- deployment: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.AZURE_OPENAI_DEPLOYMENT),
1060
- dangerouslyAllowBrowser: true,
1061
- ...extraConfig,
1062
- ...extraAzureConfig
1063
- });
1064
- } else if (!(0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_USE_ANTHROPIC_SDK)) {
1065
- const baseURL = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_BASE_URL);
1066
- if ('string' == typeof baseURL) {
1067
- if (!/^https?:\/\//.test(baseURL)) throw new Error(`OPENAI_BASE_URL must be a valid URL starting with http:// or https://, but got: ${baseURL}\nPlease check your config.`);
1068
- }
1069
- openai = new (external_openai_default())({
1070
- baseURL: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_BASE_URL),
1071
- apiKey: (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_API_KEY),
1072
- httpAgent: proxyAgent,
1073
- ...extraConfig,
1074
- defaultHeaders: {
1075
- ...(null == extraConfig ? void 0 : extraConfig.defaultHeaders) || {},
1076
- [env_namespaceObject.MIDSCENE_API_TYPE]: AIActionTypeValue.toString()
1077
- },
1078
- dangerouslyAllowBrowser: true
1079
- });
1080
- }
1081
- if (openai && (0, env_namespaceObject.getAIConfigInBoolean)(env_namespaceObject.MIDSCENE_LANGSMITH_DEBUG)) {
1082
- if (utils_namespaceObject.ifInBrowser) throw new Error('langsmith is not supported in browser');
1083
- console.log('DEBUGGING MODE: langsmith wrapper enabled');
1084
- const { wrapOpenAI } = await Promise.resolve().then(__webpack_require__.bind(__webpack_require__, "langsmith/wrappers"));
1085
- openai = wrapOpenAI(openai);
1086
- }
1087
- if (void 0 !== openai) return {
1088
- completion: openai.chat.completions,
1089
- style: 'openai'
1090
- };
1091
- if ((0, env_namespaceObject.getAIConfig)(env_namespaceObject.MIDSCENE_USE_ANTHROPIC_SDK)) {
1092
- const apiKey = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.ANTHROPIC_API_KEY);
1093
- (0, utils_namespaceObject.assert)(apiKey, 'ANTHROPIC_API_KEY is required');
1094
- openai = new sdk_namespaceObject.Anthropic({
1095
- apiKey,
1096
- httpAgent: proxyAgent,
1097
- dangerouslyAllowBrowser: true
1098
- });
1099
- }
1100
- if (void 0 !== openai && openai.messages) return {
1101
- completion: openai.messages,
1102
- style: 'anthropic'
1103
- };
1104
- throw new Error('Openai SDK or Anthropic SDK is not initialized');
1105
- }
1106
- async function service_caller_call(messages, AIActionTypeValue, responseFormat, options) {
- (0, utils_namespaceObject.assert)(checkAIConfig(), 'Cannot find config for AI model service. If you are using a self-hosted model without validating the API key, please set `OPENAI_API_KEY` to any non-null value. https://midscenejs.com/model-provider.html');
- const { completion, style } = await createChatClient({
- AIActionTypeValue
- });
- const maxTokens = (0, env_namespaceObject.getAIConfig)(env_namespaceObject.OPENAI_MAX_TOKENS);
- const debugCall = (0, logger_namespaceObject.getDebug)('ai:call');
- const debugProfileStats = (0, logger_namespaceObject.getDebug)('ai:profile:stats');
- const debugProfileDetail = (0, logger_namespaceObject.getDebug)('ai:profile:detail');
- const startTime = Date.now();
- const model = getModelName();
- const isStreaming = (null == options ? void 0 : options.stream) && (null == options ? void 0 : options.onChunk);
- let content;
- let accumulated = '';
- let usage;
- let timeCost;
- const commonConfig = {
- temperature: 'vlm-ui-tars' === (0, env_namespaceObject.vlLocateMode)() ? 0.0 : 0.1,
- stream: !!isStreaming,
- max_tokens: 'number' == typeof maxTokens ? maxTokens : Number.parseInt(maxTokens || '2048', 10),
- ...'qwen-vl' === (0, env_namespaceObject.vlLocateMode)() ? {
- vl_high_resolution_images: true
- } : {}
- };
- try {
- if ('openai' === style) {
- debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${model}`);
- if (isStreaming) {
- const stream = await completion.create({
- model,
- messages,
- response_format: responseFormat,
- ...commonConfig
- }, {
- stream: true
- });
- for await (const chunk of stream){
- var _chunk_choices__delta, _chunk_choices_, _chunk_choices, _chunk_choices__delta1, _chunk_choices_1, _chunk_choices1, _chunk_choices_2, _chunk_choices2;
- const content = (null == (_chunk_choices = chunk.choices) ? void 0 : null == (_chunk_choices_ = _chunk_choices[0]) ? void 0 : null == (_chunk_choices__delta = _chunk_choices_.delta) ? void 0 : _chunk_choices__delta.content) || '';
- const reasoning_content = (null == (_chunk_choices1 = chunk.choices) ? void 0 : null == (_chunk_choices_1 = _chunk_choices1[0]) ? void 0 : null == (_chunk_choices__delta1 = _chunk_choices_1.delta) ? void 0 : _chunk_choices__delta1.reasoning_content) || '';
- if (chunk.usage) usage = chunk.usage;
- if (content || reasoning_content) {
- accumulated += content;
- const chunkData = {
- content,
- reasoning_content,
- accumulated,
- isComplete: false,
- usage: void 0
- };
- options.onChunk(chunkData);
- }
- if (null == (_chunk_choices2 = chunk.choices) ? void 0 : null == (_chunk_choices_2 = _chunk_choices2[0]) ? void 0 : _chunk_choices_2.finish_reason) {
- timeCost = Date.now() - startTime;
- if (!usage) {
- const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
- usage = {
- prompt_tokens: estimatedTokens,
- completion_tokens: estimatedTokens,
- total_tokens: 2 * estimatedTokens
- };
- }
- const finalChunk = {
- content: '',
- accumulated,
- reasoning_content: '',
- isComplete: true,
- usage: {
- prompt_tokens: usage.prompt_tokens ?? 0,
- completion_tokens: usage.completion_tokens ?? 0,
- total_tokens: usage.total_tokens ?? 0,
- time_cost: timeCost ?? 0
- }
- };
- options.onChunk(finalChunk);
- break;
- }
- }
- content = accumulated;
- debugProfileStats(`streaming model, ${model}, mode, ${(0, env_namespaceObject.vlLocateMode)() || 'default'}, cost-ms, ${timeCost}`);
- } else {
- var _result_usage, _result_usage1, _result_usage2;
- const result = await completion.create({
- model,
- messages,
- response_format: responseFormat,
- ...commonConfig
- });
- timeCost = Date.now() - startTime;
- debugProfileStats(`model, ${model}, mode, ${(0, env_namespaceObject.vlLocateMode)() || 'default'}, ui-tars-version, ${(0, env_namespaceObject.uiTarsModelVersion)()}, prompt-tokens, ${(null == (_result_usage = result.usage) ? void 0 : _result_usage.prompt_tokens) || ''}, completion-tokens, ${(null == (_result_usage1 = result.usage) ? void 0 : _result_usage1.completion_tokens) || ''}, total-tokens, ${(null == (_result_usage2 = result.usage) ? void 0 : _result_usage2.total_tokens) || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}`);
- debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
- (0, utils_namespaceObject.assert)(result.choices, `invalid response from LLM service: ${JSON.stringify(result)}`);
- content = result.choices[0].message.content;
- usage = result.usage;
- }
- debugCall(`response: ${content}`);
- (0, utils_namespaceObject.assert)(content, 'empty content');
- } else if ('anthropic' === style) {
- const convertImageContent = (content)=>{
- if ('image_url' === content.type) {
- const imgBase64 = content.image_url.url;
- (0, utils_namespaceObject.assert)(imgBase64, 'image_url is required');
- return {
- source: {
- type: 'base64',
- media_type: imgBase64.includes('data:image/png;base64,') ? 'image/png' : 'image/jpeg',
- data: imgBase64.split(',')[1]
- },
- type: 'image'
- };
- }
- return content;
- };
- if (isStreaming) {
- const stream = await completion.create({
- model,
- system: 'You are a versatile professional in software UI automation',
- messages: messages.map((m)=>({
- role: 'user',
- content: Array.isArray(m.content) ? m.content.map(convertImageContent) : m.content
- })),
- response_format: responseFormat,
- ...commonConfig
- });
- for await (const chunk of stream){
- var _chunk_delta;
- const content = (null == (_chunk_delta = chunk.delta) ? void 0 : _chunk_delta.text) || '';
- if (content) {
- accumulated += content;
- const chunkData = {
- content,
- accumulated,
- reasoning_content: '',
- isComplete: false,
- usage: void 0
- };
- options.onChunk(chunkData);
- }
- if ('message_stop' === chunk.type) {
- timeCost = Date.now() - startTime;
- const anthropicUsage = chunk.usage;
- const finalChunk = {
- content: '',
- accumulated,
- reasoning_content: '',
- isComplete: true,
- usage: anthropicUsage ? {
- prompt_tokens: anthropicUsage.input_tokens ?? 0,
- completion_tokens: anthropicUsage.output_tokens ?? 0,
- total_tokens: (anthropicUsage.input_tokens ?? 0) + (anthropicUsage.output_tokens ?? 0),
- time_cost: timeCost ?? 0
- } : void 0
- };
- options.onChunk(finalChunk);
- break;
- }
- }
- content = accumulated;
- } else {
- const result = await completion.create({
- model,
- system: 'You are a versatile professional in software UI automation',
- messages: messages.map((m)=>({
- role: 'user',
- content: Array.isArray(m.content) ? m.content.map(convertImageContent) : m.content
- })),
- response_format: responseFormat,
- ...commonConfig
- });
- timeCost = Date.now() - startTime;
- content = result.content[0].text;
- usage = result.usage;
- }
- (0, utils_namespaceObject.assert)(content, 'empty content');
- }
- if (isStreaming && !usage) {
- const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
- usage = {
- prompt_tokens: estimatedTokens,
- completion_tokens: estimatedTokens,
- total_tokens: 2 * estimatedTokens
- };
- }
- return {
- content: content || '',
- usage: usage ? {
- prompt_tokens: usage.prompt_tokens ?? 0,
- completion_tokens: usage.completion_tokens ?? 0,
- total_tokens: usage.total_tokens ?? 0,
- time_cost: timeCost ?? 0
- } : void 0,
- isStreamed: !!isStreaming
- };
- } catch (e) {
- console.error('call AI error', e);
- const newError = new Error(`failed to call ${isStreaming ? 'streaming ' : ''}AI model service: ${e.message}. Troubleshooting: https://midscenejs.com/model-provider.html`, {
- cause: e
- });
- throw newError;
- }
- }
- async function callToGetJSONObject(messages, AIActionTypeValue) {
- let responseFormat;
- const model = getModelName();
- if (model.includes('gpt-4')) switch(AIActionTypeValue){
- case common_AIActionType.ASSERT:
- responseFormat = assertSchema;
- break;
- case common_AIActionType.INSPECT_ELEMENT:
- responseFormat = locatorSchema;
- break;
- case common_AIActionType.PLAN:
- responseFormat = planSchema;
- break;
- case common_AIActionType.EXTRACT_DATA:
- case common_AIActionType.DESCRIBE_ELEMENT:
- responseFormat = {
- type: types_AIResponseFormat.JSON
- };
- break;
- }
- if ('gpt-4o-2024-05-13' === model) responseFormat = {
- type: types_AIResponseFormat.JSON
- };
- const response = await service_caller_call(messages, AIActionTypeValue, responseFormat);
- (0, utils_namespaceObject.assert)(response, 'empty response');
- const jsonContent = safeParseJson(response.content);
- return {
- content: jsonContent,
- usage: response.usage
- };
- }
- async function callAiFnWithStringResponse(msgs, AIActionTypeValue) {
- const { content, usage } = await service_caller_call(msgs, AIActionTypeValue);
- return {
- content,
- usage
- };
- }
- function extractJSONFromCodeBlock(response) {
- try {
- const jsonMatch = response.match(/^\s*(\{[\s\S]*\})\s*$/);
- if (jsonMatch) return jsonMatch[1];
- const codeBlockMatch = response.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
- if (codeBlockMatch) return codeBlockMatch[1];
- const jsonLikeMatch = response.match(/\{[\s\S]*\}/);
- if (jsonLikeMatch) return jsonLikeMatch[0];
- } catch {}
- return response;
- }
- function preprocessDoubaoBboxJson(input) {
- if (input.includes('bbox')) while(/\d+\s+\d+/.test(input))input = input.replace(/(\d+)\s+(\d+)/g, '$1,$2');
- return input;
- }
- function safeParseJson(input) {
- const cleanJsonString = extractJSONFromCodeBlock(input);
- if (null == cleanJsonString ? void 0 : cleanJsonString.match(/\((\d+),(\d+)\)/)) {
- var _cleanJsonString_match;
- return null == (_cleanJsonString_match = cleanJsonString.match(/\((\d+),(\d+)\)/)) ? void 0 : _cleanJsonString_match.slice(1).map(Number);
- }
- try {
- return JSON.parse(cleanJsonString);
- } catch {}
- try {
- return JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(cleanJsonString));
- } catch (e) {}
- if ('doubao-vision' === (0, env_namespaceObject.vlLocateMode)() || 'vlm-ui-tars' === (0, env_namespaceObject.vlLocateMode)()) {
- const jsonString = preprocessDoubaoBboxJson(cleanJsonString);
- return JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(jsonString));
- }
- throw Error(`failed to parse json response: ${input}`);
- }
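`safeParseJson` above tries progressively more tolerant strategies: strip code fences, plain `JSON.parse`, `jsonrepair`, and (for doubao-vision / vlm-ui-tars) comma-repair of space-separated bbox arrays. Illustrative inputs and outputs, assuming the two functions above are in scope:

```js
safeParseJson('```json\n{"a": 1}\n```'); // -> { a: 1 } (fence stripped, then JSON.parse)
safeParseJson('(120,340)');              // -> [120, 340] (coordinate short-circuit)

// Doubao/UI-TARS models may emit bbox arrays without commas; the loop above
// repeatedly inserts them until no "digit whitespace digit" run remains:
preprocessDoubaoBboxJson('{"bbox": [100 200 300 400]}');
// -> '{"bbox": [100,200,300,400]}'
```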
- function describeSize(size) {
- return `${size.width} x ${size.height}`;
- }
- const distanceThreshold = 16;
- function elementByPositionWithElementInfo(treeRoot, position, options) {
- const requireStrictDistance = (null == options ? void 0 : options.requireStrictDistance) ?? true;
- const filterPositionElements = (null == options ? void 0 : options.filterPositionElements) ?? false;
- (0, utils_namespaceObject.assert)(void 0 !== position, 'position is required for query');
- const matchingElements = [];
- function dfs(node) {
- if (null == node ? void 0 : node.node) {
- const item = node.node;
- if (item.rect.left <= position.x && position.x <= item.rect.left + item.rect.width && item.rect.top <= position.y && position.y <= item.rect.top + item.rect.height) {
- var _item_attributes;
- if (!(filterPositionElements && (null == (_item_attributes = item.attributes) ? void 0 : _item_attributes.nodeType) === constants_namespaceObject.NodeType.POSITION) && item.isVisible) matchingElements.push(item);
- }
- }
- for (const child of node.children)dfs(child);
- }
- dfs(treeRoot);
- if (0 === matchingElements.length) return;
- const element = matchingElements.reduce((smallest, current)=>{
- const smallestArea = smallest.rect.width * smallest.rect.height;
- const currentArea = current.rect.width * current.rect.height;
- return currentArea < smallestArea ? current : smallest;
- });
- const distanceToCenter = distance({
- x: element.center[0],
- y: element.center[1]
- }, position);
- if (requireStrictDistance) return distanceToCenter <= distanceThreshold ? element : void 0;
- return element;
- }
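The locator above collects every visible node whose rect contains the query point, keeps the smallest by area, and (in strict mode) only returns it when the node's center lies within `distanceThreshold` (16px) of the point. A worked sketch with hypothetical node shapes:

```js
// Hypothetical tree: a 500x300 page containing an 80x20 button.
const tree = {
  node: { id: 'page', rect: { left: 0, top: 0, width: 500, height: 300 },
    center: [250, 150], isVisible: true },
  children: [{
    node: { id: 'btn', rect: { left: 60, top: 90, width: 80, height: 20 },
      center: [100, 100], isVisible: true },
    children: [],
  }],
};
// Both rects contain (100, 100); 'btn' wins on smaller area, and its center is
// 0px from the query point (<= 16), so strict mode returns it.
elementByPositionWithElementInfo(tree, { x: 100, y: 100 }); // -> the 'btn' node
```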
- function distance(point1, point2) {
- return Math.sqrt((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2);
- }
- async function describeUserPage(context, opt) {
- const { screenshotBase64 } = context;
- let width;
- let height;
- if (context.size) ({ width, height } = context.size);
- else {
- const imgSize = await (0, img_namespaceObject.imageInfoOfBase64)(screenshotBase64);
- ({ width, height } = imgSize);
- }
- const treeRoot = context.tree;
- const idElementMap = {};
- const flatElements = (0, extractor_namespaceObject.treeToList)(treeRoot);
- if ((null == opt ? void 0 : opt.domIncluded) === true && flatElements.length >= 5000) console.warn('The number of elements is too large and may cause the prompt to be too long. Please use domIncluded: "visible-only" to reduce the number of elements');
- flatElements.forEach((element)=>{
- idElementMap[element.id] = element;
- if (void 0 !== element.indexId) idElementMap[`${element.indexId}`] = element;
- });
- let pageDescription = '';
- const visibleOnly = (null == opt ? void 0 : opt.visibleOnly) ?? (null == opt ? void 0 : opt.domIncluded) === 'visible-only';
- if ((null == opt ? void 0 : opt.domIncluded) || !(0, env_namespaceObject.vlLocateMode)()) {
- const contentTree = await (0, extractor_namespaceObject.descriptionOfTree)(treeRoot, null == opt ? void 0 : opt.truncateTextLength, null == opt ? void 0 : opt.filterNonTextContent, visibleOnly);
- const sizeDescription = describeSize({
- width,
- height
- });
- pageDescription = `The size of the page: ${sizeDescription} \n The page elements tree:\n${contentTree}`;
- }
- return {
- description: pageDescription,
- elementById (idOrIndexId) {
- (0, utils_namespaceObject.assert)(void 0 !== idOrIndexId, 'id is required for query');
- const item = idElementMap[`${idOrIndexId}`];
- return item;
- },
- elementByPosition (position, size) {
- return elementByPositionWithElementInfo(treeRoot, position);
- },
- insertElementByPosition (position) {
- const element = (0, extractor_namespaceObject.generateElementByPosition)(position);
- treeRoot.children.push({
- node: element,
- children: []
- });
- flatElements.push(element);
- idElementMap[element.id] = element;
- return element;
- },
- size: {
- width,
- height
- }
- };
- }
- const getScreenshotsForLLM = (events, maxScreenshots = 1)=>{
- const eventsWithScreenshots = events.filter((event)=>event.screenshotBefore || event.screenshotAfter || event.screenshotWithBox);
- const sortedEvents = [
- ...eventsWithScreenshots
- ].sort((a, b)=>{
- if ('navigation' === a.type && 'navigation' !== b.type) return -1;
- if ('navigation' !== a.type && 'navigation' === b.type) return 1;
- if ('click' === a.type && 'click' !== b.type) return -1;
- if ('click' !== a.type && 'click' === b.type) return 1;
- return 0;
- });
- const screenshots = [];
- for (const event of sortedEvents){
- const screenshot = event.screenshotWithBox || event.screenshotAfter || event.screenshotBefore;
- if (screenshot && !screenshots.includes(screenshot)) {
- screenshots.push(screenshot);
- if (screenshots.length >= maxScreenshots) break;
- }
- }
- return screenshots;
- };
- const filterEventsByType = (events)=>({
- navigationEvents: events.filter((event)=>'navigation' === event.type),
- clickEvents: events.filter((event)=>'click' === event.type),
- inputEvents: events.filter((event)=>'input' === event.type),
- scrollEvents: events.filter((event)=>'scroll' === event.type)
- });
- const createEventCounts = (filteredEvents, totalEvents)=>({
- navigation: filteredEvents.navigationEvents.length,
- click: filteredEvents.clickEvents.length,
- input: filteredEvents.inputEvents.length,
- scroll: filteredEvents.scrollEvents.length,
- total: totalEvents
- });
- const extractInputDescriptions = (inputEvents)=>inputEvents.map((event)=>({
- description: event.elementDescription || '',
- value: event.value || ''
- })).filter((item)=>item.description && item.value);
- const processEventsForLLM = (events)=>events.map((event)=>({
- type: event.type,
- timestamp: event.timestamp,
- url: event.url,
- title: event.title,
- elementDescription: event.elementDescription,
- value: event.value,
- pageInfo: event.pageInfo,
- elementRect: event.elementRect
- }));
- const prepareEventSummary = (events, options = {})=>{
- const filteredEvents = filterEventsByType(events);
- const eventCounts = createEventCounts(filteredEvents, events.length);
- const startUrl = filteredEvents.navigationEvents.length > 0 ? filteredEvents.navigationEvents[0].url || '' : '';
- const clickDescriptions = filteredEvents.clickEvents.map((event)=>event.elementDescription).filter((desc)=>Boolean(desc)).slice(0, 10);
- const inputDescriptions = extractInputDescriptions(filteredEvents.inputEvents).slice(0, 10);
- const urls = filteredEvents.navigationEvents.map((e)=>e.url).filter((url)=>Boolean(url)).slice(0, 5);
- const processedEvents = processEventsForLLM(events);
- return {
- testName: options.testName || 'Automated test from recorded events',
- startUrl,
- eventCounts,
- urls,
- clickDescriptions,
- inputDescriptions,
- events: processedEvents
- };
- };
- const createMessageContent = (promptText, screenshots = [], includeScreenshots = true)=>{
- const messageContent = [
- {
- type: 'text',
- text: promptText
- }
- ];
- if (includeScreenshots && screenshots.length > 0) {
- messageContent.unshift({
- type: 'text',
- text: 'Here are screenshots from the recording session to help you understand the context:'
- });
- screenshots.forEach((screenshot)=>{
- messageContent.push({
- type: 'image_url',
- image_url: {
- url: screenshot
- }
- });
- });
- }
- return messageContent;
- };
- const validateEvents = (events)=>{
- if (!events.length) throw new Error('No events provided for test generation');
- };
- const generateYamlTest = async (events, options = {})=>{
- try {
- validateEvents(events);
- const summary = prepareEventSummary(events, {
- testName: options.testName,
- maxScreenshots: options.maxScreenshots || 3
- });
- const yamlSummary = {
- ...summary,
- includeTimestamps: options.includeTimestamps || false
- };
- const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
- const prompt = [
- {
- role: 'system',
- content: `You are an expert in Midscene.js YAML test generation. Generate clean, accurate YAML following these rules: ${constants_namespaceObject.YAML_EXAMPLE_CODE}`
- },
- {
- role: 'user',
- content: `Generate YAML test for Midscene.js automation from recorded browser events.
-
- Event Summary:
- ${JSON.stringify(yamlSummary, null, 2)}
-
- Convert events:
- - navigation \u{2192} target.url
- - click \u{2192} aiTap with element description
- - input \u{2192} aiInput with value and locate
- - scroll \u{2192} aiScroll with appropriate direction
- - Add aiAssert for important state changes
-
- Respond with YAML only, no explanations.`
- }
- ];
- if (screenshots.length > 0) {
- prompt.push({
- role: 'user',
- content: 'Here are screenshots from the recording session to help you understand the context:'
- });
- prompt.push({
- role: 'user',
- content: screenshots.map((screenshot)=>({
- type: 'image_url',
- image_url: {
- url: screenshot
- }
- }))
- });
- }
- const response = await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA);
- if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return response.content;
- throw new Error('Failed to generate YAML test configuration');
- } catch (error) {
- throw new Error(`Failed to generate YAML test: ${error}`);
- }
- };
- const generateYamlTestStream = async (events, options = {})=>{
- try {
- validateEvents(events);
- const summary = prepareEventSummary(events, {
- testName: options.testName,
- maxScreenshots: options.maxScreenshots || 3
- });
- const yamlSummary = {
- ...summary,
- includeTimestamps: options.includeTimestamps || false
- };
- const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
- const prompt = [
- {
- role: 'system',
- content: `You are an expert in Midscene.js YAML test generation. Generate clean, accurate YAML following these rules: ${constants_namespaceObject.YAML_EXAMPLE_CODE}`
- },
- {
- role: 'user',
- content: `Generate YAML test for Midscene.js automation from recorded browser events.
-
- Event Summary:
- ${JSON.stringify(yamlSummary, null, 2)}
-
- Convert events:
- - navigation \u{2192} target.url
- - click \u{2192} aiTap with element description
- - input \u{2192} aiInput with value and locate
- - scroll \u{2192} aiScroll with appropriate direction
- - Add aiAssert for important state changes
-
- Respond with YAML only, no explanations.`
- }
- ];
- if (screenshots.length > 0) {
- prompt.push({
- role: 'user',
- content: 'Here are screenshots from the recording session to help you understand the context:'
- });
- prompt.push({
- role: 'user',
- content: screenshots.map((screenshot)=>({
- type: 'image_url',
- image_url: {
- url: screenshot
- }
- }))
- });
- }
- if (options.stream && options.onChunk) return await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA, void 0, {
- stream: true,
- onChunk: options.onChunk
- });
- {
- const response = await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA);
- if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return {
- content: response.content,
- usage: response.usage,
- isStreamed: false
- };
- throw new Error('Failed to generate YAML test configuration');
- }
- } catch (error) {
- throw new Error(`Failed to generate YAML test: ${error}`);
- }
- };
- function systemPromptToExtract() {
- return `
- You are a versatile professional in software UI design and testing. Your outstanding contributions will impact the user experience of billions of users.
-
- The user will give you a screenshot, the contents of it (optional), and some data requirements in <DATA_DEMAND>. You need to extract the data according to the <DATA_DEMAND>.
-
- If a key specifies a JSON data type (such as Number, String, Boolean, Object, Array), ensure the returned value strictly matches that data type.
-
- If the user provides multiple reference images, please carefully review the reference images with the screenshot and provide the correct answer for <DATA_DEMAND>.
-
- If the user requests reasons to be provided, please provide the thought field in the response, less than 100 words.
-
- Return in the following JSON format:
- {
- thought: string, // the thought process of the extraction, less than 100 words, not required by default.
- data: any, // the extracted data. Make sure both the value and schema meet the DATA_DEMAND. If you want to write some description in this field, use the same language as the DATA_DEMAND.
- errors: [], // string[], error message if any
- }
-
- # Example 1
- For example, if the DATA_DEMAND is:
-
- <DATA_DEMAND>
- {
- "name": "name shows on the left panel, string",
- "age": "age shows on the right panel, number",
- "isAdmin": "if the user is admin, boolean"
- }
- </DATA_DEMAND>
-
- By viewing the screenshot and page contents, you can extract the following data:
-
- {
- data: {
- name: "John",
- age: 30,
- isAdmin: true
- },
- },
-
- # Example 2
- If the DATA_DEMAND is:
-
- <DATA_DEMAND>
- the todo items list, string[]
- </DATA_DEMAND>
-
- By viewing the screenshot and page contents, you can extract the following data:
-
- {
- data: ["todo 1", "todo 2", "todo 3"],
- }
-
- # Example 3
- If the DATA_DEMAND is:
-
- <DATA_DEMAND>
- the page title, string
- </DATA_DEMAND>
-
- By viewing the screenshot and page contents, you can extract the following data:
-
- {
- data: "todo list",
- }
-
- # Example 4
- If the DATA_DEMAND is:
-
- <DATA_DEMAND>
- {
- "result": "Boolean, is it currently the SMS page?"
- }
- </DATA_DEMAND>
-
- By viewing the screenshot and page contents, you can extract the following data:
-
- {
- data: { result: true },
- }
- `;
- }
- const extractDataQueryPrompt = async (pageDescription, dataQuery)=>{
- let dataQueryText = '';
- dataQueryText = 'string' == typeof dataQuery ? dataQuery : JSON.stringify(dataQuery, null, 2);
- const extractDataPrompt = new prompts_namespaceObject.PromptTemplate({
- template: `
- <PageDescription>
- {pageDescription}
- </PageDescription>
-
- <DATA_DEMAND>
- {dataQuery}
- </DATA_DEMAND>
- `,
- inputVariables: [
- "pageDescription",
- 'dataQuery'
- ]
- });
- return await extractDataPrompt.format({
- pageDescription,
- dataQuery: dataQueryText
- });
- };
- function systemPromptToLocateSection(vlMode) {
- return `
- Your goal is to find one section containing the target element in the screenshot; put it in the \`bbox\` field. If the user describes the target element with some reference elements, you should also find the section containing the reference elements and put it in the \`references_bbox\` field.
-
- Usually, it should be an area of no more than about 300x300px. A larger size is allowed if there are many elements to cover.
-
- Return in this JSON format:
- \`\`\`json
- {
- "bbox": [number, number, number, number],
- "references_bbox"?: [
- [number, number, number, number],
- [number, number, number, number],
- ...
- ],
- "error"?: string
- }
- \`\`\`
-
- All the numbers in the \`bbox\` and \`references_bbox\` represent ${bboxDescription(vlMode)}.
-
- For example, if the user describes the target element as "the delete button on the second row with title 'Peter'", you should put the bounding box of the delete button in the \`bbox\` field, and the bounding box of the second row in the \`references_bbox\` field.
-
- The return value should look like this:
- \`\`\`json
- {
- "bbox": [100, 100, 200, 200],
- "references_bbox": [[100, 100, 200, 200]]
- }
- \`\`\`
- `;
- }
- const sectionLocatorInstruction = new prompts_namespaceObject.PromptTemplate({
- template: `Here is the target element the user is interested in:
- <targetDescription>
- {sectionDescription}
- </targetDescription>
- `,
- inputVariables: [
- "sectionDescription"
- ]
- });
- const debugInspect = (0, logger_namespaceObject.getDebug)('ai:inspect');
- const debugSection = (0, logger_namespaceObject.getDebug)('ai:section');
- const extraTextFromUserPrompt = (prompt)=>{
- if ('string' == typeof prompt) return prompt;
- return prompt.prompt;
- };
- const promptsToChatParam = async (multimodalPrompt)=>{
- var _multimodalPrompt_images;
- const msgs = [];
- if (null == multimodalPrompt ? void 0 : null == (_multimodalPrompt_images = multimodalPrompt.images) ? void 0 : _multimodalPrompt_images.length) {
- msgs.push({
- role: 'user',
- content: [
- {
- type: 'text',
- text: 'Next, I will provide all the reference images.'
- }
- ]
- });
- for (const item of multimodalPrompt.images){
- const base64 = await (0, img_namespaceObject.preProcessImageUrl)(item.url, !!multimodalPrompt.convertHttpImage2Base64);
- msgs.push({
- role: 'user',
- content: [
- {
- type: 'text',
- text: `reference image ${item.name}:`
- }
- ]
- });
- msgs.push({
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: base64,
- detail: 'high'
- }
- }
- ]
- });
- }
- }
- return msgs;
- };
- async function AiLocateElement(options) {
- const { context, targetElementDescription, callAI } = options;
- const { screenshotBase64 } = context;
- const { description, elementById, insertElementByPosition } = await describeUserPage(context);
- (0, utils_namespaceObject.assert)(targetElementDescription, "cannot find the target element description");
- const userInstructionPrompt = await findElementPrompt.format({
- pageDescription: description,
- targetElementDescription: extraTextFromUserPrompt(targetElementDescription)
- });
- const systemPrompt = systemPromptToLocateElement((0, env_namespaceObject.vlLocateMode)());
- let imagePayload = screenshotBase64;
- if (options.searchConfig) {
- (0, utils_namespaceObject.assert)(options.searchConfig.rect, 'searchArea is provided but its rect cannot be found. Failed to locate element');
- (0, utils_namespaceObject.assert)(options.searchConfig.imageBase64, 'searchArea is provided but its imageBase64 cannot be found. Failed to locate element');
- imagePayload = options.searchConfig.imageBase64;
- } else if ('qwen-vl' === (0, env_namespaceObject.vlLocateMode)()) imagePayload = await (0, img_namespaceObject.paddingToMatchBlockByBase64)(imagePayload);
- else if (!(0, env_namespaceObject.vlLocateMode)()) imagePayload = await markupImageForLLM(screenshotBase64, context.tree, context.size);
- const msgs = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: imagePayload,
- detail: 'high'
- }
- },
- {
- type: 'text',
- text: userInstructionPrompt
- }
- ]
- }
- ];
- if ('string' != typeof targetElementDescription) {
- const addOns = await promptsToChatParam({
- images: targetElementDescription.images,
- convertHttpImage2Base64: targetElementDescription.convertHttpImage2Base64
- });
- msgs.push(...addOns);
- }
- const callAIFn = callAI || callToGetJSONObject;
- const res = await callAIFn(msgs, common_AIActionType.INSPECT_ELEMENT);
- const rawResponse = JSON.stringify(res.content);
- let resRect;
- let matchedElements = 'elements' in res.content ? res.content.elements : [];
- let errors = 'errors' in res.content ? res.content.errors : [];
- try {
- if ('bbox' in res.content && Array.isArray(res.content.bbox)) {
- var _options_searchConfig_rect, _options_searchConfig, _options_searchConfig_rect1, _options_searchConfig1, _options_searchConfig_rect2, _options_searchConfig2, _options_searchConfig_rect3, _options_searchConfig3;
- resRect = adaptBboxToRect(res.content.bbox, (null == (_options_searchConfig = options.searchConfig) ? void 0 : null == (_options_searchConfig_rect = _options_searchConfig.rect) ? void 0 : _options_searchConfig_rect.width) || context.size.width, (null == (_options_searchConfig1 = options.searchConfig) ? void 0 : null == (_options_searchConfig_rect1 = _options_searchConfig1.rect) ? void 0 : _options_searchConfig_rect1.height) || context.size.height, null == (_options_searchConfig2 = options.searchConfig) ? void 0 : null == (_options_searchConfig_rect2 = _options_searchConfig2.rect) ? void 0 : _options_searchConfig_rect2.left, null == (_options_searchConfig3 = options.searchConfig) ? void 0 : null == (_options_searchConfig_rect3 = _options_searchConfig3.rect) ? void 0 : _options_searchConfig_rect3.top);
- debugInspect('resRect', resRect);
- const rectCenter = {
- x: resRect.left + resRect.width / 2,
- y: resRect.top + resRect.height / 2
- };
- let element = elementByPositionWithElementInfo(context.tree, rectCenter);
- const distanceToCenter = element ? distance({
- x: element.center[0],
- y: element.center[1]
- }, rectCenter) : 0;
- if (!element || distanceToCenter > distanceThreshold) element = insertElementByPosition(rectCenter);
- if (element) {
- matchedElements = [
- element
- ];
- errors = [];
- }
- }
- } catch (e) {
- const msg = e instanceof Error ? `Failed to parse bbox: ${e.message}` : 'unknown error in locate';
- if (errors && (null == errors ? void 0 : errors.length) !== 0) errors.push(`(${msg})`);
- else errors = [
- msg
- ];
- }
- return {
- rect: resRect,
- parseResult: {
- elements: matchedElements,
- errors
- },
- rawResponse,
- elementById,
- usage: res.usage,
- isOrderSensitive: 'object' == typeof res.content && null !== res.content && 'isOrderSensitive' in res.content ? res.content.isOrderSensitive : void 0
- };
- }
- async function AiLocateSection(options) {
- const { context, sectionDescription } = options;
- const { screenshotBase64 } = context;
- const systemPrompt = systemPromptToLocateSection((0, env_namespaceObject.vlLocateMode)());
- const sectionLocatorInstructionText = await sectionLocatorInstruction.format({
- sectionDescription: extraTextFromUserPrompt(sectionDescription)
- });
- const msgs = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: screenshotBase64,
- detail: 'high'
- }
- },
- {
- type: 'text',
- text: sectionLocatorInstructionText
- }
- ]
- }
- ];
- if ('string' != typeof sectionDescription) {
- const addOns = await promptsToChatParam({
- images: sectionDescription.images,
- convertHttpImage2Base64: sectionDescription.convertHttpImage2Base64
- });
- msgs.push(...addOns);
- }
- const result = await callAiFn(msgs, common_AIActionType.EXTRACT_DATA);
- let sectionRect;
- const sectionBbox = result.content.bbox;
- if (sectionBbox) {
- const targetRect = adaptBboxToRect(sectionBbox, context.size.width, context.size.height);
- debugSection('original targetRect %j', targetRect);
- const referenceBboxList = result.content.references_bbox || [];
- debugSection('referenceBboxList %j', referenceBboxList);
- const referenceRects = referenceBboxList.filter((bbox)=>Array.isArray(bbox)).map((bbox)=>adaptBboxToRect(bbox, context.size.width, context.size.height));
- debugSection('referenceRects %j', referenceRects);
- const mergedRect = mergeRects([
- targetRect,
- ...referenceRects
- ]);
- debugSection('mergedRect %j', mergedRect);
- sectionRect = expandSearchArea(mergedRect, context.size);
- debugSection('expanded sectionRect %j', sectionRect);
- }
- let imageBase64 = screenshotBase64;
- if (sectionRect) imageBase64 = await (0, img_namespaceObject.cropByRect)(screenshotBase64, sectionRect, (0, env_namespaceObject.getAIConfigInBoolean)(env_namespaceObject.MIDSCENE_USE_QWEN_VL));
- return {
- rect: sectionRect,
- imageBase64,
- error: result.content.error,
- rawResponse: JSON.stringify(result.content),
- usage: result.usage
- };
- }
- async function AiExtractElementInfo(options) {
- var _options_extractOption;
- const { dataQuery, context, extractOption, multimodalPrompt } = options;
- const systemPrompt = systemPromptToExtract();
- const { screenshotBase64 } = context;
- const { description, elementById } = await describeUserPage(context, {
- truncateTextLength: 200,
- filterNonTextContent: false,
- visibleOnly: false,
- domIncluded: null == extractOption ? void 0 : extractOption.domIncluded
- });
- const extractDataPromptText = await extractDataQueryPrompt(description, dataQuery);
- const userContent = [];
- if ((null == extractOption ? void 0 : extractOption.screenshotIncluded) !== false) userContent.push({
- type: 'image_url',
- image_url: {
- url: screenshotBase64,
- detail: 'high'
- }
- });
- userContent.push({
- type: 'text',
- text: extractDataPromptText
- });
- const msgs = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: userContent
- }
- ];
- if (null == (_options_extractOption = options.extractOption) ? void 0 : _options_extractOption.returnThought) msgs.push({
- role: 'user',
- content: 'Please provide reasons.'
- });
- if (multimodalPrompt) {
- const addOns = await promptsToChatParam({
- images: multimodalPrompt.images,
- convertHttpImage2Base64: multimodalPrompt.convertHttpImage2Base64
- });
- msgs.push(...addOns);
- }
- const result = await callAiFn(msgs, common_AIActionType.EXTRACT_DATA);
- return {
- parseResult: result.content,
- elementById,
- usage: result.usage
- };
- }
- async function AiAssert(options) {
- const { assertion, context } = options;
- (0, utils_namespaceObject.assert)(assertion, 'assertion should not be empty');
- const { screenshotBase64 } = context;
- const systemPrompt = systemPromptToAssert({
- isUITars: (0, env_namespaceObject.getAIConfigInBoolean)(env_namespaceObject.MIDSCENE_USE_VLM_UI_TARS)
- });
- const assertionText = extraTextFromUserPrompt(assertion);
- const msgs = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: screenshotBase64,
- detail: 'high'
- }
- },
- {
- type: 'text',
- text: `
- Here is the assertion. Please tell whether it is truthy according to the screenshot.
- =====================================
- ${assertionText}
- =====================================
- `
- }
- ]
- }
- ];
- if ('string' != typeof assertion) {
- const addOns = await promptsToChatParam({
- images: assertion.images,
- convertHttpImage2Base64: assertion.convertHttpImage2Base64
- });
- msgs.push(...addOns);
- }
- const { content: assertResult, usage } = await callAiFn(msgs, common_AIActionType.ASSERT);
- return {
- content: assertResult,
- usage
- };
- }
- async function llm_planning_plan(userInstruction, opts) {
- var _planFromAI_action;
- const { callAI, context } = opts || {};
- const { screenshotBase64, size } = context;
- const { description: pageDescription, elementById } = await describeUserPage(context);
- const systemPrompt = await systemPromptToTaskPlanning({
- actionSpace: opts.actionSpace,
- vlMode: (0, env_namespaceObject.vlLocateMode)()
- });
- const taskBackgroundContextText = generateTaskBackgroundContext(userInstruction, opts.log, opts.actionContext);
- const userInstructionPrompt = await automationUserPrompt((0, env_namespaceObject.vlLocateMode)()).format({
- pageDescription,
- taskBackgroundContext: taskBackgroundContextText
- });
- let imagePayload = screenshotBase64;
- if ('qwen-vl' === (0, env_namespaceObject.vlLocateMode)()) imagePayload = await (0, img_namespaceObject.paddingToMatchBlockByBase64)(imagePayload);
- else if (!(0, env_namespaceObject.vlLocateMode)()) imagePayload = await markupImageForLLM(screenshotBase64, context.tree, context.size);
- warnGPT4oSizeLimit(size);
- const msgs = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: imagePayload,
- detail: 'high'
- }
- },
- {
- type: 'text',
- text: userInstructionPrompt
- }
- ]
- }
- ];
- const call = callAI || callAiFn;
- const { content, usage } = await call(msgs, common_AIActionType.PLAN);
- const rawResponse = JSON.stringify(content, void 0, 2);
- const planFromAI = content;
- const actions = ((null == (_planFromAI_action = planFromAI.action) ? void 0 : _planFromAI_action.type) ? [
- planFromAI.action
- ] : planFromAI.actions) || [];
- const returnValue = {
- ...planFromAI,
- actions,
- rawResponse,
- usage,
- yamlFlow: buildYamlFlowFromPlans(actions, planFromAI.sleep)
- };
- (0, utils_namespaceObject.assert)(planFromAI, "can't get plans from AI");
- if ((0, env_namespaceObject.vlLocateMode)()) {
- actions.forEach((action)=>{
- if (action.locate) try {
- action.locate = fillBboxParam(action.locate, size.width, size.height);
- } catch (e) {
- throw new Error(`Failed to fill locate param: ${planFromAI.error} (${e instanceof Error ? e.message : 'unknown error'})`, {
- cause: e
- });
- }
- });
- (0, utils_namespaceObject.assert)(!planFromAI.error, `Failed to plan actions: ${planFromAI.error}`);
- } else actions.forEach((action)=>{
- var _action_locate;
- if (null == (_action_locate = action.locate) ? void 0 : _action_locate.id) {
- const element = elementById(action.locate.id);
- if (element) action.locate.id = element.id;
- }
- });
- if (0 === actions.length && returnValue.more_actions_needed_by_instruction && !returnValue.sleep) console.warn('No actions planned for the prompt, but model said more actions are needed:', userInstruction);
- return returnValue;
- }
- const us_keyboard_layout_namespaceObject = require("@midscene/shared/us-keyboard-layout");
- const action_parser_namespaceObject = require("@ui-tars/action-parser");
- function getUiTarsPlanningPrompt() {
- const preferredLanguage = (0, env_namespaceObject.getPreferredLanguage)();
- return `
- You are a GUI agent. You are given a task and your action history, with screenshots. You need to perform the next action to complete the task.
-
- ## Output Format
- \`\`\`
- Thought: ...
- Action: ...
- \`\`\`
-
- ## Action Space
-
- click(start_box='[x1, y1, x2, y2]')
- left_double(start_box='[x1, y1, x2, y2]')
- right_single(start_box='[x1, y1, x2, y2]')
- drag(start_box='[x1, y1, x2, y2]', end_box='[x3, y3, x4, y4]')
- hotkey(key='')
- type(content='xxx') # Use escape characters \\', \\\", and \\n in content part to ensure we can parse the content in normal python string format. If you want to submit your input, use \\n at the end of content.
- scroll(start_box='[x1, y1, x2, y2]', direction='down or up or right or left')
- wait() # Sleep for 5s and take a screenshot to check for any changes.
- finished(content='xxx') # Use escape characters \\', \\", and \\n in content part to ensure we can parse the content in normal python string format.
-
-
- ## Note
- - Use ${preferredLanguage} in \`Thought\` part.
- - Write a small plan and finally summarize your next action (with its target element) in one sentence in \`Thought\` part.
-
- ## User Instruction
- `;
- }
- const getSummary = (prediction)=>prediction.replace(/Reflection:[\s\S]*?(?=Action_Summary:|Action:|$)/g, '').trim();
- const debug = (0, logger_namespaceObject.getDebug)('ui-tars-planning');
- const bboxSize = 10;
- const pointToBbox = (point, width, height)=>[
- Math.round(Math.max(point.x - bboxSize / 2, 0)),
- Math.round(Math.max(point.y - bboxSize / 2, 0)),
- Math.round(Math.min(point.x + bboxSize / 2, width)),
- Math.round(Math.min(point.y + bboxSize / 2, height))
- ];
- async function vlmPlanning(options) {
- const { conversationHistory, userInstruction, size } = options;
- const systemPrompt = getUiTarsPlanningPrompt() + userInstruction;
- const res = await service_caller_call([
- {
- role: 'user',
- content: systemPrompt
- },
- ...conversationHistory
- ], common_AIActionType.INSPECT_ELEMENT);
- const convertedText = convertBboxToCoordinates(res.content);
- const modelVer = (0, env_namespaceObject.uiTarsModelVersion)();
- const { parsed } = (0, action_parser_namespaceObject.actionParser)({
- prediction: convertedText,
- factor: [
- 1000,
- 1000
- ],
- screenContext: {
- width: size.width,
- height: size.height
- },
- modelVer: modelVer || void 0
- });
- debug('modelVer', modelVer, 'parsed', JSON.stringify(parsed));
- const transformActions = [];
- parsed.forEach((action)=>{
- if ('click' === action.action_type) {
- (0, utils_namespaceObject.assert)(action.action_inputs.start_box, 'start_box is required');
- const point = getPoint(action.action_inputs.start_box, size);
- transformActions.push({
- type: 'Locate',
- param: {},
- locate: {
- prompt: action.thought || '',
- bbox: pointToBbox({
- x: point[0],
- y: point[1]
- }, size.width, size.height)
- }
- });
- transformActions.push({
- type: 'Tap',
- locate: {
- prompt: action.thought || '',
- bbox: pointToBbox({
- x: point[0],
- y: point[1]
- }, size.width, size.height)
- },
- param: action.thought || ''
- });
- } else if ('drag' === action.action_type) {
- (0, utils_namespaceObject.assert)(action.action_inputs.start_box, 'start_box is required');
- (0, utils_namespaceObject.assert)(action.action_inputs.end_box, 'end_box is required');
- const startPoint = getPoint(action.action_inputs.start_box, size);
- const endPoint = getPoint(action.action_inputs.end_box, size);
- transformActions.push({
- type: 'Drag',
- param: {
- start_box: {
- x: startPoint[0],
- y: startPoint[1]
- },
- end_box: {
- x: endPoint[0],
- y: endPoint[1]
- }
- },
- locate: null,
- thought: action.thought || ''
- });
- } else if ('type' === action.action_type) transformActions.push({
- type: 'Input',
- param: {
- value: action.action_inputs.content
- },
- locate: null,
- thought: action.thought || ''
- });
- else if ('scroll' === action.action_type) transformActions.push({
- type: 'Scroll',
- param: {
- direction: action.action_inputs.direction
- },
- locate: null,
- thought: action.thought || ''
- });
- else if ('finished' === action.action_type) transformActions.push({
- type: 'Finished',
- param: {},
- locate: null,
- thought: action.thought || ''
- });
- else if ('hotkey' === action.action_type) if (action.action_inputs.key) {
- const keys = (0, us_keyboard_layout_namespaceObject.transformHotkeyInput)(action.action_inputs.key);
- transformActions.push({
- type: 'KeyboardPress',
- param: {
- value: keys
- },
- locate: null,
- thought: action.thought || ''
- });
- } else console.warn('No key found in action: hotkey. Will not perform action.');
- else if ('wait' === action.action_type) transformActions.push({
- type: 'Sleep',
- param: {
- timeMs: 1000
- },
- locate: null,
- thought: action.thought || ''
- });
- else if ('androidBackButton' === action.action_type) transformActions.push({
- type: 'AndroidBackButton',
- param: {},
- locate: null,
- thought: action.thought || ''
- });
- else if ('androidHomeButton' === action.action_type) transformActions.push({
- type: 'AndroidHomeButton',
- param: {},
- locate: null,
- thought: action.thought || ''
- });
- else if ('androidRecentAppsButton' === action.action_type) transformActions.push({
- type: 'AndroidRecentAppsButton',
- param: {}
- });
- else if ('androidLongPress' === action.action_type) {
- (0, utils_namespaceObject.assert)(action.action_inputs.start_coords, 'start_coords is required for androidLongPress');
- const point = action.action_inputs.start_coords;
- transformActions.push({
- type: 'AndroidLongPress',
- param: {
- x: point[0],
- y: point[1],
- duration: 1000
- },
- locate: null,
- thought: action.thought || ''
- });
- } else if ('androidPull' === action.action_type) {
- const pullDirection = action.action_inputs.direction || 'down';
- const startPoint = action.action_inputs.start_coords ? {
- x: action.action_inputs.start_coords[0],
- y: action.action_inputs.start_coords[1]
- } : void 0;
- transformActions.push({
- type: 'AndroidPull',
- param: {
- direction: pullDirection,
- startPoint,
- distance: action.action_inputs.distance,
- duration: action.action_inputs.duration || 500
- },
- locate: null,
- thought: action.thought || ''
- });
- }
- });
- if (0 === transformActions.length) throw new Error(`No actions found, response: ${res.content}`, {
- cause: {
- prediction: res.content,
- parsed
- }
- });
- return {
- actions: transformActions,
- actionsFromModel: parsed,
- action_summary: getSummary(res.content),
- usage: res.usage,
- rawResponse: JSON.stringify(res.content, void 0, 2)
- };
- }
- function convertBboxToCoordinates(text) {
- const pattern = /<bbox>(\d+)\s+(\d+)\s+(\d+)\s+(\d+)<\/bbox>/g;
- function replaceMatch(match, x1, y1, x2, y2) {
- const x1Num = Number.parseInt(x1, 10);
- const y1Num = Number.parseInt(y1, 10);
- const x2Num = Number.parseInt(x2, 10);
- const y2Num = Number.parseInt(y2, 10);
- const x = Math.floor((x1Num + x2Num) / 2);
- const y = Math.floor((y1Num + y2Num) / 2);
- return `(${x},${y})`;
- }
- const cleanedText = text.replace(/\[EOS\]/g, '');
- return cleanedText.replace(pattern, replaceMatch).trim();
- }
- function getPoint(startBox, size) {
- const [x, y] = JSON.parse(startBox);
- return [
- x * size.width,
- y * size.height
- ];
- }
- async function resizeImageForUiTars(imageBase64, size) {
- if ('vlm-ui-tars' === (0, env_namespaceObject.vlLocateMode)() && (0, env_namespaceObject.uiTarsModelVersion)() === env_namespaceObject.UITarsModelVersion.V1_5) {
- debug('ui-tars-v1.5, will check image size', size);
- const currentPixels = size.width * size.height;
- const maxPixels = 12845056;
- if (currentPixels > maxPixels) {
- const resizeFactor = Math.sqrt(maxPixels / currentPixels);
- const newWidth = Math.floor(size.width * resizeFactor);
- const newHeight = Math.floor(size.height * resizeFactor);
- debug('resize image for ui-tars, new width: %s, new height: %s', newWidth, newHeight);
- const resizedImage = await (0, img_namespaceObject.resizeImgBase64)(imageBase64, {
- width: newWidth,
- height: newHeight
- });
- return resizedImage;
- }
- }
- return imageBase64;
- }
- const generatePlaywrightTest = async (events, options = {})=>{
- validateEvents(events);
- const summary = prepareEventSummary(events, {
- testName: options.testName,
- maxScreenshots: options.maxScreenshots || 3
- });
- const playwrightSummary = {
- ...summary,
- waitForNetworkIdle: false !== options.waitForNetworkIdle,
- waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,
- viewportSize: options.viewportSize || {
- width: 1280,
- height: 800
- }
- };
- const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
- const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.
-
- Event Summary:
- ${JSON.stringify(playwrightSummary, null, 2)}
-
- Generated code should:
- 1. Import required dependencies
- 2. Set up the test with proper configuration
- 3. Include a beforeEach hook to navigate to the starting URL
- 4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)
- 5. Include appropriate assertions and validations
- 6. Follow best practices for Playwright tests
- 7. Be ready to execute without further modification
-
- Respond ONLY with the complete Playwright test code, no explanations.`;
- const messageContent = createMessageContent(promptText, screenshots, false !== options.includeScreenshots);
- const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene.
- Your task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.
-
- ${constants_namespaceObject.PLAYWRIGHT_EXAMPLE_CODE}`;
- const prompt = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: messageContent
- }
- ];
- const response = await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA);
- if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return response.content;
- throw new Error('Failed to generate Playwright test code');
- };
- const generatePlaywrightTestStream = async (events, options = {})=>{
- validateEvents(events);
- const summary = prepareEventSummary(events, {
- testName: options.testName,
- maxScreenshots: options.maxScreenshots || 3
- });
- const playwrightSummary = {
- ...summary,
- waitForNetworkIdle: false !== options.waitForNetworkIdle,
- waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,
- viewportSize: options.viewportSize || {
- width: 1280,
- height: 800
- }
- };
- const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
- const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.
-
- Event Summary:
- ${JSON.stringify(playwrightSummary, null, 2)}
-
- Generated code should:
- 1. Import required dependencies
- 2. Set up the test with proper configuration
- 3. Include a beforeEach hook to navigate to the starting URL
- 4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)
- 5. Include appropriate assertions and validations
- 6. Follow best practices for Playwright tests
- 7. Be ready to execute without further modification
- 8. Not be wrapped in a markdown code block
-
- Respond ONLY with the complete Playwright test code, no explanations.`;
- const messageContent = createMessageContent(promptText, screenshots, false !== options.includeScreenshots);
- const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene.
- Your task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.
-
- ${constants_namespaceObject.PLAYWRIGHT_EXAMPLE_CODE}`;
- const prompt = [
- {
- role: 'system',
- content: systemPrompt
- },
- {
- role: 'user',
- content: messageContent
- }
- ];
- if (options.stream && options.onChunk) return await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA, void 0, {
- stream: true,
- onChunk: options.onChunk
- });
- {
- const response = await service_caller_call(prompt, common_AIActionType.EXTRACT_DATA);
- if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return {
- content: response.content,
- usage: response.usage,
- isStreamed: false
- };
- throw new Error('Failed to generate Playwright test code');
- }
- };
- })();
- exports.AIActionType = __webpack_exports__.AIActionType;
- exports.AiAssert = __webpack_exports__.AiAssert;
- exports.AiExtractElementInfo = __webpack_exports__.AiExtractElementInfo;
- exports.AiLocateElement = __webpack_exports__.AiLocateElement;
- exports.AiLocateSection = __webpack_exports__.AiLocateSection;
- exports.adaptBboxToRect = __webpack_exports__.adaptBboxToRect;
- exports.callAi = __webpack_exports__.callAi;
- exports.callAiFn = __webpack_exports__.callAiFn;
- exports.callAiFnWithStringResponse = __webpack_exports__.callAiFnWithStringResponse;
- exports.callToGetJSONObject = __webpack_exports__.callToGetJSONObject;
- exports.describeUserPage = __webpack_exports__.describeUserPage;
- exports.elementByPositionWithElementInfo = __webpack_exports__.elementByPositionWithElementInfo;
- exports.generatePlaywrightTest = __webpack_exports__.generatePlaywrightTest;
- exports.generatePlaywrightTestStream = __webpack_exports__.generatePlaywrightTestStream;
- exports.generateYamlTest = __webpack_exports__.generateYamlTest;
- exports.generateYamlTestStream = __webpack_exports__.generateYamlTestStream;
- exports.plan = __webpack_exports__.plan;
- exports.resizeImageForUiTars = __webpack_exports__.resizeImageForUiTars;
- exports.systemPromptToLocateElement = __webpack_exports__.systemPromptToLocateElement;
- exports.vlmPlanning = __webpack_exports__.vlmPlanning;
- for(var __webpack_i__ in __webpack_exports__)if (-1 === [
- "AIActionType",
- "AiAssert",
- "AiExtractElementInfo",
- "AiLocateElement",
- "AiLocateSection",
- "adaptBboxToRect",
- "callAi",
- "callAiFn",
- "callAiFnWithStringResponse",
- "callToGetJSONObject",
- "describeUserPage",
- "elementByPositionWithElementInfo",
- "generatePlaywrightTest",
- "generatePlaywrightTestStream",
- "generateYamlTest",
- "generateYamlTestStream",
- "plan",
- "resizeImageForUiTars",
- "systemPromptToLocateElement",
- "vlmPlanning"
- ].indexOf(__webpack_i__)) exports[__webpack_i__] = __webpack_exports__[__webpack_i__];
- Object.defineProperty(exports, '__esModule', {
- value: true
- });
-
- //# sourceMappingURL=ai-model.js.map