@midscene/core 0.26.5-beta-20250814080504.0 → 0.26.5-beta-20250814125155.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. package/dist/es/ai-model/action-executor.mjs +139 -0
  2. package/dist/es/ai-model/action-executor.mjs.map +1 -0
  3. package/dist/es/ai-model/common.mjs +219 -0
  4. package/dist/es/ai-model/common.mjs.map +1 -0
  5. package/dist/es/ai-model/index.mjs +10 -0
  6. package/dist/es/ai-model/inspect.mjs +317 -0
  7. package/dist/es/ai-model/inspect.mjs.map +1 -0
  8. package/dist/es/ai-model/llm-planning.mjs +85 -0
  9. package/dist/es/ai-model/llm-planning.mjs.map +1 -0
  10. package/dist/es/ai-model/prompt/assertion.mjs +55 -0
  11. package/dist/es/ai-model/prompt/assertion.mjs.map +1 -0
  12. package/dist/es/ai-model/prompt/common.mjs +7 -0
  13. package/dist/es/ai-model/prompt/common.mjs.map +1 -0
  14. package/dist/es/ai-model/prompt/describe.mjs +44 -0
  15. package/dist/es/ai-model/prompt/describe.mjs.map +1 -0
  16. package/dist/es/ai-model/prompt/extraction.mjs +137 -0
  17. package/dist/es/ai-model/prompt/extraction.mjs.map +1 -0
  18. package/dist/es/ai-model/prompt/llm-locator.mjs +275 -0
  19. package/dist/es/ai-model/prompt/llm-locator.mjs.map +1 -0
  20. package/dist/es/ai-model/prompt/llm-planning.mjs +359 -0
  21. package/dist/es/ai-model/prompt/llm-planning.mjs.map +1 -0
  22. package/dist/es/ai-model/prompt/llm-section-locator.mjs +47 -0
  23. package/dist/es/ai-model/prompt/llm-section-locator.mjs.map +1 -0
  24. package/dist/es/ai-model/prompt/playwright-generator.mjs +117 -0
  25. package/dist/es/ai-model/prompt/playwright-generator.mjs.map +1 -0
  26. package/dist/es/ai-model/prompt/ui-tars-locator.mjs +34 -0
  27. package/dist/es/ai-model/prompt/ui-tars-locator.mjs.map +1 -0
  28. package/dist/es/ai-model/prompt/ui-tars-planning.mjs +36 -0
  29. package/dist/es/ai-model/prompt/ui-tars-planning.mjs.map +1 -0
  30. package/dist/es/ai-model/prompt/util.mjs +123 -0
  31. package/dist/es/ai-model/prompt/util.mjs.map +1 -0
  32. package/dist/es/ai-model/prompt/yaml-generator.mjs +219 -0
  33. package/dist/es/ai-model/prompt/yaml-generator.mjs.map +1 -0
  34. package/dist/es/ai-model/service-caller/index.mjs +413 -0
  35. package/dist/es/ai-model/service-caller/index.mjs.map +1 -0
  36. package/dist/es/ai-model/ui-tars-planning.mjs +235 -0
  37. package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -0
  38. package/dist/es/image/index.mjs +2 -0
  39. package/dist/es/index.mjs +7 -2360
  40. package/dist/es/index.mjs.map +1 -1
  41. package/dist/es/insight/index.mjs +261 -0
  42. package/dist/es/insight/index.mjs.map +1 -0
  43. package/dist/es/insight/utils.mjs +19 -0
  44. package/dist/es/insight/utils.mjs.map +1 -0
  45. package/dist/es/types.mjs +11 -0
  46. package/dist/es/types.mjs.map +1 -0
  47. package/dist/es/utils.mjs +2 -2
  48. package/dist/es/yaml.mjs +0 -0
  49. package/dist/lib/ai-model/action-executor.js +173 -0
  50. package/dist/lib/ai-model/action-executor.js.map +1 -0
  51. package/dist/lib/ai-model/common.js +289 -0
  52. package/dist/lib/ai-model/common.js.map +1 -0
  53. package/dist/lib/ai-model/index.js +103 -0
  54. package/dist/lib/ai-model/index.js.map +1 -0
  55. package/dist/lib/ai-model/inspect.js +360 -0
  56. package/dist/lib/ai-model/inspect.js.map +1 -0
  57. package/dist/lib/ai-model/llm-planning.js +119 -0
  58. package/dist/lib/ai-model/llm-planning.js.map +1 -0
  59. package/dist/lib/ai-model/prompt/assertion.js +92 -0
  60. package/dist/lib/ai-model/prompt/assertion.js.map +1 -0
  61. package/dist/lib/ai-model/prompt/common.js +41 -0
  62. package/dist/lib/ai-model/prompt/common.js.map +1 -0
  63. package/dist/lib/ai-model/prompt/describe.js +78 -0
  64. package/dist/lib/ai-model/prompt/describe.js.map +1 -0
  65. package/dist/lib/ai-model/prompt/extraction.js +177 -0
  66. package/dist/lib/ai-model/prompt/extraction.js.map +1 -0
  67. package/dist/lib/ai-model/prompt/llm-locator.js +315 -0
  68. package/dist/lib/ai-model/prompt/llm-locator.js.map +1 -0
  69. package/dist/lib/ai-model/prompt/llm-planning.js +415 -0
  70. package/dist/lib/ai-model/prompt/llm-planning.js.map +1 -0
  71. package/dist/lib/ai-model/prompt/llm-section-locator.js +84 -0
  72. package/dist/lib/ai-model/prompt/llm-section-locator.js.map +1 -0
  73. package/dist/lib/ai-model/prompt/playwright-generator.js +178 -0
  74. package/dist/lib/ai-model/prompt/playwright-generator.js.map +1 -0
  75. package/dist/lib/ai-model/prompt/ui-tars-locator.js +68 -0
  76. package/dist/lib/ai-model/prompt/ui-tars-locator.js.map +1 -0
  77. package/dist/lib/ai-model/prompt/ui-tars-planning.js +73 -0
  78. package/dist/lib/ai-model/prompt/ui-tars-planning.js.map +1 -0
  79. package/dist/lib/ai-model/prompt/util.js +175 -0
  80. package/dist/lib/ai-model/prompt/util.js.map +1 -0
  81. package/dist/lib/ai-model/prompt/yaml-generator.js +280 -0
  82. package/dist/lib/ai-model/prompt/yaml-generator.js.map +1 -0
  83. package/dist/lib/ai-model/service-caller/index.js +496 -0
  84. package/dist/lib/ai-model/service-caller/index.js.map +1 -0
  85. package/dist/lib/ai-model/ui-tars-planning.js +272 -0
  86. package/dist/lib/ai-model/ui-tars-planning.js.map +1 -0
  87. package/dist/lib/image/index.js +56 -0
  88. package/dist/lib/image/index.js.map +1 -0
  89. package/dist/lib/index.js +21 -2393
  90. package/dist/lib/index.js.map +1 -1
  91. package/dist/lib/insight/index.js +295 -0
  92. package/dist/lib/insight/index.js.map +1 -0
  93. package/dist/lib/insight/utils.js +53 -0
  94. package/dist/lib/insight/utils.js.map +1 -0
  95. package/dist/lib/types.js +82 -0
  96. package/dist/lib/types.js.map +1 -0
  97. package/dist/lib/utils.js +2 -2
  98. package/dist/lib/yaml.js +20 -0
  99. package/dist/lib/yaml.js.map +1 -0
  100. package/dist/types/ai-model/action-executor.d.ts +19 -0
  101. package/dist/types/ai-model/common.d.ts +34 -0
  102. package/dist/types/ai-model/index.d.ts +11 -0
  103. package/dist/types/ai-model/inspect.d.ts +49 -0
  104. package/dist/types/ai-model/llm-planning.d.ts +10 -0
  105. package/dist/types/ai-model/prompt/assertion.d.ts +5 -0
  106. package/dist/types/ai-model/prompt/common.d.ts +2 -0
  107. package/dist/types/ai-model/prompt/describe.d.ts +1 -0
  108. package/dist/types/ai-model/prompt/extraction.d.ts +4 -0
  109. package/dist/types/ai-model/prompt/llm-locator.d.ts +9 -0
  110. package/dist/types/ai-model/prompt/llm-planning.d.ts +15 -0
  111. package/dist/types/ai-model/prompt/llm-section-locator.d.ts +6 -0
  112. package/dist/types/ai-model/prompt/playwright-generator.d.ts +25 -0
  113. package/dist/types/ai-model/prompt/ui-tars-locator.d.ts +1 -0
  114. package/dist/types/ai-model/prompt/ui-tars-planning.d.ts +2 -0
  115. package/dist/types/ai-model/prompt/util.d.ts +45 -0
  116. package/dist/types/ai-model/prompt/yaml-generator.d.ts +99 -0
  117. package/dist/types/ai-model/service-caller/index.d.ts +26 -0
  118. package/dist/types/ai-model/ui-tars-planning.d.ts +76 -0
  119. package/dist/types/image/index.d.ts +1 -0
  120. package/dist/types/index.d.ts +9 -1289
  121. package/dist/types/insight/index.d.ts +26 -0
  122. package/dist/types/insight/utils.d.ts +2 -0
  123. package/dist/types/tree.d.ts +1 -11
  124. package/dist/types/types.d.ts +399 -0
  125. package/dist/types/utils.d.ts +27 -47
  126. package/dist/types/yaml.d.ts +172 -0
  127. package/package.json +6 -6
  128. package/dist/es/ai-model.mjs +0 -2502
  129. package/dist/es/ai-model.mjs.map +0 -1
  130. package/dist/lib/ai-model.js +0 -2622
  131. package/dist/lib/ai-model.js.map +0 -1
  132. package/dist/types/ai-model.d.ts +0 -596
@@ -0,0 +1,359 @@
1
+ import node_assert from "node:assert";
2
+ import { PromptTemplate } from "@langchain/core/prompts";
3
+ import { bboxDescription } from "./common.mjs";
4
+ const vlCoTLog = '"what_the_user_wants_to_do_next_by_instruction": string, // What the user wants to do according to the instruction and previous logs. ';
5
+ const vlCurrentLog = '"log": string, // Log what the next one action (ONLY ONE!) you can do according to the screenshot and the instruction. The typical log looks like "Now i want to use action \'{ action-type }\' to do .. first". If no action should be done, log the reason. ". Use the same language as the user\'s instruction.';
6
+ const llmCurrentLog = '"log": string, // Log what the next actions you can do according to the screenshot and the instruction. The typical log looks like "Now i want to use action \'{ action-type }\' to do ..". If no action should be done, log the reason. ". Use the same language as the user\'s instruction.';
7
+ const commonOutputFields = `"error"?: string, // Error messages about unexpected situations, if any. Only think it is an error when the situation is not foreseeable according to the instruction. Use the same language as the user's instruction.
8
+ "more_actions_needed_by_instruction": boolean, // Consider if there is still more action(s) to do after the action in "Log" is done, according to the instruction. If so, set this field to true. Otherwise, set it to false.`;
9
+ const vlLocateParam = (required)=>`locate${required ? '' : '?'}: {bbox: [number, number, number, number], prompt: string }`;
10
+ const llmLocateParam = (required)=>`locate${required ? '' : '?'}: {"id": string, "prompt": string}`;
11
+ const descriptionForAction = (action, locatorScheme)=>{
12
+ const tab = ' ';
13
+ let locateParam = '';
14
+ if ('required' === action.location) locateParam = locatorScheme;
15
+ else if ('optional' === action.location) locateParam = `${locatorScheme} | null`;
16
+ else if (false === action.location) locateParam = '';
17
+ const locatorParam = locateParam ? `- ${locateParam}` : '';
18
+ if (action.whatToLocate) if (locateParam) locateParam += ` // ${action.whatToLocate}`;
19
+ else console.warn(`whatToLocate is provided for action ${action.name}, but location is not required or optional. The whatToLocate will be ignored.`);
20
+ let paramSchema = '';
21
+ if (action.paramSchema) paramSchema = `- param: ${action.paramSchema}`;
22
+ if (action.paramDescription) {
23
+ node_assert(paramSchema, `paramSchema is required when paramDescription is provided for action ${action.name}, but got ${action.paramSchema}`);
24
+ paramSchema += ` // ${action.paramDescription}`;
25
+ }
26
+ const fields = [
27
+ paramSchema,
28
+ locatorParam
29
+ ].filter(Boolean);
30
+ return `- ${action.name}, ${action.description}
31
+ ${tab}- type: "${action.name}"
32
+ ${tab}${fields.join(`\n${tab}`)}
33
+ `.trim();
34
+ };
35
+ const systemTemplateOfVLPlanning = ({ actionSpace, vlMode })=>{
36
+ const actionNameList = actionSpace.map((action)=>action.name).join(', ');
37
+ const actionDescriptionList = actionSpace.map((action)=>descriptionForAction(action, vlLocateParam('required' === action.location)));
38
+ const actionList = actionDescriptionList.join('\n');
39
+ return `
40
+ Target: User will give you a screenshot, an instruction and some previous logs indicating what have been done. Please tell what the next one action is (or null if no action should be done) to do the tasks the instruction requires.
41
+
42
+ Restriction:
43
+ - Don't give extra actions or plans beyond the instruction. ONLY plan for what the instruction requires. For example, don't try to submit the form if the instruction is only to fill something.
44
+ - Always give ONLY ONE action in \`log\` field (or null if no action should be done), instead of multiple actions. Supported actions are ${actionNameList}.
45
+ - Don't repeat actions in the previous logs.
46
+ - Bbox is the bounding box of the element to be located. It's an array of 4 numbers, representing ${bboxDescription(vlMode)}.
47
+
48
+ Supporting actions:
49
+ ${actionList}
50
+
51
+ Field description:
52
+ * The \`prompt\` field inside the \`locate\` field is a short description that could be used to locate the element.
53
+
54
+ Return in JSON format:
55
+ {
56
+ ${vlCoTLog}
57
+ ${vlCurrentLog}
58
+ ${commonOutputFields}
59
+ "action":
60
+ {
61
+ // one of the supporting actions
62
+ } | null,
63
+ ,
64
+ "sleep"?: number, // The sleep time after the action, in milliseconds.
65
+ }
66
+
67
+ For example, when the instruction is "click 'Confirm' button, and click 'Yes' in popup" and the log is "I will use action Tap to click 'Confirm' button", by viewing the screenshot and previous logs, you should consider: We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup.
68
+
69
+ this and output the JSON:
70
+
71
+ {
72
+ "what_the_user_wants_to_do_next_by_instruction": "We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup",
73
+ "log": "I will use action Tap to click 'Yes' in popup",
74
+ "more_actions_needed_by_instruction": false,
75
+ "action": {
76
+ "type": "Tap",
77
+ "locate": {
78
+ "bbox": [100, 100, 200, 200],
79
+ "prompt": "The 'Yes' button in popup"
80
+ }
81
+ }
82
+ }
83
+ `;
84
+ };
85
+ const systemTemplateOfLLM = ({ actionSpace })=>{
86
+ const actionNameList = actionSpace.map((action)=>action.name).join(' / ');
87
+ const actionDescriptionList = actionSpace.map((action)=>descriptionForAction(action, llmLocateParam('required' === action.location)));
88
+ const actionList = actionDescriptionList.join('\n');
89
+ return `
90
+ ## Role
91
+
92
+ You are a versatile professional in software UI automation. Your outstanding contributions will impact the user experience of billions of users.
93
+
94
+ ## Objective
95
+
96
+ - Decompose the instruction user asked into a series of actions
97
+ - Locate the target element if possible
98
+ - If the instruction cannot be accomplished, give a further plan.
99
+
100
+ ## Workflow
101
+
102
+ 1. Receive the screenshot, element description of screenshot(if any), user's instruction and previous logs.
103
+ 2. Decompose the user's task into a sequence of feasible actions, and place it in the \`actions\` field. There are different types of actions (${actionNameList}). The "About the action" section below will give you more details.
104
+ 3. Consider whether the user's instruction will be accomplished after the actions you composed.
105
+ - If the instruction is accomplished, set \`more_actions_needed_by_instruction\` to false.
106
+ - If more actions are needed, set \`more_actions_needed_by_instruction\` to true. Get ready to hand over to the next talent people like you. Carefully log what have been done in the \`log\` field, he or she will continue the task according to your logs.
107
+ 4. If the task is not feasible on this page, set \`error\` field to the reason.
108
+
109
+ ## Constraints
110
+
111
+ - All the actions you composed MUST be feasible, which means all the action fields can be filled with the page context information you get. If not, don't plan this action.
112
+ - Trust the "What have been done" field about the task (if any), don't repeat actions in it.
113
+ - Respond only with valid JSON. Do not write an introduction or summary or markdown prefix like \`\`\`json\`\`\`.
114
+ - If the screenshot and the instruction are totally irrelevant, set reason in the \`error\` field.
115
+
116
+ ## About the \`actions\` field
117
+
118
+ The \`locate\` param is commonly used in the \`param\` field of the action, means to locate the target element to perform the action, it conforms to the following scheme:
119
+
120
+ type LocateParam = {
121
+ "id": string, // the id of the element found. It should either be the id marked with a rectangle in the screenshot or the id described in the description.
122
+ "prompt"?: string // the description of the element to find. It can only be omitted when locate is null.
123
+ } | null // If it's not on the page, the LocateParam should be null
124
+
125
+ ## Supported actions
126
+
127
+ Each action has a \`type\` and corresponding \`param\`. To be detailed:
128
+ ${actionList}
129
+
130
+ `.trim();
131
+ };
132
+ const outputTemplate = `
133
+ ## Output JSON Format:
134
+
135
+ The JSON format is as follows:
136
+
137
+ {
138
+ "actions": [
139
+ // ... some actions
140
+ ],
141
+ ${llmCurrentLog}
142
+ ${commonOutputFields}
143
+ }
144
+
145
+ ## Examples
146
+
147
+ ### Example: Decompose a task
148
+
149
+ When you received the following information:
150
+
151
+ * Instruction: 'Click the language switch button, wait 1s, click "English"'
152
+ * Logs: null
153
+ * Page Context (screenshot and description) shows: There is a language switch button, and the "English" option is not shown in the screenshot now.
154
+
155
+ By viewing the page screenshot and description, you should consider this and output the JSON:
156
+
157
+ * The user intent is: tap the switch button, sleep, and tap the 'English' option
158
+ * The language switch button is shown in the screenshot, and can be located by the page description or the id marked with a rectangle. So we can plan a Tap action to do this.
159
+ * Plan a Sleep action to wait for 1 second to ensure the language options are displayed.
160
+ * The "English" option button is not shown in the screenshot now, it means it may only show after the previous actions are finished. So don't plan any action to do this.
161
+ * Log what these action do: Click the language switch button to open the language options. Wait for 1 second.
162
+ * The task cannot be accomplished (because the last tapping action is not finished yet), so the \`more_actions_needed_by_instruction\` field is true. The \`error\` field is null.
163
+
164
+ {
165
+ "actions":[
166
+ {
167
+ "thought": "Click the language switch button to open the language options.",
168
+ "type": "Tap",
169
+ "param": null,
170
+ "locate": { id: "c81c4e9a33", prompt: "The language switch button" }},
171
+ },
172
+ {
173
+ "thought": "Wait for 1 second to ensure the language options are displayed.",
174
+ "type": "Sleep",
175
+ "param": { "timeMs": 1000 },
176
+ }
177
+ ],
178
+ "error": null,
179
+ "more_actions_needed_by_instruction": true,
180
+ "log": "Click the language switch button to open the language options. Wait for 1 second",
181
+ }
182
+
183
+ ### Example: What NOT to do
184
+ Wrong output:
185
+ {
186
+ "actions":[
187
+ {
188
+ "thought": "Click the language switch button to open the language options.",
189
+ "type": "Tap",
190
+ "param": null,
191
+ "locate": {
192
+ { "id": "c81c4e9a33" }, // WRONG: prompt is missing, this is not a valid LocateParam
193
+ }
194
+ },
195
+ {
196
+ "thought": "Click the English option",
197
+ "type": "Tap",
198
+ "param": null,
199
+ "locate": null, // This means the 'English' option is not shown in the screenshot, the task cannot be accomplished
200
+ }
201
+ ],
202
+ "more_actions_needed_by_instruction": false, // WRONG: should be true
203
+ "log": "Click the language switch button to open the language options",
204
+ }
205
+ `;
206
+ async function systemPromptToTaskPlanning({ actionSpace, vlMode }) {
207
+ if (vlMode) return systemTemplateOfVLPlanning({
208
+ actionSpace,
209
+ vlMode
210
+ });
211
+ return `${systemTemplateOfLLM({
212
+ actionSpace
213
+ })}\n\n${outputTemplate}`;
214
+ }
215
+ const planSchema = {
216
+ type: 'json_schema',
217
+ json_schema: {
218
+ name: 'action_items',
219
+ strict: false,
220
+ schema: {
221
+ type: 'object',
222
+ strict: false,
223
+ properties: {
224
+ actions: {
225
+ type: 'array',
226
+ items: {
227
+ type: 'object',
228
+ strict: false,
229
+ properties: {
230
+ thought: {
231
+ type: 'string',
232
+ description: 'Reasons for generating this task, and why this task is feasible on this page'
233
+ },
234
+ type: {
235
+ type: 'string',
236
+ description: 'Type of action'
237
+ },
238
+ param: {
239
+ anyOf: [
240
+ {
241
+ type: 'null'
242
+ },
243
+ {
244
+ type: 'object',
245
+ additionalProperties: true
246
+ }
247
+ ],
248
+ description: 'Parameter of the action'
249
+ },
250
+ locate: {
251
+ type: [
252
+ 'object',
253
+ 'null'
254
+ ],
255
+ properties: {
256
+ id: {
257
+ type: 'string'
258
+ },
259
+ prompt: {
260
+ type: 'string'
261
+ }
262
+ },
263
+ required: [
264
+ 'id',
265
+ 'prompt'
266
+ ],
267
+ additionalProperties: false,
268
+ description: 'Location information for the target element'
269
+ }
270
+ },
271
+ required: [
272
+ 'thought',
273
+ 'type',
274
+ 'param',
275
+ 'locate'
276
+ ],
277
+ additionalProperties: false
278
+ },
279
+ description: 'List of actions to be performed'
280
+ },
281
+ more_actions_needed_by_instruction: {
282
+ type: 'boolean',
283
+ description: 'If all the actions described in the instruction have been covered by this action and logs, set this field to false.'
284
+ },
285
+ log: {
286
+ type: 'string',
287
+ description: 'Log what these planned actions do. Do not include further actions that have not been planned.'
288
+ },
289
+ error: {
290
+ type: [
291
+ 'string',
292
+ 'null'
293
+ ],
294
+ description: 'Error messages about unexpected situations'
295
+ }
296
+ },
297
+ required: [
298
+ 'actions',
299
+ 'more_actions_needed_by_instruction',
300
+ 'log',
301
+ 'error'
302
+ ],
303
+ additionalProperties: false
304
+ }
305
+ }
306
+ };
307
+ const generateTaskBackgroundContext = (userInstruction, log, userActionContext)=>{
308
+ if (log) return `
309
+ Here is the user's instruction:
310
+
311
+ <instruction>
312
+ <high_priority_knowledge>
313
+ ${userActionContext}
314
+ </high_priority_knowledge>
315
+
316
+ ${userInstruction}
317
+ </instruction>
318
+
319
+ These are the logs from previous executions, which indicate what was done in the previous actions.
320
+ Do NOT repeat these actions.
321
+ <previous_logs>
322
+ ${log}
323
+ </previous_logs>
324
+ `;
325
+ return `
326
+ Here is the user's instruction:
327
+ <instruction>
328
+ <high_priority_knowledge>
329
+ ${userActionContext}
330
+ </high_priority_knowledge>
331
+
332
+ ${userInstruction}
333
+ </instruction>
334
+ `;
335
+ };
336
+ const automationUserPrompt = (vlMode)=>{
337
+ if (vlMode) return new PromptTemplate({
338
+ template: '{taskBackgroundContext}',
339
+ inputVariables: [
340
+ 'taskBackgroundContext'
341
+ ]
342
+ });
343
+ return new PromptTemplate({
344
+ template: `
345
+ pageDescription:
346
+ =====================================
347
+ {pageDescription}
348
+ =====================================
349
+
350
+ {taskBackgroundContext}`,
351
+ inputVariables: [
352
+ "pageDescription",
353
+ 'taskBackgroundContext'
354
+ ]
355
+ });
356
+ };
357
+ export { automationUserPrompt, descriptionForAction, generateTaskBackgroundContext, planSchema, systemPromptToTaskPlanning };
358
+
359
+ //# sourceMappingURL=llm-planning.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-model/prompt/llm-planning.mjs","sources":["webpack://@midscene/core/./src/ai-model/prompt/llm-planning.ts"],"sourcesContent":["import assert from 'node:assert';\nimport type { DeviceAction } from '@/types';\nimport { PromptTemplate } from '@langchain/core/prompts';\nimport type { vlLocateMode } from '@midscene/shared/env';\nimport type { ResponseFormatJSONSchema } from 'openai/resources/index';\nimport { bboxDescription } from './common';\n\n// Note: put the log field first to trigger the CoT\nconst vlCoTLog = `\"what_the_user_wants_to_do_next_by_instruction\": string, // What the user wants to do according to the instruction and previous logs. `;\nconst vlCurrentLog = `\"log\": string, // Log what the next one action (ONLY ONE!) you can do according to the screenshot and the instruction. The typical log looks like \"Now i want to use action '{ action-type }' to do .. first\". If no action should be done, log the reason. \". Use the same language as the user's instruction.`;\nconst llmCurrentLog = `\"log\": string, // Log what the next actions you can do according to the screenshot and the instruction. The typical log looks like \"Now i want to use action '{ action-type }' to do ..\". If no action should be done, log the reason. \". Use the same language as the user's instruction.`;\n\nconst commonOutputFields = `\"error\"?: string, // Error messages about unexpected situations, if any. Only think it is an error when the situation is not foreseeable according to the instruction. Use the same language as the user's instruction.\n \"more_actions_needed_by_instruction\": boolean, // Consider if there is still more action(s) to do after the action in \"Log\" is done, according to the instruction. If so, set this field to true. Otherwise, set it to false.`;\nconst vlLocateParam = (required: boolean) =>\n `locate${required ? 
'' : '?'}: {bbox: [number, number, number, number], prompt: string }`;\nconst llmLocateParam = (required: boolean) =>\n `locate${required ? '' : '?'}: {\"id\": string, \"prompt\": string}`;\n\nexport const descriptionForAction = (\n action: DeviceAction,\n locatorScheme: string,\n) => {\n const tab = ' ';\n let locateParam = '';\n if (action.location === 'required') {\n locateParam = locatorScheme;\n } else if (action.location === 'optional') {\n locateParam = `${locatorScheme} | null`;\n } else if (action.location === false) {\n locateParam = '';\n }\n const locatorParam = locateParam ? `- ${locateParam}` : '';\n\n if (action.whatToLocate) {\n if (!locateParam) {\n console.warn(\n `whatToLocate is provided for action ${action.name}, but location is not required or optional. The whatToLocate will be ignored.`,\n );\n } else {\n locateParam += ` // ${action.whatToLocate}`;\n }\n }\n\n let paramSchema = '';\n if (action.paramSchema) {\n paramSchema = `- param: ${action.paramSchema}`;\n }\n if (action.paramDescription) {\n assert(\n paramSchema,\n `paramSchema is required when paramDescription is provided for action ${action.name}, but got ${action.paramSchema}`,\n );\n paramSchema += ` // ${action.paramDescription}`;\n }\n\n const fields = [paramSchema, locatorParam].filter(Boolean);\n\n return `- ${action.name}, ${action.description}\n${tab}- type: \"${action.name}\"\n${tab}${fields.join(`\\n${tab}`)}\n`.trim();\n};\n\nconst systemTemplateOfVLPlanning = ({\n actionSpace,\n vlMode,\n}: {\n actionSpace: DeviceAction[];\n vlMode: ReturnType<typeof vlLocateMode>;\n}) => {\n const actionNameList = actionSpace.map((action) => action.name).join(', ');\n const actionDescriptionList = actionSpace.map((action) =>\n descriptionForAction(action, vlLocateParam(action.location === 'required')),\n );\n const actionList = actionDescriptionList.join('\\n');\n\n return `\nTarget: User will give you a screenshot, an instruction and some previous logs indicating what have been done. 
Please tell what the next one action is (or null if no action should be done) to do the tasks the instruction requires. \n\nRestriction:\n- Don't give extra actions or plans beyond the instruction. ONLY plan for what the instruction requires. For example, don't try to submit the form if the instruction is only to fill something.\n- Always give ONLY ONE action in \\`log\\` field (or null if no action should be done), instead of multiple actions. Supported actions are ${actionNameList}.\n- Don't repeat actions in the previous logs.\n- Bbox is the bounding box of the element to be located. It's an array of 4 numbers, representing ${bboxDescription(vlMode)}.\n\nSupporting actions:\n${actionList}\n\nField description:\n* The \\`prompt\\` field inside the \\`locate\\` field is a short description that could be used to locate the element.\n\nReturn in JSON format:\n{\n ${vlCoTLog}\n ${vlCurrentLog}\n ${commonOutputFields}\n \"action\": \n {\n // one of the supporting actions\n } | null,\n ,\n \"sleep\"?: number, // The sleep time after the action, in milliseconds.\n}\n\nFor example, when the instruction is \"click 'Confirm' button, and click 'Yes' in popup\" and the log is \"I will use action Tap to click 'Confirm' button\", by viewing the screenshot and previous logs, you should consider: We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup.\n\nthis and output the JSON:\n\n{\n \"what_the_user_wants_to_do_next_by_instruction\": \"We have already clicked the 'Confirm' button, so next we should find and click 'Yes' in popup\",\n \"log\": \"I will use action Tap to click 'Yes' in popup\",\n \"more_actions_needed_by_instruction\": false,\n \"action\": {\n \"type\": \"Tap\",\n \"locate\": {\n \"bbox\": [100, 100, 200, 200],\n \"prompt\": \"The 'Yes' button in popup\"\n }\n }\n}\n`;\n};\n\nconst systemTemplateOfLLM = ({\n actionSpace,\n}: { actionSpace: DeviceAction[] }) => {\n const actionNameList = actionSpace.map((action) => 
action.name).join(' / ');\n const actionDescriptionList = actionSpace.map((action) =>\n descriptionForAction(\n action,\n llmLocateParam(action.location === 'required'),\n ),\n );\n const actionList = actionDescriptionList.join('\\n');\n\n return `\n## Role\n\nYou are a versatile professional in software UI automation. Your outstanding contributions will impact the user experience of billions of users.\n\n## Objective\n\n- Decompose the instruction user asked into a series of actions\n- Locate the target element if possible\n- If the instruction cannot be accomplished, give a further plan.\n\n## Workflow\n\n1. Receive the screenshot, element description of screenshot(if any), user's instruction and previous logs.\n2. Decompose the user's task into a sequence of feasible actions, and place it in the \\`actions\\` field. There are different types of actions (${actionNameList}). The \"About the action\" section below will give you more details.\n3. Consider whether the user's instruction will be accomplished after the actions you composed.\n- If the instruction is accomplished, set \\`more_actions_needed_by_instruction\\` to false.\n- If more actions are needed, set \\`more_actions_needed_by_instruction\\` to true. Get ready to hand over to the next talent people like you. Carefully log what have been done in the \\`log\\` field, he or she will continue the task according to your logs.\n4. If the task is not feasible on this page, set \\`error\\` field to the reason.\n\n## Constraints\n\n- All the actions you composed MUST be feasible, which means all the action fields can be filled with the page context information you get. If not, don't plan this action.\n- Trust the \"What have been done\" field about the task (if any), don't repeat actions in it.\n- Respond only with valid JSON. 
Do not write an introduction or summary or markdown prefix like \\`\\`\\`json\\`\\`\\`.\n- If the screenshot and the instruction are totally irrelevant, set reason in the \\`error\\` field.\n\n## About the \\`actions\\` field\n\nThe \\`locate\\` param is commonly used in the \\`param\\` field of the action, means to locate the target element to perform the action, it conforms to the following scheme:\n\ntype LocateParam = {\n \"id\": string, // the id of the element found. It should either be the id marked with a rectangle in the screenshot or the id described in the description.\n \"prompt\"?: string // the description of the element to find. It can only be omitted when locate is null.\n} | null // If it's not on the page, the LocateParam should be null\n\n## Supported actions\n\nEach action has a \\`type\\` and corresponding \\`param\\`. To be detailed:\n${actionList}\n\n`.trim();\n};\n\nconst outputTemplate = `\n## Output JSON Format:\n\nThe JSON format is as follows:\n\n{\n \"actions\": [\n // ... some actions\n ],\n ${llmCurrentLog}\n ${commonOutputFields}\n}\n\n## Examples\n\n### Example: Decompose a task\n\nWhen you received the following information:\n\n* Instruction: 'Click the language switch button, wait 1s, click \"English\"'\n* Logs: null\n* Page Context (screenshot and description) shows: There is a language switch button, and the \"English\" option is not shown in the screenshot now.\n\nBy viewing the page screenshot and description, you should consider this and output the JSON:\n\n* The user intent is: tap the switch button, sleep, and tap the 'English' option\n* The language switch button is shown in the screenshot, and can be located by the page description or the id marked with a rectangle. 
So we can plan a Tap action to do this.\n* Plan a Sleep action to wait for 1 second to ensure the language options are displayed.\n* The \"English\" option button is not shown in the screenshot now, it means it may only show after the previous actions are finished. So don't plan any action to do this.\n* Log what these action do: Click the language switch button to open the language options. Wait for 1 second.\n* The task cannot be accomplished (because the last tapping action is not finished yet), so the \\`more_actions_needed_by_instruction\\` field is true. The \\`error\\` field is null.\n\n{\n \"actions\":[\n {\n \"thought\": \"Click the language switch button to open the language options.\",\n \"type\": \"Tap\", \n \"param\": null,\n \"locate\": { id: \"c81c4e9a33\", prompt: \"The language switch button\" }},\n },\n {\n \"thought\": \"Wait for 1 second to ensure the language options are displayed.\",\n \"type\": \"Sleep\",\n \"param\": { \"timeMs\": 1000 },\n }\n ],\n \"error\": null,\n \"more_actions_needed_by_instruction\": true,\n \"log\": \"Click the language switch button to open the language options. 
Wait for 1 second\",\n}\n\n### Example: What NOT to do\nWrong output:\n{\n \"actions\":[\n {\n \"thought\": \"Click the language switch button to open the language options.\",\n \"type\": \"Tap\",\n \"param\": null,\n \"locate\": {\n { \"id\": \"c81c4e9a33\" }, // WRONG: prompt is missing, this is not a valid LocateParam\n }\n },\n {\n \"thought\": \"Click the English option\",\n \"type\": \"Tap\", \n \"param\": null,\n \"locate\": null, // This means the 'English' option is not shown in the screenshot, the task cannot be accomplished\n }\n ],\n \"more_actions_needed_by_instruction\": false, // WRONG: should be true\n \"log\": \"Click the language switch button to open the language options\",\n}\n`;\n\nexport async function systemPromptToTaskPlanning({\n actionSpace,\n vlMode,\n}: {\n actionSpace: DeviceAction[];\n vlMode: ReturnType<typeof vlLocateMode>;\n}) {\n if (vlMode) {\n return systemTemplateOfVLPlanning({ actionSpace, vlMode });\n }\n\n return `${systemTemplateOfLLM({ actionSpace })}\\n\\n${outputTemplate}`;\n}\n\nexport const planSchema: ResponseFormatJSONSchema = {\n type: 'json_schema',\n json_schema: {\n name: 'action_items',\n strict: false,\n schema: {\n type: 'object',\n strict: false,\n properties: {\n actions: {\n type: 'array',\n items: {\n type: 'object',\n strict: false,\n properties: {\n thought: {\n type: 'string',\n description:\n 'Reasons for generating this task, and why this task is feasible on this page',\n },\n type: {\n type: 'string',\n description: 'Type of action',\n },\n param: {\n anyOf: [\n { type: 'null' },\n {\n type: 'object',\n additionalProperties: true,\n },\n ],\n description: 'Parameter of the action',\n },\n locate: {\n type: ['object', 'null'],\n properties: {\n id: { type: 'string' },\n prompt: { type: 'string' },\n },\n required: ['id', 'prompt'],\n additionalProperties: false,\n description: 'Location information for the target element',\n },\n },\n required: ['thought', 'type', 'param', 'locate'],\n 
additionalProperties: false,\n },\n description: 'List of actions to be performed',\n },\n more_actions_needed_by_instruction: {\n type: 'boolean',\n description:\n 'If all the actions described in the instruction have been covered by this action and logs, set this field to false.',\n },\n log: {\n type: 'string',\n description:\n 'Log what these planned actions do. Do not include further actions that have not been planned.',\n },\n error: {\n type: ['string', 'null'],\n description: 'Error messages about unexpected situations',\n },\n },\n required: [\n 'actions',\n 'more_actions_needed_by_instruction',\n 'log',\n 'error',\n ],\n additionalProperties: false,\n },\n },\n};\n\nexport const generateTaskBackgroundContext = (\n userInstruction: string,\n log?: string,\n userActionContext?: string,\n) => {\n if (log) {\n return `\nHere is the user's instruction:\n\n<instruction>\n <high_priority_knowledge>\n ${userActionContext}\n </high_priority_knowledge>\n\n ${userInstruction}\n</instruction>\n\nThese are the logs from previous executions, which indicate what was done in the previous actions.\nDo NOT repeat these actions.\n<previous_logs>\n${log}\n</previous_logs>\n`;\n }\n\n return `\nHere is the user's instruction:\n<instruction>\n <high_priority_knowledge>\n ${userActionContext}\n </high_priority_knowledge>\n\n ${userInstruction}\n</instruction>\n`;\n};\n\nexport const automationUserPrompt = (\n vlMode: ReturnType<typeof vlLocateMode>,\n) => {\n if (vlMode) {\n return new PromptTemplate({\n template: '{taskBackgroundContext}',\n inputVariables: ['taskBackgroundContext'],\n });\n }\n\n return new PromptTemplate({\n template: `\npageDescription:\n=====================================\n{pageDescription}\n=====================================\n\n{taskBackgroundContext}`,\n inputVariables: ['pageDescription', 'taskBackgroundContext'],\n 
});\n};\n"],"names":["vlCoTLog","vlCurrentLog","llmCurrentLog","commonOutputFields","vlLocateParam","required","llmLocateParam","descriptionForAction","action","locatorScheme","tab","locateParam","locatorParam","console","paramSchema","assert","fields","Boolean","systemTemplateOfVLPlanning","actionSpace","vlMode","actionNameList","actionDescriptionList","actionList","bboxDescription","systemTemplateOfLLM","outputTemplate","systemPromptToTaskPlanning","planSchema","generateTaskBackgroundContext","userInstruction","log","userActionContext","automationUserPrompt","PromptTemplate"],"mappings":";;;AAQA,MAAMA,WAAW;AACjB,MAAMC,eAAe;AACrB,MAAMC,gBAAgB;AAEtB,MAAMC,qBAAqB,CAAC;+NACmM,CAAC;AAChO,MAAMC,gBAAgB,CAACC,WACrB,CAAC,MAAM,EAAEA,WAAW,KAAK,IAAI,2DAA2D,CAAC;AAC3F,MAAMC,iBAAiB,CAACD,WACtB,CAAC,MAAM,EAAEA,WAAW,KAAK,IAAI,kCAAkC,CAAC;AAE3D,MAAME,uBAAuB,CAClCC,QACAC;IAEA,MAAMC,MAAM;IACZ,IAAIC,cAAc;IAClB,IAAIH,AAAoB,eAApBA,OAAO,QAAQ,EACjBG,cAAcF;SACT,IAAID,AAAoB,eAApBA,OAAO,QAAQ,EACxBG,cAAc,GAAGF,cAAc,OAAO,CAAC;SAClC,IAAID,AAAoB,UAApBA,OAAO,QAAQ,EACxBG,cAAc;IAEhB,MAAMC,eAAeD,cAAc,CAAC,EAAE,EAAEA,aAAa,GAAG;IAExD,IAAIH,OAAO,YAAY,EACrB,IAAKG,aAKHA,eAAe,CAAC,IAAI,EAAEH,OAAO,YAAY,EAAE;SAJ3CK,QAAQ,IAAI,CACV,CAAC,oCAAoC,EAAEL,OAAO,IAAI,CAAC,6EAA6E,CAAC;IAOvI,IAAIM,cAAc;IAClB,IAAIN,OAAO,WAAW,EACpBM,cAAc,CAAC,SAAS,EAAEN,OAAO,WAAW,EAAE;IAEhD,IAAIA,OAAO,gBAAgB,EAAE;QAC3BO,YACED,aACA,CAAC,qEAAqE,EAAEN,OAAO,IAAI,CAAC,UAAU,EAAEA,OAAO,WAAW,EAAE;QAEtHM,eAAe,CAAC,IAAI,EAAEN,OAAO,gBAAgB,EAAE;IACjD;IAEA,MAAMQ,SAAS;QAACF;QAAaF;KAAa,CAAC,MAAM,CAACK;IAElD,OAAO,CAAC,EAAE,EAAET,OAAO,IAAI,CAAC,EAAE,EAAEA,OAAO,WAAW,CAAC;AACjD,EAAEE,IAAI,SAAS,EAAEF,OAAO,IAAI,CAAC;AAC7B,EAAEE,MAAMM,OAAO,IAAI,CAAC,CAAC,EAAE,EAAEN,KAAK,EAAE;AAChC,CAAC,CAAC,IAAI;AACN;AAEA,MAAMQ,6BAA6B,CAAC,EAClCC,WAAW,EACXC,MAAM,EAIP;IACC,MAAMC,iBAAiBF,YAAY,GAAG,CAAC,CAACX,SAAWA,OAAO,IAAI,EAAE,IAAI,CAAC;IACrE,MAAMc,wBAAwBH,YAAY,GAAG,CAAC,CAACX,SAC7CD,qBAAqBC,QAAQJ,cAAcI,AAAoB,eAApBA,OAAO,QAAQ;IAE5D,MAAMe,aAAaD,sBAAsB,IAAI,CAAC;IAE9C,OAAO,CAA
C;;;;;yIAK+H,EAAED,eAAe;;kGAExD,EAAEG,gBAAgBJ,QAAQ;;;AAG5H,EAAEG,WAAW;;;;;;;EAOX,EAAEvB,SAAS;EACX,EAAEC,aAAa;EACf,EAAEE,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;AAyBvB,CAAC;AACD;AAEA,MAAMsB,sBAAsB,CAAC,EAC3BN,WAAW,EACqB;IAChC,MAAME,iBAAiBF,YAAY,GAAG,CAAC,CAACX,SAAWA,OAAO,IAAI,EAAE,IAAI,CAAC;IACrE,MAAMc,wBAAwBH,YAAY,GAAG,CAAC,CAACX,SAC7CD,qBACEC,QACAF,eAAeE,AAAoB,eAApBA,OAAO,QAAQ;IAGlC,MAAMe,aAAaD,sBAAsB,IAAI,CAAC;IAE9C,OAAO,CAAC;;;;;;;;;;;;;;+IAcqI,EAAED,eAAe;;;;;;;;;;;;;;;;;;;;;;;;;AAyBhK,EAAEE,WAAW;;AAEb,CAAC,CAAC,IAAI;AACN;AAEA,MAAMG,iBAAiB,CAAC;;;;;;;;;EAStB,EAAExB,cAAc;EAChB,EAAEC,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+DvB,CAAC;AAEM,eAAewB,2BAA2B,EAC/CR,WAAW,EACXC,MAAM,EAIP;IACC,IAAIA,QACF,OAAOF,2BAA2B;QAAEC;QAAaC;IAAO;IAG1D,OAAO,GAAGK,oBAAoB;QAAEN;IAAY,GAAG,IAAI,EAAEO,gBAAgB;AACvE;AAEO,MAAME,aAAuC;IAClD,MAAM;IACN,aAAa;QACX,MAAM;QACN,QAAQ;QACR,QAAQ;YACN,MAAM;YACN,QAAQ;YACR,YAAY;gBACV,SAAS;oBACP,MAAM;oBACN,OAAO;wBACL,MAAM;wBACN,QAAQ;wBACR,YAAY;4BACV,SAAS;gCACP,MAAM;gCACN,aACE;4BACJ;4BACA,MAAM;gCACJ,MAAM;gCACN,aAAa;4BACf;4BACA,OAAO;gCACL,OAAO;oCACL;wCAAE,MAAM;oCAAO;oCACf;wCACE,MAAM;wCACN,sBAAsB;oCACxB;iCACD;gCACD,aAAa;4BACf;4BACA,QAAQ;gCACN,MAAM;oCAAC;oCAAU;iCAAO;gCACxB,YAAY;oCACV,IAAI;wCAAE,MAAM;oCAAS;oCACrB,QAAQ;wCAAE,MAAM;oCAAS;gCAC3B;gCACA,UAAU;oCAAC;oCAAM;iCAAS;gCAC1B,sBAAsB;gCACtB,aAAa;4BACf;wBACF;wBACA,UAAU;4BAAC;4BAAW;4BAAQ;4BAAS;yBAAS;wBAChD,sBAAsB;oBACxB;oBACA,aAAa;gBACf;gBACA,oCAAoC;oBAClC,MAAM;oBACN,aACE;gBACJ;gBACA,KAAK;oBACH,MAAM;oBACN,aACE;gBACJ;gBACA,OAAO;oBACL,MAAM;wBAAC;wBAAU;qBAAO;oBACxB,aAAa;gBACf;YACF;YACA,UAAU;gBACR;gBACA;gBACA;gBACA;aACD;YACD,sBAAsB;QACxB;IACF;AACF;AAEO,MAAMC,gCAAgC,CAC3CC,iBACAC,KACAC;IAEA,IAAID,KACF,OAAO,CAAC;;;;;IAKR,EAAEC,kBAAkB;;;EAGtB,EAAEF,gBAAgB;;;;;;AAMpB,EAAEC,IAAI;;AAEN,CAAC;IAGC,OAAO,CAAC;;;;IAIN,EAAEC,kBAAkB;;;EAGtB,EAAEF,gBAAgB;;AAEpB,CAAC;AACD;AAEO,MAAMG,uBAAuB,CAClCb;IAEA,IAAIA,QACF,OAAO,IAAIc,eAAe;QACxB,UAAU;QACV,gBAAgB;YAAC;SAAwB;IAC3C;IAGF,OAAO,IAAIA,eAAe;QACxB,UAAU,CAAC;;;;;;uBAMQ,
CAAC;QACpB,gBAAgB;YAAC;YAAmB;SAAwB;IAC9D;AACF"}
@@ -0,0 +1,47 @@
1
+ import { PromptTemplate } from "@langchain/core/prompts";
2
+ import { bboxDescription } from "./common.mjs";
3
+ function systemPromptToLocateSection(vlMode) {
4
+ return `
5
+ You goal is to find out one section containing the target element in the screenshot, put it in the \`bbox\` field. If the user describe the target element with some reference elements, you should also find the section containing the reference elements, put it in the \`references_bbox\` field.
6
+
7
+ Usually, it should be approximately an area not more than 300x300px. Changes of the size are allowed if there are many elements to cover.
8
+
9
+ return in this JSON format:
10
+ \`\`\`json
11
+ {
12
+ "bbox": [number, number, number, number],
13
+ "references_bbox"?: [
14
+ [number, number, number, number],
15
+ [number, number, number, number],
16
+ ...
17
+ ],
18
+ "error"?: string
19
+ }
20
+ \`\`\`
21
+
22
+ In which, all the numbers in the \`bbox\` and \`references_bbox\` represent ${bboxDescription(vlMode)}.
23
+
24
+ For example, if the user describe the target element as "the delete button on the second row with title 'Peter'", you should put the bounding box of the delete button in the \`bbox\` field, and the bounding box of the second row in the \`references_bbox\` field.
25
+
26
+ the return value should be like this:
27
+ \`\`\`json
28
+ {
29
+ "bbox": [100, 100, 200, 200],
30
+ "references_bbox": [[100, 100, 200, 200]]
31
+ }
32
+ \`\`\`
33
+ `;
34
+ }
35
+ const sectionLocatorInstruction = new PromptTemplate({
36
+ template: `Here is the target element user interested in:
37
+ <targetDescription>
38
+ {sectionDescription}
39
+ </targetDescription>
40
+ `,
41
+ inputVariables: [
42
+ "sectionDescription"
43
+ ]
44
+ });
45
+ export { sectionLocatorInstruction, systemPromptToLocateSection };
46
+
47
+ //# sourceMappingURL=llm-section-locator.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-model/prompt/llm-section-locator.mjs","sources":["webpack://@midscene/core/./src/ai-model/prompt/llm-section-locator.ts"],"sourcesContent":["import { PromptTemplate } from '@langchain/core/prompts';\nimport type { vlLocateMode } from '@midscene/shared/env';\nimport { bboxDescription } from './common';\n\nexport function systemPromptToLocateSection(\n vlMode: ReturnType<typeof vlLocateMode>,\n) {\n return `\nYou goal is to find out one section containing the target element in the screenshot, put it in the \\`bbox\\` field. If the user describe the target element with some reference elements, you should also find the section containing the reference elements, put it in the \\`references_bbox\\` field.\n\nUsually, it should be approximately an area not more than 300x300px. Changes of the size are allowed if there are many elements to cover.\n\nreturn in this JSON format:\n\\`\\`\\`json\n{\n \"bbox\": [number, number, number, number],\n \"references_bbox\"?: [\n [number, number, number, number],\n [number, number, number, number],\n ...\n ],\n \"error\"?: string\n}\n\\`\\`\\`\n\nIn which, all the numbers in the \\`bbox\\` and \\`references_bbox\\` represent ${bboxDescription(vlMode)}.\n\nFor example, if the user describe the target element as \"the delete button on the second row with title 'Peter'\", you should put the bounding box of the delete button in the \\`bbox\\` field, and the bounding box of the second row in the \\`references_bbox\\` field.\n\nthe return value should be like this:\n\\`\\`\\`json\n{\n \"bbox\": [100, 100, 200, 200],\n \"references_bbox\": [[100, 100, 200, 200]]\n}\n\\`\\`\\`\n`;\n}\n\nexport const sectionLocatorInstruction = new PromptTemplate({\n template: `Here is the target element user interested in:\n<targetDescription>\n{sectionDescription}\n</targetDescription>\n `,\n inputVariables: 
['sectionDescription'],\n});\n"],"names":["systemPromptToLocateSection","vlMode","bboxDescription","sectionLocatorInstruction","PromptTemplate"],"mappings":";;AAIO,SAASA,4BACdC,MAAuC;IAEvC,OAAO,CAAC;;;;;;;;;;;;;;;;;;4EAkBkE,EAAEC,gBAAgBD,QAAQ;;;;;;;;;;;AAWtG,CAAC;AACD;AAEO,MAAME,4BAA4B,IAAIC,eAAe;IAC1D,UAAU,CAAC;;;;EAIX,CAAC;IACD,gBAAgB;QAAC;KAAqB;AACxC"}
@@ -0,0 +1,117 @@
1
+ import { PLAYWRIGHT_EXAMPLE_CODE } from "@midscene/shared/constants";
2
+ import { AIActionType, callAi } from "../index.mjs";
3
+ import { createEventCounts, createMessageContent, extractInputDescriptions, filterEventsByType, getScreenshotsForLLM, prepareEventSummary, processEventsForLLM, validateEvents } from "./yaml-generator.mjs";
4
+ const generatePlaywrightTest = async (events, options = {})=>{
5
+ validateEvents(events);
6
+ const summary = prepareEventSummary(events, {
7
+ testName: options.testName,
8
+ maxScreenshots: options.maxScreenshots || 3
9
+ });
10
+ const playwrightSummary = {
11
+ ...summary,
12
+ waitForNetworkIdle: false !== options.waitForNetworkIdle,
13
+ waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,
14
+ viewportSize: options.viewportSize || {
15
+ width: 1280,
16
+ height: 800
17
+ }
18
+ };
19
+ const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
20
+ const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.
21
+
22
+ Event Summary:
23
+ ${JSON.stringify(playwrightSummary, null, 2)}
24
+
25
+ Generated code should:
26
+ 1. Import required dependencies
27
+ 2. Set up the test with proper configuration
28
+ 3. Include a beforeEach hook to navigate to the starting URL
29
+ 4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)
30
+ 5. Include appropriate assertions and validations
31
+ 6. Follow best practices for Playwright tests
32
+ 7. Be ready to execute without further modification
33
+
34
+ Respond ONLY with the complete Playwright test code, no explanations.`;
35
+ const messageContent = createMessageContent(promptText, screenshots, false !== options.includeScreenshots);
36
+ const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene.
37
+ Your task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.
38
+
39
+ ${PLAYWRIGHT_EXAMPLE_CODE}`;
40
+ const prompt = [
41
+ {
42
+ role: 'system',
43
+ content: systemPrompt
44
+ },
45
+ {
46
+ role: 'user',
47
+ content: messageContent
48
+ }
49
+ ];
50
+ const response = await callAi(prompt, AIActionType.EXTRACT_DATA);
51
+ if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return response.content;
52
+ throw new Error('Failed to generate Playwright test code');
53
+ };
54
+ const generatePlaywrightTestStream = async (events, options = {})=>{
55
+ validateEvents(events);
56
+ const summary = prepareEventSummary(events, {
57
+ testName: options.testName,
58
+ maxScreenshots: options.maxScreenshots || 3
59
+ });
60
+ const playwrightSummary = {
61
+ ...summary,
62
+ waitForNetworkIdle: false !== options.waitForNetworkIdle,
63
+ waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,
64
+ viewportSize: options.viewportSize || {
65
+ width: 1280,
66
+ height: 800
67
+ }
68
+ };
69
+ const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);
70
+ const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.
71
+
72
+ Event Summary:
73
+ ${JSON.stringify(playwrightSummary, null, 2)}
74
+
75
+ Generated code should:
76
+ 1. Import required dependencies
77
+ 2. Set up the test with proper configuration
78
+ 3. Include a beforeEach hook to navigate to the starting URL
79
+ 4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)
80
+ 5. Include appropriate assertions and validations
81
+ 6. Follow best practices for Playwright tests
82
+ 7. Be ready to execute without further modification
83
+ 8. can't wrap this test code in markdown code block
84
+
85
+ Respond ONLY with the complete Playwright test code, no explanations.`;
86
+ const messageContent = createMessageContent(promptText, screenshots, false !== options.includeScreenshots);
87
+ const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene.
88
+ Your task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.
89
+
90
+ ${PLAYWRIGHT_EXAMPLE_CODE}`;
91
+ const prompt = [
92
+ {
93
+ role: 'system',
94
+ content: systemPrompt
95
+ },
96
+ {
97
+ role: 'user',
98
+ content: messageContent
99
+ }
100
+ ];
101
+ if (options.stream && options.onChunk) return await callAi(prompt, AIActionType.EXTRACT_DATA, void 0, {
102
+ stream: true,
103
+ onChunk: options.onChunk
104
+ });
105
+ {
106
+ const response = await callAi(prompt, AIActionType.EXTRACT_DATA);
107
+ if ((null == response ? void 0 : response.content) && 'string' == typeof response.content) return {
108
+ content: response.content,
109
+ usage: response.usage,
110
+ isStreamed: false
111
+ };
112
+ throw new Error('Failed to generate Playwright test code');
113
+ }
114
+ };
115
+ export { createEventCounts, createMessageContent, extractInputDescriptions, filterEventsByType, generatePlaywrightTest, generatePlaywrightTestStream, getScreenshotsForLLM, prepareEventSummary, processEventsForLLM, validateEvents };
116
+
117
+ //# sourceMappingURL=playwright-generator.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-model/prompt/playwright-generator.mjs","sources":["webpack://@midscene/core/./src/ai-model/prompt/playwright-generator.ts"],"sourcesContent":["import type {\n StreamingAIResponse,\n StreamingCodeGenerationOptions,\n} from '@/types';\nimport { PLAYWRIGHT_EXAMPLE_CODE } from '@midscene/shared/constants';\nimport type { ChatCompletionMessageParam } from 'openai/resources/index';\nimport { AIActionType, callAi } from '../index';\n\n// Import shared utilities and types from yaml-generator\nimport {\n type ChromeRecordedEvent,\n type EventCounts,\n type EventSummary,\n type InputDescription,\n type ProcessedEvent,\n createEventCounts,\n createMessageContent,\n extractInputDescriptions,\n filterEventsByType,\n getScreenshotsForLLM,\n prepareEventSummary,\n processEventsForLLM,\n validateEvents,\n} from './yaml-generator';\n\n// Playwright-specific interfaces\nexport interface PlaywrightGenerationOptions {\n testName?: string;\n includeScreenshots?: boolean;\n includeTimestamps?: boolean;\n maxScreenshots?: number;\n description?: string;\n viewportSize?: { width: number; height: number };\n waitForNetworkIdle?: boolean;\n waitForNetworkIdleTimeout?: number;\n}\n\n// Re-export shared types for backward compatibility\nexport type {\n ChromeRecordedEvent,\n EventCounts,\n InputDescription,\n ProcessedEvent,\n EventSummary,\n};\n\n// Re-export shared utilities for backward compatibility\nexport {\n getScreenshotsForLLM,\n filterEventsByType,\n createEventCounts,\n extractInputDescriptions,\n processEventsForLLM,\n prepareEventSummary,\n createMessageContent,\n validateEvents,\n};\n\n/**\n * Generates Playwright test code from recorded events\n */\nexport const generatePlaywrightTest = async (\n events: ChromeRecordedEvent[],\n options: PlaywrightGenerationOptions = {},\n): Promise<string> => {\n // Validate input\n validateEvents(events);\n\n // Prepare event summary using shared utilities\n const summary = prepareEventSummary(events, {\n testName: 
options.testName,\n maxScreenshots: options.maxScreenshots || 3,\n });\n\n // Add Playwright-specific options to summary\n const playwrightSummary = {\n ...summary,\n waitForNetworkIdle: options.waitForNetworkIdle !== false,\n waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,\n viewportSize: options.viewportSize || { width: 1280, height: 800 },\n };\n\n // Get screenshots for visual context\n const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);\n\n // Create prompt text\n const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.\n\nEvent Summary:\n${JSON.stringify(playwrightSummary, null, 2)}\n\nGenerated code should:\n1. Import required dependencies\n2. Set up the test with proper configuration\n3. Include a beforeEach hook to navigate to the starting URL\n4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)\n5. Include appropriate assertions and validations\n6. Follow best practices for Playwright tests\n7. Be ready to execute without further modification\n\nRespond ONLY with the complete Playwright test code, no explanations.`;\n\n // Create message content with screenshots\n const messageContent = createMessageContent(\n promptText,\n screenshots,\n options.includeScreenshots !== false,\n );\n\n // Create system prompt\n const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene. 
\nYour task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.\n\n${PLAYWRIGHT_EXAMPLE_CODE}`;\n\n // Use LLM to generate the Playwright test code\n const prompt: ChatCompletionMessageParam[] = [\n {\n role: 'system',\n content: systemPrompt,\n },\n {\n role: 'user',\n content: messageContent,\n },\n ];\n\n const response = await callAi(prompt, AIActionType.EXTRACT_DATA);\n\n if (response?.content && typeof response.content === 'string') {\n return response.content;\n }\n\n throw new Error('Failed to generate Playwright test code');\n};\n\n/**\n * Generates Playwright test code from recorded events with streaming support\n */\nexport const generatePlaywrightTestStream = async (\n events: ChromeRecordedEvent[],\n options: PlaywrightGenerationOptions & StreamingCodeGenerationOptions = {},\n): Promise<StreamingAIResponse> => {\n // Validate input\n validateEvents(events);\n\n // Prepare event summary using shared utilities\n const summary = prepareEventSummary(events, {\n testName: options.testName,\n maxScreenshots: options.maxScreenshots || 3,\n });\n\n // Add Playwright-specific options to summary\n const playwrightSummary = {\n ...summary,\n waitForNetworkIdle: options.waitForNetworkIdle !== false,\n waitForNetworkIdleTimeout: options.waitForNetworkIdleTimeout || 2000,\n viewportSize: options.viewportSize || { width: 1280, height: 800 },\n };\n\n // Get screenshots for visual context\n const screenshots = getScreenshotsForLLM(events, options.maxScreenshots || 3);\n\n // Create prompt text\n const promptText = `Generate a Playwright test using @midscene/web/playwright that reproduces this recorded browser session. The test should be based on the following events and follow the structure of the example provided. Make the test descriptive with appropriate assertions and validations.\n\nEvent Summary:\n${JSON.stringify(playwrightSummary, null, 2)}\n\nGenerated code should:\n1. 
Import required dependencies\n2. Set up the test with proper configuration\n3. Include a beforeEach hook to navigate to the starting URL\n4. Implement a test that uses Midscene AI methods (aiTap, aiInput, aiAssert, etc.)\n5. Include appropriate assertions and validations\n6. Follow best practices for Playwright tests\n7. Be ready to execute without further modification\n8. can't wrap this test code in markdown code block\n\nRespond ONLY with the complete Playwright test code, no explanations.`;\n\n // Create message content with screenshots\n const messageContent = createMessageContent(\n promptText,\n screenshots,\n options.includeScreenshots !== false,\n );\n\n // Create system prompt\n const systemPrompt = `You are an expert test automation engineer specializing in Playwright and Midscene. \nYour task is to generate a complete, executable Playwright test using @midscene/web/playwright that reproduces a recorded browser session.\n\n${PLAYWRIGHT_EXAMPLE_CODE}`;\n\n // Use LLM to generate the Playwright test code with streaming\n const prompt: ChatCompletionMessageParam[] = [\n {\n role: 'system',\n content: systemPrompt,\n },\n {\n role: 'user',\n content: messageContent,\n },\n ];\n\n if (options.stream && options.onChunk) {\n // Use streaming\n return await callAi(prompt, AIActionType.EXTRACT_DATA, undefined, {\n stream: true,\n onChunk: options.onChunk,\n });\n } else {\n // Fallback to non-streaming\n const response = await callAi(prompt, AIActionType.EXTRACT_DATA);\n\n if (response?.content && typeof response.content === 'string') {\n return {\n content: response.content,\n usage: response.usage,\n isStreamed: false,\n };\n }\n\n throw new Error('Failed to generate Playwright test code');\n 
}\n};\n"],"names":["generatePlaywrightTest","events","options","validateEvents","summary","prepareEventSummary","playwrightSummary","screenshots","getScreenshotsForLLM","promptText","JSON","messageContent","createMessageContent","systemPrompt","PLAYWRIGHT_EXAMPLE_CODE","prompt","response","callAi","AIActionType","Error","generatePlaywrightTestStream","undefined"],"mappings":";;;AA6DO,MAAMA,yBAAyB,OACpCC,QACAC,UAAuC,CAAC,CAAC;IAGzCC,eAAeF;IAGf,MAAMG,UAAUC,oBAAoBJ,QAAQ;QAC1C,UAAUC,QAAQ,QAAQ;QAC1B,gBAAgBA,QAAQ,cAAc,IAAI;IAC5C;IAGA,MAAMI,oBAAoB;QACxB,GAAGF,OAAO;QACV,oBAAoBF,AAA+B,UAA/BA,QAAQ,kBAAkB;QAC9C,2BAA2BA,QAAQ,yBAAyB,IAAI;QAChE,cAAcA,QAAQ,YAAY,IAAI;YAAE,OAAO;YAAM,QAAQ;QAAI;IACnE;IAGA,MAAMK,cAAcC,qBAAqBP,QAAQC,QAAQ,cAAc,IAAI;IAG3E,MAAMO,aAAa,CAAC;;;AAGtB,EAAEC,KAAK,SAAS,CAACJ,mBAAmB,MAAM,GAAG;;;;;;;;;;;qEAWwB,CAAC;IAGpE,MAAMK,iBAAiBC,qBACrBH,YACAF,aACAL,AAA+B,UAA/BA,QAAQ,kBAAkB;IAI5B,MAAMW,eAAe,CAAC;;;AAGxB,EAAEC,yBAAyB;IAGzB,MAAMC,SAAuC;QAC3C;YACE,MAAM;YACN,SAASF;QACX;QACA;YACE,MAAM;YACN,SAASF;QACX;KACD;IAED,MAAMK,WAAW,MAAMC,OAAOF,QAAQG,aAAa,YAAY;IAE/D,IAAIF,AAAAA,CAAAA,QAAAA,WAAAA,KAAAA,IAAAA,SAAU,OAAO,AAAD,KAAK,AAA4B,YAA5B,OAAOA,SAAS,OAAO,EAC9C,OAAOA,SAAS,OAAO;IAGzB,MAAM,IAAIG,MAAM;AAClB;AAKO,MAAMC,+BAA+B,OAC1CnB,QACAC,UAAwE,CAAC,CAAC;IAG1EC,eAAeF;IAGf,MAAMG,UAAUC,oBAAoBJ,QAAQ;QAC1C,UAAUC,QAAQ,QAAQ;QAC1B,gBAAgBA,QAAQ,cAAc,IAAI;IAC5C;IAGA,MAAMI,oBAAoB;QACxB,GAAGF,OAAO;QACV,oBAAoBF,AAA+B,UAA/BA,QAAQ,kBAAkB;QAC9C,2BAA2BA,QAAQ,yBAAyB,IAAI;QAChE,cAAcA,QAAQ,YAAY,IAAI;YAAE,OAAO;YAAM,QAAQ;QAAI;IACnE;IAGA,MAAMK,cAAcC,qBAAqBP,QAAQC,QAAQ,cAAc,IAAI;IAG3E,MAAMO,aAAa,CAAC;;;AAGtB,EAAEC,KAAK,SAAS,CAACJ,mBAAmB,MAAM,GAAG;;;;;;;;;;;;qEAYwB,CAAC;IAGpE,MAAMK,iBAAiBC,qBACrBH,YACAF,aACAL,AAA+B,UAA/BA,QAAQ,kBAAkB;IAI5B,MAAMW,eAAe,CAAC;;;AAGxB,EAAEC,yBAAyB;IAGzB,MAAMC,SAAuC;QAC3C;YACE,MAAM;YACN,SAASF;QACX;QACA;YACE,MAAM;YACN,SAASF;QACX;KACD;IAED,IAAIT,QAAQ,MAAM,IAAIA,QAAQ,OAAO,EAEnC,OAAO,MAAMe,OAAOF,QAAQG,aAAa,YAAY,EAAEG,QAAW;QAChE,QAAQ;QACR,SAASnB,QAAQ,OAAO;IAC1B;IACK;QAEL,MAAMc,WAAW
,MAAMC,OAAOF,QAAQG,aAAa,YAAY;QAE/D,IAAIF,AAAAA,CAAAA,QAAAA,WAAAA,KAAAA,IAAAA,SAAU,OAAO,AAAD,KAAK,AAA4B,YAA5B,OAAOA,SAAS,OAAO,EAC9C,OAAO;YACL,SAASA,SAAS,OAAO;YACzB,OAAOA,SAAS,KAAK;YACrB,YAAY;QACd;QAGF,MAAM,IAAIG,MAAM;IAClB;AACF"}
@@ -0,0 +1,34 @@
1
+ import { getPreferredLanguage } from "@midscene/shared/env";
2
+ function systemPromptToLocateElementPosition() {
3
+ const preferredLanguage = getPreferredLanguage();
4
+ return `
5
+ You are a GUI agent. You are given a task and your action history, with screenshots. You need to perform the next action to complete the task.
6
+
7
+ ## Output Format
8
+ \`\`\`
9
+ Thought: ...
10
+ Action: ...
11
+ \`\`\`
12
+
13
+ ## Action Space
14
+ click(start_box='[x1, y1, x2, y2]')
15
+ left_double(start_box='[x1, y1, x2, y2]')
16
+ right_single(start_box='[x1, y1, x2, y2]')
17
+ drag(start_box='[x1, y1, x2, y2]', end_box='[x3, y3, x4, y4]')
18
+ hotkey(key='')
19
+ type(content='') #If you want to submit your input, use "\\n" at the end of \`content\`.
20
+ scroll(start_box='[x1, y1, x2, y2]', direction='down or up or right or left')
21
+ wait() #Sleep for 5s and take a screenshot to check for any changes.
22
+ finished()
23
+ call_user() # Submit the task and call the user when the task is unsolvable, or when you need the user's help.
24
+
25
+ ## Note
26
+ - Use ${preferredLanguage} in \`Thought\` part.
27
+ - Write a small plan and finally summarize your next action (with its target element) in one sentence in \`Thought\` part.
28
+
29
+ ## User Instruction
30
+ `;
31
+ }
32
+ export { systemPromptToLocateElementPosition };
33
+
34
+ //# sourceMappingURL=ui-tars-locator.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-model/prompt/ui-tars-locator.mjs","sources":["webpack://@midscene/core/./src/ai-model/prompt/ui-tars-locator.ts"],"sourcesContent":["import { getPreferredLanguage } from '@midscene/shared/env';\n\n// claude 3.5 sonnet computer The ability to understand the content of the image is better, Does not provide element snapshot effect\nexport function systemPromptToLocateElementPosition() {\n const preferredLanguage = getPreferredLanguage();\n\n return `\nYou are a GUI agent. You are given a task and your action history, with screenshots. You need to perform the next action to complete the task. \n\n## Output Format\n\\`\\`\\`\nThought: ...\nAction: ...\n\\`\\`\\`\n\n## Action Space\nclick(start_box='[x1, y1, x2, y2]')\nleft_double(start_box='[x1, y1, x2, y2]')\nright_single(start_box='[x1, y1, x2, y2]')\ndrag(start_box='[x1, y1, x2, y2]', end_box='[x3, y3, x4, y4]')\nhotkey(key='')\ntype(content='') #If you want to submit your input, use \"\\\\n\" at the end of \\`content\\`.\nscroll(start_box='[x1, y1, x2, y2]', direction='down or up or right or left')\nwait() #Sleep for 5s and take a screenshot to check for any changes.\nfinished()\ncall_user() # Submit the task and call the user when the task is unsolvable, or when you need the user's help.\n\n## Note\n- Use ${preferredLanguage} in \\`Thought\\` part.\n- Write a small plan and finally summarize your next action (with its target element) in one sentence in \\`Thought\\` part.\n\n## User Instruction\n `;\n}\n"],"names":["systemPromptToLocateElementPosition","preferredLanguage","getPreferredLanguage"],"mappings":";AAGO,SAASA;IACd,MAAMC,oBAAoBC;IAE1B,OAAO,CAAC;;;;;;;;;;;;;;;;;;;;;;MAsBJ,EAAED,kBAAkB;;;;IAItB,CAAC;AACL"}