@midscene/core 1.2.2-beta-20260116064919.0 → 1.2.2-beta-20260116071350.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/agent/agent.mjs +5 -2
- package/dist/es/agent/agent.mjs.map +1 -1
- package/dist/es/agent/tasks.mjs +4 -2
- package/dist/es/agent/tasks.mjs.map +1 -1
- package/dist/es/agent/utils.mjs +1 -1
- package/dist/es/ai-model/auto-glm/actions.mjs +224 -0
- package/dist/es/ai-model/auto-glm/actions.mjs.map +1 -0
- package/dist/es/ai-model/auto-glm/index.mjs +6 -0
- package/dist/es/ai-model/auto-glm/parser.mjs +239 -0
- package/dist/es/ai-model/auto-glm/parser.mjs.map +1 -0
- package/dist/es/ai-model/auto-glm/planning.mjs +63 -0
- package/dist/es/ai-model/auto-glm/planning.mjs.map +1 -0
- package/dist/es/ai-model/auto-glm/prompt.mjs +222 -0
- package/dist/es/ai-model/auto-glm/prompt.mjs.map +1 -0
- package/dist/es/ai-model/auto-glm/util.mjs +6 -0
- package/dist/es/ai-model/auto-glm/util.mjs.map +1 -0
- package/dist/es/ai-model/index.mjs +2 -1
- package/dist/es/ai-model/inspect.mjs +68 -3
- package/dist/es/ai-model/inspect.mjs.map +1 -1
- package/dist/es/ai-model/service-caller/index.mjs +5 -0
- package/dist/es/ai-model/service-caller/index.mjs.map +1 -1
- package/dist/es/ai-model/ui-tars-planning.mjs +24 -21
- package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -1
- package/dist/es/common.mjs +15 -2
- package/dist/es/common.mjs.map +1 -1
- package/dist/es/service/index.mjs +5 -0
- package/dist/es/service/index.mjs.map +1 -1
- package/dist/es/utils.mjs +2 -2
- package/dist/lib/agent/agent.js +5 -2
- package/dist/lib/agent/agent.js.map +1 -1
- package/dist/lib/agent/tasks.js +3 -1
- package/dist/lib/agent/tasks.js.map +1 -1
- package/dist/lib/agent/utils.js +1 -1
- package/dist/lib/ai-model/auto-glm/actions.js +258 -0
- package/dist/lib/ai-model/auto-glm/actions.js.map +1 -0
- package/dist/lib/ai-model/auto-glm/index.js +63 -0
- package/dist/lib/ai-model/auto-glm/index.js.map +1 -0
- package/dist/lib/ai-model/auto-glm/parser.js +282 -0
- package/dist/lib/ai-model/auto-glm/parser.js.map +1 -0
- package/dist/lib/ai-model/auto-glm/planning.js +97 -0
- package/dist/lib/ai-model/auto-glm/planning.js.map +1 -0
- package/dist/lib/ai-model/auto-glm/prompt.js +259 -0
- package/dist/lib/ai-model/auto-glm/prompt.js.map +1 -0
- package/dist/lib/ai-model/auto-glm/util.js +40 -0
- package/dist/lib/ai-model/auto-glm/util.js.map +1 -0
- package/dist/lib/ai-model/index.js +15 -11
- package/dist/lib/ai-model/inspect.js +67 -2
- package/dist/lib/ai-model/inspect.js.map +1 -1
- package/dist/lib/ai-model/service-caller/index.js +5 -0
- package/dist/lib/ai-model/service-caller/index.js.map +1 -1
- package/dist/lib/ai-model/ui-tars-planning.js +24 -21
- package/dist/lib/ai-model/ui-tars-planning.js.map +1 -1
- package/dist/lib/common.js +18 -2
- package/dist/lib/common.js.map +1 -1
- package/dist/lib/service/index.js +5 -0
- package/dist/lib/service/index.js.map +1 -1
- package/dist/lib/utils.js +2 -2
- package/dist/types/ai-model/auto-glm/actions.d.ts +77 -0
- package/dist/types/ai-model/auto-glm/index.d.ts +6 -0
- package/dist/types/ai-model/auto-glm/parser.d.ts +18 -0
- package/dist/types/ai-model/auto-glm/planning.d.ts +9 -0
- package/dist/types/ai-model/auto-glm/prompt.d.ts +27 -0
- package/dist/types/ai-model/auto-glm/util.d.ts +7 -0
- package/dist/types/ai-model/index.d.ts +1 -0
- package/dist/types/common.d.ts +10 -0
- package/package.json +2 -2
package/dist/es/ai-model/auto-glm/prompt.mjs
@@ -0,0 +1,222 @@
+function getMultilingualFormattedDate() {
+const today = new Date();
+const year = today.getFullYear();
+const month = String(today.getMonth() + 1).padStart(2, '0');
+const date = String(today.getDate()).padStart(2, '0');
+const dayOfWeek = [
+'Sunday',
+'Monday',
+'Tuesday',
+'Wednesday',
+'Thursday',
+'Friday',
+'Saturday'
+][today.getDay()];
+return `${year}-${month}-${date}, ${dayOfWeek}`;
+}
+function getChineseFormattedDate() {
+const today = new Date();
+const year = today.getFullYear();
+const month = String(today.getMonth() + 1).padStart(2, '0');
+const date = String(today.getDate()).padStart(2, '0');
+const weekdayNames = [
+'星期日',
+'星期一',
+'星期二',
+'星期三',
+'星期四',
+'星期五',
+'星期六'
+];
+const weekday = weekdayNames[today.getDay()];
+return `${year}年${month}月${date}日 ${weekday}`;
+}
+const getAutoGLMMultilingualPlanPrompt = ()=>`
+The current date: ${getMultilingualFormattedDate()}
+
+# Setup
+You are a professional Android operation agent assistant that can fulfill the user's high-level instructions. Given a screenshot of the Android interface at each step, you first analyze the situation, then plan the best course of action using Python-style pseudo-code.
+
+# More details about the code
+Your response format must be structured as follows:
+
+Think first: Use <think>...</think> to analyze the current screen, identify key elements, and determine the most efficient action.
+Provide the action: Use <answer>...</answer> to return a single line of pseudo-code representing the operation.
+
+Your output should STRICTLY follow the format:
+<think>
+[Your thought]
+</think>
+<answer>
+[Your operation code]
+</answer>
+
+- **Tap**
+Perform a tap action on a specified screen area. The element is a list of 2 integers, representing the coordinates of the tap point.
+**Example**:
+<answer>
+do(action="Tap", element=[x,y])
+</answer>
+- **Type**
+Enter text into the currently focused input field.
+**Example**:
+<answer>
+do(action="Type", text="Hello World")
+</answer>
+- **Swipe**
+Perform a swipe action with start point and end point.
+**Examples**:
+<answer>
+do(action="Swipe", start=[x1,y1], end=[x2,y2])
+</answer>
+- **Long Press**
+Perform a long press action on a specified screen area.
+You can add the element to the action to specify the long press area. The element is a list of 2 integers, representing the coordinates of the long press point.
+**Example**:
+<answer>
+do(action="Long Press", element=[x,y])
+</answer>
+- **Launch**
+Launch an app. Try to use launch action when you need to launch an app. Check the instruction to choose the right app before you use this action.
+**Example**:
+<answer>
+do(action="Launch", app="Settings")
+</answer>
+- **Back**
+Press the Back button to navigate to the previous screen.
+**Example**:
+<answer>
+do(action="Back")
+</answer>
+- **Finish**
+Terminate the program and optionally print a message.
+**Example**:
+<answer>
+finish(message="Task completed.")
+</answer>
+
+
+REMEMBER:
+- Think before you act: Always analyze the current UI and the best course of action before executing any step, and output in <think> part.
+- Only ONE LINE of action in <answer> part per response: Each step must contain exactly one line of executable code.
+- Generate execution code strictly according to format requirements.
+`;
+const getAutoGLMChinesePlanPrompt = ()=>`
+今天的日期是: ${getChineseFormattedDate()}
+
+你是一个智能体分析专家,可以根据操作历史和当前状态图执行一系列操作来完成任务。
+你必须严格按照要求输出以下格式:
+<think>{think}</think>
+<answer>{action}</answer>
+
+其中:
+- {think} 是对你为什么选择这个操作的简短推理说明。
+- {action} 是本次执行的具体操作指令,必须严格遵循下方定义的指令格式。
+
+操作指令及其作用如下:
+- do(action="Launch", app="xxx")
+Launch是启动目标app的操作,这比通过主屏幕导航更快。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Tap", element=[x,y])
+Tap是点击操作,点击屏幕上的特定点。可用此操作点击按钮、选择项目、从主屏幕打开应用程序,或与任何可点击的用户界面元素进行交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Tap", element=[x,y], message="重要操作")
+基本功能同Tap,点击涉及财产、支付、隐私等敏感按钮时触发。
+- do(action="Type", text="xxx")
+Type是输入操作,在当前聚焦的输入框中输入文本。使用此操作前,请确保输入框已被聚焦(先点击它)。输入的文本将像使用键盘输入一样输入。重要提示:手机可能正在使用 ADB 键盘,该键盘不会像普通键盘那样占用屏幕空间。要确认键盘已激活,请查看屏幕底部是否显示 'ADB Keyboard {ON}' 类似的文本,或者检查输入框是否处于激活/高亮状态。不要仅仅依赖视觉上的键盘显示。自动清除文本:当你使用输入操作时,输入框中现有的任何文本(包括占位符文本和实际输入)都会在输入新文本前自动清除。你无需在输入前手动清除文本——直接使用输入操作输入所需文本即可。操作完成后,你将自动收到结果状态的截图。
+- do(action="Type_Name", text="xxx")
+Type_Name是输入人名的操作,基本功能同Type。
+- do(action="Swipe", start=[x1,y1], end=[x2,y2])
+Swipe是滑动操作,通过从起始坐标拖动到结束坐标来执行滑动手势。可用于滚动内容、在屏幕之间导航、下拉通知栏以及项目栏或进行基于手势的导航。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。滑动持续时间会自动调整以实现自然的移动。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Long Press", element=[x,y])
+Long Pres是长按操作,在屏幕上的特定点长按指定时间。可用于触发上下文菜单、选择文本或激活长按交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的屏幕截图。
+- do(action="Double Tap", element=[x,y])
+Double Tap在屏幕上的特定点快速连续点按两次。使用此操作可以激活双击交互,如缩放、选择文本或打开项目。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Back")
+导航返回到上一个屏幕或关闭当前对话框。相当于按下 Android 的返回按钮。使用此操作可以从更深的屏幕返回、关闭弹出窗口或退出当前上下文。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Home")
+Home是回到系统桌面的操作,相当于按下 Android 主屏幕按钮。使用此操作可退出当前应用并返回启动器,或从已知状态启动新任务。此操作完成后,您将自动收到结果状态的截图。
+- do(action="Wait", duration="x seconds")
+等待页面加载,x为需要等待多少秒。
+- finish(message="xxx")
+finish是结束任务的操作,表示准确完整完成任务,message是终止信息。
+
+必须遵循的规则:
+0. 严禁调用 Interact、Take_over、Note、Call_API 这四个操作,这些操作暂不支持。
+1. 在执行任何操作前,先检查当前app是否是目标app,如果不是,先执行 Launch。
+2. 如果进入到了无关页面,先执行 Back。如果执行Back后页面没有变化,请点击页面左上角的返回键进行返回,或者右上角的X号关闭。
+3. 如果页面未加载出内容,最多连续 Wait 三次,否则执行 Back重新进入。
+4. 如果页面显示网络问题,需要重新加载,请点击重新加载。
+5. 如果当前页面找不到目标联系人、商品、店铺等信息,可以尝试 Swipe 滑动查找。
+6. 遇到价格区间、时间区间等筛选条件,如果没有完全符合的,可以放宽要求。
+7. 在做小红书总结类任务时一定要筛选图文笔记。
+8. 购物车全选后再点击全选可以把状态设为全不选,在做购物车任务时,如果购物车里已经有商品被选中时,你需要点击全选后再点击取消全选,再去找需要购买或者删除的商品。
+9. 在做外卖任务时,如果相应店铺购物车里已经有其他商品你需要先把购物车清空再去购买用户指定的外卖。
+10. 在做点外卖任务时,如果用户需要点多个外卖,请尽量在同一店铺进行购买,如果无法找到可以下单,并说明某个商品未找到。
+11. 请严格遵循用户意图执行任务,用户的特殊要求可以执行多次搜索,滑动查找。比如(i)用户要求点一杯咖啡,要咸的,你可以直接搜索咸咖啡,或者搜索咖啡后滑动查找咸的咖啡,比如海盐咖啡。(ii)用户要找到XX群,发一条消息,你可以先搜索XX群,找不到结果后,将"群"字去掉,搜索XX重试。(iii)用户要找到宠物友好的餐厅,你可以搜索餐厅,找到筛选,找到设施,选择可带宠物,或者直接搜索可带宠物,必要时可以使用AI搜索。
+12. 在选择日期时,如果原滑动方向与预期日期越来越远,请向反方向滑动查找。
+13. 执行任务过程中如果有多个可选择的项目栏,请逐个查找每个项目栏,直到完成任务,一定不要在同一项目栏多次查找,从而陷入死循环。
+14. 在执行下一步操作前请一定要检查上一步的操作是否生效,如果点击没生效,可能因为app反应较慢,请先稍微等待一下,如果还是不生效请调整一下点击位置重试,如果仍然不生效请跳过这一步继续任务,并在finish message说明点击不生效。
+15. 在执行任务中如果遇到滑动不生效的情况,请调整一下起始点位置,增大滑动距离重试,如果还是不生效,有可能是已经滑到底了,请继续向反方向滑动,直到顶部或底部,如果仍然没有符合要求的结果,请跳过这一步继续任务,并在finish message说明但没找到要求的项目。
+16. 在做游戏任务时如果在战斗页面如果有自动战斗一定要开启自动战斗,如果多轮历史状态相似要检查自动战斗是否开启。
+17. 如果没有合适的搜索结果,可能是因为搜索页面不对,请返回到搜索页面的上一级尝试重新搜索,如果尝试三次返回上一级搜索后仍然没有符合要求的结果,执行 finish(message="原因")。
+18. 在结束任务前请一定要仔细检查任务是否完整准确的完成,如果出现错选、漏选、多选的情况,请返回之前的步骤进行纠正。
+`;
+const getAutoGLMPlanPrompt = (vlMode)=>{
+if ('auto-glm-multilingual' === vlMode) return getAutoGLMMultilingualPlanPrompt();
+if ('auto-glm' === vlMode) return getAutoGLMChinesePlanPrompt();
+throw new Error(`Unsupported vlMode for Auto-GLM plan prompt: ${vlMode}`);
+};
+const getAutoGLMLocatePrompt = (vlMode)=>{
+if ('auto-glm-multilingual' === vlMode) return `
+The current date: ${getMultilingualFormattedDate()}
+
+# Setup
+You are a professional Android operation agent assistant that can fulfill the user's high-level instructions. Given a screenshot of the Android interface at each step, you first analyze the situation, then plan the best course of action using Python-style pseudo-code.
+
+# More details about the code
+Your response format must be structured as follows:
+
+Think first: Use <think>...</think> to analyze the current screen, identify key elements, and determine the most efficient action.
+Provide the action: Use <answer>...</answer> to return a single line of pseudo-code representing the operation.
+
+Your output should STRICTLY follow the format:
+<think>
+[Your thought]
+</think>
+<answer>
+[Your operation code]
+</answer>
+
+- **Tap**
+Perform a tap action on a specified screen area. The element is a list of 2 integers, representing the coordinates of the tap point.
+**Example**:
+<answer>
+do(action="Tap", element=[x,y])
+</answer>
+
+REMEMBER:
+- Your goal is to locate and tap the UI element specified by the user (e.g., button, icon, link, etc.). Do not attempt any other actions.
+`;
+if ('auto-glm' === vlMode) return `
+今天的日期是: ${getChineseFormattedDate()}
+
+你是一个智能体分析专家,可以根据操作历史和当前状态图执行一系列操作来完成任务。
+你必须严格按照要求输出以下格式:
+<think>{think}</think>
+<answer>{action}</answer>
+
+其中:
+- {think} 是对你为什么选择这个操作的简短推理说明。
+- {action} 是本次执行的具体操作指令,必须严格遵循下方定义的指令格式。
+
+操作指令及其作用如下:
+- do(action="Tap", element=[x,y])
+Tap是点击操作,点击屏幕上的特定点。可用此操作点击按钮、选择项目、从主屏幕打开应用程序,或与任何可点击的用户界面元素进行交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。
+
+必须遵循的规则:
+- 你的目标是定位并点击用户指定的UI元素(例如按钮、图标、链接等),请不要尝试任何其他的操作。
+`;
+throw new Error(`Unsupported vlMode for Auto-GLM locate prompt: ${vlMode}`);
+};
+export { getAutoGLMLocatePrompt, getAutoGLMPlanPrompt };
+
+//# sourceMappingURL=prompt.mjs.map
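The prompts above instruct the model to reply with a `<think>` block followed by a single-line `<answer>` block. For illustration only, a minimal TypeScript sketch of splitting such a reply; the helper name is hypothetical, and the package's actual parsing lives in the new `auto-glm/parser.mjs`, whose body is not shown in this diff:

```ts
// Illustrative only: split a reply that follows the <think>/<answer> format
// requested by the Auto-GLM prompts. Not the package's parser.mjs.
function splitThinkAnswer(raw: string): { think?: string; answer?: string } {
  const think = raw.match(/<think>([\s\S]*?)<\/think>/)?.[1]?.trim();
  const answer = raw.match(/<answer>([\s\S]*?)<\/answer>/)?.[1]?.trim();
  return { think, answer };
}

// splitThinkAnswer('<think>tap search</think>\n<answer>do(action="Tap", element=[500,120])</answer>')
// -> { think: 'tap search', answer: 'do(action="Tap", element=[500,120])' }
```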
package/dist/es/ai-model/auto-glm/prompt.mjs.map
@@ -0,0 +1 @@
+
{"version":3,"file":"ai-model/auto-glm/prompt.mjs","sources":["../../../../src/ai-model/auto-glm/prompt.ts"],"sourcesContent":["/**\n * Auto-GLM Prompt Templates\n *\n * Portions of this file are derived from Open-AutoGLM\n * Copyright (c) 2024 zai-org\n * Licensed under the Apache License, Version 2.0\n *\n * Source: https://github.com/zai-org/Open-AutoGLM\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Modifications:\n * - Adapted prompts for Midscene.js integration\n */\n\nimport type { TVlModeTypes } from '@midscene/shared/env';\n\n/**\n * Get formatted date string for system prompts\n * @returns Formatted date string like \"2026-01-12, Sunday\"\n */\nfunction getMultilingualFormattedDate(): string {\n const today = new Date();\n const year = today.getFullYear();\n const month = String(today.getMonth() + 1).padStart(2, '0');\n const date = String(today.getDate()).padStart(2, '0');\n const dayOfWeek = [\n 'Sunday',\n 'Monday',\n 'Tuesday',\n 'Wednesday',\n 'Thursday',\n 'Friday',\n 'Saturday',\n ][today.getDay()];\n\n return `${year}-${month}-${date}, ${dayOfWeek}`;\n}\n\n/**\n * Get formatted Chinese date (e.g., \"2026年01月13日 星期一\")\n */\nfunction getChineseFormattedDate(): string {\n const today = new Date();\n const year = today.getFullYear();\n const month = String(today.getMonth() + 1).padStart(2, '0');\n const date = String(today.getDate()).padStart(2, '0');\n const weekdayNames = [\n '星期日',\n '星期一',\n '星期二',\n '星期三',\n '星期四',\n '星期五',\n '星期六',\n ];\n const weekday = weekdayNames[today.getDay()];\n\n return `${year}年${month}月${date}日 ${weekday}`;\n}\n\nconst getAutoGLMMultilingualPlanPrompt = (): string => {\n return `\nThe current date: ${getMultilingualFormattedDate()}\n\n# Setup\nYou are a professional Android operation agent assistant that can fulfill the user's high-level instructions. Given a screenshot of the Android interface at each step, you first analyze the situation, then plan the best course of action using Python-style pseudo-code.\n\n# More details about the code\nYour response format must be structured as follows:\n\nThink first: Use <think>...</think> to analyze the current screen, identify key elements, and determine the most efficient action.\nProvide the action: Use <answer>...</answer> to return a single line of pseudo-code representing the operation.\n\nYour output should STRICTLY follow the format:\n<think>\n[Your thought]\n</think>\n<answer>\n[Your operation code]\n</answer>\n\n- **Tap**\n Perform a tap action on a specified screen area. 
The element is a list of 2 integers, representing the coordinates of the tap point.\n **Example**:\n <answer>\n do(action=\"Tap\", element=[x,y])\n </answer>\n- **Type**\n Enter text into the currently focused input field.\n **Example**:\n <answer>\n do(action=\"Type\", text=\"Hello World\")\n </answer>\n- **Swipe**\n Perform a swipe action with start point and end point.\n **Examples**:\n <answer>\n do(action=\"Swipe\", start=[x1,y1], end=[x2,y2])\n </answer>\n- **Long Press**\n Perform a long press action on a specified screen area.\n You can add the element to the action to specify the long press area. The element is a list of 2 integers, representing the coordinates of the long press point.\n **Example**:\n <answer>\n do(action=\"Long Press\", element=[x,y])\n </answer>\n- **Launch**\n Launch an app. Try to use launch action when you need to launch an app. Check the instruction to choose the right app before you use this action.\n **Example**:\n <answer>\n do(action=\"Launch\", app=\"Settings\")\n </answer>\n- **Back**\n Press the Back button to navigate to the previous screen.\n **Example**:\n <answer>\n do(action=\"Back\")\n </answer>\n- **Finish**\n Terminate the program and optionally print a message.\n **Example**:\n <answer>\n finish(message=\"Task completed.\")\n </answer>\n\n\nREMEMBER:\n- Think before you act: Always analyze the current UI and the best course of action before executing any step, and output in <think> part.\n- Only ONE LINE of action in <answer> part per response: Each step must contain exactly one line of executable code.\n- Generate execution code strictly according to format requirements.\n `;\n};\n\nconst getAutoGLMChinesePlanPrompt = (): string => {\n return `\n今天的日期是: ${getChineseFormattedDate()}\n\n你是一个智能体分析专家,可以根据操作历史和当前状态图执行一系列操作来完成任务。\n你必须严格按照要求输出以下格式:\n<think>{think}</think>\n<answer>{action}</answer>\n\n其中:\n- {think} 是对你为什么选择这个操作的简短推理说明。\n- {action} 是本次执行的具体操作指令,必须严格遵循下方定义的指令格式。\n\n操作指令及其作用如下:\n- do(action=\"Launch\", app=\"xxx\") \n Launch是启动目标app的操作,这比通过主屏幕导航更快。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Tap\", element=[x,y]) \n Tap是点击操作,点击屏幕上的特定点。可用此操作点击按钮、选择项目、从主屏幕打开应用程序,或与任何可点击的用户界面元素进行交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Tap\", element=[x,y], message=\"重要操作\") \n 基本功能同Tap,点击涉及财产、支付、隐私等敏感按钮时触发。\n- do(action=\"Type\", text=\"xxx\") \n Type是输入操作,在当前聚焦的输入框中输入文本。使用此操作前,请确保输入框已被聚焦(先点击它)。输入的文本将像使用键盘输入一样输入。重要提示:手机可能正在使用 ADB 键盘,该键盘不会像普通键盘那样占用屏幕空间。要确认键盘已激活,请查看屏幕底部是否显示 'ADB Keyboard {ON}' 类似的文本,或者检查输入框是否处于激活/高亮状态。不要仅仅依赖视觉上的键盘显示。自动清除文本:当你使用输入操作时,输入框中现有的任何文本(包括占位符文本和实际输入)都会在输入新文本前自动清除。你无需在输入前手动清除文本——直接使用输入操作输入所需文本即可。操作完成后,你将自动收到结果状态的截图。\n- do(action=\"Type_Name\", text=\"xxx\") \n Type_Name是输入人名的操作,基本功能同Type。\n- do(action=\"Swipe\", start=[x1,y1], end=[x2,y2]) \n Swipe是滑动操作,通过从起始坐标拖动到结束坐标来执行滑动手势。可用于滚动内容、在屏幕之间导航、下拉通知栏以及项目栏或进行基于手势的导航。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。滑动持续时间会自动调整以实现自然的移动。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Long Press\", element=[x,y]) \n Long Pres是长按操作,在屏幕上的特定点长按指定时间。可用于触发上下文菜单、选择文本或激活长按交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的屏幕截图。\n- do(action=\"Double Tap\", element=[x,y]) \n Double Tap在屏幕上的特定点快速连续点按两次。使用此操作可以激活双击交互,如缩放、选择文本或打开项目。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Back\") \n 导航返回到上一个屏幕或关闭当前对话框。相当于按下 Android 的返回按钮。使用此操作可以从更深的屏幕返回、关闭弹出窗口或退出当前上下文。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Home\") \n Home是回到系统桌面的操作,相当于按下 Android 主屏幕按钮。使用此操作可退出当前应用并返回启动器,或从已知状态启动新任务。此操作完成后,您将自动收到结果状态的截图。\n- do(action=\"Wait\", duration=\"x seconds\") \n 等待页面加载,x为需要等待多少秒。\n- 
finish(message=\"xxx\") \n finish是结束任务的操作,表示准确完整完成任务,message是终止信息。 \n\n必须遵循的规则:\n0. 严禁调用 Interact、Take_over、Note、Call_API 这四个操作,这些操作暂不支持。\n1. 在执行任何操作前,先检查当前app是否是目标app,如果不是,先执行 Launch。\n2. 如果进入到了无关页面,先执行 Back。如果执行Back后页面没有变化,请点击页面左上角的返回键进行返回,或者右上角的X号关闭。\n3. 如果页面未加载出内容,最多连续 Wait 三次,否则执行 Back重新进入。\n4. 如果页面显示网络问题,需要重新加载,请点击重新加载。\n5. 如果当前页面找不到目标联系人、商品、店铺等信息,可以尝试 Swipe 滑动查找。\n6. 遇到价格区间、时间区间等筛选条件,如果没有完全符合的,可以放宽要求。\n7. 在做小红书总结类任务时一定要筛选图文笔记。\n8. 购物车全选后再点击全选可以把状态设为全不选,在做购物车任务时,如果购物车里已经有商品被选中时,你需要点击全选后再点击取消全选,再去找需要购买或者删除的商品。\n9. 在做外卖任务时,如果相应店铺购物车里已经有其他商品你需要先把购物车清空再去购买用户指定的外卖。\n10. 在做点外卖任务时,如果用户需要点多个外卖,请尽量在同一店铺进行购买,如果无法找到可以下单,并说明某个商品未找到。\n11. 请严格遵循用户意图执行任务,用户的特殊要求可以执行多次搜索,滑动查找。比如(i)用户要求点一杯咖啡,要咸的,你可以直接搜索咸咖啡,或者搜索咖啡后滑动查找咸的咖啡,比如海盐咖啡。(ii)用户要找到XX群,发一条消息,你可以先搜索XX群,找不到结果后,将\"群\"字去掉,搜索XX重试。(iii)用户要找到宠物友好的餐厅,你可以搜索餐厅,找到筛选,找到设施,选择可带宠物,或者直接搜索可带宠物,必要时可以使用AI搜索。\n12. 在选择日期时,如果原滑动方向与预期日期越来越远,请向反方向滑动查找。\n13. 执行任务过程中如果有多个可选择的项目栏,请逐个查找每个项目栏,直到完成任务,一定不要在同一项目栏多次查找,从而陷入死循环。\n14. 在执行下一步操作前请一定要检查上一步的操作是否生效,如果点击没生效,可能因为app反应较慢,请先稍微等待一下,如果还是不生效请调整一下点击位置重试,如果仍然不生效请跳过这一步继续任务,并在finish message说明点击不生效。\n15. 在执行任务中如果遇到滑动不生效的情况,请调整一下起始点位置,增大滑动距离重试,如果还是不生效,有可能是已经滑到底了,请继续向反方向滑动,直到顶部或底部,如果仍然没有符合要求的结果,请跳过这一步继续任务,并在finish message说明但没找到要求的项目。\n16. 在做游戏任务时如果在战斗页面如果有自动战斗一定要开启自动战斗,如果多轮历史状态相似要检查自动战斗是否开启。\n17. 如果没有合适的搜索结果,可能是因为搜索页面不对,请返回到搜索页面的上一级尝试重新搜索,如果尝试三次返回上一级搜索后仍然没有符合要求的结果,执行 finish(message=\"原因\")。\n18. 在结束任务前请一定要仔细检查任务是否完整准确的完成,如果出现错选、漏选、多选的情况,请返回之前的步骤进行纠正。\n`;\n};\n\nexport const getAutoGLMPlanPrompt = (\n vlMode: TVlModeTypes | undefined,\n): string => {\n if (vlMode === 'auto-glm-multilingual') {\n return getAutoGLMMultilingualPlanPrompt();\n } else if (vlMode === 'auto-glm') {\n return getAutoGLMChinesePlanPrompt();\n }\n throw new Error(`Unsupported vlMode for Auto-GLM plan prompt: ${vlMode}`);\n};\n\nexport const getAutoGLMLocatePrompt = (\n vlMode: TVlModeTypes | undefined,\n): string => {\n if (vlMode === 'auto-glm-multilingual') {\n return `\nThe current date: ${getMultilingualFormattedDate()}\n\n# Setup\nYou are a professional Android operation agent assistant that can fulfill the user's high-level instructions. Given a screenshot of the Android interface at each step, you first analyze the situation, then plan the best course of action using Python-style pseudo-code.\n\n# More details about the code\nYour response format must be structured as follows:\n\nThink first: Use <think>...</think> to analyze the current screen, identify key elements, and determine the most efficient action.\nProvide the action: Use <answer>...</answer> to return a single line of pseudo-code representing the operation.\n\nYour output should STRICTLY follow the format:\n<think>\n[Your thought]\n</think>\n<answer>\n[Your operation code]\n</answer>\n\n- **Tap**\n Perform a tap action on a specified screen area. The element is a list of 2 integers, representing the coordinates of the tap point.\n **Example**:\n <answer>\n do(action=\"Tap\", element=[x,y])\n </answer>\n\nREMEMBER:\n- Your goal is to locate and tap the UI element specified by the user (e.g., button, icon, link, etc.). 
Do not attempt any other actions.\n `;\n } else if (vlMode === 'auto-glm') {\n return `\n今天的日期是: ${getChineseFormattedDate()}\n\n你是一个智能体分析专家,可以根据操作历史和当前状态图执行一系列操作来完成任务。\n你必须严格按照要求输出以下格式:\n<think>{think}</think>\n<answer>{action}</answer>\n\n其中:\n- {think} 是对你为什么选择这个操作的简短推理说明。\n- {action} 是本次执行的具体操作指令,必须严格遵循下方定义的指令格式。\n\n操作指令及其作用如下:\n- do(action=\"Tap\", element=[x,y]) \n Tap是点击操作,点击屏幕上的特定点。可用此操作点击按钮、选择项目、从主屏幕打开应用程序,或与任何可点击的用户界面元素进行交互。坐标系统从左上角 (0,0) 开始到右下角(999,999)结束。此操作完成后,您将自动收到结果状态的截图。\n\n必须遵循的规则:\n- 你的目标是定位并点击用户指定的UI元素(例如按钮、图标、链接等),请不要尝试任何其他的操作。\n `;\n }\n throw new Error(`Unsupported vlMode for Auto-GLM locate prompt: ${vlMode}`);\n};\n"],"names":["getMultilingualFormattedDate","today","Date","year","month","String","date","dayOfWeek","getChineseFormattedDate","weekdayNames","weekday","getAutoGLMMultilingualPlanPrompt","getAutoGLMChinesePlanPrompt","getAutoGLMPlanPrompt","vlMode","Error","getAutoGLMLocatePrompt"],"mappings":"AA+BA,SAASA;IACP,MAAMC,QAAQ,IAAIC;IAClB,MAAMC,OAAOF,MAAM,WAAW;IAC9B,MAAMG,QAAQC,OAAOJ,MAAM,QAAQ,KAAK,GAAG,QAAQ,CAAC,GAAG;IACvD,MAAMK,OAAOD,OAAOJ,MAAM,OAAO,IAAI,QAAQ,CAAC,GAAG;IACjD,MAAMM,YAAY;QAChB;QACA;QACA;QACA;QACA;QACA;QACA;KACD,CAACN,MAAM,MAAM,GAAG;IAEjB,OAAO,GAAGE,KAAK,CAAC,EAAEC,MAAM,CAAC,EAAEE,KAAK,EAAE,EAAEC,WAAW;AACjD;AAKA,SAASC;IACP,MAAMP,QAAQ,IAAIC;IAClB,MAAMC,OAAOF,MAAM,WAAW;IAC9B,MAAMG,QAAQC,OAAOJ,MAAM,QAAQ,KAAK,GAAG,QAAQ,CAAC,GAAG;IACvD,MAAMK,OAAOD,OAAOJ,MAAM,OAAO,IAAI,QAAQ,CAAC,GAAG;IACjD,MAAMQ,eAAe;QACnB;QACA;QACA;QACA;QACA;QACA;QACA;KACD;IACD,MAAMC,UAAUD,YAAY,CAACR,MAAM,MAAM,GAAG;IAE5C,OAAO,GAAGE,KAAK,CAAC,EAAEC,MAAM,CAAC,EAAEE,KAAK,EAAE,EAAEI,SAAS;AAC/C;AAEA,MAAMC,mCAAmC,IAChC,CAAC;kBACQ,EAAEX,+BAA+B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;EAoEjD,CAAC;AAGH,MAAMY,8BAA8B,IAC3B,CAAC;QACF,EAAEJ,0BAA0B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyDpC,CAAC;AAGM,MAAMK,uBAAuB,CAClCC;IAEA,IAAIA,AAAW,4BAAXA,QACF,OAAOH;IACF,IAAIG,AAAW,eAAXA,QACT,OAAOF;IAET,MAAM,IAAIG,MAAM,CAAC,6CAA6C,EAAED,QAAQ;AAC1E;AAEO,MAAME,yBAAyB,CACpCF;IAEA,IAAIA,AAAW,4BAAXA,QACF,OAAO,CAAC;kBACM,EAAEd,+BAA+B;;;;;;;;;;;;;;;;;;;;;;;;;;;;EA4BjD,CAAC;IACM,IAAIc,AAAW,eAAXA,QACT,OAAO,CAAC;QACJ,EAAEN,0BAA0B;;;;;;;;;;;;;;;;;IAiBhC,CAAC;IAEH,MAAM,IAAIO,MAAM,CAAC,+CAA+C,EAAED,QAAQ;AAC5E"}
package/dist/es/ai-model/auto-glm/util.mjs.map
@@ -0,0 +1 @@
+
{"version":3,"file":"ai-model/auto-glm/util.mjs","sources":["../../../../src/ai-model/auto-glm/util.ts"],"sourcesContent":["import type { TVlModeTypes } from '@midscene/shared/env';\n\n/**\n * Check if the vlMode is auto-glm or auto-glm-multilingual\n * @param vlMode The VL mode to check\n * @returns true if vlMode is auto-glm or auto-glm-multilingual\n */\nexport function isAutoGLM(vlMode: TVlModeTypes | undefined): boolean {\n return vlMode === 'auto-glm' || vlMode === 'auto-glm-multilingual';\n}\n"],"names":["isAutoGLM","vlMode"],"mappings":"AAOO,SAASA,UAAUC,MAAgC;IACxD,OAAOA,AAAW,eAAXA,UAAyBA,AAAW,4BAAXA;AAClC"}
package/dist/es/ai-model/index.mjs
@@ -5,7 +5,8 @@ import { generatePlaywrightTest, generatePlaywrightTestStream } from "./prompt/p
 import { generateYamlTest, generateYamlTestStream } from "./prompt/yaml-generator.mjs";
 import { AiExtractElementInfo, AiJudgeOrderSensitive, AiLocateElement, AiLocateSection } from "./inspect.mjs";
 import { plan } from "./llm-planning.mjs";
+import { autoGLMPlanning } from "./auto-glm/planning.mjs";
 import { PointSchema, RectSchema, SizeSchema, TMultimodalPromptSchema, TUserPromptSchema, adaptBboxToRect, dumpActionParam, findAllMidsceneLocatorField, getMidsceneLocationSchema, loadActionParam, parseActionParam } from "../common.mjs";
 import { resizeImageForUiTars, uiTarsPlanning } from "./ui-tars-planning.mjs";
 import { ConversationHistory } from "./conversation-history.mjs";
-export { AiExtractElementInfo, AiJudgeOrderSensitive, AiLocateElement, AiLocateSection, ConversationHistory, PointSchema, RectSchema, SizeSchema, TMultimodalPromptSchema, TUserPromptSchema, adaptBboxToRect, callAI, callAIWithObjectResponse, callAIWithStringResponse, describeUserPage, dumpActionParam, findAllMidsceneLocatorField, generatePlaywrightTest, generatePlaywrightTestStream, generateYamlTest, generateYamlTestStream, getMidsceneLocationSchema, loadActionParam, parseActionParam, plan, resizeImageForUiTars, systemPromptToLocateElement, uiTarsPlanning };
+export { AiExtractElementInfo, AiJudgeOrderSensitive, AiLocateElement, AiLocateSection, ConversationHistory, PointSchema, RectSchema, SizeSchema, TMultimodalPromptSchema, TUserPromptSchema, adaptBboxToRect, autoGLMPlanning, callAI, callAIWithObjectResponse, callAIWithStringResponse, describeUserPage, dumpActionParam, findAllMidsceneLocatorField, generatePlaywrightTest, generatePlaywrightTestStream, generateYamlTest, generateYamlTestStream, getMidsceneLocationSchema, loadActionParam, parseActionParam, plan, resizeImageForUiTars, systemPromptToLocateElement, uiTarsPlanning };
package/dist/es/ai-model/inspect.mjs
@@ -3,11 +3,14 @@ import { cropByRect, paddingToMatchBlockByBase64, preProcessImageUrl } from "@mi
 import { getDebug } from "@midscene/shared/logger";
 import { assert } from "@midscene/shared/utils";
 import { adaptBboxToRect, expandSearchArea, mergeRects } from "../common.mjs";
+import { parseAutoGLMLocateResponse } from "./auto-glm/parser.mjs";
+import { getAutoGLMLocatePrompt } from "./auto-glm/prompt.mjs";
+import { isAutoGLM } from "./auto-glm/util.mjs";
 import { extractDataQueryPrompt, systemPromptToExtract } from "./prompt/extraction.mjs";
 import { findElementPrompt, systemPromptToLocateElement } from "./prompt/llm-locator.mjs";
 import { sectionLocatorInstruction, systemPromptToLocateSection } from "./prompt/llm-section-locator.mjs";
 import { orderSensitiveJudgePrompt, systemPromptToJudgeOrderSensitive } from "./prompt/order-sensitive-judge.mjs";
-import { callAIWithObjectResponse } from "./service-caller/index.mjs";
+import { callAIWithObjectResponse, callAIWithStringResponse } from "./service-caller/index.mjs";
 const debugInspect = getDebug('ai:inspect');
 const debugSection = getDebug('ai:section');
 const extraTextFromUserPrompt = (prompt)=>{
@@ -60,7 +63,7 @@ async function AiLocateElement(options) {
 assert(targetElementDescription, "cannot find the target element description");
 const targetElementDescriptionText = extraTextFromUserPrompt(targetElementDescription);
 const userInstructionPrompt = findElementPrompt(targetElementDescriptionText);
-const systemPrompt = systemPromptToLocateElement(vlMode);
+const systemPrompt = isAutoGLM(vlMode) ? getAutoGLMLocatePrompt(vlMode) : systemPromptToLocateElement(vlMode);
 let imagePayload = screenshotBase64;
 let imageWidth = context.size.width;
 let imageHeight = context.size.height;
@@ -97,7 +100,7 @@ async function AiLocateElement(options) {
 },
 {
 type: 'text',
-text: userInstructionPrompt
+text: isAutoGLM(vlMode) ? `Tap: ${userInstructionPrompt}` : userInstructionPrompt
 }
 ]
 }
@@ -109,6 +112,68 @@ async function AiLocateElement(options) {
 });
 msgs.push(...addOns);
 }
+if (isAutoGLM(vlMode)) {
+const { content: rawResponseContent, usage } = await callAIWithStringResponse(msgs, modelConfig);
+debugInspect('auto-glm rawResponse:', rawResponseContent);
+const parsed = parseAutoGLMLocateResponse(rawResponseContent);
+debugInspect('auto-glm thinking:', parsed.think);
+debugInspect('auto-glm coordinates:', parsed.coordinates);
+let resRect;
+let matchedElements = [];
+let errors = [];
+if (parsed.error || !parsed.coordinates) {
+errors = [
+parsed.error || 'Failed to parse auto-glm response'
+];
+debugInspect('auto-glm parse error:', errors[0]);
+} else {
+const { x, y } = parsed.coordinates;
+debugInspect('auto-glm coordinates [0-999]:', {
+x,
+y
+});
+const pixelX = Math.round(x * imageWidth / 1000);
+const pixelY = Math.round(y * imageHeight / 1000);
+debugInspect('auto-glm pixel coordinates:', {
+pixelX,
+pixelY
+});
+const bboxSize = 10;
+const x1 = Math.max(pixelX - bboxSize / 2, 0);
+const y1 = Math.max(pixelY - bboxSize / 2, 0);
+const x2 = Math.min(pixelX + bboxSize / 2, imageWidth);
+const y2 = Math.min(pixelY + bboxSize / 2, imageHeight);
+resRect = {
+left: x1,
+top: y1,
+width: x2 - x1,
+height: y2 - y1
+};
+if (options.searchConfig?.rect) {
+resRect.left += options.searchConfig.rect.left;
+resRect.top += options.searchConfig.rect.top;
+}
+debugInspect('auto-glm resRect:', resRect);
+const rectCenter = {
+x: resRect.left + resRect.width / 2,
+y: resRect.top + resRect.height / 2
+};
+const element = generateElementByPosition(rectCenter, targetElementDescriptionText);
+if (element) matchedElements = [
+element
+];
+}
+return {
+rect: resRect,
+parseResult: {
+elements: matchedElements,
+errors
+},
+rawResponse: rawResponseContent,
+usage,
+reasoning_content: parsed.think
+};
+}
 const res = await callAIFn(msgs, modelConfig);
 const rawResponse = JSON.stringify(res.content);
 let resRect;
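The added branch above converts Auto-GLM point coordinates, which are expressed on a 0-999 grid, into screenshot pixels before wrapping the point in a 10-pixel rect. A standalone sketch of that arithmetic in TypeScript (the function name is illustrative, not a package export):

```ts
// Illustrative sketch of the [0, 999] -> pixel mapping used in the branch above.
function autoGLMPointToPixel(
  x: number, // Auto-GLM x in [0, 999]
  y: number, // Auto-GLM y in [0, 999]
  imageWidth: number,
  imageHeight: number,
): { pixelX: number; pixelY: number } {
  return {
    pixelX: Math.round((x * imageWidth) / 1000),
    pixelY: Math.round((y * imageHeight) / 1000),
  };
}

// e.g. a point at [500, 250] on a 1080x2400 screenshot
// -> { pixelX: 540, pixelY: 600 }
```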
package/dist/es/ai-model/inspect.mjs.map
@@ -1 +1 @@
-
{"version":3,"file":"ai-model/inspect.mjs","sources":["../../../src/ai-model/inspect.ts"],"sourcesContent":["import type {\n AIDataExtractionResponse,\n AIElementResponse,\n AISectionLocatorResponse,\n AIUsageInfo,\n Rect,\n ReferenceImage,\n ServiceExtractOption,\n UIContext,\n} from '@/types';\nimport type { IModelConfig } from '@midscene/shared/env';\nimport { generateElementByPosition } from '@midscene/shared/extractor/dom-util';\nimport {\n cropByRect,\n paddingToMatchBlockByBase64,\n preProcessImageUrl,\n} from '@midscene/shared/img';\nimport { getDebug } from '@midscene/shared/logger';\nimport type { LocateResultElement } from '@midscene/shared/types';\nimport { assert } from '@midscene/shared/utils';\nimport type {\n ChatCompletionSystemMessageParam,\n ChatCompletionUserMessageParam,\n} from 'openai/resources/index';\nimport type { TMultimodalPrompt, TUserPrompt } from '../common';\nimport { adaptBboxToRect, expandSearchArea, mergeRects } from '../common';\nimport {\n extractDataQueryPrompt,\n systemPromptToExtract,\n} from './prompt/extraction';\nimport {\n findElementPrompt,\n systemPromptToLocateElement,\n} from './prompt/llm-locator';\nimport {\n sectionLocatorInstruction,\n systemPromptToLocateSection,\n} from './prompt/llm-section-locator';\nimport {\n orderSensitiveJudgePrompt,\n systemPromptToJudgeOrderSensitive,\n} from './prompt/order-sensitive-judge';\nimport { callAIWithObjectResponse } from './service-caller/index';\n\nexport type AIArgs = [\n ChatCompletionSystemMessageParam,\n ...ChatCompletionUserMessageParam[],\n];\n\nconst debugInspect = getDebug('ai:inspect');\nconst debugSection = getDebug('ai:section');\n\nconst extraTextFromUserPrompt = (prompt: TUserPrompt): string => {\n if (typeof prompt === 'string') {\n return prompt;\n } else {\n return prompt.prompt;\n }\n};\n\nconst promptsToChatParam = async (\n multimodalPrompt: TMultimodalPrompt,\n): Promise<ChatCompletionUserMessageParam[]> => {\n const msgs: ChatCompletionUserMessageParam[] = [];\n if (multimodalPrompt?.images?.length) {\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'text',\n text: 'Next, I will provide all the reference images.',\n },\n ],\n });\n\n for (const item of multimodalPrompt.images) {\n const base64 = await preProcessImageUrl(\n item.url,\n !!multimodalPrompt.convertHttpImage2Base64,\n );\n\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'text',\n text: `this is the reference image named '${item.name}':`,\n },\n ],\n });\n\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: base64,\n detail: 'high',\n },\n },\n ],\n });\n }\n }\n return msgs;\n};\n\nexport async function AiLocateElement(options: {\n context: UIContext;\n targetElementDescription: TUserPrompt;\n referenceImage?: ReferenceImage;\n callAIFn: typeof callAIWithObjectResponse<\n AIElementResponse | [number, number]\n >;\n searchConfig?: Awaited<ReturnType<typeof AiLocateSection>>;\n modelConfig: IModelConfig;\n}): Promise<{\n parseResult: {\n elements: LocateResultElement[];\n errors?: string[];\n };\n rect?: Rect;\n rawResponse: string;\n usage?: AIUsageInfo;\n reasoning_content?: string;\n}> {\n const { context, targetElementDescription, callAIFn, modelConfig } = options;\n const { vlMode } = modelConfig;\n const screenshotBase64 = context.screenshot.getData();\n\n assert(\n targetElementDescription,\n 'cannot find the target element description',\n );\n const targetElementDescriptionText = extraTextFromUserPrompt(\n targetElementDescription,\n );\n const 
userInstructionPrompt = findElementPrompt(targetElementDescriptionText);\n const systemPrompt = systemPromptToLocateElement(vlMode);\n\n let imagePayload = screenshotBase64;\n let imageWidth = context.size.width;\n let imageHeight = context.size.height;\n let originalImageWidth = imageWidth;\n let originalImageHeight = imageHeight;\n\n if (options.searchConfig) {\n assert(\n options.searchConfig.rect,\n 'searchArea is provided but its rect cannot be found. Failed to locate element',\n );\n assert(\n options.searchConfig.imageBase64,\n 'searchArea is provided but its imageBase64 cannot be found. Failed to locate element',\n );\n\n imagePayload = options.searchConfig.imageBase64;\n imageWidth = options.searchConfig.rect?.width;\n imageHeight = options.searchConfig.rect?.height;\n originalImageWidth = imageWidth;\n originalImageHeight = imageHeight;\n } else if (vlMode === 'qwen2.5-vl') {\n const paddedResult = await paddingToMatchBlockByBase64(imagePayload);\n imageWidth = paddedResult.width;\n imageHeight = paddedResult.height;\n imagePayload = paddedResult.imageBase64;\n }\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: imagePayload,\n detail: 'high',\n },\n },\n {\n type: 'text',\n text: userInstructionPrompt,\n },\n ],\n },\n ];\n\n if (typeof targetElementDescription !== 'string') {\n const addOns = await promptsToChatParam({\n images: targetElementDescription.images,\n convertHttpImage2Base64: targetElementDescription.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n const res = await callAIFn(msgs, modelConfig);\n\n const rawResponse = JSON.stringify(res.content);\n\n let resRect: Rect | undefined;\n let matchedElements: LocateResultElement[] = [];\n let errors: string[] | undefined =\n 'errors' in res.content ? res.content.errors : [];\n try {\n if (\n 'bbox' in res.content &&\n Array.isArray(res.content.bbox) &&\n res.content.bbox.length >= 1\n ) {\n resRect = adaptBboxToRect(\n res.content.bbox,\n imageWidth,\n imageHeight,\n options.searchConfig?.rect?.left,\n options.searchConfig?.rect?.top,\n originalImageWidth,\n originalImageHeight,\n vlMode,\n );\n\n debugInspect('resRect', resRect);\n\n const rectCenter = {\n x: resRect.left + resRect.width / 2,\n y: resRect.top + resRect.height / 2,\n };\n\n const element: LocateResultElement = generateElementByPosition(\n rectCenter,\n targetElementDescriptionText as string,\n );\n errors = [];\n\n if (element) {\n matchedElements = [element];\n }\n }\n } catch (e) {\n const msg =\n e instanceof Error\n ? 
`Failed to parse bbox: ${e.message}`\n : 'unknown error in locate';\n if (!errors || errors?.length === 0) {\n errors = [msg];\n } else {\n errors.push(`(${msg})`);\n }\n }\n\n return {\n rect: resRect,\n parseResult: {\n elements: matchedElements as LocateResultElement[],\n errors: errors as string[],\n },\n rawResponse,\n usage: res.usage,\n reasoning_content: res.reasoning_content,\n };\n}\n\nexport async function AiLocateSection(options: {\n context: UIContext;\n sectionDescription: TUserPrompt;\n modelConfig: IModelConfig;\n}): Promise<{\n rect?: Rect;\n imageBase64?: string;\n error?: string;\n rawResponse: string;\n usage?: AIUsageInfo;\n}> {\n const { context, sectionDescription, modelConfig } = options;\n const { vlMode } = modelConfig;\n const screenshotBase64 = context.screenshot.getData();\n\n const systemPrompt = systemPromptToLocateSection(vlMode);\n const sectionLocatorInstructionText = sectionLocatorInstruction(\n extraTextFromUserPrompt(sectionDescription),\n );\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: screenshotBase64,\n detail: 'high',\n },\n },\n {\n type: 'text',\n text: sectionLocatorInstructionText,\n },\n ],\n },\n ];\n\n if (typeof sectionDescription !== 'string') {\n const addOns = await promptsToChatParam({\n images: sectionDescription.images,\n convertHttpImage2Base64: sectionDescription.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n const result = await callAIWithObjectResponse<AISectionLocatorResponse>(\n msgs,\n modelConfig,\n );\n\n let sectionRect: Rect | undefined;\n const sectionBbox = result.content.bbox;\n if (sectionBbox) {\n const targetRect = adaptBboxToRect(\n sectionBbox,\n context.size.width,\n context.size.height,\n 0,\n 0,\n context.size.width,\n context.size.height,\n vlMode,\n );\n debugSection('original targetRect %j', targetRect);\n\n const referenceBboxList = result.content.references_bbox || [];\n debugSection('referenceBboxList %j', referenceBboxList);\n\n const referenceRects = referenceBboxList\n .filter((bbox) => Array.isArray(bbox))\n .map((bbox) => {\n return adaptBboxToRect(\n bbox,\n context.size.width,\n context.size.height,\n 0,\n 0,\n context.size.width,\n context.size.height,\n vlMode,\n );\n });\n debugSection('referenceRects %j', referenceRects);\n\n // merge the sectionRect and referenceRects\n const mergedRect = mergeRects([targetRect, ...referenceRects]);\n debugSection('mergedRect %j', mergedRect);\n\n // expand search area to at least 200 x 200\n sectionRect = expandSearchArea(mergedRect, context.size, vlMode);\n debugSection('expanded sectionRect %j', sectionRect);\n }\n\n let imageBase64 = screenshotBase64;\n if (sectionRect) {\n const croppedResult = await cropByRect(\n screenshotBase64,\n sectionRect,\n vlMode === 'qwen2.5-vl',\n );\n imageBase64 = croppedResult.imageBase64;\n sectionRect.width = croppedResult.width;\n sectionRect.height = croppedResult.height;\n }\n\n return {\n rect: sectionRect,\n imageBase64,\n error: result.content.error,\n rawResponse: JSON.stringify(result.content),\n usage: result.usage,\n };\n}\n\nexport async function AiExtractElementInfo<T>(options: {\n dataQuery: string | Record<string, string>;\n multimodalPrompt?: TMultimodalPrompt;\n context: UIContext;\n pageDescription?: string;\n extractOption?: ServiceExtractOption;\n modelConfig: IModelConfig;\n}) {\n const { dataQuery, context, extractOption, multimodalPrompt, modelConfig } =\n options;\n const systemPrompt 
= systemPromptToExtract();\n const screenshotBase64 = context.screenshot.getData();\n\n const extractDataPromptText = extractDataQueryPrompt(\n options.pageDescription || '',\n dataQuery,\n );\n\n const userContent: ChatCompletionUserMessageParam['content'] = [];\n\n if (extractOption?.screenshotIncluded !== false) {\n userContent.push({\n type: 'image_url',\n image_url: {\n url: screenshotBase64,\n detail: 'high',\n },\n });\n }\n\n userContent.push({\n type: 'text',\n text: extractDataPromptText,\n });\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: userContent,\n },\n ];\n\n if (multimodalPrompt) {\n const addOns = await promptsToChatParam({\n images: multimodalPrompt.images,\n convertHttpImage2Base64: multimodalPrompt.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n const result = await callAIWithObjectResponse<AIDataExtractionResponse<T>>(\n msgs,\n modelConfig,\n );\n return {\n parseResult: result.content,\n usage: result.usage,\n reasoning_content: result.reasoning_content,\n };\n}\n\nexport async function AiJudgeOrderSensitive(\n description: string,\n callAIFn: typeof callAIWithObjectResponse<{ isOrderSensitive: boolean }>,\n modelConfig: IModelConfig,\n): Promise<{\n isOrderSensitive: boolean;\n usage?: AIUsageInfo;\n}> {\n const systemPrompt = systemPromptToJudgeOrderSensitive();\n const userPrompt = orderSensitiveJudgePrompt(description);\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: userPrompt,\n },\n ];\n\n const result = await callAIFn(msgs, modelConfig);\n\n return {\n isOrderSensitive: result.content.isOrderSensitive ?? false,\n usage: result.usage,\n };\n}\n"],"names":["debugInspect","getDebug","debugSection","extraTextFromUserPrompt","prompt","promptsToChatParam","multimodalPrompt","msgs","item","base64","preProcessImageUrl","AiLocateElement","options","context","targetElementDescription","callAIFn","modelConfig","vlMode","screenshotBase64","assert","targetElementDescriptionText","userInstructionPrompt","findElementPrompt","systemPrompt","systemPromptToLocateElement","imagePayload","imageWidth","imageHeight","originalImageWidth","originalImageHeight","paddedResult","paddingToMatchBlockByBase64","addOns","res","rawResponse","JSON","resRect","matchedElements","errors","Array","adaptBboxToRect","rectCenter","element","generateElementByPosition","e","msg","Error","AiLocateSection","sectionDescription","systemPromptToLocateSection","sectionLocatorInstructionText","sectionLocatorInstruction","result","callAIWithObjectResponse","sectionRect","sectionBbox","targetRect","referenceBboxList","referenceRects","bbox","mergedRect","mergeRects","expandSearchArea","imageBase64","croppedResult","cropByRect","AiExtractElementInfo","dataQuery","extractOption","systemPromptToExtract","extractDataPromptText","extractDataQueryPrompt","userContent","AiJudgeOrderSensitive","description","systemPromptToJudgeOrderSensitive","userPrompt","orderSensitiveJudgePrompt"],"mappings":";;;;;;;;;;AAiDA,MAAMA,eAAeC,SAAS;AAC9B,MAAMC,eAAeD,SAAS;AAE9B,MAAME,0BAA0B,CAACC;IAC/B,IAAI,AAAkB,YAAlB,OAAOA,QACT,OAAOA;IAEP,OAAOA,OAAO,MAAM;AAExB;AAEA,MAAMC,qBAAqB,OACzBC;IAEA,MAAMC,OAAyC,EAAE;IACjD,IAAID,kBAAkB,QAAQ,QAAQ;QACpCC,KAAK,IAAI,CAAC;YACR,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,MAAM;gBACR;aACD;QACH;QAEA,KAAK,MAAMC,QAAQF,iBAAiB,MAAM,CAAE;YAC1C,MAAMG,SAAS,MAAMC,mBACnBF,KAAK,GAAG,EACR,CAAC,CAACF,iBAAiB,uBAAuB;YAG5CC,KAAK,IAAI,CAAC;gBACR,MAAM;gBACN,SAAS;oBACP;wBACE,MAAM;wBACN,MAAM,CAAC,mCAAmC,E
AAEC,KAAK,IAAI,CAAC,EAAE,CAAC;oBAC3D;iBACD;YACH;YAEAD,KAAK,IAAI,CAAC;gBACR,MAAM;gBACN,SAAS;oBACP;wBACE,MAAM;wBACN,WAAW;4BACT,KAAKE;4BACL,QAAQ;wBACV;oBACF;iBACD;YACH;QACF;IACF;IACA,OAAOF;AACT;AAEO,eAAeI,gBAAgBC,OASrC;IAUC,MAAM,EAAEC,OAAO,EAAEC,wBAAwB,EAAEC,QAAQ,EAAEC,WAAW,EAAE,GAAGJ;IACrE,MAAM,EAAEK,MAAM,EAAE,GAAGD;IACnB,MAAME,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnDM,OACEL,0BACA;IAEF,MAAMM,+BAA+BjB,wBACnCW;IAEF,MAAMO,wBAAwBC,kBAAkBF;IAChD,MAAMG,eAAeC,4BAA4BP;IAEjD,IAAIQ,eAAeP;IACnB,IAAIQ,aAAab,QAAQ,IAAI,CAAC,KAAK;IACnC,IAAIc,cAAcd,QAAQ,IAAI,CAAC,MAAM;IACrC,IAAIe,qBAAqBF;IACzB,IAAIG,sBAAsBF;IAE1B,IAAIf,QAAQ,YAAY,EAAE;QACxBO,OACEP,QAAQ,YAAY,CAAC,IAAI,EACzB;QAEFO,OACEP,QAAQ,YAAY,CAAC,WAAW,EAChC;QAGFa,eAAeb,QAAQ,YAAY,CAAC,WAAW;QAC/Cc,aAAad,QAAQ,YAAY,CAAC,IAAI,EAAE;QACxCe,cAAcf,QAAQ,YAAY,CAAC,IAAI,EAAE;QACzCgB,qBAAqBF;QACrBG,sBAAsBF;IACxB,OAAO,IAAIV,AAAW,iBAAXA,QAAyB;QAClC,MAAMa,eAAe,MAAMC,4BAA4BN;QACvDC,aAAaI,aAAa,KAAK;QAC/BH,cAAcG,aAAa,MAAM;QACjCL,eAAeK,aAAa,WAAW;IACzC;IAEA,MAAMvB,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,WAAW;wBACT,KAAKE;wBACL,QAAQ;oBACV;gBACF;gBACA;oBACE,MAAM;oBACN,MAAMJ;gBACR;aACD;QACH;KACD;IAED,IAAI,AAAoC,YAApC,OAAOP,0BAAuC;QAChD,MAAMkB,SAAS,MAAM3B,mBAAmB;YACtC,QAAQS,yBAAyB,MAAM;YACvC,yBAAyBA,yBAAyB,uBAAuB;QAC3E;QACAP,KAAK,IAAI,IAAIyB;IACf;IAEA,MAAMC,MAAM,MAAMlB,SAASR,MAAMS;IAEjC,MAAMkB,cAAcC,KAAK,SAAS,CAACF,IAAI,OAAO;IAE9C,IAAIG;IACJ,IAAIC,kBAAyC,EAAE;IAC/C,IAAIC,SACF,YAAYL,IAAI,OAAO,GAAGA,IAAI,OAAO,CAAC,MAAM,GAAG,EAAE;IACnD,IAAI;QACF,IACE,UAAUA,IAAI,OAAO,IACrBM,MAAM,OAAO,CAACN,IAAI,OAAO,CAAC,IAAI,KAC9BA,IAAI,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,GAC3B;YACAG,UAAUI,gBACRP,IAAI,OAAO,CAAC,IAAI,EAChBP,YACAC,aACAf,QAAQ,YAAY,EAAE,MAAM,MAC5BA,QAAQ,YAAY,EAAE,MAAM,KAC5BgB,oBACAC,qBACAZ;YAGFjB,aAAa,WAAWoC;YAExB,MAAMK,aAAa;gBACjB,GAAGL,QAAQ,IAAI,GAAGA,QAAQ,KAAK,GAAG;gBAClC,GAAGA,QAAQ,GAAG,GAAGA,QAAQ,MAAM,GAAG;YACpC;YAEA,MAAMM,UAA+BC,0BACnCF,YACArB;YAEFkB,SAAS,EAAE;YAEX,IAAII,SACFL,kBAAkB;gBAACK;aAAQ;QAE/B;IACF,EAAE,OAAOE,GAAG;QACV,MAAMC,MACJD,aAAaE,QACT,CAAC,sBAAsB,EAAEF,EAAE,OAAO,EAAE,GACpC;QACN,IAAI,AAACN,UAAUA,QAAQ,WAAW,GAGhCA,OAAO,IAAI,CAAC,CAAC,CAAC,EAAEO,IAAI,CAAC,CAAC;aAFtBP,SAAS;YAACO;SAAI;IAIlB;IAEA,OAAO;QACL,MAAMT;QACN,aAAa;YACX,UAAUC;YACV,QAAQC;QACV;QACAJ;QACA,OAAOD,IAAI,KAAK;QAChB,mBAAmBA,IAAI,iBAAiB;IAC1C;AACF;AAEO,eAAec,gBAAgBnC,OAIrC;IAOC,MAAM,EAAEC,OAAO,EAAEmC,kBAAkB,EAAEhC,WAAW,EAAE,GAAGJ;IACrD,MAAM,EAAEK,MAAM,EAAE,GAAGD;IACnB,MAAME,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnD,MAAMU,eAAe0B,4BAA4BhC;IACjD,MAAMiC,gCAAgCC,0BACpChD,wBAAwB6C;IAE1B,MAAMzC,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,WAAW;wBACT,KAAKL;wBACL,QAAQ;oBACV;gBACF;gBACA;oBACE,MAAM;oBACN,MAAMgC;gBACR;aACD;QACH;KACD;IAED,IAAI,AAA8B,YAA9B,OAAOF,oBAAiC;QAC1C,MAAMhB,SAAS,MAAM3B,mBAAmB;YACtC,QAAQ2C,mBAAmB,MAAM;YACjC,yBAAyBA,mBAAmB,uBAAuB;QACrE;QACAzC,KAAK,IAAI,IAAIyB;IACf;IAEA,MAAMoB,SAAS,MAAMC,yBACnB9C,MACAS;IAGF,IAAIsC;IACJ,MAAMC,cAAcH,OAAO,OAAO,CAAC,IAAI;IACvC,IAAIG,aAAa;QACf,MAAMC,aAAahB,gBACjBe,aACA1C,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnB,GACA,GACAA,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnBI;QAEFf,aAAa,0BAA0BsD;QAEvC,MAAMC,oBAAoBL,OAAO,OAAO,CAAC,eAAe,IAAI,EAAE;QAC9DlD,aAAa,wBAAwBuD;QAErC,MAAMC,iBAAiBD,kBACpB,MAAM,CAAC,CAACE,OAASpB,MAAM,OAAO,CAACoB,OAC/B,GAAG,CAAC,CAACA,OACGnB,gBACLmB,MACA9C,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnB,GACA,GACAA,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnBI;QAGNf,aAAa,qBAAqBwD;QAGlC,MAAME,aAAaC,WAAW;YAACL;eAAeE;SAAe;QAC7DxD,aAAa,iBAAiB0D;QAG9BN,cAAcQ,iBAAiBF,YAAY/
C,QAAQ,IAAI,EAAEI;QACzDf,aAAa,2BAA2BoD;IAC1C;IAEA,IAAIS,cAAc7C;IAClB,IAAIoC,aAAa;QACf,MAAMU,gBAAgB,MAAMC,WAC1B/C,kBACAoC,aACArC,AAAW,iBAAXA;QAEF8C,cAAcC,cAAc,WAAW;QACvCV,YAAY,KAAK,GAAGU,cAAc,KAAK;QACvCV,YAAY,MAAM,GAAGU,cAAc,MAAM;IAC3C;IAEA,OAAO;QACL,MAAMV;QACNS;QACA,OAAOX,OAAO,OAAO,CAAC,KAAK;QAC3B,aAAajB,KAAK,SAAS,CAACiB,OAAO,OAAO;QAC1C,OAAOA,OAAO,KAAK;IACrB;AACF;AAEO,eAAec,qBAAwBtD,OAO7C;IACC,MAAM,EAAEuD,SAAS,EAAEtD,OAAO,EAAEuD,aAAa,EAAE9D,gBAAgB,EAAEU,WAAW,EAAE,GACxEJ;IACF,MAAMW,eAAe8C;IACrB,MAAMnD,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnD,MAAMyD,wBAAwBC,uBAC5B3D,QAAQ,eAAe,IAAI,IAC3BuD;IAGF,MAAMK,cAAyD,EAAE;IAEjE,IAAIJ,eAAe,uBAAuB,OACxCI,YAAY,IAAI,CAAC;QACf,MAAM;QACN,WAAW;YACT,KAAKtD;YACL,QAAQ;QACV;IACF;IAGFsD,YAAY,IAAI,CAAC;QACf,MAAM;QACN,MAAMF;IACR;IAEA,MAAM/D,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAASiD;QACX;KACD;IAED,IAAIlE,kBAAkB;QACpB,MAAM0B,SAAS,MAAM3B,mBAAmB;YACtC,QAAQC,iBAAiB,MAAM;YAC/B,yBAAyBA,iBAAiB,uBAAuB;QACnE;QACAC,KAAK,IAAI,IAAIyB;IACf;IAEA,MAAMoB,SAAS,MAAMC,yBACnB9C,MACAS;IAEF,OAAO;QACL,aAAaoC,OAAO,OAAO;QAC3B,OAAOA,OAAO,KAAK;QACnB,mBAAmBA,OAAO,iBAAiB;IAC7C;AACF;AAEO,eAAeqB,sBACpBC,WAAmB,EACnB3D,QAAwE,EACxEC,WAAyB;IAKzB,MAAMO,eAAeoD;IACrB,MAAMC,aAAaC,0BAA0BH;IAE7C,MAAMnE,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAASqD;QACX;KACD;IAED,MAAMxB,SAAS,MAAMrC,SAASR,MAAMS;IAEpC,OAAO;QACL,kBAAkBoC,OAAO,OAAO,CAAC,gBAAgB,IAAI;QACrD,OAAOA,OAAO,KAAK;IACrB;AACF"}
+
{"version":3,"file":"ai-model/inspect.mjs","sources":["../../../src/ai-model/inspect.ts"],"sourcesContent":["import type {\n AIDataExtractionResponse,\n AIElementResponse,\n AISectionLocatorResponse,\n AIUsageInfo,\n Rect,\n ReferenceImage,\n ServiceExtractOption,\n UIContext,\n} from '@/types';\nimport type { IModelConfig } from '@midscene/shared/env';\nimport { generateElementByPosition } from '@midscene/shared/extractor/dom-util';\nimport {\n cropByRect,\n paddingToMatchBlockByBase64,\n preProcessImageUrl,\n} from '@midscene/shared/img';\nimport { getDebug } from '@midscene/shared/logger';\nimport type { LocateResultElement } from '@midscene/shared/types';\nimport { assert } from '@midscene/shared/utils';\nimport type {\n ChatCompletionSystemMessageParam,\n ChatCompletionUserMessageParam,\n} from 'openai/resources/index';\nimport type { TMultimodalPrompt, TUserPrompt } from '../common';\nimport { adaptBboxToRect, expandSearchArea, mergeRects } from '../common';\nimport { parseAutoGLMLocateResponse } from './auto-glm/parser';\nimport { getAutoGLMLocatePrompt } from './auto-glm/prompt';\nimport { isAutoGLM } from './auto-glm/util';\nimport {\n extractDataQueryPrompt,\n systemPromptToExtract,\n} from './prompt/extraction';\nimport {\n findElementPrompt,\n systemPromptToLocateElement,\n} from './prompt/llm-locator';\nimport {\n sectionLocatorInstruction,\n systemPromptToLocateSection,\n} from './prompt/llm-section-locator';\nimport {\n orderSensitiveJudgePrompt,\n systemPromptToJudgeOrderSensitive,\n} from './prompt/order-sensitive-judge';\nimport {\n callAIWithObjectResponse,\n callAIWithStringResponse,\n} from './service-caller/index';\n\nexport type AIArgs = [\n ChatCompletionSystemMessageParam,\n ...ChatCompletionUserMessageParam[],\n];\n\nconst debugInspect = getDebug('ai:inspect');\nconst debugSection = getDebug('ai:section');\n\nconst extraTextFromUserPrompt = (prompt: TUserPrompt): string => {\n if (typeof prompt === 'string') {\n return prompt;\n } else {\n return prompt.prompt;\n }\n};\n\nconst promptsToChatParam = async (\n multimodalPrompt: TMultimodalPrompt,\n): Promise<ChatCompletionUserMessageParam[]> => {\n const msgs: ChatCompletionUserMessageParam[] = [];\n if (multimodalPrompt?.images?.length) {\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'text',\n text: 'Next, I will provide all the reference images.',\n },\n ],\n });\n\n for (const item of multimodalPrompt.images) {\n const base64 = await preProcessImageUrl(\n item.url,\n !!multimodalPrompt.convertHttpImage2Base64,\n );\n\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'text',\n text: `this is the reference image named '${item.name}':`,\n },\n ],\n });\n\n msgs.push({\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: base64,\n detail: 'high',\n },\n },\n ],\n });\n }\n }\n return msgs;\n};\n\nexport async function AiLocateElement(options: {\n context: UIContext;\n targetElementDescription: TUserPrompt;\n referenceImage?: ReferenceImage;\n callAIFn: typeof callAIWithObjectResponse<\n AIElementResponse | [number, number]\n >;\n searchConfig?: Awaited<ReturnType<typeof AiLocateSection>>;\n modelConfig: IModelConfig;\n}): Promise<{\n parseResult: {\n elements: LocateResultElement[];\n errors?: string[];\n };\n rect?: Rect;\n rawResponse: string;\n usage?: AIUsageInfo;\n reasoning_content?: string;\n}> {\n const { context, targetElementDescription, callAIFn, modelConfig } = options;\n const { vlMode } = modelConfig;\n const screenshotBase64 = context.screenshot.getData();\n\n 
assert(\n targetElementDescription,\n 'cannot find the target element description',\n );\n const targetElementDescriptionText = extraTextFromUserPrompt(\n targetElementDescription,\n );\n const userInstructionPrompt = findElementPrompt(targetElementDescriptionText);\n const systemPrompt = isAutoGLM(vlMode)\n ? getAutoGLMLocatePrompt(vlMode)\n : systemPromptToLocateElement(vlMode);\n\n let imagePayload = screenshotBase64;\n let imageWidth = context.size.width;\n let imageHeight = context.size.height;\n let originalImageWidth = imageWidth;\n let originalImageHeight = imageHeight;\n\n if (options.searchConfig) {\n assert(\n options.searchConfig.rect,\n 'searchArea is provided but its rect cannot be found. Failed to locate element',\n );\n assert(\n options.searchConfig.imageBase64,\n 'searchArea is provided but its imageBase64 cannot be found. Failed to locate element',\n );\n\n imagePayload = options.searchConfig.imageBase64;\n imageWidth = options.searchConfig.rect?.width;\n imageHeight = options.searchConfig.rect?.height;\n originalImageWidth = imageWidth;\n originalImageHeight = imageHeight;\n } else if (vlMode === 'qwen2.5-vl') {\n const paddedResult = await paddingToMatchBlockByBase64(imagePayload);\n imageWidth = paddedResult.width;\n imageHeight = paddedResult.height;\n imagePayload = paddedResult.imageBase64;\n }\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: imagePayload,\n detail: 'high',\n },\n },\n {\n type: 'text',\n text: isAutoGLM(vlMode)\n ? `Tap: ${userInstructionPrompt}`\n : userInstructionPrompt,\n },\n ],\n },\n ];\n\n if (typeof targetElementDescription !== 'string') {\n const addOns = await promptsToChatParam({\n images: targetElementDescription.images,\n convertHttpImage2Base64: targetElementDescription.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n if (isAutoGLM(vlMode)) {\n const { content: rawResponseContent, usage } =\n await callAIWithStringResponse(msgs, modelConfig);\n\n debugInspect('auto-glm rawResponse:', rawResponseContent);\n\n const parsed = parseAutoGLMLocateResponse(rawResponseContent);\n\n debugInspect('auto-glm thinking:', parsed.think);\n debugInspect('auto-glm coordinates:', parsed.coordinates);\n\n let resRect: Rect | undefined;\n let matchedElements: LocateResultElement[] = [];\n let errors: string[] = [];\n\n if (parsed.error || !parsed.coordinates) {\n errors = [parsed.error || 'Failed to parse auto-glm response'];\n debugInspect('auto-glm parse error:', errors[0]);\n } else {\n const { x, y } = parsed.coordinates;\n\n debugInspect('auto-glm coordinates [0-999]:', { x, y });\n\n // Convert auto-glm coordinates [0,999] to pixel bbox\n // Map from [0,999] to pixel coordinates\n const pixelX = Math.round((x * imageWidth) / 1000);\n const pixelY = Math.round((y * imageHeight) / 1000);\n\n debugInspect('auto-glm pixel coordinates:', { pixelX, pixelY });\n\n // Create a small bbox around the point\n const bboxSize = 10;\n const x1 = Math.max(pixelX - bboxSize / 2, 0);\n const y1 = Math.max(pixelY - bboxSize / 2, 0);\n const x2 = Math.min(pixelX + bboxSize / 2, imageWidth);\n const y2 = Math.min(pixelY + bboxSize / 2, imageHeight);\n\n // Convert to Rect format\n resRect = {\n left: x1,\n top: y1,\n width: x2 - x1,\n height: y2 - y1,\n };\n\n // Apply offset if searching in a cropped area\n if (options.searchConfig?.rect) {\n resRect.left += options.searchConfig.rect.left;\n resRect.top += options.searchConfig.rect.top;\n }\n\n 
debugInspect('auto-glm resRect:', resRect);\n\n const rectCenter = {\n x: resRect.left + resRect.width / 2,\n y: resRect.top + resRect.height / 2,\n };\n\n const element: LocateResultElement = generateElementByPosition(\n rectCenter,\n targetElementDescriptionText as string,\n );\n\n if (element) {\n matchedElements = [element];\n }\n }\n\n return {\n rect: resRect,\n parseResult: {\n elements: matchedElements,\n errors,\n },\n rawResponse: rawResponseContent,\n usage,\n reasoning_content: parsed.think,\n };\n }\n\n const res = await callAIFn(msgs, modelConfig);\n\n const rawResponse = JSON.stringify(res.content);\n\n let resRect: Rect | undefined;\n let matchedElements: LocateResultElement[] = [];\n let errors: string[] | undefined =\n 'errors' in res.content ? res.content.errors : [];\n try {\n if (\n 'bbox' in res.content &&\n Array.isArray(res.content.bbox) &&\n res.content.bbox.length >= 1\n ) {\n resRect = adaptBboxToRect(\n res.content.bbox,\n imageWidth,\n imageHeight,\n options.searchConfig?.rect?.left,\n options.searchConfig?.rect?.top,\n originalImageWidth,\n originalImageHeight,\n vlMode,\n );\n\n debugInspect('resRect', resRect);\n\n const rectCenter = {\n x: resRect.left + resRect.width / 2,\n y: resRect.top + resRect.height / 2,\n };\n\n const element: LocateResultElement = generateElementByPosition(\n rectCenter,\n targetElementDescriptionText as string,\n );\n errors = [];\n\n if (element) {\n matchedElements = [element];\n }\n }\n } catch (e) {\n const msg =\n e instanceof Error\n ? `Failed to parse bbox: ${e.message}`\n : 'unknown error in locate';\n if (!errors || errors?.length === 0) {\n errors = [msg];\n } else {\n errors.push(`(${msg})`);\n }\n }\n\n return {\n rect: resRect,\n parseResult: {\n elements: matchedElements as LocateResultElement[],\n errors: errors as string[],\n },\n rawResponse,\n usage: res.usage,\n reasoning_content: res.reasoning_content,\n };\n}\n\nexport async function AiLocateSection(options: {\n context: UIContext;\n sectionDescription: TUserPrompt;\n modelConfig: IModelConfig;\n}): Promise<{\n rect?: Rect;\n imageBase64?: string;\n error?: string;\n rawResponse: string;\n usage?: AIUsageInfo;\n}> {\n const { context, sectionDescription, modelConfig } = options;\n const { vlMode } = modelConfig;\n const screenshotBase64 = context.screenshot.getData();\n\n const systemPrompt = systemPromptToLocateSection(vlMode);\n const sectionLocatorInstructionText = sectionLocatorInstruction(\n extraTextFromUserPrompt(sectionDescription),\n );\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: [\n {\n type: 'image_url',\n image_url: {\n url: screenshotBase64,\n detail: 'high',\n },\n },\n {\n type: 'text',\n text: sectionLocatorInstructionText,\n },\n ],\n },\n ];\n\n if (typeof sectionDescription !== 'string') {\n const addOns = await promptsToChatParam({\n images: sectionDescription.images,\n convertHttpImage2Base64: sectionDescription.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n const result = await callAIWithObjectResponse<AISectionLocatorResponse>(\n msgs,\n modelConfig,\n );\n\n let sectionRect: Rect | undefined;\n const sectionBbox = result.content.bbox;\n if (sectionBbox) {\n const targetRect = adaptBboxToRect(\n sectionBbox,\n context.size.width,\n context.size.height,\n 0,\n 0,\n context.size.width,\n context.size.height,\n vlMode,\n );\n debugSection('original targetRect %j', targetRect);\n\n const referenceBboxList = result.content.references_bbox || [];\n 
debugSection('referenceBboxList %j', referenceBboxList);\n\n const referenceRects = referenceBboxList\n .filter((bbox) => Array.isArray(bbox))\n .map((bbox) => {\n return adaptBboxToRect(\n bbox,\n context.size.width,\n context.size.height,\n 0,\n 0,\n context.size.width,\n context.size.height,\n vlMode,\n );\n });\n debugSection('referenceRects %j', referenceRects);\n\n // merge the sectionRect and referenceRects\n const mergedRect = mergeRects([targetRect, ...referenceRects]);\n debugSection('mergedRect %j', mergedRect);\n\n // expand search area to at least 200 x 200\n sectionRect = expandSearchArea(mergedRect, context.size, vlMode);\n debugSection('expanded sectionRect %j', sectionRect);\n }\n\n let imageBase64 = screenshotBase64;\n if (sectionRect) {\n const croppedResult = await cropByRect(\n screenshotBase64,\n sectionRect,\n vlMode === 'qwen2.5-vl',\n );\n imageBase64 = croppedResult.imageBase64;\n sectionRect.width = croppedResult.width;\n sectionRect.height = croppedResult.height;\n }\n\n return {\n rect: sectionRect,\n imageBase64,\n error: result.content.error,\n rawResponse: JSON.stringify(result.content),\n usage: result.usage,\n };\n}\n\nexport async function AiExtractElementInfo<T>(options: {\n dataQuery: string | Record<string, string>;\n multimodalPrompt?: TMultimodalPrompt;\n context: UIContext;\n pageDescription?: string;\n extractOption?: ServiceExtractOption;\n modelConfig: IModelConfig;\n}) {\n const { dataQuery, context, extractOption, multimodalPrompt, modelConfig } =\n options;\n const systemPrompt = systemPromptToExtract();\n const screenshotBase64 = context.screenshot.getData();\n\n const extractDataPromptText = extractDataQueryPrompt(\n options.pageDescription || '',\n dataQuery,\n );\n\n const userContent: ChatCompletionUserMessageParam['content'] = [];\n\n if (extractOption?.screenshotIncluded !== false) {\n userContent.push({\n type: 'image_url',\n image_url: {\n url: screenshotBase64,\n detail: 'high',\n },\n });\n }\n\n userContent.push({\n type: 'text',\n text: extractDataPromptText,\n });\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: userContent,\n },\n ];\n\n if (multimodalPrompt) {\n const addOns = await promptsToChatParam({\n images: multimodalPrompt.images,\n convertHttpImage2Base64: multimodalPrompt.convertHttpImage2Base64,\n });\n msgs.push(...addOns);\n }\n\n const result = await callAIWithObjectResponse<AIDataExtractionResponse<T>>(\n msgs,\n modelConfig,\n );\n return {\n parseResult: result.content,\n usage: result.usage,\n reasoning_content: result.reasoning_content,\n };\n}\n\nexport async function AiJudgeOrderSensitive(\n description: string,\n callAIFn: typeof callAIWithObjectResponse<{ isOrderSensitive: boolean }>,\n modelConfig: IModelConfig,\n): Promise<{\n isOrderSensitive: boolean;\n usage?: AIUsageInfo;\n}> {\n const systemPrompt = systemPromptToJudgeOrderSensitive();\n const userPrompt = orderSensitiveJudgePrompt(description);\n\n const msgs: AIArgs = [\n { role: 'system', content: systemPrompt },\n {\n role: 'user',\n content: userPrompt,\n },\n ];\n\n const result = await callAIFn(msgs, modelConfig);\n\n return {\n isOrderSensitive: result.content.isOrderSensitive ?? 
false,\n usage: result.usage,\n };\n}\n"],"names":["debugInspect","getDebug","debugSection","extraTextFromUserPrompt","prompt","promptsToChatParam","multimodalPrompt","msgs","item","base64","preProcessImageUrl","AiLocateElement","options","context","targetElementDescription","callAIFn","modelConfig","vlMode","screenshotBase64","assert","targetElementDescriptionText","userInstructionPrompt","findElementPrompt","systemPrompt","isAutoGLM","getAutoGLMLocatePrompt","systemPromptToLocateElement","imagePayload","imageWidth","imageHeight","originalImageWidth","originalImageHeight","paddedResult","paddingToMatchBlockByBase64","addOns","rawResponseContent","usage","callAIWithStringResponse","parsed","parseAutoGLMLocateResponse","resRect","matchedElements","errors","x","y","pixelX","Math","pixelY","bboxSize","x1","y1","x2","y2","rectCenter","element","generateElementByPosition","res","rawResponse","JSON","Array","adaptBboxToRect","e","msg","Error","AiLocateSection","sectionDescription","systemPromptToLocateSection","sectionLocatorInstructionText","sectionLocatorInstruction","result","callAIWithObjectResponse","sectionRect","sectionBbox","targetRect","referenceBboxList","referenceRects","bbox","mergedRect","mergeRects","expandSearchArea","imageBase64","croppedResult","cropByRect","AiExtractElementInfo","dataQuery","extractOption","systemPromptToExtract","extractDataPromptText","extractDataQueryPrompt","userContent","AiJudgeOrderSensitive","description","systemPromptToJudgeOrderSensitive","userPrompt","orderSensitiveJudgePrompt"],"mappings":";;;;;;;;;;;;;AAuDA,MAAMA,eAAeC,SAAS;AAC9B,MAAMC,eAAeD,SAAS;AAE9B,MAAME,0BAA0B,CAACC;IAC/B,IAAI,AAAkB,YAAlB,OAAOA,QACT,OAAOA;IAEP,OAAOA,OAAO,MAAM;AAExB;AAEA,MAAMC,qBAAqB,OACzBC;IAEA,MAAMC,OAAyC,EAAE;IACjD,IAAID,kBAAkB,QAAQ,QAAQ;QACpCC,KAAK,IAAI,CAAC;YACR,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,MAAM;gBACR;aACD;QACH;QAEA,KAAK,MAAMC,QAAQF,iBAAiB,MAAM,CAAE;YAC1C,MAAMG,SAAS,MAAMC,mBACnBF,KAAK,GAAG,EACR,CAAC,CAACF,iBAAiB,uBAAuB;YAG5CC,KAAK,IAAI,CAAC;gBACR,MAAM;gBACN,SAAS;oBACP;wBACE,MAAM;wBACN,MAAM,CAAC,mCAAmC,EAAEC,KAAK,IAAI,CAAC,EAAE,CAAC;oBAC3D;iBACD;YACH;YAEAD,KAAK,IAAI,CAAC;gBACR,MAAM;gBACN,SAAS;oBACP;wBACE,MAAM;wBACN,WAAW;4BACT,KAAKE;4BACL,QAAQ;wBACV;oBACF;iBACD;YACH;QACF;IACF;IACA,OAAOF;AACT;AAEO,eAAeI,gBAAgBC,OASrC;IAUC,MAAM,EAAEC,OAAO,EAAEC,wBAAwB,EAAEC,QAAQ,EAAEC,WAAW,EAAE,GAAGJ;IACrE,MAAM,EAAEK,MAAM,EAAE,GAAGD;IACnB,MAAME,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnDM,OACEL,0BACA;IAEF,MAAMM,+BAA+BjB,wBACnCW;IAEF,MAAMO,wBAAwBC,kBAAkBF;IAChD,MAAMG,eAAeC,UAAUP,UAC3BQ,uBAAuBR,UACvBS,4BAA4BT;IAEhC,IAAIU,eAAeT;IACnB,IAAIU,aAAaf,QAAQ,IAAI,CAAC,KAAK;IACnC,IAAIgB,cAAchB,QAAQ,IAAI,CAAC,MAAM;IACrC,IAAIiB,qBAAqBF;IACzB,IAAIG,sBAAsBF;IAE1B,IAAIjB,QAAQ,YAAY,EAAE;QACxBO,OACEP,QAAQ,YAAY,CAAC,IAAI,EACzB;QAEFO,OACEP,QAAQ,YAAY,CAAC,WAAW,EAChC;QAGFe,eAAef,QAAQ,YAAY,CAAC,WAAW;QAC/CgB,aAAahB,QAAQ,YAAY,CAAC,IAAI,EAAE;QACxCiB,cAAcjB,QAAQ,YAAY,CAAC,IAAI,EAAE;QACzCkB,qBAAqBF;QACrBG,sBAAsBF;IACxB,OAAO,IAAIZ,AAAW,iBAAXA,QAAyB;QAClC,MAAMe,eAAe,MAAMC,4BAA4BN;QACvDC,aAAaI,aAAa,KAAK;QAC/BH,cAAcG,aAAa,MAAM;QACjCL,eAAeK,aAAa,WAAW;IACzC;IAEA,MAAMzB,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,WAAW;wBACT,KAAKI;wBACL,QAAQ;oBACV;gBACF;gBACA;oBACE,MAAM;oBACN,MAAMH,UAAUP,UACZ,CAAC,KAAK,EAAEI,uBAAuB,GAC/BA;gBACN;aACD;QACH;KACD;IAED,IAAI,AAAoC,YAApC,OAAOP,0BAAuC;QAChD,MAAMoB,SAAS,MAAM7B,mBAAmB;YACtC,QAAQS,yBAAyB,MAAM;YACvC,yBAAyBA,yBAAyB,uBAAuB;QAC3E;QACAP,KAAK,IAAI,IAAI2B;IACf;IAEA,IAAIV,UAAUP,SAAS;QACrB,MAAM,EAAE,SAASkB,kBAAkB,EAAEC,KAAK,EAAE,GAC1C,MAAMC,yBAAyB9B,MAAMS;QAEv
ChB,aAAa,yBAAyBmC;QAEtC,MAAMG,SAASC,2BAA2BJ;QAE1CnC,aAAa,sBAAsBsC,OAAO,KAAK;QAC/CtC,aAAa,yBAAyBsC,OAAO,WAAW;QAExD,IAAIE;QACJ,IAAIC,kBAAyC,EAAE;QAC/C,IAAIC,SAAmB,EAAE;QAEzB,IAAIJ,OAAO,KAAK,IAAI,CAACA,OAAO,WAAW,EAAE;YACvCI,SAAS;gBAACJ,OAAO,KAAK,IAAI;aAAoC;YAC9DtC,aAAa,yBAAyB0C,MAAM,CAAC,EAAE;QACjD,OAAO;YACL,MAAM,EAAEC,CAAC,EAAEC,CAAC,EAAE,GAAGN,OAAO,WAAW;YAEnCtC,aAAa,iCAAiC;gBAAE2C;gBAAGC;YAAE;YAIrD,MAAMC,SAASC,KAAK,KAAK,CAAEH,IAAIf,aAAc;YAC7C,MAAMmB,SAASD,KAAK,KAAK,CAAEF,IAAIf,cAAe;YAE9C7B,aAAa,+BAA+B;gBAAE6C;gBAAQE;YAAO;YAG7D,MAAMC,WAAW;YACjB,MAAMC,KAAKH,KAAK,GAAG,CAACD,SAASG,WAAW,GAAG;YAC3C,MAAME,KAAKJ,KAAK,GAAG,CAACC,SAASC,WAAW,GAAG;YAC3C,MAAMG,KAAKL,KAAK,GAAG,CAACD,SAASG,WAAW,GAAGpB;YAC3C,MAAMwB,KAAKN,KAAK,GAAG,CAACC,SAASC,WAAW,GAAGnB;YAG3CW,UAAU;gBACR,MAAMS;gBACN,KAAKC;gBACL,OAAOC,KAAKF;gBACZ,QAAQG,KAAKF;YACf;YAGA,IAAItC,QAAQ,YAAY,EAAE,MAAM;gBAC9B4B,QAAQ,IAAI,IAAI5B,QAAQ,YAAY,CAAC,IAAI,CAAC,IAAI;gBAC9C4B,QAAQ,GAAG,IAAI5B,QAAQ,YAAY,CAAC,IAAI,CAAC,GAAG;YAC9C;YAEAZ,aAAa,qBAAqBwC;YAElC,MAAMa,aAAa;gBACjB,GAAGb,QAAQ,IAAI,GAAGA,QAAQ,KAAK,GAAG;gBAClC,GAAGA,QAAQ,GAAG,GAAGA,QAAQ,MAAM,GAAG;YACpC;YAEA,MAAMc,UAA+BC,0BACnCF,YACAjC;YAGF,IAAIkC,SACFb,kBAAkB;gBAACa;aAAQ;QAE/B;QAEA,OAAO;YACL,MAAMd;YACN,aAAa;gBACX,UAAUC;gBACVC;YACF;YACA,aAAaP;YACbC;YACA,mBAAmBE,OAAO,KAAK;QACjC;IACF;IAEA,MAAMkB,MAAM,MAAMzC,SAASR,MAAMS;IAEjC,MAAMyC,cAAcC,KAAK,SAAS,CAACF,IAAI,OAAO;IAE9C,IAAIhB;IACJ,IAAIC,kBAAyC,EAAE;IAC/C,IAAIC,SACF,YAAYc,IAAI,OAAO,GAAGA,IAAI,OAAO,CAAC,MAAM,GAAG,EAAE;IACnD,IAAI;QACF,IACE,UAAUA,IAAI,OAAO,IACrBG,MAAM,OAAO,CAACH,IAAI,OAAO,CAAC,IAAI,KAC9BA,IAAI,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,GAC3B;YACAhB,UAAUoB,gBACRJ,IAAI,OAAO,CAAC,IAAI,EAChB5B,YACAC,aACAjB,QAAQ,YAAY,EAAE,MAAM,MAC5BA,QAAQ,YAAY,EAAE,MAAM,KAC5BkB,oBACAC,qBACAd;YAGFjB,aAAa,WAAWwC;YAExB,MAAMa,aAAa;gBACjB,GAAGb,QAAQ,IAAI,GAAGA,QAAQ,KAAK,GAAG;gBAClC,GAAGA,QAAQ,GAAG,GAAGA,QAAQ,MAAM,GAAG;YACpC;YAEA,MAAMc,UAA+BC,0BACnCF,YACAjC;YAEFsB,SAAS,EAAE;YAEX,IAAIY,SACFb,kBAAkB;gBAACa;aAAQ;QAE/B;IACF,EAAE,OAAOO,GAAG;QACV,MAAMC,MACJD,aAAaE,QACT,CAAC,sBAAsB,EAAEF,EAAE,OAAO,EAAE,GACpC;QACN,IAAI,AAACnB,UAAUA,QAAQ,WAAW,GAGhCA,OAAO,IAAI,CAAC,CAAC,CAAC,EAAEoB,IAAI,CAAC,CAAC;aAFtBpB,SAAS;YAACoB;SAAI;IAIlB;IAEA,OAAO;QACL,MAAMtB;QACN,aAAa;YACX,UAAUC;YACV,QAAQC;QACV;QACAe;QACA,OAAOD,IAAI,KAAK;QAChB,mBAAmBA,IAAI,iBAAiB;IAC1C;AACF;AAEO,eAAeQ,gBAAgBpD,OAIrC;IAOC,MAAM,EAAEC,OAAO,EAAEoD,kBAAkB,EAAEjD,WAAW,EAAE,GAAGJ;IACrD,MAAM,EAAEK,MAAM,EAAE,GAAGD;IACnB,MAAME,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnD,MAAMU,eAAe2C,4BAA4BjD;IACjD,MAAMkD,gCAAgCC,0BACpCjE,wBAAwB8D;IAE1B,MAAM1D,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAAS;gBACP;oBACE,MAAM;oBACN,WAAW;wBACT,KAAKL;wBACL,QAAQ;oBACV;gBACF;gBACA;oBACE,MAAM;oBACN,MAAMiD;gBACR;aACD;QACH;KACD;IAED,IAAI,AAA8B,YAA9B,OAAOF,oBAAiC;QAC1C,MAAM/B,SAAS,MAAM7B,mBAAmB;YACtC,QAAQ4D,mBAAmB,MAAM;YACjC,yBAAyBA,mBAAmB,uBAAuB;QACrE;QACA1D,KAAK,IAAI,IAAI2B;IACf;IAEA,MAAMmC,SAAS,MAAMC,yBACnB/D,MACAS;IAGF,IAAIuD;IACJ,MAAMC,cAAcH,OAAO,OAAO,CAAC,IAAI;IACvC,IAAIG,aAAa;QACf,MAAMC,aAAab,gBACjBY,aACA3D,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnB,GACA,GACAA,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnBI;QAEFf,aAAa,0BAA0BuE;QAEvC,MAAMC,oBAAoBL,OAAO,OAAO,CAAC,eAAe,IAAI,EAAE;QAC9DnE,aAAa,wBAAwBwE;QAErC,MAAMC,iBAAiBD,kBACpB,MAAM,CAAC,CAACE,OAASjB,MAAM,OAAO,CAACiB,OAC/B,GAAG,CAAC,CAACA,OACGhB,gBACLgB,MACA/D,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnB,GACA,GACAA,QAAQ,IAAI,CAAC,KAAK,EAClBA,QAAQ,IAAI,CAAC,MAAM,EACnBI;QAGNf,aAAa,qBAAqByE;QAGlC,MAAME,aAAaC,WAAW;YAACL;eAAeE;SAAe;QAC7DzE,aAAa,iBAAiB2E;QAG9BN,cAAcQ,iBAAiBF,YAAYhE,QAAQ,IAAI,EAAEI;
QACzDf,aAAa,2BAA2BqE;IAC1C;IAEA,IAAIS,cAAc9D;IAClB,IAAIqD,aAAa;QACf,MAAMU,gBAAgB,MAAMC,WAC1BhE,kBACAqD,aACAtD,AAAW,iBAAXA;QAEF+D,cAAcC,cAAc,WAAW;QACvCV,YAAY,KAAK,GAAGU,cAAc,KAAK;QACvCV,YAAY,MAAM,GAAGU,cAAc,MAAM;IAC3C;IAEA,OAAO;QACL,MAAMV;QACNS;QACA,OAAOX,OAAO,OAAO,CAAC,KAAK;QAC3B,aAAaX,KAAK,SAAS,CAACW,OAAO,OAAO;QAC1C,OAAOA,OAAO,KAAK;IACrB;AACF;AAEO,eAAec,qBAAwBvE,OAO7C;IACC,MAAM,EAAEwE,SAAS,EAAEvE,OAAO,EAAEwE,aAAa,EAAE/E,gBAAgB,EAAEU,WAAW,EAAE,GACxEJ;IACF,MAAMW,eAAe+D;IACrB,MAAMpE,mBAAmBL,QAAQ,UAAU,CAAC,OAAO;IAEnD,MAAM0E,wBAAwBC,uBAC5B5E,QAAQ,eAAe,IAAI,IAC3BwE;IAGF,MAAMK,cAAyD,EAAE;IAEjE,IAAIJ,eAAe,uBAAuB,OACxCI,YAAY,IAAI,CAAC;QACf,MAAM;QACN,WAAW;YACT,KAAKvE;YACL,QAAQ;QACV;IACF;IAGFuE,YAAY,IAAI,CAAC;QACf,MAAM;QACN,MAAMF;IACR;IAEA,MAAMhF,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAASkE;QACX;KACD;IAED,IAAInF,kBAAkB;QACpB,MAAM4B,SAAS,MAAM7B,mBAAmB;YACtC,QAAQC,iBAAiB,MAAM;YAC/B,yBAAyBA,iBAAiB,uBAAuB;QACnE;QACAC,KAAK,IAAI,IAAI2B;IACf;IAEA,MAAMmC,SAAS,MAAMC,yBACnB/D,MACAS;IAEF,OAAO;QACL,aAAaqD,OAAO,OAAO;QAC3B,OAAOA,OAAO,KAAK;QACnB,mBAAmBA,OAAO,iBAAiB;IAC7C;AACF;AAEO,eAAeqB,sBACpBC,WAAmB,EACnB5E,QAAwE,EACxEC,WAAyB;IAKzB,MAAMO,eAAeqE;IACrB,MAAMC,aAAaC,0BAA0BH;IAE7C,MAAMpF,OAAe;QACnB;YAAE,MAAM;YAAU,SAASgB;QAAa;QACxC;YACE,MAAM;YACN,SAASsE;QACX;KACD;IAED,MAAMxB,SAAS,MAAMtD,SAASR,MAAMS;IAEpC,OAAO;QACL,kBAAkBqD,OAAO,OAAO,CAAC,gBAAgB,IAAI;QACrD,OAAOA,OAAO,KAAK;IACrB;AACF"}
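The `inspect.mjs.map` content above embeds the updated `inspect.ts` source, whose new auto-glm locate path converts the model's normalized [0, 999] point into a small pixel rect (optionally offset back into full-screenshot coordinates when a cropped search area was used). As a reading aid, here is a minimal TypeScript sketch of that mapping; the logic and the 10px box size are taken from the embedded source, but the standalone helper name and `Rect` interface are illustrative, not an exported API of the package.

```ts
interface Rect {
  left: number;
  top: number;
  width: number;
  height: number;
}

// Sketch: map an auto-glm point on the [0, 999] grid to a small pixel Rect.
function rectFromAutoGLMPoint(
  x: number, // normalized x in [0, 999]
  y: number, // normalized y in [0, 999]
  imageWidth: number,
  imageHeight: number,
  searchAreaOffset?: { left: number; top: number }, // set when locating inside a cropped area
): Rect {
  // [0, 999] grid -> pixel coordinates of the (possibly cropped) image
  const pixelX = Math.round((x * imageWidth) / 1000);
  const pixelY = Math.round((y * imageHeight) / 1000);

  // Build a small bbox (10px) around the point, clamped to the image bounds
  const bboxSize = 10;
  const x1 = Math.max(pixelX - bboxSize / 2, 0);
  const y1 = Math.max(pixelY - bboxSize / 2, 0);
  const x2 = Math.min(pixelX + bboxSize / 2, imageWidth);
  const y2 = Math.min(pixelY + bboxSize / 2, imageHeight);

  const rect: Rect = { left: x1, top: y1, width: x2 - x1, height: y2 - y1 };

  // Shift back into full-screenshot coordinates if the locate ran on a crop
  if (searchAreaOffset) {
    rect.left += searchAreaOffset.left;
    rect.top += searchAreaOffset.top;
  }
  return rect;
}
```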
@@ -3,6 +3,7 @@ import { getDebug } from "@midscene/shared/logger";
 import { assert, ifInBrowser } from "@midscene/shared/utils";
 import { jsonrepair } from "jsonrepair";
 import openai_0 from "openai";
+import { isAutoGLM } from "../auto-glm/util.mjs";
 async function createChatClient({ modelConfig }) {
   const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion: uiTarsVersion, vlMode, modelFamily, createOpenAIClient, timeout } = modelConfig;
   let proxyAgent;
@@ -141,6 +142,10 @@ async function callAI(messages, modelConfig, options) {
       vl_high_resolution_images: true
     } : {}
   };
+  if (isAutoGLM(vlMode)) {
+    commonConfig.top_p = 0.85;
+    commonConfig.frequency_penalty = 0.2;
+  }
   const { config: deepThinkConfig, debugMessage, warningMessage } = resolveDeepThinkConfig({
     deepThink: options?.deepThink,
     modelFamily
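For context, the hunk above makes `callAI` pin fixed sampling parameters whenever the configured vlMode is an auto-glm model. A minimal sketch of the effect follows, assuming `commonConfig` is the plain request-options object built earlier in `callAI` and that `isAutoGLM` accepts the `vlMode` value as shown in the sources above; the helper function and import path here are illustrative, the values come from the diff.

```ts
import { isAutoGLM } from "./auto-glm/util"; // dist code imports "../auto-glm/util.mjs"

// Sketch only: apply the auto-glm-specific sampling overrides to the
// request options that callAI is about to send.
function applyAutoGLMSampling(
  commonConfig: Record<string, unknown>,
  vlMode: string | undefined,
): void {
  if (isAutoGLM(vlMode)) {
    commonConfig.top_p = 0.85;
    commonConfig.frequency_penalty = 0.2;
  }
}
```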