@midscene/core 0.30.5 → 1.0.1-beta-20251021060907.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/agent/agent.mjs +41 -33
- package/dist/es/agent/agent.mjs.map +1 -1
- package/dist/es/agent/execution-session.mjs +41 -0
- package/dist/es/agent/execution-session.mjs.map +1 -0
- package/dist/es/agent/task-builder.mjs +303 -0
- package/dist/es/agent/task-builder.mjs.map +1 -0
- package/dist/es/agent/tasks.mjs +68 -391
- package/dist/es/agent/tasks.mjs.map +1 -1
- package/dist/es/agent/ui-utils.mjs.map +1 -1
- package/dist/es/agent/utils.mjs +6 -6
- package/dist/es/agent/utils.mjs.map +1 -1
- package/dist/es/ai-model/common.mjs +1 -15
- package/dist/es/ai-model/common.mjs.map +1 -1
- package/dist/es/ai-model/inspect.mjs +2 -3
- package/dist/es/ai-model/inspect.mjs.map +1 -1
- package/dist/es/ai-model/llm-planning.mjs +6 -24
- package/dist/es/ai-model/llm-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-locator.mjs +3 -204
- package/dist/es/ai-model/prompt/llm-locator.mjs.map +1 -1
- package/dist/es/ai-model/service-caller/index.mjs +101 -231
- package/dist/es/ai-model/service-caller/index.mjs.map +1 -1
- package/dist/es/index.mjs +3 -2
- package/dist/es/index.mjs.map +1 -1
- package/dist/es/insight/index.mjs +18 -19
- package/dist/es/insight/index.mjs.map +1 -1
- package/dist/es/insight/utils.mjs +3 -3
- package/dist/es/insight/utils.mjs.map +1 -1
- package/dist/es/report.mjs.map +1 -1
- package/dist/es/{ai-model/action-executor.mjs → task-runner.mjs} +69 -10
- package/dist/es/task-runner.mjs.map +1 -0
- package/dist/es/types.mjs +18 -1
- package/dist/es/types.mjs.map +1 -1
- package/dist/es/utils.mjs +2 -2
- package/dist/es/yaml/player.mjs +18 -14
- package/dist/es/yaml/player.mjs.map +1 -1
- package/dist/lib/agent/agent.js +41 -33
- package/dist/lib/agent/agent.js.map +1 -1
- package/dist/lib/agent/execution-session.js +75 -0
- package/dist/lib/agent/execution-session.js.map +1 -0
- package/dist/lib/agent/task-builder.js +340 -0
- package/dist/lib/agent/task-builder.js.map +1 -0
- package/dist/lib/agent/tasks.js +68 -391
- package/dist/lib/agent/tasks.js.map +1 -1
- package/dist/lib/agent/ui-utils.js.map +1 -1
- package/dist/lib/agent/utils.js +6 -6
- package/dist/lib/agent/utils.js.map +1 -1
- package/dist/lib/ai-model/common.js +2 -19
- package/dist/lib/ai-model/common.js.map +1 -1
- package/dist/lib/ai-model/inspect.js +1 -2
- package/dist/lib/ai-model/inspect.js.map +1 -1
- package/dist/lib/ai-model/llm-planning.js +5 -23
- package/dist/lib/ai-model/llm-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-locator.js +2 -206
- package/dist/lib/ai-model/prompt/llm-locator.js.map +1 -1
- package/dist/lib/ai-model/service-caller/index.js +236 -384
- package/dist/lib/ai-model/service-caller/index.js.map +1 -1
- package/dist/lib/index.js +9 -5
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/insight/index.js +17 -18
- package/dist/lib/insight/index.js.map +1 -1
- package/dist/lib/insight/utils.js +5 -5
- package/dist/lib/insight/utils.js.map +1 -1
- package/dist/lib/report.js.map +1 -1
- package/dist/lib/{ai-model/action-executor.js → task-runner.js} +71 -12
- package/dist/lib/task-runner.js.map +1 -0
- package/dist/lib/types.js +22 -1
- package/dist/lib/types.js.map +1 -1
- package/dist/lib/utils.js +2 -2
- package/dist/lib/yaml/player.js +18 -14
- package/dist/lib/yaml/player.js.map +1 -1
- package/dist/types/agent/agent.d.ts +16 -0
- package/dist/types/agent/execution-session.d.ts +27 -0
- package/dist/types/agent/task-builder.d.ts +24 -0
- package/dist/types/agent/tasks.d.ts +8 -11
- package/dist/types/agent/ui-utils.d.ts +2 -2
- package/dist/types/agent/utils.d.ts +5 -2
- package/dist/types/ai-model/common.d.ts +0 -1
- package/dist/types/ai-model/prompt/llm-locator.d.ts +0 -2
- package/dist/types/index.d.ts +4 -3
- package/dist/types/insight/index.d.ts +5 -10
- package/dist/types/insight/utils.d.ts +2 -2
- package/dist/types/{ai-model/action-executor.d.ts → task-runner.d.ts} +14 -3
- package/dist/types/types.d.ts +47 -4
- package/dist/types/yaml.d.ts +3 -1
- package/package.json +4 -7
- package/dist/es/ai-model/action-executor.mjs.map +0 -1
- package/dist/lib/ai-model/action-executor.js.map +0 -1
package/dist/es/ai-model/service-caller/index.mjs

@@ -1,102 +1,58 @@
 import { AIResponseFormat } from "../../types.mjs";
-import { Anthropic } from "@anthropic-ai/sdk";
-import { DefaultAzureCredential, getBearerTokenProvider } from "@azure/identity";
-import { MIDSCENE_API_TYPE, MIDSCENE_LANGSMITH_DEBUG, OPENAI_MAX_TOKENS, globalConfigManager } from "@midscene/shared/env";
-import { parseBase64 } from "@midscene/shared/img";
+import { OPENAI_MAX_TOKENS, globalConfigManager } from "@midscene/shared/env";
 import { getDebug } from "@midscene/shared/logger";
-import { assert, ifInBrowser } from "@midscene/shared/utils";
+import { assert } from "@midscene/shared/utils";
 import { HttpsProxyAgent } from "https-proxy-agent";
 import { jsonrepair } from "jsonrepair";
-import openai_0, { AzureOpenAI } from "openai";
+import openai_0 from "openai";
 import { SocksProxyAgent } from "socks-proxy-agent";
 import { AIActionType } from "../common.mjs";
 import { assertSchema } from "../prompt/assertion.mjs";
-import { locatorSchema } from "../prompt/llm-locator.mjs";
 import { planSchema } from "../prompt/llm-planning.mjs";
 async function createChatClient({ AIActionTypeValue, modelConfig }) {
-    const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, openaiUseAzureDeprecated, useAzureOpenai, azureOpenaiScope, azureOpenaiKey, azureOpenaiEndpoint, azureOpenaiApiVersion, azureOpenaiDeployment, azureExtraConfig, useAnthropicSdk, anthropicApiKey, modelDescription, uiTarsModelVersion: uiTarsVersion, vlMode } = modelConfig;
+    const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion: uiTarsVersion, vlMode, createOpenAIClient } = modelConfig;
     let openai;
-    let proxyAgent;
-    const debugProxy = getDebug('ai:call:proxy');
-    if (httpProxy) {
-        debugProxy('using http proxy', httpProxy);
-        proxyAgent = new HttpsProxyAgent(httpProxy);
-    } else if (socksProxy) {
-        debugProxy('using socks proxy', socksProxy);
-        proxyAgent = new SocksProxyAgent(socksProxy);
-    }
-    if (openaiUseAzureDeprecated) openai = new AzureOpenAI({
-        baseURL: openaiBaseURL,
-        apiKey: openaiApiKey,
-        httpAgent: proxyAgent,
-        ...openaiExtraConfig,
-        dangerouslyAllowBrowser: true
+    if (createOpenAIClient) openai = createOpenAIClient({
+        modelName,
+        openaiApiKey,
+        openaiBaseURL,
+        socksProxy,
+        httpProxy,
+        openaiExtraConfig,
+        vlMode,
+        intent: modelConfig.intent,
+        modelDescription
     });
-    else if (useAzureOpenai) {
-        let tokenProvider;
-        if (azureOpenaiScope) {
-            assert(!ifInBrowser, 'Azure OpenAI is not supported in browser with Midscene.');
-            const credential = new DefaultAzureCredential();
-            tokenProvider = getBearerTokenProvider(credential, azureOpenaiScope);
-            openai = new AzureOpenAI({
-                azureADTokenProvider: tokenProvider,
-                endpoint: azureOpenaiEndpoint,
-                apiVersion: azureOpenaiApiVersion,
-                deployment: azureOpenaiDeployment,
-                ...openaiExtraConfig,
-                ...azureExtraConfig
-            });
-        } else openai = new AzureOpenAI({
-            apiKey: azureOpenaiKey,
-            endpoint: azureOpenaiEndpoint,
-            apiVersion: azureOpenaiApiVersion,
-            deployment: azureOpenaiDeployment,
-            dangerouslyAllowBrowser: true,
+    else {
+        let proxyAgent;
+        const debugProxy = getDebug('ai:call:proxy');
+        if (httpProxy) {
+            debugProxy('using http proxy', httpProxy);
+            proxyAgent = new HttpsProxyAgent(httpProxy);
+        } else if (socksProxy) {
+            debugProxy('using socks proxy', socksProxy);
+            proxyAgent = new SocksProxyAgent(socksProxy);
+        }
+        openai = new openai_0({
+            baseURL: openaiBaseURL,
+            apiKey: openaiApiKey,
+            ...proxyAgent ? {
+                httpAgent: proxyAgent
+            } : {},
             ...openaiExtraConfig,
-            ...azureExtraConfig
+            dangerouslyAllowBrowser: true
         });
-    } else if (!useAnthropicSdk) openai = new openai_0({
-        baseURL: openaiBaseURL,
-        apiKey: openaiApiKey,
-        httpAgent: proxyAgent,
-        ...openaiExtraConfig,
-        defaultHeaders: {
-            ...(null == openaiExtraConfig ? void 0 : openaiExtraConfig.defaultHeaders) || {},
-            [MIDSCENE_API_TYPE]: AIActionTypeValue.toString()
-        },
-        dangerouslyAllowBrowser: true
-    });
-    if (openai && globalConfigManager.getEnvConfigInBoolean(MIDSCENE_LANGSMITH_DEBUG)) {
-        if (ifInBrowser) throw new Error('langsmith is not supported in browser');
-        console.log('DEBUGGING MODE: langsmith wrapper enabled');
-        const { wrapOpenAI } = await import("langsmith/wrappers");
-        openai = wrapOpenAI(openai);
     }
-    if (void 0 !== openai) return {
+    return {
         completion: openai.chat.completions,
-        style: 'openai',
-        modelName,
-        modelDescription,
-        uiTarsVersion,
-        vlMode
-    };
-    if (useAnthropicSdk) openai = new Anthropic({
-        apiKey: anthropicApiKey,
-        httpAgent: proxyAgent,
-        dangerouslyAllowBrowser: true
-    });
-    if (void 0 !== openai && openai.messages) return {
-        completion: openai.messages,
-        style: 'anthropic',
         modelName,
         modelDescription,
         uiTarsVersion,
         vlMode
     };
-    throw new Error('Openai SDK or Anthropic SDK is not initialized');
 }
 async function callAI(messages, AIActionTypeValue, modelConfig, options) {
-    const { completion, style, modelName, modelDescription, uiTarsVersion, vlMode } = await createChatClient({
+    const { completion, modelName, modelDescription, uiTarsVersion, vlMode } = await createChatClient({
        AIActionTypeValue,
        modelConfig
    });
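The net effect of this hunk: `createChatClient` drops the built-in Azure OpenAI, Anthropic, and langsmith branches, and instead accepts an optional `createOpenAIClient` factory on the model config; whatever the factory returns only needs to expose `chat.completions`. A minimal sketch of restoring Azure support through the new hook (the factory's parameter names come from the call site above; the `CreateClientArgs` type, the config values, and the Azure options are illustrative assumptions, not the package's exported types):

```ts
import OpenAI, { AzureOpenAI } from "openai";

// Hypothetical shape, mirroring the fields createChatClient passes to the
// factory in this diff; the real IModelConfig lives in @midscene/shared/env.
type CreateClientArgs = {
  modelName: string;
  openaiApiKey?: string;
  openaiBaseURL?: string;
  socksProxy?: string;
  httpProxy?: string;
  openaiExtraConfig?: Record<string, any>;
  vlMode?: string;
  intent?: string;
  modelDescription?: string;
};

const modelConfig = {
  modelName: "gpt-4o",
  intent: "default", // hypothetical intent value
  // The returned client only needs to expose `chat.completions`.
  createOpenAIClient: (args: CreateClientArgs): OpenAI =>
    new AzureOpenAI({
      apiKey: args.openaiApiKey,
      endpoint: args.openaiBaseURL, // reusing the base URL as the Azure endpoint
      apiVersion: "2024-10-21",     // assumed API version
      ...args.openaiExtraConfig,
    }),
};
```

When no factory is supplied, the `else` branch above behaves like 0.30.5's plain-OpenAI path, including the HTTP/SOCKS proxy agents.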
@@ -115,168 +71,85 @@ async function callAI(messages, AIActionTypeValue, modelConfig, options) {
         temperature: 'vlm-ui-tars' === vlMode ? 0.0 : 0.1,
         stream: !!isStreaming,
         max_tokens: 'number' == typeof maxTokens ? maxTokens : Number.parseInt(maxTokens || '2048', 10),
-        ...'qwen-vl' === vlMode || 'qwen3-vl' === vlMode ? {
+        ...'qwen-vl' === vlMode ? {
             vl_high_resolution_images: true
         } : {}
     };
     try {
-        if ('openai' === style) {
-            debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${modelName}`);
-            if (isStreaming) {
-                const stream = await completion.create({
-                    model: modelName,
-                    messages,
-                    response_format: responseFormat,
-                    ...commonConfig
-                }, {
-                    stream: true
-                });
-                for await (const chunk of stream){
-                    var _chunk_choices__delta, _chunk_choices_, _chunk_choices, _chunk_choices__delta1, _chunk_choices_1, _chunk_choices1, _chunk_choices_2, _chunk_choices2;
-                    const content = (null == (_chunk_choices = chunk.choices) ? void 0 : null == (_chunk_choices_ = _chunk_choices[0]) ? void 0 : null == (_chunk_choices__delta = _chunk_choices_.delta) ? void 0 : _chunk_choices__delta.content) || '';
-                    const reasoning_content = (null == (_chunk_choices1 = chunk.choices) ? void 0 : null == (_chunk_choices_1 = _chunk_choices1[0]) ? void 0 : null == (_chunk_choices__delta1 = _chunk_choices_1.delta) ? void 0 : _chunk_choices__delta1.reasoning_content) || '';
-                    if (chunk.usage) usage = chunk.usage;
-                    if (content || reasoning_content) {
-                        accumulated += content;
-                        const chunkData = {
-                            content,
-                            reasoning_content,
-                            accumulated,
-                            isComplete: false,
-                            usage: void 0
-                        };
-                        options.onChunk(chunkData);
-                    }
-                    if (null == (_chunk_choices2 = chunk.choices) ? void 0 : null == (_chunk_choices_2 = _chunk_choices2[0]) ? void 0 : _chunk_choices_2.finish_reason) {
-                        timeCost = Date.now() - startTime;
-                        if (!usage) {
-                            const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
-                            usage = {
-                                prompt_tokens: estimatedTokens,
-                                completion_tokens: estimatedTokens,
-                                total_tokens: 2 * estimatedTokens
-                            };
-                        }
-                        const finalChunk = {
-                            content: '',
-                            accumulated,
-                            reasoning_content: '',
-                            isComplete: true,
-                            usage: {
-                                prompt_tokens: usage.prompt_tokens ?? 0,
-                                completion_tokens: usage.completion_tokens ?? 0,
-                                total_tokens: usage.total_tokens ?? 0,
-                                time_cost: timeCost ?? 0,
-                                model_name: modelName,
-                                model_description: modelDescription,
-                                intent: modelConfig.intent
-                            }
-                        };
-                        options.onChunk(finalChunk);
-                        break;
-                    }
-                }
-                content = accumulated;
-                debugProfileStats(`streaming model, ${modelName}, mode, ${vlMode || 'default'}, cost-ms, ${timeCost}`);
-            } else {
-                var _result_usage, _result_usage1, _result_usage2;
-                const result = await completion.create({
-                    model: modelName,
-                    messages,
-                    response_format: responseFormat,
-                    ...commonConfig
-                });
-                timeCost = Date.now() - startTime;
-                debugProfileStats(`model, ${modelName}, mode, ${vlMode || 'default'}, ui-tars-version, ${uiTarsVersion}, prompt-tokens, ${(null == (_result_usage = result.usage) ? void 0 : _result_usage.prompt_tokens) || ''}, completion-tokens, ${(null == (_result_usage1 = result.usage) ? void 0 : _result_usage1.completion_tokens) || ''}, total-tokens, ${(null == (_result_usage2 = result.usage) ? void 0 : _result_usage2.total_tokens) || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}`);
-                debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
-                assert(result.choices, `invalid response from LLM service: ${JSON.stringify(result)}`);
-                content = result.choices[0].message.content;
-                usage = result.usage;
-            }
-            debugCall(`response: ${content}`);
-            assert(content, 'empty content');
-        } else if ('anthropic' === style) {
-            const convertImageContent = (content)=>{
-                if ('image_url' === content.type) {
-                    const imgBase64 = content.image_url.url;
-                    assert(imgBase64, 'image_url is required');
-                    const { mimeType, body } = parseBase64(content.image_url.url);
-                    return {
-                        source: {
-                            type: 'base64',
-                            media_type: mimeType,
-                            data: body
-                        },
-                        type: 'image'
+        debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${modelName}`);
+        if (isStreaming) {
+            const stream = await completion.create({
+                model: modelName,
+                messages,
+                response_format: responseFormat,
+                ...commonConfig
+            }, {
+                stream: true
+            });
+            for await (const chunk of stream){
+                var _chunk_choices__delta, _chunk_choices_, _chunk_choices, _chunk_choices__delta1, _chunk_choices_1, _chunk_choices1, _chunk_choices_2, _chunk_choices2;
+                const content = (null == (_chunk_choices = chunk.choices) ? void 0 : null == (_chunk_choices_ = _chunk_choices[0]) ? void 0 : null == (_chunk_choices__delta = _chunk_choices_.delta) ? void 0 : _chunk_choices__delta.content) || '';
+                const reasoning_content = (null == (_chunk_choices1 = chunk.choices) ? void 0 : null == (_chunk_choices_1 = _chunk_choices1[0]) ? void 0 : null == (_chunk_choices__delta1 = _chunk_choices_1.delta) ? void 0 : _chunk_choices__delta1.reasoning_content) || '';
+                if (chunk.usage) usage = chunk.usage;
+                if (content || reasoning_content) {
+                    accumulated += content;
+                    const chunkData = {
+                        content,
+                        reasoning_content,
+                        accumulated,
+                        isComplete: false,
+                        usage: void 0
                     };
+                    options.onChunk(chunkData);
                 }
-                return content;
-            };
-            if (isStreaming) {
-                const stream = await completion.create({
-                    model: modelName,
-                    system: 'You are a versatile professional in software UI automation',
-                    messages: messages.map((m)=>({
-                        role: 'user',
-                        content: Array.isArray(m.content) ? m.content.map(convertImageContent) : m.content
-                    })),
-                    response_format: responseFormat,
-                    ...commonConfig
-                });
-                for await (const chunk of stream){
-                    var _chunk_delta;
-                    const content = (null == (_chunk_delta = chunk.delta) ? void 0 : _chunk_delta.text) || '';
-                    if (content) {
-                        accumulated += content;
-                        const chunkData = {
-                            content,
-                            accumulated,
-                            reasoning_content: '',
-                            isComplete: false,
-                            usage: void 0
+                if (null == (_chunk_choices2 = chunk.choices) ? void 0 : null == (_chunk_choices_2 = _chunk_choices2[0]) ? void 0 : _chunk_choices_2.finish_reason) {
+                    timeCost = Date.now() - startTime;
+                    if (!usage) {
+                        const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
+                        usage = {
+                            prompt_tokens: estimatedTokens,
+                            completion_tokens: estimatedTokens,
+                            total_tokens: 2 * estimatedTokens
                         };
-                        options.onChunk(chunkData);
-                    }
-                    if ('message_stop' === chunk.type) {
-                        timeCost = Date.now() - startTime;
-                        const anthropicUsage = chunk.usage;
-                        const finalChunk = {
-                            content: '',
-                            accumulated,
-                            reasoning_content: '',
-                            isComplete: true,
-                            usage: anthropicUsage ? {
-                                prompt_tokens: anthropicUsage.input_tokens ?? 0,
-                                completion_tokens: anthropicUsage.output_tokens ?? 0,
-                                total_tokens: (anthropicUsage.input_tokens ?? 0) + (anthropicUsage.output_tokens ?? 0),
-                                time_cost: timeCost ?? 0,
-                                model_name: modelName,
-                                model_description: modelDescription,
-                                intent: modelConfig.intent
-                            } : void 0
-                        };
-                        options.onChunk(finalChunk);
-                        break;
                     }
+                    const finalChunk = {
+                        content: '',
+                        accumulated,
+                        reasoning_content: '',
+                        isComplete: true,
+                        usage: {
+                            prompt_tokens: usage.prompt_tokens ?? 0,
+                            completion_tokens: usage.completion_tokens ?? 0,
+                            total_tokens: usage.total_tokens ?? 0,
+                            time_cost: timeCost ?? 0,
+                            model_name: modelName,
+                            model_description: modelDescription,
+                            intent: modelConfig.intent
+                        }
+                    };
+                    options.onChunk(finalChunk);
+                    break;
                 }
-                content = accumulated;
-            } else {
-                const result = await completion.create({
-                    model: modelName,
-                    system: 'You are a versatile professional in software UI automation',
-                    messages: messages.map((m)=>({
-                        role: 'user',
-                        content: Array.isArray(m.content) ? m.content.map(convertImageContent) : m.content
-                    })),
-                    response_format: responseFormat,
-                    ...commonConfig
-                });
-                timeCost = Date.now() - startTime;
-                content = result.content[0].text;
-                usage = result.usage;
             }
-            assert(content, 'empty content');
+            content = accumulated;
+            debugProfileStats(`streaming model, ${modelName}, mode, ${vlMode || 'default'}, cost-ms, ${timeCost}`);
+        } else {
+            var _result_usage, _result_usage1, _result_usage2;
+            const result = await completion.create({
+                model: modelName,
+                messages,
+                response_format: responseFormat,
+                ...commonConfig
+            });
+            timeCost = Date.now() - startTime;
+            debugProfileStats(`model, ${modelName}, mode, ${vlMode || 'default'}, ui-tars-version, ${uiTarsVersion}, prompt-tokens, ${(null == (_result_usage = result.usage) ? void 0 : _result_usage.prompt_tokens) || ''}, completion-tokens, ${(null == (_result_usage1 = result.usage) ? void 0 : _result_usage1.completion_tokens) || ''}, total-tokens, ${(null == (_result_usage2 = result.usage) ? void 0 : _result_usage2.total_tokens) || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}`);
+            debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
+            assert(result.choices, `invalid response from LLM service: ${JSON.stringify(result)}`);
+            content = result.choices[0].message.content;
+            usage = result.usage;
         }
+        debugCall(`response: ${content}`);
+        assert(content, 'empty content');
         if (isStreaming && !usage) {
             const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
             usage = {
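With the `style`/Anthropic split gone, `callAI` now has a single streaming path: every delta is forwarded through `options.onChunk`, and if the provider never reports usage, tokens are estimated at roughly one per four characters of accumulated text (a 1,000-character answer is booked as 250 prompt + 250 completion = 500 total tokens). A hedged sketch of driving that path; the option and chunk field names come from this hunk, while the import specifiers and the wrapper function are assumptions:

```ts
// Assumed import locations; the diff only shows the implementation file.
import { callAI } from "@midscene/core/ai-model/service-caller"; // hypothetical
import { AIActionType } from "@midscene/core/ai-model/common";   // hypothetical
import type OpenAI from "openai";

async function streamPlan(
  messages: OpenAI.Chat.ChatCompletionMessageParam[],
  modelConfig: any, // IModelConfig in the real package
) {
  const { content, usage, isStreamed } = await callAI(
    messages,
    AIActionType.PLAN,
    modelConfig,
    {
      stream: true,
      onChunk(chunk: any) {
        if (!chunk.isComplete) {
          process.stdout.write(chunk.content); // incremental delta
        } else {
          // The final chunk always carries usage, real or chars/4-estimated.
          console.log("\ntotal tokens:", chunk.usage?.total_tokens);
        }
      },
    },
  );
  return { content, usage, isStreamed };
}
```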
@@ -312,9 +185,6 @@ const getResponseFormat = (modelName, AIActionTypeValue)=>{
         case AIActionType.ASSERT:
             responseFormat = assertSchema;
             break;
-        case AIActionType.INSPECT_ELEMENT:
-            responseFormat = locatorSchema;
-            break;
         case AIActionType.PLAN:
             responseFormat = planSchema;
             break;
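Together with the `llm-locator` import removal in the first hunk, this deletes the only consumer of `locatorSchema`: element inspection no longer requests a dedicated structured output. For `gpt-4*` models the surviving arms behave as sketched below; the import specifiers are assumptions, and per the map's embedded source `AIResponseFormat.JSON` is the package's JSON-object response format:

```ts
// Hypothetical import specifiers for illustration:
import { getResponseFormat } from "@midscene/core/ai-model/service-caller";
import { AIActionType } from "@midscene/core/ai-model/common";

// Expected outcomes per the switch above (a sketch, not a test suite):
getResponseFormat("gpt-4o", AIActionType.ASSERT); // assertSchema, unchanged
getResponseFormat("gpt-4o", AIActionType.PLAN);   // planSchema, unchanged
// 0.30.5 returned locatorSchema for INSPECT_ELEMENT; that arm is gone,
// so no structured response_format is set for element inspection here.
```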
package/dist/es/ai-model/service-caller/index.mjs.map

@@ -1 +1 @@
-{"version":3,"file":"ai-model/service-caller/index.mjs","sources":["webpack://@midscene/core/./src/ai-model/service-caller/index.ts"],"sourcesContent":[…],"names":[…],"mappings":"…"}
+{"version":3,"file":"ai-model/service-caller/index.mjs","sources":["webpack://@midscene/core/./src/ai-model/service-caller/index.ts"],"sourcesContent":[…],"names":[…],"mappings":"…"}
AmC,iBAAiB,AAAD,KAAK;gBAG3D,IAAIF,MAAM,KAAK,EACbL,QAAQK,MAAM,KAAK;gBAGrB,IAAIP,WAAWW,mBAAmB;oBAChCV,eAAeD;oBACf,MAAMY,YAAiC;wBACrCZ;wBACAW;wBACAV;wBACA,YAAY;wBACZ,OAAOY;oBACT;oBACA1B,QAAQ,OAAO,CAAEyB;gBACnB;gBAGA,IAAI,QAAAF,CAAAA,kBAAAA,MAAM,OAAO,AAAD,IAAZA,KAAAA,IAAAA,QAAAA,CAAAA,mBAAAA,eAAe,CAAC,EAAE,AAAD,IAAjBA,KAAAA,IAAAA,iBAAoB,aAAa,EAAE;oBACrCP,WAAWL,KAAK,GAAG,KAAKD;oBAGxB,IAAI,CAACK,OAAO;wBAEV,MAAMY,kBAAkBC,KAAK,GAAG,CAC9B,GACAA,KAAK,KAAK,CAACd,YAAY,MAAM,GAAG;wBAElCC,QAAQ;4BACN,eAAeY;4BACf,mBAAmBA;4BACnB,cAAcA,AAAkB,IAAlBA;wBAChB;oBACF;oBAGA,MAAME,aAAkC;wBACtC,SAAS;wBACTf;wBACA,mBAAmB;wBACnB,YAAY;wBACZ,OAAO;4BACL,eAAeC,MAAM,aAAa,IAAI;4BACtC,mBAAmBA,MAAM,iBAAiB,IAAI;4BAC9C,cAAcA,MAAM,YAAY,IAAI;4BACpC,WAAWC,YAAY;4BACvB,YAAYjC;4BACZ,mBAAmBI;4BACnB,QAAQP,YAAY,MAAM;wBAC5B;oBACF;oBACAoB,QAAQ,OAAO,CAAE6B;oBACjB;gBACF;YACF;YACAhB,UAAUC;YACVN,kBACE,CAAC,iBAAiB,EAAEzB,UAAU,QAAQ,EAAEM,UAAU,UAAU,WAAW,EAAE2B,UAAU;QAEvF,OAAO;gBAUqGc,eAAyDC,gBAAwDC;YAT3N,MAAMC,SAAS,MAAMhC,WAAW,MAAM,CAAC;gBACrC,OAAOlB;gBACPgB;gBACA,iBAAiBG;gBACjB,GAAGe,YAAY;YACjB;YACAD,WAAWL,KAAK,GAAG,KAAKD;YAExBF,kBACE,CAAC,OAAO,EAAEzB,UAAU,QAAQ,EAAEM,UAAU,UAAU,mBAAmB,EAAED,cAAc,iBAAiB,EAAE0C,AAAAA,SAAAA,CAAAA,gBAAAA,OAAO,KAAK,AAAD,IAAXA,KAAAA,IAAAA,cAAc,aAAa,AAAD,KAAK,GAAG,qBAAqB,EAAEC,AAAAA,SAAAA,CAAAA,iBAAAA,OAAO,KAAK,AAAD,IAAXA,KAAAA,IAAAA,eAAc,iBAAiB,AAAD,KAAK,GAAG,gBAAgB,EAAEC,AAAAA,SAAAA,CAAAA,iBAAAA,OAAO,KAAK,AAAD,IAAXA,KAAAA,IAAAA,eAAc,YAAY,AAAD,KAAK,GAAG,WAAW,EAAEhB,SAAS,aAAa,EAAEiB,OAAO,WAAW,IAAI,IAAI;YAG3TxB,mBAAmB,CAAC,oBAAoB,EAAEyB,KAAK,SAAS,CAACD,OAAO,KAAK,GAAG;YAExEE,OACEF,OAAO,OAAO,EACd,CAAC,mCAAmC,EAAEC,KAAK,SAAS,CAACD,SAAS;YAEhEpB,UAAUoB,OAAO,OAAO,CAAC,EAAE,CAAC,OAAO,CAAC,OAAO;YAC3ClB,QAAQkB,OAAO,KAAK;QACtB;QAEA1B,UAAU,CAAC,UAAU,EAAEM,SAAS;QAChCsB,OAAOtB,SAAS;QAGhB,IAAID,eAAe,CAACG,OAAO;YAEzB,MAAMY,kBAAkBC,KAAK,GAAG,CAC9B,GACAA,KAAK,KAAK,CAAEf,AAAAA,CAAAA,WAAW,EAAC,EAAG,MAAM,GAAG;YAEtCE,QAAQ;gBACN,eAAeY;gBACf,mBAAmBA;gBACnB,cAAcA,AAAkB,IAAlBA;YAChB;QACF;QAEA,OAAO;YACL,SAASd,WAAW;YACpB,OAAOE,QACH;gBACE,eAAeA,MAAM,aAAa,IAAI;gBACtC,mBAAmBA,MAAM,iBAAiB,IAAI;gBAC9C,cAAcA,MAAM,YAAY,IAAI;gBACpC,WAAWC,YAAY;gBACvB,YAAYjC;gBACZ,mBAAmBI;gBACnB,QAAQP,YAAY,MAAM;YAC5B,IACA8C;YACJ,YAAY,CAAC,CAACd;QAChB;IACF,EAAE,OAAOwB,GAAQ;QACfC,QAAQ,KAAK,CAAC,kBAAkBD;QAChC,MAAME,WAAW,IAAIC,MACnB,CAAC,eAAe,EAAE3B,cAAc,eAAe,GAAG,kBAAkB,EAAEwB,EAAE,OAAO,CAAC,8DAA8D,CAAC,EAC/I;YACE,OAAOA;QACT;QAEF,MAAME;IACR;AACF;AAEO,MAAMnC,oBAAoB,CAC/BpB,WACAJ;IAIA,IAAIuB;IAKJ,IAAInB,UAAU,QAAQ,CAAC,UACrB,OAAQJ;QACN,KAAK6D,aAAa,MAAM;YACtBtC,iBAAiBuC;YACjB;QACF,KAAKD,aAAa,IAAI;YACpBtC,iBAAiBwC;YACjB;QACF,KAAKF,aAAa,YAAY;QAC9B,KAAKA,aAAa,gBAAgB;YAChCtC,iBAAiB;gBAAE,MAAMyC,iBAAiB,IAAI;YAAC;YAC/C;QACF,KAAKH,aAAa,IAAI;YAEpBtC,iBAAiBwB;YACjB;IACJ;IAKF,IACE3C,AAAc,wBAAdA,aACAJ,sBAAsB6D,aAAa,IAAI,EAEvCtC,iBAAiB;QAAE,MAAMyC,iBAAiB,IAAI;IAAC;IAGjD,OAAOzC;AACT;AAEO,eAAe0C,yBACpB7C,QAAsC,EACtCpB,iBAA+B,EAC/BC,WAAyB;IAEzB,MAAMiE,WAAW,MAAM/C,OAAOC,UAAUpB,mBAAmBC;IAC3DuD,OAAOU,UAAU;IACjB,MAAMxD,SAAST,YAAY,MAAM;IACjC,MAAMkE,cAAcC,cAAcF,SAAS,OAAO,EAAExD;IACpD,OAAO;QAAE,SAASyD;QAAa,OAAOD,SAAS,KAAK;IAAC;AACvD;AAEO,eAAeG,yBACpBC,IAAY,EACZtE,iBAA+B,EAC/BC,WAAyB;IAEzB,MAAM,EAAEiC,OAAO,EAAEE,KAAK,EAAE,GAAG,MAAMjB,OAAOmD,MAAMtE,mBAAmBC;IACjE,OAAO;QAAEiC;QAASE;IAAM;AAC1B;AAEO,SAASmC,yBAAyBL,QAAgB;IACvD,IAAI;QAEF,MAAMM,YAAYN,SAAS,KAAK,CAAC;QACjC,IAAIM,WACF,OAAOA,SAAS,CAAC,EAAE;QAIrB,MAAMC,iBAAiBP,SAAS,KAAK,CACnC;QAEF,IAAIO,gBACF,OAAOA,cAAc,CAAC,EAAE;QAI1B,MAAMC,gBAAgBR,SAAS,KAAK,CAAC;QACrC,IAAIQ,eACF,OAAOA,aAAa,CAAC,EAAE;IAE3B,EAAE,OAAM,CAAC;IAET,OAAOR;AACT;AAEO,SAASS,yBAAyBC,KAAa;IACpD,
IAAIA,MAAM,QAAQ,CAAC,SAEjB,MAAO,YAAY,IAAI,CAACA,OACtBA,QAAQA,MAAM,OAAO,CAAC,kBAAkB;IAG5C,OAAOA;AACT;AAEO,SAASR,cAAcQ,KAAa,EAAElE,MAAgC;IAC3E,MAAMmE,kBAAkBN,yBAAyBK;IAEjD,IAAIC,QAAAA,kBAAAA,KAAAA,IAAAA,gBAAiB,KAAK,CAAC,oBAAoB;YACtCC;QAAP,OAAO,QAAAA,CAAAA,yBAAAA,gBACJ,KAAK,CAAC,kBAAiB,IADnBA,KAAAA,IAAAA,uBAEH,KAAK,CAAC,GACP,GAAG,CAACvC;IACT;IACA,IAAI;QACF,OAAOgB,KAAK,KAAK,CAACsB;IACpB,EAAE,OAAM,CAAC;IACT,IAAI;QACF,OAAOtB,KAAK,KAAK,CAACwB,WAAWF;IAC/B,EAAE,OAAOpB,GAAG,CAAC;IAEb,IAAI/C,AAAW,oBAAXA,UAA8BA,AAAW,kBAAXA,QAA0B;QAC1D,MAAMsE,aAAaL,yBAAyBE;QAC5C,OAAOtB,KAAK,KAAK,CAACwB,WAAWC;IAC/B;IACA,MAAMpB,MAAM,CAAC,+BAA+B,EAAEgB,OAAO;AACvD"}
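For orientation, the `sourcesContent` embedded in the map above shows the service-caller's JSON-recovery cascade: strip code fences, try `JSON.parse`, retry through `jsonrepair`, and, for the `doubao-vision` / `vlm-ui-tars` visual modes only, rewrite space-separated bbox numbers to comma-separated form before a final repair pass. A minimal standalone sketch of that cascade (helper names mirror the bundled source, but the fence-stripping here is simplified, not the package's exact implementation):

```ts
import { jsonrepair } from 'jsonrepair';

// Simplified stand-in for the bundled extractJSONFromCodeBlock:
// prefer a fenced ```json block, else the widest {...} span.
function extractJson(response: string): string {
  const fenced = response.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
  if (fenced) return fenced[1];
  const bare = response.match(/\{[\s\S]*\}/);
  return bare ? bare[0] : response;
}

// "bbox": [940 445 969 490] -> [940,445,969,490],
// mirroring preprocessDoubaoBboxJson in the bundled source.
function commaJoinBbox(input: string): string {
  if (!input.includes('bbox')) return input;
  while (/\d+\s+\d+/.test(input)) {
    input = input.replace(/(\d+)\s+(\d+)/g, '$1,$2');
  }
  return input;
}

function parseModelJson(raw: string, vlMode?: string): unknown {
  const clean = extractJson(raw);
  try { return JSON.parse(clean); } catch {}
  try { return JSON.parse(jsonrepair(clean)); } catch {}
  if (vlMode === 'doubao-vision' || vlMode === 'vlm-ui-tars') {
    return JSON.parse(jsonrepair(commaJoinBbox(clean)));
  }
  throw new Error(`failed to parse json response: ${raw}`);
}
```

The ordering matters: plain `JSON.parse` is cheapest, `jsonrepair` tolerates trailing commas and unquoted keys, and the bbox rewrite is deliberately last and mode-gated because it would corrupt valid JSON containing space-separated number runs.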
package/dist/es/index.mjs
CHANGED
@@ -1,11 +1,12 @@
 import { z } from "zod";
-import {
+import { TaskRunner } from "./task-runner.mjs";
 import insight from "./insight/index.mjs";
 import { getVersion } from "./utils.mjs";
 import { AiLocateElement, PointSchema, RectSchema, SizeSchema, TMultimodalPromptSchema, TUserPromptSchema, describeUserPage, getMidsceneLocationSchema, plan } from "./ai-model/index.mjs";
 import { MIDSCENE_MODEL_NAME } from "@midscene/shared/env";
+import { InsightError } from "./types.mjs";
 import { Agent, createAgent } from "./agent/index.mjs";
 const src = insight;
-export { Agent, AiLocateElement,
+export { Agent, AiLocateElement, insight as Insight, InsightError, MIDSCENE_MODEL_NAME, PointSchema, RectSchema, SizeSchema, TMultimodalPromptSchema, TUserPromptSchema, TaskRunner, createAgent, src as default, describeUserPage, getMidsceneLocationSchema, getVersion, plan, z };
 
 //# sourceMappingURL=index.mjs.map
package/dist/es/index.mjs.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.mjs","sources":["webpack://@midscene/core/./src/index.ts"],"sourcesContent":["import { z } from 'zod';\nimport {
+{"version":3,"file":"index.mjs","sources":["webpack://@midscene/core/./src/index.ts"],"sourcesContent":["import { z } from 'zod';\nimport { TaskRunner } from './task-runner';\nimport Insight from './insight/index';\nimport { getVersion } from './utils';\n\nexport {\n  plan,\n  describeUserPage,\n  AiLocateElement,\n  getMidsceneLocationSchema,\n  type MidsceneLocationResultType,\n  PointSchema,\n  SizeSchema,\n  RectSchema,\n  TMultimodalPromptSchema,\n  TUserPromptSchema,\n  type TMultimodalPrompt,\n  type TUserPrompt,\n} from './ai-model/index';\n\nexport {\n  MIDSCENE_MODEL_NAME,\n  type CreateOpenAIClientFn,\n} from '@midscene/shared/env';\n\nexport type * from './types';\nexport { InsightError } from './types';\n\nexport { z };\n\nexport default Insight;\nexport { TaskRunner, Insight, getVersion };\n\nexport type {\n  MidsceneYamlScript,\n  MidsceneYamlTask,\n  MidsceneYamlFlowItem,\n  MidsceneYamlConfigResult,\n  MidsceneYamlConfig,\n  MidsceneYamlScriptWebEnv,\n  MidsceneYamlScriptAndroidEnv,\n  MidsceneYamlScriptIOSEnv,\n  MidsceneYamlScriptEnv,\n  LocateOption,\n  DetailedLocateParam,\n} from './yaml';\n\nexport { Agent, type AgentOpt, createAgent } from './agent';\n"],"names":["Insight"],"mappings":";;;;;;;;AA8BA,YAAeA"}