@midscene/core 0.30.10 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/agent/agent.mjs +233 -144
- package/dist/es/agent/agent.mjs.map +1 -1
- package/dist/es/agent/execution-session.mjs +41 -0
- package/dist/es/agent/execution-session.mjs.map +1 -0
- package/dist/es/agent/index.mjs +3 -3
- package/dist/es/agent/task-builder.mjs +319 -0
- package/dist/es/agent/task-builder.mjs.map +1 -0
- package/dist/es/agent/task-cache.mjs +4 -4
- package/dist/es/agent/task-cache.mjs.map +1 -1
- package/dist/es/agent/tasks.mjs +197 -504
- package/dist/es/agent/tasks.mjs.map +1 -1
- package/dist/es/agent/ui-utils.mjs +54 -35
- package/dist/es/agent/ui-utils.mjs.map +1 -1
- package/dist/es/agent/utils.mjs +16 -58
- package/dist/es/agent/utils.mjs.map +1 -1
- package/dist/es/ai-model/conversation-history.mjs +25 -13
- package/dist/es/ai-model/conversation-history.mjs.map +1 -1
- package/dist/es/ai-model/index.mjs +4 -4
- package/dist/es/ai-model/inspect.mjs +45 -54
- package/dist/es/ai-model/inspect.mjs.map +1 -1
- package/dist/es/ai-model/llm-planning.mjs +47 -65
- package/dist/es/ai-model/llm-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/assertion.mjs.map +1 -1
- package/dist/es/ai-model/prompt/common.mjs.map +1 -1
- package/dist/es/ai-model/prompt/describe.mjs.map +1 -1
- package/dist/es/ai-model/prompt/extraction.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-locator.mjs +11 -235
- package/dist/es/ai-model/prompt/llm-locator.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-planning.mjs +76 -322
- package/dist/es/ai-model/prompt/llm-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/llm-section-locator.mjs +15 -14
- package/dist/es/ai-model/prompt/llm-section-locator.mjs.map +1 -1
- package/dist/es/ai-model/prompt/order-sensitive-judge.mjs +35 -0
- package/dist/es/ai-model/prompt/order-sensitive-judge.mjs.map +1 -0
- package/dist/es/ai-model/prompt/playwright-generator.mjs +2 -2
- package/dist/es/ai-model/prompt/playwright-generator.mjs.map +1 -1
- package/dist/es/ai-model/prompt/ui-tars-locator.mjs.map +1 -1
- package/dist/es/ai-model/prompt/ui-tars-planning.mjs.map +1 -1
- package/dist/es/ai-model/prompt/util.mjs +3 -88
- package/dist/es/ai-model/prompt/util.mjs.map +1 -1
- package/dist/es/ai-model/prompt/yaml-generator.mjs +10 -10
- package/dist/es/ai-model/prompt/yaml-generator.mjs.map +1 -1
- package/dist/es/ai-model/service-caller/index.mjs +182 -274
- package/dist/es/ai-model/service-caller/index.mjs.map +1 -1
- package/dist/es/ai-model/ui-tars-planning.mjs +69 -8
- package/dist/es/ai-model/ui-tars-planning.mjs.map +1 -1
- package/dist/es/{ai-model/common.mjs → common.mjs} +18 -30
- package/dist/es/common.mjs.map +1 -0
- package/dist/es/device/device-options.mjs +0 -0
- package/dist/es/device/index.mjs +29 -12
- package/dist/es/device/index.mjs.map +1 -1
- package/dist/es/index.mjs +5 -4
- package/dist/es/index.mjs.map +1 -1
- package/dist/es/report.mjs.map +1 -1
- package/dist/es/{insight → service}/index.mjs +38 -51
- package/dist/es/service/index.mjs.map +1 -0
- package/dist/es/{insight → service}/utils.mjs +3 -3
- package/dist/es/service/utils.mjs.map +1 -0
- package/dist/es/task-runner.mjs +264 -0
- package/dist/es/task-runner.mjs.map +1 -0
- package/dist/es/tree.mjs +13 -2
- package/dist/es/tree.mjs.map +1 -0
- package/dist/es/types.mjs +18 -1
- package/dist/es/types.mjs.map +1 -1
- package/dist/es/utils.mjs +6 -7
- package/dist/es/utils.mjs.map +1 -1
- package/dist/es/yaml/builder.mjs.map +1 -1
- package/dist/es/yaml/player.mjs +121 -98
- package/dist/es/yaml/player.mjs.map +1 -1
- package/dist/es/yaml/utils.mjs +1 -1
- package/dist/es/yaml/utils.mjs.map +1 -1
- package/dist/lib/agent/agent.js +231 -142
- package/dist/lib/agent/agent.js.map +1 -1
- package/dist/lib/agent/common.js +1 -1
- package/dist/lib/agent/execution-session.js +75 -0
- package/dist/lib/agent/execution-session.js.map +1 -0
- package/dist/lib/agent/index.js +14 -14
- package/dist/lib/agent/index.js.map +1 -1
- package/dist/lib/agent/task-builder.js +356 -0
- package/dist/lib/agent/task-builder.js.map +1 -0
- package/dist/lib/agent/task-cache.js +8 -8
- package/dist/lib/agent/task-cache.js.map +1 -1
- package/dist/lib/agent/tasks.js +202 -506
- package/dist/lib/agent/tasks.js.map +1 -1
- package/dist/lib/agent/ui-utils.js +58 -36
- package/dist/lib/agent/ui-utils.js.map +1 -1
- package/dist/lib/agent/utils.js +26 -68
- package/dist/lib/agent/utils.js.map +1 -1
- package/dist/lib/ai-model/conversation-history.js +27 -15
- package/dist/lib/ai-model/conversation-history.js.map +1 -1
- package/dist/lib/ai-model/index.js +27 -27
- package/dist/lib/ai-model/index.js.map +1 -1
- package/dist/lib/ai-model/inspect.js +51 -57
- package/dist/lib/ai-model/inspect.js.map +1 -1
- package/dist/lib/ai-model/llm-planning.js +49 -67
- package/dist/lib/ai-model/llm-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/assertion.js +2 -2
- package/dist/lib/ai-model/prompt/assertion.js.map +1 -1
- package/dist/lib/ai-model/prompt/common.js +2 -2
- package/dist/lib/ai-model/prompt/common.js.map +1 -1
- package/dist/lib/ai-model/prompt/describe.js +2 -2
- package/dist/lib/ai-model/prompt/describe.js.map +1 -1
- package/dist/lib/ai-model/prompt/extraction.js +2 -2
- package/dist/lib/ai-model/prompt/extraction.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-locator.js +14 -241
- package/dist/lib/ai-model/prompt/llm-locator.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-planning.js +79 -328
- package/dist/lib/ai-model/prompt/llm-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/llm-section-locator.js +17 -16
- package/dist/lib/ai-model/prompt/llm-section-locator.js.map +1 -1
- package/dist/lib/ai-model/prompt/order-sensitive-judge.js +72 -0
- package/dist/lib/ai-model/prompt/order-sensitive-judge.js.map +1 -0
- package/dist/lib/ai-model/prompt/playwright-generator.js +11 -11
- package/dist/lib/ai-model/prompt/playwright-generator.js.map +1 -1
- package/dist/lib/ai-model/prompt/ui-tars-locator.js +2 -2
- package/dist/lib/ai-model/prompt/ui-tars-locator.js.map +1 -1
- package/dist/lib/ai-model/prompt/ui-tars-planning.js +2 -2
- package/dist/lib/ai-model/prompt/ui-tars-planning.js.map +1 -1
- package/dist/lib/ai-model/prompt/util.js +7 -95
- package/dist/lib/ai-model/prompt/util.js.map +1 -1
- package/dist/lib/ai-model/prompt/yaml-generator.js +18 -18
- package/dist/lib/ai-model/prompt/yaml-generator.js.map +1 -1
- package/dist/lib/ai-model/service-caller/index.js +288 -401
- package/dist/lib/ai-model/service-caller/index.js.map +1 -1
- package/dist/lib/ai-model/ui-tars-planning.js +71 -10
- package/dist/lib/ai-model/ui-tars-planning.js.map +1 -1
- package/dist/lib/{ai-model/common.js → common.js} +40 -55
- package/dist/lib/common.js.map +1 -0
- package/dist/lib/device/device-options.js +20 -0
- package/dist/lib/device/device-options.js.map +1 -0
- package/dist/lib/device/index.js +63 -40
- package/dist/lib/device/index.js.map +1 -1
- package/dist/lib/image/index.js +5 -5
- package/dist/lib/image/index.js.map +1 -1
- package/dist/lib/index.js +24 -20
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/report.js +2 -2
- package/dist/lib/report.js.map +1 -1
- package/dist/lib/{insight → service}/index.js +41 -54
- package/dist/lib/service/index.js.map +1 -0
- package/dist/lib/{insight → service}/utils.js +7 -7
- package/dist/lib/service/utils.js.map +1 -0
- package/dist/lib/task-runner.js +301 -0
- package/dist/lib/task-runner.js.map +1 -0
- package/dist/lib/tree.js +13 -4
- package/dist/lib/tree.js.map +1 -1
- package/dist/lib/types.js +31 -12
- package/dist/lib/types.js.map +1 -1
- package/dist/lib/utils.js +16 -17
- package/dist/lib/utils.js.map +1 -1
- package/dist/lib/yaml/builder.js +2 -2
- package/dist/lib/yaml/builder.js.map +1 -1
- package/dist/lib/yaml/index.js +16 -22
- package/dist/lib/yaml/index.js.map +1 -1
- package/dist/lib/yaml/player.js +123 -100
- package/dist/lib/yaml/player.js.map +1 -1
- package/dist/lib/yaml/utils.js +6 -6
- package/dist/lib/yaml/utils.js.map +1 -1
- package/dist/lib/yaml.js +1 -1
- package/dist/lib/yaml.js.map +1 -1
- package/dist/types/agent/agent.d.ts +62 -17
- package/dist/types/agent/execution-session.d.ts +36 -0
- package/dist/types/agent/index.d.ts +3 -2
- package/dist/types/agent/task-builder.d.ts +35 -0
- package/dist/types/agent/tasks.d.ts +32 -23
- package/dist/types/agent/ui-utils.d.ts +9 -2
- package/dist/types/agent/utils.d.ts +9 -35
- package/dist/types/ai-model/conversation-history.d.ts +8 -4
- package/dist/types/ai-model/index.d.ts +5 -5
- package/dist/types/ai-model/inspect.d.ts +20 -12
- package/dist/types/ai-model/llm-planning.d.ts +3 -1
- package/dist/types/ai-model/prompt/llm-locator.d.ts +1 -6
- package/dist/types/ai-model/prompt/llm-planning.d.ts +2 -3
- package/dist/types/ai-model/prompt/llm-section-locator.d.ts +1 -3
- package/dist/types/ai-model/prompt/order-sensitive-judge.d.ts +2 -0
- package/dist/types/ai-model/prompt/util.d.ts +2 -34
- package/dist/types/ai-model/service-caller/index.d.ts +2 -3
- package/dist/types/ai-model/ui-tars-planning.d.ts +15 -2
- package/dist/types/{ai-model/common.d.ts → common.d.ts} +6 -6
- package/dist/types/device/device-options.d.ts +57 -0
- package/dist/types/device/index.d.ts +55 -39
- package/dist/types/index.d.ts +7 -6
- package/dist/types/service/index.d.ts +26 -0
- package/dist/types/service/utils.d.ts +2 -0
- package/dist/types/task-runner.d.ts +49 -0
- package/dist/types/tree.d.ts +4 -1
- package/dist/types/types.d.ts +103 -66
- package/dist/types/yaml/utils.d.ts +1 -1
- package/dist/types/yaml.d.ts +68 -43
- package/package.json +9 -12
- package/dist/es/ai-model/action-executor.mjs +0 -129
- package/dist/es/ai-model/action-executor.mjs.map +0 -1
- package/dist/es/ai-model/common.mjs.map +0 -1
- package/dist/es/insight/index.mjs.map +0 -1
- package/dist/es/insight/utils.mjs.map +0 -1
- package/dist/lib/ai-model/action-executor.js +0 -163
- package/dist/lib/ai-model/action-executor.js.map +0 -1
- package/dist/lib/ai-model/common.js.map +0 -1
- package/dist/lib/insight/index.js.map +0 -1
- package/dist/lib/insight/utils.js.map +0 -1
- package/dist/types/ai-model/action-executor.d.ts +0 -19
- package/dist/types/insight/index.d.ts +0 -31
- package/dist/types/insight/utils.d.ts +0 -2
|
@@ -1,21 +1,5 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
-
var
|
|
3
|
-
"langsmith/wrappers": function(module) {
|
|
4
|
-
module.exports = import("langsmith/wrappers").then(function(module) {
|
|
5
|
-
return module;
|
|
6
|
-
});
|
|
7
|
-
}
|
|
8
|
-
};
|
|
9
|
-
var __webpack_module_cache__ = {};
|
|
10
|
-
function __webpack_require__(moduleId) {
|
|
11
|
-
var cachedModule = __webpack_module_cache__[moduleId];
|
|
12
|
-
if (void 0 !== cachedModule) return cachedModule.exports;
|
|
13
|
-
var module = __webpack_module_cache__[moduleId] = {
|
|
14
|
-
exports: {}
|
|
15
|
-
};
|
|
16
|
-
__webpack_modules__[moduleId](module, module.exports, __webpack_require__);
|
|
17
|
-
return module.exports;
|
|
18
|
-
}
|
|
2
|
+
var __webpack_require__ = {};
|
|
19
3
|
(()=>{
|
|
20
4
|
__webpack_require__.n = (module)=>{
|
|
21
5
|
var getter = module && module.__esModule ? ()=>module['default'] : ()=>module;
|
|
@@ -47,420 +31,323 @@ function __webpack_require__(moduleId) {
|
|
|
47
31
|
};
|
|
48
32
|
})();
|
|
49
33
|
var __webpack_exports__ = {};
|
|
50
|
-
(
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
const
|
|
67
|
-
|
|
68
|
-
const
|
|
69
|
-
const
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
let openai;
|
|
80
|
-
let proxyAgent;
|
|
81
|
-
const debugProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy');
|
|
82
|
-
if (httpProxy) {
|
|
83
|
-
debugProxy('using http proxy', httpProxy);
|
|
84
|
-
proxyAgent = new external_https_proxy_agent_namespaceObject.HttpsProxyAgent(httpProxy);
|
|
85
|
-
} else if (socksProxy) {
|
|
86
|
-
debugProxy('using socks proxy', socksProxy);
|
|
87
|
-
proxyAgent = new external_socks_proxy_agent_namespaceObject.SocksProxyAgent(socksProxy);
|
|
34
|
+
__webpack_require__.r(__webpack_exports__);
|
|
35
|
+
__webpack_require__.d(__webpack_exports__, {
|
|
36
|
+
callAIWithObjectResponse: ()=>callAIWithObjectResponse,
|
|
37
|
+
extractJSONFromCodeBlock: ()=>extractJSONFromCodeBlock,
|
|
38
|
+
preprocessDoubaoBboxJson: ()=>preprocessDoubaoBboxJson,
|
|
39
|
+
callAIWithStringResponse: ()=>callAIWithStringResponse,
|
|
40
|
+
safeParseJson: ()=>safeParseJson,
|
|
41
|
+
callAI: ()=>callAI
|
|
42
|
+
});
|
|
43
|
+
const env_namespaceObject = require("@midscene/shared/env");
|
|
44
|
+
const logger_namespaceObject = require("@midscene/shared/logger");
|
|
45
|
+
const utils_namespaceObject = require("@midscene/shared/utils");
|
|
46
|
+
const external_jsonrepair_namespaceObject = require("jsonrepair");
|
|
47
|
+
const external_openai_namespaceObject = require("openai");
|
|
48
|
+
var external_openai_default = /*#__PURE__*/ __webpack_require__.n(external_openai_namespaceObject);
|
|
49
|
+
async function createChatClient({ AIActionTypeValue, modelConfig }) {
|
|
50
|
+
const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion: uiTarsVersion, vlMode, createOpenAIClient, timeout } = modelConfig;
|
|
51
|
+
let proxyAgent;
|
|
52
|
+
const debugProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy');
|
|
53
|
+
const sanitizeProxyUrl = (url)=>{
|
|
54
|
+
try {
|
|
55
|
+
const parsed = new URL(url);
|
|
56
|
+
if (parsed.username) {
|
|
57
|
+
parsed.password = '****';
|
|
58
|
+
return parsed.href;
|
|
59
|
+
}
|
|
60
|
+
return url;
|
|
61
|
+
} catch {
|
|
62
|
+
return url;
|
|
88
63
|
}
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
if (azureOpenaiScope) {
|
|
99
|
-
(0, utils_namespaceObject.assert)(!utils_namespaceObject.ifInBrowser, 'Azure OpenAI is not supported in browser with Midscene.');
|
|
100
|
-
const credential = new identity_namespaceObject.DefaultAzureCredential();
|
|
101
|
-
tokenProvider = (0, identity_namespaceObject.getBearerTokenProvider)(credential, azureOpenaiScope);
|
|
102
|
-
openai = new external_openai_namespaceObject.AzureOpenAI({
|
|
103
|
-
azureADTokenProvider: tokenProvider,
|
|
104
|
-
endpoint: azureOpenaiEndpoint,
|
|
105
|
-
apiVersion: azureOpenaiApiVersion,
|
|
106
|
-
deployment: azureOpenaiDeployment,
|
|
107
|
-
...openaiExtraConfig,
|
|
108
|
-
...azureExtraConfig
|
|
109
|
-
});
|
|
110
|
-
} else openai = new external_openai_namespaceObject.AzureOpenAI({
|
|
111
|
-
apiKey: azureOpenaiKey,
|
|
112
|
-
endpoint: azureOpenaiEndpoint,
|
|
113
|
-
apiVersion: azureOpenaiApiVersion,
|
|
114
|
-
deployment: azureOpenaiDeployment,
|
|
115
|
-
dangerouslyAllowBrowser: true,
|
|
116
|
-
...openaiExtraConfig,
|
|
117
|
-
...azureExtraConfig
|
|
64
|
+
};
|
|
65
|
+
if (httpProxy) {
|
|
66
|
+
debugProxy('using http proxy', sanitizeProxyUrl(httpProxy));
|
|
67
|
+
if (utils_namespaceObject.ifInBrowser) console.warn('HTTP proxy is configured but not supported in browser environment');
|
|
68
|
+
else {
|
|
69
|
+
const moduleName = 'undici';
|
|
70
|
+
const { ProxyAgent } = await import(moduleName);
|
|
71
|
+
proxyAgent = new ProxyAgent({
|
|
72
|
+
uri: httpProxy
|
|
118
73
|
});
|
|
119
|
-
} else if (!useAnthropicSdk) openai = new (external_openai_default())({
|
|
120
|
-
baseURL: openaiBaseURL,
|
|
121
|
-
apiKey: openaiApiKey,
|
|
122
|
-
httpAgent: proxyAgent,
|
|
123
|
-
...openaiExtraConfig,
|
|
124
|
-
defaultHeaders: {
|
|
125
|
-
...(null == openaiExtraConfig ? void 0 : openaiExtraConfig.defaultHeaders) || {},
|
|
126
|
-
[env_namespaceObject.MIDSCENE_API_TYPE]: AIActionTypeValue.toString()
|
|
127
|
-
},
|
|
128
|
-
dangerouslyAllowBrowser: true
|
|
129
|
-
});
|
|
130
|
-
if (openai && env_namespaceObject.globalConfigManager.getEnvConfigInBoolean(env_namespaceObject.MIDSCENE_LANGSMITH_DEBUG)) {
|
|
131
|
-
if (utils_namespaceObject.ifInBrowser) throw new Error('langsmith is not supported in browser');
|
|
132
|
-
console.log('DEBUGGING MODE: langsmith wrapper enabled');
|
|
133
|
-
const { wrapOpenAI } = await Promise.resolve().then(__webpack_require__.bind(__webpack_require__, "langsmith/wrappers"));
|
|
134
|
-
openai = wrapOpenAI(openai);
|
|
135
74
|
}
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
75
|
+
} else if (socksProxy) {
|
|
76
|
+
debugProxy('using socks proxy', sanitizeProxyUrl(socksProxy));
|
|
77
|
+
if (utils_namespaceObject.ifInBrowser) console.warn('SOCKS proxy is configured but not supported in browser environment');
|
|
78
|
+
else try {
|
|
79
|
+
const moduleName = 'fetch-socks';
|
|
80
|
+
const { socksDispatcher } = await import(moduleName);
|
|
81
|
+
const proxyUrl = new URL(socksProxy);
|
|
82
|
+
if (!proxyUrl.hostname) throw new Error('SOCKS proxy URL must include a valid hostname');
|
|
83
|
+
const port = Number.parseInt(proxyUrl.port, 10);
|
|
84
|
+
if (!proxyUrl.port || Number.isNaN(port)) throw new Error('SOCKS proxy URL must include a valid port');
|
|
85
|
+
const protocol = proxyUrl.protocol.replace(':', '');
|
|
86
|
+
const socksType = 'socks4' === protocol ? 4 : 'socks5' === protocol ? 5 : 5;
|
|
87
|
+
proxyAgent = socksDispatcher({
|
|
88
|
+
type: socksType,
|
|
89
|
+
host: proxyUrl.hostname,
|
|
90
|
+
port,
|
|
91
|
+
...proxyUrl.username ? {
|
|
92
|
+
userId: decodeURIComponent(proxyUrl.username),
|
|
93
|
+
password: decodeURIComponent(proxyUrl.password || '')
|
|
94
|
+
} : {}
|
|
95
|
+
});
|
|
96
|
+
debugProxy('socks proxy configured successfully', {
|
|
97
|
+
type: socksType,
|
|
98
|
+
host: proxyUrl.hostname,
|
|
99
|
+
port: port
|
|
100
|
+
});
|
|
101
|
+
} catch (error) {
|
|
102
|
+
console.error('Failed to configure SOCKS proxy:', error);
|
|
103
|
+
throw new Error(`Invalid SOCKS proxy URL: ${socksProxy}. Expected format: socks4://host:port, socks5://host:port, or with authentication: socks5://user:pass@host:port`);
|
|
104
|
+
}
|
|
158
105
|
}
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
106
|
+
const openAIOptions = {
|
|
107
|
+
baseURL: openaiBaseURL,
|
|
108
|
+
apiKey: openaiApiKey,
|
|
109
|
+
...proxyAgent ? {
|
|
110
|
+
fetchOptions: {
|
|
111
|
+
dispatcher: proxyAgent
|
|
112
|
+
}
|
|
113
|
+
} : {},
|
|
114
|
+
...openaiExtraConfig,
|
|
115
|
+
...'number' == typeof timeout ? {
|
|
116
|
+
timeout
|
|
117
|
+
} : {},
|
|
118
|
+
dangerouslyAllowBrowser: true
|
|
119
|
+
};
|
|
120
|
+
const baseOpenAI = new (external_openai_default())(openAIOptions);
|
|
121
|
+
let openai = baseOpenAI;
|
|
122
|
+
if (openai && env_namespaceObject.globalConfigManager.getEnvConfigInBoolean(env_namespaceObject.MIDSCENE_LANGSMITH_DEBUG)) {
|
|
123
|
+
if (utils_namespaceObject.ifInBrowser) throw new Error('langsmith is not supported in browser');
|
|
124
|
+
console.log('DEBUGGING MODE: langsmith wrapper enabled');
|
|
125
|
+
const langsmithModule = 'langsmith/wrappers';
|
|
126
|
+
const { wrapOpenAI } = await import(langsmithModule);
|
|
127
|
+
openai = wrapOpenAI(openai);
|
|
128
|
+
}
|
|
129
|
+
if (openai && env_namespaceObject.globalConfigManager.getEnvConfigInBoolean(env_namespaceObject.MIDSCENE_LANGFUSE_DEBUG)) {
|
|
130
|
+
if (utils_namespaceObject.ifInBrowser) throw new Error('langfuse is not supported in browser');
|
|
131
|
+
console.log('DEBUGGING MODE: langfuse wrapper enabled');
|
|
132
|
+
const langfuseModule = 'langfuse';
|
|
133
|
+
const { observeOpenAI } = await import(langfuseModule);
|
|
134
|
+
openai = observeOpenAI(openai);
|
|
135
|
+
}
|
|
136
|
+
if (createOpenAIClient) {
|
|
137
|
+
const wrappedClient = await createOpenAIClient(baseOpenAI, openAIOptions);
|
|
138
|
+
if (wrappedClient) openai = wrappedClient;
|
|
139
|
+
}
|
|
140
|
+
return {
|
|
141
|
+
completion: openai.chat.completions,
|
|
142
|
+
modelName,
|
|
143
|
+
modelDescription,
|
|
144
|
+
uiTarsVersion,
|
|
145
|
+
vlMode
|
|
146
|
+
};
|
|
147
|
+
}
|
|
148
|
+
async function callAI(messages, AIActionTypeValue, modelConfig, options) {
|
|
149
|
+
const { completion, modelName, modelDescription, uiTarsVersion, vlMode } = await createChatClient({
|
|
150
|
+
AIActionTypeValue,
|
|
151
|
+
modelConfig
|
|
152
|
+
});
|
|
153
|
+
const maxTokens = env_namespaceObject.globalConfigManager.getEnvConfigValue(env_namespaceObject.MIDSCENE_MODEL_MAX_TOKENS) ?? env_namespaceObject.globalConfigManager.getEnvConfigValue(env_namespaceObject.OPENAI_MAX_TOKENS);
|
|
154
|
+
const debugCall = (0, logger_namespaceObject.getDebug)('ai:call');
|
|
155
|
+
const debugProfileStats = (0, logger_namespaceObject.getDebug)('ai:profile:stats');
|
|
156
|
+
const debugProfileDetail = (0, logger_namespaceObject.getDebug)('ai:profile:detail');
|
|
157
|
+
const startTime = Date.now();
|
|
158
|
+
const isStreaming = options?.stream && options?.onChunk;
|
|
159
|
+
let content;
|
|
160
|
+
let accumulated = '';
|
|
161
|
+
let usage;
|
|
162
|
+
let timeCost;
|
|
163
|
+
const buildUsageInfo = (usageData)=>{
|
|
164
|
+
if (!usageData) return;
|
|
165
|
+
const cachedInputTokens = usageData?.prompt_tokens_details?.cached_tokens;
|
|
166
|
+
return {
|
|
167
|
+
prompt_tokens: usageData.prompt_tokens ?? 0,
|
|
168
|
+
completion_tokens: usageData.completion_tokens ?? 0,
|
|
169
|
+
total_tokens: usageData.total_tokens ?? 0,
|
|
170
|
+
cached_input: cachedInputTokens ?? 0,
|
|
171
|
+
time_cost: timeCost ?? 0,
|
|
172
|
+
model_name: modelName,
|
|
173
|
+
model_description: modelDescription,
|
|
174
|
+
intent: modelConfig.intent
|
|
182
175
|
};
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
prompt_tokens: estimatedTokens,
|
|
217
|
-
completion_tokens: estimatedTokens,
|
|
218
|
-
total_tokens: 2 * estimatedTokens
|
|
219
|
-
};
|
|
220
|
-
}
|
|
221
|
-
const finalChunk = {
|
|
222
|
-
content: '',
|
|
223
|
-
accumulated,
|
|
224
|
-
reasoning_content: '',
|
|
225
|
-
isComplete: true,
|
|
226
|
-
usage: {
|
|
227
|
-
prompt_tokens: usage.prompt_tokens ?? 0,
|
|
228
|
-
completion_tokens: usage.completion_tokens ?? 0,
|
|
229
|
-
total_tokens: usage.total_tokens ?? 0,
|
|
230
|
-
time_cost: timeCost ?? 0,
|
|
231
|
-
model_name: modelName,
|
|
232
|
-
model_description: modelDescription,
|
|
233
|
-
intent: modelConfig.intent
|
|
234
|
-
}
|
|
235
|
-
};
|
|
236
|
-
options.onChunk(finalChunk);
|
|
237
|
-
break;
|
|
238
|
-
}
|
|
239
|
-
}
|
|
240
|
-
content = accumulated;
|
|
241
|
-
debugProfileStats(`streaming model, ${modelName}, mode, ${vlMode || 'default'}, cost-ms, ${timeCost}`);
|
|
242
|
-
} else {
|
|
243
|
-
var _result_usage, _result_usage1, _result_usage2;
|
|
244
|
-
const result = await completion.create({
|
|
245
|
-
model: modelName,
|
|
246
|
-
messages,
|
|
247
|
-
response_format: responseFormat,
|
|
248
|
-
...commonConfig
|
|
249
|
-
});
|
|
250
|
-
timeCost = Date.now() - startTime;
|
|
251
|
-
debugProfileStats(`model, ${modelName}, mode, ${vlMode || 'default'}, ui-tars-version, ${uiTarsVersion}, prompt-tokens, ${(null == (_result_usage = result.usage) ? void 0 : _result_usage.prompt_tokens) || ''}, completion-tokens, ${(null == (_result_usage1 = result.usage) ? void 0 : _result_usage1.completion_tokens) || ''}, total-tokens, ${(null == (_result_usage2 = result.usage) ? void 0 : _result_usage2.total_tokens) || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}`);
|
|
252
|
-
debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
|
|
253
|
-
(0, utils_namespaceObject.assert)(result.choices, `invalid response from LLM service: ${JSON.stringify(result)}`);
|
|
254
|
-
content = result.choices[0].message.content;
|
|
255
|
-
usage = result.usage;
|
|
176
|
+
};
|
|
177
|
+
const commonConfig = {
|
|
178
|
+
temperature: 'vlm-ui-tars' === vlMode ? 0.0 : void 0,
|
|
179
|
+
stream: !!isStreaming,
|
|
180
|
+
max_tokens: 'number' == typeof maxTokens ? maxTokens : void 0,
|
|
181
|
+
...'qwen2.5-vl' === vlMode ? {
|
|
182
|
+
vl_high_resolution_images: true
|
|
183
|
+
} : {}
|
|
184
|
+
};
|
|
185
|
+
try {
|
|
186
|
+
debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${modelName}`);
|
|
187
|
+
if (isStreaming) {
|
|
188
|
+
const stream = await completion.create({
|
|
189
|
+
model: modelName,
|
|
190
|
+
messages,
|
|
191
|
+
...commonConfig
|
|
192
|
+
}, {
|
|
193
|
+
stream: true
|
|
194
|
+
});
|
|
195
|
+
for await (const chunk of stream){
|
|
196
|
+
const content = chunk.choices?.[0]?.delta?.content || '';
|
|
197
|
+
const reasoning_content = chunk.choices?.[0]?.delta?.reasoning_content || '';
|
|
198
|
+
if (chunk.usage) usage = chunk.usage;
|
|
199
|
+
if (content || reasoning_content) {
|
|
200
|
+
accumulated += content;
|
|
201
|
+
const chunkData = {
|
|
202
|
+
content,
|
|
203
|
+
reasoning_content,
|
|
204
|
+
accumulated,
|
|
205
|
+
isComplete: false,
|
|
206
|
+
usage: void 0
|
|
207
|
+
};
|
|
208
|
+
options.onChunk(chunkData);
|
|
256
209
|
}
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
return {
|
|
266
|
-
source: {
|
|
267
|
-
type: 'base64',
|
|
268
|
-
media_type: mimeType,
|
|
269
|
-
data: body
|
|
270
|
-
},
|
|
271
|
-
type: 'image'
|
|
210
|
+
if (chunk.choices?.[0]?.finish_reason) {
|
|
211
|
+
timeCost = Date.now() - startTime;
|
|
212
|
+
if (!usage) {
|
|
213
|
+
const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
|
|
214
|
+
usage = {
|
|
215
|
+
prompt_tokens: estimatedTokens,
|
|
216
|
+
completion_tokens: estimatedTokens,
|
|
217
|
+
total_tokens: 2 * estimatedTokens
|
|
272
218
|
};
|
|
273
219
|
}
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
})),
|
|
284
|
-
response_format: responseFormat,
|
|
285
|
-
...commonConfig
|
|
286
|
-
});
|
|
287
|
-
for await (const chunk of stream){
|
|
288
|
-
var _chunk_delta;
|
|
289
|
-
const content = (null == (_chunk_delta = chunk.delta) ? void 0 : _chunk_delta.text) || '';
|
|
290
|
-
if (content) {
|
|
291
|
-
accumulated += content;
|
|
292
|
-
const chunkData = {
|
|
293
|
-
content,
|
|
294
|
-
accumulated,
|
|
295
|
-
reasoning_content: '',
|
|
296
|
-
isComplete: false,
|
|
297
|
-
usage: void 0
|
|
298
|
-
};
|
|
299
|
-
options.onChunk(chunkData);
|
|
300
|
-
}
|
|
301
|
-
if ('message_stop' === chunk.type) {
|
|
302
|
-
timeCost = Date.now() - startTime;
|
|
303
|
-
const anthropicUsage = chunk.usage;
|
|
304
|
-
const finalChunk = {
|
|
305
|
-
content: '',
|
|
306
|
-
accumulated,
|
|
307
|
-
reasoning_content: '',
|
|
308
|
-
isComplete: true,
|
|
309
|
-
usage: anthropicUsage ? {
|
|
310
|
-
prompt_tokens: anthropicUsage.input_tokens ?? 0,
|
|
311
|
-
completion_tokens: anthropicUsage.output_tokens ?? 0,
|
|
312
|
-
total_tokens: (anthropicUsage.input_tokens ?? 0) + (anthropicUsage.output_tokens ?? 0),
|
|
313
|
-
time_cost: timeCost ?? 0,
|
|
314
|
-
model_name: modelName,
|
|
315
|
-
model_description: modelDescription,
|
|
316
|
-
intent: modelConfig.intent
|
|
317
|
-
} : void 0
|
|
318
|
-
};
|
|
319
|
-
options.onChunk(finalChunk);
|
|
320
|
-
break;
|
|
321
|
-
}
|
|
322
|
-
}
|
|
323
|
-
content = accumulated;
|
|
324
|
-
} else {
|
|
325
|
-
const result = await completion.create({
|
|
326
|
-
model: modelName,
|
|
327
|
-
system: 'You are a versatile professional in software UI automation',
|
|
328
|
-
messages: messages.map((m)=>({
|
|
329
|
-
role: 'user',
|
|
330
|
-
content: Array.isArray(m.content) ? m.content.map(convertImageContent) : m.content
|
|
331
|
-
})),
|
|
332
|
-
response_format: responseFormat,
|
|
333
|
-
...commonConfig
|
|
334
|
-
});
|
|
335
|
-
timeCost = Date.now() - startTime;
|
|
336
|
-
content = result.content[0].text;
|
|
337
|
-
usage = result.usage;
|
|
220
|
+
const finalChunk = {
|
|
221
|
+
content: '',
|
|
222
|
+
accumulated,
|
|
223
|
+
reasoning_content: '',
|
|
224
|
+
isComplete: true,
|
|
225
|
+
usage: buildUsageInfo(usage)
|
|
226
|
+
};
|
|
227
|
+
options.onChunk(finalChunk);
|
|
228
|
+
break;
|
|
338
229
|
}
|
|
339
|
-
(0, utils_namespaceObject.assert)(content, 'empty content');
|
|
340
|
-
}
|
|
341
|
-
if (isStreaming && !usage) {
|
|
342
|
-
const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
|
|
343
|
-
usage = {
|
|
344
|
-
prompt_tokens: estimatedTokens,
|
|
345
|
-
completion_tokens: estimatedTokens,
|
|
346
|
-
total_tokens: 2 * estimatedTokens
|
|
347
|
-
};
|
|
348
230
|
}
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
model_name: modelName,
|
|
357
|
-
model_description: modelDescription,
|
|
358
|
-
intent: modelConfig.intent
|
|
359
|
-
} : void 0,
|
|
360
|
-
isStreamed: !!isStreaming
|
|
361
|
-
};
|
|
362
|
-
} catch (e) {
|
|
363
|
-
console.error(' call AI error', e);
|
|
364
|
-
const newError = new Error(`failed to call ${isStreaming ? 'streaming ' : ''}AI model service: ${e.message}. Trouble shooting: https://midscenejs.com/model-provider.html`, {
|
|
365
|
-
cause: e
|
|
231
|
+
content = accumulated;
|
|
232
|
+
debugProfileStats(`streaming model, ${modelName}, mode, ${vlMode || 'default'}, cost-ms, ${timeCost}`);
|
|
233
|
+
} else {
|
|
234
|
+
const result = await completion.create({
|
|
235
|
+
model: modelName,
|
|
236
|
+
messages,
|
|
237
|
+
...commonConfig
|
|
366
238
|
});
|
|
367
|
-
|
|
239
|
+
timeCost = Date.now() - startTime;
|
|
240
|
+
debugProfileStats(`model, ${modelName}, mode, ${vlMode || 'default'}, ui-tars-version, ${uiTarsVersion}, prompt-tokens, ${result.usage?.prompt_tokens || ''}, completion-tokens, ${result.usage?.completion_tokens || ''}, total-tokens, ${result.usage?.total_tokens || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}`);
|
|
241
|
+
debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
|
|
242
|
+
(0, utils_namespaceObject.assert)(result.choices, `invalid response from LLM service: ${JSON.stringify(result)}`);
|
|
243
|
+
content = result.choices[0].message.content;
|
|
244
|
+
usage = result.usage;
|
|
368
245
|
}
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
break;
|
|
379
|
-
case external_common_js_namespaceObject.AIActionType.PLAN:
|
|
380
|
-
responseFormat = llm_planning_js_namespaceObject.planSchema;
|
|
381
|
-
break;
|
|
382
|
-
case external_common_js_namespaceObject.AIActionType.EXTRACT_DATA:
|
|
383
|
-
case external_common_js_namespaceObject.AIActionType.DESCRIBE_ELEMENT:
|
|
384
|
-
responseFormat = {
|
|
385
|
-
type: external_types_js_namespaceObject.AIResponseFormat.JSON
|
|
386
|
-
};
|
|
387
|
-
break;
|
|
388
|
-
case external_common_js_namespaceObject.AIActionType.TEXT:
|
|
389
|
-
responseFormat = void 0;
|
|
390
|
-
break;
|
|
246
|
+
debugCall(`response: ${content}`);
|
|
247
|
+
(0, utils_namespaceObject.assert)(content, 'empty content');
|
|
248
|
+
if (isStreaming && !usage) {
|
|
249
|
+
const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
|
|
250
|
+
usage = {
|
|
251
|
+
prompt_tokens: estimatedTokens,
|
|
252
|
+
completion_tokens: estimatedTokens,
|
|
253
|
+
total_tokens: 2 * estimatedTokens
|
|
254
|
+
};
|
|
391
255
|
}
|
|
392
|
-
if ('gpt-4o-2024-05-13' === modelName && AIActionTypeValue !== external_common_js_namespaceObject.AIActionType.TEXT) responseFormat = {
|
|
393
|
-
type: external_types_js_namespaceObject.AIResponseFormat.JSON
|
|
394
|
-
};
|
|
395
|
-
return responseFormat;
|
|
396
|
-
};
|
|
397
|
-
async function callAIWithObjectResponse(messages, AIActionTypeValue, modelConfig) {
|
|
398
|
-
const response = await callAI(messages, AIActionTypeValue, modelConfig);
|
|
399
|
-
(0, utils_namespaceObject.assert)(response, 'empty response');
|
|
400
|
-
const vlMode = modelConfig.vlMode;
|
|
401
|
-
const jsonContent = safeParseJson(response.content, vlMode);
|
|
402
|
-
return {
|
|
403
|
-
content: jsonContent,
|
|
404
|
-
usage: response.usage
|
|
405
|
-
};
|
|
406
|
-
}
|
|
407
|
-
async function callAIWithStringResponse(msgs, AIActionTypeValue, modelConfig) {
|
|
408
|
-
const { content, usage } = await callAI(msgs, AIActionTypeValue, modelConfig);
|
|
409
256
|
return {
|
|
410
|
-
content,
|
|
411
|
-
usage
|
|
257
|
+
content: content || '',
|
|
258
|
+
usage: buildUsageInfo(usage),
|
|
259
|
+
isStreamed: !!isStreaming
|
|
412
260
|
};
|
|
261
|
+
} catch (e) {
|
|
262
|
+
console.error(' call AI error', e);
|
|
263
|
+
const newError = new Error(`failed to call ${isStreaming ? 'streaming ' : ''}AI model service (${modelName}): ${e.message}. Trouble shooting: https://midscenejs.com/model-provider.html`, {
|
|
264
|
+
cause: e
|
|
265
|
+
});
|
|
266
|
+
throw newError;
|
|
413
267
|
}
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
}
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
268
|
+
}
|
|
269
|
+
/**
 * Call the AI model service and parse its reply as a JSON value.
 *
 * @param messages - chat messages forwarded to `callAI`
 * @param AIActionTypeValue - action type forwarded to `callAI`
 * @param modelConfig - model configuration; `vlMode` steers JSON repair in `safeParseJson`
 * @returns `{ content, contentString, usage }` where `content` is the parsed JSON,
 *          `contentString` the raw model text, and `usage` the token usage info
 * @throws when the service returns an empty response or the reply cannot be parsed as JSON
 */
async function callAIWithObjectResponse(messages, AIActionTypeValue, modelConfig) {
    const rawResponse = await callAI(messages, AIActionTypeValue, modelConfig);
    (0, utils_namespaceObject.assert)(rawResponse, 'empty response');
    // Parse the textual reply; vlMode enables model-specific repair heuristics.
    const parsedContent = safeParseJson(rawResponse.content, modelConfig.vlMode);
    return {
        content: parsedContent,
        contentString: rawResponse.content,
        usage: rawResponse.usage
    };
}
|
|
280
|
+
/**
 * Call the AI model service and return its raw text reply plus token usage.
 *
 * @param msgs - chat messages forwarded to `callAI`
 * @param AIActionTypeValue - action type forwarded to `callAI`
 * @param modelConfig - model configuration forwarded to `callAI`
 * @returns `{ content, usage }` with the untouched text content and usage info
 */
async function callAIWithStringResponse(msgs, AIActionTypeValue, modelConfig) {
    const response = await callAI(msgs, AIActionTypeValue, modelConfig);
    return {
        content: response.content,
        usage: response.usage
    };
}
|
|
287
|
+
/**
 * Extract a JSON payload from a raw LLM response string.
 *
 * Tries, in order: the whole response as one bare JSON object/array,
 * a ``` / ```json fenced code block containing an object/array, then the
 * first `{...}` span embedded anywhere in the text. Array forms are now
 * recognized too (previously a fenced array fell through unextracted, and a
 * bare top-level array of objects was mangled by the `{...}` fallback).
 * Returns the input unchanged when nothing matches, so callers can still
 * attempt to parse/repair it as-is.
 *
 * @param {string} response - raw model output
 * @returns {string} the extracted JSON text, or the original response
 */
function extractJSONFromCodeBlock(response) {
    try {
        // Whole response is a single JSON object or array (surrounding whitespace allowed).
        const wholeObject = response.match(/^\s*(\{[\s\S]*\})\s*$/);
        if (wholeObject) return wholeObject[1];
        const wholeArray = response.match(/^\s*(\[[\s\S]*\])\s*$/);
        if (wholeArray) return wholeArray[1];
        // JSON object or array wrapped in a fenced code block.
        const fencedObject = response.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
        if (fencedObject) return fencedObject[1];
        const fencedArray = response.match(/```(?:json)?\s*(\[[\s\S]*?\])\s*```/);
        if (fencedArray) return fencedArray[1];
        // Last resort: first {...} span embedded anywhere in surrounding prose.
        const embeddedObject = response.match(/\{[\s\S]*\}/);
        if (embeddedObject) return embeddedObject[0];
    } catch {}
    return response;
}
|
|
298
|
+
/**
 * Normalize Doubao-style bbox output where coordinate numbers are separated
 * by whitespace instead of commas (e.g. `"bbox": [10 20 30 40]`), so the
 * string becomes parseable JSON. Inputs without a "bbox" marker are returned
 * untouched.
 *
 * @param {string} input - candidate JSON text
 * @returns {string} the text with commas inserted between adjacent numbers
 */
function preprocessDoubaoBboxJson(input) {
    if (!input.includes('bbox')) return input;
    let result = input;
    // A single global pass cannot fix overlapping pairs ("1 2 3" -> "1,2 3"),
    // so repeat until no whitespace-separated number pair remains.
    while (/\d+\s+\d+/.test(result)) {
        result = result.replace(/(\d+)\s+(\d+)/g, '$1,$2');
    }
    return result;
}
|
|
302
|
+
/**
 * Recursively tidy a parsed JSON value: trims whitespace from every object
 * key and every string value, descending into nested arrays and objects.
 * `null`/`undefined` and non-string primitives pass through unchanged.
 *
 * @param {*} obj - any parsed JSON value
 * @returns {*} a normalized copy (primitives are returned as-is)
 */
function normalizeJsonObject(obj) {
    if (obj == null) return obj;
    if (typeof obj === 'string') return obj.trim();
    if (Array.isArray(obj)) return obj.map((item) => normalizeJsonObject(item));
    if (typeof obj === 'object') {
        // Rebuild the object with trimmed keys and recursively normalized values.
        return Object.fromEntries(
            Object.entries(obj).map(([key, value]) => [key.trim(), normalizeJsonObject(value)])
        );
    }
    return obj;
}
|
|
318
|
+
/**
 * Parse a possibly messy LLM response into a JSON value.
 *
 * Pipeline: strip code fences / surrounding prose, short-circuit bare
 * "(x,y)" coordinate answers, then attempt JSON.parse, then a
 * jsonrepair-assisted parse, and finally (for Doubao / UI-TARS vision
 * modes) a bbox-comma normalization pass before one last repair+parse.
 *
 * @param {string} input - raw model output
 * @param {string|undefined} vlMode - visual-language mode; 'doubao-vision'
 *        and 'vlm-ui-tars' enable the bbox preprocessing fallback
 * @returns {*} the normalized parsed value, or a `[x, y]` number pair for
 *          coordinate-style answers
 * @throws when every parsing strategy fails
 */
function safeParseJson(input, vlMode) {
    const candidate = extractJSONFromCodeBlock(input);
    // Coordinate-style reply such as "(123,456)" -> return it as [123, 456].
    const coordMatch = candidate?.match(/\((\d+),(\d+)\)/);
    if (coordMatch) return coordMatch.slice(1).map(Number);
    // First attempt: the cleaned string is already valid JSON.
    try {
        return normalizeJsonObject(JSON.parse(candidate));
    } catch {}
    // Second attempt: repair common JSON syntax mistakes, then parse.
    try {
        return normalizeJsonObject(JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(candidate)));
    } catch {}
    // Doubao / UI-TARS models may emit space-separated bbox numbers; insert
    // commas and retry. This final attempt is allowed to throw on failure.
    if (vlMode === 'doubao-vision' || vlMode === 'vlm-ui-tars') {
        const preprocessed = preprocessDoubaoBboxJson(candidate);
        return normalizeJsonObject(JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(preprocessed)));
    }
    throw Error(`failed to parse json response: ${input}`);
}
|
|
448
337
|
// Module epilogue: publish the bundle's public API onto the CommonJS
// `exports` object.
const KNOWN_EXPORTS = [
    "callAI",
    "callAIWithObjectResponse",
    "callAIWithStringResponse",
    "extractJSONFromCodeBlock",
    "preprocessDoubaoBboxJson",
    "safeParseJson"
];
for (const name of KNOWN_EXPORTS) exports[name] = __webpack_exports__[name];
// Forward any remaining exports not covered by the explicit list above.
for (var __rspack_i in __webpack_exports__) {
    if (!KNOWN_EXPORTS.includes(__rspack_i)) exports[__rspack_i] = __webpack_exports__[__rspack_i];
}
// Mark the module as an ES-module interop target.
Object.defineProperty(exports, '__esModule', {
    value: true
});
|