@godscene/core 1.7.11

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (189)
  1. package/LICENSE +21 -0
  2. package/README.md +9 -0
  3. package/dist/es/agent/agent.mjs +767 -0
  4. package/dist/es/agent/common.mjs +0 -0
  5. package/dist/es/agent/execution-session.mjs +39 -0
  6. package/dist/es/agent/index.mjs +6 -0
  7. package/dist/es/agent/task-builder.mjs +343 -0
  8. package/dist/es/agent/task-cache.mjs +212 -0
  9. package/dist/es/agent/tasks.mjs +428 -0
  10. package/dist/es/agent/ui-utils.mjs +101 -0
  11. package/dist/es/agent/utils.mjs +167 -0
  12. package/dist/es/ai-model/auto-glm/actions.mjs +237 -0
  13. package/dist/es/ai-model/auto-glm/index.mjs +6 -0
  14. package/dist/es/ai-model/auto-glm/parser.mjs +237 -0
  15. package/dist/es/ai-model/auto-glm/planning.mjs +69 -0
  16. package/dist/es/ai-model/auto-glm/prompt.mjs +220 -0
  17. package/dist/es/ai-model/auto-glm/util.mjs +7 -0
  18. package/dist/es/ai-model/connectivity.mjs +136 -0
  19. package/dist/es/ai-model/conversation-history.mjs +193 -0
  20. package/dist/es/ai-model/index.mjs +12 -0
  21. package/dist/es/ai-model/inspect.mjs +395 -0
  22. package/dist/es/ai-model/llm-planning.mjs +231 -0
  23. package/dist/es/ai-model/prompt/common.mjs +5 -0
  24. package/dist/es/ai-model/prompt/describe.mjs +64 -0
  25. package/dist/es/ai-model/prompt/extraction.mjs +129 -0
  26. package/dist/es/ai-model/prompt/llm-locator.mjs +49 -0
  27. package/dist/es/ai-model/prompt/llm-planning.mjs +584 -0
  28. package/dist/es/ai-model/prompt/llm-section-locator.mjs +42 -0
  29. package/dist/es/ai-model/prompt/order-sensitive-judge.mjs +33 -0
  30. package/dist/es/ai-model/prompt/playwright-generator.mjs +115 -0
  31. package/dist/es/ai-model/prompt/ui-tars-planning.mjs +34 -0
  32. package/dist/es/ai-model/prompt/util.mjs +57 -0
  33. package/dist/es/ai-model/prompt/yaml-generator.mjs +201 -0
  34. package/dist/es/ai-model/service-caller/codex-app-server.mjs +573 -0
  35. package/dist/es/ai-model/service-caller/image-detail.mjs +4 -0
  36. package/dist/es/ai-model/service-caller/index.mjs +648 -0
  37. package/dist/es/ai-model/service-caller/request-timeout.mjs +47 -0
  38. package/dist/es/ai-model/ui-tars-planning.mjs +247 -0
  39. package/dist/es/common.mjs +382 -0
  40. package/dist/es/device/device-options.mjs +0 -0
  41. package/dist/es/device/index.mjs +340 -0
  42. package/dist/es/dump/html-utils.mjs +290 -0
  43. package/dist/es/dump/index.mjs +3 -0
  44. package/dist/es/dump/screenshot-restoration.mjs +30 -0
  45. package/dist/es/dump/screenshot-store.mjs +125 -0
  46. package/dist/es/index.mjs +17 -0
  47. package/dist/es/report-cli.mjs +149 -0
  48. package/dist/es/report-generator.mjs +203 -0
  49. package/dist/es/report-markdown.mjs +216 -0
  50. package/dist/es/report.mjs +287 -0
  51. package/dist/es/screenshot-item.mjs +120 -0
  52. package/dist/es/service/index.mjs +272 -0
  53. package/dist/es/service/utils.mjs +13 -0
  54. package/dist/es/skill/index.mjs +35 -0
  55. package/dist/es/task-runner.mjs +261 -0
  56. package/dist/es/task-timing.mjs +10 -0
  57. package/dist/es/tree.mjs +11 -0
  58. package/dist/es/types.mjs +202 -0
  59. package/dist/es/utils.mjs +232 -0
  60. package/dist/es/yaml/builder.mjs +11 -0
  61. package/dist/es/yaml/index.mjs +4 -0
  62. package/dist/es/yaml/player.mjs +425 -0
  63. package/dist/es/yaml/utils.mjs +100 -0
  64. package/dist/es/yaml.mjs +0 -0
  65. package/dist/lib/agent/agent.js +815 -0
  66. package/dist/lib/agent/common.js +5 -0
  67. package/dist/lib/agent/execution-session.js +73 -0
  68. package/dist/lib/agent/index.js +76 -0
  69. package/dist/lib/agent/task-builder.js +380 -0
  70. package/dist/lib/agent/task-cache.js +264 -0
  71. package/dist/lib/agent/tasks.js +471 -0
  72. package/dist/lib/agent/ui-utils.js +153 -0
  73. package/dist/lib/agent/utils.js +238 -0
  74. package/dist/lib/ai-model/auto-glm/actions.js +271 -0
  75. package/dist/lib/ai-model/auto-glm/index.js +64 -0
  76. package/dist/lib/ai-model/auto-glm/parser.js +280 -0
  77. package/dist/lib/ai-model/auto-glm/planning.js +103 -0
  78. package/dist/lib/ai-model/auto-glm/prompt.js +257 -0
  79. package/dist/lib/ai-model/auto-glm/util.js +44 -0
  80. package/dist/lib/ai-model/connectivity.js +180 -0
  81. package/dist/lib/ai-model/conversation-history.js +227 -0
  82. package/dist/lib/ai-model/index.js +127 -0
  83. package/dist/lib/ai-model/inspect.js +441 -0
  84. package/dist/lib/ai-model/llm-planning.js +268 -0
  85. package/dist/lib/ai-model/prompt/common.js +39 -0
  86. package/dist/lib/ai-model/prompt/describe.js +98 -0
  87. package/dist/lib/ai-model/prompt/extraction.js +169 -0
  88. package/dist/lib/ai-model/prompt/llm-locator.js +86 -0
  89. package/dist/lib/ai-model/prompt/llm-planning.js +621 -0
  90. package/dist/lib/ai-model/prompt/llm-section-locator.js +79 -0
  91. package/dist/lib/ai-model/prompt/order-sensitive-judge.js +70 -0
  92. package/dist/lib/ai-model/prompt/playwright-generator.js +176 -0
  93. package/dist/lib/ai-model/prompt/ui-tars-planning.js +71 -0
  94. package/dist/lib/ai-model/prompt/util.js +103 -0
  95. package/dist/lib/ai-model/prompt/yaml-generator.js +262 -0
  96. package/dist/lib/ai-model/service-caller/codex-app-server.js +622 -0
  97. package/dist/lib/ai-model/service-caller/image-detail.js +38 -0
  98. package/dist/lib/ai-model/service-caller/index.js +716 -0
  99. package/dist/lib/ai-model/service-caller/request-timeout.js +93 -0
  100. package/dist/lib/ai-model/ui-tars-planning.js +281 -0
  101. package/dist/lib/common.js +491 -0
  102. package/dist/lib/device/device-options.js +18 -0
  103. package/dist/lib/device/index.js +467 -0
  104. package/dist/lib/dump/html-utils.js +366 -0
  105. package/dist/lib/dump/index.js +58 -0
  106. package/dist/lib/dump/screenshot-restoration.js +64 -0
  107. package/dist/lib/dump/screenshot-store.js +165 -0
  108. package/dist/lib/index.js +184 -0
  109. package/dist/lib/report-cli.js +189 -0
  110. package/dist/lib/report-generator.js +244 -0
  111. package/dist/lib/report-markdown.js +253 -0
  112. package/dist/lib/report.js +333 -0
  113. package/dist/lib/screenshot-item.js +154 -0
  114. package/dist/lib/service/index.js +306 -0
  115. package/dist/lib/service/utils.js +47 -0
  116. package/dist/lib/skill/index.js +69 -0
  117. package/dist/lib/task-runner.js +298 -0
  118. package/dist/lib/task-timing.js +44 -0
  119. package/dist/lib/tree.js +51 -0
  120. package/dist/lib/types.js +298 -0
  121. package/dist/lib/utils.js +314 -0
  122. package/dist/lib/yaml/builder.js +55 -0
  123. package/dist/lib/yaml/index.js +79 -0
  124. package/dist/lib/yaml/player.js +459 -0
  125. package/dist/lib/yaml/utils.js +153 -0
  126. package/dist/lib/yaml.js +18 -0
  127. package/dist/types/agent/agent.d.ts +220 -0
  128. package/dist/types/agent/common.d.ts +0 -0
  129. package/dist/types/agent/execution-session.d.ts +36 -0
  130. package/dist/types/agent/index.d.ts +9 -0
  131. package/dist/types/agent/task-builder.d.ts +34 -0
  132. package/dist/types/agent/task-cache.d.ts +49 -0
  133. package/dist/types/agent/tasks.d.ts +70 -0
  134. package/dist/types/agent/ui-utils.d.ts +14 -0
  135. package/dist/types/agent/utils.d.ts +25 -0
  136. package/dist/types/ai-model/auto-glm/actions.d.ts +78 -0
  137. package/dist/types/ai-model/auto-glm/index.d.ts +6 -0
  138. package/dist/types/ai-model/auto-glm/parser.d.ts +18 -0
  139. package/dist/types/ai-model/auto-glm/planning.d.ts +12 -0
  140. package/dist/types/ai-model/auto-glm/prompt.d.ts +27 -0
  141. package/dist/types/ai-model/auto-glm/util.d.ts +13 -0
  142. package/dist/types/ai-model/connectivity.d.ts +20 -0
  143. package/dist/types/ai-model/conversation-history.d.ts +105 -0
  144. package/dist/types/ai-model/index.d.ts +16 -0
  145. package/dist/types/ai-model/inspect.d.ts +67 -0
  146. package/dist/types/ai-model/llm-planning.d.ts +19 -0
  147. package/dist/types/ai-model/prompt/common.d.ts +2 -0
  148. package/dist/types/ai-model/prompt/describe.d.ts +1 -0
  149. package/dist/types/ai-model/prompt/extraction.d.ts +7 -0
  150. package/dist/types/ai-model/prompt/llm-locator.d.ts +3 -0
  151. package/dist/types/ai-model/prompt/llm-planning.d.ts +10 -0
  152. package/dist/types/ai-model/prompt/llm-section-locator.d.ts +3 -0
  153. package/dist/types/ai-model/prompt/order-sensitive-judge.d.ts +2 -0
  154. package/dist/types/ai-model/prompt/playwright-generator.d.ts +26 -0
  155. package/dist/types/ai-model/prompt/ui-tars-planning.d.ts +2 -0
  156. package/dist/types/ai-model/prompt/util.d.ts +33 -0
  157. package/dist/types/ai-model/prompt/yaml-generator.d.ts +102 -0
  158. package/dist/types/ai-model/service-caller/codex-app-server.d.ts +42 -0
  159. package/dist/types/ai-model/service-caller/image-detail.d.ts +2 -0
  160. package/dist/types/ai-model/service-caller/index.d.ts +60 -0
  161. package/dist/types/ai-model/service-caller/request-timeout.d.ts +32 -0
  162. package/dist/types/ai-model/ui-tars-planning.d.ts +72 -0
  163. package/dist/types/common.d.ts +288 -0
  164. package/dist/types/device/device-options.d.ts +155 -0
  165. package/dist/types/device/index.d.ts +2565 -0
  166. package/dist/types/dump/html-utils.d.ts +75 -0
  167. package/dist/types/dump/index.d.ts +5 -0
  168. package/dist/types/dump/screenshot-restoration.d.ts +8 -0
  169. package/dist/types/dump/screenshot-store.d.ts +49 -0
  170. package/dist/types/index.d.ts +21 -0
  171. package/dist/types/report-cli.d.ts +36 -0
  172. package/dist/types/report-generator.d.ts +88 -0
  173. package/dist/types/report-markdown.d.ts +24 -0
  174. package/dist/types/report.d.ts +52 -0
  175. package/dist/types/screenshot-item.d.ts +67 -0
  176. package/dist/types/service/index.d.ts +24 -0
  177. package/dist/types/service/utils.d.ts +2 -0
  178. package/dist/types/skill/index.d.ts +25 -0
  179. package/dist/types/task-runner.d.ts +50 -0
  180. package/dist/types/task-timing.d.ts +8 -0
  181. package/dist/types/tree.d.ts +4 -0
  182. package/dist/types/types.d.ts +684 -0
  183. package/dist/types/utils.d.ts +45 -0
  184. package/dist/types/yaml/builder.d.ts +2 -0
  185. package/dist/types/yaml/index.d.ts +4 -0
  186. package/dist/types/yaml/player.d.ts +34 -0
  187. package/dist/types/yaml/utils.d.ts +9 -0
  188. package/dist/types/yaml.d.ts +215 -0
  189. package/package.json +130 -0
package/dist/lib/ai-model/service-caller/index.js
@@ -0,0 +1,716 @@
1
+ "use strict";
2
+ var __webpack_require__ = {};
3
+ (()=>{
4
+ __webpack_require__.n = (module)=>{
5
+ var getter = module && module.__esModule ? ()=>module['default'] : ()=>module;
6
+ __webpack_require__.d(getter, {
7
+ a: getter
8
+ });
9
+ return getter;
10
+ };
11
+ })();
12
+ (()=>{
13
+ __webpack_require__.d = (exports1, definition)=>{
14
+ for(var key in definition)if (__webpack_require__.o(definition, key) && !__webpack_require__.o(exports1, key)) Object.defineProperty(exports1, key, {
15
+ enumerable: true,
16
+ get: definition[key]
17
+ });
18
+ };
19
+ })();
20
+ (()=>{
21
+ __webpack_require__.o = (obj, prop)=>Object.prototype.hasOwnProperty.call(obj, prop);
22
+ })();
23
+ (()=>{
24
+ __webpack_require__.r = (exports1)=>{
25
+ if ("u" > typeof Symbol && Symbol.toStringTag) Object.defineProperty(exports1, Symbol.toStringTag, {
26
+ value: 'Module'
27
+ });
28
+ Object.defineProperty(exports1, '__esModule', {
29
+ value: true
30
+ });
31
+ };
32
+ })();
33
+ var __webpack_exports__ = {};
34
+ __webpack_require__.r(__webpack_exports__);
35
+ __webpack_require__.d(__webpack_exports__, {
36
+ callAIWithObjectResponse: ()=>callAIWithObjectResponse,
37
+ extractJSONFromCodeBlock: ()=>extractJSONFromCodeBlock,
38
+ preprocessDoubaoBboxJson: ()=>preprocessDoubaoBboxJson,
39
+ resolveReasoningConfig: ()=>resolveReasoningConfig,
40
+ safeParseJson: ()=>safeParseJson,
41
+ callAI: ()=>callAI,
42
+ yhtCallAI: ()=>yhtCallAI,
43
+ AIResponseParseError: ()=>AIResponseParseError,
44
+ callAIWithStringResponse: ()=>callAIWithStringResponse
45
+ });
46
+ const env_namespaceObject = require("@godscene/shared/env");
47
+ const logger_namespaceObject = require("@godscene/shared/logger");
48
+ const utils_namespaceObject = require("@godscene/shared/utils");
49
+ const external_jsonrepair_namespaceObject = require("jsonrepair");
50
+ const external_openai_namespaceObject = require("openai");
51
+ var external_openai_default = /*#__PURE__*/ __webpack_require__.n(external_openai_namespaceObject);
52
+ const util_js_namespaceObject = require("../auto-glm/util.js");
53
+ const external_codex_app_server_js_namespaceObject = require("./codex-app-server.js");
54
+ const external_image_detail_js_namespaceObject = require("./image-detail.js");
55
+ const external_request_timeout_js_namespaceObject = require("./request-timeout.js");
56
+ function _define_property(obj, key, value) {
57
+ if (key in obj) Object.defineProperty(obj, key, {
58
+ value: value,
59
+ enumerable: true,
60
+ configurable: true,
61
+ writable: true
62
+ });
63
+ else obj[key] = value;
64
+ return obj;
65
+ }
66
+ class AIResponseParseError extends Error {
67
+ constructor(message, rawResponse, usage){
68
+ super(message), _define_property(this, "usage", void 0), _define_property(this, "rawResponse", void 0);
69
+ this.name = 'AIResponseParseError';
70
+ this.rawResponse = rawResponse;
71
+ this.usage = usage;
72
+ }
73
+ }
74
+ const defaultYhtConfig = {
75
+ domain: 'APILink',
76
+ yht_access_token: '',
77
+ chatType: 14,
78
+ model: 'doubao-seed-1-6-vision-250815',
79
+ modelCategory: 2,
80
+ stream: 0,
81
+ temperature: 0.01,
82
+ top_p: 0.7,
83
+ baseURL: 'https://c2.yonyoucloud.com/iuap-aip-service/report/rest/api/aiService/chat',
84
+ traceId: '',
85
+ extraParams: {},
86
+ topic: ''
87
+ };
88
+ async function createChatClient({ modelConfig }) {
89
+ const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion, modelFamily, createOpenAIClient, timeout } = modelConfig;
90
+ let proxyAgent;
91
+ const warnClient = (0, logger_namespaceObject.getDebug)('ai:call', {
92
+ console: true
93
+ });
94
+ const debugProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy');
95
+ const warnProxy = (0, logger_namespaceObject.getDebug)('ai:call:proxy', {
96
+ console: true
97
+ });
98
+ const sanitizeProxyUrl = (url)=>{
99
+ try {
100
+ const parsed = new URL(url);
101
+ if (parsed.username) {
102
+ parsed.password = '****';
103
+ return parsed.href;
104
+ }
105
+ return url;
106
+ } catch {
107
+ return url;
108
+ }
109
+ };
110
+ if (httpProxy) {
111
+ debugProxy('using http proxy', sanitizeProxyUrl(httpProxy));
112
+ if (utils_namespaceObject.ifInBrowser) warnProxy('HTTP proxy is configured but not supported in browser environment');
113
+ else {
114
+ const moduleName = 'undici';
115
+ const { ProxyAgent } = await import(moduleName);
116
+ proxyAgent = new ProxyAgent({
117
+ uri: httpProxy
118
+ });
119
+ }
120
+ } else if (socksProxy) {
121
+ debugProxy('using socks proxy', sanitizeProxyUrl(socksProxy));
122
+ if (utils_namespaceObject.ifInBrowser) warnProxy('SOCKS proxy is configured but not supported in browser environment');
123
+ else try {
124
+ const moduleName = 'fetch-socks';
125
+ const { socksDispatcher } = await import(moduleName);
126
+ const proxyUrl = new URL(socksProxy);
127
+ if (!proxyUrl.hostname) throw new Error('SOCKS proxy URL must include a valid hostname');
128
+ const port = Number.parseInt(proxyUrl.port, 10);
129
+ if (!proxyUrl.port || Number.isNaN(port)) throw new Error('SOCKS proxy URL must include a valid port');
130
+ const protocol = proxyUrl.protocol.replace(':', '');
131
+ const socksType = 'socks4' === protocol ? 4 : 'socks5' === protocol ? 5 : 5;
132
+ proxyAgent = socksDispatcher({
133
+ type: socksType,
134
+ host: proxyUrl.hostname,
135
+ port,
136
+ ...proxyUrl.username ? {
137
+ userId: decodeURIComponent(proxyUrl.username),
138
+ password: decodeURIComponent(proxyUrl.password || '')
139
+ } : {}
140
+ });
141
+ debugProxy('socks proxy configured successfully', {
142
+ type: socksType,
143
+ host: proxyUrl.hostname,
144
+ port: port
145
+ });
146
+ } catch (error) {
147
+ warnProxy('Failed to configure SOCKS proxy:', error);
148
+ throw new Error(`Invalid SOCKS proxy URL: ${socksProxy}. Expected format: socks4://host:port, socks5://host:port, or with authentication: socks5://user:pass@host:port`);
149
+ }
150
+ }
151
+ const effectiveTimeoutMs = (0, external_request_timeout_js_namespaceObject.resolveEffectiveTimeoutMs)({
152
+ timeout
153
+ });
154
+ const openAIOptions = {
155
+ baseURL: openaiBaseURL,
156
+ apiKey: openaiApiKey,
157
+ ...proxyAgent ? {
158
+ fetchOptions: {
159
+ dispatcher: proxyAgent
160
+ }
161
+ } : {},
162
+ ...openaiExtraConfig,
163
+ maxRetries: 0,
164
+ ...null !== effectiveTimeoutMs ? {
165
+ timeout: effectiveTimeoutMs
166
+ } : {},
167
+ dangerouslyAllowBrowser: true
168
+ };
169
+ const baseOpenAI = new (external_openai_default())(openAIOptions);
170
+ let openai = baseOpenAI;
171
+ if (openai && env_namespaceObject.globalConfigManager.getEnvConfigInBoolean(env_namespaceObject.MIDSCENE_LANGSMITH_DEBUG)) {
172
+ if (utils_namespaceObject.ifInBrowser) throw new Error('langsmith is not supported in browser');
173
+ warnClient('DEBUGGING MODE: langsmith wrapper enabled');
174
+ const langsmithModule = 'langsmith/wrappers';
175
+ const { wrapOpenAI } = await import(langsmithModule);
176
+ openai = wrapOpenAI(openai);
177
+ }
178
+ if (openai && env_namespaceObject.globalConfigManager.getEnvConfigInBoolean(env_namespaceObject.MIDSCENE_LANGFUSE_DEBUG)) {
179
+ if (utils_namespaceObject.ifInBrowser) throw new Error('langfuse is not supported in browser');
180
+ warnClient('DEBUGGING MODE: langfuse wrapper enabled');
181
+ const langfuseModule = '@langfuse/openai';
182
+ const { observeOpenAI } = await import(langfuseModule);
183
+ openai = observeOpenAI(openai);
184
+ }
185
+ if (createOpenAIClient) {
186
+ const wrappedClient = await createOpenAIClient(baseOpenAI, openAIOptions);
187
+ if (wrappedClient) openai = wrappedClient;
188
+ }
189
+ return {
190
+ completion: openai.chat.completions,
191
+ modelName,
192
+ modelDescription,
193
+ uiTarsModelVersion,
194
+ modelFamily
195
+ };
196
+ }
197
+ async function yhtCallAI(messages, modelConfig, options) {
198
+ const yhtConfig = {
199
+ domain: modelConfig.yht_domain || defaultYhtConfig.domain,
200
+ yht_access_token: modelConfig.yht_access_token || defaultYhtConfig.yht_access_token,
201
+ model: modelConfig.yht_model || defaultYhtConfig.model,
202
+ baseURL: modelConfig.yht_base_url || defaultYhtConfig.baseURL,
203
+ chatType: Number(modelConfig.yht_chat_type || defaultYhtConfig.chatType),
204
+ modelCategory: Number(modelConfig.yht_model_category || defaultYhtConfig.modelCategory),
205
+ stream: Number(modelConfig.yht_stream || defaultYhtConfig.stream),
206
+ temperature: Number(modelConfig.yht_temperature || defaultYhtConfig.temperature),
207
+ top_p: Number(modelConfig.yht_top_p || defaultYhtConfig.top_p),
208
+ traceId: modelConfig.yht_trace_id || defaultYhtConfig.traceId,
209
+ topic: defaultYhtConfig.topic,
210
+ extraParams: defaultYhtConfig.extraParams
211
+ };
212
+ const convertToYhtMessageFormat = (messages)=>messages.map((msg)=>{
213
+ const yhtMessage = {
214
+ role: 'system' === msg.role ? 'system' : 'user'
215
+ };
216
+ if ('string' == typeof msg.content) yhtMessage.vlContent = [
217
+ {
218
+ type: 'text',
219
+ text: msg.content
220
+ }
221
+ ];
222
+ else if (Array.isArray(msg.content)) {
223
+ const textParts = msg.content.filter((part)=>'text' === part.type);
224
+ const imageParts = msg.content.filter((part)=>'image_url' === part.type);
225
+ if (imageParts.length > 0) yhtMessage.vlContent = [
226
+ {
227
+ type: 'text',
228
+ text: textParts.map((part)=>part.text).join(' ') || 'Please analyze the image content'
229
+ },
230
+ ...imageParts.map((part)=>({
231
+ type: 'image_url',
232
+ image_url: {
233
+ url: part.image_url.url,
234
+ detail: 'high'
235
+ }
236
+ }))
237
+ ];
238
+ else if (textParts.length > 0) yhtMessage.vlContent = [
239
+ {
240
+ type: 'text',
241
+ text: textParts.map((part)=>part.text).join(' ')
242
+ }
243
+ ];
244
+ }
245
+ return yhtMessage;
246
+ });
247
+ const debugCall = (0, logger_namespaceObject.getDebug)('ai:call:yht');
248
+ const debugProfileStats = (0, logger_namespaceObject.getDebug)('ai:profile:stats:yht');
249
+ const debugProfileDetail = (0, logger_namespaceObject.getDebug)('ai:profile:detail:yht');
250
+ const startTime = Date.now();
251
+ try {
252
+ debugCall(`Preparing to call the AI service, model: ${yhtConfig.model}`);
253
+ const yhtMessages = convertToYhtMessageFormat(messages);
254
+ debugProfileDetail('Converted message format:', JSON.stringify(yhtMessages));
255
+ const requestBody = {
256
+ domain: yhtConfig.domain,
257
+ messages: yhtMessages,
258
+ chatType: yhtConfig.chatType,
259
+ model: yhtConfig.model,
260
+ modelCategory: yhtConfig.modelCategory,
261
+ stream: yhtConfig.stream,
262
+ temperature: yhtConfig.temperature,
263
+ top_p: yhtConfig.top_p,
264
+ extraParams: yhtConfig.extraParams,
265
+ topic: yhtConfig.topic
266
+ };
267
+ debugCall('Sending request to the AI service');
268
+ const controller = new AbortController();
269
+ const { signal } = controller;
270
+ options?.abortSignal?.addEventListener('abort', ()=>{
271
+ controller.abort();
272
+ }, {
273
+ once: true
274
+ });
275
+ const timeout = 120000;
276
+ let response;
277
+ const timeoutPromise = (timeout)=>new Promise((_, reject)=>{
278
+ setTimeout(()=>{
279
+ reject(new Error(`Request timeout after ${timeout}ms`));
280
+ }, timeout);
281
+ });
282
+ try {
283
+ response = await Promise.race([
284
+ fetch(yhtConfig.baseURL, {
285
+ method: 'POST',
286
+ headers: {
287
+ 'Content-Type': 'application/json',
288
+ yht_access_token: yhtConfig.yht_access_token || '',
289
+ traceId: yhtConfig.traceId
290
+ },
291
+ body: JSON.stringify(requestBody),
292
+ signal
293
+ }),
294
+ timeoutPromise(timeout)
295
+ ]).finally(()=>{
296
+ setTimeout(()=>{
297
+ controller.abort();
298
+ }, 2000);
299
+ });
300
+ } catch (error) {
301
+ if ('AbortError' === error.name) console.log('Error1:', 'Request was aborted due to timeout');
302
+ else console.log('Error2:', error.message);
303
+ throw error;
304
+ }
305
+ if (!response.ok) {
306
+ const errorText = await response.text();
307
+ debugCall(`Yonyou AI service returned an error: ${response.status} ${errorText}`);
308
+ throw new Error(`Yonyou AI service returned an error: ${response.status} ${errorText}`);
309
+ }
310
+ const responseText = await response.text();
311
+ const result = JSON.parse(responseText);
312
+ const timeCost = Date.now() - startTime;
313
+ debugProfileStats(`Yonyou model call completed, time cost: ${timeCost}ms, token usage: ${result.usage?.total_tokens || 'unknown'}`);
314
+ debugProfileDetail(`Yonyou model usage detail: ${JSON.stringify(result.usage)}`);
315
+ const content = result.result?.content || '';
316
+ debugCall(`Yonyou AI service response content: ${content}`);
317
+ const usage = {
318
+ prompt_tokens: result.usage?.prompt_tokens || 0,
319
+ completion_tokens: result.usage?.completion_tokens || 0,
320
+ total_tokens: result.usage?.total_tokens || 0,
321
+ cached_input: 0,
322
+ time_cost: timeCost,
323
+ model_name: yhtConfig.model,
324
+ model_description: `${yhtConfig.model} mode`,
325
+ intent: 'default',
326
+ request_id: result._request_id
327
+ };
328
+ const callAI_new = {
329
+ content: result.result?.content || '',
330
+ reasoning_content: result.result?.reasoning_content || '',
331
+ usage,
332
+ isStreamed: false
333
+ };
334
+ return callAI_new;
335
+ } catch (e) {
336
+ if ('AbortError' === e.name) throw new Error('Request cancelled');
337
+ console.error('Yonyou AI call error:', e);
338
+ throw e;
339
+ }
340
+ }
341
+ async function callAI(messages, modelConfig, options) {
342
+ if (modelConfig.yht_access_token) return await yhtCallAI(messages, modelConfig, options);
343
+ if ((0, external_codex_app_server_js_namespaceObject.isCodexAppServerProvider)(modelConfig.openaiBaseURL)) return (0, external_codex_app_server_js_namespaceObject.callAIWithCodexAppServer)(messages, modelConfig, options);
344
+ const { completion, modelName, modelDescription, uiTarsModelVersion, modelFamily } = await createChatClient({
345
+ modelConfig
346
+ });
347
+ const effectiveTimeoutMs = (0, external_request_timeout_js_namespaceObject.resolveEffectiveTimeoutMs)(modelConfig);
348
+ const extraBody = modelConfig.extraBody;
349
+ const maxTokens = env_namespaceObject.globalConfigManager.getEnvConfigValueAsNumber(env_namespaceObject.MIDSCENE_MODEL_MAX_TOKENS) ?? env_namespaceObject.globalConfigManager.getEnvConfigValueAsNumber(env_namespaceObject.OPENAI_MAX_TOKENS);
350
+ const debugCall = (0, logger_namespaceObject.getDebug)('ai:call');
351
+ const warnCall = (0, logger_namespaceObject.getDebug)('ai:call', {
352
+ console: true
353
+ });
354
+ const debugProfileStats = (0, logger_namespaceObject.getDebug)('ai:profile:stats');
355
+ const debugProfileDetail = (0, logger_namespaceObject.getDebug)('ai:profile:detail');
356
+ const startTime = Date.now();
357
+ const temperature = (()=>{
358
+ if ('gpt-5' === modelFamily) return void debugCall('temperature is ignored for gpt-5');
359
+ return modelConfig.temperature ?? 0;
360
+ })();
361
+ const isStreaming = options?.stream && options?.onChunk;
362
+ let content;
363
+ let accumulated = '';
364
+ let accumulatedReasoning = '';
365
+ let usage;
366
+ let timeCost;
367
+ let requestId;
368
+ const hasUsableText = (value)=>'string' == typeof value && value.trim().length > 0;
369
+ const buildUsageInfo = (usageData, requestId)=>{
370
+ if (!usageData) return;
371
+ const cachedInputTokens = usageData?.prompt_tokens_details?.cached_tokens;
372
+ return {
373
+ prompt_tokens: usageData.prompt_tokens ?? 0,
374
+ completion_tokens: usageData.completion_tokens ?? 0,
375
+ total_tokens: usageData.total_tokens ?? 0,
376
+ cached_input: cachedInputTokens ?? 0,
377
+ time_cost: timeCost ?? 0,
378
+ model_name: modelName,
379
+ model_description: modelDescription,
380
+ intent: modelConfig.intent,
381
+ request_id: requestId ?? void 0
382
+ };
383
+ };
384
+ const commonConfig = {
385
+ temperature,
386
+ stream: !!isStreaming,
387
+ max_tokens: maxTokens,
388
+ ...'qwen2.5-vl' === modelFamily ? {
389
+ vl_high_resolution_images: true
390
+ } : {}
391
+ };
392
+ if ((0, util_js_namespaceObject.isAutoGLM)(modelFamily)) {
393
+ commonConfig.top_p = 0.85;
394
+ commonConfig.frequency_penalty = 0.2;
395
+ }
396
+ const mergedEnableReasoning = (()=>{
397
+ const normalizedDeepThink = options?.deepThink === 'unset' ? void 0 : options?.deepThink;
398
+ if (true === normalizedDeepThink) return true;
399
+ if (false === normalizedDeepThink) return false;
400
+ return modelConfig.reasoningEnabled;
401
+ })();
402
+ const { config: reasoningEffortConfig, debugMessage: reasoningEffortDebugMessage, warningMessage } = resolveReasoningConfig({
403
+ reasoningEnabled: mergedEnableReasoning,
404
+ reasoningEffort: modelConfig.reasoningEffort,
405
+ reasoningBudget: modelConfig.reasoningBudget,
406
+ modelFamily
407
+ });
408
+ if (reasoningEffortDebugMessage) debugCall(reasoningEffortDebugMessage);
409
+ if (warningMessage) warnCall(warningMessage);
410
+ const shouldUseOriginalImageDetail = (0, external_image_detail_js_namespaceObject.shouldForceOriginalImageDetail)(modelConfig);
411
+ const messagesWithImageDetail = (()=>{
412
+ if (!shouldUseOriginalImageDetail) return messages;
413
+ return messages.map((msg)=>{
414
+ if (!Array.isArray(msg.content)) return msg;
415
+ const content = msg.content.map((part)=>{
416
+ if (part && 'image_url' === part.type && part.image_url?.url) return {
417
+ ...part,
418
+ image_url: {
419
+ ...part.image_url,
420
+ detail: 'original'
421
+ }
422
+ };
423
+ return part;
424
+ });
425
+ return {
426
+ ...msg,
427
+ content
428
+ };
429
+ });
430
+ })();
431
+ try {
432
+ debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${modelName}`);
433
+ if (isStreaming) {
434
+ const { signal: streamSignal, cleanup: cleanupStreamSignal } = (0, external_request_timeout_js_namespaceObject.buildRequestAbortSignal)(effectiveTimeoutMs, options?.abortSignal);
435
+ try {
436
+ const stream = await completion.create({
437
+ model: modelName,
438
+ messages: messagesWithImageDetail,
439
+ ...commonConfig,
440
+ ...reasoningEffortConfig,
441
+ ...extraBody
442
+ }, {
443
+ stream: true,
444
+ signal: streamSignal
445
+ });
446
+ requestId = stream._request_id;
447
+ for await (const chunk of stream){
448
+ const content = chunk.choices?.[0]?.delta?.content || '';
449
+ const reasoning_content = chunk.choices?.[0]?.delta?.reasoning_content || '';
450
+ if (chunk.usage) usage = chunk.usage;
451
+ if (content || reasoning_content) {
452
+ accumulated += content;
453
+ accumulatedReasoning += reasoning_content;
454
+ const chunkData = {
455
+ content,
456
+ reasoning_content,
457
+ accumulated,
458
+ isComplete: false,
459
+ usage: void 0
460
+ };
461
+ options.onChunk(chunkData);
462
+ }
463
+ if (chunk.choices?.[0]?.finish_reason) {
464
+ timeCost = Date.now() - startTime;
465
+ if (!usage) {
466
+ const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
467
+ usage = {
468
+ prompt_tokens: estimatedTokens,
469
+ completion_tokens: estimatedTokens,
470
+ total_tokens: 2 * estimatedTokens
471
+ };
472
+ }
473
+ const finalChunk = {
474
+ content: '',
475
+ accumulated,
476
+ reasoning_content: '',
477
+ isComplete: true,
478
+ usage: buildUsageInfo(usage, requestId)
479
+ };
480
+ options.onChunk(finalChunk);
481
+ break;
482
+ }
483
+ }
484
+ } finally{
485
+ cleanupStreamSignal();
486
+ }
487
+ content = accumulated;
488
+ debugProfileStats(`streaming model, ${modelName}, mode, ${modelFamily || 'default'}, cost-ms, ${timeCost}, temperature, ${temperature ?? ''}`);
489
+ } else {
490
+ const retryCount = modelConfig.retryCount ?? 1;
491
+ const retryInterval = modelConfig.retryInterval ?? 2000;
492
+ const maxAttempts = retryCount + 1;
493
+ let lastError;
494
+ for(let attempt = 1; attempt <= maxAttempts; attempt++){
495
+ const { signal: attemptSignal, cleanup: cleanupAttemptSignal } = (0, external_request_timeout_js_namespaceObject.buildRequestAbortSignal)(effectiveTimeoutMs, options?.abortSignal);
496
+ try {
497
+ const result = await completion.create({
498
+ model: modelName,
499
+ messages: messagesWithImageDetail,
500
+ ...commonConfig,
501
+ ...reasoningEffortConfig,
502
+ ...extraBody
503
+ }, {
504
+ signal: attemptSignal
505
+ });
506
+ timeCost = Date.now() - startTime;
507
+ debugProfileStats(`model, ${modelName}, mode, ${modelFamily || 'default'}, ui-tars-version, ${uiTarsModelVersion}, prompt-tokens, ${result.usage?.prompt_tokens || ''}, completion-tokens, ${result.usage?.completion_tokens || ''}, total-tokens, ${result.usage?.total_tokens || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}, temperature, ${temperature ?? ''}`);
508
+ debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
509
+ if (!result.choices) throw new Error(`invalid response from LLM service: ${JSON.stringify(result)}`);
510
+ content = result.choices[0].message.content;
511
+ accumulatedReasoning = result.choices[0].message?.reasoning_content || '';
512
+ usage = result.usage;
513
+ requestId = result._request_id;
514
+ if (!hasUsableText(content) && hasUsableText(accumulatedReasoning)) {
515
+ warnCall('empty content from AI model, using reasoning content');
516
+ content = accumulatedReasoning;
517
+ }
518
+ if (!hasUsableText(content)) throw new AIResponseParseError('empty content from AI model', JSON.stringify(result), buildUsageInfo(usage, requestId));
519
+ break;
520
+ } catch (error) {
521
+ lastError = error;
522
+ const wasHardTimeout = (0, external_request_timeout_js_namespaceObject.isHardTimeoutError)(lastError);
523
+ if (wasHardTimeout) warnCall(`AI call hit hard timeout (${effectiveTimeoutMs}ms, attempt ${attempt}/${maxAttempts}, model ${modelName}, intent ${modelConfig.intent})`);
524
+ if (options?.abortSignal?.aborted) break;
525
+ if (attempt < maxAttempts) {
526
+ warnCall(`AI call failed (attempt ${attempt}/${maxAttempts}), retrying in ${retryInterval}ms... Error: ${lastError.message}`);
527
+ await new Promise((resolve)=>setTimeout(resolve, retryInterval));
528
+ }
529
+ } finally{
530
+ cleanupAttemptSignal();
531
+ }
532
+ }
533
+ if (!content) throw lastError;
534
+ }
535
+ debugCall(`response reasoning content: ${accumulatedReasoning}`);
536
+ debugCall(`response content: ${content}`);
537
+ if (isStreaming && !usage) {
538
+ const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
539
+ usage = {
540
+ prompt_tokens: estimatedTokens,
541
+ completion_tokens: estimatedTokens,
542
+ total_tokens: 2 * estimatedTokens
543
+ };
544
+ }
545
+ return {
546
+ content: content || '',
547
+ reasoning_content: accumulatedReasoning || void 0,
548
+ usage: buildUsageInfo(usage, requestId),
549
+ isStreamed: !!isStreaming
550
+ };
551
+ } catch (e) {
552
+ warnCall('call AI error', e);
553
+ if (e instanceof AIResponseParseError) throw e;
554
+ const newError = new Error(`failed to call ${isStreaming ? 'streaming ' : ''}AI model service (${modelName}): ${e.message}\nTrouble shooting: https://midscenejs.com/model-provider.html`, {
555
+ cause: e
556
+ });
557
+ throw newError;
558
+ }
559
+ }
560
+ async function callAIWithObjectResponse(messages, modelConfig, options) {
561
+ const response = await callAI(messages, modelConfig, {
562
+ deepThink: options?.deepThink,
563
+ abortSignal: options?.abortSignal
564
+ });
565
+ (0, utils_namespaceObject.assert)(response, 'empty response');
566
+ const modelFamily = modelConfig.modelFamily;
567
+ const jsonContent = safeParseJson(response.content, modelFamily);
568
+ if ('object' != typeof jsonContent) throw new AIResponseParseError(`failed to parse json response from model (${modelConfig.modelName}): ${response.content}`, response.content, response.usage);
569
+ return {
570
+ content: jsonContent,
571
+ contentString: response.content,
572
+ usage: response.usage,
573
+ reasoning_content: response.reasoning_content
574
+ };
575
+ }
576
+ async function callAIWithStringResponse(msgs, modelConfig, options) {
577
+ const { content, usage } = await callAI(msgs, modelConfig, {
578
+ abortSignal: options?.abortSignal
579
+ });
580
+ return {
581
+ content,
582
+ usage
583
+ };
584
+ }
585
+ function extractJSONFromCodeBlock(response) {
586
+ try {
587
+ const jsonMatch = response.match(/^\s*(\{[\s\S]*\})\s*$/);
588
+ if (jsonMatch) return jsonMatch[1];
589
+ const codeBlockMatch = response.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
590
+ if (codeBlockMatch) return codeBlockMatch[1];
591
+ const jsonLikeMatch = response.match(/\{[\s\S]*\}/);
592
+ if (jsonLikeMatch) return jsonLikeMatch[0];
593
+ } catch {}
594
+ return response;
595
+ }
596
+ function preprocessDoubaoBboxJson(input) {
597
+ if (input.includes('bbox')) while(/\d+\s+\d+/.test(input))input = input.replace(/(\d+)\s+(\d+)/g, '$1,$2');
598
+ return input;
599
+ }
600
+ function resolveReasoningConfig({ reasoningEnabled, reasoningEffort, reasoningBudget, modelFamily }) {
601
+ if (void 0 === reasoningEnabled && !reasoningEffort && void 0 === reasoningBudget) return {
602
+ config: {}
603
+ };
604
+ const debugMessages = [];
605
+ const config = {};
606
+ if ('qwen3-vl' === modelFamily || 'qwen3.5' === modelFamily || 'qwen3.6' === modelFamily) {
607
+ if (void 0 !== reasoningEnabled) {
608
+ config.enable_thinking = reasoningEnabled;
609
+ debugMessages.push(`enable_thinking=${reasoningEnabled}`);
610
+ }
611
+ if (void 0 !== reasoningBudget) {
612
+ config.thinking_budget = reasoningBudget;
613
+ debugMessages.push(`thinking_budget=${reasoningBudget}`);
614
+ }
615
+ } else if ('doubao-vision' === modelFamily || 'doubao-seed' === modelFamily) {
616
+ if (void 0 !== reasoningEnabled) {
617
+ config.thinking = {
618
+ type: reasoningEnabled ? 'enabled' : 'disabled'
619
+ };
620
+ debugMessages.push(`thinking.type=${reasoningEnabled ? 'enabled' : 'disabled'}`);
621
+ }
622
+ if (reasoningEffort) {
623
+ config.reasoning_effort = reasoningEffort;
624
+ debugMessages.push(`reasoning_effort="${reasoningEffort}"`);
625
+ }
626
+ } else if ('glm-v' === modelFamily) {
627
+ if (void 0 !== reasoningEnabled) {
628
+ config.thinking = {
629
+ type: reasoningEnabled ? 'enabled' : 'disabled'
630
+ };
631
+ debugMessages.push(`thinking.type=${reasoningEnabled ? 'enabled' : 'disabled'}`);
632
+ }
633
+ } else if ('gpt-5' === modelFamily) {
634
+ config.reasoning = void 0;
635
+ debugMessages.push('reasoning config is ignored for gpt-5');
636
+ } else if (!modelFamily) return {
637
+ config: {},
638
+ debugMessage: 'reasoning config ignored: no model_family configured',
639
+ warningMessage: 'Reasoning config is set but no model_family is configured. Set MIDSCENE_MODEL_FAMILY to enable reasoning config pass-through.'
640
+ };
641
+ else if (reasoningEffort) {
642
+ config.reasoning_effort = reasoningEffort;
643
+ debugMessages.push(`reasoning_effort="${reasoningEffort}"`);
644
+ }
645
+ return {
646
+ config,
647
+ debugMessage: debugMessages.length ? `reasoning config for ${modelFamily}: ${debugMessages.join(', ')}` : void 0
648
+ };
649
+ }
650
+ function normalizeJsonObject(obj) {
651
+ if (null == obj) return obj;
652
+ if (Array.isArray(obj)) return obj.map((item)=>normalizeJsonObject(item));
653
+ if ('object' == typeof obj) {
654
+ const normalized = {};
655
+ for (const [key, value] of Object.entries(obj)){
656
+ const trimmedKey = key.trim();
657
+ let normalizedValue = normalizeJsonObject(value);
658
+ if ('string' == typeof normalizedValue) normalizedValue = normalizedValue.trim();
659
+ normalized[trimmedKey] = normalizedValue;
660
+ }
661
+ return normalized;
662
+ }
663
+ if ('string' == typeof obj) return obj.trim();
664
+ return obj;
665
+ }
666
+ function safeParseJson(input, modelFamily) {
667
+ const cleanJsonString = extractJSONFromCodeBlock(input);
668
+ if (cleanJsonString?.match(/\((\d+),(\d+)\)/)) return cleanJsonString.match(/\((\d+),(\d+)\)/)?.slice(1).map(Number);
669
+ let parsed;
670
+ let lastError;
671
+ try {
672
+ parsed = JSON.parse(cleanJsonString);
673
+ return normalizeJsonObject(parsed);
674
+ } catch (error) {
675
+ lastError = error;
676
+ }
677
+ try {
678
+ parsed = JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(cleanJsonString));
679
+ return normalizeJsonObject(parsed);
680
+ } catch (error) {
681
+ lastError = error;
682
+ }
683
+ if ('doubao-vision' === modelFamily || 'doubao-seed' === modelFamily || (0, util_js_namespaceObject.isUITars)(modelFamily)) {
684
+ const jsonString = preprocessDoubaoBboxJson(cleanJsonString);
685
+ try {
686
+ parsed = JSON.parse((0, external_jsonrepair_namespaceObject.jsonrepair)(jsonString));
687
+ return normalizeJsonObject(parsed);
688
+ } catch (error) {
689
+ lastError = error;
690
+ }
691
+ }
692
+ throw Error(`failed to parse LLM response into JSON. Error - ${String(lastError ?? 'unknown error')}. Response - \n ${input}`);
693
+ }
694
+ exports.AIResponseParseError = __webpack_exports__.AIResponseParseError;
695
+ exports.callAI = __webpack_exports__.callAI;
696
+ exports.callAIWithObjectResponse = __webpack_exports__.callAIWithObjectResponse;
697
+ exports.callAIWithStringResponse = __webpack_exports__.callAIWithStringResponse;
698
+ exports.extractJSONFromCodeBlock = __webpack_exports__.extractJSONFromCodeBlock;
699
+ exports.preprocessDoubaoBboxJson = __webpack_exports__.preprocessDoubaoBboxJson;
700
+ exports.resolveReasoningConfig = __webpack_exports__.resolveReasoningConfig;
701
+ exports.safeParseJson = __webpack_exports__.safeParseJson;
702
+ exports.yhtCallAI = __webpack_exports__.yhtCallAI;
703
+ for(var __rspack_i in __webpack_exports__)if (-1 === [
704
+ "AIResponseParseError",
705
+ "callAI",
706
+ "callAIWithObjectResponse",
707
+ "callAIWithStringResponse",
708
+ "extractJSONFromCodeBlock",
709
+ "preprocessDoubaoBboxJson",
710
+ "resolveReasoningConfig",
711
+ "safeParseJson",
712
+ "yhtCallAI"
713
+ ].indexOf(__rspack_i)) exports[__rspack_i] = __webpack_exports__[__rspack_i];
714
+ Object.defineProperty(exports, '__esModule', {
715
+ value: true
716
+ });
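
Usage note (not part of the published package): the hunk above is the bundled service-caller entry point and exports callAI, callAIWithObjectResponse, callAIWithStringResponse, safeParseJson, and AIResponseParseError. The sketch below is a minimal, hypothetical example of driving those exports; the require path, model name, and modelConfig fields are assumptions inferred from the destructuring in createChatClient, not documented API.

// Minimal sketch, assuming the bundled file is requirable at this path
// (the package's exports map is not shown in this diff).
const {
  callAIWithObjectResponse,
  safeParseJson,
  AIResponseParseError,
} = require('@godscene/core/dist/lib/ai-model/service-caller/index.js');

async function main() {
  // Field names mirror the destructuring in createChatClient; the values are placeholders.
  const modelConfig = {
    modelName: 'gpt-4o-mini',
    openaiBaseURL: 'https://api.openai.com/v1',
    openaiApiKey: process.env.OPENAI_API_KEY,
    intent: 'default',
  };
  const messages = [
    { role: 'system', content: 'Reply with a JSON object only.' },
    { role: 'user', content: 'Return {"ok": true} and nothing else.' },
  ];
  try {
    // Internally this calls callAI, then parses the reply with safeParseJson.
    const { content, usage } = await callAIWithObjectResponse(messages, modelConfig);
    console.log(content, usage?.total_tokens);
  } catch (err) {
    if (err instanceof AIResponseParseError) {
      // The raw model output and token usage are preserved on the error.
      console.error('unparseable model output:', err.rawResponse);
    } else {
      throw err;
    }
  }

  // safeParseJson strips ```json fences and repairs near-JSON before parsing.
  console.log(safeParseJson('```json\n{ "ok": true }\n```')); // { ok: true }
}

main().catch(console.error);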