@godscene/core 1.7.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +9 -0
- package/dist/es/agent/agent.mjs +767 -0
- package/dist/es/agent/common.mjs +0 -0
- package/dist/es/agent/execution-session.mjs +39 -0
- package/dist/es/agent/index.mjs +6 -0
- package/dist/es/agent/task-builder.mjs +343 -0
- package/dist/es/agent/task-cache.mjs +212 -0
- package/dist/es/agent/tasks.mjs +428 -0
- package/dist/es/agent/ui-utils.mjs +101 -0
- package/dist/es/agent/utils.mjs +167 -0
- package/dist/es/ai-model/auto-glm/actions.mjs +237 -0
- package/dist/es/ai-model/auto-glm/index.mjs +6 -0
- package/dist/es/ai-model/auto-glm/parser.mjs +237 -0
- package/dist/es/ai-model/auto-glm/planning.mjs +69 -0
- package/dist/es/ai-model/auto-glm/prompt.mjs +220 -0
- package/dist/es/ai-model/auto-glm/util.mjs +7 -0
- package/dist/es/ai-model/connectivity.mjs +136 -0
- package/dist/es/ai-model/conversation-history.mjs +193 -0
- package/dist/es/ai-model/index.mjs +12 -0
- package/dist/es/ai-model/inspect.mjs +395 -0
- package/dist/es/ai-model/llm-planning.mjs +231 -0
- package/dist/es/ai-model/prompt/common.mjs +5 -0
- package/dist/es/ai-model/prompt/describe.mjs +64 -0
- package/dist/es/ai-model/prompt/extraction.mjs +129 -0
- package/dist/es/ai-model/prompt/llm-locator.mjs +49 -0
- package/dist/es/ai-model/prompt/llm-planning.mjs +584 -0
- package/dist/es/ai-model/prompt/llm-section-locator.mjs +42 -0
- package/dist/es/ai-model/prompt/order-sensitive-judge.mjs +33 -0
- package/dist/es/ai-model/prompt/playwright-generator.mjs +115 -0
- package/dist/es/ai-model/prompt/ui-tars-planning.mjs +34 -0
- package/dist/es/ai-model/prompt/util.mjs +57 -0
- package/dist/es/ai-model/prompt/yaml-generator.mjs +201 -0
- package/dist/es/ai-model/service-caller/codex-app-server.mjs +573 -0
- package/dist/es/ai-model/service-caller/image-detail.mjs +4 -0
- package/dist/es/ai-model/service-caller/index.mjs +648 -0
- package/dist/es/ai-model/service-caller/request-timeout.mjs +47 -0
- package/dist/es/ai-model/ui-tars-planning.mjs +247 -0
- package/dist/es/common.mjs +382 -0
- package/dist/es/device/device-options.mjs +0 -0
- package/dist/es/device/index.mjs +340 -0
- package/dist/es/dump/html-utils.mjs +290 -0
- package/dist/es/dump/index.mjs +3 -0
- package/dist/es/dump/screenshot-restoration.mjs +30 -0
- package/dist/es/dump/screenshot-store.mjs +125 -0
- package/dist/es/index.mjs +17 -0
- package/dist/es/report-cli.mjs +149 -0
- package/dist/es/report-generator.mjs +203 -0
- package/dist/es/report-markdown.mjs +216 -0
- package/dist/es/report.mjs +287 -0
- package/dist/es/screenshot-item.mjs +120 -0
- package/dist/es/service/index.mjs +272 -0
- package/dist/es/service/utils.mjs +13 -0
- package/dist/es/skill/index.mjs +35 -0
- package/dist/es/task-runner.mjs +261 -0
- package/dist/es/task-timing.mjs +10 -0
- package/dist/es/tree.mjs +11 -0
- package/dist/es/types.mjs +202 -0
- package/dist/es/utils.mjs +232 -0
- package/dist/es/yaml/builder.mjs +11 -0
- package/dist/es/yaml/index.mjs +4 -0
- package/dist/es/yaml/player.mjs +425 -0
- package/dist/es/yaml/utils.mjs +100 -0
- package/dist/es/yaml.mjs +0 -0
- package/dist/lib/agent/agent.js +815 -0
- package/dist/lib/agent/common.js +5 -0
- package/dist/lib/agent/execution-session.js +73 -0
- package/dist/lib/agent/index.js +76 -0
- package/dist/lib/agent/task-builder.js +380 -0
- package/dist/lib/agent/task-cache.js +264 -0
- package/dist/lib/agent/tasks.js +471 -0
- package/dist/lib/agent/ui-utils.js +153 -0
- package/dist/lib/agent/utils.js +238 -0
- package/dist/lib/ai-model/auto-glm/actions.js +271 -0
- package/dist/lib/ai-model/auto-glm/index.js +64 -0
- package/dist/lib/ai-model/auto-glm/parser.js +280 -0
- package/dist/lib/ai-model/auto-glm/planning.js +103 -0
- package/dist/lib/ai-model/auto-glm/prompt.js +257 -0
- package/dist/lib/ai-model/auto-glm/util.js +44 -0
- package/dist/lib/ai-model/connectivity.js +180 -0
- package/dist/lib/ai-model/conversation-history.js +227 -0
- package/dist/lib/ai-model/index.js +127 -0
- package/dist/lib/ai-model/inspect.js +441 -0
- package/dist/lib/ai-model/llm-planning.js +268 -0
- package/dist/lib/ai-model/prompt/common.js +39 -0
- package/dist/lib/ai-model/prompt/describe.js +98 -0
- package/dist/lib/ai-model/prompt/extraction.js +169 -0
- package/dist/lib/ai-model/prompt/llm-locator.js +86 -0
- package/dist/lib/ai-model/prompt/llm-planning.js +621 -0
- package/dist/lib/ai-model/prompt/llm-section-locator.js +79 -0
- package/dist/lib/ai-model/prompt/order-sensitive-judge.js +70 -0
- package/dist/lib/ai-model/prompt/playwright-generator.js +176 -0
- package/dist/lib/ai-model/prompt/ui-tars-planning.js +71 -0
- package/dist/lib/ai-model/prompt/util.js +103 -0
- package/dist/lib/ai-model/prompt/yaml-generator.js +262 -0
- package/dist/lib/ai-model/service-caller/codex-app-server.js +622 -0
- package/dist/lib/ai-model/service-caller/image-detail.js +38 -0
- package/dist/lib/ai-model/service-caller/index.js +716 -0
- package/dist/lib/ai-model/service-caller/request-timeout.js +93 -0
- package/dist/lib/ai-model/ui-tars-planning.js +281 -0
- package/dist/lib/common.js +491 -0
- package/dist/lib/device/device-options.js +18 -0
- package/dist/lib/device/index.js +467 -0
- package/dist/lib/dump/html-utils.js +366 -0
- package/dist/lib/dump/index.js +58 -0
- package/dist/lib/dump/screenshot-restoration.js +64 -0
- package/dist/lib/dump/screenshot-store.js +165 -0
- package/dist/lib/index.js +184 -0
- package/dist/lib/report-cli.js +189 -0
- package/dist/lib/report-generator.js +244 -0
- package/dist/lib/report-markdown.js +253 -0
- package/dist/lib/report.js +333 -0
- package/dist/lib/screenshot-item.js +154 -0
- package/dist/lib/service/index.js +306 -0
- package/dist/lib/service/utils.js +47 -0
- package/dist/lib/skill/index.js +69 -0
- package/dist/lib/task-runner.js +298 -0
- package/dist/lib/task-timing.js +44 -0
- package/dist/lib/tree.js +51 -0
- package/dist/lib/types.js +298 -0
- package/dist/lib/utils.js +314 -0
- package/dist/lib/yaml/builder.js +55 -0
- package/dist/lib/yaml/index.js +79 -0
- package/dist/lib/yaml/player.js +459 -0
- package/dist/lib/yaml/utils.js +153 -0
- package/dist/lib/yaml.js +18 -0
- package/dist/types/agent/agent.d.ts +220 -0
- package/dist/types/agent/common.d.ts +0 -0
- package/dist/types/agent/execution-session.d.ts +36 -0
- package/dist/types/agent/index.d.ts +9 -0
- package/dist/types/agent/task-builder.d.ts +34 -0
- package/dist/types/agent/task-cache.d.ts +49 -0
- package/dist/types/agent/tasks.d.ts +70 -0
- package/dist/types/agent/ui-utils.d.ts +14 -0
- package/dist/types/agent/utils.d.ts +25 -0
- package/dist/types/ai-model/auto-glm/actions.d.ts +78 -0
- package/dist/types/ai-model/auto-glm/index.d.ts +6 -0
- package/dist/types/ai-model/auto-glm/parser.d.ts +18 -0
- package/dist/types/ai-model/auto-glm/planning.d.ts +12 -0
- package/dist/types/ai-model/auto-glm/prompt.d.ts +27 -0
- package/dist/types/ai-model/auto-glm/util.d.ts +13 -0
- package/dist/types/ai-model/connectivity.d.ts +20 -0
- package/dist/types/ai-model/conversation-history.d.ts +105 -0
- package/dist/types/ai-model/index.d.ts +16 -0
- package/dist/types/ai-model/inspect.d.ts +67 -0
- package/dist/types/ai-model/llm-planning.d.ts +19 -0
- package/dist/types/ai-model/prompt/common.d.ts +2 -0
- package/dist/types/ai-model/prompt/describe.d.ts +1 -0
- package/dist/types/ai-model/prompt/extraction.d.ts +7 -0
- package/dist/types/ai-model/prompt/llm-locator.d.ts +3 -0
- package/dist/types/ai-model/prompt/llm-planning.d.ts +10 -0
- package/dist/types/ai-model/prompt/llm-section-locator.d.ts +3 -0
- package/dist/types/ai-model/prompt/order-sensitive-judge.d.ts +2 -0
- package/dist/types/ai-model/prompt/playwright-generator.d.ts +26 -0
- package/dist/types/ai-model/prompt/ui-tars-planning.d.ts +2 -0
- package/dist/types/ai-model/prompt/util.d.ts +33 -0
- package/dist/types/ai-model/prompt/yaml-generator.d.ts +102 -0
- package/dist/types/ai-model/service-caller/codex-app-server.d.ts +42 -0
- package/dist/types/ai-model/service-caller/image-detail.d.ts +2 -0
- package/dist/types/ai-model/service-caller/index.d.ts +60 -0
- package/dist/types/ai-model/service-caller/request-timeout.d.ts +32 -0
- package/dist/types/ai-model/ui-tars-planning.d.ts +72 -0
- package/dist/types/common.d.ts +288 -0
- package/dist/types/device/device-options.d.ts +155 -0
- package/dist/types/device/index.d.ts +2565 -0
- package/dist/types/dump/html-utils.d.ts +75 -0
- package/dist/types/dump/index.d.ts +5 -0
- package/dist/types/dump/screenshot-restoration.d.ts +8 -0
- package/dist/types/dump/screenshot-store.d.ts +49 -0
- package/dist/types/index.d.ts +21 -0
- package/dist/types/report-cli.d.ts +36 -0
- package/dist/types/report-generator.d.ts +88 -0
- package/dist/types/report-markdown.d.ts +24 -0
- package/dist/types/report.d.ts +52 -0
- package/dist/types/screenshot-item.d.ts +67 -0
- package/dist/types/service/index.d.ts +24 -0
- package/dist/types/service/utils.d.ts +2 -0
- package/dist/types/skill/index.d.ts +25 -0
- package/dist/types/task-runner.d.ts +50 -0
- package/dist/types/task-timing.d.ts +8 -0
- package/dist/types/tree.d.ts +4 -0
- package/dist/types/types.d.ts +684 -0
- package/dist/types/utils.d.ts +45 -0
- package/dist/types/yaml/builder.d.ts +2 -0
- package/dist/types/yaml/index.d.ts +4 -0
- package/dist/types/yaml/player.d.ts +34 -0
- package/dist/types/yaml/utils.d.ts +9 -0
- package/dist/types/yaml.d.ts +215 -0
- package/package.json +130 -0
--- /dev/null
+++ package/dist/es/ai-model/service-caller/index.mjs
@@ -0,0 +1,648 @@
+import { MIDSCENE_LANGFUSE_DEBUG, MIDSCENE_LANGSMITH_DEBUG, MIDSCENE_MODEL_MAX_TOKENS, OPENAI_MAX_TOKENS, globalConfigManager } from "@godscene/shared/env";
+import { getDebug } from "@godscene/shared/logger";
+import { assert, ifInBrowser } from "@godscene/shared/utils";
+import { jsonrepair } from "jsonrepair";
+import openai_0 from "openai";
+import { isAutoGLM, isUITars } from "../auto-glm/util.mjs";
+import { callAIWithCodexAppServer, isCodexAppServerProvider } from "./codex-app-server.mjs";
+import { shouldForceOriginalImageDetail } from "./image-detail.mjs";
+import { buildRequestAbortSignal, isHardTimeoutError, resolveEffectiveTimeoutMs } from "./request-timeout.mjs";
+function _define_property(obj, key, value) {
+    if (key in obj) Object.defineProperty(obj, key, {
+        value: value,
+        enumerable: true,
+        configurable: true,
+        writable: true
+    });
+    else obj[key] = value;
+    return obj;
+}
+class AIResponseParseError extends Error {
+    constructor(message, rawResponse, usage){
+        super(message), _define_property(this, "usage", void 0), _define_property(this, "rawResponse", void 0);
+        this.name = 'AIResponseParseError';
+        this.rawResponse = rawResponse;
+        this.usage = usage;
+    }
+}
+const defaultYhtConfig = {
+    domain: 'APILink',
+    yht_access_token: '',
+    chatType: 14,
+    model: 'doubao-seed-1-6-vision-250815',
+    modelCategory: 2,
+    stream: 0,
+    temperature: 0.01,
+    top_p: 0.7,
+    baseURL: 'https://c2.yonyoucloud.com/iuap-aip-service/report/rest/api/aiService/chat',
+    traceId: '',
+    extraParams: {},
+    topic: ''
+};
+async function createChatClient({ modelConfig }) {
+    const { socksProxy, httpProxy, modelName, openaiBaseURL, openaiApiKey, openaiExtraConfig, modelDescription, uiTarsModelVersion, modelFamily, createOpenAIClient, timeout } = modelConfig;
+    let proxyAgent;
+    const warnClient = getDebug('ai:call', {
+        console: true
+    });
+    const debugProxy = getDebug('ai:call:proxy');
+    const warnProxy = getDebug('ai:call:proxy', {
+        console: true
+    });
+    const sanitizeProxyUrl = (url)=>{
+        try {
+            const parsed = new URL(url);
+            if (parsed.username) {
+                parsed.password = '****';
+                return parsed.href;
+            }
+            return url;
+        } catch {
+            return url;
+        }
+    };
+    if (httpProxy) {
+        debugProxy('using http proxy', sanitizeProxyUrl(httpProxy));
+        if (ifInBrowser) warnProxy('HTTP proxy is configured but not supported in browser environment');
+        else {
+            const moduleName = 'undici';
+            const { ProxyAgent } = await import(moduleName);
+            proxyAgent = new ProxyAgent({
+                uri: httpProxy
+            });
+        }
+    } else if (socksProxy) {
+        debugProxy('using socks proxy', sanitizeProxyUrl(socksProxy));
+        if (ifInBrowser) warnProxy('SOCKS proxy is configured but not supported in browser environment');
+        else try {
+            const moduleName = 'fetch-socks';
+            const { socksDispatcher } = await import(moduleName);
+            const proxyUrl = new URL(socksProxy);
+            if (!proxyUrl.hostname) throw new Error('SOCKS proxy URL must include a valid hostname');
+            const port = Number.parseInt(proxyUrl.port, 10);
+            if (!proxyUrl.port || Number.isNaN(port)) throw new Error('SOCKS proxy URL must include a valid port');
+            const protocol = proxyUrl.protocol.replace(':', '');
+            const socksType = 'socks4' === protocol ? 4 : 'socks5' === protocol ? 5 : 5;
+            proxyAgent = socksDispatcher({
+                type: socksType,
+                host: proxyUrl.hostname,
+                port,
+                ...proxyUrl.username ? {
+                    userId: decodeURIComponent(proxyUrl.username),
+                    password: decodeURIComponent(proxyUrl.password || '')
+                } : {}
+            });
+            debugProxy('socks proxy configured successfully', {
+                type: socksType,
+                host: proxyUrl.hostname,
+                port: port
+            });
+        } catch (error) {
+            warnProxy('Failed to configure SOCKS proxy:', error);
+            throw new Error(`Invalid SOCKS proxy URL: ${socksProxy}. Expected format: socks4://host:port, socks5://host:port, or with authentication: socks5://user:pass@host:port`);
+        }
+    }
+    const effectiveTimeoutMs = resolveEffectiveTimeoutMs({
+        timeout
+    });
+    const openAIOptions = {
+        baseURL: openaiBaseURL,
+        apiKey: openaiApiKey,
+        ...proxyAgent ? {
+            fetchOptions: {
+                dispatcher: proxyAgent
+            }
+        } : {},
+        ...openaiExtraConfig,
+        maxRetries: 0,
+        ...null !== effectiveTimeoutMs ? {
+            timeout: effectiveTimeoutMs
+        } : {},
+        dangerouslyAllowBrowser: true
+    };
+    const baseOpenAI = new openai_0(openAIOptions);
+    let openai = baseOpenAI;
+    if (openai && globalConfigManager.getEnvConfigInBoolean(MIDSCENE_LANGSMITH_DEBUG)) {
+        if (ifInBrowser) throw new Error('langsmith is not supported in browser');
+        warnClient('DEBUGGING MODE: langsmith wrapper enabled');
+        const langsmithModule = 'langsmith/wrappers';
+        const { wrapOpenAI } = await import(langsmithModule);
+        openai = wrapOpenAI(openai);
+    }
+    if (openai && globalConfigManager.getEnvConfigInBoolean(MIDSCENE_LANGFUSE_DEBUG)) {
+        if (ifInBrowser) throw new Error('langfuse is not supported in browser');
+        warnClient('DEBUGGING MODE: langfuse wrapper enabled');
+        const langfuseModule = '@langfuse/openai';
+        const { observeOpenAI } = await import(langfuseModule);
+        openai = observeOpenAI(openai);
+    }
+    if (createOpenAIClient) {
+        const wrappedClient = await createOpenAIClient(baseOpenAI, openAIOptions);
+        if (wrappedClient) openai = wrappedClient;
+    }
+    return {
+        completion: openai.chat.completions,
+        modelName,
+        modelDescription,
+        uiTarsModelVersion,
+        modelFamily
+    };
+}
+async function yhtCallAI(messages, modelConfig, options) {
+    const yhtConfig = {
+        domain: modelConfig.yht_domain || defaultYhtConfig.domain,
+        yht_access_token: modelConfig.yht_access_token || defaultYhtConfig.yht_access_token,
+        model: modelConfig.yht_model || defaultYhtConfig.model,
+        baseURL: modelConfig.yht_base_url || defaultYhtConfig.baseURL,
+        chatType: Number(modelConfig.yht_chat_type || defaultYhtConfig.chatType),
+        modelCategory: Number(modelConfig.yht_model_category || defaultYhtConfig.modelCategory),
+        stream: Number(modelConfig.yht_stream || defaultYhtConfig.stream),
+        temperature: Number(modelConfig.yht_temperature || defaultYhtConfig.temperature),
+        top_p: Number(modelConfig.yht_top_p || defaultYhtConfig.top_p),
+        traceId: modelConfig.yht_trace_id || defaultYhtConfig.traceId,
+        topic: defaultYhtConfig.topic,
+        extraParams: defaultYhtConfig.extraParams
+    };
+    const convertToYhtMessageFormat = (messages)=>messages.map((msg)=>{
+        const yhtMessage = {
+            role: 'system' === msg.role ? 'system' : 'user'
+        };
+        if ('string' == typeof msg.content) yhtMessage.vlContent = [
+            {
+                type: 'text',
+                text: msg.content
+            }
+        ];
+        else if (Array.isArray(msg.content)) {
+            const textParts = msg.content.filter((part)=>'text' === part.type);
+            const imageParts = msg.content.filter((part)=>'image_url' === part.type);
+            if (imageParts.length > 0) yhtMessage.vlContent = [
+                {
+                    type: 'text',
+                    text: textParts.map((part)=>part.text).join(' ') || '请分析图片内容'
+                },
+                ...imageParts.map((part)=>({
+                    type: 'image_url',
+                    image_url: {
+                        url: part.image_url.url,
+                        detail: 'high'
+                    }
+                }))
+            ];
+            else if (textParts.length > 0) yhtMessage.vlContent = [
+                {
+                    type: 'text',
+                    text: textParts.map((part)=>part.text).join(' ')
+                }
+            ];
+        }
+        return yhtMessage;
+    });
+    const debugCall = getDebug('ai:call:yht');
+    const debugProfileStats = getDebug('ai:profile:stats:yht');
+    const debugProfileDetail = getDebug('ai:profile:detail:yht');
+    const startTime = Date.now();
+    try {
+        debugCall(`准备调用AI服务,模型: ${yhtConfig.model}`);
+        const yhtMessages = convertToYhtMessageFormat(messages);
+        debugProfileDetail('转换后的消息格式:', JSON.stringify(yhtMessages));
+        const requestBody = {
+            domain: yhtConfig.domain,
+            messages: yhtMessages,
+            chatType: yhtConfig.chatType,
+            model: yhtConfig.model,
+            modelCategory: yhtConfig.modelCategory,
+            stream: yhtConfig.stream,
+            temperature: yhtConfig.temperature,
+            top_p: yhtConfig.top_p,
+            extraParams: yhtConfig.extraParams,
+            topic: yhtConfig.topic
+        };
+        debugCall('发送请求到AI服务');
+        const controller = new AbortController();
+        const { signal } = controller;
+        options?.abortSignal?.addEventListener('abort', ()=>{
+            controller.abort();
+        }, {
+            once: true
+        });
+        const timeout = 120000;
+        let response;
+        const timeoutPromise = (timeout)=>new Promise((_, reject)=>{
+            setTimeout(()=>{
+                reject(new Error(`Request timeout after ${timeout}ms`));
+            }, timeout);
+        });
+        try {
+            response = await Promise.race([
+                fetch(yhtConfig.baseURL, {
+                    method: 'POST',
+                    headers: {
+                        'Content-Type': 'application/json',
+                        yht_access_token: yhtConfig.yht_access_token || '',
+                        traceId: yhtConfig.traceId
+                    },
+                    body: JSON.stringify(requestBody),
+                    signal
+                }),
+                timeoutPromise(timeout)
+            ]).finally(()=>{
+                setTimeout(()=>{
+                    controller.abort();
+                }, 2000);
+            });
+        } catch (error) {
+            if ('AbortError' === error.name) console.log('Error1:', 'Request was aborted due to timeout');
+            else console.log('Error2:', error.message);
+            throw error;
+        }
+        if (!response.ok) {
+            const errorText = await response.text();
+            debugCall(`用友AI服务返回错误: ${response.status} ${errorText}`);
+            throw new Error(`用友AI服务返回错误: ${response.status} ${errorText}`);
+        }
+        const responseText = await response.text();
+        const result = JSON.parse(responseText);
+        const timeCost = Date.now() - startTime;
+        debugProfileStats(`用友模型调用完成,耗时: ${timeCost}ms, token使用: ${result.usage?.total_tokens || 'unknown'}`);
+        debugProfileDetail(`用友模型使用详情: ${JSON.stringify(result.usage)}`);
+        const content = result.result?.content || '';
+        debugCall(`用友AI服务响应内容: ${content}`);
+        const usage = {
+            prompt_tokens: result.usage?.prompt_tokens || 0,
+            completion_tokens: result.usage?.completion_tokens || 0,
+            total_tokens: result.usage?.total_tokens || 0,
+            cached_input: 0,
+            time_cost: timeCost,
+            model_name: yhtConfig.model,
+            model_description: `${yhtConfig.model} mode`,
+            intent: 'default',
+            request_id: result._request_id
+        };
+        const callAI_new = {
+            content: result.result?.content || '',
+            reasoning_content: result.result?.reasoning_content || '',
+            usage,
+            isStreamed: false
+        };
+        return callAI_new;
+    } catch (e) {
+        if ('AbortError' === e.name) throw new Error('请求取消');
+        console.error('用友AI调用错误:', e);
+        throw e;
+    }
+}
+async function callAI(messages, modelConfig, options) {
+    if (modelConfig.yht_access_token) return await yhtCallAI(messages, modelConfig, options);
+    if (isCodexAppServerProvider(modelConfig.openaiBaseURL)) return callAIWithCodexAppServer(messages, modelConfig, options);
+    const { completion, modelName, modelDescription, uiTarsModelVersion, modelFamily } = await createChatClient({
+        modelConfig
+    });
+    const effectiveTimeoutMs = resolveEffectiveTimeoutMs(modelConfig);
+    const extraBody = modelConfig.extraBody;
+    const maxTokens = globalConfigManager.getEnvConfigValueAsNumber(MIDSCENE_MODEL_MAX_TOKENS) ?? globalConfigManager.getEnvConfigValueAsNumber(OPENAI_MAX_TOKENS);
+    const debugCall = getDebug('ai:call');
+    const warnCall = getDebug('ai:call', {
+        console: true
+    });
+    const debugProfileStats = getDebug('ai:profile:stats');
+    const debugProfileDetail = getDebug('ai:profile:detail');
+    const startTime = Date.now();
+    const temperature = (()=>{
+        if ('gpt-5' === modelFamily) return void debugCall('temperature is ignored for gpt-5');
+        return modelConfig.temperature ?? 0;
+    })();
+    const isStreaming = options?.stream && options?.onChunk;
+    let content;
+    let accumulated = '';
+    let accumulatedReasoning = '';
+    let usage;
+    let timeCost;
+    let requestId;
+    const hasUsableText = (value)=>'string' == typeof value && value.trim().length > 0;
+    const buildUsageInfo = (usageData, requestId)=>{
+        if (!usageData) return;
+        const cachedInputTokens = usageData?.prompt_tokens_details?.cached_tokens;
+        return {
+            prompt_tokens: usageData.prompt_tokens ?? 0,
+            completion_tokens: usageData.completion_tokens ?? 0,
+            total_tokens: usageData.total_tokens ?? 0,
+            cached_input: cachedInputTokens ?? 0,
+            time_cost: timeCost ?? 0,
+            model_name: modelName,
+            model_description: modelDescription,
+            intent: modelConfig.intent,
+            request_id: requestId ?? void 0
+        };
+    };
+    const commonConfig = {
+        temperature,
+        stream: !!isStreaming,
+        max_tokens: maxTokens,
+        ...'qwen2.5-vl' === modelFamily ? {
+            vl_high_resolution_images: true
+        } : {}
+    };
+    if (isAutoGLM(modelFamily)) {
+        commonConfig.top_p = 0.85;
+        commonConfig.frequency_penalty = 0.2;
+    }
+    const mergedEnableReasoning = (()=>{
+        const normalizedDeepThink = options?.deepThink === 'unset' ? void 0 : options?.deepThink;
+        if (true === normalizedDeepThink) return true;
+        if (false === normalizedDeepThink) return false;
+        return modelConfig.reasoningEnabled;
+    })();
+    const { config: reasoningEffortConfig, debugMessage: reasoningEffortDebugMessage, warningMessage } = resolveReasoningConfig({
+        reasoningEnabled: mergedEnableReasoning,
+        reasoningEffort: modelConfig.reasoningEffort,
+        reasoningBudget: modelConfig.reasoningBudget,
+        modelFamily
+    });
+    if (reasoningEffortDebugMessage) debugCall(reasoningEffortDebugMessage);
+    if (warningMessage) warnCall(warningMessage);
+    const shouldUseOriginalImageDetail = shouldForceOriginalImageDetail(modelConfig);
+    const messagesWithImageDetail = (()=>{
+        if (!shouldUseOriginalImageDetail) return messages;
+        return messages.map((msg)=>{
+            if (!Array.isArray(msg.content)) return msg;
+            const content = msg.content.map((part)=>{
+                if (part && 'image_url' === part.type && part.image_url?.url) return {
+                    ...part,
+                    image_url: {
+                        ...part.image_url,
+                        detail: 'original'
+                    }
+                };
+                return part;
+            });
+            return {
+                ...msg,
+                content
+            };
+        });
+    })();
+    try {
+        debugCall(`sending ${isStreaming ? 'streaming ' : ''}request to ${modelName}`);
+        if (isStreaming) {
+            const { signal: streamSignal, cleanup: cleanupStreamSignal } = buildRequestAbortSignal(effectiveTimeoutMs, options?.abortSignal);
+            try {
+                const stream = await completion.create({
+                    model: modelName,
+                    messages: messagesWithImageDetail,
+                    ...commonConfig,
+                    ...reasoningEffortConfig,
+                    ...extraBody
+                }, {
+                    stream: true,
+                    signal: streamSignal
+                });
+                requestId = stream._request_id;
+                for await (const chunk of stream){
+                    const content = chunk.choices?.[0]?.delta?.content || '';
+                    const reasoning_content = chunk.choices?.[0]?.delta?.reasoning_content || '';
+                    if (chunk.usage) usage = chunk.usage;
+                    if (content || reasoning_content) {
+                        accumulated += content;
+                        accumulatedReasoning += reasoning_content;
+                        const chunkData = {
+                            content,
+                            reasoning_content,
+                            accumulated,
+                            isComplete: false,
+                            usage: void 0
+                        };
+                        options.onChunk(chunkData);
+                    }
+                    if (chunk.choices?.[0]?.finish_reason) {
+                        timeCost = Date.now() - startTime;
+                        if (!usage) {
+                            const estimatedTokens = Math.max(1, Math.floor(accumulated.length / 4));
+                            usage = {
+                                prompt_tokens: estimatedTokens,
+                                completion_tokens: estimatedTokens,
+                                total_tokens: 2 * estimatedTokens
+                            };
+                        }
+                        const finalChunk = {
+                            content: '',
+                            accumulated,
+                            reasoning_content: '',
+                            isComplete: true,
+                            usage: buildUsageInfo(usage, requestId)
+                        };
+                        options.onChunk(finalChunk);
+                        break;
+                    }
+                }
+            } finally{
+                cleanupStreamSignal();
+            }
+            content = accumulated;
+            debugProfileStats(`streaming model, ${modelName}, mode, ${modelFamily || 'default'}, cost-ms, ${timeCost}, temperature, ${temperature ?? ''}`);
+        } else {
+            const retryCount = modelConfig.retryCount ?? 1;
+            const retryInterval = modelConfig.retryInterval ?? 2000;
+            const maxAttempts = retryCount + 1;
+            let lastError;
+            for(let attempt = 1; attempt <= maxAttempts; attempt++){
+                const { signal: attemptSignal, cleanup: cleanupAttemptSignal } = buildRequestAbortSignal(effectiveTimeoutMs, options?.abortSignal);
+                try {
+                    const result = await completion.create({
+                        model: modelName,
+                        messages: messagesWithImageDetail,
+                        ...commonConfig,
+                        ...reasoningEffortConfig,
+                        ...extraBody
+                    }, {
+                        signal: attemptSignal
+                    });
+                    timeCost = Date.now() - startTime;
+                    debugProfileStats(`model, ${modelName}, mode, ${modelFamily || 'default'}, ui-tars-version, ${uiTarsModelVersion}, prompt-tokens, ${result.usage?.prompt_tokens || ''}, completion-tokens, ${result.usage?.completion_tokens || ''}, total-tokens, ${result.usage?.total_tokens || ''}, cost-ms, ${timeCost}, requestId, ${result._request_id || ''}, temperature, ${temperature ?? ''}`);
+                    debugProfileDetail(`model usage detail: ${JSON.stringify(result.usage)}`);
+                    if (!result.choices) throw new Error(`invalid response from LLM service: ${JSON.stringify(result)}`);
+                    content = result.choices[0].message.content;
+                    accumulatedReasoning = result.choices[0].message?.reasoning_content || '';
+                    usage = result.usage;
+                    requestId = result._request_id;
+                    if (!hasUsableText(content) && hasUsableText(accumulatedReasoning)) {
+                        warnCall('empty content from AI model, using reasoning content');
+                        content = accumulatedReasoning;
+                    }
+                    if (!hasUsableText(content)) throw new AIResponseParseError('empty content from AI model', JSON.stringify(result), buildUsageInfo(usage, requestId));
+                    break;
+                } catch (error) {
+                    lastError = error;
+                    const wasHardTimeout = isHardTimeoutError(lastError);
+                    if (wasHardTimeout) warnCall(`AI call hit hard timeout (${effectiveTimeoutMs}ms, attempt ${attempt}/${maxAttempts}, model ${modelName}, intent ${modelConfig.intent})`);
+                    if (options?.abortSignal?.aborted) break;
+                    if (attempt < maxAttempts) {
+                        warnCall(`AI call failed (attempt ${attempt}/${maxAttempts}), retrying in ${retryInterval}ms... Error: ${lastError.message}`);
+                        await new Promise((resolve)=>setTimeout(resolve, retryInterval));
+                    }
+                } finally{
+                    cleanupAttemptSignal();
+                }
+            }
+            if (!content) throw lastError;
+        }
+        debugCall(`response reasoning content: ${accumulatedReasoning}`);
+        debugCall(`response content: ${content}`);
+        if (isStreaming && !usage) {
+            const estimatedTokens = Math.max(1, Math.floor((content || '').length / 4));
+            usage = {
+                prompt_tokens: estimatedTokens,
+                completion_tokens: estimatedTokens,
+                total_tokens: 2 * estimatedTokens
+            };
+        }
+        return {
+            content: content || '',
+            reasoning_content: accumulatedReasoning || void 0,
+            usage: buildUsageInfo(usage, requestId),
+            isStreamed: !!isStreaming
+        };
+    } catch (e) {
+        warnCall('call AI error', e);
+        if (e instanceof AIResponseParseError) throw e;
+        const newError = new Error(`failed to call ${isStreaming ? 'streaming ' : ''}AI model service (${modelName}): ${e.message}\nTrouble shooting: https://midscenejs.com/model-provider.html`, {
+            cause: e
+        });
+        throw newError;
+    }
+}
+async function callAIWithObjectResponse(messages, modelConfig, options) {
+    const response = await callAI(messages, modelConfig, {
+        deepThink: options?.deepThink,
+        abortSignal: options?.abortSignal
+    });
+    assert(response, 'empty response');
+    const modelFamily = modelConfig.modelFamily;
+    const jsonContent = safeParseJson(response.content, modelFamily);
+    if ('object' != typeof jsonContent) throw new AIResponseParseError(`failed to parse json response from model (${modelConfig.modelName}): ${response.content}`, response.content, response.usage);
+    return {
+        content: jsonContent,
+        contentString: response.content,
+        usage: response.usage,
+        reasoning_content: response.reasoning_content
+    };
+}
+async function callAIWithStringResponse(msgs, modelConfig, options) {
+    const { content, usage } = await callAI(msgs, modelConfig, {
+        abortSignal: options?.abortSignal
+    });
+    return {
+        content,
+        usage
+    };
+}
+function extractJSONFromCodeBlock(response) {
+    try {
+        const jsonMatch = response.match(/^\s*(\{[\s\S]*\})\s*$/);
+        if (jsonMatch) return jsonMatch[1];
+        const codeBlockMatch = response.match(/```(?:json)?\s*(\{[\s\S]*?\})\s*```/);
+        if (codeBlockMatch) return codeBlockMatch[1];
+        const jsonLikeMatch = response.match(/\{[\s\S]*\}/);
+        if (jsonLikeMatch) return jsonLikeMatch[0];
+    } catch {}
+    return response;
+}
+function preprocessDoubaoBboxJson(input) {
+    if (input.includes('bbox')) while(/\d+\s+\d+/.test(input))input = input.replace(/(\d+)\s+(\d+)/g, '$1,$2');
+    return input;
+}
+function resolveReasoningConfig({ reasoningEnabled, reasoningEffort, reasoningBudget, modelFamily }) {
+    if (void 0 === reasoningEnabled && !reasoningEffort && void 0 === reasoningBudget) return {
+        config: {}
+    };
+    const debugMessages = [];
+    const config = {};
+    if ('qwen3-vl' === modelFamily || 'qwen3.5' === modelFamily || 'qwen3.6' === modelFamily) {
+        if (void 0 !== reasoningEnabled) {
+            config.enable_thinking = reasoningEnabled;
+            debugMessages.push(`enable_thinking=${reasoningEnabled}`);
+        }
+        if (void 0 !== reasoningBudget) {
+            config.thinking_budget = reasoningBudget;
+            debugMessages.push(`thinking_budget=${reasoningBudget}`);
+        }
+    } else if ('doubao-vision' === modelFamily || 'doubao-seed' === modelFamily) {
+        if (void 0 !== reasoningEnabled) {
+            config.thinking = {
+                type: reasoningEnabled ? 'enabled' : 'disabled'
+            };
+            debugMessages.push(`thinking.type=${reasoningEnabled ? 'enabled' : 'disabled'}`);
+        }
+        if (reasoningEffort) {
+            config.reasoning_effort = reasoningEffort;
+            debugMessages.push(`reasoning_effort="${reasoningEffort}"`);
+        }
+    } else if ('glm-v' === modelFamily) {
+        if (void 0 !== reasoningEnabled) {
+            config.thinking = {
+                type: reasoningEnabled ? 'enabled' : 'disabled'
+            };
+            debugMessages.push(`thinking.type=${reasoningEnabled ? 'enabled' : 'disabled'}`);
+        }
+    } else if ('gpt-5' === modelFamily) {
+        config.reasoning = void 0;
+        debugMessages.push('reasoning config is ignored for gpt-5');
+    } else if (!modelFamily) return {
+        config: {},
+        debugMessage: 'reasoning config ignored: no model_family configured',
+        warningMessage: 'Reasoning config is set but no model_family is configured. Set MIDSCENE_MODEL_FAMILY to enable reasoning config pass-through.'
+    };
+    else if (reasoningEffort) {
+        config.reasoning_effort = reasoningEffort;
+        debugMessages.push(`reasoning_effort="${reasoningEffort}"`);
+    }
+    return {
+        config,
+        debugMessage: debugMessages.length ? `reasoning config for ${modelFamily}: ${debugMessages.join(', ')}` : void 0
+    };
+}
+function normalizeJsonObject(obj) {
+    if (null == obj) return obj;
+    if (Array.isArray(obj)) return obj.map((item)=>normalizeJsonObject(item));
+    if ('object' == typeof obj) {
+        const normalized = {};
+        for (const [key, value] of Object.entries(obj)){
+            const trimmedKey = key.trim();
+            let normalizedValue = normalizeJsonObject(value);
+            if ('string' == typeof normalizedValue) normalizedValue = normalizedValue.trim();
+            normalized[trimmedKey] = normalizedValue;
+        }
+        return normalized;
+    }
+    if ('string' == typeof obj) return obj.trim();
+    return obj;
+}
+function safeParseJson(input, modelFamily) {
+    const cleanJsonString = extractJSONFromCodeBlock(input);
+    if (cleanJsonString?.match(/\((\d+),(\d+)\)/)) return cleanJsonString.match(/\((\d+),(\d+)\)/)?.slice(1).map(Number);
+    let parsed;
+    let lastError;
+    try {
+        parsed = JSON.parse(cleanJsonString);
+        return normalizeJsonObject(parsed);
+    } catch (error) {
+        lastError = error;
+    }
+    try {
+        parsed = JSON.parse(jsonrepair(cleanJsonString));
+        return normalizeJsonObject(parsed);
+    } catch (error) {
+        lastError = error;
+    }
+    if ('doubao-vision' === modelFamily || 'doubao-seed' === modelFamily || isUITars(modelFamily)) {
+        const jsonString = preprocessDoubaoBboxJson(cleanJsonString);
+        try {
+            parsed = JSON.parse(jsonrepair(jsonString));
+            return normalizeJsonObject(parsed);
+        } catch (error) {
+            lastError = error;
+        }
+    }
+    throw Error(`failed to parse LLM response into JSON. Error - ${String(lastError ?? 'unknown error')}. Response - \n ${input}`);
+}
+export { AIResponseParseError, callAI, callAIWithObjectResponse, callAIWithStringResponse, extractJSONFromCodeBlock, preprocessDoubaoBboxJson, resolveReasoningConfig, safeParseJson, yhtCallAI };
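
For orientation, the sketch below shows how the exported callAI and callAIWithObjectResponse helpers from this file might be invoked. It is a minimal, hypothetical example and is not part of the published package contents above: the import specifier and the concrete modelConfig values are assumptions (only the field names modelName, openaiBaseURL, openaiApiKey, and modelFamily are taken from the destructuring in createChatClient), and the exact types should be confirmed against package/dist/types/ai-model/service-caller/index.d.ts.

// Hypothetical usage sketch; callAI(messages, modelConfig, options) resolves to
// { content, reasoning_content, usage, isStreamed } per the return statements above.
import { callAI, callAIWithObjectResponse } from "@godscene/core"; // import path assumed

const modelConfig = {
    modelName: "gpt-4o-mini",                   // assumed model name
    openaiBaseURL: "https://api.openai.com/v1", // assumed endpoint
    openaiApiKey: process.env.OPENAI_API_KEY,
};

const messages = [
    { role: "system", content: "You are a UI analysis assistant." },
    { role: "user", content: 'Reply with the JSON object {"ok": true}.' },
];

// Plain-text call: returns the raw model content plus token usage.
const { content, usage } = await callAI(messages, modelConfig, {});
console.log(content, usage?.total_tokens);

// Object call: parses the response via safeParseJson/jsonrepair as shown in the diff.
const { content: parsed } = await callAIWithObjectResponse(messages, modelConfig, {});
console.log(parsed);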