@jsonstudio/llms 0.6.375 → 0.6.467

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/conversion/codecs/gemini-openai-codec.js +15 -1
  2. package/dist/conversion/compat/actions/iflow-web-search.d.ts +18 -0
  3. package/dist/conversion/compat/actions/iflow-web-search.js +87 -0
  4. package/dist/conversion/compat/profiles/chat-glm.json +4 -0
  5. package/dist/conversion/compat/profiles/chat-iflow.json +5 -1
  6. package/dist/conversion/hub/pipeline/compat/compat-pipeline-executor.js +6 -0
  7. package/dist/conversion/hub/pipeline/compat/compat-types.d.ts +2 -0
  8. package/dist/conversion/hub/pipeline/hub-pipeline.js +5 -1
  9. package/dist/conversion/hub/pipeline/session-identifiers.d.ts +9 -0
  10. package/dist/conversion/hub/pipeline/session-identifiers.js +76 -0
  11. package/dist/conversion/hub/pipeline/stages/resp_inbound/resp_inbound_stage1_sse_decode/index.js +31 -2
  12. package/dist/conversion/hub/process/chat-process.js +89 -25
  13. package/dist/conversion/responses/responses-openai-bridge.js +75 -4
  14. package/dist/conversion/shared/anthropic-message-utils.js +41 -6
  15. package/dist/conversion/shared/errors.d.ts +20 -0
  16. package/dist/conversion/shared/errors.js +28 -0
  17. package/dist/conversion/shared/responses-conversation-store.js +30 -3
  18. package/dist/conversion/shared/responses-output-builder.js +68 -6
  19. package/dist/filters/special/request-toolcalls-stringify.d.ts +13 -0
  20. package/dist/filters/special/request-toolcalls-stringify.js +103 -3
  21. package/dist/filters/special/response-tool-text-canonicalize.d.ts +16 -0
  22. package/dist/filters/special/response-tool-text-canonicalize.js +27 -3
  23. package/dist/router/virtual-router/classifier.js +4 -2
  24. package/dist/router/virtual-router/engine.d.ts +30 -0
  25. package/dist/router/virtual-router/engine.js +600 -42
  26. package/dist/router/virtual-router/provider-registry.d.ts +15 -0
  27. package/dist/router/virtual-router/provider-registry.js +40 -0
  28. package/dist/router/virtual-router/routing-instructions.d.ts +34 -0
  29. package/dist/router/virtual-router/routing-instructions.js +383 -0
  30. package/dist/router/virtual-router/sticky-session-store.d.ts +3 -0
  31. package/dist/router/virtual-router/sticky-session-store.js +110 -0
  32. package/dist/router/virtual-router/tool-signals.js +0 -22
  33. package/dist/router/virtual-router/types.d.ts +35 -0
  34. package/dist/servertool/engine.js +42 -1
  35. package/dist/servertool/handlers/web-search.js +157 -4
  36. package/dist/servertool/types.d.ts +6 -0
  37. package/package.json +1 -1
@@ -4,6 +4,7 @@ import { normalizeChatMessageContent } from '../shared/chat-output-normalizer.js
  import { mapBridgeToolsToChat } from '../shared/tool-mapping.js';
  import { prepareGeminiToolsForBridge } from '../shared/gemini-tool-utils.js';
  import { registerResponsesReasoning, consumeResponsesReasoning, registerResponsesOutputTextMeta, consumeResponsesOutputTextMeta, consumeResponsesPayloadSnapshot, registerResponsesPayloadSnapshot, consumeResponsesPassthrough, registerResponsesPassthrough } from '../shared/responses-reasoning-registry.js';
+ import { ProviderProtocolError } from '../shared/errors.js';
  const DUMMY_THOUGHT_SIGNATURE = 'skip_thought_signature_validator';
  function isObject(v) {
  return !!v && typeof v === 'object' && !Array.isArray(v);
@@ -179,6 +180,8 @@ export function buildOpenAIChatFromGeminiResponse(payload) {
  const primary = candidates[0] && typeof candidates[0] === 'object' ? candidates[0] : {};
  const content = primary?.content || {};
  const role = mapGeminiRoleToChat(content.role);
+ const rawFinishReason = primary?.finishReason;
+ const finishReasonUpper = typeof rawFinishReason === 'string' ? rawFinishReason.trim().toUpperCase() : '';
  const parts = Array.isArray(content.parts) ? content.parts : [];
  const textParts = [];
  const reasoningParts = [];
@@ -318,12 +321,23 @@ export function buildOpenAIChatFromGeminiResponse(payload) {
  }
  }
  const hasToolCalls = toolCalls.length > 0;
+ // If Gemini returns UNEXPECTED_TOOL_CALL and there are no valid tool calls to continue with,
+ // the upstream tool protocol/declarations do not match what the model expects; treat this as a
+ // provider-level error (surfaced via ProviderErrorCenter / HTTP 4xx/5xx) rather than a normal stop.
+ if (!hasToolCalls && finishReasonUpper === 'UNEXPECTED_TOOL_CALL') {
+ throw new ProviderProtocolError('Gemini returned finishReason=UNEXPECTED_TOOL_CALL; this usually indicates an incompatible or unexpected tool invocation.', {
+ code: 'TOOL_PROTOCOL_ERROR',
+ protocol: 'gemini-chat',
+ providerType: 'gemini',
+ details: { finishReason: rawFinishReason }
+ });
+ }
  const finish_reason = (() => {
  // If the model is emitting tool calls, treat this turn as a tool_calls
  // completion so downstream tool governance can continue the loop.
  if (hasToolCalls)
  return 'tool_calls';
- const fr = String(primary?.finishReason || '').toUpperCase();
+ const fr = finishReasonUpper;
  if (fr === 'MAX_TOKENS')
  return 'length';
  if (fr === 'STOP')
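
A minimal sketch of how a caller might react to the new UNEXPECTED_TOOL_CALL path (illustrative only: it assumes the constructor options such as `code` are readable on the error instance, and `handleProviderFailure` plus the relative import paths are hypothetical):

```js
import { buildOpenAIChatFromGeminiResponse } from './gemini-openai-codec.js';
import { ProviderProtocolError } from '../shared/errors.js';

function convertGeminiPayload(geminiPayload) {
  try {
    return buildOpenAIChatFromGeminiResponse(geminiPayload);
  } catch (err) {
    if (err instanceof ProviderProtocolError && err.code === 'TOOL_PROTOCOL_ERROR') {
      // Provider-level failure: report it explicitly (HTTP 4xx/5xx) instead of
      // pretending the turn ended with a normal stop.
      return handleProviderFailure(err); // hypothetical error handler
    }
    throw err;
  }
}
```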
@@ -0,0 +1,18 @@
+ import type { JsonObject } from '../../hub/types/json.js';
+ import type { AdapterContext } from '../../hub/types/chat-envelope.js';
+ /**
+ * IFlow web_search request adaptation (applied to the openai-chat compatible payload):
+ *
+ * - Only takes effect when routeId starts with `web_search` (from AdapterContext.routeId);
+ * - Reads the top-level `web_search` helper object `{ query, recency, count, engine }`;
+ * - When query is empty or invalid: removes the helper and passes the payload through unchanged;
+ * - When query is valid: builds a standard OpenAI function tool:
+ *   - name is fixed to `web_search`;
+ *   - parameters contains the three fields query/recency/count;
+ *   - the generated function tool is written into the `tools` array and the top-level `web_search` is removed.
+ *
+ * Notes:
+ * - The top-level `web_search` only appears in the servertool second-hop request and is used to drive the backend search;
+ * - Client-side tool calls keep using the unified `web_search` function tool schema.
+ */
+ export declare function applyIflowWebSearchRequestTransform(payload: JsonObject, adapterContext?: AdapterContext): JsonObject;
@@ -0,0 +1,87 @@
+ const isRecord = (value) => typeof value === 'object' && value !== null && !Array.isArray(value);
+ const DEBUG_IFLOW_WEB_SEARCH = (process.env.ROUTECODEX_DEBUG_IFLOW_WEB_SEARCH || '').trim() === '1';
+ /**
+ * IFlow web_search request adaptation (applied to the openai-chat compatible payload):
+ *
+ * - Only takes effect when routeId starts with `web_search` (from AdapterContext.routeId);
+ * - Reads the top-level `web_search` helper object `{ query, recency, count, engine }`;
+ * - When query is empty or invalid: removes the helper and passes the payload through unchanged;
+ * - When query is valid: builds a standard OpenAI function tool:
+ *   - name is fixed to `web_search`;
+ *   - parameters contains the three fields query/recency/count;
+ *   - the generated function tool is written into the `tools` array and the top-level `web_search` is removed.
+ *
+ * Notes:
+ * - The top-level `web_search` only appears in the servertool second-hop request and is used to drive the backend search;
+ * - Client-side tool calls keep using the unified `web_search` function tool schema.
+ */
+ export function applyIflowWebSearchRequestTransform(payload, adapterContext) {
+ const routeId = typeof adapterContext?.routeId === 'string' ? adapterContext.routeId : '';
+ if (!routeId || !routeId.toLowerCase().startsWith('web_search')) {
+ return payload;
+ }
+ const root = structuredClone(payload);
+ const webSearchRaw = root.web_search;
+ if (!isRecord(webSearchRaw)) {
+ return root;
+ }
+ const webSearch = webSearchRaw;
+ const queryValue = webSearch.query;
+ const recencyValue = webSearch.recency;
+ const countValue = webSearch.count;
+ const query = typeof queryValue === 'string' ? queryValue.trim() : '';
+ const recency = typeof recencyValue === 'string' ? recencyValue.trim() : undefined;
+ let count;
+ if (typeof countValue === 'number' && Number.isFinite(countValue)) {
+ const normalized = Math.floor(countValue);
+ if (normalized >= 1 && normalized <= 50) {
+ count = normalized;
+ }
+ }
+ if (!query) {
+ // No meaningful search query, drop the helper object and passthrough.
+ delete root.web_search;
+ return root;
+ }
+ const tool = {
+ type: 'function',
+ function: {
+ name: 'web_search',
+ description: 'Perform web search over the public internet and return up-to-date results.',
+ parameters: {
+ type: 'object',
+ properties: {
+ query: {
+ type: 'string',
+ description: 'Search query string.'
+ },
+ recency: {
+ type: 'string',
+ description: 'Optional recency filter such as "day", "week", or "month".'
+ },
+ count: {
+ type: 'integer',
+ minimum: 1,
+ maximum: 50,
+ description: 'Maximum number of search results to retrieve (1-50).'
+ }
+ },
+ required: ['query']
+ }
+ }
+ };
+ root.tools = [tool];
+ delete root.web_search;
+ if (DEBUG_IFLOW_WEB_SEARCH) {
+ try {
+ // eslint-disable-next-line no-console
+ console.log('\x1b[38;5;27m[compat][iflow_web_search_request] applied web_search transform ' +
+ `query=${JSON.stringify(query).slice(0, 200)} ` +
+ `recency=${String(recency ?? '')}\x1b[0m`);
+ }
+ catch {
+ // logging best-effort
+ }
+ }
+ return root;
+ }
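
For orientation, a small usage sketch of the transform added above (illustrative only: the import path, model name, routeId value, and payload values are assumptions; the field names follow the code):

```js
import { applyIflowWebSearchRequestTransform } from './iflow-web-search.js';

const payload = {
  model: 'iflow-pro', // hypothetical model id
  messages: [{ role: 'user', content: 'latest TypeScript release notes' }],
  web_search: { query: 'latest TypeScript release notes', recency: 'week', count: 5 }
};

// Only applies when the route id starts with 'web_search'.
const out = applyIflowWebSearchRequestTransform(payload, { routeId: 'web_search_default' });
// out.web_search is removed; out.tools now holds a single OpenAI function tool named
// 'web_search' whose parameters schema exposes query/recency/count.
```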
@@ -5,6 +5,10 @@
  "mappings": [
  { "action": "snapshot", "phase": "compat-pre" },
  { "action": "dto_unwrap" },
+ {
+ "action": "remove",
+ "path": "parallel_tool_calls"
+ },
  {
  "action": "glm_image_content"
  },
@@ -34,7 +34,8 @@
  "tools",
  "tool_choice",
  "stop",
- "response_format"
+ "response_format",
+ "web_search"
  ],
  "messages": {
  "allowedRoles": ["system", "user", "assistant", "tool"],
@@ -102,6 +103,9 @@
  ]
  },
  { "action": "tool_schema_sanitize", "mode": "glm_shell" },
+ {
+ "action": "iflow_web_search_request"
+ },
  { "action": "snapshot", "phase": "compat-post" },
  { "action": "dto_rewrap" }
  ]
@@ -12,6 +12,7 @@ import { applyQwenRequestTransform, applyQwenResponseTransform } from '../../../
  import { extractGlmToolMarkup } from '../../../compat/actions/glm-tool-extraction.js';
  import { applyGlmWebSearchRequestTransform } from '../../../compat/actions/glm-web-search.js';
  import { applyGeminiWebSearchCompat } from '../../../compat/actions/gemini-web-search.js';
+ import { applyIflowWebSearchRequestTransform } from '../../../compat/actions/iflow-web-search.js';
  import { applyGlmImageContentTransform } from '../../../compat/actions/glm-image-content.js';
  import { applyGlmVisionPromptTransform } from '../../../compat/actions/glm-vision-prompt.js';
  const RATE_LIMIT_ERROR = 'ERR_COMPAT_RATE_LIMIT_DETECTED';
@@ -171,6 +172,11 @@ function applyMapping(root, mapping, state) {
  replaceRoot(root, applyGeminiWebSearchCompat(root, state.adapterContext));
  }
  break;
+ case 'iflow_web_search_request':
+ if (state.direction === 'request') {
+ replaceRoot(root, applyIflowWebSearchRequestTransform(root, state.adapterContext));
+ }
+ break;
  case 'glm_image_content':
  if (state.direction === 'request') {
  replaceRoot(root, applyGlmImageContentTransform(root));
@@ -106,6 +106,8 @@ export type MappingInstruction = {
  action: 'glm_vision_prompt';
  } | {
  action: 'gemini_web_search_request';
+ } | {
+ action: 'iflow_web_search_request';
  };
  export type FilterInstruction = {
  action: 'rate_limit_text';
@@ -21,6 +21,7 @@ import { runReqProcessStage2RouteSelect } from './stages/req_process/req_process
  import { runReqOutboundStage1SemanticMap } from './stages/req_outbound/req_outbound_stage1_semantic_map/index.js';
  import { runReqOutboundStage2FormatBuild } from './stages/req_outbound/req_outbound_stage2_format_build/index.js';
  import { runReqOutboundStage3Compat } from './stages/req_outbound/req_outbound_stage3_compat/index.js';
+ import { extractSessionIdentifiersFromMetadata } from './session-identifiers.js';
  export class HubPipeline {
  routerEngine;
  config;
@@ -123,6 +124,7 @@ export class HubPipeline {
  const stdMetadata = workingRequest?.metadata;
  const serverToolRequired = stdMetadata?.webSearchEnabled === true ||
  stdMetadata?.serverToolRequired === true;
+ const sessionIdentifiers = extractSessionIdentifiersFromMetadata(normalized.metadata);
  const metadataInput = {
  requestId: normalized.id,
  entryEndpoint: normalized.entryEndpoint,
@@ -133,7 +135,9 @@ export class HubPipeline {
  routeHint: normalized.routeHint,
  stage: normalized.stage,
  responsesResume: responsesResume,
- ...(serverToolRequired ? { serverToolRequired: true } : {})
+ ...(serverToolRequired ? { serverToolRequired: true } : {}),
+ ...(sessionIdentifiers.sessionId ? { sessionId: sessionIdentifiers.sessionId } : {}),
+ ...(sessionIdentifiers.conversationId ? { conversationId: sessionIdentifiers.conversationId } : {})
  };
  const routing = runReqProcessStage2RouteSelect({
  routerEngine: this.routerEngine,
@@ -0,0 +1,9 @@
+ export interface SessionIdentifiers {
+ sessionId?: string;
+ conversationId?: string;
+ }
+ export declare function extractSessionIdentifiersFromMetadata(metadata: Record<string, unknown> | undefined): SessionIdentifiers;
+ export declare function coerceClientHeaders(raw: unknown): Record<string, string> | undefined;
+ export declare function pickHeader(headers: Record<string, string>, candidates: string[]): string | undefined;
+ export declare function findHeaderValue(headers: Record<string, string>, target: string): string | undefined;
+ export declare function normalizeHeaderKey(value: string): string;
@@ -0,0 +1,76 @@
+ export function extractSessionIdentifiersFromMetadata(metadata) {
+ const directSession = normalizeIdentifier(metadata?.sessionId);
+ const directConversation = normalizeIdentifier(metadata?.conversationId);
+ const headers = coerceClientHeaders(metadata?.clientHeaders);
+ const sessionId = directSession ||
+ (headers ? pickHeader(headers, ['session_id', 'session-id', 'x-session-id', 'anthropic-session-id']) : undefined);
+ const conversationId = directConversation ||
+ (headers
+ ? pickHeader(headers, [
+ 'conversation_id',
+ 'conversation-id',
+ 'x-conversation-id',
+ 'anthropic-conversation-id',
+ 'openai-conversation-id'
+ ])
+ : undefined);
+ return {
+ ...(sessionId ? { sessionId } : {}),
+ ...(conversationId ? { conversationId } : {})
+ };
+ }
+ export function coerceClientHeaders(raw) {
+ if (!raw || typeof raw !== 'object') {
+ return undefined;
+ }
+ const normalized = {};
+ for (const [key, value] of Object.entries(raw)) {
+ if (typeof value === 'string' && value.trim()) {
+ normalized[key] = value;
+ }
+ }
+ return Object.keys(normalized).length ? normalized : undefined;
+ }
+ export function pickHeader(headers, candidates) {
+ for (const name of candidates) {
+ const value = findHeaderValue(headers, name);
+ if (value) {
+ return value;
+ }
+ }
+ return undefined;
+ }
+ export function findHeaderValue(headers, target) {
+ const lowered = typeof target === 'string' ? target.toLowerCase() : '';
+ if (!lowered) {
+ return undefined;
+ }
+ const normalizedTarget = normalizeHeaderKey(lowered);
+ for (const [key, value] of Object.entries(headers)) {
+ if (typeof value !== 'string') {
+ continue;
+ }
+ const trimmed = value.trim();
+ if (!trimmed) {
+ continue;
+ }
+ const loweredKey = key.toLowerCase();
+ if (loweredKey === lowered) {
+ return trimmed;
+ }
+ if (normalizeHeaderKey(loweredKey) === normalizedTarget) {
+ return trimmed;
+ }
+ }
+ return undefined;
+ }
+ export function normalizeHeaderKey(value) {
+ return value.replace(/[\s_-]+/g, '');
+ }
+ function normalizeIdentifier(value) {
+ if (typeof value !== 'string') {
+ return undefined;
+ }
+ const trimmed = value.trim();
+ return trimmed || undefined;
+ }
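
A short usage sketch for the new session-identifier helper (illustrative only: header names and values are made up; the candidate lists and normalization follow the code above):

```js
import { extractSessionIdentifiersFromMetadata } from './session-identifiers.js';

const ids = extractSessionIdentifiersFromMetadata({
  clientHeaders: {
    'X-Session-Id': 'sess-123',   // lowercases to 'x-session-id', a listed candidate
    'Conversation_ID': 'conv-456' // lowercases to 'conversation_id', a listed candidate
  }
});
// Explicit metadata.sessionId / metadata.conversationId win when present; otherwise headers
// are compared case-insensitively, with '-', '_' and whitespace stripped by normalizeHeaderKey,
// so ids resolves to { sessionId: 'sess-123', conversationId: 'conv-456' }.
```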
@@ -1,5 +1,17 @@
  import { defaultSseCodecRegistry } from '../../../../../../sse/index.js';
  import { recordStage } from '../../../stages/utils.js';
+ import { ProviderProtocolError } from '../../../../../shared/errors.js';
+ function resolveProviderType(protocol) {
+ if (protocol === 'openai-chat')
+ return 'openai';
+ if (protocol === 'openai-responses')
+ return 'responses';
+ if (protocol === 'anthropic-messages')
+ return 'anthropic';
+ if (protocol === 'gemini-chat')
+ return 'gemini';
+ return undefined;
+ }
  export async function runRespInboundStage1SseDecode(options) {
  const stream = extractSseStream(options.payload);
  if (!stream) {
@@ -15,7 +27,15 @@ export async function runRespInboundStage1SseDecode(options) {
  reason: 'protocol_unsupported',
  protocol: options.providerProtocol
  });
- throw new Error(`[resp_inbound_stage1_sse_decode] Protocol ${options.providerProtocol} does not support SSE decoding`);
+ throw new ProviderProtocolError(`[resp_inbound_stage1_sse_decode] Protocol ${options.providerProtocol} does not support SSE decoding`, {
+ code: 'SSE_DECODE_ERROR',
+ protocol: options.providerProtocol,
+ providerType: resolveProviderType(options.providerProtocol),
+ details: {
+ phase: 'resp_inbound_stage1_sse_decode',
+ reason: 'protocol_unsupported'
+ }
+ });
  }
  try {
  const codec = defaultSseCodecRegistry.get(options.providerProtocol);
@@ -38,7 +58,16 @@ export async function runRespInboundStage1SseDecode(options) {
  protocol: options.providerProtocol,
  error: message
  });
- throw new Error(`[resp_inbound_stage1_sse_decode] Failed to decode SSE payload for protocol ${options.providerProtocol}: ${message}`);
+ throw new ProviderProtocolError(`[resp_inbound_stage1_sse_decode] Failed to decode SSE payload for protocol ${options.providerProtocol}: ${message}`, {
+ code: 'SSE_DECODE_ERROR',
+ protocol: options.providerProtocol,
+ providerType: resolveProviderType(options.providerProtocol),
+ details: {
+ phase: 'resp_inbound_stage1_sse_decode',
+ requestId: options.adapterContext.requestId,
+ message
+ }
+ });
  }
  }
  function supportsSseProtocol(protocol) {
@@ -326,9 +326,9 @@ function maybeInjectWebSearchTool(request, metadata) {
  const injectPolicy = rawConfig.injectPolicy === 'always' || rawConfig.injectPolicy === 'selective'
  ? rawConfig.injectPolicy
  : 'selective';
+ const intent = detectWebSearchIntent(request);
  if (injectPolicy === 'selective') {
- const hasExplicitIntent = detectWebSearchIntent(request);
- if (!hasExplicitIntent) {
+ if (!intent.hasIntent) {
  // When the most recent user message has no obvious "web search" keywords,
  // but the previous assistant turn's tool calls are already search-like (e.g. web_search),
  // still treat this as a web_search continuation scenario and force-inject the web_search tool,
@@ -351,9 +351,35 @@ function maybeInjectWebSearchTool(request, metadata) {
  return typeof fn?.name === 'string' && fn.name.trim() === 'web_search';
  });
  if (hasWebSearch) {
- return request;
+ const nextMetadata = {
+ ...(request.metadata ?? {}),
+ webSearchEnabled: true
+ };
+ return {
+ ...request,
+ metadata: nextMetadata
+ };
+ }
+ let engines = rawConfig.engines.filter((engine) => typeof engine?.id === 'string' && !!engine.id.trim() && !engine.serverToolsDisabled);
+ // When the user explicitly asks for "Google search", only expose Gemini / Antigravity style search backends:
+ // - providerKey starts with gemini-cli. or antigravity.;
+ // - or the engine id contains "google" (backwards compatible with configs that mark the Google engine via its id).
+ if (intent.googlePreferred) {
+ const preferred = engines.filter((engine) => {
+ const id = engine.id.trim().toLowerCase();
+ const providerKey = (engine.providerKey || '').toLowerCase();
+ if (providerKey.startsWith('gemini-cli.') || providerKey.startsWith('antigravity.')) {
+ return true;
+ }
+ if (id.includes('google')) {
+ return true;
+ }
+ return false;
+ });
+ if (preferred.length > 0) {
+ engines = preferred;
+ }
  }
- const engines = rawConfig.engines.filter((engine) => typeof engine?.id === 'string' && !!engine.id.trim() && !engine.serverToolsDisabled);
  if (!engines.length) {
  return request;
  }
@@ -418,7 +444,7 @@
  function detectWebSearchIntent(request) {
  const messages = Array.isArray(request.messages) ? request.messages : [];
  if (!messages.length) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
  }
  // Walk back from the end to find the most recent user message, skipping tool / assistant tool-call turns,
  // so that in Responses / multi-turn tool-call scenarios the intent is still judged from the latest user input.
@@ -431,7 +457,7 @@
  }
  }
  if (!lastUser) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
  }
  // Supports multimodal content: it may be a plain text string or a segmented array containing image_url parts.
  let content = '';
@@ -455,34 +481,72 @@ function detectWebSearchIntent(request) {
  content = texts.join('\n');
  }
  if (!content) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
+ }
+ // Hard 100% keywords (Chinese): an explicit "谷歌搜索 / 谷歌一下 / 百度一下" is always treated as search intent.
+ // Of these, "谷歌搜索 / 谷歌一下" prefers the Google/Gemini search backends.
+ const zh = content;
+ const hasGoogleExplicit = zh.includes('谷歌搜索') ||
+ zh.includes('谷歌一下');
+ const hasBaiduExplicit = zh.includes('百度一下');
+ if (hasGoogleExplicit || hasBaiduExplicit) {
+ // Both the Google and Baidu keywords first try to route to a "Google search" engine;
+ // only when the Virtual Router has no Google-related engine configured do we fall back to plain web search.
+ return {
+ hasIntent: true,
+ googlePreferred: true
+ };
  }
+ // English intent: simple substring match on lowercased text.
  const text = content.toLowerCase();
- const keywords = [
- // English
+ // 1) Direct patterns like "web search" / "internet search" / "/search".
+ const englishDirect = [
  'web search',
  'web_search',
  'websearch',
  'internet search',
  'search the web',
- 'online search',
- 'search online',
- 'search on the internet',
- 'search the internet',
  'web-search',
- 'online-search',
  'internet-search',
- // Chinese
- '联网搜索',
- '网络搜索',
- '上网搜索',
- '网上搜索',
- '网上查',
- '网上查找',
- '上网查',
- '上网搜',
- // Command-style
  '/search'
  ];
- return keywords.some((keyword) => text.includes(keyword.toLowerCase()));
+ if (englishDirect.some((keyword) => text.includes(keyword))) {
+ return { hasIntent: true, googlePreferred: text.includes('google') };
+ }
+ // 2) Verb + noun combinations, similar to the Chinese rule:
+ // - verb: search / find / look up / look for / google
+ // - noun: web / internet / online / news / information / info / report / reports / article / articles
+ const verbTokensEn = ['search', 'find', 'look up', 'look for', 'google'];
+ const nounTokensEn = [
+ 'web',
+ 'internet',
+ 'online',
+ 'news',
+ 'information',
+ 'info',
+ 'report',
+ 'reports',
+ 'article',
+ 'articles'
+ ];
+ const hasVerbEn = verbTokensEn.some((token) => text.includes(token));
+ const hasNounEn = nounTokensEn.some((token) => text.includes(token));
+ if (hasVerbEn && hasNounEn) {
+ return { hasIntent: true, googlePreferred: text.includes('google') };
+ }
+ // Chinese rules:
+ // 1. If the text contains '上网' ("go online"), it is an immediate hit (e.g. '帮我上网看看今天的新闻').
+ // 2. Otherwise, any verb from 搜索/查找/搜 ("search / look up") combined with any noun from 网络/联网/新闻/信息/报道 ("network / online / news / information / report") also counts as web-search intent.
+ const chineseText = content; // Chinese has no letter case, so the original text is used directly.
+ if (chineseText.includes('上网')) {
+ return { hasIntent: true, googlePreferred: false };
+ }
+ const verbTokens = ['搜索', '查找', '搜'];
+ const nounTokens = ['网络', '联网', '新闻', '信息', '报道'];
+ const hasVerb = verbTokens.some((token) => chineseText.includes(token));
+ const hasNoun = nounTokens.some((token) => chineseText.includes(token));
+ if (hasVerb && hasNoun) {
+ return { hasIntent: true, googlePreferred: false };
+ }
+ return { hasIntent: false, googlePreferred: false };
  }
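
Representative inputs and the outcomes implied by the rules above (illustrative only: `detectWebSearchIntent` is module-private, so these calls are not a public API):

```js
detectWebSearchIntent({ messages: [{ role: 'user', content: '帮我谷歌搜索最新的 Node 版本' }] });
// -> { hasIntent: true, googlePreferred: true }    hard Chinese keyword '谷歌搜索'

detectWebSearchIntent({ messages: [{ role: 'user', content: 'search the web for recent TypeScript news' }] });
// -> { hasIntent: true, googlePreferred: false }   English direct pattern 'search the web'

detectWebSearchIntent({ messages: [{ role: 'user', content: '帮我上网看看今天的新闻' }] });
// -> { hasIntent: true, googlePreferred: false }   Chinese rule 1: contains '上网'

detectWebSearchIntent({ messages: [{ role: 'user', content: 'summarize this file for me' }] });
// -> { hasIntent: false, googlePreferred: false }
```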
@@ -3,6 +3,7 @@ import { evaluateResponsesHostPolicy } from './responses-host-policy.js';
  import { convertMessagesToBridgeInput, convertBridgeInputToChatMessages } from '../shared/bridge-message-utils.js';
  import { createToolCallIdTransformer, enforceToolCallIdStyle, resolveToolCallIdStyle, stripInternalToolingMetadata, sanitizeResponsesFunctionName } from '../shared/responses-tool-utils.js';
  import { mapBridgeToolsToChat, mapChatToolsToBridge } from '../shared/tool-mapping.js';
+ import { ProviderProtocolError } from '../shared/errors.js';
  // --- Utilities (ported strictly) ---
  import { canonicalizeChatResponseTools } from '../shared/tool-canonicalizer.js';
  import { normalizeMessageReasoningTools } from '../shared/reasoning-tool-normalizer.js';
@@ -95,7 +96,16 @@ export function buildChatRequestFromResponses(payload, context) {
  // No tool governance on the Responses path; it is handled uniformly in the Chat half of the pipeline
  // No system tips for MCP on OpenAI Responses path (avoid leaking tool names)
  if (!messages.length) {
- throw new Error('Responses payload produced no chat messages');
+ throw new ProviderProtocolError('Responses payload produced no chat messages', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'buildChatRequestFromResponses',
+ inputLength: Array.isArray(context.input) ? context.input.length : undefined,
+ requestId: context.requestId
+ }
+ });
  }
  // If there are only system messages and no user/assistant/tool, a later bridge action injects a fallback user message from instructions
  const result = { model: payload.model, messages };
@@ -536,15 +546,76 @@ export function buildResponsesPayloadFromChat(payload, context) {
  if (response.object === 'response' && Array.isArray(response.output)) {
  return response;
  }
- if (!Array.isArray(response.choices) || !response.choices.length) {
- throw new Error('Responses bridge expects OpenAI Chat completion payload');
+ const hasChoicesArray = Array.isArray(response.choices);
+ const choicesLength = hasChoicesArray ? response.choices.length : 0;
+ // Graceful fallback for provider payloads that do not contain a valid
+ // ChatCompletion-style choices array (e.g. certain compat error envelopes).
+ if (!hasChoicesArray || choicesLength === 0) {
+ const rawStatus = response.status;
+ const statusCode = typeof rawStatus === 'string' && rawStatus.trim().length
+ ? rawStatus.trim()
+ : typeof rawStatus === 'number'
+ ? String(rawStatus)
+ : undefined;
+ const message = typeof response.msg === 'string' && response.msg.trim().length
+ ? response.msg.trim()
+ : typeof response.message === 'string' && response.message.trim().length
+ ? response.message.trim()
+ : 'Upstream returned non-standard Chat completion payload (missing choices).';
+ const out = {
+ id: response.id || `resp-${Date.now()}`,
+ object: 'response',
+ created_at: response.created_at || response.created || Math.floor(Date.now() / 1000),
+ model: response.model,
+ status: 'failed',
+ output: []
+ };
+ if (message) {
+ out.output_text = message;
+ out.error = {
+ type: 'provider_error',
+ code: statusCode,
+ message
+ };
+ }
+ if (context) {
+ for (const k of ['metadata', 'parallel_tool_calls', 'tool_choice', 'include']) {
+ if (context[k] !== undefined)
+ out[k] = context[k];
+ }
+ if (!shouldStripHostManagedFields(context) && context.store !== undefined) {
+ out.store = context.store;
+ }
+ }
+ if (typeof response.request_id === 'string') {
+ out.request_id = response.request_id;
+ }
+ else if (typeof response.id === 'string') {
+ out.request_id = response.id;
+ }
+ else if (typeof context?.requestId === 'string') {
+ out.request_id = context.requestId;
+ }
+ if (out.metadata) {
+ stripInternalToolingMetadata(out.metadata);
+ }
+ return out;
  }
  const canonical = canonicalizeChatResponseTools(response);
  const choices = Array.isArray(canonical?.choices) ? canonical.choices : [];
  const primaryChoice = choices[0] && typeof choices[0] === 'object' ? choices[0] : undefined;
  const message = primaryChoice && typeof primaryChoice.message === 'object' ? primaryChoice.message : undefined;
  if (!message) {
- throw new Error('Responses bridge could not locate assistant message in Chat completion');
+ throw new ProviderProtocolError('Responses bridge could not locate assistant message in Chat completion', {
+ code: 'MALFORMED_RESPONSE',
+ protocol: 'openai-chat',
+ providerType: 'openai',
+ details: {
+ context: 'buildResponsesPayloadFromChat',
+ choicesLength: choices.length,
+ requestId: context?.requestId
+ }
+ });
  }
  if (message) {
  try {