@jsonstudio/rcc 0.89.552 → 0.89.611

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/dist/build-info.js +2 -2
  2. package/dist/modules/llmswitch/bridge.d.ts +43 -0
  3. package/dist/modules/llmswitch/bridge.js +103 -0
  4. package/dist/modules/llmswitch/bridge.js.map +1 -1
  5. package/dist/monitoring/semantic-config-loader.js +3 -1
  6. package/dist/monitoring/semantic-config-loader.js.map +1 -1
  7. package/dist/providers/core/runtime/http-transport-provider.d.ts +3 -0
  8. package/dist/providers/core/runtime/http-transport-provider.js +70 -4
  9. package/dist/providers/core/runtime/http-transport-provider.js.map +1 -1
  10. package/dist/providers/core/runtime/responses-provider.d.ts +2 -2
  11. package/dist/providers/core/runtime/responses-provider.js +33 -28
  12. package/dist/providers/core/runtime/responses-provider.js.map +1 -1
  13. package/dist/providers/core/utils/provider-error-reporter.js +7 -7
  14. package/dist/providers/core/utils/provider-error-reporter.js.map +1 -1
  15. package/dist/providers/core/utils/snapshot-writer.js +6 -2
  16. package/dist/providers/core/utils/snapshot-writer.js.map +1 -1
  17. package/dist/server/runtime/http-server/index.js +59 -47
  18. package/dist/server/runtime/http-server/index.js.map +1 -1
  19. package/dist/server/runtime/http-server/llmswitch-loader.d.ts +0 -1
  20. package/dist/server/runtime/http-server/llmswitch-loader.js +17 -21
  21. package/dist/server/runtime/http-server/llmswitch-loader.js.map +1 -1
  22. package/dist/server/runtime/http-server/request-executor.d.ts +6 -0
  23. package/dist/server/runtime/http-server/request-executor.js +113 -37
  24. package/dist/server/runtime/http-server/request-executor.js.map +1 -1
  25. package/node_modules/@jsonstudio/llms/dist/conversion/codecs/gemini-openai-codec.js +15 -1
  26. package/node_modules/@jsonstudio/llms/dist/conversion/compat/actions/iflow-web-search.d.ts +18 -0
  27. package/node_modules/@jsonstudio/llms/dist/conversion/compat/actions/iflow-web-search.js +87 -0
  28. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/chat-gemini.json +14 -15
  29. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/chat-glm.json +194 -190
  30. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/chat-iflow.json +199 -195
  31. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/chat-lmstudio.json +43 -43
  32. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/chat-qwen.json +20 -20
  33. package/node_modules/@jsonstudio/llms/dist/conversion/compat/profiles/responses-c4m.json +42 -42
  34. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/compat/compat-pipeline-executor.js +6 -0
  35. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/compat/compat-types.d.ts +2 -0
  36. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/hub-pipeline.js +5 -1
  37. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/session-identifiers.d.ts +9 -0
  38. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/session-identifiers.js +76 -0
  39. package/node_modules/@jsonstudio/llms/dist/conversion/hub/pipeline/stages/resp_inbound/resp_inbound_stage1_sse_decode/index.js +31 -2
  40. package/node_modules/@jsonstudio/llms/dist/conversion/hub/process/chat-process.js +89 -25
  41. package/node_modules/@jsonstudio/llms/dist/conversion/responses/responses-openai-bridge.js +75 -4
  42. package/node_modules/@jsonstudio/llms/dist/conversion/shared/anthropic-message-utils.js +41 -6
  43. package/node_modules/@jsonstudio/llms/dist/conversion/shared/errors.d.ts +20 -0
  44. package/node_modules/@jsonstudio/llms/dist/conversion/shared/errors.js +28 -0
  45. package/node_modules/@jsonstudio/llms/dist/conversion/shared/responses-conversation-store.js +30 -3
  46. package/node_modules/@jsonstudio/llms/dist/conversion/shared/responses-output-builder.js +68 -6
  47. package/node_modules/@jsonstudio/llms/dist/filters/special/request-toolcalls-stringify.d.ts +13 -0
  48. package/node_modules/@jsonstudio/llms/dist/filters/special/request-toolcalls-stringify.js +103 -3
  49. package/node_modules/@jsonstudio/llms/dist/filters/special/response-tool-text-canonicalize.d.ts +16 -0
  50. package/node_modules/@jsonstudio/llms/dist/filters/special/response-tool-text-canonicalize.js +27 -3
  51. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/classifier.js +4 -2
  52. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/engine.d.ts +30 -0
  53. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/engine.js +618 -42
  54. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/health-manager.d.ts +23 -0
  55. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/health-manager.js +14 -0
  56. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/provider-registry.d.ts +15 -0
  57. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/provider-registry.js +40 -0
  58. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/routing-instructions.d.ts +34 -0
  59. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/routing-instructions.js +393 -0
  60. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/sticky-session-store.d.ts +3 -0
  61. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/sticky-session-store.js +110 -0
  62. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/tool-signals.js +0 -22
  63. package/node_modules/@jsonstudio/llms/dist/router/virtual-router/types.d.ts +41 -0
  64. package/node_modules/@jsonstudio/llms/dist/servertool/engine.js +42 -1
  65. package/node_modules/@jsonstudio/llms/dist/servertool/handlers/web-search.js +157 -4
  66. package/node_modules/@jsonstudio/llms/dist/servertool/types.d.ts +6 -0
  67. package/node_modules/@jsonstudio/llms/package.json +1 -1
  68. package/package.json +8 -5
  69. package/scripts/mock-provider/run-regressions.mjs +38 -2
  70. package/scripts/verify-apply-patch.mjs +132 -0
@@ -12,6 +12,7 @@ import { applyQwenRequestTransform, applyQwenResponseTransform } from '../../../
  import { extractGlmToolMarkup } from '../../../compat/actions/glm-tool-extraction.js';
  import { applyGlmWebSearchRequestTransform } from '../../../compat/actions/glm-web-search.js';
  import { applyGeminiWebSearchCompat } from '../../../compat/actions/gemini-web-search.js';
+ import { applyIflowWebSearchRequestTransform } from '../../../compat/actions/iflow-web-search.js';
  import { applyGlmImageContentTransform } from '../../../compat/actions/glm-image-content.js';
  import { applyGlmVisionPromptTransform } from '../../../compat/actions/glm-vision-prompt.js';
  const RATE_LIMIT_ERROR = 'ERR_COMPAT_RATE_LIMIT_DETECTED';
@@ -171,6 +172,11 @@ function applyMapping(root, mapping, state) {
  replaceRoot(root, applyGeminiWebSearchCompat(root, state.adapterContext));
  }
  break;
+ case 'iflow_web_search_request':
+ if (state.direction === 'request') {
+ replaceRoot(root, applyIflowWebSearchRequestTransform(root, state.adapterContext));
+ }
+ break;
  case 'glm_image_content':
  if (state.direction === 'request') {
  replaceRoot(root, applyGlmImageContentTransform(root));
@@ -106,6 +106,8 @@ export type MappingInstruction = {
  action: 'glm_vision_prompt';
  } | {
  action: 'gemini_web_search_request';
+ } | {
+ action: 'iflow_web_search_request';
  };
  export type FilterInstruction = {
  action: 'rate_limit_text';
@@ -21,6 +21,7 @@ import { runReqProcessStage2RouteSelect } from './stages/req_process/req_process
  import { runReqOutboundStage1SemanticMap } from './stages/req_outbound/req_outbound_stage1_semantic_map/index.js';
  import { runReqOutboundStage2FormatBuild } from './stages/req_outbound/req_outbound_stage2_format_build/index.js';
  import { runReqOutboundStage3Compat } from './stages/req_outbound/req_outbound_stage3_compat/index.js';
+ import { extractSessionIdentifiersFromMetadata } from './session-identifiers.js';
  export class HubPipeline {
  routerEngine;
  config;
@@ -123,6 +124,7 @@ export class HubPipeline {
  const stdMetadata = workingRequest?.metadata;
  const serverToolRequired = stdMetadata?.webSearchEnabled === true ||
  stdMetadata?.serverToolRequired === true;
+ const sessionIdentifiers = extractSessionIdentifiersFromMetadata(normalized.metadata);
  const metadataInput = {
  requestId: normalized.id,
  entryEndpoint: normalized.entryEndpoint,
@@ -133,7 +135,9 @@ export class HubPipeline {
  routeHint: normalized.routeHint,
  stage: normalized.stage,
  responsesResume: responsesResume,
- ...(serverToolRequired ? { serverToolRequired: true } : {})
+ ...(serverToolRequired ? { serverToolRequired: true } : {}),
+ ...(sessionIdentifiers.sessionId ? { sessionId: sessionIdentifiers.sessionId } : {}),
+ ...(sessionIdentifiers.conversationId ? { conversationId: sessionIdentifiers.conversationId } : {})
  };
  const routing = runReqProcessStage2RouteSelect({
  routerEngine: this.routerEngine,
@@ -0,0 +1,9 @@
+ export interface SessionIdentifiers {
+ sessionId?: string;
+ conversationId?: string;
+ }
+ export declare function extractSessionIdentifiersFromMetadata(metadata: Record<string, unknown> | undefined): SessionIdentifiers;
+ export declare function coerceClientHeaders(raw: unknown): Record<string, string> | undefined;
+ export declare function pickHeader(headers: Record<string, string>, candidates: string[]): string | undefined;
+ export declare function findHeaderValue(headers: Record<string, string>, target: string): string | undefined;
+ export declare function normalizeHeaderKey(value: string): string;
@@ -0,0 +1,76 @@
+ export function extractSessionIdentifiersFromMetadata(metadata) {
+ const directSession = normalizeIdentifier(metadata?.sessionId);
+ const directConversation = normalizeIdentifier(metadata?.conversationId);
+ const headers = coerceClientHeaders(metadata?.clientHeaders);
+ const sessionId = directSession ||
+ (headers ? pickHeader(headers, ['session_id', 'session-id', 'x-session-id', 'anthropic-session-id']) : undefined);
+ const conversationId = directConversation ||
+ (headers
+ ? pickHeader(headers, [
+ 'conversation_id',
+ 'conversation-id',
+ 'x-conversation-id',
+ 'anthropic-conversation-id',
+ 'openai-conversation-id'
+ ])
+ : undefined);
+ return {
+ ...(sessionId ? { sessionId } : {}),
+ ...(conversationId ? { conversationId } : {})
+ };
+ }
+ export function coerceClientHeaders(raw) {
+ if (!raw || typeof raw !== 'object') {
+ return undefined;
+ }
+ const normalized = {};
+ for (const [key, value] of Object.entries(raw)) {
+ if (typeof value === 'string' && value.trim()) {
+ normalized[key] = value;
+ }
+ }
+ return Object.keys(normalized).length ? normalized : undefined;
+ }
+ export function pickHeader(headers, candidates) {
+ for (const name of candidates) {
+ const value = findHeaderValue(headers, name);
+ if (value) {
+ return value;
+ }
+ }
+ return undefined;
+ }
+ export function findHeaderValue(headers, target) {
+ const lowered = typeof target === 'string' ? target.toLowerCase() : '';
+ if (!lowered) {
+ return undefined;
+ }
+ const normalizedTarget = normalizeHeaderKey(lowered);
+ for (const [key, value] of Object.entries(headers)) {
+ if (typeof value !== 'string') {
+ continue;
+ }
+ const trimmed = value.trim();
+ if (!trimmed) {
+ continue;
+ }
+ const loweredKey = key.toLowerCase();
+ if (loweredKey === lowered) {
+ return trimmed;
+ }
+ if (normalizeHeaderKey(loweredKey) === normalizedTarget) {
+ return trimmed;
+ }
+ }
+ return undefined;
+ }
+ export function normalizeHeaderKey(value) {
+ return value.replace(/[\s_-]+/g, '');
+ }
+ function normalizeIdentifier(value) {
+ if (typeof value !== 'string') {
+ return undefined;
+ }
+ const trimmed = value.trim();
+ return trimmed || undefined;
+ }
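The new session-identifiers helpers above resolve a session/conversation id either from explicit metadata fields or from forwarded client headers, matching header names case-insensitively and ignoring dashes/underscores/whitespace. A minimal usage sketch (illustrative only, not part of the diff; the deep-import path is an assumption based on the file location shown above):

    // Hypothetical usage of the helpers added in session-identifiers.js.
    import { extractSessionIdentifiersFromMetadata } from '@jsonstudio/llms/dist/conversion/hub/pipeline/session-identifiers.js';

    const ids = extractSessionIdentifiersFromMetadata({
        clientHeaders: {
            'X-Session-Id': 'sess-123',       // matched case-insensitively via the x-session-id candidate
            'Conversation_Id': ' conv-456 '   // dashes/underscores are normalized and the value is trimmed
        }
    });
    // ids => { sessionId: 'sess-123', conversationId: 'conv-456' }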
@@ -1,5 +1,17 @@
  import { defaultSseCodecRegistry } from '../../../../../../sse/index.js';
  import { recordStage } from '../../../stages/utils.js';
+ import { ProviderProtocolError } from '../../../../../shared/errors.js';
+ function resolveProviderType(protocol) {
+ if (protocol === 'openai-chat')
+ return 'openai';
+ if (protocol === 'openai-responses')
+ return 'responses';
+ if (protocol === 'anthropic-messages')
+ return 'anthropic';
+ if (protocol === 'gemini-chat')
+ return 'gemini';
+ return undefined;
+ }
  export async function runRespInboundStage1SseDecode(options) {
  const stream = extractSseStream(options.payload);
  if (!stream) {
@@ -15,7 +27,15 @@ export async function runRespInboundStage1SseDecode(options) {
  reason: 'protocol_unsupported',
  protocol: options.providerProtocol
  });
- throw new Error(`[resp_inbound_stage1_sse_decode] Protocol ${options.providerProtocol} does not support SSE decoding`);
+ throw new ProviderProtocolError(`[resp_inbound_stage1_sse_decode] Protocol ${options.providerProtocol} does not support SSE decoding`, {
+ code: 'SSE_DECODE_ERROR',
+ protocol: options.providerProtocol,
+ providerType: resolveProviderType(options.providerProtocol),
+ details: {
+ phase: 'resp_inbound_stage1_sse_decode',
+ reason: 'protocol_unsupported'
+ }
+ });
  }
  try {
  const codec = defaultSseCodecRegistry.get(options.providerProtocol);
@@ -38,7 +58,16 @@ export async function runRespInboundStage1SseDecode(options) {
  protocol: options.providerProtocol,
  error: message
  });
- throw new Error(`[resp_inbound_stage1_sse_decode] Failed to decode SSE payload for protocol ${options.providerProtocol}: ${message}`);
+ throw new ProviderProtocolError(`[resp_inbound_stage1_sse_decode] Failed to decode SSE payload for protocol ${options.providerProtocol}: ${message}`, {
+ code: 'SSE_DECODE_ERROR',
+ protocol: options.providerProtocol,
+ providerType: resolveProviderType(options.providerProtocol),
+ details: {
+ phase: 'resp_inbound_stage1_sse_decode',
+ requestId: options.adapterContext.requestId,
+ message
+ }
+ });
  }
  }
  function supportsSseProtocol(protocol) {
@@ -326,9 +326,9 @@ function maybeInjectWebSearchTool(request, metadata) {
  const injectPolicy = rawConfig.injectPolicy === 'always' || rawConfig.injectPolicy === 'selective'
  ? rawConfig.injectPolicy
  : 'selective';
+ const intent = detectWebSearchIntent(request);
  if (injectPolicy === 'selective') {
- const hasExplicitIntent = detectWebSearchIntent(request);
- if (!hasExplicitIntent) {
+ if (!intent.hasIntent) {
  // When the most recent user message contains no obvious web-search keywords,
  // but the previous assistant turn's tool call was already search-like (e.g. web_search),
  // still treat this as a web_search continuation and force-inject the web_search tool,
@@ -351,9 +351,35 @@ function maybeInjectWebSearchTool(request, metadata) {
  return typeof fn?.name === 'string' && fn.name.trim() === 'web_search';
  });
  if (hasWebSearch) {
- return request;
+ const nextMetadata = {
+ ...(request.metadata ?? {}),
+ webSearchEnabled: true
+ };
+ return {
+ ...request,
+ metadata: nextMetadata
+ };
+ }
+ let engines = rawConfig.engines.filter((engine) => typeof engine?.id === 'string' && !!engine.id.trim() && !engine.serverToolsDisabled);
+ // When the user explicitly asks for "Google search", only expose Gemini / Antigravity search backends:
+ // - providerKey starts with gemini-cli. or antigravity.;
+ // - or the engine id contains "google" (kept for configs that identify the Google engine by id).
+ if (intent.googlePreferred) {
+ const preferred = engines.filter((engine) => {
+ const id = engine.id.trim().toLowerCase();
+ const providerKey = (engine.providerKey || '').toLowerCase();
+ if (providerKey.startsWith('gemini-cli.') || providerKey.startsWith('antigravity.')) {
+ return true;
+ }
+ if (id.includes('google')) {
+ return true;
+ }
+ return false;
+ });
+ if (preferred.length > 0) {
+ engines = preferred;
+ }
  }
- const engines = rawConfig.engines.filter((engine) => typeof engine?.id === 'string' && !!engine.id.trim() && !engine.serverToolsDisabled);
  if (!engines.length) {
  return request;
  }
@@ -418,7 +444,7 @@ function maybeInjectWebSearchTool(request, metadata) {
  function detectWebSearchIntent(request) {
  const messages = Array.isArray(request.messages) ? request.messages : [];
  if (!messages.length) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
  }
  // Walk backwards to the most recent user message, skipping tool / assistant tool-call turns,
  // so that in Responses / multi-turn tool-call scenarios intent is still judged from the latest user input.
@@ -431,7 +457,7 @@ function detectWebSearchIntent(request) {
  }
  }
  if (!lastUser) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
  }
  // Support multimodal content: either a plain text string or a segmented array with image_url parts.
  let content = '';
@@ -455,34 +481,72 @@ function detectWebSearchIntent(request) {
  content = texts.join('\n');
  }
  if (!content) {
- return false;
+ return { hasIntent: false, googlePreferred: false };
+ }
+ // Hard 100% keywords (Chinese): "谷歌搜索 / 谷歌一下 / 百度一下" are always treated as search intent.
+ // "谷歌搜索 / 谷歌一下" additionally bias towards the Google/Gemini search backends.
+ const zh = content;
+ const hasGoogleExplicit = zh.includes('谷歌搜索') ||
+ zh.includes('谷歌一下');
+ const hasBaiduExplicit = zh.includes('百度一下');
+ if (hasGoogleExplicit || hasBaiduExplicit) {
+ // Both the Google and Baidu keywords first try the "Google search" engines;
+ // only when the Virtual Router has no Google-related engine configured do we fall back to a regular web search.
+ return {
+ hasIntent: true,
+ googlePreferred: true
+ };
  }
+ // English intent: simple substring match on lowercased text.
  const text = content.toLowerCase();
- const keywords = [
- // English
+ // 1) Direct patterns like "web search" / "internet search" / "/search".
+ const englishDirect = [
  'web search',
  'web_search',
  'websearch',
  'internet search',
  'search the web',
- 'online search',
- 'search online',
- 'search on the internet',
- 'search the internet',
  'web-search',
- 'online-search',
  'internet-search',
- // Chinese
- '联网搜索',
- '网络搜索',
- '上网搜索',
- '网上搜索',
- '网上查',
- '网上查找',
- '上网查',
- '上网搜',
- // Command-style
  '/search'
  ];
- return keywords.some((keyword) => text.includes(keyword.toLowerCase()));
+ if (englishDirect.some((keyword) => text.includes(keyword))) {
+ return { hasIntent: true, googlePreferred: text.includes('google') };
+ }
+ // 2) Verb + noun combinations, similar to the Chinese rule:
+ // - verb: search / find / look up / look for / google
+ // - noun: web / internet / online / news / information / info / report / reports / article / articles
+ const verbTokensEn = ['search', 'find', 'look up', 'look for', 'google'];
+ const nounTokensEn = [
+ 'web',
+ 'internet',
+ 'online',
+ 'news',
+ 'information',
+ 'info',
+ 'report',
+ 'reports',
+ 'article',
+ 'articles'
+ ];
+ const hasVerbEn = verbTokensEn.some((token) => text.includes(token));
+ const hasNounEn = nounTokensEn.some((token) => text.includes(token));
+ if (hasVerbEn && hasNounEn) {
+ return { hasIntent: true, googlePreferred: text.includes('google') };
+ }
+ // Chinese rules:
+ // 1. If the text contains "上网" at all, it is an immediate hit (e.g. "帮我上网看看今天的新闻").
+ // 2. Otherwise, any verb among 搜索/查找/搜 combined with any noun among 网络/联网/新闻/信息/报道 also counts as web-search intent.
+ const chineseText = content; // Chinese has no letter case, so use the original text directly.
+ if (chineseText.includes('上网')) {
+ return { hasIntent: true, googlePreferred: false };
+ }
+ const verbTokens = ['搜索', '查找', '搜'];
+ const nounTokens = ['网络', '联网', '新闻', '信息', '报道'];
+ const hasVerb = verbTokens.some((token) => chineseText.includes(token));
+ const hasNoun = nounTokens.some((token) => chineseText.includes(token));
+ if (hasVerb && hasNoun) {
+ return { hasIntent: true, googlePreferred: false };
+ }
+ return { hasIntent: false, googlePreferred: false };
  }
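Net effect of the hunks above: the module-internal detectWebSearchIntent helper now returns an { hasIntent, googlePreferred } object instead of a boolean, and maybeInjectWebSearchTool uses googlePreferred to narrow the candidate engines to Gemini/Antigravity backends. Expected results for a few hypothetical prompts, derived by reading the keyword rules above rather than by running the package:

    // '谷歌搜索最新的AI新闻'          -> { hasIntent: true,  googlePreferred: true }   // explicit 谷歌搜索 keyword
    // 'please search the web for x'   -> { hasIntent: true,  googlePreferred: false }  // direct pattern 'search the web'
    // 'google the latest tech news'   -> { hasIntent: true,  googlePreferred: true }   // verb 'google' + noun 'news'
    // '帮我上网看看今天的新闻'         -> { hasIntent: true,  googlePreferred: false }  // contains 上网
    // 'summarize this file'           -> { hasIntent: false, googlePreferred: false }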
@@ -3,6 +3,7 @@ import { evaluateResponsesHostPolicy } from './responses-host-policy.js';
  import { convertMessagesToBridgeInput, convertBridgeInputToChatMessages } from '../shared/bridge-message-utils.js';
  import { createToolCallIdTransformer, enforceToolCallIdStyle, resolveToolCallIdStyle, stripInternalToolingMetadata, sanitizeResponsesFunctionName } from '../shared/responses-tool-utils.js';
  import { mapBridgeToolsToChat, mapChatToolsToBridge } from '../shared/tool-mapping.js';
+ import { ProviderProtocolError } from '../shared/errors.js';
  // --- Utilities (ported strictly) ---
  import { canonicalizeChatResponseTools } from '../shared/tool-canonicalizer.js';
  import { normalizeMessageReasoningTools } from '../shared/reasoning-tool-normalizer.js';
@@ -95,7 +96,16 @@ export function buildChatRequestFromResponses(payload, context) {
  // No tool governance on the Responses path; it is handled uniformly in the later Chat stage
  // No system tips for MCP on OpenAI Responses path (avoid leaking tool names)
  if (!messages.length) {
- throw new Error('Responses payload produced no chat messages');
+ throw new ProviderProtocolError('Responses payload produced no chat messages', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'buildChatRequestFromResponses',
+ inputLength: Array.isArray(context.input) ? context.input.length : undefined,
+ requestId: context.requestId
+ }
+ });
  }
  // If there are only system messages and no user/assistant/tool messages, a later bridge action injects a fallback user message from instructions
  const result = { model: payload.model, messages };
@@ -536,15 +546,76 @@ export function buildResponsesPayloadFromChat(payload, context) {
  if (response.object === 'response' && Array.isArray(response.output)) {
  return response;
  }
- if (!Array.isArray(response.choices) || !response.choices.length) {
- throw new Error('Responses bridge expects OpenAI Chat completion payload');
+ const hasChoicesArray = Array.isArray(response.choices);
+ const choicesLength = hasChoicesArray ? response.choices.length : 0;
+ // Graceful fallback for provider payloads that do not contain a valid
+ // ChatCompletion-style choices array (e.g. certain compat error envelopes).
+ if (!hasChoicesArray || choicesLength === 0) {
+ const rawStatus = response.status;
+ const statusCode = typeof rawStatus === 'string' && rawStatus.trim().length
+ ? rawStatus.trim()
+ : typeof rawStatus === 'number'
+ ? String(rawStatus)
+ : undefined;
+ const message = typeof response.msg === 'string' && response.msg.trim().length
+ ? response.msg.trim()
+ : typeof response.message === 'string' && response.message.trim().length
+ ? response.message.trim()
+ : 'Upstream returned non-standard Chat completion payload (missing choices).';
+ const out = {
+ id: response.id || `resp-${Date.now()}`,
+ object: 'response',
+ created_at: response.created_at || response.created || Math.floor(Date.now() / 1000),
+ model: response.model,
+ status: 'failed',
+ output: []
+ };
+ if (message) {
+ out.output_text = message;
+ out.error = {
+ type: 'provider_error',
+ code: statusCode,
+ message
+ };
+ }
+ if (context) {
+ for (const k of ['metadata', 'parallel_tool_calls', 'tool_choice', 'include']) {
+ if (context[k] !== undefined)
+ out[k] = context[k];
+ }
+ if (!shouldStripHostManagedFields(context) && context.store !== undefined) {
+ out.store = context.store;
+ }
+ }
+ if (typeof response.request_id === 'string') {
+ out.request_id = response.request_id;
+ }
+ else if (typeof response.id === 'string') {
+ out.request_id = response.id;
+ }
+ else if (typeof context?.requestId === 'string') {
+ out.request_id = context.requestId;
+ }
+ if (out.metadata) {
+ stripInternalToolingMetadata(out.metadata);
+ }
+ return out;
  }
  const canonical = canonicalizeChatResponseTools(response);
  const choices = Array.isArray(canonical?.choices) ? canonical.choices : [];
  const primaryChoice = choices[0] && typeof choices[0] === 'object' ? choices[0] : undefined;
  const message = primaryChoice && typeof primaryChoice.message === 'object' ? primaryChoice.message : undefined;
  if (!message) {
- throw new Error('Responses bridge could not locate assistant message in Chat completion');
+ throw new ProviderProtocolError('Responses bridge could not locate assistant message in Chat completion', {
+ code: 'MALFORMED_RESPONSE',
+ protocol: 'openai-chat',
+ providerType: 'openai',
+ details: {
+ context: 'buildResponsesPayloadFromChat',
+ choicesLength: choices.length,
+ requestId: context?.requestId
+ }
+ });
  }
  if (message) {
  try {
@@ -3,6 +3,7 @@ import { resolveBridgePolicy, resolvePolicyActions } from './bridge-policies.js'
  import { normalizeChatMessageContent } from './chat-output-normalizer.js';
  import { mapBridgeToolsToChat, mapChatToolsToBridge } from './tool-mapping.js';
  import { jsonClone } from '../hub/types/json.js';
+ import { ProviderProtocolError } from './errors.js';
  function isObject(v) {
  return !!v && typeof v === 'object' && !Array.isArray(v);
  }
@@ -111,20 +112,39 @@ function extractToolResultSegment(entry) {
  }
  return String(entry);
  }
+ function resolveProtocolErrorCode(context) {
+ const ctx = context.toLowerCase();
+ return ctx.includes('tool') ? 'TOOL_PROTOCOL_ERROR' : 'MALFORMED_REQUEST';
+ }
  function requireTrimmedString(value, context) {
  if (typeof value !== 'string') {
- throw new Error(`Anthropic bridge constraint violated: ${context} must be a string`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must be a string`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context, actualType: typeof value }
+ });
  }
  const trimmed = value.trim();
  if (!trimmed.length) {
- throw new Error(`Anthropic bridge constraint violated: ${context} must not be empty`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must not be empty`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context }
+ });
  }
  return trimmed;
  }
  function requireSystemText(block, context) {
  const text = flattenAnthropicText(block).trim();
  if (!text) {
- throw new Error(`Anthropic bridge constraint violated: ${context} must contain text`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must contain text`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context }
+ });
  }
  return text;
  }
@@ -710,7 +730,12 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  pushSystemBlock(requireSystemText(val, 'top-level system'));
  return;
  }
- throw new Error('Anthropic bridge constraint violated: unsupported system payload type');
+ throw new ProviderProtocolError('Anthropic bridge constraint violated: unsupported system payload type', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context: 'top-level system', actualType: typeof val }
+ });
  };
  ingestSystem(sys);
  }
@@ -730,7 +755,12 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  const text = collectText(contentNode).trim();
  if (role === 'system') {
  if (!text) {
- throw new Error('Anthropic bridge constraint violated: Chat system message must contain text');
+ throw new ProviderProtocolError('Anthropic bridge constraint violated: Chat system message must contain text', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context: 'chat.system', original: contentNode }
+ });
  }
  pushSystemBlock(text);
  continue;
@@ -738,7 +768,12 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  if (role === 'tool') {
  const toolCallId = requireTrimmedString(m.tool_call_id ?? m.call_id ?? m.tool_use_id ?? m.id, 'tool_result.tool_call_id');
  if (!knownToolCallIds.has(toolCallId)) {
- throw new Error(`Anthropic bridge constraint violated: tool result ${toolCallId} has no matching tool call`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: tool result ${toolCallId} has no matching tool call`, {
+ code: 'TOOL_PROTOCOL_ERROR',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { toolCallId }
+ });
  }
  const block = {
  type: 'tool_result',
@@ -0,0 +1,20 @@
+ export type ProviderProtocolErrorCode = 'TOOL_PROTOCOL_ERROR' | 'SSE_DECODE_ERROR' | 'MALFORMED_RESPONSE' | 'MALFORMED_REQUEST';
+ export type ProviderErrorCategory = 'EXTERNAL_ERROR' | 'TOOL_ERROR' | 'INTERNAL_ERROR';
+ export interface ProviderProtocolErrorOptions {
+ code: ProviderProtocolErrorCode;
+ protocol?: string;
+ providerType?: string;
+ /**
+ * Coarse-grained error category; if not set explicitly, it is derived from the code.
+ */
+ category?: ProviderErrorCategory;
+ details?: Record<string, unknown>;
+ }
+ export declare class ProviderProtocolError extends Error {
+ readonly code: ProviderProtocolErrorCode;
+ readonly protocol?: string;
+ readonly providerType?: string;
+ readonly category: ProviderErrorCategory;
+ readonly details?: Record<string, unknown>;
+ constructor(message: string, options: ProviderProtocolErrorOptions);
+ }
@@ -0,0 +1,28 @@
+ function inferCategoryFromCode(code) {
+ switch (code) {
+ case 'TOOL_PROTOCOL_ERROR':
+ return 'TOOL_ERROR';
+ case 'SSE_DECODE_ERROR':
+ case 'MALFORMED_RESPONSE':
+ case 'MALFORMED_REQUEST':
+ default:
+ // Treated by default as an external protocol/payload problem; genuine internal errors are explicitly marked INTERNAL_ERROR by callers.
+ return 'EXTERNAL_ERROR';
+ }
+ }
+ export class ProviderProtocolError extends Error {
+ code;
+ protocol;
+ providerType;
+ category;
+ details;
+ constructor(message, options) {
+ super(message);
+ this.name = 'ProviderProtocolError';
+ this.code = options.code;
+ this.protocol = options.protocol;
+ this.providerType = options.providerType;
+ this.category = options.category ?? inferCategoryFromCode(options.code);
+ this.details = options.details;
+ }
+ }
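ProviderProtocolError (declared in the errors.d.ts hunk and implemented in the errors.js hunk above) carries a machine-readable code plus a category that is inferred from the code when not supplied. A hedged sketch of how a caller might construct and inspect one (illustrative only; the deep-import path is an assumption based on the file location shown in the file list):

    import { ProviderProtocolError } from '@jsonstudio/llms/dist/conversion/shared/errors.js';

    try {
        throw new ProviderProtocolError('tool result t-1 has no matching tool call', {
            code: 'TOOL_PROTOCOL_ERROR',
            protocol: 'anthropic-messages',
            providerType: 'anthropic',
            details: { toolCallId: 't-1' }
        });
    } catch (err) {
        if (err instanceof ProviderProtocolError) {
            // category was not passed, so it is inferred: TOOL_PROTOCOL_ERROR -> 'TOOL_ERROR'
            console.error(err.code, err.category, err.protocol, err.details);
        }
    }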
@@ -1,3 +1,4 @@
+ import { ProviderProtocolError } from './errors.js';
  const TTL_MS = 1000 * 60 * 30; // 30min
  function cloneDeep(value) {
  try {
@@ -205,16 +206,42 @@ class ResponsesConversationStore {
  }
  resumeConversation(responseId, submitPayload, options) {
  if (typeof responseId !== 'string' || !responseId.trim()) {
- throw new Error('Responses conversation requires valid response_id');
+ throw new ProviderProtocolError('Responses conversation requires valid response_id', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'missing_or_empty_response_id'
+ }
+ });
  }
  this.prune();
  const entry = this.responseIndex.get(responseId);
  if (!entry) {
- throw new Error('Responses conversation expired or not found');
+ throw new ProviderProtocolError('Responses conversation expired or not found', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'expired_or_unknown_response_id',
+ responseId
+ }
+ });
  }
  const toolOutputs = Array.isArray(submitPayload.tool_outputs) ? submitPayload.tool_outputs : [];
  if (!toolOutputs.length) {
- throw new Error('tool_outputs array is required when submitting Responses tool results');
+ throw new ProviderProtocolError('tool_outputs array is required when submitting Responses tool results', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'missing_tool_outputs',
+ responseId
+ }
+ });
  }
  const mergedInput = coerceInputArray(entry.input);
  const normalizedOutputs = normalizeSubmittedToolOutputs(toolOutputs);