@jsonstudio/llms 0.6.230 → 0.6.467

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/README.md +2 -0
  2. package/dist/conversion/codecs/gemini-openai-codec.js +24 -2
  3. package/dist/conversion/compat/actions/gemini-web-search.d.ts +17 -0
  4. package/dist/conversion/compat/actions/gemini-web-search.js +68 -0
  5. package/dist/conversion/compat/actions/glm-image-content.d.ts +2 -0
  6. package/dist/conversion/compat/actions/glm-image-content.js +83 -0
  7. package/dist/conversion/compat/actions/glm-vision-prompt.d.ts +11 -0
  8. package/dist/conversion/compat/actions/glm-vision-prompt.js +177 -0
  9. package/dist/conversion/compat/actions/glm-web-search.js +25 -28
  10. package/dist/conversion/compat/actions/iflow-web-search.d.ts +18 -0
  11. package/dist/conversion/compat/actions/iflow-web-search.js +87 -0
  12. package/dist/conversion/compat/actions/universal-shape-filter.js +11 -0
  13. package/dist/conversion/compat/profiles/chat-gemini.json +17 -0
  14. package/dist/conversion/compat/profiles/chat-glm.json +194 -184
  15. package/dist/conversion/compat/profiles/chat-iflow.json +199 -195
  16. package/dist/conversion/compat/profiles/chat-lmstudio.json +43 -43
  17. package/dist/conversion/compat/profiles/chat-qwen.json +20 -20
  18. package/dist/conversion/compat/profiles/responses-c4m.json +42 -42
  19. package/dist/conversion/config/sample-config.json +1 -1
  20. package/dist/conversion/hub/pipeline/compat/compat-pipeline-executor.js +24 -0
  21. package/dist/conversion/hub/pipeline/compat/compat-types.d.ts +8 -0
  22. package/dist/conversion/hub/pipeline/hub-pipeline.js +32 -1
  23. package/dist/conversion/hub/pipeline/session-identifiers.d.ts +9 -0
  24. package/dist/conversion/hub/pipeline/session-identifiers.js +76 -0
  25. package/dist/conversion/hub/pipeline/stages/resp_inbound/resp_inbound_stage1_sse_decode/index.js +31 -2
  26. package/dist/conversion/hub/pipeline/target-utils.js +6 -0
  27. package/dist/conversion/hub/process/chat-process.js +186 -40
  28. package/dist/conversion/hub/response/provider-response.d.ts +13 -1
  29. package/dist/conversion/hub/response/provider-response.js +84 -35
  30. package/dist/conversion/hub/response/server-side-tools.js +61 -4
  31. package/dist/conversion/hub/semantic-mappers/gemini-mapper.js +123 -3
  32. package/dist/conversion/hub/semantic-mappers/responses-mapper.js +17 -1
  33. package/dist/conversion/hub/standardized-bridge.js +14 -0
  34. package/dist/conversion/responses/responses-openai-bridge.js +110 -6
  35. package/dist/conversion/shared/anthropic-message-utils.js +133 -9
  36. package/dist/conversion/shared/bridge-message-utils.js +137 -10
  37. package/dist/conversion/shared/errors.d.ts +20 -0
  38. package/dist/conversion/shared/errors.js +28 -0
  39. package/dist/conversion/shared/responses-conversation-store.js +30 -3
  40. package/dist/conversion/shared/responses-output-builder.js +111 -8
  41. package/dist/conversion/shared/tool-filter-pipeline.js +1 -0
  42. package/dist/filters/special/request-toolcalls-stringify.d.ts +13 -0
  43. package/dist/filters/special/request-toolcalls-stringify.js +103 -3
  44. package/dist/filters/special/response-tool-text-canonicalize.d.ts +16 -0
  45. package/dist/filters/special/response-tool-text-canonicalize.js +27 -3
  46. package/dist/router/virtual-router/bootstrap.js +44 -12
  47. package/dist/router/virtual-router/classifier.js +13 -17
  48. package/dist/router/virtual-router/engine.d.ts +39 -0
  49. package/dist/router/virtual-router/engine.js +755 -55
  50. package/dist/router/virtual-router/features.js +1 -1
  51. package/dist/router/virtual-router/message-utils.js +36 -24
  52. package/dist/router/virtual-router/provider-registry.d.ts +15 -0
  53. package/dist/router/virtual-router/provider-registry.js +42 -1
  54. package/dist/router/virtual-router/routing-instructions.d.ts +34 -0
  55. package/dist/router/virtual-router/routing-instructions.js +383 -0
  56. package/dist/router/virtual-router/sticky-session-store.d.ts +3 -0
  57. package/dist/router/virtual-router/sticky-session-store.js +110 -0
  58. package/dist/router/virtual-router/token-counter.js +14 -3
  59. package/dist/router/virtual-router/tool-signals.js +0 -22
  60. package/dist/router/virtual-router/types.d.ts +80 -0
  61. package/dist/router/virtual-router/types.js +2 -1
  62. package/dist/servertool/engine.d.ts +27 -0
  63. package/dist/servertool/engine.js +101 -0
  64. package/dist/servertool/flow-types.d.ts +40 -0
  65. package/dist/servertool/flow-types.js +1 -0
  66. package/dist/servertool/handlers/vision.d.ts +1 -0
  67. package/dist/servertool/handlers/vision.js +194 -0
  68. package/dist/servertool/handlers/web-search.d.ts +1 -0
  69. package/dist/servertool/handlers/web-search.js +791 -0
  70. package/dist/servertool/orchestration-types.d.ts +33 -0
  71. package/dist/servertool/orchestration-types.js +1 -0
  72. package/dist/servertool/registry.d.ts +18 -0
  73. package/dist/servertool/registry.js +27 -0
  74. package/dist/servertool/server-side-tools.d.ts +8 -0
  75. package/dist/servertool/server-side-tools.js +208 -0
  76. package/dist/servertool/types.d.ts +94 -0
  77. package/dist/servertool/types.js +1 -0
  78. package/dist/servertool/vision-tool.d.ts +2 -0
  79. package/dist/servertool/vision-tool.js +185 -0
  80. package/dist/sse/sse-to-json/builders/response-builder.js +6 -3
  81. package/package.json +1 -1
package/dist/conversion/shared/anthropic-message-utils.js
@@ -3,6 +3,7 @@ import { resolveBridgePolicy, resolvePolicyActions } from './bridge-policies.js'
  import { normalizeChatMessageContent } from './chat-output-normalizer.js';
  import { mapBridgeToolsToChat, mapChatToolsToBridge } from './tool-mapping.js';
  import { jsonClone } from '../hub/types/json.js';
+ import { ProviderProtocolError } from './errors.js';
  function isObject(v) {
  return !!v && typeof v === 'object' && !Array.isArray(v);
  }
@@ -111,20 +112,39 @@ function extractToolResultSegment(entry) {
  }
  return String(entry);
  }
+ function resolveProtocolErrorCode(context) {
+ const ctx = context.toLowerCase();
+ return ctx.includes('tool') ? 'TOOL_PROTOCOL_ERROR' : 'MALFORMED_REQUEST';
+ }
  function requireTrimmedString(value, context) {
  if (typeof value !== 'string') {
- throw new Error(`Anthropic bridge constraint violated: ${context} must be a string`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must be a string`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context, actualType: typeof value }
+ });
  }
  const trimmed = value.trim();
  if (!trimmed.length) {
- throw new Error(`Anthropic bridge constraint violated: ${context} must not be empty`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must not be empty`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context }
+ });
  }
  return trimmed;
  }
  function requireSystemText(block, context) {
  const text = flattenAnthropicText(block).trim();
  if (!text) {
- throw new Error(`Anthropic bridge constraint violated: ${context} must contain text`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: ${context} must contain text`, {
+ code: resolveProtocolErrorCode(context),
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context }
+ });
  }
  return text;
  }
@@ -266,6 +286,7 @@ export function buildOpenAIChatFromAnthropic(payload) {
  continue;
  }
  const textParts = [];
+ const imageBlocks = [];
  const toolCalls = [];
  const reasoningParts = [];
  const toolResults = [];
@@ -284,6 +305,29 @@ export function buildOpenAIChatFromAnthropic(payload) {
  reasoningParts.push(thinkingText);
  }
  }
+ else if (t === 'image') {
+ const source = block.source;
+ if (source && typeof source === 'object') {
+ const s = source;
+ const srcType = typeof s.type === 'string' ? s.type.toLowerCase() : '';
+ let url;
+ if (srcType === 'url' && typeof s.url === 'string') {
+ url = s.url;
+ }
+ else if (srcType === 'base64' && typeof s.data === 'string') {
+ const mediaType = typeof s.media_type === 'string' && s.media_type.trim().length
+ ? s.media_type.trim()
+ : 'image/png';
+ url = `data:${mediaType};base64,${s.data}`;
+ }
+ if (url && url.trim().length) {
+ imageBlocks.push({
+ type: 'image_url',
+ image_url: { url: url.trim() }
+ });
+ }
+ }
+ }
  else if (t === 'tool_use') {
  const name = requireTrimmedString(block.name, 'tool_use.name');
  const id = requireTrimmedString(block.id, 'tool_use.id');
@@ -310,10 +354,22 @@ export function buildOpenAIChatFromAnthropic(payload) {
  }
  const hasText = typeof normalized.contentText === 'string' && normalized.contentText.length > 0;
  const hasReasoning = mergedReasoning.length > 0;
- if (hasText || hasRawText || toolCalls.length > 0 || hasReasoning) {
+ if (hasText || hasRawText || toolCalls.length > 0 || hasReasoning || imageBlocks.length > 0) {
+ let contentNode = (hasText ? normalized.contentText : undefined) ?? combinedText ?? '';
+ if (imageBlocks.length > 0) {
+ const blocks = [];
+ const textPayload = (hasText ? normalized.contentText : undefined) ?? combinedText ?? '';
+ if (typeof textPayload === 'string' && textPayload.trim().length) {
+ blocks.push({ type: 'text', text: textPayload.trim() });
+ }
+ for (const img of imageBlocks) {
+ blocks.push(jsonClone(img));
+ }
+ contentNode = blocks;
+ }
  const msg = {
  role,
- content: (hasText ? normalized.contentText : undefined) ?? combinedText ?? ''
+ content: contentNode
  };
  if (toolCalls.length)
  msg.tool_calls = toolCalls;
@@ -674,7 +730,12 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  pushSystemBlock(requireSystemText(val, 'top-level system'));
  return;
  }
- throw new Error('Anthropic bridge constraint violated: unsupported system payload type');
+ throw new ProviderProtocolError('Anthropic bridge constraint violated: unsupported system payload type', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context: 'top-level system', actualType: typeof val }
+ });
  };
  ingestSystem(sys);
  }
@@ -690,10 +751,16 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  targetShape = mirrorShapes[mirrorIndex];
  mirrorIndex += 1;
  }
- const text = collectText(m.content).trim();
+ const contentNode = m.content;
+ const text = collectText(contentNode).trim();
  if (role === 'system') {
  if (!text) {
- throw new Error('Anthropic bridge constraint violated: Chat system message must contain text');
+ throw new ProviderProtocolError('Anthropic bridge constraint violated: Chat system message must contain text', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { context: 'chat.system', original: contentNode }
+ });
  }
  pushSystemBlock(text);
  continue;
@@ -701,7 +768,12 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  if (role === 'tool') {
  const toolCallId = requireTrimmedString(m.tool_call_id ?? m.call_id ?? m.tool_use_id ?? m.id, 'tool_result.tool_call_id');
  if (!knownToolCallIds.has(toolCallId)) {
- throw new Error(`Anthropic bridge constraint violated: tool result ${toolCallId} has no matching tool call`);
+ throw new ProviderProtocolError(`Anthropic bridge constraint violated: tool result ${toolCallId} has no matching tool call`, {
+ code: 'TOOL_PROTOCOL_ERROR',
+ protocol: 'anthropic-messages',
+ providerType: 'anthropic',
+ details: { toolCallId }
+ });
  }
  const block = {
  type: 'tool_result',
@@ -715,6 +787,58 @@ export function buildAnthropicRequestFromOpenAIChat(chatReq) {
  continue;
  }
  const blocks = [];
+ if (Array.isArray(contentNode)) {
+ // Preserve or synthesize image blocks where possible, and fall back to text for the rest.
+ for (const entry of contentNode) {
+ if (!entry || typeof entry !== 'object')
+ continue;
+ const node = entry;
+ const t = typeof node.type === 'string' ? node.type.toLowerCase() : '';
+ if (t === 'image' && node.source && typeof node.source === 'object') {
+ // Pass-through Anthropic image block as-is.
+ blocks.push({
+ type: 'image',
+ source: jsonClone(node.source)
+ });
+ continue;
+ }
+ if (t === 'image_url') {
+ let url = '';
+ const imageUrl = node.image_url;
+ if (typeof imageUrl === 'string') {
+ url = imageUrl;
+ }
+ else if (imageUrl && typeof imageUrl === 'object' && typeof imageUrl.url === 'string') {
+ url = imageUrl.url;
+ }
+ const trimmed = url.trim();
+ if (!trimmed.length)
+ continue;
+ const source = {};
+ if (trimmed.startsWith('data:')) {
+ const match = /^data:([^;,]+)?(?:;base64)?,(.*)$/s.exec(trimmed);
+ if (match) {
+ const mediaType = (match[1] || '').trim() || 'image/png';
+ source.type = 'base64';
+ source.media_type = mediaType;
+ source.data = match[2] || '';
+ }
+ else {
+ source.type = 'url';
+ source.url = trimmed;
+ }
+ }
+ else {
+ source.type = 'url';
+ source.url = trimmed;
+ }
+ blocks.push({
+ type: 'image',
+ source
+ });
+ }
+ }
+ }
  if (text) {
  blocks.push({ type: 'text', text });
  }
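
For orientation, a minimal sketch (not part of the diff) of the mapping the image handling above performs; the block shapes are taken from the hunks, the concrete values are invented:

// Anthropic content block (base64 variant) accepted by buildOpenAIChatFromAnthropic:
const anthropicBlock = {
  type: 'image',
  source: { type: 'base64', media_type: 'image/jpeg', data: '<base64 payload>' }
};
// Resulting OpenAI Chat content part: a base64 source becomes a data: URL,
// while a { type: 'url', url } source is passed through unchanged.
const chatPart = {
  type: 'image_url',
  image_url: { url: `data:${anthropicBlock.source.media_type};base64,${anthropicBlock.source.data}` }
};
// buildAnthropicRequestFromOpenAIChat reverses this: image_url parts (including
// data: URLs) are converted back into Anthropic { type: 'image', source } blocks.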

package/dist/conversion/shared/bridge-message-utils.js
@@ -66,6 +66,59 @@ function collectText(value) {
  }
  return '';
  }
+ function extractImageBlocksFromContent(content) {
+ const images = [];
+ const visit = (value) => {
+ if (!value)
+ return;
+ if (Array.isArray(value)) {
+ for (const entry of value)
+ visit(entry);
+ return;
+ }
+ if (typeof value !== 'object') {
+ return;
+ }
+ const record = value;
+ const typeValue = typeof record.type === 'string' ? record.type.toLowerCase() : '';
+ if (typeValue === 'image' || typeValue === 'image_url' || typeValue === 'input_image') {
+ let url = '';
+ const imageUrl = record.image_url;
+ if (typeof imageUrl === 'string') {
+ url = imageUrl;
+ }
+ else if (imageUrl && typeof imageUrl === 'object' && typeof imageUrl.url === 'string') {
+ url = imageUrl.url;
+ }
+ else if (typeof record.url === 'string') {
+ url = record.url;
+ }
+ else if (typeof record.uri === 'string') {
+ url = record.uri;
+ }
+ else if (typeof record.data === 'string') {
+ url = record.data;
+ }
+ const trimmed = url.trim();
+ if (trimmed.length) {
+ let detail;
+ if (imageUrl && typeof imageUrl === 'object' && typeof imageUrl.detail === 'string') {
+ detail = imageUrl.detail.trim() || undefined;
+ }
+ else if (typeof record.detail === 'string') {
+ detail = record.detail.trim() || undefined;
+ }
+ images.push({ url: trimmed, detail });
+ }
+ return;
+ }
+ if (Array.isArray(record.content)) {
+ visit(record.content);
+ }
+ };
+ visit(content);
+ return images;
+ }
  function extractUserTextFromEntry(entry) {
  if (!entry || typeof entry !== 'object')
  return '';
@@ -94,6 +147,7 @@ export function convertMessagesToBridgeInput(options) {
  const role = coerceBridgeRole(m.role || 'user');
  const content = m.content;
  const collectedText = collectText(content);
+ const imageBlocks = extractImageBlocksFromContent(content);
  const text = role === 'system' ? collectedText : collectedText.trim();
  if (role === 'system') {
  if (collectedText && collectedText.length) {
@@ -170,13 +224,29 @@ export function convertMessagesToBridgeInput(options) {
  }
  continue;
  }
- if (typeof text === 'string') {
+ if (typeof text === 'string' || imageBlocks.length) {
  const tRole = role === 'assistant' ? 'output_text' : 'input_text';
- const entry = {
- role,
- content: [{ type: tRole, text }]
- };
- input.push(entry);
+ const blocks = [];
+ if (typeof text === 'string' && text.length) {
+ blocks.push({ type: tRole, text });
+ }
+ for (const img of imageBlocks) {
+ const block = {
+ type: 'input_image',
+ image_url: img.url
+ };
+ if (img.detail) {
+ block.detail = img.detail;
+ }
+ blocks.push(block);
+ }
+ if (blocks.length) {
+ const entry = {
+ role,
+ content: blocks
+ };
+ input.push(entry);
+ }
  if (role === 'user') {
  const trimmed = typeof text === 'string' ? text.trim() : '';
  if (trimmed.length) {
@@ -260,6 +330,7 @@ function processMessageBlocks(blocks, normalizeFunctionName, tools, toolNameById
  const toolMessages = [];
  let currentLastCall = lastToolCallId;
  const reasoningSegments = [];
+ const images = [];
  for (const block of blocks) {
  if (!block || typeof block !== 'object')
  continue;
@@ -282,6 +353,18 @@ function processMessageBlocks(blocks, normalizeFunctionName, tools, toolNameById
  toolMessages.push(tm);
  currentLastCall = nested.lastCallId;
  reasoningSegments.push(...nested.reasoningSegments);
+ if (nested.images.length)
+ images.push(...nested.images);
+ continue;
+ }
+ if (type === 'input_image') {
+ const url = typeof block.image_url === 'string' ? block.image_url.trim() : '';
+ if (url) {
+ const detail = typeof block.detail === 'string' && block.detail.trim()
+ ? block.detail.trim()
+ : undefined;
+ images.push({ url, detail });
+ }
  continue;
  }
  if (type === 'function_call') {
@@ -344,7 +427,7 @@ function processMessageBlocks(blocks, normalizeFunctionName, tools, toolNameById
  }
  }
  const text = textParts.length ? textParts.join('\n').trim() : null;
- return { text, toolCalls, toolMessages, lastCallId: currentLastCall, reasoningSegments };
+ return { text, images, toolCalls, toolMessages, lastCallId: currentLastCall, reasoningSegments };
  }
  export function convertBridgeInputToChatMessages(options) {
  const { input, tools, normalizeFunctionName, toolResultFallbackText } = options;
@@ -470,7 +553,29 @@ export function convertBridgeInputToChatMessages(options) {
  for (const msg of nested.toolMessages)
  messages.push(msg);
  const normalizedRole = coerceBridgeRole((explicit.role ?? entry.role) || 'user');
- if (typeof nested.text === 'string') {
+ if (nested.images.length) {
+ const contentBlocks = [];
+ if (typeof nested.text === 'string' && nested.text.trim().length) {
+ contentBlocks.push({ type: 'text', text: nested.text });
+ }
+ for (const img of nested.images) {
+ const imgBlock = { type: 'image_url', image_url: { url: img.url } };
+ if (img.detail) {
+ imgBlock.image_url.detail = img.detail;
+ }
+ contentBlocks.push(imgBlock);
+ }
+ const msg = {
+ role: normalizedRole,
+ content: contentBlocks
+ };
+ const combinedReasoning = combineReasoningSegments(consumeEntryReasoning(), nested.reasoningSegments);
+ if (combinedReasoning.length) {
+ msg.reasoning_content = combinedReasoning.join('\n');
+ }
+ messages.push(msg);
+ }
+ else if (typeof nested.text === 'string') {
  pushNormalizedChatMessage(messages, normalizedRole, nested.text, {
  reasoningSegments: combineReasoningSegments(consumeEntryReasoning(), nested.reasoningSegments)
  });
@@ -491,9 +596,31 @@ export function convertBridgeInputToChatMessages(options) {
  for (const msg of nested.toolMessages)
  messages.push(msg);
  const normalizedRole = coerceBridgeRole(entry.role || 'user');
- if (typeof nested.text === 'string') {
+ if (nested.images.length) {
+ const contentBlocks = [];
+ if (typeof nested.text === 'string' && nested.text.trim().length) {
+ contentBlocks.push({ type: 'text', text: nested.text });
+ }
+ for (const img of nested.images) {
+ const imgBlock = { type: 'image_url', image_url: { url: img.url } };
+ if (img.detail) {
+ imgBlock.image_url.detail = img.detail;
+ }
+ contentBlocks.push(imgBlock);
+ }
+ const msg = {
+ role: normalizedRole,
+ content: contentBlocks
+ };
+ const combinedReasoning = combineReasoningSegments(consumeEntryReasoning(), nested.reasoningSegments);
+ if (combinedReasoning.length) {
+ msg.reasoning_content = combinedReasoning.join('\n');
+ }
+ messages.push(msg);
+ }
+ else if (typeof nested.text === 'string') {
  pushNormalizedChatMessage(messages, normalizedRole, nested.text, {
- reasoningSegments: combineReasoningSegments(consumeEntryReasoning(), nested.reasoningSegments)
+ reasoningSegments: consumeEntryReasoning()
  });
  }
  lastToolCallId = nested.lastCallId;
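
A small sketch, not part of the diff, of the shapes these bridge-message-utils.js hunks convert between; the URLs and text are placeholders:

// Chat-style user message with an image part...
const chatMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Describe this image' },
    { type: 'image_url', image_url: { url: 'https://example.com/cat.png', detail: 'low' } }
  ]
};
// ...becomes a Responses-style bridge entry via extractImageBlocksFromContent
// and convertMessagesToBridgeInput:
const bridgeEntry = {
  role: 'user',
  content: [
    { type: 'input_text', text: 'Describe this image' },
    { type: 'input_image', image_url: 'https://example.com/cat.png', detail: 'low' }
  ]
};
// convertBridgeInputToChatMessages maps input_image blocks back to
// { type: 'image_url', image_url: { url, detail } } parts, carrying reasoning_content along.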

package/dist/conversion/shared/errors.d.ts
@@ -0,0 +1,20 @@
+ export type ProviderProtocolErrorCode = 'TOOL_PROTOCOL_ERROR' | 'SSE_DECODE_ERROR' | 'MALFORMED_RESPONSE' | 'MALFORMED_REQUEST';
+ export type ProviderErrorCategory = 'EXTERNAL_ERROR' | 'TOOL_ERROR' | 'INTERNAL_ERROR';
+ export interface ProviderProtocolErrorOptions {
+ code: ProviderProtocolErrorCode;
+ protocol?: string;
+ providerType?: string;
+ /**
+ * Coarse-grained error category; if not set explicitly, it is derived from the code.
+ */
+ category?: ProviderErrorCategory;
+ details?: Record<string, unknown>;
+ }
+ export declare class ProviderProtocolError extends Error {
+ readonly code: ProviderProtocolErrorCode;
+ readonly protocol?: string;
+ readonly providerType?: string;
+ readonly category: ProviderErrorCategory;
+ readonly details?: Record<string, unknown>;
+ constructor(message: string, options: ProviderProtocolErrorOptions);
+ }

package/dist/conversion/shared/errors.js
@@ -0,0 +1,28 @@
+ function inferCategoryFromCode(code) {
+ switch (code) {
+ case 'TOOL_PROTOCOL_ERROR':
+ return 'TOOL_ERROR';
+ case 'SSE_DECODE_ERROR':
+ case 'MALFORMED_RESPONSE':
+ case 'MALFORMED_REQUEST':
+ default:
+ // Treat these as external protocol/payload problems by default; genuine internal errors are marked INTERNAL_ERROR explicitly by callers.
+ return 'EXTERNAL_ERROR';
+ }
+ }
+ export class ProviderProtocolError extends Error {
+ code;
+ protocol;
+ providerType;
+ category;
+ details;
+ constructor(message, options) {
+ super(message);
+ this.name = 'ProviderProtocolError';
+ this.code = options.code;
+ this.protocol = options.protocol;
+ this.providerType = options.providerType;
+ this.category = options.category ?? inferCategoryFromCode(options.code);
+ this.details = options.details;
+ }
+ }
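
A minimal consumer-side sketch of the new error class, assuming a caller sitting next to errors.js; handleBridgeRequest is a hypothetical function standing in for any bridge conversion that can violate a protocol constraint:

import { ProviderProtocolError } from './errors.js';

try {
  handleBridgeRequest(payload); // hypothetical caller
}
catch (err) {
  if (err instanceof ProviderProtocolError) {
    // code carries the fine-grained reason; category is coarse-grained and, when
    // not given explicitly, is inferred (TOOL_PROTOCOL_ERROR -> TOOL_ERROR,
    // everything else -> EXTERNAL_ERROR).
    console.error(err.code, err.category, err.protocol, err.providerType, err.details);
  }
  else {
    throw err;
  }
}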

package/dist/conversion/shared/responses-conversation-store.js
@@ -1,3 +1,4 @@
+ import { ProviderProtocolError } from './errors.js';
  const TTL_MS = 1000 * 60 * 30; // 30min
  function cloneDeep(value) {
  try {
@@ -205,16 +206,42 @@ class ResponsesConversationStore {
  }
  resumeConversation(responseId, submitPayload, options) {
  if (typeof responseId !== 'string' || !responseId.trim()) {
- throw new Error('Responses conversation requires valid response_id');
+ throw new ProviderProtocolError('Responses conversation requires valid response_id', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'missing_or_empty_response_id'
+ }
+ });
  }
  this.prune();
  const entry = this.responseIndex.get(responseId);
  if (!entry) {
- throw new Error('Responses conversation expired or not found');
+ throw new ProviderProtocolError('Responses conversation expired or not found', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'expired_or_unknown_response_id',
+ responseId
+ }
+ });
  }
  const toolOutputs = Array.isArray(submitPayload.tool_outputs) ? submitPayload.tool_outputs : [];
  if (!toolOutputs.length) {
- throw new Error('tool_outputs array is required when submitting Responses tool results');
+ throw new ProviderProtocolError('tool_outputs array is required when submitting Responses tool results', {
+ code: 'MALFORMED_REQUEST',
+ protocol: 'openai-responses',
+ providerType: 'responses',
+ details: {
+ context: 'responses-conversation-store.resumeConversation',
+ reason: 'missing_tool_outputs',
+ responseId
+ }
+ });
  }
  const mergedInput = coerceInputArray(entry.input);
  const normalizedOutputs = normalizeSubmittedToolOutputs(toolOutputs);
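
For reference, a sketch of the payload shape the resumeConversation guards above accept; the identifiers are invented, and the per-entry fields are ultimately normalized by normalizeSubmittedToolOutputs, which is outside this diff:

// response_id must identify a stored, non-expired conversation and tool_outputs
// must be a non-empty array, otherwise a MALFORMED_REQUEST ProviderProtocolError is thrown.
const submitPayload = {
  tool_outputs: [
    { tool_call_id: 'fc_123', output: '{"temperature": 21}' }
  ]
};
// store.resumeConversation('resp_abc', submitPayload, options); // illustrative call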

package/dist/conversion/shared/responses-output-builder.js
@@ -1,6 +1,41 @@
  import { normalizeFunctionCallId } from './bridge-id-utils.js';
  import { normalizeContentPart } from './output-content-normalizer.js';
  import { expandResponsesMessageItem } from '../../sse/shared/responses-output-normalizer.js';
+ function buildToolOutputIndex(response) {
+ const ids = new Set();
+ try {
+ const primary = Array.isArray(response.tool_outputs)
+ ? response.tool_outputs
+ : [];
+ for (const entry of primary) {
+ if (!entry || typeof entry !== 'object')
+ continue;
+ const raw = entry.tool_call_id ||
+ entry.call_id ||
+ entry.id;
+ if (typeof raw === 'string' && raw.trim().length) {
+ const trimmed = raw.trim();
+ // Record the original ID (e.g. a toolu_-prefixed ID) so clients that use
+ // tool_call_id directly keep working; also record the normalized fc_ form
+ // so it matches what normalizeFunctionCallId yields in buildFunctionCallOutput.
+ ids.add(trimmed);
+ try {
+ const normalized = normalizeFunctionCallId({ callId: trimmed, fallback: trimmed });
+ if (normalized && normalized !== trimmed) {
+ ids.add(normalized);
+ }
+ }
+ catch {
+ // Normalization failures must not break the main flow.
+ }
+ }
+ }
+ }
+ catch {
+ // best-effort: index-building failures must not affect the main flow.
+ }
+ return ids;
+ }
  function appendReasoningSegments(target, raw) {
  if (typeof raw !== 'string' || !raw.length) {
  return;
@@ -91,7 +126,13 @@ export function buildResponsesOutputFromChat(options) {
  const usage = normalizeUsage(response.usage);
  const outputTextMeta = response?.__responses_output_text_meta;
  const outputText = resolveOutputText(convertedContent, outputTextMeta);
- const hasNormalizedToolCalls = normalizedToolCalls.length > 0;
+ // If the top-level tool_outputs already provide results for every tool_call,
+ // those function calls were completed server-side (e.g. server-side web_search)
+ // and required_action/submit_tool_outputs must not be exposed to the client.
+ // Returning a completed status here avoids triggering another tool round.
+ const executedIds = buildToolOutputIndex(response);
+ const pendingToolCalls = normalizedToolCalls.filter((entry) => !executedIds.has(entry.id));
+ const hasNormalizedToolCalls = pendingToolCalls.length > 0;
  if (hasNormalizedToolCalls) {
  for (const item of outputItems) {
  if (item.type === 'message') {
@@ -100,7 +141,7 @@ export function buildResponsesOutputFromChat(options) {
  }
  }
  const requiredAction = hasNormalizedToolCalls
- ? buildRequiredActionFromNormalized(normalizedToolCalls)
+ ? buildRequiredActionFromNormalized(pendingToolCalls)
  : undefined;
  const status = hasNormalizedToolCalls ? 'requires_action' : 'completed';
  return {
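
Sketch of the behavior the pendingToolCalls filtering introduces; not part of the diff, the response object is simplified and the IDs are invented:

// One tool call, already executed server-side, with its result attached as a
// top-level tool_output:
const response = {
  tool_calls: [{ id: 'fc_1', function: { name: 'web_search', arguments: '{"q":"weather"}' } }],
  tool_outputs: [{ tool_call_id: 'fc_1', output: '...search results...' }]
};
// buildToolOutputIndex(response) yields a set containing 'fc_1', so pendingToolCalls
// is empty and the builder returns status 'completed' with no required_action.
// Without the matching tool_output the call stays pending and the builder returns
// status 'requires_action' with a submit_tool_outputs required_action.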
@@ -136,6 +177,47 @@ function normalizeUsage(usageRaw) {
  }
  return usageRaw;
  }
+ function extractApplyPatchArguments(rawArgs) {
+ // Upstream Responses providers may wrap apply_patch arguments in a JSON object
+ // (e.g. { patch: '*** Begin Patch...', input: '...' }). For Codex, the tool
+ // expects a FREEFORM patch string obeying the unified diff grammar. Here we
+ // best-effort extract such a patch string when available.
+ const tryExtractFromObject = (obj) => {
+ if (!obj || typeof obj !== 'object' || Array.isArray(obj))
+ return null;
+ const record = obj;
+ const candidates = ['patch', 'input'];
+ for (const key of candidates) {
+ const value = record[key];
+ if (typeof value !== 'string')
+ continue;
+ const trimmed = value.trimStart();
+ if (trimmed.startsWith('*** Begin Patch')) {
+ return trimmed;
+ }
+ }
+ return null;
+ };
+ if (typeof rawArgs === 'string') {
+ const trimmed = rawArgs.trimStart();
+ if (trimmed.startsWith('*** Begin Patch')) {
+ return trimmed;
+ }
+ try {
+ const parsed = JSON.parse(rawArgs);
+ const fromObject = tryExtractFromObject(parsed);
+ if (fromObject) {
+ return fromObject;
+ }
+ }
+ catch {
+ // non-JSON string that is not a patch header; leave to caller
+ }
+ return null;
+ }
+ const fromObject = tryExtractFromObject(rawArgs);
+ return fromObject;
+ }
  function buildFunctionCallOutput(call, allocateOutputId, sanitizeFunctionName, baseCount, offset) {
  try {
  const fn = call?.function || {};
@@ -146,16 +228,37 @@ function buildFunctionCallOutput(call, allocateOutputId, sanitizeFunctionName, b
  if (!sanitized || sanitized.toLowerCase() === 'tool')
  return null;
  const rawArgs = fn?.arguments ?? call.arguments ?? {};
- const argsStr = typeof rawArgs === 'string'
- ? rawArgs
- : (() => {
+ let argsStr;
+ if (sanitized === 'apply_patch') {
+ const patch = extractApplyPatchArguments(rawArgs);
+ if (patch != null) {
+ argsStr = patch;
+ }
+ else if (typeof rawArgs === 'string') {
+ argsStr = rawArgs;
+ }
+ else {
  try {
- return JSON.stringify(rawArgs ?? {});
+ argsStr = JSON.stringify(rawArgs ?? {});
  }
  catch {
- return '{}';
+ argsStr = '{}';
  }
- })();
+ }
+ }
+ else {
+ argsStr =
+ typeof rawArgs === 'string'
+ ? rawArgs
+ : (() => {
+ try {
+ return JSON.stringify(rawArgs ?? {});
+ }
+ catch {
+ return '{}';
+ }
+ })();
+ }
  const originalCallId = typeof call.id === 'string' && call.id.trim().length
  ? String(call.id)
  : (typeof call.call_id === 'string' && call.call_id.trim().length ? String(call.call_id) : undefined);
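
To round this off, a sketch of how extractApplyPatchArguments behaves for typical apply_patch argument encodings; the patch bodies are truncated placeholders:

extractApplyPatchArguments('*** Begin Patch\n*** Update File: a.ts\n...');
// -> returned as-is: it is already a freeform patch string.

extractApplyPatchArguments('{"patch":"*** Begin Patch\\n..."}');
// -> '*** Begin Patch\n...': unwrapped from the JSON object ('input' is tried as well).

extractApplyPatchArguments({ files: ['a.ts'] });
// -> null: no patch string found, so buildFunctionCallOutput falls back to the
// regular JSON stringification of the arguments.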