aiexecode 1.0.94 → 1.0.96

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiexecode might be problematic.

Files changed (51)
  1. package/README.md +210 -87
  2. package/index.js +33 -1
  3. package/package.json +3 -3
  4. package/payload_viewer/out/404/index.html +1 -1
  5. package/payload_viewer/out/404.html +1 -1
  6. package/payload_viewer/out/_next/static/chunks/{37d0cd2587a38f79.js → b6c0459f3789d25c.js} +1 -1
  7. package/payload_viewer/out/_next/static/chunks/b75131b58f8ca46a.css +3 -0
  8. package/payload_viewer/out/index.html +1 -1
  9. package/payload_viewer/out/index.txt +3 -3
  10. package/payload_viewer/web_server.js +361 -0
  11. package/src/LLMClient/client.js +392 -16
  12. package/src/LLMClient/converters/responses-to-claude.js +67 -18
  13. package/src/LLMClient/converters/responses-to-zai.js +608 -0
  14. package/src/LLMClient/errors.js +18 -4
  15. package/src/LLMClient/index.js +5 -0
  16. package/src/ai_based/completion_judge.js +35 -4
  17. package/src/ai_based/orchestrator.js +146 -35
  18. package/src/commands/agents.js +70 -0
  19. package/src/commands/commands.js +51 -0
  20. package/src/commands/debug.js +52 -0
  21. package/src/commands/help.js +11 -1
  22. package/src/commands/model.js +43 -7
  23. package/src/commands/skills.js +46 -0
  24. package/src/config/ai_models.js +96 -5
  25. package/src/config/constants.js +71 -0
  26. package/src/frontend/components/HelpView.js +106 -2
  27. package/src/frontend/components/SetupWizard.js +53 -8
  28. package/src/frontend/utils/toolUIFormatter.js +261 -0
  29. package/src/system/agents_loader.js +289 -0
  30. package/src/system/ai_request.js +147 -9
  31. package/src/system/command_parser.js +33 -3
  32. package/src/system/conversation_state.js +265 -0
  33. package/src/system/custom_command_loader.js +386 -0
  34. package/src/system/session.js +59 -35
  35. package/src/system/skill_loader.js +318 -0
  36. package/src/system/tool_approval.js +10 -0
  37. package/src/tools/file_reader.js +49 -9
  38. package/src/tools/glob.js +0 -3
  39. package/src/tools/ripgrep.js +5 -7
  40. package/src/tools/skill_tool.js +122 -0
  41. package/src/tools/web_downloader.js +0 -3
  42. package/src/util/clone.js +174 -0
  43. package/src/util/config.js +38 -2
  44. package/src/util/config_migration.js +174 -0
  45. package/src/util/path_validator.js +178 -0
  46. package/src/util/prompt_loader.js +68 -1
  47. package/src/util/safe_fs.js +43 -3
  48. package/payload_viewer/out/_next/static/chunks/ecd2072ebf41611f.css +0 -3
  49. /package/payload_viewer/out/_next/static/{wkEKh6i9XPSyP6rjDRvHn → lHmNygVpv4N1VR0LdnwkJ}/_buildManifest.js +0 -0
  50. /package/payload_viewer/out/_next/static/{wkEKh6i9XPSyP6rjDRvHn → lHmNygVpv4N1VR0LdnwkJ}/_clientMiddlewareManifest.json +0 -0
  51. /package/payload_viewer/out/_next/static/{wkEKh6i9XPSyP6rjDRvHn → lHmNygVpv4N1VR0LdnwkJ}/_ssgManifest.js +0 -0
package/src/LLMClient/converters/responses-to-zai.js
@@ -0,0 +1,608 @@
+ /**
+  * Convert Responses API format to Z.AI (GLM) format
+  * Z.AI uses Anthropic Messages API compatible interface
+  * Base URL: https://api.z.ai/api/anthropic
+  */
+
+ import { getMaxTokens } from '../../config/ai_models.js';
+ import { createDebugLogger } from '../../util/debug_log.js';
+
+ const debugLog = createDebugLogger('zai_converter.log', 'zai_converter');
+
+ /**
+  * Convert Responses API request to Z.AI format
+  * @param {Object} responsesRequest - Responses API format request
+  * @returns {Object} Z.AI (Anthropic-compatible) format request
+  */
+ export function convertResponsesRequestToZaiFormat(responsesRequest) {
+   const startTime = Date.now();
+   debugLog(`[convertRequest] START: model=${responsesRequest.model}, input_items=${responsesRequest.input?.length || 0}`);
+
+   const model = responsesRequest.model;
+   if (!model) {
+     throw new Error('Model name is required');
+   }
+
+   const defaultMaxTokens = getMaxTokens(model);
+
+   const zaiRequest = {
+     model: model,
+     max_tokens: responsesRequest.max_output_tokens || defaultMaxTokens
+   };
+
+   // Convert input to messages
+   const messages = [];
+
+   if (typeof responsesRequest.input === 'string') {
+     messages.push({
+       role: 'user',
+       content: responsesRequest.input
+     });
+   } else if (Array.isArray(responsesRequest.input)) {
+     for (const item of responsesRequest.input) {
+       // Handle output items (no role, has type)
+       if (!item.role && item.type) {
+         if (item.type === 'message') {
+           const textBlocks = [];
+           if (item.content && Array.isArray(item.content)) {
+             for (const contentBlock of item.content) {
+               if (contentBlock.type === 'output_text' && contentBlock.text) {
+                 textBlocks.push({
+                   type: 'text',
+                   text: contentBlock.text
+                 });
+               }
+             }
+           }
+           if (textBlocks.length > 0) {
+             messages.push({
+               role: 'assistant',
+               content: textBlocks
+             });
+           }
+           // Note: If message has no text content, it will be handled by the
+           // subsequent function_call items which create their own assistant messages
+         } else if (item.type === 'function_call') {
+           // Add placeholder text with tool_use for Z.AI/Anthropic API compatibility
+           messages.push({
+             role: 'assistant',
+             content: [
+               {
+                 type: 'text',
+                 text: '(no content)'
+               },
+               {
+                 type: 'tool_use',
+                 id: item.call_id || item.id,
+                 name: item.name,
+                 input: JSON.parse(item.arguments || '{}')
+               }
+             ]
+           });
+         } else if (item.type === 'function_call_output') {
+           // Build tool_result object with proper error handling
+           const toolResult = {
+             type: 'tool_result',
+             tool_use_id: item.call_id,
+             content: typeof item.output === 'string' ? item.output : JSON.stringify(item.output)
+           };
+
+           // Add is_error flag if the output indicates an error
+           // Check for common error patterns in the output
+           if (item.is_error === true) {
+             toolResult.is_error = true;
+           } else if (typeof item.output === 'object' && item.output !== null) {
+             // Check for operation_successful: false pattern
+             if (item.output.operation_successful === false ||
+                 item.output.stdout?.operation_successful === false) {
+               toolResult.is_error = true;
+             }
+           }
+
+           messages.push({
+             role: 'user',
+             content: [toolResult]
+           });
+         }
+         continue;
+       }
+
+       if (item.role && item.content) {
+         if (item.role === 'system') {
+           // Z.AI is Anthropic API compatible - system may be an array of blocks (with cache control)
+           if (Array.isArray(item.content)) {
+             // Array case: handle blocks that may carry cache control
+             const systemBlocks = item.content.map(c => {
+               const block = {
+                 type: 'text',
+                 text: c.type === 'input_text' || c.type === 'text' ? c.text : (typeof c === 'string' ? c : '')
+               };
+               // Keep cache_control if present (Claude Code style)
+               if (c.cache_control) {
+                 block.cache_control = c.cache_control;
+               }
+               return block;
+             }).filter(b => b.text);
+
+             // If any block has cache control keep the array form, otherwise join into a plain string
+             const hasCacheControl = systemBlocks.some(b => b.cache_control);
+             if (hasCacheControl) {
+               zaiRequest.system = systemBlocks;
+               debugLog(`[convertRequest] System message with cache_control: ${systemBlocks.length} blocks`);
+             } else {
+               zaiRequest.system = systemBlocks.map(b => b.text).join('\n');
+             }
+           } else {
+             zaiRequest.system = item.content;
+           }
+         } else if (item.role === 'tool') {
+           const toolResult = {
+             type: 'tool_result',
+             tool_use_id: item.tool_call_id || item.id,
+             content: typeof item.content === 'string' ? item.content : JSON.stringify(item.content)
+           };
+
+           // Add is_error flag if present
+           if (item.is_error === true) {
+             toolResult.is_error = true;
+           }
+
+           messages.push({
+             role: 'user',
+             content: [toolResult]
+           });
+         } else if (item.role === 'assistant' && Array.isArray(item.content)) {
+           const textBlocks = [];
+           const toolUseBlocks = [];
+
+           for (const outputItem of item.content) {
+             if (outputItem.type === 'message' && outputItem.content) {
+               for (const contentBlock of outputItem.content) {
+                 if (contentBlock.type === 'output_text' && contentBlock.text) {
+                   textBlocks.push({
+                     type: 'text',
+                     text: contentBlock.text
+                   });
+                 }
+               }
+             } else if (outputItem.type === 'function_call') {
+               toolUseBlocks.push({
+                 type: 'tool_use',
+                 id: outputItem.call_id || outputItem.id,
+                 name: outputItem.name,
+                 input: JSON.parse(outputItem.arguments || '{}')
+               });
+             }
+           }
+
+           // If we have tool_use blocks but no text, add a placeholder text
+           // Z.AI/Anthropic API recommends having text content with tool calls
+           if (toolUseBlocks.length > 0 && textBlocks.length === 0) {
+             textBlocks.push({
+               type: 'text',
+               text: '(no content)'
+             });
+           }
+
+           const zaiContent = [...textBlocks, ...toolUseBlocks];
+
+           if (zaiContent.length > 0) {
+             messages.push({
+               role: 'assistant',
+               content: zaiContent
+             });
+           }
+         } else {
+           // Handle regular user/assistant messages
+           if (Array.isArray(item.content)) {
+             // Check for content blocks that carry cache control
+             const hasCacheControl = item.content.some(c => c.cache_control);
+
+             if (hasCacheControl) {
+               // Cache control present: keep the content as an array of blocks
+               const contentBlocks = item.content.map(c => {
+                 const block = {
+                   type: 'text',
+                   text: c.type === 'input_text' || c.type === 'text' ? c.text : (typeof c === 'string' ? c : '')
+                 };
+                 if (c.cache_control) {
+                   block.cache_control = c.cache_control;
+                 }
+                 return block;
+               }).filter(b => b.text);
+
+               messages.push({
+                 role: item.role === 'assistant' ? 'assistant' : 'user',
+                 content: contentBlocks
+               });
+               debugLog(`[convertRequest] User message with cache_control: ${contentBlocks.length} blocks`);
+             } else {
+               // No cache control: join into plain text
+               const content = item.content.map(c => c.type === 'input_text' || c.type === 'text' ? c.text : c).filter(Boolean).join('\n');
+               messages.push({
+                 role: item.role === 'assistant' ? 'assistant' : 'user',
+                 content: content
+               });
+             }
+           } else {
+             messages.push({
+               role: item.role === 'assistant' ? 'assistant' : 'user',
+               content: item.content
+             });
+           }
+         }
+       }
+     }
+   }
+
+   // Merge consecutive messages with the same role
+   const mergedMessages = [];
+   for (let i = 0; i < messages.length; i++) {
+     const currentMsg = messages[i];
+
+     if (i < messages.length - 1 && messages[i + 1].role === currentMsg.role) {
+       const mergedContent = Array.isArray(currentMsg.content) ? [...currentMsg.content] : [currentMsg.content];
+
+       while (i < messages.length - 1 && messages[i + 1].role === currentMsg.role) {
+         i++;
+         const nextContent = messages[i].content;
+         if (Array.isArray(nextContent)) {
+           mergedContent.push(...nextContent);
+         } else {
+           mergedContent.push(nextContent);
+         }
+       }
+
+       mergedMessages.push({
+         role: currentMsg.role,
+         content: mergedContent
+       });
+     } else {
+       mergedMessages.push(currentMsg);
+     }
+   }
+
+   // Normalize content format for Z.AI/Anthropic API
+   // Content arrays must contain objects with {type: "text", text: "..."} format
+   for (const msg of mergedMessages) {
+     if (Array.isArray(msg.content)) {
+       msg.content = msg.content.map(item => {
+         // If item is already in correct format, keep it
+         if (typeof item === 'object' && item !== null && item.type) {
+           return item;
+         }
+         // Convert plain string to text block format
+         if (typeof item === 'string') {
+           return { type: 'text', text: item };
+         }
+         // Fallback: convert to string
+         return { type: 'text', text: String(item) };
+       });
+     }
+   }
+
+   zaiRequest.messages = mergedMessages;
+
+   // Handle instructions (system message)
+   if (responsesRequest.instructions) {
+     zaiRequest.system = responsesRequest.instructions;
+   }
+
+   // Convert tools from Responses API format to Z.AI (Anthropic) format
+   if (responsesRequest.tools && Array.isArray(responsesRequest.tools)) {
+     zaiRequest.tools = responsesRequest.tools.map(tool => {
+       if (tool.type === 'function') {
+         if (tool.function) {
+           return {
+             name: tool.function.name,
+             description: tool.function.description || `Function: ${tool.function.name}`,
+             input_schema: tool.function.parameters || {
+               type: 'object',
+               properties: {}
+             }
+           };
+         } else {
+           return {
+             name: tool.name,
+             description: tool.description || `Function: ${tool.name}`,
+             input_schema: tool.parameters || {
+               type: 'object',
+               properties: {}
+             }
+           };
+         }
+       } else if (tool.type === 'custom') {
+         return {
+           name: tool.name,
+           description: tool.description || `Tool: ${tool.name}`,
+           input_schema: tool.input_schema || {
+             type: 'object',
+             properties: {}
+           }
+         };
+       }
+       return {
+         name: tool.name,
+         description: tool.description,
+         input_schema: tool.input_schema
+       };
+     });
+   }
+
+   // Temperature - always set to 0 for consistent results
+   zaiRequest.temperature = 0;
+
+   // Stream - Z.AI requires this field (will be overridden by SDK if using stream method)
+   zaiRequest.stream = responsesRequest.stream || false;
+
+   // Tool choice
+   if (responsesRequest.tool_choice !== undefined) {
+     if (typeof responsesRequest.tool_choice === 'string') {
+       if (responsesRequest.tool_choice === 'auto') {
+         zaiRequest.tool_choice = { type: 'auto' };
+       } else if (responsesRequest.tool_choice === 'required') {
+         zaiRequest.tool_choice = { type: 'any' };
+       }
+     } else if (responsesRequest.tool_choice?.type === 'function' || responsesRequest.tool_choice?.type === 'custom') {
+       const toolName = responsesRequest.tool_choice.function?.name || responsesRequest.tool_choice.name;
+       zaiRequest.tool_choice = {
+         type: 'tool',
+         name: toolName
+       };
+     }
+   }
+
+   // Metadata
+   if (responsesRequest.metadata) {
+     zaiRequest.metadata = responsesRequest.metadata;
+   }
+
+   // Assistant prefill support
+   // Pre-seed an assistant message to steer Z.AI/GLM toward a JSON response
+   if (responsesRequest.assistant_prefill) {
+     zaiRequest.messages.push({
+       role: 'assistant',
+       content: [
+         {
+           type: 'text',
+           text: responsesRequest.assistant_prefill
+         }
+       ]
+     });
+     debugLog(`[convertRequest] Assistant prefill added: ${responsesRequest.assistant_prefill}`);
+   }
+
+   // Handle json_schema format by converting to tool use
+   if (responsesRequest.text?.format?.type === 'json_schema') {
+     const schemaName = responsesRequest.text.format.name || 'output';
+     const schema = responsesRequest.text.format.schema;
+
+     const syntheticTool = {
+       name: schemaName,
+       description: `Generate structured output matching the ${schemaName} schema`,
+       input_schema: schema
+     };
+
+     zaiRequest.tools = [syntheticTool];
+
+     zaiRequest.tool_choice = {
+       type: 'tool',
+       name: schemaName
+     };
+
+     if (zaiRequest.messages.length > 0 && zaiRequest.messages[zaiRequest.messages.length - 1].role === 'assistant') {
+       zaiRequest.messages.push({
+         role: 'user',
+         content: [{ type: 'text', text: 'Please provide the structured output.' }]
+       });
+     }
+   }
+
+   const elapsed = Date.now() - startTime;
+   debugLog(`[convertRequest] END: ${elapsed}ms, messages=${zaiRequest.messages?.length}, system_len=${zaiRequest.system?.length || 0}, tools=${zaiRequest.tools?.length || 0}`);
+
+   return zaiRequest;
+ }
+
+ /**
+  * Convert Z.AI response to Responses API format
+  * @param {Object} zaiResponse - Z.AI (Anthropic-compatible) format response
+  * @param {string} model - Model name
+  * @param {Object} originalRequest - Original request for context
+  * @returns {Object} Responses API format response
+  */
+ export function convertZaiResponseToResponsesFormat(zaiResponse, model = 'glm-4.7', originalRequest = {}) {
+   const startTime = Date.now();
+   debugLog(`[convertResponse] START: id=${zaiResponse.id}, content_blocks=${zaiResponse.content?.length || 0}`);
+
+   const output = [];
+   let outputText = '';
+
+   const wasJsonSchemaRequest = originalRequest.text?.format?.type === 'json_schema';
+   const schemaName = originalRequest.text?.format?.name;
+
+   // Process content blocks
+   if (zaiResponse.content && Array.isArray(zaiResponse.content)) {
+     const messageContent = [];
+     let jsonSchemaOutput = null; // Holds the tool_use input for json_schema requests
+
+     // First, check the tool_use blocks for json_schema output
+     if (wasJsonSchemaRequest) {
+       for (const block of zaiResponse.content) {
+         if (block.type === 'tool_use' && block.name === schemaName) {
+           jsonSchemaOutput = JSON.stringify(block.input);
+           debugLog(`[convertResponse] Found json_schema tool_use: ${jsonSchemaOutput}`);
+           break;
+         }
+       }
+     }
+
+     for (const block of zaiResponse.content) {
+       if (block.type === 'thinking') {
+         // Convert a Z.AI/GLM thinking block to the Responses API reasoning type
+         output.push({
+           id: `reasoning_${zaiResponse.id}_${output.length}`,
+           type: 'reasoning',
+           status: 'completed',
+           content: [
+             {
+               type: 'thinking',
+               thinking: block.thinking || ''
+             }
+           ]
+         });
+         debugLog(`[convertResponse] Converted thinking block: ${(block.thinking || '').length} chars`);
+       } else if (block.type === 'text') {
+         // For a json_schema request with a tool_use result, do not include the text block in output_text
+         // (handles GLM models that return both text and tool_use)
+         if (wasJsonSchemaRequest && jsonSchemaOutput) {
+           debugLog(`[convertResponse] Skipping text block for json_schema request (tool_use found)`);
+           // Add the text block to messageContent only (for reference)
+           messageContent.push({
+             type: 'output_text',
+             text: block.text,
+             annotations: []
+           });
+           // Do not append it to outputText!
+         } else {
+           messageContent.push({
+             type: 'output_text',
+             text: block.text,
+             annotations: []
+           });
+           outputText += block.text;
+         }
+       } else if (block.type === 'tool_use') {
+         if (wasJsonSchemaRequest && block.name === schemaName) {
+           // The tool_use of a json_schema request is converted into outputText only
+           const jsonOutput = JSON.stringify(block.input);
+           messageContent.push({
+             type: 'output_text',
+             text: jsonOutput,
+             annotations: []
+           });
+           outputText = jsonOutput; // Set outputText to the JSON only (overwrite)
+         } else {
+           // Z.AI uses 'call_' prefix for tool IDs
+           output.push({
+             id: `fc_${block.id}`,
+             type: 'function_call',
+             status: 'completed',
+             arguments: JSON.stringify(block.input),
+             call_id: block.id,
+             name: block.name
+           });
+         }
+       }
+     }
+
+     if (messageContent.length > 0) {
+       output.push({
+         id: `msg_${zaiResponse.id}`,
+         type: 'message',
+         status: 'completed',
+         role: 'assistant',
+         content: messageContent
+       });
+     }
+   }
+
+   if (output.length === 0) {
+     output.push({
+       id: `msg_${zaiResponse.id}`,
+       type: 'message',
+       status: 'completed',
+       role: 'assistant',
+       content: [
+         {
+           type: 'output_text',
+           text: outputText || ' ',
+           annotations: []
+         }
+       ]
+     });
+   }
+
+   // Determine status and incomplete_details based on stop_reason
+   const stopReason = zaiResponse.stop_reason;
+   const isCompleted = stopReason === 'end_turn' || stopReason === 'tool_use';
+   const isMaxTokens = stopReason === 'max_tokens';
+
+   // Build incomplete_details if response was truncated
+   let incompleteDetails = null;
+   if (isMaxTokens) {
+     incompleteDetails = {
+       reason: 'max_output_tokens',
+       message: 'Response was truncated because it reached the maximum output token limit'
+     };
+     debugLog(`[convertResponse] WARNING: Response truncated due to max_tokens`);
+   } else if (!isCompleted && stopReason) {
+     incompleteDetails = {
+       reason: stopReason,
+       message: `Response stopped with reason: ${stopReason}`
+     };
+   }
+
+   const responsesResponse = {
+     id: `resp_${zaiResponse.id}`,
+     object: 'response',
+     created_at: Math.floor(Date.now() / 1000),
+     status: isCompleted ? 'completed' : 'incomplete',
+     background: false,
+     billing: {
+       payer: 'developer'
+     },
+     error: null,
+     incomplete_details: incompleteDetails,
+     instructions: originalRequest.instructions || null,
+     max_output_tokens: originalRequest.max_output_tokens || null,
+     max_tool_calls: null,
+     model: model,
+     output: output,
+     parallel_tool_calls: true,
+     previous_response_id: null,
+     prompt_cache_key: null,
+     prompt_cache_retention: null,
+     reasoning: {
+       effort: originalRequest.reasoning?.effort || null,
+       summary: originalRequest.reasoning?.summary || null
+     },
+     safety_identifier: null,
+     service_tier: zaiResponse.usage?.service_tier || 'standard',
+     store: originalRequest.store !== undefined ? originalRequest.store : true,
+     temperature: originalRequest.temperature !== undefined ? originalRequest.temperature : 1,
+     text: {
+       format: {
+         type: 'text'
+       },
+       verbosity: 'medium'
+     },
+     tool_choice: originalRequest.tool_choice || 'auto',
+     tools: originalRequest.tools || [],
+     top_logprobs: 0,
+     top_p: originalRequest.top_p !== undefined ? originalRequest.top_p : 1,
+     truncation: 'disabled',
+     usage: {
+       input_tokens: zaiResponse.usage?.input_tokens || 0,
+       input_tokens_details: {
+         cached_tokens: zaiResponse.usage?.cache_read_input_tokens || 0
+       },
+       output_tokens: zaiResponse.usage?.output_tokens || 0,
+       output_tokens_details: {
+         reasoning_tokens: 0
+       },
+       total_tokens: (zaiResponse.usage?.input_tokens || 0) + (zaiResponse.usage?.output_tokens || 0),
+       // Additional Z.AI fields
+       cache_read_input_tokens: zaiResponse.usage?.cache_read_input_tokens || 0,
+       server_tool_use: zaiResponse.usage?.server_tool_use || null
+     },
+     user: null,
+     metadata: {},
+     output_text: outputText
+   };
+
+   const elapsed = Date.now() - startTime;
+   debugLog(`[convertResponse] END: ${elapsed}ms, output_items=${output.length}, output_text_len=${outputText.length}`);
+
+   return responsesResponse;
+ }
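For orientation, the sketch below is not part of the diff: it shows how the two exported converters could be wired around an Anthropic-SDK-style client pointed at the Z.AI base URL named in the file header. The callZai wrapper and the anthropicClient argument are illustrative names, not code from the package.

import {
  convertResponsesRequestToZaiFormat,
  convertZaiResponseToResponsesFormat
} from './converters/responses-to-zai.js';

// Assumed: anthropicClient is an Anthropic-SDK-compatible client configured
// with baseURL https://api.z.ai/api/anthropic (per the file header above).
async function callZai(anthropicClient, responsesRequest) {
  // Responses API request -> Anthropic Messages-style request
  const zaiRequest = convertResponsesRequestToZaiFormat(responsesRequest);

  // Non-streaming call; the converter sets stream: false by default
  const zaiResponse = await anthropicClient.messages.create(zaiRequest);

  // Anthropic Messages-style response -> Responses API response
  return convertZaiResponseToResponsesFormat(zaiResponse, responsesRequest.model, responsesRequest);
}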
package/src/LLMClient/errors.js
@@ -176,15 +176,29 @@ export function normalizeError(error, provider) {
    }

    // Anthropic SDK error (check BEFORE OpenAI SDK error, as both have error.status && error.error)
-   if (provider === 'claude' && error.status) {
+   // Z.AI uses Anthropic SDK so handle similarly
+   if ((provider === 'claude' || provider === 'zai') && error.status) {
      // Parse Claude error from error.error object (Anthropic SDK provides parsed error)
      let errorType = ERROR_TYPE_MAP[error.status] || 'api_error';
      let code = null;
-     let message = error.message || 'Claude API error';
+     let message = error.message || (provider === 'zai' ? 'Z.AI API error' : 'Claude API error');

+     // Z.AI specific error format: { detail: [{ type, loc, msg, ... }] }
+     if (error.error && error.error.detail && Array.isArray(error.error.detail)) {
+       const zaiError = error.error.detail[0];
+       if (zaiError) {
+         code = zaiError.type || null;
+         message = zaiError.msg || message;
+
+         // Map Z.AI error types
+         if (zaiError.type === 'json_invalid') {
+           errorType = 'invalid_request_error';
+         }
+       }
+     }
      // Anthropic SDK provides error.error object with structure:
      // { type: 'error', error: { type: 'not_found_error', message: '...' } }
-     if (error.error && error.error.error) {
+     else if (error.error && error.error.error) {
        const claudeError = error.error.error;

        if (claudeError.type) {
@@ -220,7 +234,7 @@ export function normalizeError(error, provider) {
        param: null,
        code: code,
        status: error.status,
-       provider: 'claude',
+       provider: provider,
        originalError: error
      }
    );
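For reference, a sketch of the Z.AI error payload shape the new branch above parses. The field names follow the comment in the diff ({ detail: [{ type, loc, msg, ... }] }); the concrete values and the 422 status are invented for illustration.

const exampleZaiSdkError = {
  status: 422,                     // HTTP status attached by the SDK (value assumed)
  message: 'Request failed',
  error: {
    detail: [
      {
        type: 'json_invalid',      // mapped above to errorType 'invalid_request_error'
        loc: ['body', 'messages'], // present in the format but not read by normalizeError
        msg: 'Invalid JSON in request body'
      }
    ]
  }
};
// normalizeError(exampleZaiSdkError, 'zai') would then report code 'json_invalid'
// and message 'Invalid JSON in request body'.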
package/src/LLMClient/index.js
@@ -21,6 +21,11 @@ export {
    convertOllamaResponseToResponsesFormat
  } from './converters/responses-to-ollama.js';

+ export {
+   convertResponsesRequestToZaiFormat,
+   convertZaiResponseToResponsesFormat
+ } from './converters/responses-to-zai.js';
+
  // Export error classes
  export {
    LLMError,
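A minimal usage sketch of the new re-export (not part of the diff); the relative import path is illustrative and depends on the caller's location.

import { convertResponsesRequestToZaiFormat } from './src/LLMClient/index.js';

const zaiRequest = convertResponsesRequestToZaiFormat({
  model: 'glm-4.7',
  input: 'Hello',
  max_output_tokens: 1024
});
// Per the converter above: { model: 'glm-4.7', max_tokens: 1024,
//   messages: [{ role: 'user', content: 'Hello' }], temperature: 0, stream: false }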