@icode-js/icode 3.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/README.md +346 -0
  2. package/bin/icode.js +6 -0
  3. package/package.json +34 -0
  4. package/src/cli.js +131 -0
  5. package/src/commands/ai.js +287 -0
  6. package/src/commands/checkout.js +59 -0
  7. package/src/commands/clean.js +65 -0
  8. package/src/commands/codereview.js +52 -0
  9. package/src/commands/config.js +513 -0
  10. package/src/commands/explain.js +80 -0
  11. package/src/commands/help.js +49 -0
  12. package/src/commands/info.js +57 -0
  13. package/src/commands/migrate.js +86 -0
  14. package/src/commands/push.js +125 -0
  15. package/src/commands/sync.js +74 -0
  16. package/src/commands/tag.js +53 -0
  17. package/src/commands/undo.js +66 -0
  18. package/src/core/ai-client.js +1125 -0
  19. package/src/core/ai-commit-summary.js +18 -0
  20. package/src/core/ai-config.js +342 -0
  21. package/src/core/ai-diff-range.js +117 -0
  22. package/src/core/args.js +47 -0
  23. package/src/core/commit-conventions.js +169 -0
  24. package/src/core/config-store.js +194 -0
  25. package/src/core/errors.js +25 -0
  26. package/src/core/git-context.js +105 -0
  27. package/src/core/git-service.js +428 -0
  28. package/src/core/hook-diagnostics.js +23 -0
  29. package/src/core/loading.js +36 -0
  30. package/src/core/logger.js +55 -0
  31. package/src/core/prompts.js +152 -0
  32. package/src/core/shell.js +77 -0
  33. package/src/workflows/ai-codereview-workflow.js +126 -0
  34. package/src/workflows/ai-commit-workflow.js +128 -0
  35. package/src/workflows/ai-conflict-workflow.js +102 -0
  36. package/src/workflows/ai-explain-workflow.js +116 -0
  37. package/src/workflows/ai-risk-review-workflow.js +49 -0
  38. package/src/workflows/checkout-workflow.js +85 -0
  39. package/src/workflows/clean-workflow.js +131 -0
  40. package/src/workflows/info-workflow.js +30 -0
  41. package/src/workflows/migrate-workflow.js +449 -0
  42. package/src/workflows/push-workflow.js +276 -0
  43. package/src/workflows/rollback-workflow.js +84 -0
  44. package/src/workflows/sync-workflow.js +141 -0
  45. package/src/workflows/tag-workflow.js +64 -0
  46. package/src/workflows/undo-workflow.js +328 -0
@@ -0,0 +1,1125 @@
1
+ import { IcodeError } from './errors.js'
2
+ import { getAiProfile } from './ai-config.js'
3
+ import { withSpinner } from './loading.js'
4
+
5
// Strip any run of trailing "/" characters from a URL-ish string.
function trimSlash(value) {
  let result = value
  while (result.endsWith('/')) {
    result = result.slice(0, -1)
  }
  return result
}
8
+
9
// Interpret common CLI/env flag spellings ("1", "yes", "on", ...) as true.
function isTruthy(value) {
  const flag = String(value || '').trim().toLowerCase()
  switch (flag) {
    case '1':
    case 'true':
    case 'yes':
    case 'y':
    case 'on':
      return true
    default:
      return false
  }
}
13
+
14
// Coerce a plain headers object to string keys/values, dropping null/undefined values.
function normalizeHeaders(headers = {}) {
  const result = {}
  for (const [key, value] of Object.entries(headers)) {
    if (value != null) {
      result[String(key)] = String(value)
    }
  }
  return result
}
24
+
25
// True for non-null, non-array objects (the shapes we can safely spread-merge).
function isPlainObject(value) {
  if (value == null || Array.isArray(value)) {
    return false
  }
  return typeof value === 'object'
}
28
+
29
/**
 * Shallow-merge two request bodies (overrides win), with one level of deep
 * merge for `options` so profile.requestBody can tweak a single option field
 * without wiping the defaults.
 */
function mergeRequestBody(baseBody, extraBody) {
  const defaults = isPlainObject(baseBody) ? baseBody : {}
  const overrides = isPlainObject(extraBody) ? extraBody : {}
  const merged = { ...defaults, ...overrides }

  const defaultOptions = isPlainObject(defaults.options) ? defaults.options : null
  const overrideOptions = isPlainObject(overrides.options) ? overrides.options : null
  if (defaultOptions || overrideOptions) {
    merged.options = { ...(defaultOptions || {}), ...(overrideOptions || {}) }
  }

  return merged
}
47
+
48
// Explicit options.dumpResponse wins; otherwise honor the ICODE_AI_DUMP_RESPONSE env flag.
function shouldDumpResponse(options = {}) {
  const { dumpResponse } = options
  if (dumpResponse === true || dumpResponse === false) {
    return dumpResponse
  }
  return isTruthy(process.env.ICODE_AI_DUMP_RESPONSE)
}
55
+
56
// Only an explicit boolean true opts in to using reasoning text as final output.
function allowThinkingFallback(options = {}) {
  const { allowThinkingFallback: flag } = options
  return flag === true
}
59
+
60
/**
 * Convert a fetch Headers object, an array of [key, value] pairs, or a plain
 * object into a plain { name: value } record with string values.
 *
 * Fix: the Array.isArray branch was unreachable — arrays also expose .forEach,
 * so [[key, value], ...] input previously hit the Headers-style branch first
 * and was folded as { "0": "key,value" }. Arrays are now checked before the
 * forEach duck-test.
 */
function serializeHeaders(headers) {
  const result = {}
  if (!headers) {
    return result
  }

  // Entry pairs: [[key, value], ...] — must be handled before the forEach check.
  if (Array.isArray(headers)) {
    for (const [key, value] of headers) {
      if (key != null && value != null) {
        result[String(key)] = String(value)
      }
    }
    return result
  }

  // fetch Headers (and Map) expose forEach(value, key).
  if (typeof headers.forEach === 'function') {
    headers.forEach((value, key) => {
      result[String(key)] = String(value)
    })
    return result
  }

  if (typeof headers === 'object') {
    return normalizeHeaders(headers)
  }

  return result
}
88
+
89
// Debug helper: pretty-print the raw AI response metadata to stderr.
function printResponseDump(meta) {
  const { profile, format, endpoint, status, responseHeaders, responseText } = meta
  const payload = {
    profile,
    format,
    endpoint,
    status,
    headers: responseHeaders || {},
    body: responseText
  }
  process.stderr.write(`[icode] AI 原始响应:\n${JSON.stringify(payload, null, 2)}\n`)
}
100
+
101
/**
 * Build human-readable detail lines from a fetch failure: the top-level error
 * message plus a "CODE: message" line from error.cause (deduplicated).
 */
function collectFetchErrorLines(error) {
  const lines = []

  const topMessage = typeof error?.message === 'string' ? error.message.trim() : ''
  if (topMessage) {
    lines.push(topMessage)
  }

  const cause = error?.cause
  if (cause && typeof cause === 'object') {
    const pieces = []
    for (const field of ['code', 'message']) {
      const raw = cause[field]
      if (typeof raw === 'string' && raw.trim()) {
        pieces.push(raw.trim())
      }
    }

    const causeLine = pieces.join(': ')
    if (causeLine && !lines.includes(causeLine)) {
      lines.push(causeLine)
    }
  }

  return lines
}
129
+
130
/**
 * Wrap a low-level fetch failure in an IcodeError carrying endpoint/profile
 * metadata, preserving the original error as `cause`.
 */
function buildFetchRequestError(profile, endpoint, error) {
  const details = collectFetchErrorLines(error)
  const suffix = details.length > 0 ? `\n${details.join('\n')}` : ''

  return new IcodeError(`AI 请求异常(${profile.format}/${profile.name}): ${endpoint}${suffix}`, {
    code: 'AI_FETCH_ERROR',
    exitCode: 2,
    cause: error,
    meta: {
      endpoint,
      profile: profile.name,
      format: profile.format,
      causeMessage: error?.message || '',
      causeCode: error?.cause?.code || error?.code || ''
    }
  })
}
147
+
148
/**
 * Resolve the API key for a profile: an explicit profile.apiKey wins,
 * otherwise fall back to format-specific environment variables.
 */
function resolveApiKey(profile) {
  if (profile.apiKey) {
    return profile.apiKey
  }

  const env = process.env
  switch (profile.format) {
    case 'anthropic':
      return env.ANTHROPIC_API_KEY || env.ICODE_AI_API_KEY || ''
    case 'ollama':
      return env.OLLAMA_API_KEY || ''
    default:
      return env.OPENAI_API_KEY || env.ICODE_AI_API_KEY || ''
  }
}
163
+
164
/**
 * Build the chat endpoint URL for a profile. Ollama profiles fall back to
 * OLLAMA_HOST (default http://127.0.0.1:11434) when no baseUrl is configured.
 * The format-specific path suffix is appended only if not already present.
 * @throws {IcodeError} when no base URL can be determined
 */
function buildEndpoint(profile) {
  let baseUrl = trimSlash(profile.baseUrl || '')
  if (!baseUrl && profile.format === 'ollama') {
    baseUrl = trimSlash(process.env.OLLAMA_HOST || 'http://127.0.0.1:11434')
  }

  if (!baseUrl) {
    throw new IcodeError(`AI profile ${profile.name} 缺少 baseUrl`, {
      code: 'AI_BASE_URL_EMPTY',
      exitCode: 2
    })
  }

  const withSuffix = (suffix) => (baseUrl.endsWith(suffix) ? baseUrl : `${baseUrl}${suffix}`)

  if (profile.format === 'anthropic') {
    return withSuffix('/messages')
  }

  if (profile.format === 'ollama') {
    return withSuffix('/api/chat')
  }

  // OpenAI-compatible: accept an explicit /responses URL as-is.
  if (baseUrl.endsWith('/responses')) {
    return baseUrl
  }

  return withSuffix('/chat/completions')
}
202
+
203
/**
 * Derive the OpenAI Responses API endpoint from profile.baseUrl, rewriting a
 * configured /chat/completions URL to its /responses sibling.
 * @throws {IcodeError} when baseUrl is missing
 */
function buildOpenAIResponsesEndpoint(profile) {
  const baseUrl = trimSlash(profile.baseUrl || '')

  if (!baseUrl) {
    throw new IcodeError(`AI profile ${profile.name} 缺少 baseUrl`, {
      code: 'AI_BASE_URL_EMPTY',
      exitCode: 2
    })
  }

  if (baseUrl.endsWith('/responses')) {
    return baseUrl
  }

  const chatSuffix = '/chat/completions'
  const root = baseUrl.endsWith(chatSuffix)
    ? baseUrl.slice(0, baseUrl.length - chatSuffix.length)
    : baseUrl
  return `${root}/responses`
}
223
+
224
// True when the endpoint targets the OpenAI Responses API rather than chat completions.
function isOpenAIResponsesEndpoint(endpoint = '') {
  return /\/responses$/.test(endpoint)
}
227
+
228
/**
 * Flatten a mixed content array (strings and {text}/{content} parts) into a
 * newline-joined, trimmed string. Non-array input yields ''.
 */
function parseContentArray(content) {
  if (!Array.isArray(content)) {
    return ''
  }

  const pieces = []
  for (const item of content) {
    if (typeof item === 'string') {
      pieces.push(item)
    } else if (item && typeof item === 'object' && typeof item.text === 'string') {
      pieces.push(item.text)
    } else if (item && typeof item === 'object' && typeof item.content === 'string') {
      pieces.push(item.content)
    } else {
      pieces.push('')
    }
  }

  return pieces.join('\n').trim()
}
253
+
254
// Extract the assistant text from an OpenAI chat-completions payload.
function parseOpenAIContent(payload) {
  const content = payload?.choices?.[0]?.message?.content
  return Array.isArray(content) ? parseContentArray(content) : (content || '').trim()
}
262
+
263
/**
 * Extract output text from an OpenAI Responses payload: prefer the aggregated
 * output_text, else collect all output_text parts from the output list.
 */
function parseOpenAIResponsesContent(payload) {
  const aggregated = payload?.output_text
  if (typeof aggregated === 'string' && aggregated.trim()) {
    return aggregated.trim()
  }

  const outputItems = Array.isArray(payload?.output) ? payload.output : []
  const texts = outputItems
    .flatMap((item) => (Array.isArray(item?.content) ? item.content : []))
    .filter((part) => part?.type === 'output_text' && typeof part.text === 'string' && part.text.trim())
    .map((part) => part.text.trim())

  return texts.join('\n').trim()
}
282
+
283
// Join the text fields of an Anthropic messages-API content array; '' otherwise.
function parseAnthropicContent(payload) {
  const blocks = payload?.content
  if (!Array.isArray(blocks)) {
    return ''
  }

  return blocks
    .map((block) => block?.text || '')
    .join('\n')
    .trim()
}
294
+
295
/**
 * Extract assistant text from the many response shapes Ollama (and gateways
 * proxying it) can emit, in priority order. NOTE the deliberate asymmetry:
 * for message.content / response / data.choices[0].message.content, a string
 * value is authoritative and is returned even when it trims to '', skipping
 * the remaining fallbacks; other shapes only win when non-empty.
 */
function parseOllamaContent(payload) {
  // 1) OpenAI-compatible shape first (common for proxy gateways).
  const openAiText = parseOpenAIContent(payload)
  if (openAiText) {
    return openAiText
  }

  // 2) Native /api/chat shape.
  const messageContent = payload?.message?.content
  if (typeof messageContent === 'string') {
    return messageContent.trim()
  }
  if (Array.isArray(messageContent)) {
    return parseContentArray(messageContent)
  }

  // 3) /api/generate shape.
  const responseContent = payload?.response
  if (typeof responseContent === 'string') {
    return responseContent.trim()
  }

  // 4) Assorted gateway output fields.
  const outputContent = payload?.output?.text || payload?.output_text || payload?.output?.content
  if (typeof outputContent === 'string' && outputContent.trim()) {
    return outputContent.trim()
  }
  if (Array.isArray(outputContent)) {
    const parsed = parseContentArray(outputContent)
    if (parsed) {
      return parsed
    }
  }

  // 5) Wrapped OpenAI shape under `data`.
  const nestedContent = payload?.data?.choices?.[0]?.message?.content
  if (typeof nestedContent === 'string') {
    return nestedContent.trim()
  }
  if (Array.isArray(nestedContent)) {
    const parsed = parseContentArray(nestedContent)
    if (parsed) {
      return parsed
    }
  }

  // 6) Bare top-level content field.
  const plainContent = payload?.content
  if (typeof plainContent === 'string' && plainContent.trim()) {
    return plainContent.trim()
  }
  if (Array.isArray(plainContent)) {
    const parsed = parseContentArray(plainContent)
    if (parsed) {
      return parsed
    }
  }

  // 7) Last resorts: result / message as plain strings.
  for (const candidate of [payload?.result, payload?.message]) {
    if (typeof candidate === 'string' && candidate.trim()) {
      return candidate.trim()
    }
  }

  return ''
}
356
+
357
/**
 * Extract reasoning text from an OpenAI chat payload, checking
 * reasoning_content, reasoning, then thinking (string or content array).
 */
function parseOpenAIThinking(payload) {
  const message = payload?.choices?.[0]?.message
  if (!message) {
    return ''
  }

  for (const candidate of [message.reasoning_content, message.reasoning, message.thinking]) {
    if (typeof candidate === 'string' && candidate.trim()) {
      return candidate.trim()
    }
    if (Array.isArray(candidate)) {
      const parsed = parseContentArray(candidate)
      if (parsed) {
        return parsed
      }
    }
  }

  return ''
}
383
+
384
// Join the reasoning summary entries (strings or {text} parts) of a Responses payload.
function parseOpenAIResponsesThinking(payload) {
  const summary = Array.isArray(payload?.reasoning?.summary) ? payload.reasoning.summary : []

  const pieces = []
  for (const item of summary) {
    if (typeof item === 'string') {
      pieces.push(item)
    } else if (typeof item?.text === 'string') {
      pieces.push(item.text)
    }
  }

  return pieces.filter(Boolean).join('\n').trim()
}
400
+
401
/**
 * Extract reasoning text from an Ollama payload: message-level fields first
 * (thinking, reasoning_content, reasoning), then the same fields at top level.
 */
function parseOllamaThinking(payload) {
  const pickThinking = (source) => {
    for (const value of [source?.thinking, source?.reasoning_content, source?.reasoning]) {
      if (typeof value === 'string' && value.trim()) {
        return value.trim()
      }
      if (Array.isArray(value)) {
        const parsed = parseContentArray(value)
        if (parsed) {
          return parsed
        }
      }
    }
    return ''
  }

  const message = payload?.message
  const fromMessage = message && typeof message === 'object' ? pickThinking(message) : ''
  return fromMessage || pickThinking(payload)
}
442
+
443
/**
 * Parse text as JSON; on failure, try the first {...} block embedded in the
 * text (greedy match). Returns null when nothing parses.
 */
function parseJsonWithFallback(rawText) {
  const text = (rawText || '').trim()
  if (!text) {
    return null
  }

  const tryParse = (candidate) => {
    try {
      return JSON.parse(candidate)
    } catch {
      return null
    }
  }

  const direct = tryParse(text)
  if (direct !== null) {
    return direct
  }

  // Fallback: extract the first JSON-looking object block and retry.
  const match = text.match(/\{[\s\S]*\}/)
  return match ? tryParse(match[0]) : null
}
465
+
466
/**
 * Reassemble an OpenAI chat-completions SSE stream into full content and
 * thinking strings. Returns { payloadCount, content, thinking } where
 * payloadCount is the number of parseable data payloads ([DONE] excluded).
 */
function parseOpenAIStreamContent(rawText) {
  const contentParts = []
  const thinkingParts = []
  let payloadCount = 0

  const collect = (value, sink) => {
    if (typeof value === 'string') {
      sink.push(value)
    } else if (Array.isArray(value)) {
      const parsed = parseContentArray(value)
      if (parsed) {
        sink.push(parsed)
      }
    }
  }

  for (const rawLine of String(rawText || '').split(/\r?\n/)) {
    const line = rawLine.trim()
    if (!line.startsWith('data:')) {
      continue
    }

    const payloadText = line.slice('data:'.length).trim()
    if (!payloadText || payloadText === '[DONE]') {
      continue
    }

    let payload
    try {
      payload = JSON.parse(payloadText)
    } catch {
      continue
    }

    payloadCount += 1
    const delta = payload?.choices?.[0]?.delta
    if (!delta) {
      continue
    }

    collect(delta.content, contentParts)
    collect(delta.reasoning_content || delta.reasoning || delta.thinking, thinkingParts)
  }

  return {
    payloadCount,
    content: contentParts.join('').trim(),
    thinking: thinkingParts.join('').trim()
  }
}
522
+
523
// Key a stream part by "outputIndex:partIndex", defaulting non-integer indexes to 0.
function buildStreamPartKey(payload, indexField = 'content_index') {
  const asIndex = (value) => (Number.isInteger(value) ? value : 0)
  return `${asIndex(payload?.output_index)}:${asIndex(payload?.[indexField])}`
}
528
+
529
// Store a non-empty string part in the map, appending to the existing value when asked.
function setStreamPart(parts, key, value, append = false) {
  if (typeof value !== 'string' || value === '') {
    return
  }

  if (append) {
    parts.set(key, `${parts.get(key) || ''}${value}`)
  } else {
    parts.set(key, value)
  }
}
537
+
538
/**
 * Join stream parts in numeric "output:part" key order, trimming each part
 * and dropping empties.
 */
function joinStreamParts(parts) {
  const parseKey = (key) => key.split(':').map((piece) => Number(piece) || 0)

  const entries = [...parts.entries()]
  entries.sort(([leftKey], [rightKey]) => {
    const [leftOutput = 0, leftPart = 0] = parseKey(leftKey)
    const [rightOutput = 0, rightPart = 0] = parseKey(rightKey)
    return (leftOutput - rightOutput) || (leftPart - rightPart)
  })

  return entries
    .map(([, text]) => text.trim())
    .filter(Boolean)
    .join('\n')
    .trim()
}
553
+
554
/**
 * Reassemble an OpenAI Responses-API SSE stream. Delta events append to keyed
 * parts; *.done events overwrite the part with the final text; a
 * response.completed event provides a full payload used as fallback when no
 * parts were collected. Returns { payloadCount, content, thinking }.
 */
function parseOpenAIResponsesStreamContent(rawText) {
  const contentParts = new Map()
  const thinkingParts = new Map()
  let payloadCount = 0
  let completedResponse = null

  for (const rawLine of String(rawText || '').split(/\r?\n/)) {
    const line = rawLine.trim()
    if (!line.startsWith('data:')) {
      continue
    }

    const payloadText = line.slice('data:'.length).trim()
    if (!payloadText) {
      continue
    }

    let payload
    try {
      payload = JSON.parse(payloadText)
    } catch {
      continue
    }

    payloadCount += 1
    const type = payload?.type

    if (type === 'response.completed' && payload?.response && typeof payload.response === 'object') {
      completedResponse = payload.response
    } else if (type === 'response.output_text.delta') {
      setStreamPart(contentParts, buildStreamPartKey(payload), payload.delta, true)
    } else if (type === 'response.output_text.done') {
      setStreamPart(contentParts, buildStreamPartKey(payload), payload.text)
    } else if (type === 'response.reasoning_summary_text.delta') {
      setStreamPart(thinkingParts, buildStreamPartKey(payload, 'summary_index'), payload.delta, true)
    } else if (type === 'response.reasoning_summary_text.done') {
      setStreamPart(thinkingParts, buildStreamPartKey(payload, 'summary_index'), payload.text)
    } else if (type === 'response.reasoning_text.delta') {
      setStreamPart(thinkingParts, buildStreamPartKey(payload), payload.delta, true)
    } else if (type === 'response.reasoning_text.done') {
      setStreamPart(thinkingParts, buildStreamPartKey(payload), payload.text)
    } else if (type === 'response.content_part.done') {
      const part = payload.part
      if (part?.type === 'output_text') {
        setStreamPart(contentParts, buildStreamPartKey(payload), part.text)
      } else if (part?.type === 'reasoning_text') {
        setStreamPart(thinkingParts, buildStreamPartKey(payload), part.text)
      }
    }
  }

  let content = joinStreamParts(contentParts)
  let thinking = joinStreamParts(thinkingParts)

  // Fall back to the fully materialized response when deltas produced nothing.
  if (completedResponse) {
    content = content || parseOpenAIResponsesContent(completedResponse)
    thinking = thinking || parseOpenAIResponsesThinking(completedResponse)
  }

  return {
    payloadCount,
    content,
    thinking
  }
}
647
+
648
/**
 * Reassemble an Ollama NDJSON stream into { payloadCount, content, thinking }.
 *
 * Fix: previously every chunk went through parseOllamaContent /
 * parseOllamaThinking, which trim each chunk; joining trimmed chunks with ''
 * glued adjacent words together across chunk boundaries
 * ("Hello " + "world" -> "Helloworld"). For the common streaming shapes
 * (message.content / response / message.thinking strings) the raw, untrimmed
 * delta is now kept; the trimming helpers remain as a fallback for exotic
 * payload shapes.
 */
function parseOllamaStreamContent(rawText) {
  const contentParts = []
  const thinkingParts = []
  let payloadCount = 0

  for (const rawLine of String(rawText || '').split(/\r?\n/)) {
    const line = rawLine.trim()
    if (!line) {
      continue
    }

    let payload = null
    try {
      payload = JSON.parse(line)
    } catch {
      continue
    }

    payloadCount += 1

    // Prefer raw delta strings so intra-chunk whitespace survives the join.
    let content = ''
    if (typeof payload?.message?.content === 'string') {
      content = payload.message.content
    } else if (typeof payload?.response === 'string') {
      content = payload.response
    } else {
      content = parseOllamaContent(payload)
    }
    if (content) {
      contentParts.push(content)
    }

    let thinking = ''
    if (typeof payload?.message?.thinking === 'string') {
      thinking = payload.message.thinking
    } else {
      thinking = parseOllamaThinking(payload)
    }
    if (thinking) {
      thinkingParts.push(thinking)
    }
  }

  return {
    payloadCount,
    content: contentParts.join('').trim(),
    thinking: thinkingParts.join('').trim()
  }
}
686
+
687
/**
 * Parse the response body as JSON, wrapping parse failures in an IcodeError
 * that carries the request metadata plus the raw response text.
 */
function parseResponseJson(text, meta) {
  try {
    return JSON.parse(text)
  } catch (error) {
    throw new IcodeError(`AI 响应解析失败: ${error.message}`, {
      code: 'AI_RESPONSE_JSON_PARSE_ERROR',
      exitCode: 2,
      meta: { ...meta, rawResponse: text }
    })
  }
}
701
+
702
/**
 * Build the "only thinking, no final content" error, attaching a 400-char
 * preview of the reasoning text and a remediation hint to the metadata.
 */
function buildThinkingOnlyError(message, meta, thinkingContent = '', hint = '') {
  const errorMeta = {
    ...meta,
    thinkingPreview: thinkingContent.slice(0, 400),
    hint
  }
  return new IcodeError(message, {
    code: 'AI_EMPTY_RESPONSE',
    exitCode: 2,
    meta: errorMeta
  })
}
713
+
714
// Detect the gateway error telling us to retry via the /v1/responses API.
function shouldRetryWithResponsesApi(error) {
  if (error?.code !== 'AI_HTTP_ERROR') {
    return false
  }

  const rawResponse = String(error.meta?.rawResponse || '')
  return rawResponse.includes('Unsupported legacy protocol') && rawResponse.includes('/v1/responses')
}
722
+
723
// Default chat-completions request body, overridable via profile.requestBody.
function buildOpenAIChatRequestBody(profile, prompt) {
  const defaults = {
    model: profile.model,
    stream: false,
    temperature: profile.temperature,
    max_tokens: profile.maxTokens,
    messages: [
      { role: 'system', content: prompt.systemPrompt },
      { role: 'user', content: prompt.userPrompt }
    ]
  }
  return mergeRequestBody(defaults, profile.requestBody)
}
741
+
742
// Default Responses-API request body, overridable via profile.requestBody.
function buildOpenAIResponsesRequestBody(profile, prompt) {
  const defaults = {
    model: profile.model,
    stream: false,
    temperature: profile.temperature,
    max_output_tokens: profile.maxTokens,
    instructions: prompt.systemPrompt,
    input: prompt.userPrompt
  }
  return mergeRequestBody(defaults, profile.requestBody)
}
752
+
753
/**
 * POST a JSON body to `endpoint` behind a spinner and return the raw result.
 * @returns {Promise<{status: number, responseHeaders: object, text: string}>}
 * @throws {IcodeError} AI_FETCH_ERROR on network failure, AI_HTTP_ERROR on non-2xx
 */
async function performJsonRequest({ endpoint, profile, headers, requestBody, options }) {
  return withSpinner('等待响应', async () => {
    let response
    try {
      response = await fetch(endpoint, {
        method: 'POST',
        headers,
        body: JSON.stringify(requestBody)
      })
    } catch (error) {
      throw buildFetchRequestError(profile, endpoint, error)
    }

    const text = await response.text()
    const responseHeaders = serializeHeaders(response.headers)
    const status = response.status

    if (shouldDumpResponse(options)) {
      printResponseDump({
        profile: profile.name,
        format: profile.format,
        endpoint,
        status,
        responseHeaders,
        responseText: text
      })
    }

    if (!response.ok) {
      throw new IcodeError(`AI 请求失败(${status}): ${text}`, {
        code: 'AI_HTTP_ERROR',
        exitCode: 2,
        meta: {
          status,
          endpoint,
          profile: profile.name,
          format: profile.format,
          responseHeaders,
          rawResponse: text
        }
      })
    }

    return { status, responseHeaders, text }
  })
}
801
+
802
// Decode the OpenAI response body as JSON, tagging errors with request metadata.
function parseOpenAIPayload(responseMeta, profile, endpoint) {
  const { status, responseHeaders, text } = responseMeta
  return parseResponseJson(text, {
    status,
    endpoint,
    profile: profile.name,
    format: profile.format,
    responseHeaders
  })
}
811
+
812
/**
 * Pick the final text from an OpenAI payload (chat or Responses shape).
 * Reasoning text is only returned when options.allowThinkingFallback is true;
 * otherwise a thinking-only payload raises AI_EMPTY_RESPONSE with a hint.
 */
function resolveOpenAITextFromPayload(payload, responseMeta, profile, endpoint, options) {
  const usesResponsesApi = isOpenAIResponsesEndpoint(endpoint)

  const content = usesResponsesApi
    ? parseOpenAIResponsesContent(payload)
    : parseOpenAIContent(payload)
  if (content) {
    return content
  }

  const thinking = usesResponsesApi
    ? parseOpenAIResponsesThinking(payload)
    : parseOpenAIThinking(payload)
  if (thinking && allowThinkingFallback(options)) {
    return thinking
  }

  throw buildThinkingOnlyError(
    'AI 返回正文为空。当前响应可能只有思考过程,默认不会把 reasoning/thinking 当成最终内容输出。',
    {
      endpoint,
      profile: profile.name,
      status: responseMeta.status,
      responseHeaders: responseMeta.responseHeaders,
      rawResponse: responseMeta.text
    },
    thinking,
    usesResponsesApi
      ? '可在 profile.requestBody 中设置 {"reasoning":{"summary":"none"},"stream":false},或切换支持文本输出的模型。'
      : '可在 profile.requestBody 中设置 {"thinking":{"type":"disabled"},"stream":false},或切换不带思考输出的模型。'
  )
}
842
+
843
/**
 * Call an OpenAI-compatible endpoint (chat completions or Responses API) and
 * return the assistant text. If the gateway rejects chat completions with an
 * "Unsupported legacy protocol ... /v1/responses" error, the request is
 * retried once against the Responses endpoint. When profile.requestBody
 * forces stream:true, the SSE body is reassembled locally.
 * @throws {IcodeError} on missing key, HTTP/network failure, or empty content
 */
async function requestOpenAI(profile, prompt, options = {}) {
  const apiKey = resolveApiKey(profile)
  if (!apiKey) {
    throw new IcodeError(`AI profile ${profile.name} 缺少 apiKey(可通过配置或环境变量设置)`, {
      code: 'AI_API_KEY_EMPTY',
      exitCode: 2
    })
  }

  const headers = {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`,
    ...normalizeHeaders(profile.headers)
  }

  const makeBody = (targetEndpoint) => (isOpenAIResponsesEndpoint(targetEndpoint)
    ? buildOpenAIResponsesRequestBody(profile, prompt)
    : buildOpenAIChatRequestBody(profile, prompt))

  let resolvedEndpoint = buildEndpoint(profile)
  let requestBody = makeBody(resolvedEndpoint)
  let responseMeta

  try {
    responseMeta = await performJsonRequest({ endpoint: resolvedEndpoint, profile, headers, requestBody, options })
  } catch (error) {
    const canRetry = !isOpenAIResponsesEndpoint(resolvedEndpoint) && shouldRetryWithResponsesApi(error)
    if (!canRetry) {
      throw error
    }
    // Gateway demands the Responses API — rebuild the body and retry once.
    resolvedEndpoint = buildOpenAIResponsesEndpoint(profile)
    requestBody = makeBody(resolvedEndpoint)
    responseMeta = await performJsonRequest({ endpoint: resolvedEndpoint, profile, headers, requestBody, options })
  }

  if (requestBody.stream === true) {
    const streamResult = isOpenAIResponsesEndpoint(resolvedEndpoint)
      ? parseOpenAIResponsesStreamContent(responseMeta.text)
      : parseOpenAIStreamContent(responseMeta.text)

    if (streamResult.payloadCount > 0) {
      if (streamResult.content) {
        return streamResult.content
      }
      if (streamResult.thinking && allowThinkingFallback(options)) {
        return streamResult.thinking
      }

      throw buildThinkingOnlyError(
        'AI 返回正文为空,检测到 reasoning/thinking。为避免暴露思考过程,默认不会把它当成最终内容输出。',
        {
          endpoint: resolvedEndpoint,
          profile: profile.name,
          status: responseMeta.status,
          responseHeaders: responseMeta.responseHeaders,
          rawResponse: responseMeta.text
        },
        streamResult.thinking,
        isOpenAIResponsesEndpoint(resolvedEndpoint)
          ? '可在 profile.requestBody 中设置 {"reasoning":{"summary":"none"},"stream":false} 后重试。'
          : '可在 profile.requestBody 中设置 {"thinking":{"type":"disabled"},"stream":false} 后重试。'
      )
    }
  }

  const payload = parseOpenAIPayload(responseMeta, profile, resolvedEndpoint)
  return resolveOpenAITextFromPayload(payload, responseMeta, profile, resolvedEndpoint, options)
}
920
+
921
/**
 * Call an Anthropic-format /messages endpoint and return the text content.
 * @throws {IcodeError} on missing key, HTTP/network failure, or empty content
 */
async function requestAnthropic(profile, prompt, options = {}) {
  const apiKey = resolveApiKey(profile)
  if (!apiKey) {
    throw new IcodeError(`AI profile ${profile.name} 缺少 apiKey(可通过配置或环境变量设置)`, {
      code: 'AI_API_KEY_EMPTY',
      exitCode: 2
    })
  }

  const endpoint = buildEndpoint(profile)
  const customHeaders = normalizeHeaders(profile.headers)
  const headers = {
    'Content-Type': 'application/json',
    'x-api-key': apiKey,
    // Custom headers spread below may override this default protocol version.
    'anthropic-version': customHeaders['anthropic-version'] || '2023-06-01',
    ...customHeaders
  }

  const requestBody = mergeRequestBody({
    model: profile.model,
    system: prompt.systemPrompt,
    stream: false,
    max_tokens: profile.maxTokens,
    temperature: profile.temperature,
    messages: [
      { role: 'user', content: prompt.userPrompt }
    ]
  }, profile.requestBody)

  const responseMeta = await performJsonRequest({ endpoint, profile, headers, requestBody, options })

  const payload = parseResponseJson(responseMeta.text, {
    status: responseMeta.status,
    endpoint,
    profile: profile.name,
    format: profile.format,
    responseHeaders: responseMeta.responseHeaders
  })

  const content = parseAnthropicContent(payload)
  if (content) {
    return content
  }

  throw new IcodeError('Anthropic 返回内容为空,请检查模型可用性或调整请求参数后重试。', {
    code: 'AI_EMPTY_RESPONSE',
    exitCode: 2,
    meta: {
      endpoint,
      profile: profile.name,
      status: responseMeta.status,
      responseHeaders: responseMeta.responseHeaders,
      rawResponse: responseMeta.text
    }
  })
}
986
+
987
/**
 * Call an Ollama /api/chat endpoint and return the assistant text. Handles
 * optional Bearer auth for gateways, NDJSON streams when profile.requestBody
 * forces stream:true, and the thinking-only failure mode.
 * @throws {IcodeError} on HTTP/network failure or empty content
 */
async function requestOllama(profile, prompt, options = {}) {
  const endpoint = buildEndpoint(profile)
  const headers = {
    'Content-Type': 'application/json',
    ...normalizeHeaders(profile.headers)
  }

  // Local Ollama usually runs without auth; gateways may still expect a Bearer token.
  const apiKey = resolveApiKey(profile)
  const hasAuthHeader = Object.keys(headers).some((name) => name.toLowerCase() === 'authorization')
  if (apiKey && !hasAuthHeader) {
    headers.Authorization = `Bearer ${apiKey}`
  }

  const requestBody = mergeRequestBody({
    model: profile.model,
    stream: false,
    messages: [
      { role: 'system', content: prompt.systemPrompt },
      { role: 'user', content: prompt.userPrompt }
    ],
    options: {
      temperature: profile.temperature,
      num_predict: profile.maxTokens
    }
  }, profile.requestBody)

  const responseMeta = await performJsonRequest({ endpoint, profile, headers, requestBody, options })

  // profile.requestBody may force stream:true; the body is then NDJSON chunks.
  if (requestBody.stream === true) {
    const streamResult = parseOllamaStreamContent(responseMeta.text)
    if (streamResult.payloadCount > 0) {
      if (streamResult.content) {
        return streamResult.content
      }
      if (streamResult.thinking && allowThinkingFallback(options)) {
        return streamResult.thinking
      }

      throw buildThinkingOnlyError(
        'AI 返回正文为空,检测到 thinking/reasoning。为避免暴露思考过程,默认不会把它当成最终内容输出。',
        {
          endpoint,
          profile: profile.name,
          status: responseMeta.status,
          responseHeaders: responseMeta.responseHeaders,
          rawResponse: responseMeta.text
        },
        streamResult.thinking,
        '可在 profile.requestBody 中设置 {"think":false,"stream":false} 后重试。'
      )
    }
  }

  const payload = parseResponseJson(responseMeta.text, {
    status: responseMeta.status,
    endpoint,
    profile: profile.name,
    format: profile.format,
    responseHeaders: responseMeta.responseHeaders
  })

  const content = parseOllamaContent(payload)
  if (content) {
    return content
  }

  const thinking = parseOllamaThinking(payload)
  if (thinking && allowThinkingFallback(options)) {
    return thinking
  }

  throw buildThinkingOnlyError(
    'Ollama 返回正文为空。当前响应可能只有思考过程,默认不会把 thinking/reasoning 当成最终内容输出。',
    {
      endpoint,
      profile: profile.name,
      status: responseMeta.status,
      responseHeaders: responseMeta.responseHeaders,
      payloadKeys: Object.keys(payload || {}),
      doneReason: payload?.done_reason || payload?.choices?.[0]?.finish_reason || '',
      rawResponse: responseMeta.text
    },
    thinking,
    '若模型仅返回思考内容,可在 profile.requestBody 中设置 {"think":false,"stream":false},或切换不带思考的模型。'
  )
}
1084
+
1085
/**
 * Ask the configured AI profile and return the assistant text, dispatching on
 * profile.format (anthropic / ollama / OpenAI-compatible default).
 * @throws {IcodeError} AI_MODEL_EMPTY when the profile has no model
 */
export async function askAi(prompt, options = {}) {
  const profile = getAiProfile(options.profile)
  if (!profile.model) {
    throw new IcodeError(`AI profile ${profile.name} 缺少 model`, {
      code: 'AI_MODEL_EMPTY',
      exitCode: 2
    })
  }

  switch (profile.format) {
    case 'anthropic':
      return requestAnthropic(profile, prompt, options)
    case 'ollama':
      return requestOllama(profile, prompt, options)
    default:
      return requestOpenAI(profile, prompt, options)
  }
}
1104
+
1105
/**
 * Ask the AI and parse the reply as JSON (with embedded-block fallback).
 * Thinking fallback defaults on here, since the JSON extractor tolerates
 * reasoning text surrounding the payload.
 * @returns {Promise<{text: string, parsed: object}>}
 * @throws {IcodeError} AI_JSON_PARSE_ERROR when no JSON can be extracted
 */
export async function askAiJson(prompt, options = {}) {
  const text = await askAi(prompt, {
    ...options,
    allowThinkingFallback: options.allowThinkingFallback !== false
  })

  const parsed = parseJsonWithFallback(text)
  if (parsed) {
    return { text, parsed }
  }

  throw new IcodeError('AI 返回结果不是合法 JSON,请调整模型提示词或重试。', {
    code: 'AI_JSON_PARSE_ERROR',
    exitCode: 2,
    meta: { text }
  })
}