botrun-horse 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/README.md +1 -0
  2. package/bin/bh.mjs +193 -0
  3. package/bin/commands/dag-cmd.mjs +74 -0
  4. package/bin/commands/db-cmd.mjs +73 -0
  5. package/bin/commands/doc.mjs +185 -0
  6. package/bin/commands/gemini.mjs +120 -0
  7. package/bin/commands/help.mjs +109 -0
  8. package/bin/commands/legal.mjs +174 -0
  9. package/bin/commands/nchc.mjs +212 -0
  10. package/bin/commands/openrouter.mjs +154 -0
  11. package/bin/commands/prompt.mjs +175 -0
  12. package/bin/commands/schema.mjs +258 -0
  13. package/bin/commands/search.mjs +46 -0
  14. package/bin/commands/writing.mjs +33 -0
  15. package/lib/core/adapters/base.mjs +52 -0
  16. package/lib/core/adapters/claude.mjs +13 -0
  17. package/lib/core/adapters/gemini-api.mjs +174 -0
  18. package/lib/core/adapters/gemini-shared.mjs +164 -0
  19. package/lib/core/adapters/gemini-vertex.mjs +232 -0
  20. package/lib/core/adapters/local.mjs +13 -0
  21. package/lib/core/adapters/nchc.mjs +236 -0
  22. package/lib/core/adapters/openai-shared.mjs +34 -0
  23. package/lib/core/adapters/openrouter.mjs +304 -0
  24. package/lib/core/ai-cache.mjs +277 -0
  25. package/lib/core/ai-router.mjs +217 -0
  26. package/lib/core/cli-utils.mjs +170 -0
  27. package/lib/core/dag.mjs +114 -0
  28. package/lib/core/db.mjs +412 -0
  29. package/lib/core/env.mjs +64 -0
  30. package/lib/core/llm.mjs +58 -0
  31. package/lib/core/paths.mjs +115 -0
  32. package/lib/core/proxy.mjs +46 -0
  33. package/lib/core/watermelon.mjs +9 -0
  34. package/lib/doc/index.mjs +419 -0
  35. package/lib/doc/office2text.mjs +234 -0
  36. package/lib/doc/pdf2text.mjs +133 -0
  37. package/lib/doc/split.mjs +132 -0
  38. package/lib/flows/draft-writing.mjs +29 -0
  39. package/lib/flows/gemini-ask.mjs +185 -0
  40. package/lib/flows/hatch-portal.mjs +13 -0
  41. package/lib/flows/legal-ask.mjs +325 -0
  42. package/lib/flows/openai-agent.mjs +167 -0
  43. package/lib/flows/opencode-agent.mjs +240 -0
  44. package/lib/flows/openrouter-ask.mjs +111 -0
  45. package/lib/flows/review-doc.mjs +18 -0
  46. package/lib/ocr/index.mjs +6 -0
  47. package/lib/portal/hatch.mjs +6 -0
  48. package/lib/portal/index.mjs +6 -0
  49. package/lib/prompt/prompt-search.mjs +55 -0
  50. package/lib/prompt/prompt-store.mjs +94 -0
  51. package/lib/prompt/prompts/zero-framework/coding.md +15 -0
  52. package/lib/prompt/prompts/zero-framework/search.md +12 -0
  53. package/lib/prompt/prompts/zero-framework/slice.md +11 -0
  54. package/lib/search/crawler.mjs +6 -0
  55. package/lib/search/index.mjs +7 -0
  56. package/lib/tools/fs-tools.mjs +268 -0
  57. package/lib/tools/index.mjs +27 -0
  58. package/lib/writing/generate.mjs +86 -0
  59. package/lib/writing/generators/nstc-generators.mjs +279 -0
  60. package/lib/writing/generators/nstc-top5.mjs +554 -0
  61. package/lib/writing/index.mjs +5 -0
  62. package/lib/writing/layouts/nstc-layout.mjs +249 -0
  63. package/lib/writing/renderer.mjs +61 -0
  64. package/package.json +35 -0
@@ -0,0 +1,236 @@
1
+ // lib/core/adapters/nchc.mjs — 國網 GenAI LLM adapter
2
+ //
3
+ // 端點:https://portal.genai.nchc.org.tw/api/v1 (OpenAI 相容介面)
4
+ // 驗證:NCHC_GENAI_API_KEY(Bearer Token)
5
+ // Proxy:依賴 proxy.mjs 全局 ProxyAgent dispatcher,不自行建立 HTTP client
6
+ //
7
+ // 可用模型:
8
+ // - Ministral-3-14B-Instruct-2512 (輕量快速)
9
+ // - Devstral-2-123B-Instruct-2512 (程式碼專精)
10
+ // - Mistral-Large-3-675B-Instruct-2512 (最強,預設)
11
+ //
12
+ // 介面相容 GeminiVertexAdapter(SOLID LSP):
13
+ // generateContent({ prompt, systemInstruction })
14
+ // generateContentStream({ prompt, systemInstruction })
15
+ //
16
+ // LlmResponse Value Object(DDD — 所有 adapter 共用語言):
17
+ // { text: string, sources: [], usage: { promptTokens, outputTokens, totalTokens },
18
+ // perf: { latencySec, ttftSec?, outputTokensPerSec }, model: string }
19
+
20
+ import { calcPerfOpenAI, estimateTokens } from './openai-shared.mjs';
21
+ import { BaseAdapter } from './base.mjs';
22
+
23
// ── Endpoint & default configuration ─────────────────────────────────────
const NCHC_API_BASE = 'https://portal.genai.nchc.org.tw/api/v1';

/** Default settings (frozen: shared module-level constant, must not be mutated). */
const DEFAULTS = Object.freeze({
  model: 'Mistral-Large-3-675B-Instruct-2512',
  maxTokens: 8192,
  temperature: 0.7,
});
31
+
32
/**
 * NCHC GenAI adapter (OpenAI-compatible interface).
 * Uses the global fetch (ProxyAgent configured by proxy.mjs), so all traffic
 * is forced through the HTTPS proxy; no private HTTP client is created.
 *
 * SOLID:
 *   SRP — each private method does one thing
 *   OCP — extended via the llm.mjs factory; existing adapters untouched
 *   LSP — interface fully compatible with GeminiVertexAdapter
 *   DIP — depends on the injected global fetch; no private HTTP client
 */
export class NchcAdapter extends BaseAdapter {
  /**
   * @param {object} opts
   * @param {string} [opts.apiKey] - NCHC GenAI API key (defaults to env NCHC_GENAI_API_KEY)
   * @param {string} [opts.model] - model name (default Mistral-Large-3-675B-Instruct-2512)
   * @param {number} [opts.maxTokens] - max output tokens (default 8192)
   * @param {number} [opts.temperature] - sampling temperature (default 0.7)
   * @throws {Error} when no API key is provided or found in the environment
   */
  constructor(opts = {}) {
    super();
    const apiKey = opts.apiKey || process.env.NCHC_GENAI_API_KEY;
    if (!apiKey) {
      throw new Error(
        'NchcAdapter: 需要 NCHC GenAI API Key。\n' +
        '請設定環境變數 NCHC_GENAI_API_KEY 或傳入 apiKey 參數。'
      );
    }
    this.apiKey = apiKey;
    this.model = opts.model || DEFAULTS.model;
    this.maxTokens = opts.maxTokens || DEFAULTS.maxTokens;
    // `??` (not `||`) so an explicit temperature of 0 is honoured.
    this.temperature = opts.temperature ?? DEFAULTS.temperature;
  }

  // ── Private builders (SRP: one responsibility each) ──────────────────────

  /**
   * Build HTTP headers (DRY: shared by both fetch calls).
   * @param {object} [extra={}] - extra headers (e.g. Accept: text/event-stream)
   * @returns {object}
   */
  _buildHeaders(extra = {}) {
    return {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.apiKey}`,
      ...extra,
    };
  }

  /** Build the messages array (OpenAI chat format). */
  _buildMessages(prompt, systemInstruction) {
    const messages = [];
    if (systemInstruction) {
      messages.push({ role: 'system', content: systemInstruction });
    }
    messages.push({ role: 'user', content: prompt });
    return messages;
  }

  /** Build the request body. */
  _buildBody(prompt, systemInstruction, stream = false) {
    return {
      model: this.model,
      messages: this._buildMessages(prompt, systemInstruction),
      max_tokens: this.maxTokens,
      temperature: this.temperature,
      stream,
    };
  }

  /**
   * Check the HTTP response status (DRY: shared error handling).
   * @param {Response} resp
   * @param {string} context - label used in the error message
   * @throws {Error} when the response is not ok (body text included best-effort)
   */
  async _checkResponse(resp, context) {
    if (resp.ok) return;
    const errText = await resp.text().catch(() => '');
    throw new Error(`NCHC API ${context}錯誤 ${resp.status}: ${errText}`);
  }

  /**
   * Parse an SSE stream (SRP: SSE parsing isolated from generateContentStream).
   * Parses `data: {...}` lines one by one; stops at `[DONE]`.
   * Fix: after the stream ends, the decoder is flushed and the remaining
   * buffered line is processed, so a final event arriving without a trailing
   * newline is no longer silently dropped.
   *
   * @param {Response} resp - fetch response (resp.body is a ReadableStream)
   * @yields {object} each parsed SSE JSON chunk
   */
  async *_parseSseStream(resp) {
    const decoder = new TextDecoder();
    let buffer = '';

    // Returns the parsed chunk, `null` for the [DONE] sentinel,
    // or `undefined` for lines to skip (blank, comments, malformed JSON).
    const parseLine = (line) => {
      const trimmed = line.trim();
      if (!trimmed || !trimmed.startsWith('data: ')) return undefined;
      const jsonStr = trimmed.slice(6); // strip the 'data: ' prefix
      if (jsonStr === '[DONE]') return null;
      try {
        return JSON.parse(jsonStr);
      } catch {
        return undefined; // skip malformed SSE lines (network hiccup, partial chunk)
      }
    };

    for await (const rawChunk of resp.body) {
      buffer += decoder.decode(rawChunk, { stream: true });

      const lines = buffer.split('\n');
      buffer = lines.pop() ?? ''; // keep the possibly-incomplete last line

      for (const line of lines) {
        const parsed = parseLine(line);
        if (parsed === null) return;
        if (parsed !== undefined) yield parsed;
      }
    }

    // Flush: the stream may end without a trailing newline.
    buffer += decoder.decode();
    if (buffer) {
      const parsed = parseLine(buffer);
      if (parsed !== undefined && parsed !== null) yield parsed;
    }
  }

  // ── Public API (interface-compatible with GeminiVertexAdapter, SOLID LSP) ─

  /**
   * Ask NCHC GenAI (non-streaming).
   * @param {object} params
   * @param {string} params.prompt - user prompt
   * @param {string} [params.systemInstruction] - system instruction
   * @returns {Promise<LlmResponse>}
   */
  async generateContent({ prompt, systemInstruction }) {
    const t0 = performance.now();

    const resp = await fetch(`${NCHC_API_BASE}/chat/completions`, {
      method: 'POST',
      headers: this._buildHeaders(),
      body: JSON.stringify(this._buildBody(prompt, systemInstruction, false)),
    });
    await this._checkResponse(resp, '非串流');

    const data = await resp.json();
    const latencyMs = performance.now() - t0;
    const text = data.choices?.[0]?.message?.content || '';
    const usage = data.usage || {};

    return {
      text,
      sources: [], // the NCHC API has no search grounding
      usage: {
        promptTokens: usage.prompt_tokens || 0,
        outputTokens: usage.completion_tokens || 0,
        totalTokens: usage.total_tokens || 0,
      },
      perf: calcPerfOpenAI(usage, latencyMs),
      model: data.model || this.model,
    };
  }

  /**
   * Ask NCHC GenAI with streaming (AsyncGenerator).
   * yield { type: 'text', text }          — response text (streamed live)
   * yield { type: 'metadata', ...result } — final performance metrics
   *
   * @param {object} params
   * @param {string} params.prompt - user prompt
   * @param {string} [params.systemInstruction] - system instruction
   */
  async *generateContentStream({ prompt, systemInstruction }) {
    const t0 = performance.now();
    let ttftMs = 0;
    let isFirst = true;
    let fullText = '';
    let finishReason = null;
    let lastModel = this.model;

    const resp = await fetch(`${NCHC_API_BASE}/chat/completions`, {
      method: 'POST',
      headers: this._buildHeaders({ 'Accept': 'text/event-stream' }),
      body: JSON.stringify(this._buildBody(prompt, systemInstruction, true)),
    });
    await this._checkResponse(resp, '串流');

    for await (const chunk of this._parseSseStream(resp)) {
      if (isFirst) { ttftMs = performance.now() - t0; isFirst = false; }
      if (chunk.model) lastModel = chunk.model;

      const delta = chunk.choices?.[0]?.delta;
      if (delta?.content) {
        fullText += delta.content;
        yield { type: 'text', text: delta.content };
      }

      const fr = chunk.choices?.[0]?.finish_reason;
      if (fr) finishReason = fr;
    }

    const latencyMs = performance.now() - t0;
    // DRY: estimate once, reuse for both output and total counts.
    const estimatedOutputTokens = estimateTokens(fullText);

    yield {
      type: 'metadata',
      text: fullText,
      sources: [],
      usage: {
        promptTokens: 0, // NCHC does not return prompt_tokens in streaming mode
        outputTokens: estimatedOutputTokens,
        totalTokens: estimatedOutputTokens,
      },
      perf: calcPerfOpenAI({ completion_tokens: estimatedOutputTokens }, latencyMs, ttftMs),
      model: lastModel,
      finishReason,
    };
  }
}
@@ -0,0 +1,34 @@
1
+ // lib/core/adapters/openai-shared.mjs — OpenAI 相容 API 共用純函式
2
+ //
3
+ // DRY:供所有 OpenAI 相容 adapter 共用(NCHC、未來 Claude 等)
4
+ // SOLID SRP:純函式,無副作用,無狀態
5
+
6
/**
 * Estimate token count from character count.
 * Chinese is roughly 1.5 chars/token and English roughly 4 chars/token; a
 * divisor of 3 is a conservative blended estimate for mixed text.
 * Used only in streaming mode, when the API does not return token counts.
 * Fix: tolerates null/undefined input (returns 0) instead of throwing.
 *
 * @param {string} [text]
 * @returns {number} estimated token count (>= 0)
 */
export function estimateTokens(text) {
  return Math.ceil((text?.length ?? 0) / 3);
}
17
+
18
/**
 * Compute performance metrics for OpenAI-compatible APIs (pure function).
 *
 * @param {object} usage - OpenAI usage object { completion_tokens }
 * @param {number} latencyMs - full request latency in milliseconds
 * @param {number} [ttftMs=0] - Time To First Token in ms (0 = not measured)
 * @returns {{ latencySec: number, ttftSec: (number|undefined), outputTokensPerSec: number }}
 */
export function calcPerfOpenAI(usage, latencyMs, ttftMs = 0) {
  // Round milliseconds to seconds with the given number of decimals.
  const toSec = (ms, digits) => Number((ms / 1000).toFixed(digits));

  const completionTokens = usage?.completion_tokens || 0;

  let tokensPerSec = 0;
  if (latencyMs > 0) {
    tokensPerSec = Number((completionTokens / (latencyMs / 1000)).toFixed(1));
  }

  return {
    latencySec: toSec(latencyMs, 2),
    // ttftSec is omitted (undefined) when TTFT was not measured.
    ttftSec: ttftMs ? toSec(ttftMs, 2) : undefined,
    outputTokensPerSec: tokensPerSec,
  };
}
@@ -0,0 +1,304 @@
1
+ // lib/core/adapters/openrouter.mjs — OpenRouter LLM adapter
2
+ //
3
+ // 端點:https://openrouter.ai/api/v1 (OpenAI 相容介面)
4
+ // 驗證:OPENROUTER_API_KEY(Bearer Token)
5
+ // Proxy:依賴 proxy.mjs 全局 ProxyAgent dispatcher,不自行建立 HTTP client
6
+ //
7
+ // 特色:
8
+ // - 支援 400+ 模型(任意指定 model 參數)
9
+ // - 網路搜尋:plugins: [{ id: 'web' }] 或模型名稱加 :online 後綴
10
+ // - 引證來源:annotations[].url_citation(搜尋時回傳)
11
+ //
12
+ // 介面相容 GeminiVertexAdapter(SOLID LSP):
13
+ // generateContent({ prompt, systemInstruction })
14
+ // generateContentStream({ prompt, systemInstruction })
15
+ //
16
+ // LlmResponse Value Object(DDD — 所有 adapter 共用語言):
17
+ // { text: string, sources: [], usage: { promptTokens, outputTokens, totalTokens },
18
+ // perf: { latencySec, ttftSec?, outputTokensPerSec }, model: string }
19
+
20
+ import { calcPerfOpenAI, estimateTokens } from './openai-shared.mjs';
21
+ import { BaseAdapter } from './base.mjs';
22
+
23
// ── Endpoint & default configuration ─────────────────────────────────────
const OPENROUTER_API_BASE = 'https://openrouter.ai/api/v1';

/** Default settings (frozen: shared module-level constant, must not be mutated). */
const DEFAULTS = Object.freeze({
  model: 'openai/gpt-4o-mini',
  maxTokens: 8192,
  temperature: 0.7,
  webSearch: false,
  maxResults: 5,
  siteUrl: 'https://github.com/botrun/botrun-horse',
  siteName: 'botrun-horse',
});
35
+
36
/**
 * OpenRouter LLM adapter (OpenAI-compatible interface).
 * Uses the global fetch (ProxyAgent configured by proxy.mjs), so all traffic
 * is forced through the HTTPS proxy; no private HTTP client is created.
 *
 * SOLID:
 *   SRP — each private method does one thing
 *   OCP — extended via the llm.mjs factory; existing adapters untouched
 *   LSP — interface fully compatible with GeminiVertexAdapter
 *   DIP — depends on the injected global fetch; no private HTTP client
 */
export class OpenRouterAdapter extends BaseAdapter {
  /**
   * @param {object} opts
   * @param {string} [opts.apiKey] - OPENROUTER_API_KEY (defaults to env var)
   * @param {string} [opts.model] - model name (default openai/gpt-4o-mini; any OpenRouter model)
   * @param {number} [opts.maxTokens] - max output tokens (default 8192)
   * @param {number} [opts.temperature] - sampling temperature (default 0.7)
   * @param {boolean} [opts.webSearch] - enable web search (default false)
   * @param {number} [opts.maxResults] - max search results (default 5)
   * @param {string} [opts.reasoningEffort] - reasoning level ('minimal'|'low'|'medium'|'high'; default null = disabled)
   * @param {string} [opts.siteUrl] - HTTP-Referer (used for OpenRouter rankings)
   * @param {string} [opts.siteName] - X-Title (used for OpenRouter rankings)
   * @throws {Error} when no API key is provided or found in the environment
   */
  constructor(opts = {}) {
    super();
    const apiKey = opts.apiKey || process.env.OPENROUTER_API_KEY;
    if (!apiKey) {
      throw new Error(
        'OpenRouterAdapter: 需要 OpenRouter API Key。\n' +
        '請設定環境變數 OPENROUTER_API_KEY 或傳入 apiKey 參數。\n' +
        '取得 API Key:https://openrouter.ai/keys'
      );
    }
    this.apiKey = apiKey;
    this.model = opts.model || DEFAULTS.model;
    this.maxTokens = opts.maxTokens || DEFAULTS.maxTokens;
    // `??` (not `||`) so explicit temperature 0 / webSearch false are honoured.
    this.temperature = opts.temperature ?? DEFAULTS.temperature;
    this.webSearch = opts.webSearch ?? DEFAULTS.webSearch;
    this.maxResults = opts.maxResults || DEFAULTS.maxResults;
    this.reasoningEffort = opts.reasoningEffort || null;
    this.siteUrl = opts.siteUrl || DEFAULTS.siteUrl;
    this.siteName = opts.siteName || DEFAULTS.siteName;
  }

  // ── Private builders (SRP: one responsibility each) ──────────────────────

  /**
   * Build HTTP headers (DRY: shared by both fetch calls).
   * HTTP-Referer and X-Title are the identification headers OpenRouter recommends.
   * @param {object} [extra={}] - extra headers
   * @returns {object}
   */
  _buildHeaders(extra = {}) {
    return {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.apiKey}`,
      'HTTP-Referer': this.siteUrl,
      'X-Title': this.siteName,
      ...extra,
    };
  }

  /** Build the messages array (OpenAI chat format). */
  _buildMessages(prompt, systemInstruction) {
    const messages = [];
    if (systemInstruction) {
      messages.push({ role: 'system', content: systemInstruction });
    }
    messages.push({ role: 'user', content: prompt });
    return messages;
  }

  /**
   * Build the request body.
   * When webSearch is enabled, a plugins array is added (works with all
   * models); a `:online` model-name suffix would achieve the same effect.
   */
  _buildBody(prompt, systemInstruction, stream = false) {
    const body = {
      model: this.model,
      messages: this._buildMessages(prompt, systemInstruction),
      max_tokens: this.maxTokens,
      temperature: this.temperature,
      stream,
    };

    if (this.webSearch) {
      // plugins form: fine-grained control over search parameters
      // (takes precedence over the `:online` suffix).
      body.plugins = [{ id: 'web', max_results: this.maxResults }];
    }

    if (this.reasoningEffort) {
      // OpenRouter reasoning parameter: maps to Gemini thinkingLevel / Claude reasoning.
      body.reasoning = { effort: this.reasoningEffort };
    }

    return body;
  }

  /**
   * Check the HTTP response status (DRY: shared error handling).
   * @param {Response} resp
   * @param {string} context - label used in the error message
   * @throws {Error} when the response is not ok (body text included best-effort)
   */
  async _checkResponse(resp, context) {
    if (resp.ok) return;
    const errText = await resp.text().catch(() => '');
    throw new Error(`OpenRouter API ${context}錯誤 ${resp.status}: ${errText}`);
  }

  /**
   * Extract web-search citations from a message's annotations array (SRP).
   * OpenRouter returns search results via annotations[].url_citation.
   *
   * @param {object} [message] - OpenAI message or delta object
   * @returns {Array<{uri: string, title: string, content: string}>}
   */
  _extractSources(message) {
    if (!message?.annotations?.length) return [];
    return message.annotations
      .filter(a => a.type === 'url_citation' && a.url_citation?.url)
      .map(a => ({
        uri: a.url_citation.url,
        title: a.url_citation.title || '',
        content: a.url_citation.content || '',
      }));
  }

  /**
   * Parse an SSE stream (SRP: SSE parsing isolated from generateContentStream).
   * Parses `data: {...}` lines; stops at `[DONE]`. SSE comment lines beginning
   * with ':' (e.g. OpenRouter's `: OPENROUTER PROCESSING` heartbeat) are
   * skipped by the `data: ` prefix filter.
   * Fix: after the stream ends, the decoder is flushed and the remaining
   * buffered line is processed, so a final event arriving without a trailing
   * newline is no longer silently dropped.
   *
   * @param {Response} resp - fetch response (resp.body is a ReadableStream)
   * @yields {object} each parsed SSE JSON chunk
   */
  async *_parseSseStream(resp) {
    const decoder = new TextDecoder();
    let buffer = '';

    // Returns the parsed chunk, `null` for the [DONE] sentinel,
    // or `undefined` for lines to skip (blank, comments, malformed JSON).
    const parseLine = (line) => {
      const trimmed = line.trim();
      if (!trimmed || !trimmed.startsWith('data: ')) return undefined;
      const jsonStr = trimmed.slice(6); // strip the 'data: ' prefix
      if (jsonStr === '[DONE]') return null;
      try {
        return JSON.parse(jsonStr);
      } catch {
        return undefined; // skip malformed SSE lines (network hiccup, partial chunk)
      }
    };

    for await (const rawChunk of resp.body) {
      buffer += decoder.decode(rawChunk, { stream: true });

      const lines = buffer.split('\n');
      buffer = lines.pop() ?? ''; // keep the possibly-incomplete last line

      for (const line of lines) {
        const parsed = parseLine(line);
        if (parsed === null) return;
        if (parsed !== undefined) yield parsed;
      }
    }

    // Flush: the stream may end without a trailing newline.
    buffer += decoder.decode();
    if (buffer) {
      const parsed = parseLine(buffer);
      if (parsed !== undefined && parsed !== null) yield parsed;
    }
  }

  // ── Public API (interface-compatible with GeminiVertexAdapter, SOLID LSP) ─

  /**
   * Ask OpenRouter (non-streaming).
   * @param {object} params
   * @param {string} params.prompt - user prompt
   * @param {string} [params.systemInstruction] - system instruction
   * @returns {Promise<LlmResponse>}
   */
  async generateContent({ prompt, systemInstruction }) {
    const t0 = performance.now();

    const resp = await fetch(`${OPENROUTER_API_BASE}/chat/completions`, {
      method: 'POST',
      headers: this._buildHeaders(),
      body: JSON.stringify(this._buildBody(prompt, systemInstruction, false)),
    });
    await this._checkResponse(resp, '非串流');

    const data = await resp.json();
    const latencyMs = performance.now() - t0;
    const message = data.choices?.[0]?.message;
    const text = message?.content || '';
    const usage = data.usage || {};
    const sources = this._extractSources(message);

    return {
      text,
      sources,
      usage: {
        promptTokens: usage.prompt_tokens || 0,
        outputTokens: usage.completion_tokens || 0,
        totalTokens: usage.total_tokens || 0,
      },
      perf: calcPerfOpenAI(usage, latencyMs),
      model: data.model || this.model,
    };
  }

  /**
   * Ask OpenRouter with streaming (AsyncGenerator).
   * yield { type: 'text', text }          — response text (streamed live)
   * yield { type: 'metadata', ...result } — final performance metrics + citations
   *
   * Note: annotations (web-search citations) may appear in the delta of the
   * final chunk in streaming mode.
   *
   * @param {object} params
   * @param {string} params.prompt - user prompt
   * @param {string} [params.systemInstruction] - system instruction
   */
  async *generateContentStream({ prompt, systemInstruction }) {
    const t0 = performance.now();
    let ttftMs = 0;
    let isFirst = true;
    let fullText = '';
    let finishReason = null;
    let lastModel = this.model;
    let lastUsage = null;
    let annotations = []; // accumulate annotations seen during the stream

    const resp = await fetch(`${OPENROUTER_API_BASE}/chat/completions`, {
      method: 'POST',
      headers: this._buildHeaders({ 'Accept': 'text/event-stream' }),
      body: JSON.stringify(this._buildBody(prompt, systemInstruction, true)),
    });
    await this._checkResponse(resp, '串流');

    for await (const chunk of this._parseSseStream(resp)) {
      if (isFirst) { ttftMs = performance.now() - t0; isFirst = false; }
      if (chunk.model) lastModel = chunk.model;
      if (chunk.usage) lastUsage = chunk.usage;

      const delta = chunk.choices?.[0]?.delta;
      if (delta?.content) {
        fullText += delta.content;
        yield { type: 'text', text: delta.content };
      }

      // Collect annotations seen mid-stream (web-search citations,
      // usually carried by the last chunk).
      if (delta?.annotations?.length) {
        annotations = [...annotations, ...delta.annotations];
      }

      const fr = chunk.choices?.[0]?.finish_reason;
      if (fr) finishReason = fr;
    }

    const latencyMs = performance.now() - t0;
    const usage = lastUsage || {};
    // Fall back to a character-based estimate when usage was not streamed.
    const outputTokens = usage.completion_tokens || estimateTokens(fullText);
    const sources = this._extractSources({ annotations });

    yield {
      type: 'metadata',
      text: fullText,
      sources,
      usage: {
        promptTokens: usage.prompt_tokens || 0,
        outputTokens,
        totalTokens: usage.total_tokens || outputTokens,
      },
      perf: calcPerfOpenAI({ completion_tokens: outputTokens }, latencyMs, ttftMs),
      model: lastModel,
      finishReason,
    };
  }
}