opc-agent 4.1.0 → 4.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (258)
  1. package/.github/ISSUE_TEMPLATE/bug_report.md +20 -20
  2. package/.github/ISSUE_TEMPLATE/feature_request.md +14 -14
  3. package/.github/PULL_REQUEST_TEMPLATE.md +13 -13
  4. package/CHANGELOG.md +48 -48
  5. package/CONTRIBUTING.md +36 -36
  6. package/README.zh-CN.md +497 -497
  7. package/USABILITY-ISSUES.md +73 -0
  8. package/dist/channels/web.js +8 -2
  9. package/dist/channels/wechat.js +6 -6
  10. package/dist/cli.js +200 -85
  11. package/dist/core/runtime.js +37 -15
  12. package/dist/deploy/index.js +56 -56
  13. package/dist/doctor.d.ts +1 -0
  14. package/dist/doctor.js +105 -10
  15. package/dist/memory/deepbrain.d.ts +1 -1
  16. package/dist/memory/deepbrain.js +95 -4
  17. package/dist/scheduler/cron-engine.js +3 -36
  18. package/dist/studio/server.js +30 -1
  19. package/dist/studio-ui/index.html +230 -10
  20. package/dist/ui/components.js +105 -105
  21. package/examples/README.md +22 -22
  22. package/examples/basic-agent.ts +90 -90
  23. package/examples/brain-integration.ts +71 -71
  24. package/examples/multi-channel.ts +74 -74
  25. package/fix-sidebar.mjs +188 -188
  26. package/install.ps1 +154 -154
  27. package/install.sh +164 -164
  28. package/package.json +1 -1
  29. package/scripts/install.ps1 +31 -31
  30. package/scripts/install.sh +40 -40
  31. package/serve-studio.js +13 -13
  32. package/serve-test.js +25 -25
  33. package/src/channels/dingtalk.ts +46 -46
  34. package/src/channels/email.ts +351 -351
  35. package/src/channels/feishu.ts +349 -349
  36. package/src/channels/googlechat.ts +42 -42
  37. package/src/channels/imessage.ts +31 -31
  38. package/src/channels/irc.ts +82 -82
  39. package/src/channels/line.ts +32 -32
  40. package/src/channels/matrix.ts +33 -33
  41. package/src/channels/mattermost.ts +57 -57
  42. package/src/channels/msteams.ts +32 -32
  43. package/src/channels/nostr.ts +32 -32
  44. package/src/channels/qq.ts +33 -33
  45. package/src/channels/signal.ts +32 -32
  46. package/src/channels/sms.ts +33 -33
  47. package/src/channels/telegram.ts +616 -616
  48. package/src/channels/twitch.ts +65 -65
  49. package/src/channels/voice-call.ts +100 -100
  50. package/src/channels/web.ts +8 -2
  51. package/src/channels/websocket.ts +399 -399
  52. package/src/channels/wechat.ts +329 -329
  53. package/src/channels/whatsapp.ts +32 -32
  54. package/src/cli/chat.ts +99 -99
  55. package/src/cli/setup.ts +314 -314
  56. package/src/cli.ts +195 -92
  57. package/src/core/agent.ts +476 -476
  58. package/src/core/api-server.ts +277 -277
  59. package/src/core/audio.ts +98 -98
  60. package/src/core/collaboration.ts +275 -275
  61. package/src/core/context-discovery.ts +85 -85
  62. package/src/core/context-refs.ts +140 -140
  63. package/src/core/gateway.ts +106 -106
  64. package/src/core/heartbeat.ts +51 -51
  65. package/src/core/hooks.ts +105 -105
  66. package/src/core/ide-bridge.ts +133 -133
  67. package/src/core/node-network.ts +86 -86
  68. package/src/core/profiles.ts +122 -122
  69. package/src/core/runtime.ts +25 -0
  70. package/src/core/scheduler.ts +187 -187
  71. package/src/core/session-manager.ts +137 -137
  72. package/src/core/subagent.ts +98 -98
  73. package/src/core/vision.ts +180 -180
  74. package/src/core/workflow-graph.ts +365 -365
  75. package/src/daemon.ts +96 -96
  76. package/src/deploy/index.ts +255 -255
  77. package/src/doctor.ts +98 -11
  78. package/src/eval/index.ts +211 -211
  79. package/src/eval/suites/basic.json +16 -16
  80. package/src/eval/suites/memory.json +12 -12
  81. package/src/eval/suites/safety.json +14 -14
  82. package/src/hub/brain-seed.ts +54 -54
  83. package/src/hub/client.ts +60 -60
  84. package/src/mcp/servers/calculator-mcp.ts +65 -65
  85. package/src/mcp/servers/crypto-mcp.ts +73 -73
  86. package/src/mcp/servers/database-mcp.ts +72 -72
  87. package/src/mcp/servers/datetime-mcp.ts +69 -69
  88. package/src/mcp/servers/filesystem.ts +66 -66
  89. package/src/mcp/servers/github-mcp.ts +58 -58
  90. package/src/mcp/servers/index.ts +63 -63
  91. package/src/mcp/servers/json-mcp.ts +102 -102
  92. package/src/mcp/servers/memory-mcp.ts +56 -56
  93. package/src/mcp/servers/regex-mcp.ts +53 -53
  94. package/src/mcp/servers/web-mcp.ts +49 -49
  95. package/src/memory/context-compressor.ts +189 -189
  96. package/src/memory/deepbrain.ts +99 -5
  97. package/src/memory/seed-loader.ts +212 -212
  98. package/src/memory/user-profiler.ts +215 -215
  99. package/src/plugins/content-filter.ts +23 -23
  100. package/src/plugins/logger.ts +18 -18
  101. package/src/plugins/rate-limiter.ts +38 -38
  102. package/src/protocols/a2a/client.ts +132 -132
  103. package/src/protocols/a2a/index.ts +8 -8
  104. package/src/protocols/a2a/server.ts +333 -333
  105. package/src/protocols/a2a/types.ts +88 -88
  106. package/src/protocols/a2a/utils.ts +50 -50
  107. package/src/protocols/agui/client.ts +83 -83
  108. package/src/protocols/agui/index.ts +4 -4
  109. package/src/protocols/agui/server.ts +218 -218
  110. package/src/protocols/agui/types.ts +153 -153
  111. package/src/protocols/index.ts +2 -2
  112. package/src/protocols/mcp/agent-tools.ts +134 -134
  113. package/src/protocols/mcp/index.ts +8 -8
  114. package/src/protocols/mcp/server.ts +262 -262
  115. package/src/protocols/mcp/types.ts +69 -69
  116. package/src/providers/index.ts +632 -632
  117. package/src/publish/index.ts +376 -376
  118. package/src/scheduler/cron-engine.ts +191 -191
  119. package/src/scheduler/index.ts +2 -2
  120. package/src/schema/oad.ts +217 -217
  121. package/src/security/approval.ts +131 -131
  122. package/src/security/approvals.ts +143 -143
  123. package/src/security/elevated.ts +105 -105
  124. package/src/security/guardrails.ts +248 -248
  125. package/src/security/index.ts +9 -9
  126. package/src/security/keys.ts +87 -87
  127. package/src/security/secrets.ts +129 -129
  128. package/src/skills/builtin/index.ts +408 -408
  129. package/src/skills/marketplace.ts +113 -113
  130. package/src/skills/types.ts +42 -42
  131. package/src/studio/server.ts +31 -1
  132. package/src/studio/templates-data.ts +178 -178
  133. package/src/studio-ui/index.html +230 -10
  134. package/src/telemetry/index.ts +324 -324
  135. package/src/tools/builtin/browser.ts +299 -299
  136. package/src/tools/builtin/datetime.ts +41 -41
  137. package/src/tools/builtin/file.ts +107 -107
  138. package/src/tools/builtin/home-assistant.ts +116 -116
  139. package/src/tools/builtin/rl-tools.ts +243 -243
  140. package/src/tools/builtin/shell.ts +43 -43
  141. package/src/tools/builtin/vision.ts +64 -64
  142. package/src/tools/builtin/web-search.ts +126 -126
  143. package/src/tools/builtin/web.ts +35 -35
  144. package/src/tools/document-processor.ts +213 -213
  145. package/src/tools/image-generator.ts +150 -150
  146. package/src/tools/integrations/calendar.ts +73 -73
  147. package/src/tools/integrations/code-exec.ts +39 -39
  148. package/src/tools/integrations/csv-analyzer.ts +92 -92
  149. package/src/tools/integrations/database.ts +44 -44
  150. package/src/tools/integrations/email-send.ts +76 -76
  151. package/src/tools/integrations/git-tool.ts +42 -42
  152. package/src/tools/integrations/github-tool.ts +76 -76
  153. package/src/tools/integrations/image-gen.ts +56 -56
  154. package/src/tools/integrations/index.ts +92 -92
  155. package/src/tools/integrations/jira.ts +83 -83
  156. package/src/tools/integrations/notion.ts +71 -71
  157. package/src/tools/integrations/npm-tool.ts +48 -48
  158. package/src/tools/integrations/pdf-reader.ts +58 -58
  159. package/src/tools/integrations/slack.ts +65 -65
  160. package/src/tools/integrations/summarizer.ts +49 -49
  161. package/src/tools/integrations/translator.ts +48 -48
  162. package/src/tools/integrations/trello.ts +60 -60
  163. package/src/tools/integrations/vector-search.ts +42 -42
  164. package/src/tools/integrations/web-scraper.ts +47 -47
  165. package/src/tools/integrations/web-search.ts +58 -58
  166. package/src/tools/integrations/webhook.ts +38 -38
  167. package/src/tools/mcp-client.ts +131 -131
  168. package/src/tools/web-scraper.ts +179 -179
  169. package/src/tools/web-search.ts +180 -180
  170. package/src/ui/components.ts +127 -127
  171. package/srv-out.txt +1 -1
  172. package/templates/ecommerce-assistant/README.md +45 -45
  173. package/templates/ecommerce-assistant/oad.yaml +47 -47
  174. package/templates/tech-support/README.md +43 -43
  175. package/templates/tech-support/oad.yaml +45 -45
  176. package/test-agent/Dockerfile +9 -9
  177. package/test-agent/README.md +50 -50
  178. package/test-agent/agent.yaml +23 -23
  179. package/test-agent/docker-compose.yml +11 -11
  180. package/test-agent/oad.yaml +31 -31
  181. package/test-agent/package-lock.json +1492 -1492
  182. package/test-agent/package.json +17 -17
  183. package/test-agent/src/index.ts +24 -24
  184. package/test-agent/src/skills/echo.ts +15 -15
  185. package/test-agent/tsconfig.json +24 -24
  186. package/test-full.js +43 -43
  187. package/test-sidebar.js +22 -22
  188. package/test-studio3.js +75 -75
  189. package/test-studio4.js +41 -41
  190. package/tests/a2a-protocol.test.ts +285 -285
  191. package/tests/agui-protocol.test.ts +246 -246
  192. package/tests/api-server.test.ts +148 -148
  193. package/tests/approvals.test.ts +89 -89
  194. package/tests/audio.test.ts +40 -40
  195. package/tests/brain-seed-extended.test.ts +490 -490
  196. package/tests/brain-seed.test.ts +239 -239
  197. package/tests/browser.test.ts +179 -179
  198. package/tests/channels/discord.test.ts +79 -79
  199. package/tests/channels/email.test.ts +148 -148
  200. package/tests/channels/feishu.test.ts +123 -123
  201. package/tests/channels/telegram.test.ts +129 -129
  202. package/tests/channels/websocket.test.ts +53 -53
  203. package/tests/channels/wechat.test.ts +170 -170
  204. package/tests/channels-extra.test.ts +45 -45
  205. package/tests/chat-cli.test.ts +160 -160
  206. package/tests/cli.test.ts +46 -46
  207. package/tests/context-compressor.test.ts +172 -172
  208. package/tests/context-refs.test.ts +121 -121
  209. package/tests/cron-engine.test.ts +101 -101
  210. package/tests/daemon.test.ts +135 -135
  211. package/tests/deepbrain-wire.test.ts +234 -234
  212. package/tests/deploy-and-dag.test.ts +196 -196
  213. package/tests/doctor.test.ts +38 -38
  214. package/tests/document-processor.test.ts +69 -69
  215. package/tests/e2e-nocode.test.ts +442 -442
  216. package/tests/elevated.test.ts +69 -69
  217. package/tests/eval.test.ts +173 -173
  218. package/tests/gateway.test.ts +63 -63
  219. package/tests/guardrails.test.ts +177 -177
  220. package/tests/home-assistant.test.ts +40 -40
  221. package/tests/hooks.test.ts +79 -79
  222. package/tests/ide-bridge.test.ts +38 -38
  223. package/tests/image-generator.test.ts +84 -84
  224. package/tests/init-role.test.ts +124 -124
  225. package/tests/integrations.test.ts +249 -249
  226. package/tests/mcp-client.test.ts +92 -92
  227. package/tests/mcp-server.test.ts +178 -178
  228. package/tests/mcp-servers.test.ts +260 -260
  229. package/tests/node-network.test.ts +74 -74
  230. package/tests/plugin-a2a-enhanced.test.ts +230 -230
  231. package/tests/profiles.test.ts +61 -61
  232. package/tests/publish.test.ts +231 -231
  233. package/tests/rl-tools.test.ts +93 -93
  234. package/tests/sandbox-manager.test.ts +46 -46
  235. package/tests/scheduler.test.ts +200 -200
  236. package/tests/secrets.test.ts +107 -107
  237. package/tests/security-enhanced.test.ts +233 -233
  238. package/tests/settings-api.test.ts +148 -148
  239. package/tests/setup.test.ts +73 -73
  240. package/tests/subagent.test.ts +193 -193
  241. package/tests/telegram-discord.test.ts +60 -60
  242. package/tests/telemetry.test.ts +186 -186
  243. package/tests/user-profiler.test.ts +169 -169
  244. package/tests/v090-features.test.ts +254 -254
  245. package/tests/vision.test.ts +61 -61
  246. package/tests/voice-call.test.ts +47 -47
  247. package/tests/voice-enhanced.test.ts +169 -169
  248. package/tests/voice-interaction.test.ts +38 -38
  249. package/tests/web-search.test.ts +155 -155
  250. package/tests/workflow-graph.test.ts +279 -279
  251. package/tutorial/customer-service-agent/README.md +612 -612
  252. package/tutorial/customer-service-agent/SOUL.md +26 -26
  253. package/tutorial/customer-service-agent/agent.yaml +63 -63
  254. package/tutorial/customer-service-agent/package.json +19 -19
  255. package/tutorial/customer-service-agent/src/index.ts +69 -69
  256. package/tutorial/customer-service-agent/src/skills/faq.ts +27 -27
  257. package/tutorial/customer-service-agent/src/skills/ticket.ts +22 -22
  258. package/tutorial/customer-service-agent/tsconfig.json +14 -14
package/src/providers/index.ts
@@ -1,632 +1,632 @@
import type { Message } from '../core/types';
import type { MCPToolDefinition } from '../tools/mcp';
import * as https from 'https';
import * as http from 'http';

export interface ChatOptions {
  tools?: MCPToolDefinition[];
}

export interface LLMProvider {
  name: string;
  chat(messages: Message[], systemPrompt?: string, options?: ChatOptions): Promise<string>;
  chatStream(messages: Message[], systemPrompt?: string): AsyncIterable<string>;
}

interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

function getApiKey(): string {
  return process.env.OPC_LLM_API_KEY || process.env.OPENAI_API_KEY || '';
}

function getBaseUrl(): string {
  return process.env.OPC_LLM_BASE_URL || 'https://api.openai.com/v1';
}

function buildToolPrompt(tools: MCPToolDefinition[]): string {
  const toolsDesc = tools.map(t =>
    `- ${t.name}: ${t.description}\n Input schema: ${JSON.stringify(t.inputSchema)}`
  ).join('\n');
  return `\n\nYou have access to the following tools. To use a tool, respond with ONLY a JSON object in this format:\n<tool_call>{"name": "tool_name", "arguments": {...}}</tool_call>\n\nAvailable tools:\n${toolsDesc}\n\nIf you don't need a tool, respond normally with text.`;
}

class OpenAICompatibleProvider implements LLMProvider {
  name: string;
  private model: string;
  private baseUrl: string;
  private apiKey: string;

  constructor(name: string, model: string, baseUrl?: string, apiKey?: string) {
    this.name = name;
    this.model = model;
    this.baseUrl = baseUrl || getBaseUrl();
    this.apiKey = apiKey || getApiKey();
  }

  private formatMessages(messages: Message[], systemPrompt?: string): OpenAIMessage[] {
    const formatted: OpenAIMessage[] = [];
    if (systemPrompt) {
      formatted.push({ role: 'system', content: systemPrompt });
    }
    for (const m of messages) {
      formatted.push({ role: m.role as 'user' | 'assistant', content: m.content });
    }
    return formatted;
  }

  private async request(body: any): Promise<any> {
    if (!this.apiKey) {
      throw new Error('No API key configured. Set OPC_LLM_API_KEY or OPENAI_API_KEY environment variable.');
    }

    const url = new URL(`${this.baseUrl}/chat/completions`);
    const isGemini = url.hostname.includes('googleapis.com');
    if (isGemini) {
      url.searchParams.set('key', this.apiKey);
    }
    const isHttps = url.protocol === 'https:';
    const lib = isHttps ? https : http;

    const postData = JSON.stringify(body);

    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
      'Content-Length': String(Buffer.byteLength(postData)),
    };
    if (!isGemini) {
      headers['Authorization'] = `Bearer ${this.apiKey}`;
    }

    return new Promise((resolve, reject) => {
      const req = lib.request(
        {
          hostname: url.hostname,
          port: url.port || (isHttps ? 443 : 80),
          path: url.pathname + url.search,
          method: 'POST',
          headers,
        },
        (res) => {
          let data = '';
          res.on('data', (chunk: Buffer) => (data += chunk.toString()));
          res.on('end', () => {
            if (res.statusCode && res.statusCode >= 400) {
              reject(new Error(`LLM API error ${res.statusCode}: ${data}`));
              return;
            }
            try {
              resolve(JSON.parse(data));
            } catch {
              reject(new Error(`Invalid JSON response: ${data.slice(0, 200)}`));
            }
          });
        },
      );
      req.on('error', reject);
      req.write(postData);
      req.end();
    });
  }

  async chat(messages: Message[], systemPrompt?: string, options?: ChatOptions): Promise<string> {
    if (!this.apiKey) {
      const last = messages[messages.length - 1];
      return `[${this.name}/${this.model} - no API key] Echo: ${last?.content ?? ''}`;
    }
    let effectivePrompt = systemPrompt;
    if (options?.tools && options.tools.length > 0) {
      effectivePrompt = (systemPrompt || '') + buildToolPrompt(options.tools);
    }
    const formatted = this.formatMessages(messages, effectivePrompt);
    const result = await this.request({
      model: this.model,
      messages: formatted,
      temperature: 0.7,
      max_tokens: 2048,
    });
    return result.choices?.[0]?.message?.content ?? '';
  }

  async *chatStream(messages: Message[], systemPrompt?: string): AsyncIterable<string> {
    if (!this.apiKey) {
      const last = messages[messages.length - 1];
      yield `[${this.name}/${this.model} - no API key] Echo: ${last?.content ?? ''}`;
      return;
    }

    const formatted = this.formatMessages(messages, systemPrompt);
    const url = new URL(`${this.baseUrl}/chat/completions`);
    const isGemini = url.hostname.includes('googleapis.com');
    if (isGemini) {
      url.searchParams.set('key', this.apiKey);
    }
    const isHttps = url.protocol === 'https:';
    const lib = isHttps ? https : http;
    const postData = JSON.stringify({
      model: this.model,
      messages: formatted,
      temperature: 0.7,
      max_tokens: 2048,
      stream: true,
    });

    const streamHeaders: Record<string, string> = {
      'Content-Type': 'application/json',
      'Content-Length': String(Buffer.byteLength(postData)),
    };
    if (!isGemini) {
      streamHeaders['Authorization'] = `Bearer ${this.apiKey}`;
    }

    const response = await new Promise<http.IncomingMessage>((resolve, reject) => {
      const req = lib.request(
        {
          hostname: url.hostname,
          port: url.port || (isHttps ? 443 : 80),
          path: url.pathname + url.search,
          method: 'POST',
          headers: streamHeaders,
        },
        resolve,
      );
      req.on('error', reject);
      req.write(postData);
      req.end();
    });

    if (response.statusCode && response.statusCode >= 400) {
      let data = '';
      for await (const chunk of response) data += chunk.toString();
      throw new Error(`LLM API error ${response.statusCode}: ${data}`);
    }

    let buffer = '';
    for await (const chunk of response) {
      buffer += chunk.toString();
      const lines = buffer.split('\n');
      buffer = lines.pop() ?? '';

      for (const line of lines) {
        const trimmed = line.trim();
        if (!trimmed || !trimmed.startsWith('data: ')) continue;
        const data = trimmed.slice(6);
        if (data === '[DONE]') return;
        try {
          const parsed = JSON.parse(data);
          const content = parsed.choices?.[0]?.delta?.content;
          if (content) yield content;
        } catch {
          // skip malformed lines
        }
      }
    }
  }
}

class GeminiNativeProvider implements LLMProvider {
  name = 'gemini';
  private model: string;
  private apiKey: string;

  constructor(model: string, apiKey?: string) {
    this.model = model;
    this.apiKey = apiKey || getApiKey();
  }

  private buildUrl(stream: boolean): string {
    const action = stream ? 'streamGenerateContent?alt=sse&' : 'generateContent?';
    return `https://generativelanguage.googleapis.com/v1beta/models/${this.model}:${action}key=${this.apiKey}`;
  }

  private formatContents(messages: Message[], systemPrompt?: string): { contents: any[]; systemInstruction?: any } {
    const contents: any[] = [];
    for (const m of messages) {
      contents.push({ role: m.role === 'assistant' ? 'model' : 'user', parts: [{ text: m.content }] });
    }
    const result: any = { contents };
    if (systemPrompt) {
      result.systemInstruction = { parts: [{ text: systemPrompt }] };
    }
    return result;
  }

  async chat(messages: Message[], systemPrompt?: string, options?: ChatOptions): Promise<string> {
    if (!this.apiKey) {
      const last = messages[messages.length - 1];
      return `[gemini/${this.model} - no API key] Echo: ${last?.content ?? ''}`;
    }
    let effectivePrompt = systemPrompt;
    if (options?.tools && options.tools.length > 0) {
      effectivePrompt = (systemPrompt || '') + buildToolPrompt(options.tools);
    }
    const body = this.formatContents(messages, effectivePrompt);
    const url = this.buildUrl(false);
    const postData = JSON.stringify(body);

    return new Promise((resolve, reject) => {
      const parsedUrl = new URL(url);
      const req = https.request({
        hostname: parsedUrl.hostname,
        path: parsedUrl.pathname + parsedUrl.search,
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'Content-Length': String(Buffer.byteLength(postData)) },
      }, (res) => {
        let data = '';
        res.on('data', (chunk: Buffer) => (data += chunk.toString()));
        res.on('end', () => {
          if (res.statusCode && res.statusCode >= 400) { reject(new Error(`Gemini API error ${res.statusCode}: ${data}`)); return; }
          try {
            const parsed = JSON.parse(data);
            resolve(parsed.candidates?.[0]?.content?.parts?.[0]?.text ?? '');
          } catch { reject(new Error(`Invalid Gemini response: ${data.slice(0, 200)}`)); }
        });
      });
      req.on('error', reject);
      req.write(postData);
      req.end();
    });
  }

  async *chatStream(messages: Message[], systemPrompt?: string): AsyncIterable<string> {
    if (!this.apiKey) {
      const last = messages[messages.length - 1];
      yield `[gemini/${this.model} - no API key] Echo: ${last?.content ?? ''}`;
      return;
    }
    const body = this.formatContents(messages, systemPrompt);
    const url = this.buildUrl(true);
    const postData = JSON.stringify(body);
    const parsedUrl = new URL(url);

    const response = await new Promise<http.IncomingMessage>((resolve, reject) => {
      const req = https.request({
        hostname: parsedUrl.hostname,
        path: parsedUrl.pathname + parsedUrl.search,
        method: 'POST',
        headers: { 'Content-Type': 'application/json', 'Content-Length': String(Buffer.byteLength(postData)) },
      }, resolve);
      req.on('error', reject);
      req.write(postData);
      req.end();
    });

    if (response.statusCode && response.statusCode >= 400) {
      let data = '';
      for await (const chunk of response) data += chunk.toString();
      throw new Error(`Gemini API error ${response.statusCode}: ${data}`);
    }

    let buffer = '';
    for await (const chunk of response) {
      buffer += chunk.toString();
      const lines = buffer.split('\n');
      buffer = lines.pop() ?? '';
      for (const line of lines) {
        const trimmed = line.trim();
        if (!trimmed.startsWith('data: ')) continue;
        const data = trimmed.slice(6);
        if (data === '[DONE]') return;
        try {
          const parsed = JSON.parse(data);
          const text = parsed.candidates?.[0]?.content?.parts?.[0]?.text;
          if (text) yield text;
        } catch {}
      }
    }
  }
}

function isGeminiNative(): boolean {
  const baseUrl = process.env.OPC_LLM_BASE_URL || '';
  const key = getApiKey();
  return key.startsWith('AQ.') || (baseUrl.includes('googleapis.com') && !baseUrl.includes('/openai'));
}

class ClaudeCLIProvider implements LLMProvider {
  name = 'claude-cli';
  private model: string;

  constructor(model?: string) {
    // Claude CLI uses short model names; don't pass API-style model names.
    // Let Claude CLI use its default model unless explicitly set to a known CLI model.
    const cliModels = ['sonnet', 'opus', 'haiku', 'claude-sonnet-4-20250514', 'claude-opus-4-20250514'];
    if (model && !cliModels.includes(model)) {
      // Map common patterns
      if (model.includes('opus')) this.model = 'opus';
      else if (model.includes('haiku')) this.model = 'haiku';
      else this.model = ''; // let CLI choose default
    } else {
      this.model = model || '';
    }
  }

  async chat(messages: Message[], systemPrompt?: string, options?: ChatOptions): Promise<string> {
    const { writeFileSync, unlinkSync, mkdtempSync } = await import('fs');
    const { join } = await import('path');
    const { tmpdir } = await import('os');

    // Build the prompt from messages
    const lastMessage = messages[messages.length - 1];
    if (!lastMessage) return '';

    let prompt = lastMessage.content;

    // Add tool prompt if tools provided
    if (options?.tools && options.tools.length > 0) {
      prompt += buildToolPrompt(options.tools);
    }

    const args = ['-p'];
    // Write system prompt to temp file to avoid shell escaping issues
    let tmpFile: string | undefined;
    if (systemPrompt) {
      const tmpDir = mkdtempSync(join(tmpdir(), 'opc-'));
      tmpFile = join(tmpDir, 'system.txt');
      writeFileSync(tmpFile, systemPrompt, 'utf8');
      args.push('--system-prompt-file', tmpFile);
    }
    if (this.model) {
      args.push('--model', this.model);
    }
    args.push(prompt);

    try {
      const { spawn } = await import('child_process');
      const result = await new Promise<string>((resolve, reject) => {
        const proc = spawn('claude', args, {
          env: { ...process.env },
          stdio: ['pipe', 'pipe', 'pipe'],
        });
        // Close stdin immediately to avoid "no stdin data" warning
        proc.stdin.end();
        let stdout = '';
        let stderr = '';
        proc.stdout.on('data', (d: Buffer) => { stdout += d.toString(); });
        proc.stderr.on('data', (d: Buffer) => { stderr += d.toString(); });
        proc.on('close', (code) => {
          if (code === 0 || stdout.trim()) {
            resolve(stdout.trim());
          } else {
            reject(new Error(`Claude CLI exited ${code}: ${stderr}`));
          }
        });
        proc.on('error', (err: any) => {
          if (err.code === 'ENOENT') {
            reject(new Error(
              'Claude CLI not found. Install it: npm install -g @anthropic-ai/claude-code\n' +
              'Then authenticate: claude login'
            ));
          } else {
            reject(err);
          }
        });
        // Timeout
        setTimeout(() => {
          proc.kill();
          reject(new Error('Claude CLI timed out after 120s'));
        }, 120_000);
      });
      return result;
    } catch (err: any) {
      throw new Error(`Claude CLI error: ${err.message}`);
    } finally {
      if (tmpFile) {
        try { unlinkSync(tmpFile); } catch {}
      }
    }
  }

  async *chatStream(messages: Message[], systemPrompt?: string): AsyncIterable<string> {
    const args = ['-p', '--verbose', '--output-format', 'stream-json'];
    if (this.model) {
      args.push('--model', this.model);
    }

    // Write system prompt to temp file if needed
    let tmpFile: string | undefined;
    if (systemPrompt) {
      const { writeFileSync } = await import('fs');
      const { join } = await import('path');
      const { tmpdir } = await import('os');
      tmpFile = join(tmpdir(), `opc-claude-stream-${Date.now()}.txt`);
      writeFileSync(tmpFile, systemPrompt);
      args.push('--system-prompt-file', tmpFile);
    }

    const lastMsg = messages[messages.length - 1];
    args.push(lastMsg?.content ?? '');

    const { spawn } = await import('child_process');

    try {
      const proc = spawn('claude', args, {
        env: { ...process.env },
        stdio: ['pipe', 'pipe', 'pipe'],
      });
      proc.stdin.end();

      let buffer = '';
      let lastContent = '';

      for await (const chunk of proc.stdout) {
        buffer += (chunk as Buffer).toString();
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? '';

        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;
          try {
            const event = JSON.parse(trimmed);
            // Claude CLI stream-json format:
            // {"type":"assistant","message":{"content":[{"type":"text","text":"..."}]}}
            if (event.type === 'assistant' && event.message?.content) {
              for (const block of event.message.content) {
                if (block.type === 'text' && block.text) {
                  const newContent = block.text;
                  if (newContent.length > lastContent.length) {
                    yield newContent.slice(lastContent.length);
                    lastContent = newContent;
                  }
                }
              }
            }
            // Handle the final result message; guard against non-string results
            // and track what was yielded so the same tail is not emitted twice.
            if (event.type === 'result' && event.result) {
              const resultText = typeof event.result === 'string' ? event.result : '';
              if (resultText && resultText.length > lastContent.length) {
                yield resultText.slice(lastContent.length);
                lastContent = resultText;
              }
            }
          } catch {
            // Not JSON, might be raw text
          }
        }
      }

      // Process remaining buffer
      if (buffer.trim()) {
        try {
          const event = JSON.parse(buffer.trim());
          if (event.type === 'result' && event.result) {
            const resultText = typeof event.result === 'string' ? event.result : '';
            if (resultText && resultText.length > lastContent.length) {
              yield resultText.slice(lastContent.length);
            }
          }
        } catch {
          // If not JSON, yield as raw text if we haven't yielded anything
          if (!lastContent && buffer.trim()) {
            yield buffer.trim();
          }
        }
      }

      await new Promise<void>((resolve) => {
        proc.on('close', () => resolve());
      });
    } finally {
      if (tmpFile) {
        try { const { unlinkSync } = await import('fs'); unlinkSync(tmpFile); } catch {}
      }
    }
  }
}

import { execSync } from 'child_process';

function detectClaudeCLI(): boolean {
  try {
    execSync('claude --version', { stdio: 'pipe', timeout: 3000 });
    return true;
  } catch { return false; }
}

function detectOllama(): boolean {
  try {
    // Use node http instead of curl for Windows compatibility
    const { execSync: es } = require('child_process');
    // Quick check: try to connect to the Ollama API via node
    const result = es(
      `node -e "const h=require('http');const r=h.get('http://localhost:11434/api/tags',{timeout:2000},s=>{let d='';s.on('data',c=>d+=c);s.on('end',()=>{process.stdout.write(d.includes('models')?'1':'0')})});r.on('error',()=>process.stdout.write('0'));r.on('timeout',()=>{r.destroy();process.stdout.write('0')})"`,
      { stdio: 'pipe', timeout: 5000 }
    );
    return result.toString().trim() === '1';
  } catch { return false; }
}

function detectApiKeys(): { provider: string; key: string; baseUrl?: string } | null {
  if (process.env.ANTHROPIC_API_KEY) return { provider: 'anthropic', key: process.env.ANTHROPIC_API_KEY };
  if (process.env.OPENAI_API_KEY && process.env.OPENAI_API_KEY !== 'your-api-key-here') return { provider: 'openai', key: process.env.OPENAI_API_KEY };
  if (process.env.DEEPSEEK_API_KEY) return { provider: 'deepseek', key: process.env.DEEPSEEK_API_KEY, baseUrl: 'https://api.deepseek.com/v1' };
  if (process.env.DASHSCOPE_API_KEY) return { provider: 'qwen', key: process.env.DASHSCOPE_API_KEY, baseUrl: 'https://dashscope.aliyuncs.com/compatible-mode/v1' };
  if (process.env.GEMINI_API_KEY) return { provider: 'gemini', key: process.env.GEMINI_API_KEY };
  return null;
}

export function autoDetectProvider(): { name: string; model?: string; baseUrl?: string; apiKey?: string } {
  // 1. Claude CLI (zero config, Claude Max/Pro subscription)
  if (detectClaudeCLI()) {
    return { name: 'claude-cli' };
  }

  // 2. API keys from environment
  const apiKey = detectApiKeys();
  if (apiKey) {
    return { name: apiKey.provider, apiKey: apiKey.key, baseUrl: apiKey.baseUrl };
  }

  // 3. Ollama (local, free)
  if (detectOllama()) {
    return { name: 'ollama', model: 'qwen2.5:7b' };
  }

  // 4. Nothing found
  return { name: 'none' };
}

export function createProvider(name: string = 'auto', model?: string, baseUrl?: string, apiKey?: string): LLMProvider {
  // Auto-detect if name is 'auto' or the default 'openai' with no real key
  const needsAutoDetect = name === 'auto' || (name === 'openai' && !apiKey && (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === 'your-api-key-here'));
  if (needsAutoDetect) {
    const detected = autoDetectProvider();
    if (detected.name !== 'none') {
      console.log(`[provider] Auto-detected: ${detected.name}`);
      name = detected.name;
      model = model || detected.model;
      baseUrl = baseUrl || detected.baseUrl;
      apiKey = apiKey || detected.apiKey;
    }
  }

  const finalModel = model || process.env.OPC_LLM_MODEL || 'gpt-4o-mini';

  // Claude CLI mode: use the local claude command (Claude Max/Pro subscription)
  if (name === 'claude-cli' || process.env.OPC_LLM_PROVIDER === 'claude-cli') {
    return new ClaudeCLIProvider(finalModel !== 'gpt-4o-mini' ? finalModel : undefined);
  }

  if (name === 'ollama') {
    const ollamaBase = baseUrl || process.env.OPC_LLM_BASE_URL || 'http://localhost:11434/v1';
    const ollamaKey = apiKey || process.env.OPC_LLM_API_KEY || 'ollama';
    return new OpenAICompatibleProvider('ollama', finalModel, ollamaBase, ollamaKey);
  }

  const finalKey = apiKey || getApiKey();
  const finalBaseUrl = baseUrl || getBaseUrl();

  if (finalKey.startsWith('AQ.') || isGeminiNative()) {
    return new GeminiNativeProvider(finalModel, finalKey);
  }

  let resolvedName = name;
  if (finalBaseUrl.includes('deepseek.com')) {
    resolvedName = 'deepseek';
  } else if (finalBaseUrl.includes('dashscope.aliyuncs.com')) {
    resolvedName = 'qwen';
  }

  return new OpenAICompatibleProvider(resolvedName, finalModel, finalBaseUrl, finalKey);
}

export const SUPPORTED_PROVIDERS = ['openai', 'ollama', 'claude-cli', 'deepseek', 'qwen', 'gemini', 'dashscope', 'zhipu', 'moonshot'] as const;
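For orientation, a minimal sketch of how the exports above might be exercised. The import paths and the literal { role, content } shape of Message are assumptions for illustration, not code from the package:

// Sketch only: paths and the Message shape are assumed, not taken from the package.
import { autoDetectProvider, createProvider, SUPPORTED_PROVIDERS } from './providers';
import type { Message } from './core/types';

async function demo(): Promise<void> {
  // Detection order inside autoDetectProvider: Claude CLI first,
  // then environment API keys, then a local Ollama instance.
  console.log('detected:', autoDetectProvider().name);
  console.log('supported:', SUPPORTED_PROVIDERS.join(', '));

  // 'auto' runs the same detection; if nothing is found and no key is set,
  // chat() returns the "[name/model - no API key] Echo: ..." fallback
  // rather than throwing.
  const provider = createProvider('auto');
  const messages: Message[] = [{ role: 'user', content: 'hello' } as Message];

  console.log(await provider.chat(messages, 'You are a terse assistant.'));

  // chatStream() yields incremental text deltas parsed from SSE "data:" lines.
  for await (const delta of provider.chatStream(messages)) {
    process.stdout.write(delta);
  }
}

demo().catch(console.error);

Note that createProvider('openai') with a missing or placeholder key also falls through to auto-detection, per the needsAutoDetect check above.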
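Tool calling in this module is prompt-based rather than native function calling: buildToolPrompt instructs the model to answer with ONLY a <tool_call>{"name": ..., "arguments": {...}}</tool_call> envelope. The matching parser lives on the consuming side, outside this file; the following is an illustrative sketch under that assumption, not code from the package:

// Sketch only: an assumed consumer-side parser for the <tool_call> envelope
// that buildToolPrompt asks the model to emit.
interface ToolCall {
  name: string;
  arguments: Record<string, unknown>;
}

function parseToolCall(reply: string): ToolCall | null {
  // The prompt tells the model to respond with ONLY:
  // <tool_call>{"name": "tool_name", "arguments": {...}}</tool_call>
  const match = reply.match(/<tool_call>([\s\S]*?)<\/tool_call>/);
  if (!match) return null; // plain-text answer, no tool requested
  try {
    const parsed = JSON.parse(match[1]);
    if (typeof parsed.name === 'string' && parsed.arguments && typeof parsed.arguments === 'object') {
      return { name: parsed.name, arguments: parsed.arguments };
    }
  } catch {
    // malformed JSON inside the tags: fall through and treat as plain text
  }
  return null;
}

// parseToolCall('<tool_call>{"name":"web_search","arguments":{"q":"npm"}}</tool_call>')
// -> { name: 'web_search', arguments: { q: 'npm' } }

A reply without the envelope is treated as an ordinary text answer, matching the prompt's instruction to "respond normally with text" when no tool is needed.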