gthinking 1.2.1 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (271)
  1. package/.eslintrc.js +34 -0
  2. package/ANALYSIS_SUMMARY.md +363 -0
  3. package/README.md +230 -245
  4. package/dist/analysis/analysis-engine.d.ts +63 -0
  5. package/dist/analysis/analysis-engine.d.ts.map +1 -0
  6. package/dist/analysis/analysis-engine.js +322 -0
  7. package/dist/analysis/analysis-engine.js.map +1 -0
  8. package/dist/core/config.d.ts +1419 -0
  9. package/dist/core/config.d.ts.map +1 -0
  10. package/dist/core/config.js +361 -0
  11. package/dist/core/config.js.map +1 -0
  12. package/dist/core/engine.d.ts +176 -0
  13. package/dist/core/engine.d.ts.map +1 -0
  14. package/dist/core/engine.js +604 -0
  15. package/dist/core/engine.js.map +1 -0
  16. package/dist/core/errors.d.ts +153 -0
  17. package/dist/core/errors.d.ts.map +1 -0
  18. package/dist/core/errors.js +287 -0
  19. package/dist/core/errors.js.map +1 -0
  20. package/dist/core/index.d.ts +7 -0
  21. package/dist/core/index.d.ts.map +1 -0
  22. package/dist/{types.js → core/index.js} +8 -4
  23. package/dist/core/index.js.map +1 -0
  24. package/dist/core/pipeline.d.ts +121 -0
  25. package/dist/core/pipeline.d.ts.map +1 -0
  26. package/dist/core/pipeline.js +289 -0
  27. package/dist/core/pipeline.js.map +1 -0
  28. package/dist/core/rate-limiter.d.ts +58 -0
  29. package/dist/core/rate-limiter.d.ts.map +1 -0
  30. package/dist/core/rate-limiter.js +133 -0
  31. package/dist/core/rate-limiter.js.map +1 -0
  32. package/dist/core/session-manager.d.ts +96 -0
  33. package/dist/core/session-manager.d.ts.map +1 -0
  34. package/dist/core/session-manager.js +223 -0
  35. package/dist/core/session-manager.js.map +1 -0
  36. package/dist/creativity/creativity-engine.d.ts +6 -0
  37. package/dist/creativity/creativity-engine.d.ts.map +1 -0
  38. package/dist/creativity/creativity-engine.js +17 -0
  39. package/dist/creativity/creativity-engine.js.map +1 -0
  40. package/dist/index.d.ts +24 -32
  41. package/dist/index.d.ts.map +1 -1
  42. package/dist/index.js +130 -104
  43. package/dist/index.js.map +1 -1
  44. package/dist/learning/learning-engine.d.ts +6 -0
  45. package/dist/learning/learning-engine.d.ts.map +1 -0
  46. package/dist/learning/learning-engine.js +17 -0
  47. package/dist/learning/learning-engine.js.map +1 -0
  48. package/dist/llm/index.d.ts +10 -0
  49. package/dist/llm/index.d.ts.map +1 -0
  50. package/dist/llm/index.js +26 -0
  51. package/dist/llm/index.js.map +1 -0
  52. package/dist/llm/llm-service.d.ts +109 -0
  53. package/dist/llm/llm-service.d.ts.map +1 -0
  54. package/dist/llm/llm-service.js +224 -0
  55. package/dist/llm/llm-service.js.map +1 -0
  56. package/dist/llm/providers/base.d.ts +85 -0
  57. package/dist/llm/providers/base.d.ts.map +1 -0
  58. package/dist/llm/providers/base.js +57 -0
  59. package/dist/llm/providers/base.js.map +1 -0
  60. package/dist/llm/providers/cli.d.ts +23 -0
  61. package/dist/llm/providers/cli.d.ts.map +1 -0
  62. package/dist/llm/providers/cli.js +158 -0
  63. package/dist/llm/providers/cli.js.map +1 -0
  64. package/dist/llm/providers/gemini.d.ts +30 -0
  65. package/dist/llm/providers/gemini.d.ts.map +1 -0
  66. package/dist/llm/providers/gemini.js +168 -0
  67. package/dist/llm/providers/gemini.js.map +1 -0
  68. package/dist/llm/sanitization.d.ts +50 -0
  69. package/dist/llm/sanitization.d.ts.map +1 -0
  70. package/dist/llm/sanitization.js +149 -0
  71. package/dist/llm/sanitization.js.map +1 -0
  72. package/dist/{server.d.ts.map → mcp/server.d.ts.map} +1 -1
  73. package/dist/mcp/server.js +108 -0
  74. package/dist/mcp/server.js.map +1 -0
  75. package/dist/planning/planning-engine.d.ts +6 -0
  76. package/dist/planning/planning-engine.d.ts.map +1 -0
  77. package/dist/planning/planning-engine.js +17 -0
  78. package/dist/planning/planning-engine.js.map +1 -0
  79. package/dist/reasoning/reasoning-engine.d.ts +6 -0
  80. package/dist/reasoning/reasoning-engine.d.ts.map +1 -0
  81. package/dist/reasoning/reasoning-engine.js +17 -0
  82. package/dist/reasoning/reasoning-engine.js.map +1 -0
  83. package/dist/search/search-engine.d.ts +99 -0
  84. package/dist/search/search-engine.d.ts.map +1 -0
  85. package/dist/search/search-engine.js +271 -0
  86. package/dist/search/search-engine.js.map +1 -0
  87. package/dist/synthesis/synthesis-engine.d.ts +6 -0
  88. package/dist/synthesis/synthesis-engine.d.ts.map +1 -0
  89. package/dist/synthesis/synthesis-engine.js +17 -0
  90. package/dist/synthesis/synthesis-engine.js.map +1 -0
  91. package/dist/types/analysis.d.ts +1534 -49
  92. package/dist/types/analysis.d.ts.map +1 -1
  93. package/dist/types/analysis.js +250 -0
  94. package/dist/types/analysis.js.map +1 -1
  95. package/dist/types/core.d.ts +257 -30
  96. package/dist/types/core.d.ts.map +1 -1
  97. package/dist/types/core.js +148 -18
  98. package/dist/types/core.js.map +1 -1
  99. package/dist/types/creativity.d.ts +2871 -56
  100. package/dist/types/creativity.d.ts.map +1 -1
  101. package/dist/types/creativity.js +195 -0
  102. package/dist/types/creativity.js.map +1 -1
  103. package/dist/types/index.d.ts +6 -2
  104. package/dist/types/index.d.ts.map +1 -1
  105. package/dist/types/index.js +17 -2
  106. package/dist/types/index.js.map +1 -1
  107. package/dist/types/learning.d.ts +851 -61
  108. package/dist/types/learning.d.ts.map +1 -1
  109. package/dist/types/learning.js +155 -0
  110. package/dist/types/learning.js.map +1 -1
  111. package/dist/types/planning.d.ts +2223 -71
  112. package/dist/types/planning.d.ts.map +1 -1
  113. package/dist/types/planning.js +190 -0
  114. package/dist/types/planning.js.map +1 -1
  115. package/dist/types/reasoning.d.ts +2209 -72
  116. package/dist/types/reasoning.d.ts.map +1 -1
  117. package/dist/types/reasoning.js +200 -1
  118. package/dist/types/reasoning.js.map +1 -1
  119. package/dist/types/search.d.ts +981 -53
  120. package/dist/types/search.d.ts.map +1 -1
  121. package/dist/types/search.js +137 -0
  122. package/dist/types/search.js.map +1 -1
  123. package/dist/types/synthesis.d.ts +583 -37
  124. package/dist/types/synthesis.d.ts.map +1 -1
  125. package/dist/types/synthesis.js +138 -0
  126. package/dist/types/synthesis.js.map +1 -1
  127. package/dist/utils/cache.d.ts +144 -0
  128. package/dist/utils/cache.d.ts.map +1 -0
  129. package/dist/utils/cache.js +288 -0
  130. package/dist/utils/cache.js.map +1 -0
  131. package/dist/utils/id-generator.d.ts +89 -0
  132. package/dist/utils/id-generator.d.ts.map +1 -0
  133. package/dist/utils/id-generator.js +132 -0
  134. package/dist/utils/id-generator.js.map +1 -0
  135. package/dist/utils/index.d.ts +11 -0
  136. package/dist/utils/index.d.ts.map +1 -0
  137. package/dist/utils/index.js +33 -0
  138. package/dist/utils/index.js.map +1 -0
  139. package/dist/utils/logger.d.ts +142 -0
  140. package/dist/utils/logger.d.ts.map +1 -0
  141. package/dist/utils/logger.js +248 -0
  142. package/dist/utils/logger.js.map +1 -0
  143. package/dist/utils/metrics.d.ts +149 -0
  144. package/dist/utils/metrics.d.ts.map +1 -0
  145. package/dist/utils/metrics.js +296 -0
  146. package/dist/utils/metrics.js.map +1 -0
  147. package/dist/utils/timer.d.ts +7 -0
  148. package/dist/utils/timer.d.ts.map +1 -0
  149. package/dist/utils/timer.js +17 -0
  150. package/dist/utils/timer.js.map +1 -0
  151. package/dist/utils/validation.d.ts +147 -0
  152. package/dist/utils/validation.d.ts.map +1 -0
  153. package/dist/utils/validation.js +275 -0
  154. package/dist/utils/validation.js.map +1 -0
  155. package/docs/API.md +411 -0
  156. package/docs/ARCHITECTURE.md +271 -0
  157. package/docs/CHANGELOG.md +283 -0
  158. package/jest.config.js +28 -0
  159. package/package.json +43 -30
  160. package/src/analysis/analysis-engine.ts +383 -0
  161. package/src/core/config.ts +406 -0
  162. package/src/core/engine.ts +785 -0
  163. package/src/core/errors.ts +349 -0
  164. package/src/core/index.ts +12 -0
  165. package/src/core/pipeline.ts +424 -0
  166. package/src/core/rate-limiter.ts +155 -0
  167. package/src/core/session-manager.ts +269 -0
  168. package/src/creativity/creativity-engine.ts +14 -0
  169. package/src/index.ts +178 -0
  170. package/src/learning/learning-engine.ts +14 -0
  171. package/src/llm/index.ts +10 -0
  172. package/src/llm/llm-service.ts +285 -0
  173. package/src/llm/providers/base.ts +146 -0
  174. package/src/llm/providers/cli.ts +186 -0
  175. package/src/llm/providers/gemini.ts +201 -0
  176. package/src/llm/sanitization.ts +178 -0
  177. package/src/mcp/server.ts +117 -0
  178. package/src/planning/planning-engine.ts +14 -0
  179. package/src/reasoning/reasoning-engine.ts +14 -0
  180. package/src/search/search-engine.ts +333 -0
  181. package/src/synthesis/synthesis-engine.ts +14 -0
  182. package/src/types/analysis.ts +337 -0
  183. package/src/types/core.ts +342 -0
  184. package/src/types/creativity.ts +268 -0
  185. package/src/types/index.ts +31 -0
  186. package/src/types/learning.ts +215 -0
  187. package/src/types/planning.ts +251 -0
  188. package/src/types/reasoning.ts +288 -0
  189. package/src/types/search.ts +192 -0
  190. package/src/types/synthesis.ts +187 -0
  191. package/src/utils/cache.ts +363 -0
  192. package/src/utils/id-generator.ts +135 -0
  193. package/src/utils/index.ts +22 -0
  194. package/src/utils/logger.ts +290 -0
  195. package/src/utils/metrics.ts +380 -0
  196. package/src/utils/timer.ts +15 -0
  197. package/src/utils/validation.ts +297 -0
  198. package/tests/setup.ts +22 -0
  199. package/tests/unit/cache.test.ts +189 -0
  200. package/tests/unit/engine.test.ts +179 -0
  201. package/tests/unit/validation.test.ts +218 -0
  202. package/tsconfig.json +17 -12
  203. package/GEMINI.md +0 -68
  204. package/analysis.ts +0 -1063
  205. package/creativity.ts +0 -1055
  206. package/dist/analysis.d.ts +0 -54
  207. package/dist/analysis.d.ts.map +0 -1
  208. package/dist/analysis.js +0 -866
  209. package/dist/analysis.js.map +0 -1
  210. package/dist/creativity.d.ts +0 -81
  211. package/dist/creativity.d.ts.map +0 -1
  212. package/dist/creativity.js +0 -828
  213. package/dist/creativity.js.map +0 -1
  214. package/dist/engine.d.ts +0 -90
  215. package/dist/engine.d.ts.map +0 -1
  216. package/dist/engine.js +0 -677
  217. package/dist/engine.js.map +0 -1
  218. package/dist/examples.d.ts +0 -7
  219. package/dist/examples.d.ts.map +0 -1
  220. package/dist/examples.js +0 -506
  221. package/dist/examples.js.map +0 -1
  222. package/dist/learning.d.ts +0 -72
  223. package/dist/learning.d.ts.map +0 -1
  224. package/dist/learning.js +0 -615
  225. package/dist/learning.js.map +0 -1
  226. package/dist/llm-service.d.ts +0 -21
  227. package/dist/llm-service.d.ts.map +0 -1
  228. package/dist/llm-service.js +0 -100
  229. package/dist/llm-service.js.map +0 -1
  230. package/dist/planning.d.ts +0 -58
  231. package/dist/planning.d.ts.map +0 -1
  232. package/dist/planning.js +0 -824
  233. package/dist/planning.js.map +0 -1
  234. package/dist/reasoning.d.ts +0 -73
  235. package/dist/reasoning.d.ts.map +0 -1
  236. package/dist/reasoning.js +0 -845
  237. package/dist/reasoning.js.map +0 -1
  238. package/dist/search-discovery.d.ts +0 -73
  239. package/dist/search-discovery.d.ts.map +0 -1
  240. package/dist/search-discovery.js +0 -548
  241. package/dist/search-discovery.js.map +0 -1
  242. package/dist/server.js +0 -113
  243. package/dist/server.js.map +0 -1
  244. package/dist/types/engine.d.ts +0 -55
  245. package/dist/types/engine.d.ts.map +0 -1
  246. package/dist/types/engine.js +0 -3
  247. package/dist/types/engine.js.map +0 -1
  248. package/dist/types.d.ts +0 -6
  249. package/dist/types.d.ts.map +0 -1
  250. package/dist/types.js.map +0 -1
  251. package/engine.ts +0 -947
  252. package/examples.ts +0 -717
  253. package/index.ts +0 -106
  254. package/learning.ts +0 -779
  255. package/llm-service.ts +0 -120
  256. package/planning.ts +0 -1028
  257. package/reasoning.ts +0 -1079
  258. package/search-discovery.ts +0 -700
  259. package/server.ts +0 -115
  260. package/types/analysis.ts +0 -69
  261. package/types/core.ts +0 -90
  262. package/types/creativity.ts +0 -72
  263. package/types/engine.ts +0 -60
  264. package/types/index.ts +0 -9
  265. package/types/learning.ts +0 -69
  266. package/types/planning.ts +0 -85
  267. package/types/reasoning.ts +0 -92
  268. package/types/search.ts +0 -58
  269. package/types/synthesis.ts +0 -42
  270. package/types.ts +0 -6
  271. /package/dist/{server.d.ts → mcp/server.d.ts} +0 -0
@@ -0,0 +1,285 @@
1
+ /**
2
+ * LLM Service for GThinking v2.0.0
3
+ *
4
+ * Secure LLM service that uses HTTP APIs instead of command-line
5
+ * to prevent command injection vulnerabilities.
6
+ *
7
+ * @module llm/llm-service
8
+ */
9
+
10
+ import type { ILLMProvider, LLMMessage, LLMRequestOptions, LLMResponse, LLMStreamChunk } from './providers/base';
11
+ import { GeminiProvider } from './providers/gemini';
12
+ import { CLIProvider } from './providers/cli';
13
+ import { sanitizePrompt, validatePrompt } from './sanitization';
14
+ import { LLMError } from '../core/errors';
15
+ import { logger, createComponentLogger } from '../utils/logger';
16
+ import type { EngineConfig } from '../types/core';
17
+
18
+ /**
19
+ * LLM service configuration
20
+ */
21
+ export interface LLMServiceConfig {
22
+ provider: 'gemini' | 'claude' | 'openai' | 'cli';
23
+ apiKey?: string;
24
+ model?: string;
25
+ cliCommand?: string;
26
+ cliArgs?: string[];
27
+ cliPromptPassingMethod?: 'stdin' | 'arg';
28
+ cliPromptFlag?: string;
29
+ timeoutMs?: number;
30
+ maxRetries?: number;
31
+ }
32
+
33
+ /**
34
+ * LLM Service
35
+ *
36
+ * Provides a unified interface for interacting with various LLM providers
37
+ * with built-in security measures including input sanitization.
38
+ */
39
+ export class LLMService {
40
+ private provider: ILLMProvider | null = null;
41
+ private readonly logger = createComponentLogger('LLMService');
42
+
43
+ /**
44
+ * Create a new LLM service
45
+ *
46
+ * @param config - LLM service configuration
47
+ */
48
+ constructor(config: LLMServiceConfig) {
49
+ this.initializeProvider(config);
50
+ }
51
+
52
+ /**
53
+ * Create LLM service from engine configuration
54
+ *
55
+ * @param config - Engine configuration
56
+ * @returns LLM service instance
57
+ */
58
+ static fromConfig(config: EngineConfig): LLMService {
59
+ const apiKey = config.llmApiKey;
60
+
61
+ // Only require API key if NOT using CLI provider
62
+ if (config.llmProvider !== 'cli' && (apiKey === undefined || apiKey === '')) {
63
+ throw new LLMError('LLM API key not configured', config.llmProvider);
64
+ }
65
+
66
+ return new LLMService({
67
+ provider: config.llmProvider as any,
68
+ apiKey,
69
+ model: config.llmModel,
70
+ cliCommand: config.llmCliCommand || 'gemini', // Pass the command from config
71
+ cliArgs: config.llmCliArgs,
72
+ cliPromptPassingMethod: config.llmCliPromptPassingMethod,
73
+ cliPromptFlag: config.llmCliPromptFlag,
74
+ timeoutMs: config.llmTimeoutMs,
75
+ maxRetries: config.llmMaxRetries,
76
+ });
77
+ }
78
+
79
+ /**
80
+ * Complete a prompt with the LLM
81
+ *
82
+ * @param prompt - User prompt
83
+ * @param systemPrompt - Optional system prompt
84
+ * @param options - Request options
85
+ * @returns LLM response
86
+ */
87
+ async complete(
88
+ prompt: string,
89
+ systemPrompt?: string,
90
+ options: LLMRequestOptions = {}
91
+ ): Promise<string> {
92
+ this.ensureProvider();
93
+
94
+ // Sanitize inputs
95
+ const sanitizedPrompt = sanitizePrompt(prompt);
96
+ const sanitizedSystemPrompt = systemPrompt ? sanitizePrompt(systemPrompt) : undefined;
97
+
98
+ // Validate inputs
99
+ const validation = validatePrompt(sanitizedPrompt);
100
+ if (!validation.safe) {
101
+ this.logger.warn('Prompt validation warnings', { issues: validation.issues });
102
+ }
103
+
104
+ const messages: LLMMessage[] = [];
105
+
106
+ if (sanitizedSystemPrompt !== undefined) {
107
+ messages.push({ role: 'system', content: sanitizedSystemPrompt });
108
+ }
109
+
110
+ messages.push({ role: 'user', content: sanitizedPrompt });
111
+
112
+ this.logger.debug('Sending completion request', {
113
+ provider: this.provider!.name,
114
+ model: options.model,
115
+ });
116
+
117
+ const response = await this.provider!.complete(messages, options);
118
+
119
+ return response.content;
120
+ }
121
+
122
+ /**
123
+ * Stream a prompt completion from the LLM
124
+ *
125
+ * @param prompt - User prompt
126
+ * @param systemPrompt - Optional system prompt
127
+ * @param options - Request options
128
+ * @returns Async generator of stream chunks
129
+ */
130
+ async *stream(
131
+ prompt: string,
132
+ systemPrompt?: string,
133
+ options: LLMRequestOptions = {}
134
+ ): AsyncGenerator<string> {
135
+ this.ensureProvider();
136
+
137
+ // Sanitize inputs
138
+ const sanitizedPrompt = sanitizePrompt(prompt);
139
+ const sanitizedSystemPrompt = systemPrompt ? sanitizePrompt(systemPrompt) : undefined;
140
+
141
+ const messages: LLMMessage[] = [];
142
+
143
+ if (sanitizedSystemPrompt !== undefined) {
144
+ messages.push({ role: 'system', content: sanitizedSystemPrompt });
145
+ }
146
+
147
+ messages.push({ role: 'user', content: sanitizedPrompt });
148
+
149
+ this.logger.debug('Starting stream request', {
150
+ provider: this.provider!.name,
151
+ model: options.model,
152
+ });
153
+
154
+ for await (const chunk of this.provider!.stream(messages, options)) {
155
+ if (chunk.content) {
156
+ yield chunk.content;
157
+ }
158
+ }
159
+ }
160
+
161
+ /**
162
+ * Complete with structured messages
163
+ *
164
+ * @param messages - Array of messages
165
+ * @param options - Request options
166
+ * @returns LLM response
167
+ */
168
+ async completeMessages(
169
+ messages: LLMMessage[],
170
+ options: LLMRequestOptions = {}
171
+ ): Promise<LLMResponse> {
172
+ this.ensureProvider();
173
+
174
+ // Sanitize all messages
175
+ const sanitizedMessages = messages.map(msg => ({
176
+ ...msg,
177
+ content: sanitizePrompt(msg.content),
178
+ }));
179
+
180
+ this.logger.debug('Sending messages completion request', {
181
+ provider: this.provider!.name,
182
+ messageCount: sanitizedMessages.length,
183
+ });
184
+
185
+ return this.provider!.complete(sanitizedMessages, options);
186
+ }
187
+
188
+ /**
189
+ * Check if the service is properly configured
190
+ *
191
+ * @returns True if configured correctly
192
+ */
193
+ isConfigured(): boolean {
194
+ return this.provider !== null && this.provider.validateConfig();
195
+ }
196
+
197
+ /**
198
+ * Get the current provider name
199
+ *
200
+ * @returns Provider name or null
201
+ */
202
+ getProviderName(): string | null {
203
+ return this.provider?.name ?? null;
204
+ }
205
+
206
+ /**
207
+ * Get available models for the current provider
208
+ *
209
+ * @returns Array of model names
210
+ */
211
+ getAvailableModels(): string[] {
212
+ return this.provider?.availableModels ?? [];
213
+ }
214
+
215
+ /**
216
+ * Initialize the LLM provider
217
+ *
218
+ * @param config - Service configuration
219
+ */
220
+ private initializeProvider(config: LLMServiceConfig): void {
221
+ switch (config.provider) {
222
+ case 'gemini':
223
+ this.provider = new GeminiProvider({
224
+ apiKey: config.apiKey,
225
+ model: config.model,
226
+ timeoutMs: config.timeoutMs,
227
+ maxRetries: config.maxRetries,
228
+ });
229
+ break;
230
+
231
+ case 'claude':
232
+ // Claude provider would be implemented here
233
+ throw new LLMError('Claude provider not yet implemented', 'claude');
234
+
235
+ case 'openai':
236
+ // OpenAI provider would be implemented here
237
+ throw new LLMError('OpenAI provider not yet implemented', 'openai');
238
+
239
+ case 'cli':
240
+ // Auto-configure defaults for known tools if not specified
241
+ let promptMethod = config.cliPromptPassingMethod || 'stdin';
242
+ let promptFlag = config.cliPromptFlag;
243
+
244
+ // Smart defaults for Gemini if using cli command 'gemini'
245
+ if (config.cliCommand === 'gemini' && !config.cliPromptPassingMethod) {
246
+ promptMethod = 'arg';
247
+ promptFlag = '-p';
248
+ }
249
+
250
+ this.provider = new CLIProvider({
251
+ command: config.cliCommand || 'gemini',
252
+ args: config.cliArgs,
253
+ promptPassingMethod: promptMethod,
254
+ promptFlag: promptFlag,
255
+ timeoutMs: config.timeoutMs,
256
+ maxRetries: config.maxRetries
257
+ });
258
+ break;
259
+
260
+ default:
261
+ throw new LLMError(`Unknown provider: ${config.provider}`, String(config.provider));
262
+ }
263
+
264
+ this.logger.info('LLM provider initialized', { provider: config.provider });
265
+ }
266
+
267
+ /**
268
+ * Ensure provider is initialized
269
+ */
270
+ private ensureProvider(): void {
271
+ if (this.provider === null) {
272
+ throw new LLMError('LLM provider not initialized', 'unknown');
273
+ }
274
+ }
275
+ }
276
+
277
+ /**
278
+ * Create an LLM service from configuration
279
+ *
280
+ * @param config - Engine configuration
281
+ * @returns LLM service instance
282
+ */
283
+ export function createLLMService(config: EngineConfig): LLMService {
284
+ return LLMService.fromConfig(config);
285
+ }
@@ -0,0 +1,146 @@
1
+ /**
2
+ * Base LLM Provider Interface
3
+ *
4
+ * @module llm/providers/base
5
+ */
6
+
7
+ import type { LLMError } from '../../core/errors';
8
+
9
+ /**
10
+ * LLM message role
11
+ */
12
+ export type MessageRole = 'system' | 'user' | 'assistant';
13
+
14
+ /**
15
+ * LLM message
16
+ */
17
+ export interface LLMMessage {
18
+ role: MessageRole;
19
+ content: string;
20
+ }
21
+
22
+ /**
23
+ * LLM request options
24
+ */
25
+ export interface LLMRequestOptions {
26
+ model?: string;
27
+ temperature?: number;
28
+ maxTokens?: number;
29
+ topP?: number;
30
+ frequencyPenalty?: number;
31
+ presencePenalty?: number;
32
+ stopSequences?: string[];
33
+ timeoutMs?: number;
34
+ }
35
+
36
+ /**
37
+ * LLM response
38
+ */
39
+ export interface LLMResponse {
40
+ content: string;
41
+ model: string;
42
+ usage: {
43
+ promptTokens: number;
44
+ completionTokens: number;
45
+ totalTokens: number;
46
+ };
47
+ finishReason: string;
48
+ }
49
+
50
+ /**
51
+ * LLM stream chunk
52
+ */
53
+ export interface LLMStreamChunk {
54
+ content: string;
55
+ isComplete: boolean;
56
+ }
57
+
58
+ /**
59
+ * Base LLM provider interface
60
+ */
61
+ export interface ILLMProvider {
62
+ readonly name: string;
63
+ readonly availableModels: string[];
64
+
65
+ complete(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
66
+ stream(messages: LLMMessage[], options?: LLMRequestOptions): AsyncGenerator<LLMStreamChunk>;
67
+ validateConfig(): boolean;
68
+ }
69
+
70
+ /**
71
+ * Base LLM provider class
72
+ */
73
+ export abstract class BaseLLMProvider implements ILLMProvider {
74
+ abstract readonly name: string;
75
+ abstract readonly availableModels: string[];
76
+
77
+ protected apiKey: string;
78
+ protected baseUrl: string;
79
+ protected defaultModel: string;
80
+ protected timeoutMs: number;
81
+ protected maxRetries: number;
82
+
83
+ constructor(config: {
84
+ apiKey: string;
85
+ baseUrl: string;
86
+ defaultModel: string;
87
+ timeoutMs?: number;
88
+ maxRetries?: number;
89
+ }) {
90
+ this.apiKey = config.apiKey;
91
+ this.baseUrl = config.baseUrl;
92
+ this.defaultModel = config.defaultModel;
93
+ this.timeoutMs = config.timeoutMs ?? 30000;
94
+ this.maxRetries = config.maxRetries ?? 3;
95
+ }
96
+
97
+ abstract complete(
98
+ messages: LLMMessage[],
99
+ options?: LLMRequestOptions
100
+ ): Promise<LLMResponse>;
101
+
102
+ abstract stream(
103
+ messages: LLMMessage[],
104
+ options?: LLMRequestOptions
105
+ ): AsyncGenerator<LLMStreamChunk>;
106
+
107
+ validateConfig(): boolean {
108
+ return this.apiKey.length > 0 && this.baseUrl.length > 0;
109
+ }
110
+
111
+ protected getModel(options?: LLMRequestOptions): string {
112
+ return options?.model ?? this.defaultModel;
113
+ }
114
+
115
+ protected async withRetry<T>(
116
+ operation: () => Promise<T>,
117
+ retries = this.maxRetries
118
+ ): Promise<T> {
119
+ let lastError: Error | undefined;
120
+
121
+ for (let i = 0; i <= retries; i++) {
122
+ try {
123
+ return await operation();
124
+ } catch (error) {
125
+ lastError = error as Error;
126
+
127
+ // Don't retry on certain errors
128
+ if (this.isNonRetryableError(error)) {
129
+ throw error;
130
+ }
131
+
132
+ if (i < retries) {
133
+ const delay = Math.pow(2, i) * 1000; // Exponential backoff
134
+ await new Promise(resolve => setTimeout(resolve, delay));
135
+ }
136
+ }
137
+ }
138
+
139
+ throw lastError;
140
+ }
141
+
142
+ protected isNonRetryableError(error: unknown): boolean {
143
+ // Override in subclasses for provider-specific logic
144
+ return false;
145
+ }
146
+ }
@@ -0,0 +1,186 @@
1
+
2
+ import { spawn } from 'child_process';
3
+ import { BaseLLMProvider, type LLMMessage, type LLMRequestOptions, type LLMResponse, type LLMStreamChunk } from './base';
4
+ import { LLMError } from '../../core/errors';
5
+ import { logger } from '../../utils/logger';
6
+
7
+ export interface CLIConfig {
8
+ command: string; // e.g., 'gemini', 'claude'
9
+ args?: string[];
10
+ promptPassingMethod?: 'stdin' | 'arg';
11
+ promptFlag?: string; // e.g. '-p'
12
+ timeoutMs?: number;
13
+ maxRetries?: number;
14
+ }
15
+
16
+ export class CLIProvider extends BaseLLMProvider {
17
+ readonly name = 'cli';
18
+ readonly availableModels = ['local-default']; // CLI usually handles the model selection internally or via args
19
+ private command: string;
20
+ private defaultArgs: string[];
21
+ private promptPassingMethod: 'stdin' | 'arg';
22
+ private promptFlag?: string;
23
+
24
+ constructor(config: CLIConfig) {
25
+ super({
26
+ apiKey: 'local-auth', // Placeholder for base class requirement
27
+ baseUrl: 'local', // Placeholder
28
+ defaultModel: 'default',
29
+ timeoutMs: config.timeoutMs,
30
+ maxRetries: config.maxRetries
31
+ });
32
+ this.command = config.command;
33
+ this.defaultArgs = config.args || [];
34
+ this.promptPassingMethod = config.promptPassingMethod || 'stdin';
35
+ this.promptFlag = config.promptFlag;
36
+ }
37
+
38
+ // Helper to extract the actual prompt from messages
39
+ // CLIs usually accept a single prompt string, not a full chat history JSON
40
+ private getPromptText(messages: LLMMessage[]): string {
41
+ // Strategy: Concatenate system prompt and user messages
42
+ return messages.map(m => {
43
+ if (m.role === 'system') return `System: ${m.content}\n`;
44
+ if (m.role === 'user') return `User: ${m.content}\n`;
45
+ return `Assistant: ${m.content}\n`;
46
+ }).join('\n') + "\nResponse:";
47
+ }
48
+
49
+ // Strategy for simple CLIs (like 'gemini "prompt"'):
50
+ // If the CLI accepts the prompt as the last argument
51
+ private getLatestPrompt(messages: LLMMessage[]): string {
52
+ const lastUserMsg = [...messages].reverse().find(m => m.role === 'user');
53
+ return lastUserMsg ? lastUserMsg.content : '';
54
+ }
55
+
56
+ async complete(
57
+ messages: LLMMessage[],
58
+ options: LLMRequestOptions = {}
59
+ ): Promise<LLMResponse> {
60
+ return this.withRetry(async () => {
61
+ const prompt = this.getLatestPrompt(messages);
62
+
63
+ let args = [...this.defaultArgs];
64
+
65
+ // Handle Prompt passing logic
66
+ if (this.promptPassingMethod === 'arg') {
67
+ if (this.promptFlag) {
68
+ args.push(this.promptFlag);
69
+ }
70
+ args.push(prompt);
71
+ }
72
+
73
+ return new Promise((resolve, reject) => {
74
+ logger.debug(`Executing CLI command: ${this.command}`, { args, method: this.promptPassingMethod });
75
+
76
+ const child = spawn(this.command, args, {
77
+ env: { ...process.env },
78
+ shell: false,
79
+ stdio: ['pipe', 'pipe', 'pipe']
80
+ });
81
+
82
+ let stdout = '';
83
+ let stderr = '';
84
+
85
+ child.stdout.on('data', (data) => {
86
+ stdout += data.toString();
87
+ });
88
+
89
+ child.stderr.on('data', (data) => {
90
+ stderr += data.toString();
91
+ });
92
+
93
+ child.on('close', (code) => {
94
+ if (code !== 0) {
95
+ logger.error(`CLI execution failed`, { code, stderr });
96
+ reject(new LLMError(`CLI command failed with code ${code}: ${stderr}`, 'cli'));
97
+ } else {
98
+ resolve({
99
+ content: stdout.trim(),
100
+ model: 'cli-default',
101
+ usage: {
102
+ promptTokens: 0,
103
+ completionTokens: 0,
104
+ totalTokens: 0
105
+ },
106
+ finishReason: 'stop'
107
+ });
108
+ }
109
+ });
110
+
111
+ child.on('error', (err) => {
112
+ reject(new LLMError(`Failed to spawn CLI process: ${err.message}`, 'cli'));
113
+ });
114
+
115
+ // Only write to stdin if configured to do so
116
+ if (this.promptPassingMethod === 'stdin') {
117
+ child.stdin.write(prompt);
118
+ child.stdin.end();
119
+ } else {
120
+ // Even if passing by arg, we should end stdin to prevent hanging if process reads stdin
121
+ child.stdin.end();
122
+ }
123
+ });
124
+ });
125
+ }
126
+
127
+ async *stream(
128
+ messages: LLMMessage[],
129
+ options: LLMRequestOptions = {}
130
+ ): AsyncGenerator<LLMStreamChunk> {
131
+ const prompt = this.getLatestPrompt(messages);
132
+ let args = [...this.defaultArgs];
133
+
134
+ if (this.promptPassingMethod === 'arg') {
135
+ if (this.promptFlag) {
136
+ args.push(this.promptFlag);
137
+ }
138
+ args.push(prompt);
139
+ }
140
+
141
+ const child = spawn(this.command, args, {
142
+ env: { ...process.env },
143
+ shell: false,
144
+ stdio: ['pipe', 'pipe', 'pipe']
145
+ });
146
+
147
+ if (this.promptPassingMethod === 'stdin') {
148
+ child.stdin.write(prompt);
149
+ child.stdin.end();
150
+ } else {
151
+ child.stdin.end();
152
+ }
153
+
154
+ // Use a manual iterator to handle backpressure properly would require
155
+ // implementing a ReadableStream-like behavior or simply yielding as we get data.
156
+ // For simplicity and effectiveness in this context, we'll iterate over the stdout stream.
157
+
158
+ try {
159
+ for await (const chunk of child.stdout) {
160
+ yield {
161
+ content: chunk.toString(),
162
+ isComplete: false
163
+ };
164
+ }
165
+
166
+ // Wait for process to exit to check for errors
167
+ await new Promise<void>((resolve, reject) => {
168
+ child.on('close', (code) => {
169
+ if (code !== 0) {
170
+ // We can capture stderr here if needed, but for stream we just end
171
+ // Ideally we would have collected stderr separately
172
+ reject(new LLMError(`CLI stream failed with code ${code}`, 'cli'));
173
+ } else {
174
+ resolve();
175
+ }
176
+ });
177
+ child.on('error', reject);
178
+ });
179
+
180
+ yield { content: '', isComplete: true };
181
+
182
+ } catch (error) {
183
+ throw new LLMError(`Stream error: ${(error as Error).message}`, 'cli');
184
+ }
185
+ }
186
+ }