gthinking 1.3.0 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.js +34 -0
- package/ANALYSIS_SUMMARY.md +363 -0
- package/README.md +230 -250
- package/dist/analysis/analysis-engine.d.ts +63 -0
- package/dist/analysis/analysis-engine.d.ts.map +1 -0
- package/dist/analysis/analysis-engine.js +322 -0
- package/dist/analysis/analysis-engine.js.map +1 -0
- package/dist/core/config.d.ts +1419 -0
- package/dist/core/config.d.ts.map +1 -0
- package/dist/core/config.js +361 -0
- package/dist/core/config.js.map +1 -0
- package/dist/core/engine.d.ts +176 -0
- package/dist/core/engine.d.ts.map +1 -0
- package/dist/core/engine.js +604 -0
- package/dist/core/engine.js.map +1 -0
- package/dist/core/errors.d.ts +153 -0
- package/dist/core/errors.d.ts.map +1 -0
- package/dist/core/errors.js +287 -0
- package/dist/core/errors.js.map +1 -0
- package/dist/core/index.d.ts +7 -0
- package/dist/core/index.d.ts.map +1 -0
- package/dist/{types.js → core/index.js} +8 -4
- package/dist/core/index.js.map +1 -0
- package/dist/core/pipeline.d.ts +121 -0
- package/dist/core/pipeline.d.ts.map +1 -0
- package/dist/core/pipeline.js +289 -0
- package/dist/core/pipeline.js.map +1 -0
- package/dist/core/rate-limiter.d.ts +58 -0
- package/dist/core/rate-limiter.d.ts.map +1 -0
- package/dist/core/rate-limiter.js +133 -0
- package/dist/core/rate-limiter.js.map +1 -0
- package/dist/core/session-manager.d.ts +96 -0
- package/dist/core/session-manager.d.ts.map +1 -0
- package/dist/core/session-manager.js +223 -0
- package/dist/core/session-manager.js.map +1 -0
- package/dist/creativity/creativity-engine.d.ts +6 -0
- package/dist/creativity/creativity-engine.d.ts.map +1 -0
- package/dist/creativity/creativity-engine.js +17 -0
- package/dist/creativity/creativity-engine.js.map +1 -0
- package/dist/index.d.ts +24 -32
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +130 -104
- package/dist/index.js.map +1 -1
- package/dist/learning/learning-engine.d.ts +6 -0
- package/dist/learning/learning-engine.d.ts.map +1 -0
- package/dist/learning/learning-engine.js +17 -0
- package/dist/learning/learning-engine.js.map +1 -0
- package/dist/llm/index.d.ts +10 -0
- package/dist/llm/index.d.ts.map +1 -0
- package/dist/llm/index.js +26 -0
- package/dist/llm/index.js.map +1 -0
- package/dist/llm/llm-service.d.ts +109 -0
- package/dist/llm/llm-service.d.ts.map +1 -0
- package/dist/llm/llm-service.js +224 -0
- package/dist/llm/llm-service.js.map +1 -0
- package/dist/llm/providers/base.d.ts +85 -0
- package/dist/llm/providers/base.d.ts.map +1 -0
- package/dist/llm/providers/base.js +57 -0
- package/dist/llm/providers/base.js.map +1 -0
- package/dist/llm/providers/cli.d.ts +23 -0
- package/dist/llm/providers/cli.d.ts.map +1 -0
- package/dist/llm/providers/cli.js +158 -0
- package/dist/llm/providers/cli.js.map +1 -0
- package/dist/llm/providers/gemini.d.ts +30 -0
- package/dist/llm/providers/gemini.d.ts.map +1 -0
- package/dist/llm/providers/gemini.js +168 -0
- package/dist/llm/providers/gemini.js.map +1 -0
- package/dist/llm/sanitization.d.ts +50 -0
- package/dist/llm/sanitization.d.ts.map +1 -0
- package/dist/llm/sanitization.js +149 -0
- package/dist/llm/sanitization.js.map +1 -0
- package/dist/{server.d.ts.map → mcp/server.d.ts.map} +1 -1
- package/dist/mcp/server.js +108 -0
- package/dist/mcp/server.js.map +1 -0
- package/dist/planning/planning-engine.d.ts +6 -0
- package/dist/planning/planning-engine.d.ts.map +1 -0
- package/dist/planning/planning-engine.js +17 -0
- package/dist/planning/planning-engine.js.map +1 -0
- package/dist/reasoning/reasoning-engine.d.ts +6 -0
- package/dist/reasoning/reasoning-engine.d.ts.map +1 -0
- package/dist/reasoning/reasoning-engine.js +17 -0
- package/dist/reasoning/reasoning-engine.js.map +1 -0
- package/dist/search/search-engine.d.ts +99 -0
- package/dist/search/search-engine.d.ts.map +1 -0
- package/dist/search/search-engine.js +271 -0
- package/dist/search/search-engine.js.map +1 -0
- package/dist/synthesis/synthesis-engine.d.ts +6 -0
- package/dist/synthesis/synthesis-engine.d.ts.map +1 -0
- package/dist/synthesis/synthesis-engine.js +17 -0
- package/dist/synthesis/synthesis-engine.js.map +1 -0
- package/dist/types/analysis.d.ts +1534 -49
- package/dist/types/analysis.d.ts.map +1 -1
- package/dist/types/analysis.js +250 -0
- package/dist/types/analysis.js.map +1 -1
- package/dist/types/core.d.ts +257 -30
- package/dist/types/core.d.ts.map +1 -1
- package/dist/types/core.js +148 -18
- package/dist/types/core.js.map +1 -1
- package/dist/types/creativity.d.ts +2871 -56
- package/dist/types/creativity.d.ts.map +1 -1
- package/dist/types/creativity.js +195 -0
- package/dist/types/creativity.js.map +1 -1
- package/dist/types/index.d.ts +6 -2
- package/dist/types/index.d.ts.map +1 -1
- package/dist/types/index.js +17 -2
- package/dist/types/index.js.map +1 -1
- package/dist/types/learning.d.ts +851 -61
- package/dist/types/learning.d.ts.map +1 -1
- package/dist/types/learning.js +155 -0
- package/dist/types/learning.js.map +1 -1
- package/dist/types/planning.d.ts +2223 -71
- package/dist/types/planning.d.ts.map +1 -1
- package/dist/types/planning.js +190 -0
- package/dist/types/planning.js.map +1 -1
- package/dist/types/reasoning.d.ts +2209 -72
- package/dist/types/reasoning.d.ts.map +1 -1
- package/dist/types/reasoning.js +200 -1
- package/dist/types/reasoning.js.map +1 -1
- package/dist/types/search.d.ts +981 -53
- package/dist/types/search.d.ts.map +1 -1
- package/dist/types/search.js +137 -0
- package/dist/types/search.js.map +1 -1
- package/dist/types/synthesis.d.ts +583 -38
- package/dist/types/synthesis.d.ts.map +1 -1
- package/dist/types/synthesis.js +138 -0
- package/dist/types/synthesis.js.map +1 -1
- package/dist/utils/cache.d.ts +144 -0
- package/dist/utils/cache.d.ts.map +1 -0
- package/dist/utils/cache.js +288 -0
- package/dist/utils/cache.js.map +1 -0
- package/dist/utils/id-generator.d.ts +89 -0
- package/dist/utils/id-generator.d.ts.map +1 -0
- package/dist/utils/id-generator.js +132 -0
- package/dist/utils/id-generator.js.map +1 -0
- package/dist/utils/index.d.ts +11 -0
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/index.js +33 -0
- package/dist/utils/index.js.map +1 -0
- package/dist/utils/logger.d.ts +142 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +248 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/metrics.d.ts +149 -0
- package/dist/utils/metrics.d.ts.map +1 -0
- package/dist/utils/metrics.js +296 -0
- package/dist/utils/metrics.js.map +1 -0
- package/dist/utils/timer.d.ts +7 -0
- package/dist/utils/timer.d.ts.map +1 -0
- package/dist/utils/timer.js +17 -0
- package/dist/utils/timer.js.map +1 -0
- package/dist/utils/validation.d.ts +147 -0
- package/dist/utils/validation.d.ts.map +1 -0
- package/dist/utils/validation.js +275 -0
- package/dist/utils/validation.js.map +1 -0
- package/docs/API.md +411 -0
- package/docs/ARCHITECTURE.md +271 -0
- package/docs/CHANGELOG.md +283 -0
- package/jest.config.js +28 -0
- package/package.json +43 -30
- package/src/analysis/analysis-engine.ts +383 -0
- package/src/core/config.ts +406 -0
- package/src/core/engine.ts +785 -0
- package/src/core/errors.ts +349 -0
- package/src/core/index.ts +12 -0
- package/src/core/pipeline.ts +424 -0
- package/src/core/rate-limiter.ts +155 -0
- package/src/core/session-manager.ts +269 -0
- package/src/creativity/creativity-engine.ts +14 -0
- package/src/index.ts +178 -0
- package/src/learning/learning-engine.ts +14 -0
- package/src/llm/index.ts +10 -0
- package/src/llm/llm-service.ts +285 -0
- package/src/llm/providers/base.ts +146 -0
- package/src/llm/providers/cli.ts +186 -0
- package/src/llm/providers/gemini.ts +201 -0
- package/src/llm/sanitization.ts +178 -0
- package/src/mcp/server.ts +117 -0
- package/src/planning/planning-engine.ts +14 -0
- package/src/reasoning/reasoning-engine.ts +14 -0
- package/src/search/search-engine.ts +333 -0
- package/src/synthesis/synthesis-engine.ts +14 -0
- package/src/types/analysis.ts +337 -0
- package/src/types/core.ts +342 -0
- package/src/types/creativity.ts +268 -0
- package/src/types/index.ts +31 -0
- package/src/types/learning.ts +215 -0
- package/src/types/planning.ts +251 -0
- package/src/types/reasoning.ts +288 -0
- package/src/types/search.ts +192 -0
- package/src/types/synthesis.ts +187 -0
- package/src/utils/cache.ts +363 -0
- package/src/utils/id-generator.ts +135 -0
- package/src/utils/index.ts +22 -0
- package/src/utils/logger.ts +290 -0
- package/src/utils/metrics.ts +380 -0
- package/src/utils/timer.ts +15 -0
- package/src/utils/validation.ts +297 -0
- package/tests/setup.ts +22 -0
- package/tests/unit/cache.test.ts +189 -0
- package/tests/unit/engine.test.ts +179 -0
- package/tests/unit/validation.test.ts +218 -0
- package/tsconfig.json +17 -12
- package/GEMINI.md +0 -68
- package/analysis.ts +0 -1063
- package/creativity.ts +0 -1055
- package/dist/analysis.d.ts +0 -54
- package/dist/analysis.d.ts.map +0 -1
- package/dist/analysis.js +0 -866
- package/dist/analysis.js.map +0 -1
- package/dist/creativity.d.ts +0 -81
- package/dist/creativity.d.ts.map +0 -1
- package/dist/creativity.js +0 -828
- package/dist/creativity.js.map +0 -1
- package/dist/engine.d.ts +0 -90
- package/dist/engine.d.ts.map +0 -1
- package/dist/engine.js +0 -720
- package/dist/engine.js.map +0 -1
- package/dist/examples.d.ts +0 -7
- package/dist/examples.d.ts.map +0 -1
- package/dist/examples.js +0 -506
- package/dist/examples.js.map +0 -1
- package/dist/learning.d.ts +0 -72
- package/dist/learning.d.ts.map +0 -1
- package/dist/learning.js +0 -615
- package/dist/learning.js.map +0 -1
- package/dist/llm-service.d.ts +0 -21
- package/dist/llm-service.d.ts.map +0 -1
- package/dist/llm-service.js +0 -100
- package/dist/llm-service.js.map +0 -1
- package/dist/planning.d.ts +0 -62
- package/dist/planning.d.ts.map +0 -1
- package/dist/planning.js +0 -886
- package/dist/planning.js.map +0 -1
- package/dist/reasoning.d.ts +0 -73
- package/dist/reasoning.d.ts.map +0 -1
- package/dist/reasoning.js +0 -845
- package/dist/reasoning.js.map +0 -1
- package/dist/search-discovery.d.ts +0 -73
- package/dist/search-discovery.d.ts.map +0 -1
- package/dist/search-discovery.js +0 -548
- package/dist/search-discovery.js.map +0 -1
- package/dist/server.js +0 -113
- package/dist/server.js.map +0 -1
- package/dist/types/engine.d.ts +0 -55
- package/dist/types/engine.d.ts.map +0 -1
- package/dist/types/engine.js +0 -3
- package/dist/types/engine.js.map +0 -1
- package/dist/types.d.ts +0 -6
- package/dist/types.d.ts.map +0 -1
- package/dist/types.js.map +0 -1
- package/engine.ts +0 -1009
- package/examples.ts +0 -717
- package/index.ts +0 -106
- package/learning.ts +0 -779
- package/llm-service.ts +0 -120
- package/planning.ts +0 -1101
- package/reasoning.ts +0 -1079
- package/search-discovery.ts +0 -700
- package/server.ts +0 -115
- package/types/analysis.ts +0 -69
- package/types/core.ts +0 -90
- package/types/creativity.ts +0 -72
- package/types/engine.ts +0 -60
- package/types/index.ts +0 -9
- package/types/learning.ts +0 -69
- package/types/planning.ts +0 -85
- package/types/reasoning.ts +0 -92
- package/types/search.ts +0 -58
- package/types/synthesis.ts +0 -43
- package/types.ts +0 -6
- package/dist/{server.d.ts → mcp/server.d.ts} +0 -0
package/src/llm/llm-service.ts
@@ -0,0 +1,285 @@
+/**
+ * LLM Service for GThinking v2.0.0
+ *
+ * Secure LLM service that uses HTTP APIs instead of command-line
+ * to prevent command injection vulnerabilities.
+ *
+ * @module llm/llm-service
+ */
+
+import type { ILLMProvider, LLMMessage, LLMRequestOptions, LLMResponse, LLMStreamChunk } from './providers/base';
+import { GeminiProvider } from './providers/gemini';
+import { CLIProvider } from './providers/cli';
+import { sanitizePrompt, validatePrompt } from './sanitization';
+import { LLMError } from '../core/errors';
+import { logger, createComponentLogger } from '../utils/logger';
+import type { EngineConfig } from '../types/core';
+
+/**
+ * LLM service configuration
+ */
+export interface LLMServiceConfig {
+  provider: 'gemini' | 'claude' | 'openai' | 'cli';
+  apiKey?: string;
+  model?: string;
+  cliCommand?: string;
+  cliArgs?: string[];
+  cliPromptPassingMethod?: 'stdin' | 'arg';
+  cliPromptFlag?: string;
+  timeoutMs?: number;
+  maxRetries?: number;
+}
+
+/**
+ * LLM Service
+ *
+ * Provides a unified interface for interacting with various LLM providers
+ * with built-in security measures including input sanitization.
+ */
+export class LLMService {
+  private provider: ILLMProvider | null = null;
+  private readonly logger = createComponentLogger('LLMService');
+
+  /**
+   * Create a new LLM service
+   *
+   * @param config - LLM service configuration
+   */
+  constructor(config: LLMServiceConfig) {
+    this.initializeProvider(config);
+  }
+
+  /**
+   * Create LLM service from engine configuration
+   *
+   * @param config - Engine configuration
+   * @returns LLM service instance
+   */
+  static fromConfig(config: EngineConfig): LLMService {
+    const apiKey = config.llmApiKey;
+
+    // Only require API key if NOT using CLI provider
+    if (config.llmProvider !== 'cli' && (apiKey === undefined || apiKey === '')) {
+      throw new LLMError('LLM API key not configured', config.llmProvider);
+    }
+
+    return new LLMService({
+      provider: config.llmProvider as any,
+      apiKey,
+      model: config.llmModel,
+      cliCommand: config.llmCliCommand || 'gemini', // Pass the command from config
+      cliArgs: config.llmCliArgs,
+      cliPromptPassingMethod: config.llmCliPromptPassingMethod,
+      cliPromptFlag: config.llmCliPromptFlag,
+      timeoutMs: config.llmTimeoutMs,
+      maxRetries: config.llmMaxRetries,
+    });
+  }
+
+  /**
+   * Complete a prompt with the LLM
+   *
+   * @param prompt - User prompt
+   * @param systemPrompt - Optional system prompt
+   * @param options - Request options
+   * @returns LLM response
+   */
+  async complete(
+    prompt: string,
+    systemPrompt?: string,
+    options: LLMRequestOptions = {}
+  ): Promise<string> {
+    this.ensureProvider();
+
+    // Sanitize inputs
+    const sanitizedPrompt = sanitizePrompt(prompt);
+    const sanitizedSystemPrompt = systemPrompt ? sanitizePrompt(systemPrompt) : undefined;
+
+    // Validate inputs
+    const validation = validatePrompt(sanitizedPrompt);
+    if (!validation.safe) {
+      this.logger.warn('Prompt validation warnings', { issues: validation.issues });
+    }
+
+    const messages: LLMMessage[] = [];
+
+    if (sanitizedSystemPrompt !== undefined) {
+      messages.push({ role: 'system', content: sanitizedSystemPrompt });
+    }
+
+    messages.push({ role: 'user', content: sanitizedPrompt });
+
+    this.logger.debug('Sending completion request', {
+      provider: this.provider!.name,
+      model: options.model,
+    });
+
+    const response = await this.provider!.complete(messages, options);
+
+    return response.content;
+  }
+
+  /**
+   * Stream a prompt completion from the LLM
+   *
+   * @param prompt - User prompt
+   * @param systemPrompt - Optional system prompt
+   * @param options - Request options
+   * @returns Async generator of stream chunks
+   */
+  async *stream(
+    prompt: string,
+    systemPrompt?: string,
+    options: LLMRequestOptions = {}
+  ): AsyncGenerator<string> {
+    this.ensureProvider();
+
+    // Sanitize inputs
+    const sanitizedPrompt = sanitizePrompt(prompt);
+    const sanitizedSystemPrompt = systemPrompt ? sanitizePrompt(systemPrompt) : undefined;
+
+    const messages: LLMMessage[] = [];
+
+    if (sanitizedSystemPrompt !== undefined) {
+      messages.push({ role: 'system', content: sanitizedSystemPrompt });
+    }
+
+    messages.push({ role: 'user', content: sanitizedPrompt });
+
+    this.logger.debug('Starting stream request', {
+      provider: this.provider!.name,
+      model: options.model,
+    });
+
+    for await (const chunk of this.provider!.stream(messages, options)) {
+      if (chunk.content) {
+        yield chunk.content;
+      }
+    }
+  }
+
+  /**
+   * Complete with structured messages
+   *
+   * @param messages - Array of messages
+   * @param options - Request options
+   * @returns LLM response
+   */
+  async completeMessages(
+    messages: LLMMessage[],
+    options: LLMRequestOptions = {}
+  ): Promise<LLMResponse> {
+    this.ensureProvider();
+
+    // Sanitize all messages
+    const sanitizedMessages = messages.map(msg => ({
+      ...msg,
+      content: sanitizePrompt(msg.content),
+    }));
+
+    this.logger.debug('Sending messages completion request', {
+      provider: this.provider!.name,
+      messageCount: sanitizedMessages.length,
+    });
+
+    return this.provider!.complete(sanitizedMessages, options);
+  }
+
+  /**
+   * Check if the service is properly configured
+   *
+   * @returns True if configured correctly
+   */
+  isConfigured(): boolean {
+    return this.provider !== null && this.provider.validateConfig();
+  }
+
+  /**
+   * Get the current provider name
+   *
+   * @returns Provider name or null
+   */
+  getProviderName(): string | null {
+    return this.provider?.name ?? null;
+  }
+
+  /**
+   * Get available models for the current provider
+   *
+   * @returns Array of model names
+   */
+  getAvailableModels(): string[] {
+    return this.provider?.availableModels ?? [];
+  }
+
+  /**
+   * Initialize the LLM provider
+   *
+   * @param config - Service configuration
+   */
+  private initializeProvider(config: LLMServiceConfig): void {
+    switch (config.provider) {
+      case 'gemini':
+        this.provider = new GeminiProvider({
+          apiKey: config.apiKey,
+          model: config.model,
+          timeoutMs: config.timeoutMs,
+          maxRetries: config.maxRetries,
+        });
+        break;
+
+      case 'claude':
+        // Claude provider would be implemented here
+        throw new LLMError('Claude provider not yet implemented', 'claude');
+
+      case 'openai':
+        // OpenAI provider would be implemented here
+        throw new LLMError('OpenAI provider not yet implemented', 'openai');
+
+      case 'cli':
+        // Auto-configure defaults for known tools if not specified
+        let promptMethod = config.cliPromptPassingMethod || 'stdin';
+        let promptFlag = config.cliPromptFlag;
+
+        // Smart defaults for Gemini if using cli command 'gemini'
+        if (config.cliCommand === 'gemini' && !config.cliPromptPassingMethod) {
+          promptMethod = 'arg';
+          promptFlag = '-p';
+        }
+
+        this.provider = new CLIProvider({
+          command: config.cliCommand || 'gemini',
+          args: config.cliArgs,
+          promptPassingMethod: promptMethod,
+          promptFlag: promptFlag,
+          timeoutMs: config.timeoutMs,
+          maxRetries: config.maxRetries
+        });
+        break;
+
+      default:
+        throw new LLMError(`Unknown provider: ${config.provider}`, String(config.provider));
+    }
+
+    this.logger.info('LLM provider initialized', { provider: config.provider });
+  }
+
+  /**
+   * Ensure provider is initialized
+   */
+  private ensureProvider(): void {
+    if (this.provider === null) {
+      throw new LLMError('LLM provider not initialized', 'unknown');
+    }
+  }
+}
+
+/**
+ * Create an LLM service from configuration
+ *
+ * @param config - Engine configuration
+ * @returns LLM service instance
+ */
+export function createLLMService(config: EngineConfig): LLMService {
+  return LLMService.fromConfig(config);
+}
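For orientation, here is a minimal usage sketch of the LLMService added above. It is not part of the diff: the deep import path is an assumption for illustration (the published entry points are not shown here), and the CLI provider is chosen because it is the one variant that needs no API key.

// Hypothetical usage sketch -- the import path below is an assumption,
// not a documented entry point of the published package.
import { LLMService } from 'gthinking/dist/llm/llm-service';

async function main(): Promise<void> {
  // Constructing directly with a service config; the engine itself would
  // instead call LLMService.fromConfig(engineConfig).
  const service = new LLMService({
    provider: 'cli',      // the CLI provider requires no API key
    cliCommand: 'gemini', // triggers the 'arg' + '-p' smart defaults above
  });

  if (!service.isConfigured()) {
    throw new Error('LLM service not configured');
  }

  // complete() sanitizes and validates the prompt before dispatching it
  const answer = await service.complete('Summarize the design of this package.');
  console.log(answer);
}

main().catch(console.error);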
package/src/llm/providers/base.ts
@@ -0,0 +1,146 @@
+/**
+ * Base LLM Provider Interface
+ *
+ * @module llm/providers/base
+ */
+
+import type { LLMError } from '../../core/errors';
+
+/**
+ * LLM message role
+ */
+export type MessageRole = 'system' | 'user' | 'assistant';
+
+/**
+ * LLM message
+ */
+export interface LLMMessage {
+  role: MessageRole;
+  content: string;
+}
+
+/**
+ * LLM request options
+ */
+export interface LLMRequestOptions {
+  model?: string;
+  temperature?: number;
+  maxTokens?: number;
+  topP?: number;
+  frequencyPenalty?: number;
+  presencePenalty?: number;
+  stopSequences?: string[];
+  timeoutMs?: number;
+}
+
+/**
+ * LLM response
+ */
+export interface LLMResponse {
+  content: string;
+  model: string;
+  usage: {
+    promptTokens: number;
+    completionTokens: number;
+    totalTokens: number;
+  };
+  finishReason: string;
+}
+
+/**
+ * LLM stream chunk
+ */
+export interface LLMStreamChunk {
+  content: string;
+  isComplete: boolean;
+}
+
+/**
+ * Base LLM provider interface
+ */
+export interface ILLMProvider {
+  readonly name: string;
+  readonly availableModels: string[];
+
+  complete(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse>;
+  stream(messages: LLMMessage[], options?: LLMRequestOptions): AsyncGenerator<LLMStreamChunk>;
+  validateConfig(): boolean;
+}
+
+/**
+ * Base LLM provider class
+ */
+export abstract class BaseLLMProvider implements ILLMProvider {
+  abstract readonly name: string;
+  abstract readonly availableModels: string[];
+
+  protected apiKey: string;
+  protected baseUrl: string;
+  protected defaultModel: string;
+  protected timeoutMs: number;
+  protected maxRetries: number;
+
+  constructor(config: {
+    apiKey: string;
+    baseUrl: string;
+    defaultModel: string;
+    timeoutMs?: number;
+    maxRetries?: number;
+  }) {
+    this.apiKey = config.apiKey;
+    this.baseUrl = config.baseUrl;
+    this.defaultModel = config.defaultModel;
+    this.timeoutMs = config.timeoutMs ?? 30000;
+    this.maxRetries = config.maxRetries ?? 3;
+  }
+
+  abstract complete(
+    messages: LLMMessage[],
+    options?: LLMRequestOptions
+  ): Promise<LLMResponse>;
+
+  abstract stream(
+    messages: LLMMessage[],
+    options?: LLMRequestOptions
+  ): AsyncGenerator<LLMStreamChunk>;
+
+  validateConfig(): boolean {
+    return this.apiKey.length > 0 && this.baseUrl.length > 0;
+  }
+
+  protected getModel(options?: LLMRequestOptions): string {
+    return options?.model ?? this.defaultModel;
+  }
+
+  protected async withRetry<T>(
+    operation: () => Promise<T>,
+    retries = this.maxRetries
+  ): Promise<T> {
+    let lastError: Error | undefined;
+
+    for (let i = 0; i <= retries; i++) {
+      try {
+        return await operation();
+      } catch (error) {
+        lastError = error as Error;
+
+        // Don't retry on certain errors
+        if (this.isNonRetryableError(error)) {
+          throw error;
+        }
+
+        if (i < retries) {
+          const delay = Math.pow(2, i) * 1000; // Exponential backoff
+          await new Promise(resolve => setTimeout(resolve, delay));
+        }
+      }
+    }
+
+    throw lastError;
+  }
+
+  protected isNonRetryableError(error: unknown): boolean {
+    // Override in subclasses for provider-specific logic
+    return false;
+  }
+}
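To make the contract in base.ts concrete, here is a minimal sketch of a subclass. EchoProvider and its trivial behavior are invented for illustration; the real subclasses in this release are GeminiProvider and CLIProvider.

// Illustrative only: EchoProvider is a hypothetical provider showing what
// BaseLLMProvider requires from subclasses.
import { BaseLLMProvider, type LLMMessage, type LLMRequestOptions, type LLMResponse, type LLMStreamChunk } from './base';

class EchoProvider extends BaseLLMProvider {
  readonly name = 'echo';
  readonly availableModels = ['echo-1'];

  constructor() {
    super({ apiKey: 'none', baseUrl: 'local', defaultModel: 'echo-1' });
  }

  async complete(messages: LLMMessage[], options?: LLMRequestOptions): Promise<LLMResponse> {
    // withRetry() retries failed operations with exponential backoff: 1s, 2s, 4s, ...
    return this.withRetry(async () => {
      const last = messages[messages.length - 1];
      return {
        content: last ? last.content : '',
        model: this.getModel(options), // falls back to defaultModel
        usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
        finishReason: 'stop',
      };
    });
  }

  async *stream(messages: LLMMessage[], options?: LLMRequestOptions): AsyncGenerator<LLMStreamChunk> {
    const response = await this.complete(messages, options);
    yield { content: response.content, isComplete: false };
    yield { content: '', isComplete: true };
  }
}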
package/src/llm/providers/cli.ts
@@ -0,0 +1,186 @@
+
+import { spawn } from 'child_process';
+import { BaseLLMProvider, type LLMMessage, type LLMRequestOptions, type LLMResponse, type LLMStreamChunk } from './base';
+import { LLMError } from '../../core/errors';
+import { logger } from '../../utils/logger';
+
+export interface CLIConfig {
+  command: string; // e.g., 'gemini', 'claude'
+  args?: string[];
+  promptPassingMethod?: 'stdin' | 'arg';
+  promptFlag?: string; // e.g. '-p'
+  timeoutMs?: number;
+  maxRetries?: number;
+}
+
+export class CLIProvider extends BaseLLMProvider {
+  readonly name = 'cli';
+  readonly availableModels = ['local-default']; // CLI usually handles the model selection internally or via args
+  private command: string;
+  private defaultArgs: string[];
+  private promptPassingMethod: 'stdin' | 'arg';
+  private promptFlag?: string;
+
+  constructor(config: CLIConfig) {
+    super({
+      apiKey: 'local-auth', // Placeholder for base class requirement
+      baseUrl: 'local', // Placeholder
+      defaultModel: 'default',
+      timeoutMs: config.timeoutMs,
+      maxRetries: config.maxRetries
+    });
+    this.command = config.command;
+    this.defaultArgs = config.args || [];
+    this.promptPassingMethod = config.promptPassingMethod || 'stdin';
+    this.promptFlag = config.promptFlag;
+  }
+
+  // Helper to extract the actual prompt from messages.
+  // CLIs usually accept a single prompt string, not a full chat history JSON.
+  private getPromptText(messages: LLMMessage[]): string {
+    // Strategy: Concatenate system prompt and user messages
+    return messages.map(m => {
+      if (m.role === 'system') return `System: ${m.content}\n`;
+      if (m.role === 'user') return `User: ${m.content}\n`;
+      return `Assistant: ${m.content}\n`;
+    }).join('\n') + "\nResponse:";
+  }
+
+  // Strategy for simple CLIs (like 'gemini "prompt"') that accept the
+  // prompt as the last argument.
+  private getLatestPrompt(messages: LLMMessage[]): string {
+    const lastUserMsg = [...messages].reverse().find(m => m.role === 'user');
+    return lastUserMsg ? lastUserMsg.content : '';
+  }
+
+  async complete(
+    messages: LLMMessage[],
+    options: LLMRequestOptions = {}
+  ): Promise<LLMResponse> {
+    return this.withRetry(async () => {
+      const prompt = this.getLatestPrompt(messages);
+
+      let args = [...this.defaultArgs];
+
+      // Handle prompt passing logic
+      if (this.promptPassingMethod === 'arg') {
+        if (this.promptFlag) {
+          args.push(this.promptFlag);
+        }
+        args.push(prompt);
+      }
+
+      return new Promise((resolve, reject) => {
+        logger.debug(`Executing CLI command: ${this.command}`, { args, method: this.promptPassingMethod });
+
+        const child = spawn(this.command, args, {
+          env: { ...process.env },
+          shell: false,
+          stdio: ['pipe', 'pipe', 'pipe']
+        });
+
+        let stdout = '';
+        let stderr = '';
+
+        child.stdout.on('data', (data) => {
+          stdout += data.toString();
+        });
+
+        child.stderr.on('data', (data) => {
+          stderr += data.toString();
+        });
+
+        child.on('close', (code) => {
+          if (code !== 0) {
+            logger.error(`CLI execution failed`, { code, stderr });
+            reject(new LLMError(`CLI command failed with code ${code}: ${stderr}`, 'cli'));
+          } else {
+            resolve({
+              content: stdout.trim(),
+              model: 'cli-default',
+              usage: {
+                promptTokens: 0,
+                completionTokens: 0,
+                totalTokens: 0
+              },
+              finishReason: 'stop'
+            });
+          }
+        });
+
+        child.on('error', (err) => {
+          reject(new LLMError(`Failed to spawn CLI process: ${err.message}`, 'cli'));
+        });
+
+        // Only write to stdin if configured to do so
+        if (this.promptPassingMethod === 'stdin') {
+          child.stdin.write(prompt);
+          child.stdin.end();
+        } else {
+          // Even when passing by arg, end stdin to prevent hanging if the process reads stdin
+          child.stdin.end();
+        }
+      });
+    });
+  }
+
+  async *stream(
+    messages: LLMMessage[],
+    options: LLMRequestOptions = {}
+  ): AsyncGenerator<LLMStreamChunk> {
+    const prompt = this.getLatestPrompt(messages);
+    let args = [...this.defaultArgs];
+
+    if (this.promptPassingMethod === 'arg') {
+      if (this.promptFlag) {
+        args.push(this.promptFlag);
+      }
+      args.push(prompt);
+    }
+
+    const child = spawn(this.command, args, {
+      env: { ...process.env },
+      shell: false,
+      stdio: ['pipe', 'pipe', 'pipe']
+    });
+
+    if (this.promptPassingMethod === 'stdin') {
+      child.stdin.write(prompt);
+      child.stdin.end();
+    } else {
+      child.stdin.end();
+    }
+
+    // Handling backpressure properly would require a manual iterator or
+    // ReadableStream-like behavior; for simplicity and effectiveness in this
+    // context, we iterate over the stdout stream and yield data as it arrives.
+
+    try {
+      for await (const chunk of child.stdout) {
+        yield {
+          content: chunk.toString(),
+          isComplete: false
+        };
+      }
+
+      // Wait for process to exit to check for errors
+      await new Promise<void>((resolve, reject) => {
+        child.on('close', (code) => {
+          if (code !== 0) {
+            // We could capture stderr here if needed, but for streaming we just end;
+            // ideally stderr would have been collected separately
+            reject(new LLMError(`CLI stream failed with code ${code}`, 'cli'));
+          } else {
+            resolve();
+          }
+        });
+        child.on('error', reject);
+      });
+
+      yield { content: '', isComplete: true };
+
+    } catch (error) {
+      throw new LLMError(`Stream error: ${(error as Error).message}`, 'cli');
+    }
+  }
+}
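And a short sketch of exercising CLIProvider directly in 'arg' mode. The gemini command and -p flag mirror the defaults wired up in LLMService.initializeProvider; whether a matching binary is on PATH is an assumption about the local environment.

// Sketch: CLIProvider in 'arg' mode, so argv becomes: gemini -p "<prompt>".
// Assumes a `gemini` binary on PATH that accepts `-p <prompt>`.
import { CLIProvider } from './cli';

async function run(): Promise<void> {
  const provider = new CLIProvider({
    command: 'gemini',
    promptPassingMethod: 'arg',
    promptFlag: '-p',
  });

  // complete() resolves with the CLI's trimmed stdout
  const response = await provider.complete([
    { role: 'user', content: 'Say hello in one sentence.' },
  ]);
  console.log(response.content);

  // stream() yields raw stdout chunks, then a final isComplete marker
  for await (const chunk of provider.stream([{ role: 'user', content: 'Count to 3.' }])) {
    if (!chunk.isComplete) process.stdout.write(chunk.content);
  }
}

run().catch(console.error);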