@mandujs/core 0.8.3 → 0.9.0

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@mandujs/core",
-  "version": "0.8.3",
+  "version": "0.9.0",
   "description": "Mandu Framework Core - Spec, Generator, Guard, Runtime",
   "type": "module",
   "main": "./src/index.ts",
@@ -0,0 +1,120 @@
+/**
+ * Brain v0.1 - Base LLM Adapter Interface
+ *
+ * Defines the interface for LLM adapters.
+ * Brain works without an LLM (template-based); an LLM only improves suggestion quality.
+ */
+
+import type {
+  AdapterConfig,
+  AdapterStatus,
+  ChatMessage,
+  CompletionOptions,
+  CompletionResult,
+} from "../types";
+
+/**
+ * Base LLM Adapter Interface
+ *
+ * Implementations:
+ * - OllamaAdapter: Local sLLM via Ollama
+ * - (Future) OpenAIAdapter, AnthropicAdapter, etc.
+ */
+export interface LLMAdapter {
+  /**
+   * Adapter name (e.g., "ollama", "openai")
+   */
+  readonly name: string;
+
+  /**
+   * Check if the adapter is available and configured
+   */
+  checkStatus(): Promise<AdapterStatus>;
+
+  /**
+   * Complete a chat conversation
+   */
+  complete(
+    messages: ChatMessage[],
+    options?: CompletionOptions
+  ): Promise<CompletionResult>;
+
+  /**
+   * Generate a simple completion (convenience method)
+   */
+  generate(prompt: string, options?: CompletionOptions): Promise<string>;
+}
+
+/**
+ * Base adapter implementation with common functionality
+ */
+export abstract class BaseLLMAdapter implements LLMAdapter {
+  abstract readonly name: string;
+  protected config: AdapterConfig;
+
+  constructor(config: AdapterConfig) {
+    this.config = config;
+  }
+
+  abstract checkStatus(): Promise<AdapterStatus>;
+  abstract complete(
+    messages: ChatMessage[],
+    options?: CompletionOptions
+  ): Promise<CompletionResult>;
+
+  /**
+   * Simple generation (wraps complete with a single user message)
+   */
+  async generate(prompt: string, options?: CompletionOptions): Promise<string> {
+    const result = await this.complete(
+      [{ role: "user", content: prompt }],
+      options
+    );
+    return result.content;
+  }
+
+  /**
+   * Get the configured model name
+   */
+  get model(): string {
+    return this.config.model;
+  }
+
+  /**
+   * Get the configured base URL
+   */
+  get baseUrl(): string {
+    return this.config.baseUrl;
+  }
+}
+
+/**
+ * No-op adapter for when an LLM is not available.
+ * Returns empty results, allowing Brain to fall back to template-based analysis.
+ */
+export class NoopAdapter implements LLMAdapter {
+  readonly name = "noop";
+
+  async checkStatus(): Promise<AdapterStatus> {
+    return {
+      available: false,
+      model: null,
+      error: "No LLM adapter configured",
+    };
+  }
+
+  async complete(): Promise<CompletionResult> {
+    return {
+      content: "",
+      usage: {
+        promptTokens: 0,
+        completionTokens: 0,
+        totalTokens: 0,
+      },
+    };
+  }
+
+  async generate(): Promise<string> {
+    return "";
+  }
+}
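
With the abstract base in place, a new backend only has to supply `checkStatus` and `complete`; `generate` is inherited. Below is a minimal sketch of a custom adapter. The `EchoAdapter` name and behavior are hypothetical, and the `AdapterStatus`/`CompletionResult` shapes are assumed from their usage in this diff (`available`/`model` with an optional `error`; `content` plus token `usage`):

```ts
import { BaseLLMAdapter } from "./base";
import type {
  AdapterStatus,
  ChatMessage,
  CompletionOptions,
  CompletionResult,
} from "../types";

// Hypothetical adapter: echoes the last user message instead of calling an LLM.
// Shows the minimum surface a real backend (OpenAI, Anthropic, ...) must cover.
export class EchoAdapter extends BaseLLMAdapter {
  readonly name = "echo";

  async checkStatus(): Promise<AdapterStatus> {
    // Nothing external to probe, so always report available.
    return { available: true, model: this.model };
  }

  async complete(
    messages: ChatMessage[],
    _options?: CompletionOptions
  ): Promise<CompletionResult> {
    const last = messages[messages.length - 1]?.content ?? "";
    return {
      content: last,
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };
  }
}

// Usage: new EchoAdapter({ baseUrl: "", model: "echo", timeout: 0 })
```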
@@ -0,0 +1,8 @@
+/**
+ * Brain v0.1 - LLM Adapters
+ *
+ * Export all adapter implementations and utilities.
+ */
+
+export * from "./base";
+export * from "./ollama";
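
This barrel file keeps call sites to a single import, e.g. (sibling-module path assumed):

```ts
// One import instead of reaching into ./adapters/base and ./adapters/ollama.
import { NoopAdapter, createOllamaAdapter } from "./adapters"; // path assumed
```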
@@ -0,0 +1,249 @@
+/**
+ * Brain v0.1 - Ollama LLM Adapter
+ *
+ * Default adapter for local sLLM via Ollama.
+ * Recommended models: llama3.2, codellama, mistral
+ */
+
+import { BaseLLMAdapter } from "./base";
+import type {
+  AdapterConfig,
+  AdapterStatus,
+  ChatMessage,
+  CompletionOptions,
+  CompletionResult,
+} from "../types";
+
+/**
+ * Default Ollama configuration
+ *
+ * Ministral 3B: a lightweight model that runs even on low-spec PCs
+ * - Runs in CPU mode even with 2GB of VRAM or less
+ * - Sufficient performance for code analysis/suggestions
+ */
+export const DEFAULT_OLLAMA_CONFIG: AdapterConfig = {
+  baseUrl: "http://localhost:11434",
+  model: "ministral-3:3b",
+  timeout: 30000, // 30 seconds
+};
+
+/**
+ * Ollama API response types
+ */
+interface OllamaTagsResponse {
+  models: Array<{
+    name: string;
+    size: number;
+    modified_at: string;
+  }>;
+}
+
+interface OllamaChatResponse {
+  model: string;
+  message: {
+    role: string;
+    content: string;
+  };
+  done: boolean;
+  eval_count?: number;
+  prompt_eval_count?: number;
+}
+
+/**
+ * Ollama LLM Adapter
+ *
+ * Connects to a local Ollama instance for sLLM inference.
+ * Falls back gracefully if Ollama is not available.
+ */
+export class OllamaAdapter extends BaseLLMAdapter {
+  readonly name = "ollama";
+
+  constructor(config: Partial<AdapterConfig> = {}) {
+    super({
+      ...DEFAULT_OLLAMA_CONFIG,
+      ...config,
+    });
+  }
+
+  /**
+   * Check if Ollama is running and the model is available
+   */
+  async checkStatus(): Promise<AdapterStatus> {
+    try {
+      const controller = new AbortController();
+      const timeoutId = setTimeout(
+        () => controller.abort(),
+        this.config.timeout ?? 5000
+      );
+
+      const response = await fetch(`${this.baseUrl}/api/tags`, {
+        signal: controller.signal,
+      });
+
+      clearTimeout(timeoutId);
+
+      if (!response.ok) {
+        return {
+          available: false,
+          model: null,
+          error: `Ollama API error: ${response.status}`,
+        };
+      }
+
+      const data = (await response.json()) as OllamaTagsResponse;
+      const models = data.models || [];
+
+      // Check if the configured model is available
+      const modelAvailable = models.some(
+        (m) =>
+          m.name === this.config.model ||
+          m.name.startsWith(`${this.config.model}:`)
+      );
+
+      if (!modelAvailable) {
+        // Check if any model is available
+        if (models.length > 0) {
+          return {
+            available: true,
+            model: models[0].name,
+            error: `Configured model '${this.config.model}' not found. Using '${models[0].name}' instead.`,
+          };
+        }
+
+        return {
+          available: false,
+          model: null,
+          error: `No models available. Run: ollama pull ${this.config.model}`,
+        };
+      }
+
+      return {
+        available: true,
+        model: this.config.model,
+      };
+    } catch (error) {
+      if (error instanceof Error && error.name === "AbortError") {
+        return {
+          available: false,
+          model: null,
+          error: "Ollama connection timeout",
+        };
+      }
+
+      const errorMessage =
+        error instanceof Error ? error.message : "Unknown error";
+
+      // Check for common connection errors
+      if (
+        errorMessage.includes("ECONNREFUSED") ||
+        errorMessage.includes("fetch failed")
+      ) {
+        return {
+          available: false,
+          model: null,
+          error: "Ollama is not running. Start with: ollama serve",
+        };
+      }
+
+      return {
+        available: false,
+        model: null,
+        error: `Ollama check failed: ${errorMessage}`,
+      };
+    }
+  }
+
+  /**
+   * Complete a chat conversation using Ollama's chat API
+   */
+  async complete(
+    messages: ChatMessage[],
+    options: CompletionOptions = {}
+  ): Promise<CompletionResult> {
+    const { temperature = 0.7, maxTokens = 2048 } = options;
+
+    try {
+      const controller = new AbortController();
+      const timeoutId = setTimeout(
+        () => controller.abort(),
+        this.config.timeout ?? 30000
+      );
+
+      const response = await fetch(`${this.baseUrl}/api/chat`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+        },
+        body: JSON.stringify({
+          model: this.config.model,
+          messages: messages.map((m) => ({
+            role: m.role,
+            content: m.content,
+          })),
+          stream: false,
+          options: {
+            temperature,
+            num_predict: maxTokens,
+          },
+        }),
+        signal: controller.signal,
+      });
+
+      clearTimeout(timeoutId);
+
+      if (!response.ok) {
+        const errorText = await response.text();
+        throw new Error(`Ollama API error: ${response.status} - ${errorText}`);
+      }
+
+      const data = (await response.json()) as OllamaChatResponse;
+
+      return {
+        content: data.message?.content || "",
+        usage: {
+          promptTokens: data.prompt_eval_count || 0,
+          completionTokens: data.eval_count || 0,
+          totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
+        },
+      };
+    } catch (error) {
+      if (error instanceof Error && error.name === "AbortError") {
+        throw new Error("Ollama request timeout");
+      }
+      throw error;
+    }
+  }
+
+  /**
+   * Pull a model from the Ollama registry
+   */
+  async pullModel(modelName?: string): Promise<boolean> {
+    const model = modelName ?? this.config.model;
+
+    try {
+      const response = await fetch(`${this.baseUrl}/api/pull`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+        },
+        body: JSON.stringify({
+          name: model,
+          stream: false,
+        }),
+      });
+
+      return response.ok;
+    } catch {
+      return false;
+    }
+  }
+}
+
+/**
+ * Create an Ollama adapter with optional configuration
+ */
+export function createOllamaAdapter(
+  config?: Partial<AdapterConfig>
+): OllamaAdapter {
+  return new OllamaAdapter(config);
+}
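
End to end, the adapter is used roughly as sketched below. Only calls defined in this diff appear (`createOllamaAdapter`, `checkStatus`, `pullModel`, and the inherited `generate`); the import path is an assumption, since the diff viewer omits the names of the added files:

```ts
import { createOllamaAdapter } from "./adapters/ollama"; // path assumed

async function demo(): Promise<void> {
  // Override only the fields that differ from DEFAULT_OLLAMA_CONFIG.
  const ollama = createOllamaAdapter({ model: "llama3.2", timeout: 10_000 });

  const status = await ollama.checkStatus();
  if (!status.available) {
    // e.g. "Ollama is not running. Start with: ollama serve"
    console.error(status.error);
    return;
  }

  // Best-effort pull; returns false instead of throwing on failure.
  await ollama.pullModel();

  const answer = await ollama.generate("Explain this guard failure: ...");
  console.log(answer);
}

demo().catch(console.error);
```

Note that `complete` sends `stream: false`, so each call resolves with a single JSON body rather than a token stream.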
@@ -0,0 +1,324 @@
1
+ /**
2
+ * Brain v0.1 - Main Brain Class
3
+ *
4
+ * Brain handles two responsibilities:
5
+ * 1. Doctor (error recovery): Guard failure analysis + minimal patch suggestions
6
+ * 2. Watch (error prevention): File change warnings (no blocking)
7
+ *
8
+ * Core Principles:
9
+ * - Works without LLM (template-based), LLM only improves suggestion quality
10
+ * - Never blocks operations - only warns and suggests
11
+ * - Brain failure doesn't affect Core functionality (isolation)
12
+ * - Auto-apply is disabled by default (experimental flag)
13
+ */
14
+
15
+ import type {
16
+ BrainConfig,
17
+ BrainPolicy,
18
+ EnvironmentInfo,
19
+ AdapterStatus,
20
+ } from "./types";
21
+ import { DEFAULT_BRAIN_POLICY } from "./types";
22
+ import { type LLMAdapter, NoopAdapter } from "./adapters/base";
23
+ import { createOllamaAdapter } from "./adapters/ollama";
24
+ import { SessionMemory, getSessionMemory } from "./memory";
25
+ import {
26
+ detectEnvironment,
27
+ shouldEnableBrain,
28
+ isolatedBrainExecution,
29
+ } from "./permissions";
30
+
31
+ /**
32
+ * Brain status
33
+ */
34
+ export interface BrainStatus {
35
+ /** Whether Brain is enabled */
36
+ enabled: boolean;
37
+ /** LLM adapter status */
38
+ adapter: AdapterStatus;
39
+ /** Environment info */
40
+ environment: EnvironmentInfo;
41
+ /** Memory status */
42
+ memory: {
43
+ hasData: boolean;
44
+ sessionDuration: number;
45
+ idleTime: number;
46
+ };
47
+ }
48
+
49
+ /**
50
+ * Brain initialization options
51
+ */
52
+ export interface BrainInitOptions {
53
+ /** Custom configuration */
54
+ config?: Partial<BrainConfig>;
55
+ /** Custom policy */
56
+ policy?: Partial<BrainPolicy>;
57
+ /** Custom adapter (for testing) */
58
+ adapter?: LLMAdapter;
59
+ }
60
+
61
+ /**
62
+ * Main Brain class
63
+ *
64
+ * Singleton pattern - use Brain.getInstance() to get the instance.
65
+ */
66
+ export class Brain {
67
+ private static instance: Brain | null = null;
68
+
69
+ private config: BrainConfig;
70
+ private policy: BrainPolicy;
71
+ private adapter: LLMAdapter;
72
+ private memory: SessionMemory;
73
+ private environment: EnvironmentInfo;
74
+ private _enabled: boolean;
75
+ private _initialized: boolean = false;
76
+
77
+ private constructor(options: BrainInitOptions = {}) {
78
+ // Detect environment
79
+ this.environment = detectEnvironment();
80
+
81
+ // Set up policy
82
+ this.policy = {
83
+ ...DEFAULT_BRAIN_POLICY,
84
+ ...options.policy,
85
+ };
86
+
87
+ // Set up config
88
+ this.config = {
89
+ enabled: true,
90
+ autoApply: false, // Disabled by default
91
+ maxRetries: 3,
92
+ watch: {
93
+ debounceMs: 300,
94
+ },
95
+ ...options.config,
96
+ };
97
+
98
+ // Set up adapter
99
+ if (options.adapter) {
100
+ this.adapter = options.adapter;
101
+ } else if (this.config.adapter) {
102
+ this.adapter = createOllamaAdapter(this.config.adapter);
103
+ } else {
104
+ // Default: Ollama with default settings
105
+ this.adapter = createOllamaAdapter();
106
+ }
107
+
108
+ // Get session memory
109
+ this.memory = getSessionMemory();
110
+
111
+ // Initially disabled until initialized
112
+ this._enabled = false;
113
+ }
114
+
115
+ /**
116
+ * Get the singleton Brain instance
117
+ */
118
+ static getInstance(options?: BrainInitOptions): Brain {
119
+ if (!Brain.instance) {
120
+ Brain.instance = new Brain(options);
121
+ }
122
+ return Brain.instance;
123
+ }
124
+
125
+ /**
126
+ * Reset the singleton instance (for testing)
127
+ */
128
+ static resetInstance(): void {
129
+ Brain.instance = null;
130
+ }
131
+
132
+ /**
133
+ * Initialize Brain (async operations like checking adapter)
134
+ */
135
+ async initialize(): Promise<boolean> {
136
+ if (this._initialized) {
137
+ return this._enabled;
138
+ }
139
+
140
+ // Check adapter status
141
+ const adapterStatus = await this.checkAdapterStatus();
142
+ this.environment.modelAvailable = adapterStatus.available;
143
+
144
+ // Determine if Brain should be enabled
145
+ this._enabled = shouldEnableBrain(this.policy, this.environment);
146
+
147
+ this._initialized = true;
148
+
149
+ return this._enabled;
150
+ }
151
+
152
+ /**
153
+ * Check if Brain is enabled
154
+ */
155
+ get enabled(): boolean {
156
+ return this._enabled;
157
+ }
158
+
159
+ /**
160
+ * Check if Brain has been initialized
161
+ */
162
+ get initialized(): boolean {
163
+ return this._initialized;
164
+ }
165
+
166
+ /**
167
+ * Get the current adapter
168
+ */
169
+ getAdapter(): LLMAdapter {
170
+ return this.adapter;
171
+ }
172
+
173
+ /**
174
+ * Get the session memory
175
+ */
176
+ getMemory(): SessionMemory {
177
+ return this.memory;
178
+ }
179
+
180
+ /**
181
+ * Check adapter status
182
+ */
183
+ async checkAdapterStatus(): Promise<AdapterStatus> {
184
+ const { result } = await isolatedBrainExecution(
185
+ () => this.adapter.checkStatus(),
186
+ { available: false, model: null, error: "Check failed" }
187
+ );
188
+ return result;
189
+ }
190
+
191
+ /**
192
+ * Get full Brain status
193
+ */
194
+ async getStatus(): Promise<BrainStatus> {
195
+ const adapterStatus = await this.checkAdapterStatus();
196
+
197
+ return {
198
+ enabled: this._enabled,
199
+ adapter: adapterStatus,
200
+ environment: this.environment,
201
+ memory: {
202
+ hasData: this.memory.hasData(),
203
+ sessionDuration: this.memory.getSessionDuration(),
204
+ idleTime: this.memory.getIdleTime(),
205
+ },
206
+ };
207
+ }
208
+
209
+ /**
210
+ * Check if LLM is available for enhanced analysis
211
+ */
212
+ async isLLMAvailable(): Promise<boolean> {
213
+ const status = await this.checkAdapterStatus();
214
+ return status.available;
215
+ }
216
+
217
+ /**
218
+ * Execute a Brain operation with isolation
219
+ *
220
+ * Wraps operations to ensure Brain failures don't affect Core.
221
+ */
222
+ async execute<T>(
223
+ operation: () => Promise<T>,
224
+ fallback: T
225
+ ): Promise<{ result: T; error?: Error }> {
226
+ if (!this._enabled) {
227
+ return { result: fallback };
228
+ }
229
+
230
+ return isolatedBrainExecution(operation, fallback);
231
+ }
232
+
233
+ /**
234
+ * Generate a completion using the LLM adapter
235
+ *
236
+ * Returns empty string if LLM is not available.
237
+ */
238
+ async generate(prompt: string): Promise<string> {
239
+ if (!this._enabled) {
240
+ return "";
241
+ }
242
+
243
+ const { result } = await isolatedBrainExecution(
244
+ () => this.adapter.generate(prompt),
245
+ ""
246
+ );
247
+
248
+ return result;
249
+ }
250
+
251
+ /**
252
+ * Disable Brain
253
+ */
254
+ disable(): void {
255
+ this._enabled = false;
256
+ }
257
+
258
+ /**
259
+ * Enable Brain (only if conditions allow)
260
+ */
261
+ enable(): boolean {
262
+ const canEnable = shouldEnableBrain(this.policy, this.environment);
263
+ if (canEnable) {
264
+ this._enabled = true;
265
+ }
266
+ return this._enabled;
267
+ }
268
+
269
+ /**
270
+ * Get configuration
271
+ */
272
+ getConfig(): Readonly<BrainConfig> {
273
+ return { ...this.config };
274
+ }
275
+
276
+ /**
277
+ * Get policy
278
+ */
279
+ getPolicy(): Readonly<BrainPolicy> {
280
+ return { ...this.policy };
281
+ }
282
+
283
+ /**
284
+ * Get environment info
285
+ */
286
+ getEnvironment(): Readonly<EnvironmentInfo> {
287
+ return { ...this.environment };
288
+ }
289
+ }
290
+
291
+ /**
292
+ * Get or create the Brain instance
293
+ *
294
+ * Convenience function for accessing the singleton.
295
+ */
296
+ export function getBrain(options?: BrainInitOptions): Brain {
297
+ return Brain.getInstance(options);
298
+ }
299
+
300
+ /**
301
+ * Initialize Brain and return its enabled status
302
+ *
303
+ * Convenience function for initialization.
304
+ */
305
+ export async function initializeBrain(
306
+ options?: BrainInitOptions
307
+ ): Promise<boolean> {
308
+ const brain = getBrain(options);
309
+ return brain.initialize();
310
+ }
311
+
312
+ /**
313
+ * Check if Brain is available and enabled
314
+ *
315
+ * Safe check that handles uninitialized state.
316
+ */
317
+ export function isBrainEnabled(): boolean {
318
+ try {
319
+ const brain = Brain.getInstance();
320
+ return brain.enabled;
321
+ } catch {
322
+ return false;
323
+ }
324
+ }
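
Tying it together, a consumer would initialize once and route optional LLM work through the isolation helpers, so a dead Ollama instance degrades to a fallback value instead of throwing. Everything below exists in this diff except the import path, which is assumed:

```ts
import { getBrain, initializeBrain } from "./brain"; // path assumed

async function suggestFix(errorLog: string): Promise<string> {
  // Idempotent: after the first call this just returns the cached flag.
  const enabled = await initializeBrain();
  if (!enabled) return ""; // caller falls back to template-based analysis

  const brain = getBrain();

  // execute() wraps the operation in isolatedBrainExecution, so an adapter
  // failure surfaces as the fallback ("") rather than a thrown error.
  const { result } = await brain.execute(
    () => brain.getAdapter().generate(`Suggest a minimal patch:\n${errorLog}`),
    ""
  );
  return result;
}
```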