@mandujs/core 0.8.3 → 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mandujs/core",
-   "version": "0.8.3",
+   "version": "0.9.1",
    "description": "Mandu Framework Core - Spec, Generator, Guard, Runtime",
    "type": "module",
    "main": "./src/index.ts",
@@ -45,5 +45,8 @@
      "react": ">=18.0.0",
      "react-dom": ">=18.0.0",
      "zod": ">=3.0.0"
+   },
+   "dependencies": {
+     "ollama": "^0.6.3"
    }
  }
@@ -0,0 +1,120 @@
+ /**
+  * Brain v0.1 - Base LLM Adapter Interface
+  *
+  * Defines the interface for LLM adapters.
+  * Brain works without an LLM (template-based); an LLM only improves suggestion quality.
+  */
+
+ import type {
+   AdapterConfig,
+   AdapterStatus,
+   ChatMessage,
+   CompletionOptions,
+   CompletionResult,
+ } from "../types";
+
+ /**
+  * Base LLM Adapter Interface
+  *
+  * Implementations:
+  * - OllamaAdapter: Local sLLM via Ollama
+  * - (Future) OpenAIAdapter, AnthropicAdapter, etc.
+  */
+ export interface LLMAdapter {
+   /**
+    * Adapter name (e.g., "ollama", "openai")
+    */
+   readonly name: string;
+
+   /**
+    * Check if the adapter is available and configured
+    */
+   checkStatus(): Promise<AdapterStatus>;
+
+   /**
+    * Complete a chat conversation
+    */
+   complete(
+     messages: ChatMessage[],
+     options?: CompletionOptions
+   ): Promise<CompletionResult>;
+
+   /**
+    * Generate a simple completion (convenience method)
+    */
+   generate(prompt: string, options?: CompletionOptions): Promise<string>;
+ }
+
+ /**
+  * Base adapter implementation with common functionality
+  */
+ export abstract class BaseLLMAdapter implements LLMAdapter {
+   abstract readonly name: string;
+   protected config: AdapterConfig;
+
+   constructor(config: AdapterConfig) {
+     this.config = config;
+   }
+
+   abstract checkStatus(): Promise<AdapterStatus>;
+   abstract complete(
+     messages: ChatMessage[],
+     options?: CompletionOptions
+   ): Promise<CompletionResult>;
+
+   /**
+    * Simple generation (wraps complete with a single user message)
+    */
+   async generate(prompt: string, options?: CompletionOptions): Promise<string> {
+     const result = await this.complete(
+       [{ role: "user", content: prompt }],
+       options
+     );
+     return result.content;
+   }
+
+   /**
+    * Get the configured model name
+    */
+   get model(): string {
+     return this.config.model;
+   }
+
+   /**
+    * Get the configured base URL
+    */
+   get baseUrl(): string {
+     return this.config.baseUrl;
+   }
+ }
+
+ /**
+  * No-op adapter for when an LLM is not available.
+  * Returns empty results, allowing Brain to fall back to template-based analysis.
+  */
+ export class NoopAdapter implements LLMAdapter {
+   readonly name = "noop";
+
+   async checkStatus(): Promise<AdapterStatus> {
+     return {
+       available: false,
+       model: null,
+       error: "No LLM adapter configured",
+     };
+   }
+
+   async complete(): Promise<CompletionResult> {
+     return {
+       content: "",
+       usage: {
+         promptTokens: 0,
+         completionTokens: 0,
+         totalTokens: 0,
+       },
+     };
+   }
+
+   async generate(): Promise<string> {
+     return "";
+   }
+ }
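
The interface above lets any backend sit behind the same contract. As a minimal sketch (not part of the package), a test-only adapter could extend BaseLLMAdapter; the EchoAdapter name is hypothetical, and the AdapterStatus/CompletionResult shapes are inferred from the NoopAdapter above.

import { BaseLLMAdapter } from "./base";
import type {
  AdapterStatus,
  ChatMessage,
  CompletionOptions,
  CompletionResult,
} from "../types";

// Hypothetical adapter for tests: echoes the last user message instead of calling an LLM.
class EchoAdapter extends BaseLLMAdapter {
  readonly name = "echo";

  async checkStatus(): Promise<AdapterStatus> {
    return { available: true, model: this.model };
  }

  async complete(
    messages: ChatMessage[],
    _options?: CompletionOptions
  ): Promise<CompletionResult> {
    const content = messages[messages.length - 1]?.content ?? "";
    return {
      content,
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };
  }
}

// Usage (AdapterConfig fields inferred from DEFAULT_OLLAMA_CONFIG in this diff):
// const echo = new EchoAdapter({ baseUrl: "", model: "echo", timeout: 0 });
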
@@ -0,0 +1,8 @@
+ /**
+  * Brain v0.1 - LLM Adapters
+  *
+  * Export all adapter implementations and utilities.
+  */
+
+ export * from "./base";
+ export * from "./ollama";
@@ -0,0 +1,235 @@
+ /**
+  * Brain v0.1 - Ollama LLM Adapter
+  *
+  * Default adapter for local sLLM via Ollama.
+  * Uses the official ollama npm package for reliable API integration.
+  * Recommended models: ministral-3:3b, llama3.2, codellama, mistral
+  */
+
+ import { Ollama } from "ollama";
+ import { BaseLLMAdapter } from "./base";
+ import type {
+   AdapterConfig,
+   AdapterStatus,
+   ChatMessage,
+   CompletionOptions,
+   CompletionResult,
+ } from "../types";
+
+ /**
+  * Default Ollama configuration
+  *
+  * Ministral 3B: a lightweight model that runs even on low-spec PCs
+  * - Runs in CPU mode even with 2GB of VRAM or less
+  * - Sufficient performance for code analysis and suggestions
+  */
+ export const DEFAULT_OLLAMA_CONFIG: AdapterConfig = {
+   baseUrl: "http://localhost:11434",
+   model: "ministral-3:3b",
+   timeout: 30000, // 30 seconds
+ };
+
+ /**
+  * Ollama LLM Adapter
+  *
+  * Connects to a local Ollama instance for sLLM inference.
+  * Falls back gracefully if Ollama is not available.
+  */
+ export class OllamaAdapter extends BaseLLMAdapter {
+   readonly name = "ollama";
+   private client: Ollama;
+
+   constructor(config: Partial<AdapterConfig> = {}) {
+     super({
+       ...DEFAULT_OLLAMA_CONFIG,
+       ...config,
+     });
+
+     this.client = new Ollama({
+       host: this.baseUrl,
+     });
+   }
+
+   /**
+    * Check if Ollama is running and the model is available
+    */
+   async checkStatus(): Promise<AdapterStatus> {
+     try {
+       const response = await this.client.list();
+       const models = response.models || [];
+
+       // Check if the configured model is available
+       const modelAvailable = models.some(
+         (m) =>
+           m.name === this.config.model ||
+           m.name.startsWith(`${this.config.model}:`)
+       );
+
+       if (!modelAvailable) {
+         // Check if any model is available
+         if (models.length > 0) {
+           return {
+             available: true,
+             model: models[0].name,
+             error: `Configured model '${this.config.model}' not found. Using '${models[0].name}' instead.`,
+           };
+         }
+
+         return {
+           available: false,
+           model: null,
+           error: `No models available. Run: ollama pull ${this.config.model}`,
+         };
+       }
+
+       return {
+         available: true,
+         model: this.config.model,
+       };
+     } catch (error) {
+       const errorMessage =
+         error instanceof Error ? error.message : "Unknown error";
+
+       // Check for common connection errors
+       if (
+         errorMessage.includes("ECONNREFUSED") ||
+         errorMessage.includes("fetch failed") ||
+         errorMessage.includes("Unable to connect")
+       ) {
+         return {
+           available: false,
+           model: null,
+           error: "Ollama is not running. Start with: ollama serve",
+         };
+       }
+
+       return {
+         available: false,
+         model: null,
+         error: `Ollama check failed: ${errorMessage}`,
+       };
+     }
+   }
+
+   /**
+    * Complete a chat conversation using Ollama's chat API
+    */
+   async complete(
+     messages: ChatMessage[],
+     options: CompletionOptions = {}
+   ): Promise<CompletionResult> {
+     const { temperature = 0.7, maxTokens = 2048 } = options;
+
+     try {
+       const response = await this.client.chat({
+         model: this.config.model,
+         messages: messages.map((m) => ({
+           role: m.role,
+           content: m.content,
+         })),
+         stream: false,
+         options: {
+           temperature,
+           num_predict: maxTokens,
+         },
+       });
+
+       return {
+         content: response.message?.content || "",
+         usage: {
+           promptTokens: response.prompt_eval_count || 0,
+           completionTokens: response.eval_count || 0,
+           totalTokens:
+             (response.prompt_eval_count || 0) + (response.eval_count || 0),
+         },
+       };
+     } catch (error) {
+       if (error instanceof Error && error.name === "AbortError") {
+         throw new Error("Ollama request timeout");
+       }
+       throw error;
+     }
+   }
+
+   /**
+    * Pull a model from the Ollama registry with a progress callback
+    */
+   async pullModel(
+     modelName?: string,
+     onProgress?: (status: string, completed?: number, total?: number) => void
+   ): Promise<{ success: boolean; error?: string }> {
+     const model = modelName ?? this.config.model;
+
+     try {
+       const stream = await this.client.pull({
+         model,
+         stream: true,
+       });
+
+       for await (const progress of stream) {
+         if (onProgress && progress.status) {
+           onProgress(
+             progress.status,
+             progress.completed,
+             progress.total
+           );
+         }
+       }
+
+       return { success: true };
+     } catch (error) {
+       return {
+         success: false,
+         error: error instanceof Error ? error.message : "Unknown error",
+       };
+     }
+   }
+
+   /**
+    * Check if the Ollama server is reachable
+    */
+   async isServerRunning(): Promise<boolean> {
+     try {
+       await this.client.list();
+       return true;
+     } catch {
+       return false;
+     }
+   }
+
+   /**
+    * List all available models
+    */
+   async listModels(): Promise<string[]> {
+     try {
+       const response = await this.client.list();
+       return (response.models || []).map((m) => m.name);
+     } catch {
+       return [];
+     }
+   }
+
+   /**
+    * Generate embeddings for text
+    */
+   async embed(text: string, model?: string): Promise<number[] | null> {
+     try {
+       const response = await this.client.embed({
+         model: model ?? this.config.model,
+         input: text,
+       });
+       return response.embeddings?.[0] ?? null;
+     } catch {
+       return null;
+     }
+   }
+ }
+
+ /**
+  * Create an Ollama adapter with optional configuration
+  */
+ export function createOllamaAdapter(
+   config?: Partial<AdapterConfig>
+ ): OllamaAdapter {
+   return new OllamaAdapter(config);
+ }
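
As a usage sketch (not shipped in the package), the adapter above can be exercised from a small script; it assumes a local Ollama server at the default address and that the chosen model has already been pulled (e.g. ollama pull llama3.2).

import { createOllamaAdapter } from "./ollama";

async function main(): Promise<void> {
  // Override only the model; baseUrl and timeout fall back to DEFAULT_OLLAMA_CONFIG.
  const adapter = createOllamaAdapter({ model: "llama3.2" });

  const status = await adapter.checkStatus();
  if (!status.available) {
    console.error(status.error ?? "Ollama unavailable");
    return;
  }

  // generate() is inherited from BaseLLMAdapter and wraps complete().
  const answer = await adapter.generate("Summarize what a route guard does.");
  console.log(answer);
}

main();
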
@@ -0,0 +1,324 @@
+ /**
+  * Brain v0.1 - Main Brain Class
+  *
+  * Brain handles two responsibilities:
+  * 1. Doctor (error recovery): Guard failure analysis + minimal patch suggestions
+  * 2. Watch (error prevention): File change warnings (no blocking)
+  *
+  * Core Principles:
+  * - Works without an LLM (template-based); an LLM only improves suggestion quality
+  * - Never blocks operations - only warns and suggests
+  * - Brain failure doesn't affect Core functionality (isolation)
+  * - Auto-apply is disabled by default (experimental flag)
+  */
+
+ import type {
+   BrainConfig,
+   BrainPolicy,
+   EnvironmentInfo,
+   AdapterStatus,
+ } from "./types";
+ import { DEFAULT_BRAIN_POLICY } from "./types";
+ import { type LLMAdapter, NoopAdapter } from "./adapters/base";
+ import { createOllamaAdapter } from "./adapters/ollama";
+ import { SessionMemory, getSessionMemory } from "./memory";
+ import {
+   detectEnvironment,
+   shouldEnableBrain,
+   isolatedBrainExecution,
+ } from "./permissions";
+
+ /**
+  * Brain status
+  */
+ export interface BrainStatus {
+   /** Whether Brain is enabled */
+   enabled: boolean;
+   /** LLM adapter status */
+   adapter: AdapterStatus;
+   /** Environment info */
+   environment: EnvironmentInfo;
+   /** Memory status */
+   memory: {
+     hasData: boolean;
+     sessionDuration: number;
+     idleTime: number;
+   };
+ }
+
+ /**
+  * Brain initialization options
+  */
+ export interface BrainInitOptions {
+   /** Custom configuration */
+   config?: Partial<BrainConfig>;
+   /** Custom policy */
+   policy?: Partial<BrainPolicy>;
+   /** Custom adapter (for testing) */
+   adapter?: LLMAdapter;
+ }
+
+ /**
+  * Main Brain class
+  *
+  * Singleton pattern - use Brain.getInstance() to get the instance.
+  */
+ export class Brain {
+   private static instance: Brain | null = null;
+
+   private config: BrainConfig;
+   private policy: BrainPolicy;
+   private adapter: LLMAdapter;
+   private memory: SessionMemory;
+   private environment: EnvironmentInfo;
+   private _enabled: boolean;
+   private _initialized: boolean = false;
+
+   private constructor(options: BrainInitOptions = {}) {
+     // Detect environment
+     this.environment = detectEnvironment();
+
+     // Set up policy
+     this.policy = {
+       ...DEFAULT_BRAIN_POLICY,
+       ...options.policy,
+     };
+
+     // Set up config
+     this.config = {
+       enabled: true,
+       autoApply: false, // Disabled by default
+       maxRetries: 3,
+       watch: {
+         debounceMs: 300,
+       },
+       ...options.config,
+     };
+
+     // Set up adapter
+     if (options.adapter) {
+       this.adapter = options.adapter;
+     } else if (this.config.adapter) {
+       this.adapter = createOllamaAdapter(this.config.adapter);
+     } else {
+       // Default: Ollama with default settings
+       this.adapter = createOllamaAdapter();
+     }
+
+     // Get session memory
+     this.memory = getSessionMemory();
+
+     // Initially disabled until initialized
+     this._enabled = false;
+   }
+
+   /**
+    * Get the singleton Brain instance
+    */
+   static getInstance(options?: BrainInitOptions): Brain {
+     if (!Brain.instance) {
+       Brain.instance = new Brain(options);
+     }
+     return Brain.instance;
+   }
+
+   /**
+    * Reset the singleton instance (for testing)
+    */
+   static resetInstance(): void {
+     Brain.instance = null;
+   }
+
+   /**
+    * Initialize Brain (async operations like checking the adapter)
+    */
+   async initialize(): Promise<boolean> {
+     if (this._initialized) {
+       return this._enabled;
+     }
+
+     // Check adapter status
+     const adapterStatus = await this.checkAdapterStatus();
+     this.environment.modelAvailable = adapterStatus.available;
+
+     // Determine if Brain should be enabled
+     this._enabled = shouldEnableBrain(this.policy, this.environment);
+
+     this._initialized = true;
+
+     return this._enabled;
+   }
+
+   /**
+    * Check if Brain is enabled
+    */
+   get enabled(): boolean {
+     return this._enabled;
+   }
+
+   /**
+    * Check if Brain has been initialized
+    */
+   get initialized(): boolean {
+     return this._initialized;
+   }
+
+   /**
+    * Get the current adapter
+    */
+   getAdapter(): LLMAdapter {
+     return this.adapter;
+   }
+
+   /**
+    * Get the session memory
+    */
+   getMemory(): SessionMemory {
+     return this.memory;
+   }
+
+   /**
+    * Check adapter status
+    */
+   async checkAdapterStatus(): Promise<AdapterStatus> {
+     const { result } = await isolatedBrainExecution(
+       () => this.adapter.checkStatus(),
+       { available: false, model: null, error: "Check failed" }
+     );
+     return result;
+   }
+
+   /**
+    * Get full Brain status
+    */
+   async getStatus(): Promise<BrainStatus> {
+     const adapterStatus = await this.checkAdapterStatus();
+
+     return {
+       enabled: this._enabled,
+       adapter: adapterStatus,
+       environment: this.environment,
+       memory: {
+         hasData: this.memory.hasData(),
+         sessionDuration: this.memory.getSessionDuration(),
+         idleTime: this.memory.getIdleTime(),
+       },
+     };
+   }
+
+   /**
+    * Check if an LLM is available for enhanced analysis
+    */
+   async isLLMAvailable(): Promise<boolean> {
+     const status = await this.checkAdapterStatus();
+     return status.available;
+   }
+
+   /**
+    * Execute a Brain operation with isolation
+    *
+    * Wraps operations to ensure Brain failures don't affect Core.
+    */
+   async execute<T>(
+     operation: () => Promise<T>,
+     fallback: T
+   ): Promise<{ result: T; error?: Error }> {
+     if (!this._enabled) {
+       return { result: fallback };
+     }
+
+     return isolatedBrainExecution(operation, fallback);
+   }
+
+   /**
+    * Generate a completion using the LLM adapter
+    *
+    * Returns an empty string if the LLM is not available.
+    */
+   async generate(prompt: string): Promise<string> {
+     if (!this._enabled) {
+       return "";
+     }
+
+     const { result } = await isolatedBrainExecution(
+       () => this.adapter.generate(prompt),
+       ""
+     );
+
+     return result;
+   }
+
+   /**
+    * Disable Brain
+    */
+   disable(): void {
+     this._enabled = false;
+   }
+
+   /**
+    * Enable Brain (only if conditions allow)
+    */
+   enable(): boolean {
+     const canEnable = shouldEnableBrain(this.policy, this.environment);
+     if (canEnable) {
+       this._enabled = true;
+     }
+     return this._enabled;
+   }
+
+   /**
+    * Get configuration
+    */
+   getConfig(): Readonly<BrainConfig> {
+     return { ...this.config };
+   }
+
+   /**
+    * Get policy
+    */
+   getPolicy(): Readonly<BrainPolicy> {
+     return { ...this.policy };
+   }
+
+   /**
+    * Get environment info
+    */
+   getEnvironment(): Readonly<EnvironmentInfo> {
+     return { ...this.environment };
+   }
+ }
+
+ /**
+  * Get or create the Brain instance
+  *
+  * Convenience function for accessing the singleton.
+  */
+ export function getBrain(options?: BrainInitOptions): Brain {
+   return Brain.getInstance(options);
+ }
+
+ /**
+  * Initialize Brain and return its enabled status
+  *
+  * Convenience function for initialization.
+  */
+ export async function initializeBrain(
+   options?: BrainInitOptions
+ ): Promise<boolean> {
+   const brain = getBrain(options);
+   return brain.initialize();
+ }
+
+ /**
+  * Check if Brain is available and enabled
+  *
+  * Safe check that handles uninitialized state.
+  */
+ export function isBrainEnabled(): boolean {
+   try {
+     const brain = Brain.getInstance();
+     return brain.enabled;
+   } catch {
+     return false;
+   }
+ }
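
As a usage sketch (not shipped in the package) of the Brain API above: the module path "./brain" is an assumption, since this diff does not show file names, and the prompt text is illustrative.

// Module path assumed; file names are not shown in this diff.
import { getBrain, initializeBrain } from "./brain";

async function run(): Promise<void> {
  // Initialize once; returns false when the environment or adapter disallows Brain.
  const enabled = await initializeBrain({ config: { autoApply: false } });
  if (!enabled) {
    console.log("Brain disabled - falling back to template-based analysis.");
    return;
  }

  const brain = getBrain();

  // execute() isolates failures: errors resolve to the fallback value instead of throwing.
  const { result } = await brain.execute(
    () => brain.getAdapter().generate("Explain this guard failure in one sentence."),
    ""
  );
  console.log(result || "(no LLM output)");
}

run();
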