@mtkn/mega-agent 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,564 @@
# @mtkn/mega-agent

[![npm version](https://img.shields.io/npm/v/@mtkn/mega-agent.svg)](https://www.npmjs.com/package/@mtkn/mega-agent)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)

AI-powered agent infrastructure for Express + Prisma backends. Drop-in chat API with tool calling, long-term memory, incident detection, workflow automation, and more.

## Features

- **Multi-provider LLM** — Anthropic, OpenAI, Gemini, or custom endpoints
- **Tool calling** with read/write approval workflow
- **Long-term memory** — embedding + Qdrant vector search (optional)
- **Chat API** with SSE streaming and 4 chat modes
- **Incident detection** — anomaly detection with severity levels
- **Workflow automation** — schedule, event, threshold, and manual triggers
- **Artifact storage** — reports, tables, charts, code, images, datasets
- **MCP support** — Model Context Protocol for external data sources
- **Prisma persistence** — 12 models, fully typed

## Quick Start

```bash
npm install @mtkn/mega-agent
```

```typescript
import { createMegaAgent } from '@mtkn/mega-agent';

const agent = await createMegaAgent({
  prisma,
  logger,
  llm: {
    providers: [
      { provider: 'anthropic', apiKey: 'sk-ant-...', defaultModel: 'claude-sonnet-4-20250514' },
    ],
  },
});

app.use('/api/chat', agent.createChatRouter(authMiddleware));
```

## Requirements

| Requirement | Version |
|---|---|
| Node.js | >= 18.17 |
| TypeScript | 5.x |
| Express | 4.x |
| Prisma | 6.x |
| PostgreSQL | 14+ |

**Optional services:**

| Service | Purpose |
|---|---|
| Qdrant | Vector database for semantic search (memory system) |
| OpenAI API | Embedding model (`text-embedding-3-small`) for the memory system |
| Redis + BullMQ | Job queue for workflows |

## Installation

```bash
npm install @mtkn/mega-agent

# Peer dependencies (should already be in your project)
npm install @prisma/client express
npm install -D prisma @types/express typescript
```

## Prisma Schema Setup

The package requires 16 enums and 12 models in your Prisma schema.

### Option A: Automatic Sync (Recommended)

```bash
npx mega-agent-prisma-sync --target prisma/schema.prisma
```

This copies all mega-agent enums and models into your schema between marker comments. Running it again updates only the marked section.

### Option B: Manual Copy

Copy all enums and models from the reference schema:

```bash
# View the reference schema
cat node_modules/@mtkn/mega-agent/prisma/schema.prisma
```

### Add User Relations

The AI models that belong to a user carry a `userId` field. Add the corresponding relations to your `User` model:

```prisma
model User {
  // ... your existing fields ...

  // AI Agent relations
  aiMemories          AiMemory[]
  aiMemorySuggestions AiMemorySuggestion[]
  aiChats             AiChat[]
  aiArtifacts         AiArtifact[]
  aiMcpSources        AiMcpSource[]
  aiWorkflows         AiWorkflow[]
  aiIncidents         AiIncident[]
  aiTimelineEvents    AiTimelineEvent[]
  aiReports           AiReport[]
  aiPreferences       AiUserPreferences?
}
```

And add the reverse relation in each AI model:

```prisma
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
```

### Add Notification Model

The workflow service uses `prisma.notification.create()`. Your schema needs:

```prisma
model Notification {
  id        Int      @id @default(autoincrement())
  userId    Int      @map("user_id")
  type      String
  title     String
  message   String
  isRead    Boolean  @default(false) @map("is_read")
  createdAt DateTime @default(now()) @map("created_at")
  user      User     @relation(fields: [userId], references: [id], onDelete: Cascade)

  @@map("notifications")
}
```

### Run Migration

```bash
npx prisma migrate dev --name add_mega_agent_models
npx prisma generate
```

## Configuration

```typescript
interface MegaAgentConfig {
  /** Prisma client instance */
  prisma: PrismaClient;

  /** Logger with info/error/warn/debug methods */
  logger: LoggerLike;

  /** LLM provider configs (at least 1 required) */
  llm: {
    providers: Array<{
      provider: 'anthropic' | 'openai' | 'gemini' | 'custom';
      apiKey: string;
      defaultModel?: string;
      baseUrl?: string; // only for 'custom'
    }>;
  };

  /** Memory system — omit to disable */
  memory?: {
    embedding: {
      apiKey: string;
      provider?: 'openai';   // default: 'openai'
      defaultModel?: string; // default: 'text-embedding-3-small'
    };
    qdrant: {
      host: string;          // default: 'localhost'
      port: number;          // default: 6333
      apiKey?: string;
    };
  };

  /** Agent identity (for system prompt customization) */
  agent?: {
    agentName?: string;
    agentDescription?: string;
    customPrompt?: string;
  };
}
```
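
If you route requests through an OpenAI-compatible gateway or a self-hosted model, the `custom` provider entry is where `baseUrl` comes in. A minimal sketch; the URL, environment variable name, and model name below are placeholders, not values shipped by the package:

```typescript
// Sketch: an LLM provider entry for an OpenAI-compatible gateway.
// CUSTOM_LLM_API_KEY, the URL, and the model name are placeholders.
const customProvider = {
  provider: 'custom' as const,
  apiKey: process.env.CUSTOM_LLM_API_KEY ?? '',
  baseUrl: 'https://llm.internal.example.com/v1',
  defaultModel: 'my-hosted-model',
};

// Pass it along with (or instead of) the hosted providers:
// createMegaAgent({ prisma, logger, llm: { providers: [customProvider] } })
```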

## Usage

### Basic Setup

```typescript
import type { Express } from 'express';
import { createMegaAgent } from '@mtkn/mega-agent';
import type { LLMProviderConfig } from '@mtkn/mega-agent';
import { prisma } from './lib/prisma-client.js';
import { logger } from './lib/logger.js';
import { authMiddleware } from './middleware/auth.js';

export async function initChatModule(app: Express): Promise<void> {
  const providers: LLMProviderConfig[] = [];

  if (process.env.ANTHROPIC_API_KEY) {
    providers.push({
      provider: 'anthropic' as const,
      apiKey: process.env.ANTHROPIC_API_KEY,
      defaultModel: process.env.ANTHROPIC_DEFAULT_MODEL || 'claude-sonnet-4-20250514',
    });
  }

  if (process.env.OPENAI_API_KEY) {
    providers.push({
      provider: 'openai' as const,
      apiKey: process.env.OPENAI_API_KEY,
      defaultModel: process.env.OPENAI_DEFAULT_MODEL || 'gpt-4o',
    });
  }

  // Memory config (optional — requires OpenAI key + Qdrant)
  const memory = process.env.OPENAI_API_KEY
    ? {
        embedding: {
          apiKey: process.env.OPENAI_API_KEY,
          provider: 'openai' as const,
          defaultModel: process.env.EMBEDDING_MODEL || 'text-embedding-3-small',
        },
        qdrant: {
          host: process.env.QDRANT_HOST || 'localhost',
          port: parseInt(process.env.QDRANT_PORT || '6333', 10),
          apiKey: process.env.QDRANT_API_KEY || undefined,
        },
      }
    : undefined;

  const agent = await createMegaAgent({
    prisma,
    logger,
    llm: { providers },
    memory,
    agent: {
      agentName: 'ProductionAssistant',
      agentDescription: 'Manufacturing management AI assistant',
    },
  });

  // Register domain-specific tools (see below)
  registerMyTools();

  // Mount chat routes
  app.use('/api/chat', agent.createChatRouter(authMiddleware));
}
```
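
To wire this into an application, call `initChatModule` during startup. A rough sketch, assuming the module above lives at the hypothetical path `./chat-module.js`:

```typescript
// Sketch: bootstrap an Express app and mount the chat module defined above.
import express from 'express';
import { initChatModule } from './chat-module.js'; // hypothetical path to the file above

const app = express();
app.use(express.json());

await initChatModule(app);

app.listen(3000, () => {
  console.log('API listening on http://localhost:3000');
});
```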

### Auth Middleware

The chat router requires an auth middleware that sets `req.user` with at least an `id` field:

```typescript
import type { Request, Response, NextFunction } from 'express';

type AuthMiddleware = (req: Request, res: Response, next: NextFunction) => void;

// Your middleware must set:
// req.user = { id: number, ... }
```
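
For illustration only, a middleware satisfying that contract could look like the sketch below; the `x-user-id` header check is a stand-in for your real session or JWT validation.

```typescript
import type { Request, Response, NextFunction } from 'express';

// Illustrative stand-in: trust an `x-user-id` header. Replace with real auth.
export function authMiddleware(req: Request, res: Response, next: NextFunction): void {
  const id = Number(req.headers['x-user-id']);
  if (!Number.isInteger(id) || id <= 0) {
    res.status(401).json({ success: false, error: 'Unauthorized' });
    return;
  }
  // The chat router only needs `id`; add whatever else your app attaches to req.user.
  (req as Request & { user?: { id: number } }).user = { id };
  next();
}
```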

### Registering Domain Tools

Tools give the AI agent access to your application's data and actions. The package comes with 2 built-in tools (`search_memory`, `create_memory`) when memory is enabled. Register your own domain tools after calling `createMegaAgent()`:

```typescript
import { registerTools } from '@mtkn/mega-agent';
import type { ToolDefinition } from '@mtkn/mega-agent';

const tools: ToolDefinition[] = [
  // Read tools — auto-executed, no user approval needed
  {
    name: 'list_products',
    description: 'List products with optional status filter',
    category: 'read',
    inputSchema: {
      type: 'object',
      properties: {
        status: { type: 'string', description: 'Filter by status', enum: ['ACTIVE', 'ARCHIVED'] },
        limit: { type: 'number', description: 'Max results (default 20)' },
      },
    },
    handler: async (args, userId) => {
      return prisma.product.findMany({
        where: args.status ? { status: args.status as string } : undefined,
        take: (args.limit as number) || 20,
      });
    },
  },

  // Write tools — require user approval before execution
  {
    name: 'create_order',
    description: 'Create a new order. Requires approval.',
    category: 'write',
    inputSchema: {
      type: 'object',
      properties: {
        productId: { type: 'number', description: 'Product ID' },
        quantity: { type: 'number', description: 'Order quantity' },
      },
      required: ['productId', 'quantity'],
    },
    handler: async (args, userId) => {
      return prisma.order.create({
        data: {
          productId: args.productId as number,
          quantity: args.quantity as number,
          userId,
        },
      });
    },
  },
];

export function registerMyTools(): void {
  registerTools(tools);
}
```

**Tool Categories:**
- `read` — Auto-executed by the agent, no approval needed
- `write` — Agent pauses and waits for user approval via `POST /:id/approve`

### Using Services Independently

You can use individual services without the full chat stack:

```typescript
import { initLLMService, LLMService } from '@mtkn/mega-agent';
import type { LLMProviderConfig } from '@mtkn/mega-agent';

// Initialize the LLM service once
const providers: LLMProviderConfig[] = [
  { provider: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY!, defaultModel: 'claude-sonnet-4-20250514' },
];
initLLMService({ config: providers, logger });

// Use it anywhere
const llm = new LLMService();
const response = await llm.complete({
  messages: [
    { role: 'system', content: 'You are a risk analyst.' },
    { role: 'user', content: 'Analyze this production data...' },
  ],
  temperature: 0.3,
});
```

Other services (after `createMegaAgent()` has been called):

```typescript
import {
  IncidentService, getIncidentServiceDeps,
  WorkflowService, getWorkflowServiceDeps,
  ArtifactService, getArtifactServiceDeps,
} from '@mtkn/mega-agent';

// Check if dependencies are initialized
if (getIncidentServiceDeps()) {
  const incidents = new IncidentService();
  // Use incident detection...
}

if (getWorkflowServiceDeps()) {
  const workflows = new WorkflowService();
  // Use workflow automation...
}
```
371
+
372
+ ## API Endpoints
373
+
374
+ The chat router creates the following endpoints:
375
+
376
+ | Method | Path | Description |
377
+ |--------|------|-------------|
378
+ | `POST` | `/` | Create a new chat session |
379
+ | `GET` | `/` | List user's chats (paginated) |
380
+ | `GET` | `/:id` | Get chat with full message history |
381
+ | `PATCH` | `/:id` | Update chat (title, mode, model, etc.) |
382
+ | `DELETE` | `/:id` | Delete chat and all messages |
383
+ | `POST` | `/:id/messages` | Send message (agent responds) |
384
+ | `POST` | `/:id/approve` | Approve a pending write tool call |
385
+ | `POST` | `/:id/reject` | Reject a pending write tool call |
386
+
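For example, a client can create a session and keep its id for later `/:id/...` calls. The sketch below assumes the `{ success, data }` response envelope shown later in this section; the base URL and bearer token are placeholders for whatever your app uses:

```typescript
// Sketch: create a chat session from a TypeScript client.
const res = await fetch('https://your-api.example.com/api/chat', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: 'Bearer <your token>', // whatever your auth middleware expects
  },
  body: JSON.stringify({ mode: 'EXPLORE', title: 'Production questions' }),
});

const body = await res.json();
const chatId: string = body.data.id; // reuse for /:id/messages, /:id/approve, ...
```
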
### Send Message

```http
POST /api/chat/:id/messages
Content-Type: application/json

{
  "content": "Show me today's production stats",
  "model": "claude-sonnet-4-20250514",  // optional override
  "stream": true                        // optional SSE streaming
}
```

**Response (non-streaming):**

```json
{
  "success": true,
  "data": {
    "userMessage": { "id": "...", "role": "USER", "content": "..." },
    "assistantMessage": { "id": "...", "role": "ASSISTANT", "content": "..." },
    "memories": [],
    "tokens": { "prompt": 1200, "completion": 350, "total": 1550 },
    "latencyMs": 2340
  }
}
```

**SSE Streaming:**

When `stream: true`, the response uses Server-Sent Events:

```
Content-Type: text/event-stream

data: {"type":"chunk","content":"Here are"}
data: {"type":"chunk","content":" today's stats..."}
data: {"type":"tool_call","name":"get_dashboard","args":{}}
data: {"type":"done","tokens":{"prompt":1200,"completion":350}}
```
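
One way to consume that stream from a Node/TypeScript client is a plain `fetch` reader, since `EventSource` cannot send a POST body. A sketch that assumes the event shapes shown above, the placeholder base URL used earlier, and the `chatId` from the previous example:

```typescript
// Sketch: read the SSE stream and print chunks as they arrive.
const res = await fetch(`https://your-api.example.com/api/chat/${chatId}/messages`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: 'Bearer <your token>' },
  body: JSON.stringify({ content: "Show me today's production stats", stream: true }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffer = '';

while (true) {
  const { value, done } = await reader.read();
  if (done || !value) break;
  buffer += decoder.decode(value, { stream: true });

  // SSE events are separated by blank lines; payload lines start with "data: ".
  const events = buffer.split('\n\n');
  buffer = events.pop() ?? '';
  for (const line of events.flatMap((e) => e.split('\n'))) {
    if (!line.startsWith('data: ')) continue;
    const event = JSON.parse(line.slice('data: '.length));
    if (event.type === 'chunk') process.stdout.write(event.content);
    if (event.type === 'done') console.log('\ntokens:', event.tokens);
  }
}
```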

## Chat Modes

Each chat session has a mode that shapes the agent's behavior:

| Mode | Description |
|------|-------------|
| `EXPLORE` | Analysis, insights, data exploration. Agent focuses on understanding and explaining. |
| `GENERATE` | Content creation. Agent produces structured output (reports, tables, code). |
| `EXECUTE` | Action-oriented. Agent actively uses tools to perform tasks. |
| `AUTOMATE` | Workflow setup. Agent helps create triggers, conditions, and automated actions. |

Set the mode when creating or updating a chat:

```http
POST /api/chat
{ "mode": "EXECUTE", "title": "Production Management" }
```

## Tool Approval Flow

```
User sends message
        │
        ▼
Agent processes with LLM
        │
        ├── Uses read tool → auto-executes → continues
        │
        └── Uses write tool → pauses
                │
                ▼
        Returns pending tool call to client
                │
                ├── POST /:id/approve → executes tool → agent continues
                │
                └── POST /:id/reject → agent informed → responds accordingly
```
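
On the client side, the approval step is a plain POST. The sketch below assumes no request body is needed beyond the chat id in the path; check the pending tool call payload your client receives for anything extra it may expect:

```typescript
// Sketch: approve or reject the pending write tool call for a chat.
async function resolvePendingToolCall(chatId: string, approve: boolean): Promise<void> {
  const action = approve ? 'approve' : 'reject';
  await fetch(`https://your-api.example.com/api/chat/${chatId}/${action}`, {
    method: 'POST',
    headers: { Authorization: 'Bearer <your token>' },
  });
}
```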

## Prisma Models

The package uses 12 models and 16 enums:

| Model | Purpose |
|-------|---------|
| `AiMemory` | Long-term memory with vector embeddings |
| `AiMemorySuggestion` | AI-suggested memories for user review |
| `AiChat` | Chat sessions with mode and settings |
| `AiChatMessage` | Individual messages with token tracking |
| `AiArtifact` | Generated content (reports, charts, code, etc.) |
| `AiIncident` | Detected anomalies with severity and status |
| `AiMcpSource` | Model Context Protocol data sources |
| `AiWorkflow` | Automated workflows with triggers and actions |
| `AiWorkflowExecution` | Workflow execution history |
| `AiReport` | Generated reports (daily, weekly, monthly) |
| `AiTimelineEvent` | Event log for audit trail |
| `AiUserPreferences` | Per-user AI settings |

## Environment Variables

| Variable | Required | Description | Default |
|----------|----------|-------------|---------|
| `ANTHROPIC_API_KEY` | At least 1 LLM key | Anthropic API key | — |
| `ANTHROPIC_DEFAULT_MODEL` | No | Default Anthropic model | `claude-sonnet-4-20250514` |
| `OPENAI_API_KEY` | At least 1 LLM key | OpenAI API key | — |
| `OPENAI_DEFAULT_MODEL` | No | Default OpenAI model | `gpt-4o` |
| `EMBEDDING_MODEL` | No | OpenAI embedding model | `text-embedding-3-small` |
| `QDRANT_HOST` | No | Qdrant server host | `localhost` |
| `QDRANT_PORT` | No | Qdrant server port | `6333` |
| `QDRANT_API_KEY` | No | Qdrant auth key | — |

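Since at least one LLM key is required, an app-level guard can fail fast with a clear message before calling `createMegaAgent()`. A sketch:

```typescript
// Sketch: verify at boot that at least one supported LLM key is present.
const hasLlmKey = Boolean(process.env.ANTHROPIC_API_KEY || process.env.OPENAI_API_KEY);
if (!hasLlmKey) {
  throw new Error(
    'mega-agent: set ANTHROPIC_API_KEY or OPENAI_API_KEY (at least one LLM provider is required)',
  );
}
```
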
## MegaAgent Return Type

```typescript
interface MegaAgent {
  /** Access to service instances */
  services: {
    chat: ChatService;
    llm: LLMService;
    memory: MemoryService | null; // null if memory not configured
    embedding: EmbeddingService | null;
    qdrant: QdrantService | null;
  };

  /** Prompt settings */
  promptOptions: {
    agentName?: string;
    agentDescription?: string;
    customPrompt?: string;
  };

  /** Create Express router with all chat endpoints */
  createChatRouter(authMiddleware: AuthMiddleware): Router;
}
```
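
The nullable services make it straightforward to degrade gracefully when memory is not configured. A sketch using the `agent` and `logger` from the Basic Setup example above:

```typescript
// Sketch: optional services are null when not configured, so guard before use.
if (agent.services.memory) {
  logger.info('Memory system enabled (embeddings + Qdrant)');
} else {
  logger.warn('Memory not configured; chat runs without long-term memory');
}
```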

## Troubleshooting

### `Module '@prisma/client' has no exported member 'AiChat'`

Prisma client needs to be regenerated:

```bash
npx prisma generate
```

If using a monorepo with linked packages, ensure there's only one `@prisma/client` instance. Add a postinstall script to deduplicate:

```json
{
  "scripts": {
    "postinstall": "rm -rf node_modules/@mtkn/mega-agent/node_modules/@prisma node_modules/@mtkn/mega-agent/node_modules/.prisma 2>/dev/null || true"
  }
}
```

### Memory system not starting

- Verify `OPENAI_API_KEY` is set and valid
- Check Qdrant is running: `curl http://localhost:6333/collections`
- Memory is optional — chat works without it

### Tools not being used by the agent

- Call `registerTools()` **after** `createMegaAgent()`
- Ensure `inputSchema` is valid JSON Schema (`type: 'object'` at root)
- Tool `handler` must return a `Promise`
- Check tool `description` — the LLM uses it to decide when to call the tool

### Agent not responding or timing out

- The agent loop has a max of 10 iterations per message
- Up to 20 context messages are sent to the LLM
- Check LLM API key validity and rate limits

## License

MIT
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mtkn/mega-agent",
-   "version": "0.1.0",
+   "version": "0.1.1",
    "description": "AI-powered agent infrastructure for Express + Prisma backends",
    "type": "module",
    "main": "./dist/index.js",
package/prisma/schema.prisma CHANGED
@@ -363,7 +363,7 @@ model AiArtifact {
  // ── INCIDENT SYSTEM ─────────────────────────────────────────

  model AiIncident {
- id String @id @default(uuid())
+ id String @id @default(cuid())
  userId Int @map("user_id")
  fingerprint String
  title String @db.VarChar(255)
@@ -475,7 +475,7 @@ model AiWorkflowExecution {
  // ── REPORT SYSTEM ───────────────────────────────────────────

  model AiReport {
- id String @id @default(uuid())
+ id String @id @default(cuid())
  userId Int @map("user_id")
  type ReportType
  title String @db.VarChar(255)
@@ -501,7 +501,7 @@ model AiReport {
  // ── TIMELINE SYSTEM ─────────────────────────────────────────

  model AiTimelineEvent {
- id String @id @default(uuid())
+ id String @id @default(cuid())
  userId Int @map("user_id")
  type EventType
  title String @db.VarChar(255)
@@ -526,7 +526,7 @@ model AiTimelineEvent {
  // ── USER PREFERENCES ────────────────────────────────────────

  model AiUserPreferences {
- id String @id @default(uuid())
+ id String @id @default(cuid())
  userId Int @unique @map("user_id")

  // AI Settings