morpheus-cli 0.3.3 → 0.3.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +1010 -999
  2. package/bin/morpheus.js +48 -48
  3. package/dist/channels/telegram.js +34 -29
  4. package/dist/cli/commands/start.js +41 -3
  5. package/dist/runtime/lifecycle.js +13 -0
  6. package/dist/runtime/memory/backfill-embeddings.js +12 -12
  7. package/dist/runtime/memory/sati/index.js +5 -5
  8. package/dist/runtime/memory/sati/repository.js +186 -186
  9. package/dist/runtime/memory/sati/system-prompts.js +52 -52
  10. package/dist/runtime/memory/session-embedding-worker.js +32 -32
  11. package/dist/runtime/memory/sqlite.js +151 -151
  12. package/dist/runtime/oracle.js +116 -116
  13. package/dist/runtime/tools/analytics-tools.js +12 -12
  14. package/dist/ui/index.html +13 -2
  15. package/dist/ui/manifest.webmanifest +1 -0
  16. package/dist/ui/pwa-192x192.png +0 -0
  17. package/dist/ui/pwa-512x512.png +0 -0
  18. package/dist/ui/pwa-maskable-192x192.png +0 -0
  19. package/dist/ui/pwa-maskable-512x512.png +0 -0
  20. package/dist/ui/registerSW.js +1 -0
  21. package/dist/ui/sw.js +1 -0
  22. package/dist/ui/vite.svg +31 -31
  23. package/dist/ui/workbox-26f462e7.js +1 -0
  24. package/package.json +84 -84
  25. package/dist/http/__tests__/status_api.test.js +0 -55
  26. package/dist/http/__tests__/status_with_server_api.test.js +0 -60
  27. package/dist/runtime/__tests__/agent.test.js +0 -95
  28. package/dist/runtime/__tests__/agent_memory_limit.test.js +0 -61
  29. package/dist/runtime/__tests__/agent_persistence.test.js +0 -154
  30. package/dist/runtime/__tests__/manual_santi_verify.js +0 -55
  31. package/dist/runtime/agent.js +0 -172
  32. package/dist/runtime/audio-agent.js +0 -55
  33. package/dist/runtime/santi/contracts.js +0 -1
  34. package/dist/runtime/santi/middleware.js +0 -61
  35. package/dist/runtime/santi/santi.js +0 -109
  36. package/dist/runtime/santi/store.js +0 -158
  37. package/dist/runtime/tools/__tests__/factory.test.js +0 -42
package/dist/runtime/agent.js
@@ -1,172 +0,0 @@
- import { HumanMessage, SystemMessage } from "@langchain/core/messages";
- import { ProviderFactory } from "./providers/factory.js";
- import { ToolsFactory } from "./tools/factory.js";
- import { ConfigManager } from "../config/manager.js";
- import { ProviderError } from "./errors.js";
- import { DisplayManager } from "./display.js";
- import { SQLiteChatMessageHistory } from "./memory/sqlite.js";
- export class Agent {
- provider;
- config;
- history;
- display = DisplayManager.getInstance();
- databasePath;
- constructor(config, overrides) {
- this.config = config || ConfigManager.getInstance().get();
- this.databasePath = overrides?.databasePath;
- }
- async initialize() {
- if (!this.config.llm) {
- throw new Error("LLM configuration missing in config object.");
- }
- // Basic validation before provider creation
- if (!this.config.llm.provider) {
- throw new Error("LLM provider not specified in configuration.");
- }
- // Note: API Key validation is delegated to ProviderFactory or the Provider itself
- // to allow for Environment Variable fallback supported by LangChain.
- try {
- const tools = await ToolsFactory.create();
- this.provider = await ProviderFactory.create(this.config.llm, tools);
- if (!this.provider) {
- throw new Error("Provider factory returned undefined");
- }
- // Initialize persistent memory with SQLite
- this.history = new SQLiteChatMessageHistory({
- sessionId: "default",
- databasePath: this.databasePath,
- limit: this.config.memory?.limit || 100, // Fallback purely defensive if config type allows optional
- });
- }
- catch (err) {
- if (err instanceof ProviderError)
- throw err; // Re-throw known errors
- // Wrap unknown errors
- throw new ProviderError(this.config.llm.provider || 'unknown', err, "Agent initialization failed");
- }
- }
- async chat(message, extraUsage) {
- if (!this.provider) {
- throw new Error("Agent not initialized. Call initialize() first.");
- }
- if (!this.history) {
- throw new Error("Message history not initialized. Call initialize() first.");
- }
- try {
- this.display.log('Processing message...', { source: 'Agent' });
- const userMessage = new HumanMessage(message);
- // Inject provider/model metadata for persistence
- userMessage.provider_metadata = {
- provider: this.config.llm.provider,
- model: this.config.llm.model
- };
- // Attach extra usage (e.g. from Audio) to the user message to be persisted
- if (extraUsage) {
- userMessage.usage_metadata = extraUsage;
- }
- const systemMessage = new SystemMessage(`You are ${this.config.agent.name}, ${this.config.agent.personality},a local AI operator responsible for orchestrating tools, MCPs, and language models to solve the user’s request accurately and reliably.
-
- Your primary responsibility is NOT to answer from memory when external tools are available.
-
- You must follow these rules strictly:
-
- 1. Tool Evaluation First
- Before generating a final answer, always evaluate whether any available tool or MCP is capable of providing a more accurate, up-to-date, or authoritative response.
-
- If a tool can provide the answer, you MUST call the tool.
-
- 2. No Historical Assumptions for Dynamic Data
- If the user asks something that:
- - may change over time
- - depends on system state
- - depends on filesystem
- - depends on external APIs
- - was previously asked in the conversation
-
- You MUST NOT reuse previous outputs as final truth.
-
- Instead:
- - Re-evaluate available tools
- - Re-execute the relevant tool
- - Provide a fresh result
-
- Even if the user already asked the same question before, you must treat the request as requiring a new verification.
-
- 3. History Is Context, Not Source of Truth
- Conversation history may help with context, but it must not replace real-time verification via tools when tools are available.
-
- Never assume:
- - System state
- - File contents
- - Database values
- - API responses
- based only on previous messages.
-
- 4. Tool Priority Over Language Guessing
- If a tool can compute, fetch, inspect, or verify something, prefer tool usage over generating a speculative answer.
-
- Never hallucinate values that could be retrieved through a tool.
-
- 5. Freshness Principle
- Repeated user queries require fresh validation.
- Do not respond with:
- "As I said before..."
- Instead, perform a new tool check if applicable.
-
- 6. Final Answer Policy
- Only provide a direct natural language answer if:
- - No tool is relevant
- - Tools are unavailable
- - The question is conceptual or explanatory
-
- Otherwise, use tools first.
-
- You are an operator, not a guesser.
- Accuracy is more important than speed.
- `);
- // Load existing history from database
- const previousMessages = await this.history.getMessages();
- const messages = [
- systemMessage,
- ...previousMessages,
- userMessage
- ];
- const response = await this.provider.invoke({ messages });
- // Identify new messages generated during the interaction
- // The `messages` array passed to invoke had length `messages.length`
- // The `response.messages` contains the full state.
- // New messages start after the inputs.
- const startNewMessagesIndex = messages.length;
- const newGeneratedMessages = response.messages.slice(startNewMessagesIndex);
- // Persist User Message first
- await this.history.addMessage(userMessage);
- // Persist all new intermediate tool calls and responses
- for (const msg of newGeneratedMessages) {
- // Inject provider/model metadata search interactors
- msg.provider_metadata = {
- provider: this.config.llm.provider,
- model: this.config.llm.model
- };
- await this.history.addMessage(msg);
- }
- this.display.log('Response generated.', { source: 'Agent' });
- const lastMessage = response.messages[response.messages.length - 1];
- return (typeof lastMessage.content === 'string') ? lastMessage.content : JSON.stringify(lastMessage.content);
- }
- catch (err) {
- throw new ProviderError(this.config.llm.provider, err, "Chat request failed");
- }
- }
- async getHistory() {
- if (!this.history) {
- throw new Error("Message history not initialized. Call initialize() first.");
- }
- return await this.history.getMessages();
- }
- async clearMemory() {
- if (!this.history) {
- throw new Error("Message history not initialized. Call initialize() first.");
- }
- await this.history.clear();
- }
- }
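For orientation, here is a minimal usage sketch of the removed Agent class, based only on the API visible in the deleted file above; the database path and the example prompt are illustrative assumptions, and the ConfigManager singleton is assumed to be already populated.

// Hypothetical driver for the removed Agent class (ESM, top-level await assumed).
import { Agent } from "./dist/runtime/agent.js";

const agent = new Agent(undefined, { databasePath: "/tmp/morpheus-session.db" }); // path is an assumption
await agent.initialize();                        // builds tools, provider, and SQLite-backed history
const answer = await agent.chat("Which files are in the current directory?");
console.log(answer);
console.log((await agent.getHistory()).length);  // messages persisted for the "default" session
await agent.clearMemory();                       // clears the persisted history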
package/dist/runtime/audio-agent.js
@@ -1,55 +0,0 @@
- import { GoogleGenAI } from '@google/genai';
- export class AudioAgent {
- async transcribe(filePath, mimeType, apiKey) {
- try {
- const ai = new GoogleGenAI({ apiKey });
- // Upload the file
- const uploadResult = await ai.files.upload({
- file: filePath,
- config: { mimeType }
- });
- // Generate content (transcription)
- // using gemini-1.5-flash as it is fast and supports audio
- const response = await ai.models.generateContent({
- model: 'gemini-2.5-flash-lite',
- contents: [
- {
- role: 'user',
- parts: [
- {
- fileData: {
- fileUri: uploadResult.uri,
- mimeType: uploadResult.mimeType
- }
- },
- { text: "Transcribe this audio message accurately. Return only the transcribed text without any additional commentary." }
- ]
- }
- ]
- });
- // The new SDK returns text directly on the response object
- const text = response.text;
- if (!text) {
- throw new Error('No transcription generated');
- }
- // Extract usage metadata
- const usage = response.usageMetadata;
- const usageMetadata = {
- input_tokens: usage?.promptTokenCount ?? 0,
- output_tokens: usage?.candidatesTokenCount ?? 0,
- total_tokens: usage?.totalTokenCount ?? 0,
- input_token_details: {
- cache_read: usage?.cachedContentTokenCount ?? 0
- }
- };
- return { text, usage: usageMetadata };
- }
- catch (error) {
- // Wrap error for clarity
- if (error instanceof Error) {
- throw new Error(`Audio transcription failed: ${error.message}`);
- }
- throw error;
- }
- }
- }
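A short sketch of how the removed AudioAgent was likely invoked; the file path, MIME type, and the GEMINI_API_KEY environment variable are assumptions for illustration, not part of the package.

// Hypothetical call into the removed AudioAgent (Google GenAI key required).
import { AudioAgent } from "./dist/runtime/audio-agent.js";

const audio = new AudioAgent();
const { text, usage } = await audio.transcribe(
  "./voice-note.ogg",            // local audio file (assumption)
  "audio/ogg",
  process.env.GEMINI_API_KEY     // API key passed straight through to GoogleGenAI
);
console.log(text);               // transcription only, per the prompt embedded above
console.log(usage.total_tokens); // usage shape mirrors the usageMetadata mapping above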
package/dist/runtime/santi/contracts.js
@@ -1 +0,0 @@
- export {};
package/dist/runtime/santi/middleware.js
@@ -1,61 +0,0 @@
- import { createMiddleware } from "langchain";
- import { SystemMessage } from "@langchain/core/messages";
- import { DisplayManager } from "../display.js";
- // T009: Create skeleton
- export const createSantiMiddleware = (santi) => {
- const display = DisplayManager.getInstance();
- return createMiddleware({
- name: "SatiMemoryMiddleware",
- // T010: beforeAgent hook
- async beforeAgent(state) {
- try {
- const messages = state.messages;
- if (!messages || messages.length === 0)
- return;
- // Extract last user message
- const lastMessage = messages[messages.length - 1];
- if (lastMessage._getType() !== "human")
- return;
- // Recover memories
- const memories = await santi.recover(lastMessage.content);
- if (memories.length > 0) {
- const memoryText = memories.map(m => `- [${m.category.toUpperCase()}] ${m.summary}`).join("\n");
- const systemMsg = new SystemMessage(`Relevant long-term memory about the Architect:\n${memoryText}`);
- // Inject into state logic - typically by appending to messages or inserting
- // Since this is "beforeAgent", modifying state.messages usually propagates to the agent input
- // We need to check if 'state' is mutable or if we return a diff.
- // In standard LangGraph/LangChain middleware, we might return an update.
- // Spec example: "return;" implies mutation of 'state' or side-effect?
- // Spec: "The injection... should occur adding a SystemMessage to the array state.messages"
- // Assuming direct mutation as per common JS middleware patterns if not specified otherwise
- state.messages.push(systemMsg);
- display.log('Injected long-term memories into context', { source: 'SatiMiddleware' });
- }
- }
- catch (error) {
- display.log(`Error in beforeAgent: ${error}`, { level: "error", source: 'SatiMiddleware' });
- }
- },
- // T013: afterAgent hook
- async afterAgent(state) {
- try {
- const messages = state.messages;
- // We interact with the *result* of the agent.
- // 'state' usually contains the full history at this point.
- // We pass the full recent history to evaluate.
- if (messages && messages.length > 0) {
- // We fire and forget this to not block the response?
- // Or await it? Middleware in LangGraph might be blocking.
- // Given performance goals (<2s), and that this is post-generation, blocking is safer to ensure data consistency
- // but user might perceive latency.
- // Spec says "afterAgent... Persist if necessary".
- // We'll await it.
- await santi.evaluate(messages);
- }
- }
- catch (error) {
- display.log(`Error in afterAgent: ${error}`, { level: "error", source: 'SatiMiddleware' });
- }
- }
- });
- };
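As a rough sketch, this is how the removed middleware factory was presumably wired together with Santi; how the resulting middleware is attached to the agent depends on the langchain version in use, so that step is left as a comment rather than a concrete call.

// Hypothetical wiring of the removed Sati middleware.
import { Santi } from "./dist/runtime/santi/santi.js";
import { createSantiMiddleware } from "./dist/runtime/santi/middleware.js";

const santi = new Santi();
const satiMiddleware = createSantiMiddleware(santi);
// beforeAgent: pushes a SystemMessage with recovered memories onto state.messages
// afterAgent:  awaits santi.evaluate(messages) to persist anything worth keeping
// Attach `satiMiddleware` to the agent via the createMiddleware-compatible API in use.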
package/dist/runtime/santi/santi.js
@@ -1,109 +0,0 @@
- import { HumanMessage } from "@langchain/core/messages";
- import { SantiStore } from "./store.js";
- import { DisplayManager } from "../display.js";
- import { ConfigManager } from "../../config/manager.js";
- import { ProviderFactory } from "../providers/factory.js";
- import * as crypto from 'crypto';
- export class Santi {
- store;
- display = DisplayManager.getInstance();
- constructor() {
- this.store = new SantiStore();
- }
- // T007 & T008: Recover implementation
- async recover(message, limit = 5) {
- this.display.log('Recovering memories...', { source: 'Sati' });
- // 1. Keyword extraction (simple heuristic: words > 3 chars)
- // Ideally this could be an LLM call to generate search queries, but for now we stick to the plan/store logic.
- // If we want to be smarter, we could ask the LLM "Extract search keywords from this message".
- // For MVP, direct search is faster and meets the "textual search" constraint.
- // T008: Fetch from store
- const memories = this.store.searchMemories(message, limit);
- if (memories.length > 0) {
- this.display.log(`Recovered ${memories.length} memories`, { source: 'Sati' });
- }
- return memories;
- }
- // T012: Evaluate implementation
- async evaluate(recentMessages) {
- if (recentMessages.length < 2)
- return; // Need at least user + AI response to evaluate context
- this.display.log('Evaluating interaction for memory persistence...', { source: 'Sati' });
- try {
- const config = ConfigManager.getInstance().get().llm;
- if (!config) {
- this.display.log('LLM config missing, skipping memory evaluation.', { level: "warning", source: 'Sati' });
- return;
- }
- const model = ProviderFactory.createModel(config);
- // Format conversation for prompt
- const conversationText = recentMessages.map(m => {
- const role = m._getType() === 'human' ? 'User' : 'Assistant';
- const content = typeof m.content === 'string' ? m.content : JSON.stringify(m.content);
- return `${role}: ${content}`;
- }).join('\n');
- const prompt = `
- You are Sati, guardian of the long-term memory system.
- Analyze the interaction below between the Architect (User) and the System.
-
- Decide if there is any information that should be stored as long-term memory.
- Store ONLY:
- - Persistent preferences
- - Architectural decisions
- - Permanent constraints
- - Project context
- - Personal identity (non-sensitive)
- - Favorite languages/tech
- - Relationships/Pets/Names
-
- IGNORE:
- - One-off questions
- - Temporary context
- - Redundant info
- - SENSITIVE DATA (API Keys, Secrets, Passwords) - NEVER STORE THESE.
-
- Format output as JSON:
- {
- "should_store": boolean,
- "category": "preference" | "project" | "identity" | "constraint" | "context" | "personal_data" | "languages" | "favorite_things" | "relationships" | "pets" | "naming" | "professional_profile",
- "importance": "low" | "medium" | "high",
- "summary": "Concise summary",
- "reason": "Why this is important"
- }
-
- Interaction:
- ${conversationText}
- `;
- const response = await model.invoke([new HumanMessage(prompt)]);
- const content = typeof response.content === 'string' ? response.content : JSON.stringify(response.content);
- // Parse JSON (handle potential markdown code blocks)
- const jsonMatch = content.match(/\{[\s\S]*\}/);
- if (!jsonMatch) {
- return; // No JSON found
- }
- const result = JSON.parse(jsonMatch[0]);
- if (result.should_store && result.summary && result.category && result.importance) {
- // T015: Privacy Filter (Regex check)
- const sensitiveRegex = /(sk-[a-zA-Z0-9]{20,}|eyJ[a-zA-Z0-9]{20,}|password|secret|[0-9a-f]{32,})/;
- if (sensitiveRegex.test(result.summary)) {
- this.display.log('Sensitive data detected in memory summary. Aborting persistence.', { level: 'warning', source: 'Sati' });
- return;
- }
- // Generate hash for deduplication
- const hash = crypto.createHash('sha256').update(result.summary.toLowerCase().trim()).digest('hex');
- // T014: Persist
- const memory = this.store.addMemory({
- category: result.category,
- importance: result.importance,
- summary: result.summary,
- hash: hash,
- source: 'conversation_evaluation'
- });
- this.display.log(`Persisted new memory: ${memory.summary.substring(0, 50)}...`, { source: 'Sati' });
- }
- }
- catch (err) {
- this.display.log(`Evaluation failed: ${err}`, { level: "error", source: 'Sati' });
- }
- }
- }
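For clarity, this is the JSON shape that evaluate() tries to extract from the model response before persisting; the values below are invented, only the keys and allowed enum values follow the prompt in the deleted file.

// Illustrative evaluation payload (invented values, shape per the prompt above).
const exampleEvaluation = {
  should_store: true,
  category: "preference",     // one of the categories enumerated in the prompt
  importance: "medium",
  summary: "The Architect prefers TypeScript for new CLI tooling",
  reason: "Stable preference that affects future code generation"
};
// evaluate() persists only when should_store, summary, category and importance are
// all present and the summary passes the sensitive-data regex filter.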
package/dist/runtime/santi/store.js
@@ -1,158 +0,0 @@
- import Database from "better-sqlite3";
- import * as fs from "fs-extra";
- import * as path from "path";
- import { homedir } from "os";
- import { v4 as uuidv4 } from 'uuid';
- export class SantiStore {
- db;
- constructor(databasePath) {
- const dbPath = databasePath || path.join(homedir(), ".morpheus", "memory", "santi-memory.db");
- this.ensureDirectory(dbPath);
- this.db = new Database(dbPath, { timeout: 5000 });
- this.init();
- }
- ensureDirectory(filePath) {
- const dir = path.dirname(filePath);
- fs.ensureDirSync(dir);
- }
- init() {
- // T003: Table creation
- const schema = `
- CREATE TABLE IF NOT EXISTS long_term_memory (
- id TEXT PRIMARY KEY,
- category TEXT NOT NULL CHECK(category IN ('preference','project','identity','constraint','context','personal_data','languages','favorite_things','relationships','pets','naming','professional_profile')),
- importance TEXT NOT NULL CHECK(importance IN ('low','medium','high')),
- summary TEXT NOT NULL,
- details TEXT,
- hash TEXT NOT NULL UNIQUE,
- source TEXT,
- created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
- updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_accessed_at DATETIME,
- access_count INTEGER DEFAULT 0,
- version INTEGER DEFAULT 1,
- archived BOOLEAN DEFAULT 0
- );
-
- CREATE INDEX IF NOT EXISTS idx_memory_category ON long_term_memory(category);
- CREATE INDEX IF NOT EXISTS idx_memory_importance ON long_term_memory(importance);
- CREATE INDEX IF NOT EXISTS idx_memory_archived ON long_term_memory(archived);
- `;
- this.db.exec(schema);
- }
- // T004: addMemory
- addMemory(memory) {
- // Check for existing hash
- const existing = this.db.prepare('SELECT * FROM long_term_memory WHERE hash = ?').get(memory.hash);
- if (existing) {
- // If exact hash match, return existing (idempotent)
- return existing;
- }
- const id = uuidv4();
- const now = new Date().toISOString();
- const newMemory = {
- ...memory,
- id,
- details: memory.details || null, // Ensure explicit null if undefined
- source: memory.source || null,
- created_at: now,
- updated_at: now,
- access_count: 0,
- version: 1,
- archived: false
- };
- const stmt = this.db.prepare(`
- INSERT INTO long_term_memory (
- id, category, importance, summary, details, hash, source,
- created_at, updated_at, access_count, version, archived
- ) VALUES (
- @id, @category, @importance, @summary, @details, @hash, @source,
- @created_at, @updated_at, @access_count, @version, @archived
- )
- `);
- const bindParams = {
- ...newMemory,
- archived: newMemory.archived ? 1 : 0
- };
- stmt.run(bindParams);
- return newMemory;
- }
- // T005: searchMemories
- searchMemories(query, limit = 5) {
- // Basic text search using LIKE for now as per requirements (no vectors)
- // We split query into keywords and search for any match
- // Prioritizing importance and recency
- const keywords = query.split(' ').filter(word => word.length > 3).map(w => `%${w}%`);
- if (keywords.length === 0)
- return [];
- // Construct dynamic query for multiple keywords (OR logic for broader retrieval, refined by importance)
- const conditions = keywords.map((_, i) => `summary LIKE ?`).join(' OR ');
- const stmt = this.db.prepare(`
- SELECT * FROM long_term_memory
- WHERE archived = 0 AND (${conditions})
- ORDER BY
- CASE importance
- WHEN 'high' THEN 1
- WHEN 'medium' THEN 2
- WHEN 'low' THEN 3
- END ASC,
- updated_at DESC
- LIMIT ?
- `);
- const results = stmt.all(...keywords, limit);
- // Update access stats
- if (results.length > 0) {
- this.updateAccessStats(results.map(r => r.id));
- }
- return results;
- }
- updateAccessStats(ids) {
- const now = new Date().toISOString();
- const stmt = this.db.prepare(`
- UPDATE long_term_memory
- SET access_count = access_count + 1, last_accessed_at = ?
- WHERE id = ?
- `);
- const transaction = this.db.transaction((timestamp, memoryIds) => {
- for (const id of memoryIds) {
- stmt.run(timestamp, id);
- }
- });
- transaction(now, ids);
- }
- // T006: updateMemory
- updateMemory(id, data) {
- const setClauses = [];
- const params = [];
- if (data.summary) {
- setClauses.push("summary = ?");
- params.push(data.summary);
- }
- if (data.details) {
- setClauses.push("details = ?");
- params.push(data.details);
- }
- if (data.importance) {
- setClauses.push("importance = ?");
- params.push(data.importance);
- }
- if (data.hash) {
- setClauses.push("hash = ?");
- params.push(data.hash);
- }
- if (setClauses.length === 0)
- return;
- setClauses.push("updated_at = CURRENT_TIMESTAMP");
- setClauses.push("version = version + 1");
- params.push(id); // Where ID
- const stmt = this.db.prepare(`
- UPDATE long_term_memory
- SET ${setClauses.join(', ')}
- WHERE id = ?
- `);
- stmt.run(...params);
- }
- getByHash(hash) {
- return this.db.prepare('SELECT * FROM long_term_memory WHERE hash = ?').get(hash);
- }
- }
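A minimal sketch of driving the removed SantiStore directly, mirroring what Santi.evaluate() does; the ":memory:" database path and the sample memory are assumptions for illustration.

// Hypothetical direct use of the removed SantiStore.
import * as crypto from "crypto";
import { SantiStore } from "./dist/runtime/santi/store.js";

const store = new SantiStore(":memory:");        // in-memory DB for illustration
const summary = "The Architect's test project targets Node 20";
store.addMemory({
  category: "project",
  importance: "high",
  summary,
  hash: crypto.createHash("sha256").update(summary.toLowerCase().trim()).digest("hex"),
  source: "conversation_evaluation"
});
console.log(store.searchMemories("project targets Node")); // LIKE-based keyword match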
package/dist/runtime/tools/__tests__/factory.test.js
@@ -1,42 +0,0 @@
- import { describe, it, expect, vi, beforeEach } from 'vitest';
- import { ToolsFactory } from '../factory.js';
- import { MultiServerMCPClient } from "@langchain/mcp-adapters";
- vi.mock("@langchain/mcp-adapters", () => {
- return {
- MultiServerMCPClient: vi.fn(),
- };
- });
- vi.mock("../../display.js", () => ({
- DisplayManager: {
- getInstance: () => ({
- log: vi.fn(),
- })
- }
- }));
- describe('ToolsFactory', () => {
- beforeEach(() => {
- vi.resetAllMocks();
- });
- it('should create tools successfully', async () => {
- const mockGetTools = vi.fn().mockResolvedValue(['tool1', 'tool2']);
- // Mock the constructor and getTools method
- MultiServerMCPClient.mockImplementation(function () {
- return {
- getTools: mockGetTools
- };
- });
- const tools = await ToolsFactory.create();
- expect(MultiServerMCPClient).toHaveBeenCalled();
- expect(mockGetTools).toHaveBeenCalled();
- expect(tools).toEqual(['tool1', 'tool2']);
- });
- it('should return empty array on failure', async () => {
- MultiServerMCPClient.mockImplementation(function () {
- return {
- getTools: vi.fn().mockRejectedValue(new Error('MCP Failed'))
- };
- });
- const tools = await ToolsFactory.create();
- expect(tools).toEqual([]);
- });
- });