neuroagent 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +273 -0
  3. package/dist/index.d.ts +55 -0
  4. package/dist/index.js +215 -0
  5. package/neuroagent/__init__.py +29 -0
  6. package/neuroagent/agents/__init__.py +3 -0
  7. package/neuroagent/agents/agent.py +253 -0
  8. package/neuroagent/cli/__init__.py +3 -0
  9. package/neuroagent/cli/neuroagent_cli.py +182 -0
  10. package/neuroagent/examples/__init__.py +1 -0
  11. package/neuroagent/examples/dev_agent.py +31 -0
  12. package/neuroagent/examples/multi_agent_team.py +45 -0
  13. package/neuroagent/examples/website_agent.py +27 -0
  14. package/neuroagent/frontend/__init__.py +1 -0
  15. package/neuroagent/frontend/widget.js +409 -0
  16. package/neuroagent/llm/__init__.py +5 -0
  17. package/neuroagent/llm/base.py +46 -0
  18. package/neuroagent/llm/local_model_provider.py +76 -0
  19. package/neuroagent/llm/openai_provider.py +58 -0
  20. package/neuroagent/memory/__init__.py +5 -0
  21. package/neuroagent/memory/long_memory.py +52 -0
  22. package/neuroagent/memory/short_memory.py +39 -0
  23. package/neuroagent/memory/vector_memory.py +57 -0
  24. package/neuroagent/planner/__init__.py +3 -0
  25. package/neuroagent/planner/planner.py +90 -0
  26. package/neuroagent/server/__init__.py +16 -0
  27. package/neuroagent/server/api_server.py +191 -0
  28. package/neuroagent/server/websocket_server.py +108 -0
  29. package/neuroagent/team/__init__.py +3 -0
  30. package/neuroagent/team/team.py +134 -0
  31. package/neuroagent/tools/__init__.py +19 -0
  32. package/neuroagent/tools/base.py +45 -0
  33. package/neuroagent/tools/code_executor.py +69 -0
  34. package/neuroagent/tools/file_manager.py +62 -0
  35. package/neuroagent/tools/http_client.py +57 -0
  36. package/neuroagent/tools/web_search.py +48 -0
  37. package/neuroagent/utils/__init__.py +3 -0
  38. package/neuroagent/utils/helpers.py +31 -0
  39. package/package.json +56 -0
  40. package/requirements.txt +15 -0
  41. package/setup.py +61 -0
  42. package/src/index.d.ts +55 -0
  43. package/src/index.js +215 -0
  44. package/web_example/index.html +249 -0
  45. package/web_example/neuroagent.js +301 -0
  46. package/web_example/script.js +114 -0
@@ -0,0 +1,409 @@
1
/**
 * NeuroAgent Widget - JavaScript widget for integrating AI agents on websites
 * Version: 0.1.0
 */

(function(global) {
    'use strict';

    /**
     * Floating chat widget that talks to a NeuroAgent backend over HTTP
     * (POST {apiUrl}/agent/chat) and, optionally, a WebSocket.
     */
    class NeuroAgentWidget {
        /**
         * @param {Object} [config] Widget configuration; every key is optional.
         * @param {string} [config.apiUrl='http://localhost:8000'] Base URL of the API server.
         * @param {string} [config.agent='default'] Agent name sent with every request.
         * @param {string} [config.wsUrl] WebSocket endpoint; enables WS mode when set.
         * @param {string} [config.theme='dark'] Theme name (stored but not currently applied).
         * @param {string} [config.position='bottom-right'] One of bottom-right,
         *        bottom-left, top-right, top-left.
         * @param {string} [config.title='AI Assistant'] Button tooltip and header title.
         * @param {string} [config.welcomeMessage] First assistant message shown.
         */
        constructor(config) {
            // Robustness fix: NeuroAgent.init() with no argument used to throw
            // (reading properties of undefined).
            config = config || {};
            this.config = {
                apiUrl: config.apiUrl || 'http://localhost:8000',
                agent: config.agent || 'default',
                wsUrl: config.wsUrl || null,
                theme: config.theme || 'dark',
                position: config.position || 'bottom-right',
                title: config.title || 'AI Assistant',
                welcomeMessage: config.welcomeMessage || 'Hello! How can I help you today?',
                ...config
            };

            this.isOpen = false;
            this.messages = [];        // local transcript: { content, type }
            this.websocket = null;

            this.init();
        }

        /** Build styles and DOM, wire events, and open the WebSocket if configured. */
        init() {
            this.createStyles();
            this.createWidget();
            this.createChatContainer();
            this.bindEvents();

            if (this.config.wsUrl) {
                this.connectWebSocket();
            }
        }

        /** Inject the widget's stylesheet into the document head. */
        createStyles() {
            const styles = document.createElement('style');
            styles.textContent = `
                .neuroagent-widget {
                    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
                    position: fixed;
                    z-index: 999999;
                }

                .neuroagent-widget-button {
                    width: 60px;
                    height: 60px;
                    border-radius: 50%;
                    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
                    border: none;
                    cursor: pointer;
                    box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4);
                    display: flex;
                    align-items: center;
                    justify-content: center;
                    transition: transform 0.3s ease;
                }

                .neuroagent-widget-button:hover {
                    transform: scale(1.1);
                }

                .neuroagent-widget-button svg {
                    width: 30px;
                    height: 30px;
                    fill: white;
                }

                .neuroagent-chat {
                    position: fixed;
                    width: 380px;
                    height: 500px;
                    background: white;
                    border-radius: 16px;
                    box-shadow: 0 10px 40px rgba(0, 0, 0, 0.2);
                    display: none;
                    flex-direction: column;
                    overflow: hidden;
                }

                .neuroagent-chat.open {
                    display: flex;
                }

                .neuroagent-chat-header {
                    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
                    color: white;
                    padding: 16px;
                    display: flex;
                    align-items: center;
                    justify-content: space-between;
                }

                .neuroagent-chat-title {
                    font-weight: 600;
                    font-size: 16px;
                }

                .neuroagent-chat-close {
                    background: none;
                    border: none;
                    color: white;
                    cursor: pointer;
                    font-size: 20px;
                    padding: 0;
                    line-height: 1;
                }

                .neuroagent-chat-messages {
                    flex: 1;
                    padding: 16px;
                    overflow-y: auto;
                    display: flex;
                    flex-direction: column;
                    gap: 12px;
                }

                .neuroagent-message {
                    max-width: 80%;
                    padding: 12px 16px;
                    border-radius: 16px;
                    font-size: 14px;
                    line-height: 1.5;
                }

                .neuroagent-message.user {
                    align-self: flex-end;
                    background: #667eea;
                    color: white;
                    border-bottom-right-radius: 4px;
                }

                .neuroagent-message.assistant {
                    align-self: flex-start;
                    background: #f0f0f0;
                    color: #333;
                    border-bottom-left-radius: 4px;
                }

                .neuroagent-message.thinking {
                    align-self: flex-start;
                    background: #f0f0f0;
                    color: #666;
                    font-style: italic;
                }

                .neuroagent-chat-input {
                    padding: 16px;
                    border-top: 1px solid #eee;
                    display: flex;
                    gap: 8px;
                }

                .neuroagent-chat-input input {
                    flex: 1;
                    padding: 12px 16px;
                    border: 1px solid #ddd;
                    border-radius: 24px;
                    outline: none;
                    font-size: 14px;
                }

                .neuroagent-chat-input input:focus {
                    border-color: #667eea;
                }

                .neuroagent-chat-input button {
                    background: #667eea;
                    color: white;
                    border: none;
                    padding: 12px 20px;
                    border-radius: 24px;
                    cursor: pointer;
                    font-size: 14px;
                    font-weight: 500;
                    transition: background 0.3s ease;
                }

                .neuroagent-chat-input button:hover {
                    background: #5568d3;
                }

                .neuroagent-chat-input button:disabled {
                    background: #ccc;
                    cursor: not-allowed;
                }

                .neuroagent-position-bottom-right {
                    bottom: 20px;
                    right: 20px;
                }

                .neuroagent-position-bottom-left {
                    bottom: 20px;
                    left: 20px;
                }

                .neuroagent-position-top-right {
                    top: 20px;
                    right: 20px;
                }

                .neuroagent-position-top-left {
                    top: 20px;
                    left: 20px;
                }

                .neuroagent-chat.neuroagent-position-bottom-right,
                .neuroagent-chat.neuroagent-position-bottom-left {
                    bottom: 90px;
                }

                .neuroagent-chat.neuroagent-position-top-right,
                .neuroagent-chat.neuroagent-position-top-left {
                    top: 90px;
                }

                .neuroagent-chat.neuroagent-position-bottom-right,
                .neuroagent-chat.neuroagent-position-top-right {
                    right: 20px;
                }

                .neuroagent-chat.neuroagent-position-bottom-left,
                .neuroagent-chat.neuroagent-position-top-left {
                    left: 20px;
                }
            `;

            document.head.appendChild(styles);
        }

        /** Create the round launcher button and attach it to the page. */
        createWidget() {
            this.widget = document.createElement('div');
            this.widget.className = `neuroagent-widget neuroagent-position-${this.config.position}`;

            // NOTE(review): config.title is interpolated into innerHTML. It is
            // site-owner supplied config, but escape it if it can ever carry
            // end-user data.
            this.widget.innerHTML = `
                <button class="neuroagent-widget-button" title="${this.config.title}">
                    <svg viewBox="0 0 24 24">
                        <path d="M20 2H4c-1.1 0-2 .9-2 2v18l4-4h14c1.1 0 2-.9 2-2V4c0-1.1-.9-2-2-2zm0 14H6l-2 2V4h16v12z"/>
                        <path d="M7 9h10v2H7zm0-3h10v2H7z"/>
                    </svg>
                </button>
            `;

            document.body.appendChild(this.widget);
        }

        /** Create the chat panel (header, message list, input row) and show the welcome message. */
        createChatContainer() {
            this.chatContainer = document.createElement('div');
            this.chatContainer.className = `neuroagent-chat neuroagent-position-${this.config.position}`;
            this.chatContainer.innerHTML = `
                <div class="neuroagent-chat-header">
                    <span class="neuroagent-chat-title">${this.config.title}</span>
                    <button class="neuroagent-chat-close">&times;</button>
                </div>
                <div class="neuroagent-chat-messages"></div>
                <div class="neuroagent-chat-input">
                    <input type="text" placeholder="Type a message..." />
                    <button>Send</button>
                </div>
            `;

            document.body.appendChild(this.chatContainer);

            this.messagesContainer = this.chatContainer.querySelector('.neuroagent-chat-messages');
            this.inputField = this.chatContainer.querySelector('.neuroagent-chat-input input');
            // BUG FIX: a bare querySelector('button') matched the FIRST button
            // in the container — the header's close button — so the Send
            // button did nothing and clicking Close sent the message. Scope
            // the selector to the input row.
            this.sendButton = this.chatContainer.querySelector('.neuroagent-chat-input button');

            this.addMessage(this.config.welcomeMessage, 'assistant');
        }

        /** Wire click/keyboard handlers for the launcher, close, send, and Enter key. */
        bindEvents() {
            const button = this.widget.querySelector('.neuroagent-widget-button');
            const closeButton = this.chatContainer.querySelector('.neuroagent-chat-close');

            button.addEventListener('click', () => this.toggle());
            closeButton.addEventListener('click', () => this.close());

            this.sendButton.addEventListener('click', () => this.sendMessage());
            this.inputField.addEventListener('keypress', (e) => {
                if (e.key === 'Enter') {
                    this.sendMessage();
                }
            });
        }

        /** Toggle the chat panel open/closed. */
        toggle() {
            this.isOpen ? this.close() : this.open();
        }

        /** Open the chat panel and focus the input field. */
        open() {
            this.isOpen = true;
            this.chatContainer.classList.add('open');
            this.inputField.focus();
        }

        /** Hide the chat panel. */
        close() {
            this.isOpen = false;
            this.chatContainer.classList.remove('open');
        }

        /**
         * Append a message bubble and record it in the transcript.
         * Uses textContent (not innerHTML), so message text is XSS-safe.
         * @param {string} content Message text.
         * @param {string} [type='assistant'] One of 'user', 'assistant', 'thinking'.
         */
        addMessage(content, type = 'assistant') {
            const message = document.createElement('div');
            message.className = `neuroagent-message ${type}`;
            message.textContent = content;
            this.messagesContainer.appendChild(message);
            // Keep the newest message in view.
            this.messagesContainer.scrollTop = this.messagesContainer.scrollHeight;

            this.messages.push({ content, type });
        }

        /**
         * Send the current input value to the backend and render the reply.
         * Shows a temporary "Thinking..." bubble and disables Send meanwhile.
         */
        async sendMessage() {
            const message = this.inputField.value.trim();
            if (!message) return;

            this.inputField.value = '';
            this.addMessage(message, 'user');
            this.sendButton.disabled = true;

            this.addMessage('Thinking...', 'thinking');
            const thinkingMsg = this.messagesContainer.lastElementChild;

            try {
                const response = await fetch(`${this.config.apiUrl}/agent/chat`, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({
                        agent: this.config.agent,
                        message: message
                    })
                });

                const data = await response.json();

                this.messagesContainer.removeChild(thinkingMsg);

                if (data.response) {
                    this.addMessage(data.response, 'assistant');
                } else {
                    this.addMessage('Sorry, something went wrong.', 'assistant');
                }
            } catch (error) {
                // Network failure or non-JSON body: drop the placeholder and
                // surface a generic error instead of crashing.
                this.messagesContainer.removeChild(thinkingMsg);
                this.addMessage('Error connecting to server.', 'assistant');
            }

            this.sendButton.disabled = false;
        }

        /** Open a WebSocket to the backend and render pushed responses. */
        connectWebSocket() {
            // Fallback derives ws(s):// from the API URL; only reached if
            // wsUrl was truthy at init time, so the right-hand side is a
            // belt-and-braces default.
            const wsUrl = this.config.wsUrl || this.config.apiUrl.replace('http', 'ws') + '/ws';

            this.websocket = new WebSocket(`${wsUrl}/${this.config.agent}`);

            this.websocket.onopen = () => {
                console.log('NeuroAgent WebSocket connected');
            };

            this.websocket.onmessage = (event) => {
                const data = JSON.parse(event.data);

                if (data.type === 'response') {
                    this.addMessage(data.message, 'assistant');
                }
            };

            this.websocket.onerror = (error) => {
                console.error('WebSocket error:', error);
            };
        }

        /**
         * Programmatic one-shot chat (no UI involved).
         * @param {string} message Text to send.
         * @returns {Promise<string>} The agent's reply, or an error string.
         */
        async chat(message) {
            try {
                const response = await fetch(`${this.config.apiUrl}/agent/chat`, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({
                        agent: this.config.agent,
                        message: message
                    })
                });

                const data = await response.json();
                return data.response;
            } catch (error) {
                return 'Error connecting to server.';
            }
        }
    }

    // Public entry point: NeuroAgent.init({...}) builds and mounts a widget.
    global.NeuroAgent = {
        init: function(config) {
            return new NeuroAgentWidget(config);
        }
    };

    // CommonJS export for bundlers; the browser path uses the global above.
    if (typeof module !== 'undefined' && module.exports) {
        module.exports = global.NeuroAgent;
    }

})(typeof window !== 'undefined' ? window : this);
@@ -0,0 +1,5 @@
1
+ from neuroagent.llm.base import LLMProvider, LLMResponse, Message
2
+ from neuroagent.llm.openai_provider import OpenAIProvider
3
+ from neuroagent.llm.local_model_provider import LocalModelProvider
4
+
5
+ __all__ = ["LLMProvider", "LLMResponse", "Message", "OpenAIProvider", "LocalModelProvider"]
@@ -0,0 +1,46 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import List, Dict, Any, Optional
3
+ from pydantic import BaseModel
4
+
5
+
6
class Message(BaseModel):
    """A single chat message (role + content) in provider-agnostic form."""
    role: str  # e.g. "system", "user", or "assistant"
    content: str  # the message text
9
+
10
+
11
class LLMResponse(BaseModel):
    """Normalized response returned by every LLM provider."""
    content: str  # generated text
    model: str  # name of the model that produced the response
    usage: Optional[Dict[str, int]] = None  # token counts, when the backend reports them
15
+
16
+
17
class LLMProvider(ABC):
    """Abstract base for chat/completion language-model backends."""

    def __init__(self, api_key: Optional[str] = None, model: str = "gpt-4"):
        self.api_key = api_key
        self.model = model

    @abstractmethod
    async def chat(
        self,
        messages: List[Message],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """Generate a reply to a structured list of chat messages."""
        ...

    @abstractmethod
    async def complete(
        self,
        prompt: str,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """Generate a completion for a single plain-text prompt."""
        ...

    def format_messages(self, system: str, history: List[Dict[str, str]]) -> List[Message]:
        """Build a message list: *system* first, then *history* converted to Message objects.

        History entries missing a role default to "user"; missing content
        defaults to the empty string.
        """
        converted = [
            Message(role=item.get("role", "user"), content=item.get("content", ""))
            for item in history
        ]
        return [Message(role="system", content=system), *converted]
@@ -0,0 +1,76 @@
1
+ import os
2
+ import json
3
+ import asyncio
4
+ from typing import List, Optional, Dict, Any
5
+
6
+ from neuroagent.llm.base import LLMProvider, LLMResponse, Message
7
+
8
+
9
class LocalModelProvider(LLMProvider):
    """LLM provider backed by a local Ollama server.

    Chat-style message lists are flattened into a single prompt string
    because Ollama's ``/api/generate`` endpoint accepts plain text rather
    than structured messages.
    """

    def __init__(
        self,
        model: str = "llama2",
        api_base: str = "http://localhost:11434/api/generate",
        api_key: Optional[str] = None
    ):
        """
        Args:
            model: Name of the locally served model (e.g. ``llama2``).
            api_base: Full URL of the Ollama generate endpoint.
            api_key: Unused by Ollama; accepted for interface parity with
                other providers.
        """
        super().__init__(api_key=api_key, model=model)
        self.api_base = api_base

    async def _call_ollama(self, prompt: str, **kwargs) -> Dict[str, Any]:
        """POST *prompt* to the Ollama API and return the decoded JSON body.

        Recognized kwargs: ``temperature`` (default 0.7) and ``max_tokens``
        (default 256, mapped to Ollama's ``num_predict``).

        Raises:
            RuntimeError: If the server responds with a non-200 status.
        """
        # Imported lazily so the package imports cleanly when aiohttp is
        # not installed and this provider is never used.
        import aiohttp

        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,  # request one complete response, not a stream
            "options": {
                "temperature": kwargs.get("temperature", 0.7),
                "num_predict": kwargs.get("max_tokens", 256)
            }
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(self.api_base, json=payload) as response:
                if response.status != 200:
                    # Was a bare `Exception`; RuntimeError is more specific
                    # and still caught by callers handling Exception.
                    raise RuntimeError(f"Ollama API error: {await response.text()}")
                return await response.json()

    async def chat(
        self,
        messages: List[Message],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """Flatten *messages* into a prompt and generate a reply.

        The first system message (if any) is placed at the top, followed by
        the remaining messages rendered as ``role: content`` lines.
        """
        system_msg = next((m.content for m in messages if m.role == "system"), "")
        conversation = "\n".join(
            f"{m.role}: {m.content}" for m in messages if m.role != "system"
        )

        prompt = f"{system_msg}\n\n{conversation}" if system_msg else conversation

        result = await self._call_ollama(
            prompt,
            temperature=temperature,
            max_tokens=max_tokens or 256,  # Ollama's num_predict needs a concrete value
            **kwargs
        )

        return LLMResponse(
            content=result.get("response", ""),
            model=self.model,
            usage=None  # the generate endpoint reports no token usage
        )

    async def complete(
        self,
        prompt: str,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """Single-prompt completion, implemented by wrapping *prompt* as a user message."""
        messages = [Message(role="user", content=prompt)]
        return await self.chat(messages, temperature, max_tokens, **kwargs)
@@ -0,0 +1,58 @@
1
+ import os
2
+ from typing import List, Optional
3
+ from openai import AsyncOpenAI
4
+
5
+ from neuroagent.llm.base import LLMProvider, LLMResponse, Message
6
+
7
+
8
class OpenAIProvider(LLMProvider):
    """LLM provider that talks to the OpenAI chat-completions API."""

    def __init__(
        self,
        api_key: Optional[str] = None,
        model: str = "gpt-4",
        base_url: Optional[str] = None
    ):
        """
        Args:
            api_key: OpenAI key; falls back to the ``OPENAI_API_KEY``
                environment variable when omitted.
            model: Model name passed on every request.
            base_url: Optional alternative API endpoint.
        """
        resolved_key = api_key or os.getenv("OPENAI_API_KEY")
        super().__init__(api_key=resolved_key, model=model)

        self.client = AsyncOpenAI(
            api_key=resolved_key,
            base_url=base_url
        )

    async def chat(
        self,
        messages: List[Message],
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """Send *messages* to the chat-completions endpoint and wrap the reply."""
        payload = [{"role": m.role, "content": m.content} for m in messages]

        response = await self.client.chat.completions.create(
            model=self.model,
            messages=payload,
            temperature=temperature,
            max_tokens=max_tokens,
            **kwargs
        )

        # usage may be absent on some responses; report zeros in that case.
        usage = response.usage
        usage_counts = {
            "prompt_tokens": usage.prompt_tokens if usage else 0,
            "completion_tokens": usage.completion_tokens if usage else 0,
            "total_tokens": usage.total_tokens if usage else 0
        }

        first_choice = response.choices[0]
        return LLMResponse(
            content=first_choice.message.content or "",
            model=response.model,
            usage=usage_counts
        )

    async def complete(
        self,
        prompt: str,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs
    ) -> LLMResponse:
        """One-shot completion: wrap *prompt* as a single user message and chat."""
        return await self.chat(
            [Message(role="user", content=prompt)],
            temperature,
            max_tokens,
            **kwargs
        )
@@ -0,0 +1,5 @@
1
+ from neuroagent.memory.short_memory import ShortMemory
2
+ from neuroagent.memory.long_memory import LongMemory
3
+ from neuroagent.memory.vector_memory import VectorMemory
4
+
5
+ __all__ = ["ShortMemory", "LongMemory", "VectorMemory"]
@@ -0,0 +1,52 @@
1
+ import json
2
+ import os
3
+ from typing import List, Dict, Any, Optional
4
+ from datetime import datetime
5
+
6
+
7
class LongMemory:
    """Append-only long-term memory persisted as a JSON file on disk.

    Every mutation is written through to ``storage_path`` immediately, so
    the file always reflects the in-memory state.
    """

    def __init__(self, storage_path: Optional[str] = None):
        """
        Args:
            storage_path: JSON file used for persistence; defaults to
                ``.neuroagent_memory.json`` in the working directory.
        """
        self.storage_path = storage_path or ".neuroagent_memory.json"
        self._memory: List[Dict[str, Any]] = self._load()

    def _load(self) -> List[Dict[str, Any]]:
        """Load entries from disk, degrading to ``[]`` on any problem.

        A missing, unreadable, corrupt, or wrongly-shaped file yields an
        empty memory rather than an exception.
        """
        if os.path.exists(self.storage_path):
            try:
                with open(self.storage_path, "r") as f:
                    data = json.load(f)
            except (OSError, json.JSONDecodeError):
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                return []
            # Guard against a valid-JSON file that is not a list (e.g. "{}"),
            # which would break append() later.
            return data if isinstance(data, list) else []
        return []

    def _save(self):
        """Write the full memory list to disk as pretty-printed JSON."""
        with open(self.storage_path, "w") as f:
            json.dump(self._memory, f, indent=2)

    def save(self, content: str, metadata: Optional[Dict[str, Any]] = None):
        """Append an entry (content + timestamp + metadata) and persist it."""
        entry = {
            "content": content,
            # NOTE(review): naive local time; switch to an aware UTC timestamp
            # if entries may cross machines/timezones — confirm with callers.
            "timestamp": datetime.now().isoformat(),
            "metadata": metadata or {}
        }
        self._memory.append(entry)
        self._save()

    def get_all(self) -> List[Dict[str, Any]]:
        """Return every stored entry, oldest first."""
        return self._memory

    def get_by_date(self, date: str) -> List[Dict[str, Any]]:
        """Return entries whose ISO timestamp starts with *date* (e.g. '2024-01-31')."""
        return [e for e in self._memory if e["timestamp"].startswith(date)]

    def search(self, query: str) -> List[Dict[str, Any]]:
        """Case-insensitive substring search over entry contents."""
        query_lower = query.lower()
        return [
            entry for entry in self._memory
            if query_lower in entry["content"].lower()
        ]

    def clear(self):
        """Delete all entries and persist the now-empty list."""
        self._memory = []
        self._save()

    def to_messages(self) -> List[Dict[str, str]]:
        """Render the 10 most recent entries as system-role chat messages."""
        return [{"role": "system", "content": e["content"]} for e in self._memory[-10:]]
@@ -0,0 +1,39 @@
1
+ from typing import List, Dict, Any, Optional
2
+ from datetime import datetime
3
+ from collections import deque
4
+
5
+
6
class ShortMemory:
    """Bounded, in-process conversation memory.

    Backed by a deque with a fixed maxlen, so the oldest entry is evicted
    automatically once capacity is reached. Nothing is persisted.
    """

    def __init__(self, max_items: int = 100):
        """
        Args:
            max_items: Maximum number of entries retained at once.
        """
        self.max_items = max_items
        self._memory = deque(maxlen=max_items)

    def save(self, content: str, metadata: Optional[Dict[str, Any]] = None):
        """Record *content* with the current timestamp and optional metadata."""
        self._memory.append({
            "content": content,
            "timestamp": datetime.now().isoformat(),
            "metadata": metadata or {}
        })

    def get_recent(self, n: int = 10) -> List[Dict[str, Any]]:
        """Return up to the *n* newest entries, oldest first."""
        snapshot = list(self._memory)
        return snapshot[-n:]

    def get_all(self) -> List[Dict[str, Any]]:
        """Return all entries, oldest first."""
        return list(self._memory)

    def clear(self):
        """Discard every stored entry."""
        self._memory.clear()

    def search(self, query: str) -> List[Dict[str, Any]]:
        """Case-insensitive substring search over entry contents."""
        needle = query.lower()
        hits = []
        for entry in self._memory:
            if needle in entry["content"].lower():
                hits.append(entry)
        return hits

    def to_messages(self) -> List[Dict[str, str]]:
        """Render entries as chat messages, alternating user/assistant roles by position."""
        roles = ("user", "assistant")
        return [
            {"role": roles[idx % 2], "content": entry["content"]}
            for idx, entry in enumerate(self._memory)
        ]