mem_llm-2.0.0-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- mem_llm/__init__.py +98 -0
- mem_llm/api_server.py +595 -0
- mem_llm/base_llm_client.py +201 -0
- mem_llm/builtin_tools.py +311 -0
- mem_llm/cli.py +254 -0
- mem_llm/clients/__init__.py +22 -0
- mem_llm/clients/lmstudio_client.py +393 -0
- mem_llm/clients/ollama_client.py +354 -0
- mem_llm/config.yaml.example +52 -0
- mem_llm/config_from_docs.py +180 -0
- mem_llm/config_manager.py +231 -0
- mem_llm/conversation_summarizer.py +372 -0
- mem_llm/data_export_import.py +640 -0
- mem_llm/dynamic_prompt.py +298 -0
- mem_llm/knowledge_loader.py +88 -0
- mem_llm/llm_client.py +225 -0
- mem_llm/llm_client_factory.py +260 -0
- mem_llm/logger.py +129 -0
- mem_llm/mem_agent.py +1611 -0
- mem_llm/memory_db.py +612 -0
- mem_llm/memory_manager.py +321 -0
- mem_llm/memory_tools.py +253 -0
- mem_llm/prompt_security.py +304 -0
- mem_llm/response_metrics.py +221 -0
- mem_llm/retry_handler.py +193 -0
- mem_llm/thread_safe_db.py +301 -0
- mem_llm/tool_system.py +429 -0
- mem_llm/vector_store.py +278 -0
- mem_llm/web_launcher.py +129 -0
- mem_llm/web_ui/README.md +44 -0
- mem_llm/web_ui/__init__.py +7 -0
- mem_llm/web_ui/index.html +641 -0
- mem_llm/web_ui/memory.html +569 -0
- mem_llm/web_ui/metrics.html +75 -0
- mem_llm-2.0.0.dist-info/METADATA +667 -0
- mem_llm-2.0.0.dist-info/RECORD +39 -0
- mem_llm-2.0.0.dist-info/WHEEL +5 -0
- mem_llm-2.0.0.dist-info/entry_points.txt +3 -0
- mem_llm-2.0.0.dist-info/top_level.txt +1 -0
mem_llm/web_launcher.py
ADDED
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+"""
+Web UI Launcher for Mem-LLM
+Starts the API server and opens the Web UI in the browser.
+"""
+
+import sys
+import time
+import webbrowser
+import requests
+import threading
+
+def check_backend_available():
+    """Check if Ollama or LM Studio is available."""
+    backends = []
+
+    # Check Ollama
+    try:
+        response = requests.get("http://localhost:11434/api/tags", timeout=2)
+        if response.status_code == 200:
+            backends.append("Ollama (http://localhost:11434)")
+    except:
+        pass
+
+    # Check LM Studio
+    try:
+        response = requests.get("http://localhost:1234/v1/models", timeout=2)
+        if response.status_code == 200:
+            backends.append("LM Studio (http://localhost:1234)")
+    except:
+        pass
+
+    return backends
+
+def check_api_ready(url="http://localhost:8000/api/v1/health", timeout=30):
+    """Wait for API server to be ready."""
+    start_time = time.time()
+    while time.time() - start_time < timeout:
+        try:
+            response = requests.get(url, timeout=1)
+            if response.status_code == 200:
+                return True
+        except:
+            pass
+        time.sleep(0.5)
+    return False
+
+def start_api_server():
+    """Start the FastAPI server in a subprocess."""
+    try:
+        # Import here to avoid circular imports
+        from mem_llm.api_server import app
+        import uvicorn
+
+        # Run server in a thread
+        def run_server():
+            uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
+
+        server_thread = threading.Thread(target=run_server, daemon=True)
+        server_thread.start()
+
+        return server_thread
+    except Exception as e:
+        print(f"Failed to start API server: {e}")
+        return None
+
+def main():
+    """Main entry point for the web launcher."""
+    print("Mem-LLM Web UI Launcher")
+    print("=" * 50)
+
+    # Check available backends
+    print("\nChecking available backends...")
+    backends = check_backend_available()
+
+    if backends:
+        print("Available backends:")
+        for backend in backends:
+            print(f"  - {backend}")
+    else:
+        print("No local backends detected!")
+        print("  Please start Ollama or LM Studio first:")
+        print("  - Ollama: https://ollama.ai")
+        print("  - LM Studio: https://lmstudio.ai")
+        response = input("\nContinue anyway? (y/n): ")
+        if response.lower() != 'y':
+            sys.exit(0)
+
+    # Start API server
+    print("\nStarting API server...")
+    server_thread = start_api_server()
+
+    if not server_thread:
+        print("Failed to start API server!")
+        sys.exit(1)
+
+    # Wait for server to be ready
+    print("Waiting for API server to be ready...")
+    if not check_api_ready():
+        print("API server failed to start within 30 seconds!")
+        sys.exit(1)
+
+    print("API server is ready!")
+
+    # Open browser
+    web_url = "http://localhost:8000/"
+    print(f"\nOpening Web UI at {web_url}")
+    webbrowser.open(web_url)
+
+    print("\n" + "=" * 50)
+    print("Mem-LLM Web UI is running!")
+    print("  - Chat: http://localhost:8000/")
+    print("  - Memory: http://localhost:8000/memory")
+    print("  - Metrics: http://localhost:8000/metrics")
+    print("  - API Docs: http://localhost:8000/docs")
+    print("\nPress Ctrl+C to stop the server.")
+    print("=" * 50)
+
+    # Keep the main thread alive
+    try:
+        while True:
+            time.sleep(1)
+    except KeyboardInterrupt:
+        print("\n\nShutting down...")
+        sys.exit(0)
+
+if __name__ == "__main__":
+    main()
+
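For readers who want the server without the interactive launcher, the imports above suggest it can also be started directly with uvicorn. The sketch below is based only on what `web_launcher.py` itself does (the `app` object in `mem_llm.api_server`, port 8000, and the `/api/v1/health` endpoint); it is a minimal illustration, not an officially documented entry point of the package.

```python
# Minimal sketch: start the Mem-LLM API server in a background thread and
# poll its health endpoint, mirroring web_launcher.py without the browser step.
import threading
import time

import requests
import uvicorn

from mem_llm.api_server import app  # FastAPI app used by the launcher


def wait_for_health(url="http://localhost:8000/api/v1/health", timeout=30.0):
    """Poll the health endpoint until it answers 200 or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=1).status_code == 200:
                return True
        except requests.RequestException:
            pass
        time.sleep(0.5)
    return False


if __name__ == "__main__":
    # Run uvicorn in a daemon thread, as the launcher does.
    threading.Thread(
        target=lambda: uvicorn.run(app, host="127.0.0.1", port=8000, log_level="info"),
        daemon=True,
    ).start()

    if wait_for_health():
        print("API server is up at http://localhost:8000/")
        try:
            while True:  # keep the process alive until Ctrl+C
                time.sleep(1)
        except KeyboardInterrupt:
            pass
    else:
        print("API server did not become ready within 30 seconds")
```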
mem_llm/web_ui/README.md
ADDED
@@ -0,0 +1,44 @@
+# Mem-LLM Web UI
+
+Modern web interface for Mem-LLM with streaming support, memory management, and metrics dashboard.
+
+## Pages
+
+1. **Chat (index.html)** - Main chat interface with real-time streaming
+2. **Memory (memory.html)** - Memory management and search
+3. **Metrics (metrics.html)** - System metrics and statistics
+
+## Usage
+
+```bash
+# Install mem-llm with API support
+pip install mem-llm[api]
+
+# Launch Web UI (recommended)
+mem-llm-web
+
+# Or use launcher script
+python start_web_ui.py
+```
+
+## Requirements
+
+- Python 3.8+
+- FastAPI
+- Uvicorn
+- WebSockets
+
+## Configuration
+
+Configure backend and model in the Web UI sidebar, or edit `api_server.py` defaults.
+
+## More Info
+
+- [Main README](../README.md)
+- [API Docs](http://localhost:8000/docs)
+- [Examples](../../examples/)
+
+## License
+
+MIT License
+
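The page URLs in this README match the ones printed by the launcher's startup banner. As a rough smoke test (assuming the server was started with `mem-llm-web` on the default port 8000), one could verify that each documented page and the health endpoint respond:

```python
# Rough smoke test: confirm the Web UI pages and API docs respond once
# mem-llm-web is running. URLs are taken from the launcher output; adjust
# the port if you changed the defaults.
import requests

PAGES = {
    "Chat": "http://localhost:8000/",
    "Memory": "http://localhost:8000/memory",
    "Metrics": "http://localhost:8000/metrics",
    "API Docs": "http://localhost:8000/docs",
    "Health": "http://localhost:8000/api/v1/health",
}

for name, url in PAGES.items():
    try:
        status = requests.get(url, timeout=2).status_code
        print(f"{name:8s} {url} -> {status}")
    except requests.RequestException as exc:
        print(f"{name:8s} {url} -> unreachable ({exc})")
```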