mem-llm 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,129 @@
+ #!/usr/bin/env python3
+ """
+ Web UI Launcher for Mem-LLM
+ Starts the API server and opens the Web UI in the browser.
+ """
+
+ import sys
+ import time
+ import webbrowser
+ import requests
+ import threading
+
+ def check_backend_available():
+     """Check if Ollama or LM Studio is available."""
+     backends = []
+
+     # Check Ollama
+     try:
+         response = requests.get("http://localhost:11434/api/tags", timeout=2)
+         if response.status_code == 200:
+             backends.append("Ollama (http://localhost:11434)")
+     except:
+         pass
+
+     # Check LM Studio
+     try:
+         response = requests.get("http://localhost:1234/v1/models", timeout=2)
+         if response.status_code == 200:
+             backends.append("LM Studio (http://localhost:1234)")
+     except:
+         pass
+
+     return backends
+
+ def check_api_ready(url="http://localhost:8000/api/v1/health", timeout=30):
+     """Wait for API server to be ready."""
+     start_time = time.time()
+     while time.time() - start_time < timeout:
+         try:
+             response = requests.get(url, timeout=1)
+             if response.status_code == 200:
+                 return True
+         except:
+             pass
+         time.sleep(0.5)
+     return False
+
+ def start_api_server():
+     """Start the FastAPI server in a subprocess."""
+     try:
+         # Import here to avoid circular imports
+         from mem_llm.api_server import app
+         import uvicorn
+
+         # Run server in a thread
+         def run_server():
+             uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
+
+         server_thread = threading.Thread(target=run_server, daemon=True)
+         server_thread.start()
+
+         return server_thread
+     except Exception as e:
+         print(f"āŒ Failed to start API server: {e}")
+         return None
+
+ def main():
+     """Main entry point for the web launcher."""
+     print("šŸš€ Mem-LLM Web UI Launcher")
+     print("=" * 50)
+
+     # Check available backends
+     print("\nšŸ” Checking available backends...")
+     backends = check_backend_available()
+
+     if backends:
+         print("āœ… Available backends:")
+         for backend in backends:
+             print(f" - {backend}")
+     else:
+         print("āš ļø No local backends detected!")
+         print(" Please start Ollama or LM Studio first:")
+         print(" - Ollama: https://ollama.ai")
+         print(" - LM Studio: https://lmstudio.ai")
+         response = input("\n Continue anyway? (y/n): ")
+         if response.lower() != 'y':
+             sys.exit(0)
+
+     # Start API server
+     print("\n🌐 Starting API server...")
+     server_thread = start_api_server()
+
+     if not server_thread:
+         print("āŒ Failed to start API server!")
+         sys.exit(1)
+
+     # Wait for server to be ready
+     print("ā³ Waiting for API server to be ready...")
+     if not check_api_ready():
+         print("āŒ API server failed to start within 30 seconds!")
+         sys.exit(1)
+
+     print("āœ… API server is ready!")
+
+     # Open browser
+     web_url = "http://localhost:8000/"
+     print(f"\n🌐 Opening Web UI at {web_url}")
+     webbrowser.open(web_url)
+
+     print("\n" + "=" * 50)
+     print("āœ… Mem-LLM Web UI is running!")
+     print(" - Chat: http://localhost:8000/")
+     print(" - Memory: http://localhost:8000/memory")
+     print(" - Metrics: http://localhost:8000/metrics")
+     print(" - API Docs: http://localhost:8000/docs")
+     print("\n Press Ctrl+C to stop the server.")
+     print("=" * 50)
+
+     # Keep the main thread alive
+     try:
+         while True:
+             time.sleep(1)
+     except KeyboardInterrupt:
+         print("\n\nšŸ‘‹ Shutting down...")
+         sys.exit(0)
+
+ if __name__ == "__main__":
+     main()
+
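The launcher above probes Ollama and LM Studio on their default local ports, serves `mem_llm.api_server:app` on port 8000 in a daemon thread, and polls `/api/v1/health` before opening the browser. For a quick standalone check of the same endpoints, a minimal sketch follows; the URLs and the 2-second timeout are copied from the launcher, while the `PROBES` mapping and `probe()` helper are illustrative names, not part of the package.

```python
#!/usr/bin/env python3
# Standalone sketch of the probes the launcher performs.
# Endpoints and timeout are taken from start_web_ui.py above;
# adjust them if your ports differ.
import requests

PROBES = {
    "Ollama": "http://localhost:11434/api/tags",
    "LM Studio": "http://localhost:1234/v1/models",
    "Mem-LLM API": "http://localhost:8000/api/v1/health",
}

def probe(url: str) -> bool:
    """Return True if the endpoint answers HTTP 200 within 2 seconds."""
    try:
        return requests.get(url, timeout=2).status_code == 200
    except requests.RequestException:
        return False

if __name__ == "__main__":
    for name, url in PROBES.items():
        status = "up" if probe(url) else "down"
        print(f"{name:12} {status:5} {url}")
```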
@@ -0,0 +1,44 @@
+ # Mem-LLM Web UI
+
+ Modern web interface for Mem-LLM with streaming support, memory management, and metrics dashboard.
+
+ ## šŸ“„ Pages
+
+ 1. **šŸ’¬ Chat (index.html)** - Main chat interface with real-time streaming
+ 2. **🧠 Memory (memory.html)** - Memory management and search
+ 3. **šŸ“Š Metrics (metrics.html)** - System metrics and statistics
+
+ ## šŸš€ Usage
+
+ ```bash
+ # Install mem-llm with API support
+ pip install mem-llm[api]
+
+ # Launch Web UI (recommended)
+ mem-llm-web
+
+ # Or use launcher script
+ python start_web_ui.py
+ ```
+
+ ## šŸ“‹ Requirements
+
+ - Python 3.8+
+ - FastAPI
+ - Uvicorn
+ - WebSockets
+
+ ## šŸ”§ Configuration
+
+ Configure backend and model in the Web UI sidebar, or edit `api_server.py` defaults.
+
+ ## šŸ“š More Info
+
+ - [Main README](../README.md)
+ - [API Docs](http://localhost:8000/docs)
+ - [Examples](../../examples/)
+
+ ## šŸ“„ License
+
+ MIT License
+
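The README's entry points (`mem-llm-web` and `python start_web_ui.py`) wrap the FastAPI app in a browser launcher. If only the API is wanted, the app can be served directly; the sketch below mirrors what the launcher does internally, with the module path, host, port, and log level copied from it, and assumes the `mem-llm[api]` extra (FastAPI + Uvicorn) is installed.

```python
# Serve the Mem-LLM API without opening a browser.
# Module path, host, port, and log level are taken from start_web_ui.py;
# this is a sketch, not an entry point shipped by the package.
import uvicorn
from mem_llm.api_server import app

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
```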
@@ -0,0 +1,7 @@
+ """
+ Mem-LLM Web UI
+ Modern web interface for Mem-LLM with real-time streaming support.
+ """
+
+ __all__ = []
+