mem-llm 1.0.2__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This release of mem-llm has been flagged as potentially problematic.

Files changed (41)
  1. mem_llm/__init__.py +71 -8
  2. mem_llm/api_server.py +595 -0
  3. mem_llm/base_llm_client.py +201 -0
  4. mem_llm/builtin_tools.py +311 -0
  5. mem_llm/builtin_tools_async.py +170 -0
  6. mem_llm/cli.py +254 -0
  7. mem_llm/clients/__init__.py +22 -0
  8. mem_llm/clients/lmstudio_client.py +393 -0
  9. mem_llm/clients/ollama_client.py +354 -0
  10. mem_llm/config.yaml.example +1 -1
  11. mem_llm/config_from_docs.py +1 -1
  12. mem_llm/config_manager.py +5 -3
  13. mem_llm/conversation_summarizer.py +372 -0
  14. mem_llm/data_export_import.py +640 -0
  15. mem_llm/dynamic_prompt.py +298 -0
  16. mem_llm/llm_client.py +77 -14
  17. mem_llm/llm_client_factory.py +260 -0
  18. mem_llm/logger.py +129 -0
  19. mem_llm/mem_agent.py +1178 -87
  20. mem_llm/memory_db.py +290 -59
  21. mem_llm/memory_manager.py +60 -1
  22. mem_llm/prompt_security.py +304 -0
  23. mem_llm/response_metrics.py +221 -0
  24. mem_llm/retry_handler.py +193 -0
  25. mem_llm/thread_safe_db.py +301 -0
  26. mem_llm/tool_system.py +537 -0
  27. mem_llm/vector_store.py +278 -0
  28. mem_llm/web_launcher.py +129 -0
  29. mem_llm/web_ui/README.md +44 -0
  30. mem_llm/web_ui/__init__.py +7 -0
  31. mem_llm/web_ui/index.html +641 -0
  32. mem_llm/web_ui/memory.html +569 -0
  33. mem_llm/web_ui/metrics.html +75 -0
  34. mem_llm-2.1.0.dist-info/METADATA +753 -0
  35. mem_llm-2.1.0.dist-info/RECORD +40 -0
  36. {mem_llm-1.0.2.dist-info → mem_llm-2.1.0.dist-info}/WHEEL +1 -1
  37. mem_llm-2.1.0.dist-info/entry_points.txt +3 -0
  38. mem_llm/prompt_templates.py +0 -244
  39. mem_llm-1.0.2.dist-info/METADATA +0 -382
  40. mem_llm-1.0.2.dist-info/RECORD +0 -15
  41. {mem_llm-1.0.2.dist-info → mem_llm-2.1.0.dist-info}/top_level.txt +0 -0
mem_llm-2.1.0.dist-info/RECORD
@@ -0,0 +1,40 @@
+ mem_llm/__init__.py,sha256=uG8swMB5oVhnI-vPhfn5aWWL4BIcWNZv5tolA3CDkAk,3133
+ mem_llm/api_server.py,sha256=n2Lu9lj_RSMRO-p81AnFB4fS0gbnshFKqfWtL3kYReM,20874
+ mem_llm/base_llm_client.py,sha256=RbX9QVdHGT0TRoCTGB_qyMzjosg7Q54L1eLdzzj7pEE,6292
+ mem_llm/builtin_tools.py,sha256=HC0pu3JoBv_yqI2fIJTIsmGNtzGQthUbjlraKoyFyu8,8567
+ mem_llm/builtin_tools_async.py,sha256=kPHVgvshfn6mwasYmUDgIfdrfvXLeUr5POs7IexAfg4,4686
+ mem_llm/cli.py,sha256=CV1BprDNPIPZrMLahW0WPrZ03NwoxW46QpJO2FnPqBQ,8658
+ mem_llm/config.yaml.example,sha256=Bo2hfPC9ltqnCyUdvM-XpN5gigTlxoN-5yr6X2w1saM,913
+ mem_llm/config_from_docs.py,sha256=uB1GEQqhzTWLKumgz4jHG65QDlExUHOgsdj7rS7W0lg,4970
+ mem_llm/config_manager.py,sha256=xANKAinOO8w_HGeeS7MqMzTh18H9sa078sRrFfHbOG8,7251
+ mem_llm/conversation_summarizer.py,sha256=yCG2pKrAJf7xjaG6DPXL0i9eesMZnnzjKTpuyLHMTPQ,12509
+ mem_llm/data_export_import.py,sha256=gQIdD0hBY23qcRvx139yE15RWHXPinL_EoRNY7iabj0,22592
+ mem_llm/dynamic_prompt.py,sha256=8H99QVDRJSVtGb_o4sdEPnG1cJWuer3KiD-nuL1srTA,10244
+ mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
+ mem_llm/llm_client.py,sha256=GvOwzlTJ2ogpe4y6BmFPpXxJNN1G7B6cgeGUc_0Ngy0,8705
+ mem_llm/llm_client_factory.py,sha256=ncwxr3T3aqZVCiGw3GpMRq8kIaqf73BIxN9gvRTo2MA,8728
+ mem_llm/logger.py,sha256=dZUmhGgFXtDsDBU_D4kZlJeMp6k-VNPaBcyTt7rZYKE,4507
+ mem_llm/mem_agent.py,sha256=rXVdSY3XaZ7sV4-elXWN4J7rUSFYu4Jd5jWQeOCDFcc,68074
+ mem_llm/memory_db.py,sha256=yY_afim1Rpk3mOz-qI5WvDDAwWoVd-NucBMBLVUNpwg,21711
+ mem_llm/memory_manager.py,sha256=BtzI1o-NYZXMkZHtc36xEZizgNn9fAu6cBkGzNXa-uI,10373
+ mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
+ mem_llm/prompt_security.py,sha256=ehAi6aLiXj0gFFhpyjwEr8LentSTJwOQDLbINV7SaVM,9960
+ mem_llm/response_metrics.py,sha256=nMegWV7brNOmptjxGJfYEqRKvAj_302MIw8Ky1PzEy8,7912
+ mem_llm/retry_handler.py,sha256=z5ZcSQKbvVeNK7plagTLorvOeoYgRpQcsX3PpNqUjKM,6389
+ mem_llm/thread_safe_db.py,sha256=Fq-wSn4ua1qiR6M4ZTIy7UT1IlFj5xODNExgub1blbU,10328
+ mem_llm/tool_system.py,sha256=o8IGWUP1xygkq5jtZKeB9whdyq-vO3YSl_blVmlw-D4,20371
+ mem_llm/vector_store.py,sha256=dDK2dyiu0WmfyE5vrAJywhEyCGf7nokEu9DxAE7MRp0,10863
+ mem_llm/web_launcher.py,sha256=mEE1Wh-2u-xqgtkRW2i-zG0tizDIyJCo9BX942kA73M,3722
+ mem_llm/clients/__init__.py,sha256=mDrflLaozDeRvmgq7eR30eOTIm3Au_gmmGdHLroeiAI,381
+ mem_llm/clients/lmstudio_client.py,sha256=e1WZUtVYxQHvks-cun2bcEtbhb6XyX2_6p3a1gVQEcE,14777
+ mem_llm/clients/ollama_client.py,sha256=ZPxNcVndOhF-Ftn2cal_c5YyI-hFoXNt53oSV9tniSA,13170
+ mem_llm/web_ui/README.md,sha256=NrL8ZoRuQ_VC7srjy95RFkUDEi9gq3SCVoOp68rDZe8,852
+ mem_llm/web_ui/__init__.py,sha256=n9FLiBMOguoQ7k9ZAIK4-uL-VeEwl99UMEpFMU6zOzM,105
+ mem_llm/web_ui/index.html,sha256=sRwlSpiOXU4BrBDHE2eMSO6tGa2c_JS7mAdefZusAcU,20243
+ mem_llm/web_ui/memory.html,sha256=9-j88P8wO5VaoeFx9l8VeOZIn-oC_HwygFQ_7-Exohk,17969
+ mem_llm/web_ui/metrics.html,sha256=1uwyBKsbqkBrEJAHCH1tHDfISd3DbhJu1eOxDqqUviw,4334
+ mem_llm-2.1.0.dist-info/METADATA,sha256=OEZ2n0YaFrHumYTQ35urEDTEobqq7vHDpHT3gcjKFN8,24867
+ mem_llm-2.1.0.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+ mem_llm-2.1.0.dist-info/entry_points.txt,sha256=Ywhb5wtj-a_RtuZPzWW5XMSorRI-qQQ-ISTabYIldwA,85
+ mem_llm-2.1.0.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
+ mem_llm-2.1.0.dist-info/RECORD,,
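
Each RECORD entry has the form `path,sha256=<digest>,<size-in-bytes>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing `=` padding stripped. A minimal sketch for re-checking one entry against an installed copy of the package (the path is taken from the listing above; run it from the directory where the wheel was installed):

```python
# Recompute the RECORD-style digest of one installed file and compare it
# with the value listed above (here: mem_llm/__init__.py in 2.1.0).
import base64
import hashlib

def record_digest(path: str) -> str:
    with open(path, "rb") as fh:
        raw = hashlib.sha256(fh.read()).digest()
    # RECORD stores urlsafe base64 without '=' padding
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

print(record_digest("mem_llm/__init__.py"))
# Expected for 2.1.0: sha256=uG8swMB5oVhnI-vPhfn5aWWL4BIcWNZv5tolA3CDkAk
```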
{mem_llm-1.0.2.dist-info → mem_llm-2.1.0.dist-info}/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.9.0)
+ Generator: setuptools (76.1.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

mem_llm-2.1.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+ [console_scripts]
+ mem-llm = mem_llm.cli:main
+ mem-llm-web = mem_llm.web_launcher:main
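
The new entry_points.txt maps the `mem-llm` and `mem-llm-web` commands to `mem_llm.cli:main` and `mem_llm.web_launcher:main`; after installation, pip generates executables on PATH that simply import and call those `main()` functions. For reference, a setuptools declaration that would emit exactly this file looks roughly like the sketch below; only the `entry_points` mapping is taken from the diff, the surrounding `setup()` arguments are illustrative.

```python
# Sketch of a setup() call that would produce the entry_points.txt shown above.
from setuptools import find_packages, setup

setup(
    name="mem-llm",
    version="2.1.0",
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            "mem-llm = mem_llm.cli:main",               # CLI entry point
            "mem-llm-web = mem_llm.web_launcher:main",  # web UI launcher
        ],
    },
)
```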
mem_llm/prompt_templates.py
@@ -1,244 +0,0 @@
- """
- System Prompt Templates and Management
- Customizable prompt templates for different scenarios
- """
-
- from typing import Dict, List, Optional
- from datetime import datetime
-
-
- class PromptTemplate:
-     """System prompt template"""
-
-     def __init__(self, name: str, base_prompt: str,
-                  variables: Optional[Dict[str, str]] = None):
-         """
-         Args:
-             name: Template name
-             base_prompt: Base prompt text (can contain variables in {variable} format)
-             variables: Default variable values
-         """
-         self.name = name
-         self.base_prompt = base_prompt
-         self.variables = variables or {}
-
-     def render(self, **kwargs) -> str:
-         """
-         Fill template with variables
-
-         Args:
-             **kwargs: Variable values
-
-         Returns:
-             Generated prompt
-         """
-         merged_vars = {**self.variables, **kwargs}
-         return self.base_prompt.format(**merged_vars)
-
-
- class PromptManager:
-     """Manages prompt templates"""
-
-     def __init__(self):
-         self.templates: Dict[str, PromptTemplate] = {}
-         self._load_default_templates()
-
-     def _load_default_templates(self) -> None:
-         """Load default templates"""
-
-         # 1. Customer Service
-         self.add_template(
-             name="customer_service",
-             base_prompt="""You are a professional customer service assistant for {company_name} company.
-
- Your task:
- - Approach customers kindly and helpfully
- - Remember past interactions and create context
- - Solve problems quickly and effectively
- - Redirect to human representative when necessary
-
- Communication Style:
- - Use {tone} tone
- - Give short and clear answers
- - Show empathy
- - Be professional
-
- Important Rules:
- - Never lie
- - Don't speculate on topics you don't know
- - Keep customer satisfaction in the foreground
- - Ask if there's any other help at the end of each response
-
- You are currently working on {current_date}.
- """,
-             variables={
-                 "company_name": "Our Company",
-                 "tone": "friendly and professional",
-                 "current_date": datetime.now().strftime("%Y-%m-%d")
-             }
-         )
-
-         # 2. Technical Support
-         self.add_template(
-             name="tech_support",
-             base_prompt="""You are a technical support expert for {product_name}.
-
- Your Expertise Areas:
- - Problem diagnosis and resolution
- - Step-by-step guidance
- - Technical documentation
- - Debugging
-
- Approach:
- - First understand the problem completely
- - Start with simple solutions
- - Explain step by step
- - Explain technical terms when necessary
-
- User Level: {user_level}
-
- Response Format:
- 1. Summarize the problem
- 2. List possible causes
- 3. Provide solution steps
- 4. Check results
-
- Log level: {log_level}
- """,
-             variables={
-                 "product_name": "Our Product",
-                 "user_level": "intermediate level",
-                 "log_level": "detailed"
-             }
-         )
-
-         # 3. Personal Assistant
-         self.add_template(
-             name="personal_assistant",
-             base_prompt="""You are a personal digital assistant for {user_name}.
-
- Your tasks:
- - Help with daily planning
- - Reminders
- - Gathering and summarizing information
- - Suggestions and recommendations
-
- Personalization:
- - Learn the user's preferences
- - Remember their habits
- - Make proactive suggestions
- - Order by priority
-
- Working Hours: {work_hours}
- Time Zone: {timezone}
- Preferred Language: {language}
-
- Approach:
- - Efficiency-focused
- - Minimal and clear
- - Proactive
- - Flexible
-
- Data Privacy: {privacy_level}
- """,
-             variables={
-                 "user_name": "User",
-                 "work_hours": "09:00-18:00",
-                 "timezone": "Europe/Istanbul",
-                 "language": "Turkish",
-                 "privacy_level": "high"
-             }
-         )
-
-         # 4. Business Customer Service
-         self.add_template(
-             name="business_customer_service",
-             base_prompt="""You are the corporate customer service assistant for {company_name}.
-
- Corporate Customer Approach:
- - Professional and solution-oriented
- - Fast responses within SLA commitments
- - In-depth support for technical issues
- - Multi-channel integration
-
- Company Information:
- - Founded: {founded_year}
- - Employee Count: {employee_count}
- - Industry: {industry}
-
- Priority Level: {priority_level}
- SLA Window: {sla_hours} hours
- """,
-             variables={
-                 "company_name": "Corporate Company",
-                 "founded_year": "2010",
-                 "employee_count": "500+",
-                 "industry": "Technology",
-                 "priority_level": "high",
-                 "sla_hours": "4"
-             }
-         )
-
-     def add_template(self, name: str, base_prompt: str,
-                      variables: Optional[Dict[str, str]] = None) -> None:
-         """
-         Add new template
-
-         Args:
-             name: Template name
-             base_prompt: Prompt text
-             variables: Default variables
-         """
-         self.templates[name] = PromptTemplate(name, base_prompt, variables)
-
-     def get_template(self, name: str) -> Optional[PromptTemplate]:
-         """
-         Get template
-
-         Args:
-             name: Template name
-
-         Returns:
-             PromptTemplate or None
-         """
-         return self.templates.get(name)
-
-     def render_prompt(self, template_name: str, **kwargs) -> str:
-         """
-         Render template
-
-         Args:
-             template_name: Template name
-             **kwargs: Variable values
-
-         Returns:
-             Generated prompt
-         """
-         template = self.get_template(template_name)
-         if template:
-             return template.render(**kwargs)
-         raise ValueError(f"Template '{template_name}' not found")
-
-     def list_templates(self) -> List[str]:
-         """List available templates"""
-         return list(self.templates.keys())
-
-     def get_template_variables(self, template_name: str) -> Dict[str, str]:
-         """
-         Return template variables
-
-         Args:
-             template_name: Template name
-
-         Returns:
-             Variables dictionary
-         """
-         template = self.get_template(template_name)
-         if template:
-             return template.variables.copy()
-         return {}
-
-
- # Global instance for ready use
- prompt_manager = PromptManager()
-
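
For context, the removed module exposed a `PromptTemplate`/`PromptManager` pair plus a module-level `prompt_manager` singleton; typical 1.0.x usage looked like the sketch below, reconstructed from the code above. The 2.1.0 file list suggests `dynamic_prompt.py` takes over this role, though that replacement is not shown in this hunk.

```python
# Usage of the removed 1.0.x prompt_templates API (no longer available in 2.1.0).
from mem_llm.prompt_templates import prompt_manager

print(prompt_manager.list_templates())
# ['customer_service', 'tech_support', 'personal_assistant', 'business_customer_service']

prompt = prompt_manager.render_prompt(
    "customer_service",
    company_name="Acme Support",  # overrides the "Our Company" default
    tone="formal",                # overrides "friendly and professional"
)
print(prompt)

# Defaults for a template can be inspected before rendering:
print(prompt_manager.get_template_variables("tech_support"))
```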
mem_llm-1.0.2.dist-info/METADATA
@@ -1,382 +0,0 @@
- Metadata-Version: 2.4
- Name: mem-llm
- Version: 1.0.2
- Summary: Memory-enabled AI assistant with local LLM support
- Home-page: https://github.com/emredeveloper/Mem-LLM
- Author: C. Emre Karataş
- Author-email: karatasqemre@gmail.com
- Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
- Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
- Keywords: llm ai memory agent chatbot ollama local
- Classifier: Development Status :: 4 - Beta
- Classifier: Intended Audience :: Developers
- Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- Requires-Dist: requests>=2.31.0
- Requires-Dist: pyyaml>=6.0.1
- Provides-Extra: dev
- Requires-Dist: pytest>=7.4.0; extra == "dev"
- Requires-Dist: black>=23.7.0; extra == "dev"
- Requires-Dist: flake8>=6.1.0; extra == "dev"
- Dynamic: author
- Dynamic: author-email
- Dynamic: classifier
- Dynamic: description
- Dynamic: description-content-type
- Dynamic: home-page
- Dynamic: keywords
- Dynamic: project-url
- Dynamic: provides-extra
- Dynamic: requires-dist
- Dynamic: requires-python
- Dynamic: summary
-
- # 🧠 Mem-Agent: Memory-Enabled Mini Assistant
-
- <div align="center">
-
- [![Python](https://img.shields.io/badge/Python-3.8%2B-blue.svg)](https://www.python.org/downloads/)
- [![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
- [![Ollama](https://img.shields.io/badge/Ollama-Compatible-orange.svg)](https://ollama.ai/)
-
- **A local AI assistant that remembers user interactions and responds with context awareness using a lightweight 4-billion parameter LLM.**
-
- [Quick Start](#-quick-start) • [Features](#-features) • [Documentation](#-documentation) • [Examples](#-usage-examples)
-
- </div>
-
- ---
-
- ## 🎯 Why Mem-Agent?
-
- Most Large Language Models (LLMs) treat every conversation as "new" and don't remember past interactions. **Mem-Agent** uses a small locally-running model to:
-
- ✅ **Remember user history** - Separate memory for each customer/user
- ✅ **Context awareness** - Responds based on previous conversations
- ✅ **Fully local** - No internet connection required
- ✅ **Lightweight & fast** - Only 2.5 GB model size
- ✅ **Easy integration** - Get started with 3 lines of code
-
- ## 🚀 Quick Start
-
- ### 1. Install Ollama
-
- ```bash
- # Windows/Mac/Linux: https://ollama.ai/download
- curl https://ollama.ai/install.sh | sh
-
- # Start the service
- ollama serve
- ```
-
- ### 2. Download Model
-
- ```bash
- ollama pull granite4:tiny-h
- ```
-
- ### 3. Use Mem-Agent
-
- ```python
- from mem_llm import MemAgent
-
- # Create agent
- agent = MemAgent(model="granite4:tiny-h")
-
- # System check
- status = agent.check_setup()
- if status['status'] == 'ready':
-     print("✅ System ready!")
- else:
-     print("❌ Error:", status)
-
- # Set user
- agent.set_user("user123")
-
- # First conversation
- response = agent.chat("Hello, my name is Ali")
- print(response)
-
- # Second conversation - It remembers me!
- response = agent.chat("Do you remember my name?")
- print(response)
- ```
-
- ## 📚 Example Scripts
-
- ### 1. Simple Test
-
- ```bash
- python examples/example_simple.py
- ```
-
- ### 2. Customer Service Simulation
-
- ```bash
- python examples/example_customer_service.py
- ```
-
- ## 🏗️ Project Structure
-
- ```
- Memory LLM/
- ├── memory_llm/              # Main package
- │   ├── __init__.py          # Package initialization
- │   ├── mem_agent.py         # Main assistant class
- │   ├── memory_manager.py    # Memory management
- │   ├── memory_db.py         # SQL database support
- │   ├── llm_client.py        # Ollama integration
- │   ├── memory_tools.py      # User tools
- │   ├── knowledge_loader.py  # Knowledge base loader
- │   ├── prompt_templates.py  # Prompt templates
- │   └── config_manager.py    # Configuration manager
- ├── examples/                # Example scripts
- ├── tests/                   # Test files
- ├── setup.py                 # Installation script
- ├── requirements.txt         # Dependencies
- └── README.md                # This file
- ```
-
- ## 🔧 API Usage
-
- ### MemAgent Class
-
- ```python
- from mem_llm import MemAgent
-
- agent = MemAgent(
-     model="granite4:tiny-h",             # Ollama model name
-     memory_dir="memories",               # Memory directory
-     ollama_url="http://localhost:11434"  # Ollama API URL
- )
- ```
-
- #### Basic Methods
-
- ```python
- # Set user
- agent.set_user("user_id")
-
- # Chat
- response = agent.chat(
-     message="Hello",
-     user_id="optional_user_id",  # If set_user not used
-     metadata={"key": "value"}    # Additional information
- )
-
- # Get memory summary
- summary = agent.memory_manager.get_summary("user_id")
-
- # Search in history
- results = agent.search_user_history("keyword", "user_id")
-
- # Update profile
- agent.update_user_info({
-     "name": "Ali",
-     "preferences": {"language": "en"}
- })
-
- # Get statistics
- stats = agent.get_statistics()
-
- # Export memory
- json_data = agent.export_memory("user_id")
-
- # Clear memory (WARNING!)
- agent.clear_user_memory("user_id", confirm=True)
- ```
-
- ### MemoryManager Class
-
- ```python
- from mem_llm import MemoryManager
-
- memory = MemoryManager(memory_dir="memories")
-
- # Load memory
- data = memory.load_memory("user_id")
-
- # Add interaction
- memory.add_interaction(
-     user_id="user_id",
-     user_message="Hello",
-     bot_response="Hello! How can I help you?",
-     metadata={"timestamp": "2025-01-13"}
- )
-
- # Get recent conversations
- recent = memory.get_recent_conversations("user_id", limit=5)
-
- # Search
- results = memory.search_memory("user_id", "order")
- ```
-
- ### OllamaClient Class
-
- ```python
- from mem_llm import OllamaClient
-
- client = OllamaClient(model="granite4:tiny-h")
-
- # Simple generation
- response = client.generate("Hello world!")
-
- # Chat format
- response = client.chat([
-     {"role": "system", "content": "You are a helpful assistant"},
-     {"role": "user", "content": "Hello"}
- ])
-
- # Connection check
- is_ready = client.check_connection()
-
- # Model list
- models = client.list_models()
- ```
-
- ## 💡 Usage Scenarios
-
- ### 1. Customer Service Bot
- - Remembers customer history
- - Knows previous issues
- - Makes personalized recommendations
-
- ### 2. Personal Assistant
- - Tracks daily activities
- - Learns preferences
- - Makes reminders
-
- ### 3. Education Assistant
- - Tracks student progress
- - Adjusts difficulty level
- - Remembers past mistakes
-
- ### 4. Support Ticket System
- - Stores ticket history
- - Finds related old tickets
- - Provides solution suggestions
-
- ## 📊 Memory Format
-
- Memories are stored in JSON format:
-
- ```json
- {
-   "conversations": [
-     {
-       "timestamp": "2025-01-13T10:30:00",
-       "user_message": "Hello",
-       "bot_response": "Hello! How can I help you?",
-       "metadata": {
-         "topic": "greeting"
-       }
-     }
-   ],
-   "profile": {
-     "user_id": "user123",
-     "first_seen": "2025-01-13T10:30:00",
-     "preferences": {},
-     "summary": {}
-   },
-   "last_updated": "2025-01-13T10:35:00"
- }
- ```
-
- ## 🔒 Privacy and Security
-
- ✅ Works completely locally (no internet connection required)
- ✅ Data stored on your computer
- ✅ No data sent to third-party services
- ✅ Memories in JSON format, easily deletable
-
- ## 🛠️ Development
-
- ### Test Mode
-
- ```python
- # Simple chat without memory (for testing)
- response = agent.simple_chat("Test message")
- ```
-
- ### Using Your Own Model
-
- ```python
- # Different Ollama model
- agent = MemAgent(model="llama2:7b")
-
- # Or another LLM API
- # Customize llm_client.py file
- ```
-
- ## 🐛 Troubleshooting
-
- ### Ollama Connection Error
-
- ```bash
- # Start Ollama service
- ollama serve
-
- # Port check
- netstat -an | findstr "11434"
- ```
-
- ### Model Not Found
-
- ```bash
- # Check model list
- ollama list
-
- # Download model
- ollama pull granite4:tiny-h
- ```
-
- ### Memory Issues
-
- ```python
- # Check memory directory
- import os
- os.path.exists("memories")
-
- # List memory files
- os.listdir("memories")
- ```
-
- ## 📈 Performance
-
- **Model Size**: ~2.5 GB
- **Response Time**: ~1-3 seconds (depends on CPU)
- **Memory Usage**: ~4-6 GB RAM
- **Disk Usage**: ~10-50 KB per user
-
- ## 🤝 Contributing
-
- 1. Fork the repository
- 2. Create feature branch (`git checkout -b feature/amazing-feature`)
- 3. Commit changes (`git commit -m 'feat: Add amazing feature'`)
- 4. Push to branch (`git push origin feature/amazing-feature`)
- 5. Open Pull Request
-
- ## 📝 License
-
- MIT License - See LICENSE file for details.
-
- ## 🙏 Acknowledgments
-
- [Ollama](https://ollama.ai/) - Local LLM server
- [Granite](https://www.ibm.com/granite) - IBM Granite models
-
- ## 📞 Contact
-
- You can open an issue for your questions.
-
- ---
-
- **Note**: This project is for educational and research purposes. Please perform comprehensive testing before using in production environment.
mem_llm-1.0.2.dist-info/RECORD
@@ -1,15 +0,0 @@
- mem_llm/__init__.py,sha256=l9ynmAWNyC_CPZcb5q-pkJ_oVdJZpFN4hwVHRNqCkg8,920
- mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
- mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
- mem_llm/config_manager.py,sha256=8PIHs21jZWlI-eG9DgekjOvNxU3-U4xH7SbT8Gr-Z6M,7075
- mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
- mem_llm/llm_client.py,sha256=tLNulVEV_tWdktvcQUokdhd0gTkIISUHipglRt17IWk,5255
- mem_llm/mem_agent.py,sha256=BIEMHpbss4QPstS-aEoZwmKBBc_fg87tf8Jj7MTIV8g,20357
- mem_llm/memory_db.py,sha256=KyNIcChYihSavd2ot5KMBlVB9lq8rexoBQ0lA5bCJNI,12611
- mem_llm/memory_manager.py,sha256=iXnf5YEJXmQ75jgJ2LEx9zCHxIpZTcLtHlp2eWgFjRg,8335
- mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
- mem_llm/prompt_templates.py,sha256=tCiQJw3QQKIaH8NsxEKOIaIVxw4XT43PwdmyfCINzzM,6536
- mem_llm-1.0.2.dist-info/METADATA,sha256=yJxAbApli62T27XkKE0SIHxgD50PPbhXldmBXfdyfhE,9347
- mem_llm-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- mem_llm-1.0.2.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
- mem_llm-1.0.2.dist-info/RECORD,,