mem-llm 1.0.2.tar.gz → 1.0.4.tar.gz

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release.


This version of mem-llm might be problematic.

Files changed (38)
  1. mem_llm-1.0.4/PKG-INFO +304 -0
  2. mem_llm-1.0.4/README.md +263 -0
  3. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/__init__.py +1 -1
  4. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/llm_client.py +5 -1
  5. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/mem_agent.py +113 -23
  6. mem_llm-1.0.4/mem_llm.egg-info/PKG-INFO +304 -0
  7. {mem_llm-1.0.2 → mem_llm-1.0.4}/setup.py +1 -1
  8. mem_llm-1.0.2/PKG-INFO +0 -382
  9. mem_llm-1.0.2/README.md +0 -341
  10. mem_llm-1.0.2/mem_llm.egg-info/PKG-INFO +0 -382
  11. {mem_llm-1.0.2 → mem_llm-1.0.4}/CHANGELOG.md +0 -0
  12. {mem_llm-1.0.2 → mem_llm-1.0.4}/INTEGRATION_GUIDE.md +0 -0
  13. {mem_llm-1.0.2 → mem_llm-1.0.4}/MANIFEST.in +0 -0
  14. {mem_llm-1.0.2 → mem_llm-1.0.4}/QUICKSTART.md +0 -0
  15. {mem_llm-1.0.2 → mem_llm-1.0.4}/QUICKSTART_TR.md +0 -0
  16. {mem_llm-1.0.2 → mem_llm-1.0.4}/STRUCTURE.md +0 -0
  17. {mem_llm-1.0.2 → mem_llm-1.0.4}/docs/CONFIG_GUIDE.md +0 -0
  18. {mem_llm-1.0.2 → mem_llm-1.0.4}/docs/INDEX.md +0 -0
  19. {mem_llm-1.0.2 → mem_llm-1.0.4}/docs/README.md +0 -0
  20. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/config.yaml.example +0 -0
  21. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/config_from_docs.py +0 -0
  22. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/config_manager.py +0 -0
  23. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/knowledge_loader.py +0 -0
  24. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/memory_db.py +0 -0
  25. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/memory_manager.py +0 -0
  26. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/memory_tools.py +0 -0
  27. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/prompt_templates.py +0 -0
  28. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm.egg-info/SOURCES.txt +0 -0
  29. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm.egg-info/dependency_links.txt +0 -0
  30. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm.egg-info/requires.txt +0 -0
  31. {mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm.egg-info/top_level.txt +0 -0
  32. {mem_llm-1.0.2 → mem_llm-1.0.4}/requirements.txt +0 -0
  33. {mem_llm-1.0.2 → mem_llm-1.0.4}/setup.cfg +0 -0
  34. {mem_llm-1.0.2 → mem_llm-1.0.4}/tests/test_integration.py +0 -0
  35. {mem_llm-1.0.2 → mem_llm-1.0.4}/tests/test_llm_client.py +0 -0
  36. {mem_llm-1.0.2 → mem_llm-1.0.4}/tests/test_mem_agent.py +0 -0
  37. {mem_llm-1.0.2 → mem_llm-1.0.4}/tests/test_memory_manager.py +0 -0
  38. {mem_llm-1.0.2 → mem_llm-1.0.4}/tests/test_memory_tools.py +0 -0
mem_llm-1.0.4/PKG-INFO ADDED
@@ -0,0 +1,304 @@
+ Metadata-Version: 2.4
+ Name: mem-llm
+ Version: 1.0.4
+ Summary: Memory-enabled AI assistant with local LLM support
+ Home-page: https://github.com/emredeveloper/Mem-LLM
+ Author: C. Emre Karataş
+ Author-email: karatasqemre@gmail.com
+ Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
+ Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
+ Keywords: llm ai memory agent chatbot ollama local
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: requests>=2.31.0
+ Requires-Dist: pyyaml>=6.0.1
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.4.0; extra == "dev"
+ Requires-Dist: black>=23.7.0; extra == "dev"
+ Requires-Dist: flake8>=6.1.0; extra == "dev"
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: keywords
+ Dynamic: project-url
+ Dynamic: provides-extra
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # 🧠 mem-llm
+
+ **Memory-enabled AI assistant that remembers conversations using local LLMs**
+
+ [![Python](https://img.shields.io/badge/Python-3.8%2B-blue.svg)](https://www.python.org/downloads/)
+ [![PyPI](https://img.shields.io/pypi/v/mem-llm.svg)](https://pypi.org/project/mem-llm/)
+ [![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE)
+
+ ---
+
+ ## 🎯 What is it?
+
+ A lightweight Python library that adds **persistent memory** to local LLM chatbots. Each user gets their own conversation history that the AI remembers across sessions.
+
+ **Perfect for:**
+ - 💬 Customer service chatbots
+ - 🤖 Personal AI assistants
+ - 📝 Context-aware applications
+ - 🏢 Business automation
+
+ ---
+
+ ## ⚡ Quick Start
+
+ ### 1. Install
+
+ ```bash
+ pip install mem-llm
+ ```
+
+ ### 2. Setup Ollama (one-time)
+
+ ```bash
+ # Install: https://ollama.ai/download
+ ollama serve
+
+ # Download model (only 2.5GB)
+ ollama pull granite4:tiny-h
+ ```
+
+ ### 3. Use
+
+ ```python
+ from mem_llm import MemAgent
+
+ # Create agent (one line!)
+ agent = MemAgent()
+
+ # Set user
+ agent.set_user("john")
+
+ # Chat - it remembers!
+ agent.chat("My name is John")
+ agent.chat("What's my name?")  # → "Your name is John"
+ ```
+
+ ---
+
+ ## 💡 Features
+
+ | Feature | Description |
+ |---------|-------------|
+ | 🧠 **Memory** | Remembers each user's conversation history |
+ | 👥 **Multi-user** | Separate memory for each user |
+ | 🔒 **Privacy** | 100% local, no cloud/API needed |
+ | ⚡ **Fast** | Lightweight SQLite/JSON storage |
+ | 🎯 **Simple** | 3 lines of code to get started |
+
+ ---
+
+ ## 📖 Usage Examples
+
+ ### Basic Chat
+
+ ```python
+ from mem_llm import MemAgent
+
+ agent = MemAgent()
+ agent.set_user("alice")
+
+ # First conversation
+ agent.chat("I love pizza")
+
+ # Later...
+ agent.chat("What's my favorite food?")
+ # → "Your favorite food is pizza"
+ ```
+
+ ### Customer Service Bot
+
+ ```python
+ agent = MemAgent()
+
+ # Customer 1
+ agent.set_user("customer_001")
+ agent.chat("My order #12345 is delayed")
+
+ # Customer 2 (different memory!)
+ agent.set_user("customer_002")
+ agent.chat("I want to return item #67890")
+ ```
+
+ ### Check User Profile
+
+ ```python
+ # Get automatically extracted user info
+ profile = agent.get_user_profile()
+ # {'name': 'Alice', 'favorite_food': 'pizza', 'location': 'NYC'}
+ ```
+
+ ---
+
+ ## 🔧 Configuration
+
+ ### JSON Memory (default - simple)
+
+ ```python
+ agent = MemAgent(
+     model="granite4:tiny-h",
+     use_sql=False,  # Use JSON files
+     memory_dir="memories"
+ )
+ ```
+
+ ### SQL Memory (advanced - faster)
+
+ ```python
+ agent = MemAgent(
+     model="granite4:tiny-h",
+     use_sql=True,  # Use SQLite
+     memory_dir="memories.db"
+ )
+ ```
+
+ ### Custom Settings
+
+ ```python
+ agent = MemAgent(
+     model="llama2",  # Any Ollama model
+     ollama_url="http://localhost:11434"
+ )
+ ```
+
+ ---
+
+ ## 📚 API Reference
+
+ ### MemAgent
+
+ ```python
+ # Initialize
+ agent = MemAgent(model="granite4:tiny-h", use_sql=False)
+
+ # Set active user
+ agent.set_user(user_id: str, name: Optional[str] = None)
+
+ # Chat
+ response = agent.chat(message: str, metadata: Optional[Dict] = None) -> str
+
+ # Get profile
+ profile = agent.get_user_profile(user_id: Optional[str] = None) -> Dict
+
+ # System check
+ status = agent.check_setup() -> Dict
+ ```
+
+ ---
+
+ ## 🎨 Advanced: PDF/DOCX Config
+
+ Generate config from business documents:
+
+ ```python
+ from mem_llm import create_config_from_document
+
+ # Create config.yaml from PDF
+ create_config_from_document(
+     doc_path="company_info.pdf",
+     output_path="config.yaml",
+     company_name="Acme Corp"
+ )
+
+ # Use config
+ agent = MemAgent(config_file="config.yaml")
+ ```
+
+ ---
+
+ ## 🔥 Models
+
+ Works with any [Ollama](https://ollama.ai/) model:
+
+ | Model | Size | Speed | Quality |
+ |-------|------|-------|---------|
+ | `granite4:tiny-h` | 2.5GB | ⚡⚡⚡ | ⭐⭐ |
+ | `llama2` | 4GB | ⚡⚡ | ⭐⭐⭐ |
+ | `mistral` | 4GB | ⚡⚡ | ⭐⭐⭐⭐ |
+ | `llama3` | 5GB | ⚡ | ⭐⭐⭐⭐⭐ |
+
+ ```bash
+ ollama pull <model-name>
+ ```
+
+ ---
+
+ ## 📦 Requirements
+
+ - Python 3.8+
+ - Ollama (for LLM)
+ - 4GB RAM minimum
+ - 5GB disk space
+
+ **Dependencies** (auto-installed):
+ - `requests >= 2.31.0`
+ - `pyyaml >= 6.0.1`
+
+ ---
+
+ ## 🐛 Troubleshooting
+
+ ### Ollama not running?
+
+ ```bash
+ ollama serve
+ ```
+
+ ### Model not found?
+
+ ```bash
+ ollama pull granite4:tiny-h
+ ```
+
+ ### Import error?
+
+ ```bash
+ pip install mem-llm --upgrade
+ ```
+
+ ---
+
+ ## 📄 License
+
+ MIT License - feel free to use in personal and commercial projects!
+
+ ---
+
+ ## 🔗 Links
+
+ - **PyPI:** https://pypi.org/project/mem-llm/
+ - **GitHub:** https://github.com/emredeveloper/Mem-LLM
+ - **Ollama:** https://ollama.ai/
+
+ ---
+
+ ## 🌟 Star us on GitHub!
+
+ If you find this useful, give us a ⭐ on [GitHub](https://github.com/emredeveloper/Mem-LLM)!
+
+ ---
+
+ <div align="center">
+ Made with ❤️ by <a href="https://github.com/emredeveloper">C. Emre Karataş</a>
+ </div>
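
The metadata block above is what installers consume. For reference, the same fields can be read back at runtime with Python's standard library; a minimal sketch, assuming mem-llm 1.0.4 is installed in the current environment:

```python
# Minimal sketch: query the installed package's metadata, i.e. the same
# fields shown in the PKG-INFO hunk above (assumes mem-llm 1.0.4 is installed).
from importlib.metadata import metadata, version

print(version("mem-llm"))             # "1.0.4"
meta = metadata("mem-llm")
print(meta["Requires-Python"])        # ">=3.8"
print(meta.get_all("Requires-Dist"))  # requests, pyyaml, plus the "dev" extras
```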
mem_llm-1.0.4/README.md ADDED
@@ -0,0 +1,263 @@
[263 added lines: identical to the Markdown body embedded in mem_llm-1.0.4/PKG-INFO above]
{mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/__init__.py CHANGED
@@ -24,7 +24,7 @@ try:
  except ImportError:
      __all_pro__ = []

- __version__ = "1.0.2"
+ __version__ = "1.0.4"
  __author__ = "C. Emre Karataş"

  __all__ = [
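
A quick smoke test that an environment picked up the bump; a minimal sketch using the two attributes defined in this file:

```python
# Check that the installed module reflects the 1.0.4 bump; __version__ and
# __author__ are the attributes defined in mem_llm/__init__.py above.
import mem_llm

assert mem_llm.__version__ == "1.0.4"
print(mem_llm.__author__)  # "C. Emre Karataş"
```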
{mem_llm-1.0.2 → mem_llm-1.0.4}/mem_llm/llm_client.py CHANGED
@@ -107,7 +107,11 @@ class OllamaClient:
      "stream": False,
      "options": {
          "temperature": temperature,
-         "num_predict": max_tokens
+         "num_predict": max_tokens,
+         "num_ctx": 2048,            # Context window
+         "top_k": 40,                # Limit vocab
+         "top_p": 0.9,               # Nucleus sampling
+         "stop": ["\n\n\n", "---"]   # Stop sequences
      }
  }
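
The 1.0.4 change adds four generation options to the request body that `OllamaClient` sends. Below is a minimal sketch of the resulting payload posted to Ollama's `/api/generate` endpoint; the helper name, timeout, and the defaults other than the options shown in the hunk are illustrative assumptions, not mem-llm's actual API:

```python
# Sketch of the request the patched client builds. The "options" keys and
# values mirror the hunk above; generate(), its defaults, and the timeout
# are illustrative assumptions rather than mem-llm's real interface.
import requests

def generate(prompt: str,
             model: str = "granite4:tiny-h",
             base_url: str = "http://localhost:11434",
             temperature: float = 0.7,
             max_tokens: int = 500) -> str:
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": temperature,
            "num_predict": max_tokens,  # cap on generated tokens
            "num_ctx": 2048,            # context window
            "top_k": 40,                # limit sampling vocabulary
            "top_p": 0.9,               # nucleus sampling
            "stop": ["\n\n\n", "---"],  # stop sequences
        },
    }
    resp = requests.post(f"{base_url}/api/generate", json=payload, timeout=120)
    resp.raise_for_status()
    return resp.json()["response"]
```

Bounding the context window and adding stop sequences helps keep small local models from rambling past the answer, which is consistent with the library's focus on lightweight models like `granite4:tiny-h`.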