mem_llm-1.3.0-py3-none-any.whl → mem_llm-1.3.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mem-llm might be problematic.

mem_llm/vector_store.py (new file)
@@ -0,0 +1,278 @@
+ """
+ Vector Store Abstraction Layer
+ Supports multiple vector databases (Chroma, FAISS, etc.)
+ """
+
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Optional, Any
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class VectorStore(ABC):
+     """Abstract interface for vector stores"""
+
+     @abstractmethod
+     def add_documents(self, documents: List[Dict[str, Any]]) -> None:
+         """
+         Add documents to vector store
+
+         Args:
+             documents: List of dicts with 'id', 'text', 'metadata'
+         """
+         pass
+
+     @abstractmethod
+     def search(self, query: str, limit: int = 5, filter_metadata: Optional[Dict] = None) -> List[Dict[str, Any]]:
+         """
+         Search similar documents
+
+         Args:
+             query: Search query text
+             limit: Maximum number of results
+             filter_metadata: Optional metadata filters
+
+         Returns:
+             List of similar documents with scores
+         """
+         pass
+
+     @abstractmethod
+     def delete_collection(self) -> None:
+         """Delete all vectors in collection"""
+         pass
+
+     @abstractmethod
+     def get_stats(self) -> Dict[str, Any]:
+         """Get statistics about the vector store"""
+         pass
+
+
+ try:
+     import chromadb
+     from chromadb.config import Settings
+     CHROMA_AVAILABLE = True
+ except ImportError:
+     CHROMA_AVAILABLE = False
+     logger.warning("ChromaDB not available. Install with: pip install chromadb")
+
+
+ class ChromaVectorStore(VectorStore):
+     """ChromaDB implementation of VectorStore"""
+
+     def __init__(self, collection_name: str = "knowledge_base",
+                  persist_directory: Optional[str] = None,
+                  embedding_model: str = "all-MiniLM-L6-v2"):
+         """
+         Initialize ChromaDB vector store
+
+         Args:
+             collection_name: Name of the collection
+             persist_directory: Directory to persist data (None = in-memory)
+             embedding_model: Embedding model name (sentence-transformers compatible)
+         """
+         if not CHROMA_AVAILABLE:
+             raise ImportError(
+                 "ChromaDB is not installed. Install with: pip install chromadb"
+             )
+
+         self.collection_name = collection_name
+         self.persist_directory = persist_directory
+         self.embedding_model = embedding_model
+
+         # Initialize Chroma client
+         if persist_directory:
+             self.client = chromadb.PersistentClient(path=persist_directory)
+         else:
+             self.client = chromadb.Client()
+
+         # Lazy load embedding model
+         self._embedding_fn = None
+
+         # Get or create collection with embedding function
+         try:
+             # Create embedding function
+             embedding_fn = self._get_embedding_function()
+
+             self.collection = self.client.get_or_create_collection(
+                 name=collection_name,
+                 embedding_function=embedding_fn,
+                 metadata={"hnsw:space": "cosine"}
+             )
+         except Exception as e:
+             logger.error(f"Failed to create Chroma collection: {e}")
+             raise
+
+     def _get_embedding_function(self):
+         """Lazy load embedding function"""
+         if self._embedding_fn is None:
+             try:
+                 # Try to use ChromaDB's native SentenceTransformerEmbeddingFunction
+                 try:
+                     # Try different import paths for ChromaDB embedding functions
+                     try:
+                         from chromadb.utils import embedding_functions
+                         embedding_fn_class = embedding_functions.SentenceTransformerEmbeddingFunction
+                     except (ImportError, AttributeError):
+                         try:
+                             from chromadb.utils.embedding_functions import SentenceTransformerEmbeddingFunction as embedding_fn_class
+                         except ImportError:
+                             embedding_fn_class = None
+
+                     if embedding_fn_class:
+                         self._embedding_fn = embedding_fn_class(model_name=self.embedding_model)
+                         logger.info(f"Loaded embedding model using ChromaDB native function: {self.embedding_model}")
+                     else:
+                         raise AttributeError("SentenceTransformerEmbeddingFunction not found")
+
+                 except (ImportError, AttributeError, Exception) as e:
+                     # Fallback: Custom embedding function wrapper compatible with ChromaDB
+                     from sentence_transformers import SentenceTransformer
+                     model = SentenceTransformer(self.embedding_model)
+
+                     class CustomEmbeddingFunction:
+                         def __init__(self, model, model_name):
+                             self.model = model
+                             self.model_name = model_name
+                             self.name = model_name  # ChromaDB may check for 'name' attribute
+
+                         def __call__(self, texts: List[str]) -> List[List[float]]:
+                             embeddings = self.model.encode(texts, show_progress_bar=False)
+                             return embeddings.tolist()
+
+                         def encode_queries(self, queries: List[str]) -> List[List[float]]:
+                             return self.__call__(queries)
+
+                     self._embedding_fn = CustomEmbeddingFunction(model, self.embedding_model)
+                     logger.info(f"Loaded embedding model using custom wrapper: {self.embedding_model} (fallback: {e})")
+             except ImportError:
+                 raise ImportError(
+                     "sentence-transformers not installed. "
+                     "Install with: pip install sentence-transformers"
+                 )
+
+         return self._embedding_fn
+
+     def add_documents(self, documents: List[Dict[str, Any]]) -> None:
+         """Add documents to ChromaDB"""
+         if not documents:
+             return
+
+         # Prepare data
+         ids = []
+         texts = []
+         metadatas = []
+
+         for doc in documents:
+             doc_id = str(doc.get('id', doc.get('text', ''))[:100])
+             # Ensure unique IDs
+             if doc_id in ids:
+                 doc_id = f"{doc_id}_{len(ids)}"
+             ids.append(doc_id)
+             texts.append(doc['text'])
+             # Ensure metadata values are JSON-serializable
+             metadata = doc.get('metadata', {})
+             clean_metadata = {}
+             for k, v in metadata.items():
+                 if isinstance(v, (str, int, float, bool)) or v is None:
+                     clean_metadata[k] = v
+                 else:
+                     clean_metadata[k] = str(v)
+             metadatas.append(clean_metadata)
+
+         # Add to collection (Chroma will use embedding function automatically)
+         try:
+             self.collection.add(
+                 ids=ids,
+                 documents=texts,
+                 metadatas=metadatas
+             )
+
+             logger.debug(f"Added {len(documents)} documents to Chroma")
+         except Exception as e:
+             logger.error(f"Error adding documents to Chroma: {e}")
+             raise
+
+     def search(self, query: str, limit: int = 5,
+                filter_metadata: Optional[Dict] = None) -> List[Dict[str, Any]]:
+         """Search in ChromaDB"""
+         try:
+             # Build where clause for metadata filtering
+             where = None
+             if filter_metadata:
+                 where = filter_metadata
+
+             # Search (Chroma will use embedding function automatically)
+             results = self.collection.query(
+                 query_texts=[query],
+                 n_results=limit,
+                 where=where
+             )
+
+             # Format results
+             formatted_results = []
+             if results.get('documents') and len(results['documents']) > 0 and len(results['documents'][0]) > 0:
+                 num_results = len(results['documents'][0])
+                 distances = results.get('distances', [[0.0] * num_results])
+
+                 for i in range(num_results):
+                     # ChromaDB uses cosine distance (0 = identical, 1 = opposite)
+                     # Convert to similarity score (1 = identical, 0 = opposite)
+                     distance = distances[0][i] if distances and len(distances[0]) > i else 0.0
+                     similarity = 1.0 - distance if distance <= 1.0 else max(0.0, 1.0 / (1.0 + distance))
+
+                     formatted_results.append({
+                         'id': results['ids'][0][i] if results.get('ids') and len(results['ids'][0]) > i else f"doc_{i}",
+                         'text': results['documents'][0][i],
+                         'metadata': results['metadatas'][0][i] if results.get('metadatas') and len(results['metadatas'][0]) > i else {},
+                         'score': similarity
+                     })
+
+             return formatted_results
+         except Exception as e:
+             logger.error(f"Error searching Chroma: {e}")
+             return []
+
+     def delete_collection(self) -> None:
+         """Delete collection"""
+         try:
+             self.client.delete_collection(self.collection_name)
+             logger.info(f"Deleted Chroma collection: {self.collection_name}")
+         except Exception as e:
+             logger.error(f"Error deleting collection: {e}")
+
+     def get_stats(self) -> Dict[str, Any]:
+         """Get collection statistics"""
+         try:
+             count = self.collection.count()
+             return {
+                 'total_documents': count,
+                 'collection_name': self.collection_name,
+                 'embedding_model': self.embedding_model
+             }
+         except Exception as e:
+             logger.error(f"Error getting stats: {e}")
+             return {'total_documents': 0}
+
+
+ def create_vector_store(store_type: str = "chroma", **kwargs) -> Optional[VectorStore]:
+     """
+     Factory function to create vector store
+
+     Args:
+         store_type: Type of vector store ('chroma', 'faiss', etc.)
+         **kwargs: Store-specific parameters
+
+     Returns:
+         VectorStore instance or None if not available
+     """
+     if store_type == "chroma":
+         if not CHROMA_AVAILABLE:
+             logger.warning("ChromaDB not available. Install with: pip install chromadb")
+             return None
+         return ChromaVectorStore(**kwargs)
+     else:
+         logger.warning(f"Unknown vector store type: {store_type}")
+         return None
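The new module above is self-contained, so a short usage sketch helps orient review. The snippet below is illustrative only, not taken from the package's docs: it assumes `chromadb` and `sentence-transformers` are installed, that the module is importable as `mem_llm.vector_store` (the path listed in the RECORD diff below), and it uses only calls defined in the hunk above (`create_vector_store()`, `add_documents()`, `search()`, `get_stats()`). The FAQ documents are made-up placeholders.

```python
# Illustrative sketch (not from the package docs). Assumes:
#   pip install mem-llm chromadb sentence-transformers
# and that the module ships as mem_llm/vector_store.py (per the RECORD).
from mem_llm.vector_store import create_vector_store

# persist_directory=None -> in-memory ChromaDB client (see ChromaVectorStore.__init__)
store = create_vector_store("chroma", collection_name="demo", persist_directory=None)

if store is not None:  # factory returns None when chromadb is missing
    store.add_documents([
        {"id": "faq-1", "text": "Reset your password from the account settings page.",
         "metadata": {"category": "account"}},
        {"id": "faq-2", "text": "Invoices are emailed on the first of each month.",
         "metadata": {"category": "billing"}},
    ])

    # filter_metadata is passed straight through as ChromaDB's `where` clause
    for hit in store.search("how do I change my password?", limit=1,
                            filter_metadata={"category": "account"}):
        print(hit["id"], round(hit["score"], 3), hit["text"])

    print(store.get_stats())  # e.g. {'total_documents': 2, 'collection_name': 'demo', ...}
```

Note that `search()` reports `score` as a similarity in [0, 1], converted from ChromaDB's cosine distance as `1 - distance`, with a clamp for the edge case of distances above 1.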
mem_llm-1.3.2.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mem-llm
- Version: 1.3.0
+ Version: 1.3.2
  Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
  Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
  License: MIT
@@ -59,9 +59,9 @@ Requires-Dist: pymongo>=4.6.0; extra == "all"
  [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

- **Memory-enabled AI assistant with local LLM support**
+ **Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini)**

- Mem-LLM is a powerful Python library that brings persistent memory capabilities to local Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and work completely offline with Ollama.
+ Mem-LLM is a powerful Python library that brings persistent memory capabilities to Large Language Models. Build AI assistants that remember user interactions, manage knowledge bases, and choose between local (Ollama, LM Studio) or cloud (Gemini) backends.

  ## 🔗 Links

@@ -70,29 +70,40 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
  - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
  - **Documentation**: See examples/ directory

- ## 🆕 What's New in v1.2.0
+ ## 🆕 What's New in v1.3.2

- - **Conversation Summarization**: Automatic conversation compression (~40-60% token reduction)
- - 📤 **Data Export/Import**: JSON, CSV, SQLite, PostgreSQL, MongoDB support
- - 🗄️ **Multi-Database**: Enterprise-ready PostgreSQL & MongoDB integration
- - **In-Memory DB**: Use `:memory:` for temporary operations
- - **Cleaner Logs**: Default WARNING level for production-ready output
- - **Bug Fixes**: Database path handling, organized SQLite files
+ - 📊 **Response Metrics** (v1.3.1+) - Track confidence, latency, KB usage, and quality analytics
+ - 🔍 **Vector Search** (v1.3.2+) - Semantic search with ChromaDB, cross-lingual support
+ - 🎯 **Quality Monitoring** - Production-ready metrics for response quality
+ - 🌐 **Semantic Understanding** - Understands meaning, not just keywords

- [See full changelog](CHANGELOG.md#120---2025-10-21)
+ ## 🆕 What's New in v1.3.0
+
+ - 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
+ - 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
+ - 🔍 **Auto-Detection**: Automatically finds and uses available local LLM services
+ - ⚡ **Unified API**: Same code works across all backends - just change one parameter
+ - 📚 **New Examples**: 4 additional examples showing multi-backend usage
+ - 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
+
+ [See full changelog](CHANGELOG.md)

  ## ✨ Key Features

+ - 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
+ - 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
+ - 📊 **Response Metrics** (v1.3.1+) - Track confidence, latency, KB usage, and quality analytics
+ - 🔍 **Vector Search** (v1.3.2+) - Semantic search with ChromaDB, cross-lingual support
  - 🧠 **Persistent Memory** - Remembers conversations across sessions
- - 🤖 **Universal Ollama Support** - Works with ALL Ollama models (Qwen3, DeepSeek, Llama3, Granite, etc.)
+ - 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
  - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
  - 📚 **Knowledge Base** - Built-in FAQ/support system with categorized entries
  - 🎯 **Dynamic Prompts** - Context-aware system prompts that adapt to active features
  - 👥 **Multi-User Support** - Separate memory spaces for different users
  - 🔧 **Memory Tools** - Search, export, and manage stored memories
  - 🎨 **Flexible Configuration** - Personal or business usage modes
- - 📊 **Production Ready** - Comprehensive test suite with 34+ automated tests
- - 🔒 **100% Local & Private** - No cloud dependencies, your data stays yours
+ - 📊 **Production Ready** - Comprehensive test suite with 50+ automated tests
+ - 🔒 **Privacy Options** - 100% local (Ollama/LM Studio) or cloud (Gemini)
  - 🛡️ **Prompt Injection Protection** (v1.1.0+) - Advanced security against prompt attacks (opt-in)
  - ⚡ **High Performance** (v1.1.0+) - Thread-safe operations, 15K+ msg/s throughput
  - 🔄 **Retry Logic** (v1.1.0+) - Automatic exponential backoff for network errors
@@ -130,8 +141,9 @@ pip install -U mem-llm

  ### Prerequisites

- Install and start [Ollama](https://ollama.ai):
+ **Choose one of the following LLM backends:**

+ #### Option 1: Ollama (Local, Privacy-First)
  ```bash
  # Install Ollama (visit https://ollama.ai)
  # Then pull a model
@@ -141,15 +153,38 @@ ollama pull granite4:tiny-h
  ollama serve
  ```

+ #### Option 2: LM Studio (Local, GUI-Based)
+ ```bash
+ # 1. Download and install LM Studio: https://lmstudio.ai
+ # 2. Download a model from the UI
+ # 3. Start the local server (default port: 1234)
+ ```
+
+ #### Option 3: Google Gemini (Cloud, Powerful)
+ ```bash
+ # Get API key from: https://makersuite.google.com/app/apikey
+ # Set environment variable
+ export GEMINI_API_KEY="your-api-key-here"
+ ```
+
  ### Basic Usage

  ```python
  from mem_llm import MemAgent

- # Create an agent
+ # Option 1: Use Ollama (default)
  agent = MemAgent(model="granite4:tiny-h")

- # Set user and chat
+ # Option 2: Use LM Studio
+ agent = MemAgent(backend='lmstudio', model='local-model')
+
+ # Option 3: Use Gemini
+ agent = MemAgent(backend='gemini', model='gemini-2.5-flash', api_key='your-key')
+
+ # Option 4: Auto-detect available backend
+ agent = MemAgent(auto_detect_backend=True)
+
+ # Set user and chat (same for all backends!)
  agent.set_user("alice")
  response = agent.chat("My name is Alice and I love Python!")
  print(response)
@@ -159,10 +194,34 @@ response = agent.chat("What's my name and what do I love?")
  print(response) # Agent remembers: "Your name is Alice and you love Python!"
  ```

- That's it! Just 5 lines of code to get started.
+ That's it! Just 5 lines of code to get started with any backend.

  ## 📖 Usage Examples

+ ### Multi-Backend Examples (v1.3.0+)
+
+ ```python
+ from mem_llm import MemAgent
+
+ # LM Studio - Fast local inference
+ agent = MemAgent(
+     backend='lmstudio',
+     model='local-model',
+     base_url='http://localhost:1234'
+ )
+
+ # Google Gemini - Cloud power
+ agent = MemAgent(
+     backend='gemini',
+     model='gemini-2.5-flash',
+     api_key='your-api-key'
+ )
+
+ # Auto-detect - Universal compatibility
+ agent = MemAgent(auto_detect_backend=True)
+ print(f"Using: {agent.llm.get_backend_info()['name']}")
+ ```
+
  ### Multi-User Conversations

  ```python
@@ -379,16 +438,21 @@ Mem-LLM works with **ALL Ollama models**, including:
  ```
  mem-llm/
  ├── mem_llm/
- │   ├── mem_agent.py            # Main agent class
- │   ├── memory_manager.py       # JSON memory backend
- │   ├── memory_db.py            # SQL memory backend
- │   ├── llm_client.py           # Ollama API client
- │   ├── knowledge_loader.py     # Knowledge base system
- │   ├── dynamic_prompt.py       # Context-aware prompts
- │   ├── memory_tools.py         # Memory management tools
- │   ├── config_manager.py       # Configuration handler
- │   └── cli.py                  # Command-line interface
- └── examples/                   # Usage examples
+ │   ├── mem_agent.py            # Main agent class (multi-backend)
+ │   ├── base_llm_client.py      # Abstract LLM interface
+ │   ├── llm_client_factory.py   # Backend factory pattern
+ │   ├── clients/                # LLM backend implementations
+ │   │   ├── ollama_client.py    # Ollama integration
+ │   │   ├── lmstudio_client.py  # LM Studio integration
+ │   │   └── gemini_client.py    # Google Gemini integration
+ │   ├── memory_manager.py       # JSON memory backend
+ │   ├── memory_db.py            # SQL memory backend
+ │   ├── knowledge_loader.py     # Knowledge base system
+ │   ├── dynamic_prompt.py       # Context-aware prompts
+ │   ├── memory_tools.py         # Memory management tools
+ │   ├── config_manager.py       # Configuration handler
+ │   └── cli.py                  # Command-line interface
+ └── examples/                   # Usage examples (14 total)
  ```

  ## 🔥 Advanced Features
@@ -430,10 +494,12 @@ stats = agent.get_memory_stats()
  ## 📦 Project Structure

  ### Core Components
- - **MemAgent**: Main interface for building AI assistants
+ - **MemAgent**: Main interface for building AI assistants (multi-backend support)
+ - **LLMClientFactory**: Factory pattern for backend creation
+ - **BaseLLMClient**: Abstract interface for all LLM backends
+ - **OllamaClient / LMStudioClient / GeminiClient**: Backend implementations
  - **MemoryManager**: JSON-based memory storage (simple)
  - **SQLMemoryManager**: SQLite-based storage (advanced)
- - **OllamaClient**: LLM communication handler
  - **KnowledgeLoader**: Knowledge base management

  ### Optional Features
@@ -457,14 +523,19 @@ The `examples/` directory contains ready-to-run demonstrations:
  8. **08_conversation_summarization.py** - Token compression with auto-summary (v1.2.0+)
  9. **09_data_export_import.py** - Multi-format export/import demo (v1.2.0+)
  10. **10_database_connection_test.py** - Enterprise PostgreSQL/MongoDB migration (v1.2.0+)
+ 11. **11_lmstudio_example.py** - Using LM Studio backend (v1.3.0+)
+ 12. **12_gemini_example.py** - Using Google Gemini API (v1.3.0+)
+ 13. **13_multi_backend_comparison.py** - Compare different backends (v1.3.0+)
+ 14. **14_auto_detect_backend.py** - Auto-detection feature demo (v1.3.0+)

  ## 📊 Project Status

- - **Version**: 1.2.0
+ - **Version**: 1.3.0
  - **Status**: Production Ready
- - **Last Updated**: October 21, 2025
- - **Test Coverage**: 16/16 automated tests (100% success rate)
+ - **Last Updated**: October 31, 2025
+ - **Test Coverage**: 50+ automated tests (100% success rate)
  - **Performance**: Thread-safe operations, <1ms search latency
+ - **Backends**: Ollama, LM Studio, Google Gemini
  - **Databases**: SQLite, PostgreSQL, MongoDB, In-Memory

  ## 📈 Roadmap
@@ -476,10 +547,14 @@ The `examples/` directory contains ready-to-run demonstrations:
  - [x] ~~Conversation Summarization~~ (v1.2.0)
  - [x] ~~Multi-Database Export/Import~~ (v1.2.0)
  - [x] ~~In-Memory Database~~ (v1.2.0)
+ - [x] ~~Multi-Backend Support (Ollama, LM Studio, Gemini)~~ (v1.3.0)
+ - [x] ~~Auto-Detection~~ (v1.3.0)
+ - [x] ~~Factory Pattern Architecture~~ (v1.3.0)
+ - [ ] OpenAI & Claude backends
+ - [ ] Streaming support
  - [ ] Web UI dashboard
  - [ ] REST API server
  - [ ] Vector database integration
- - [ ] Advanced analytics dashboard

  ## 📄 License

mem_llm-1.3.2.dist-info/RECORD
@@ -1,9 +1,9 @@
- mem_llm/__init__.py,sha256=Nx_7o8uFoK7WzLjY4Ko2sVITQoAcYtwNsUK4AugddbE,2636
+ mem_llm/__init__.py,sha256=fKHDaLkOUE4uFqaTkqfKcop4Ckz9qfFOTKGcfz6BGlE,2918
  mem_llm/base_llm_client.py,sha256=aCpr8ZnvOsu-a-zp9quTDP42XvjAC1uci6r11s0QdVA,5218
  mem_llm/cli.py,sha256=DiqQyBZknN8pVagY5jXH85_LZ6odVGopfpa-7DILNNE,8666
  mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
  mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
- mem_llm/config_manager.py,sha256=is4m0ISBIfv4PInGjrpvhxy0A7p9_BQ_UoJeayaIT3A,7084
+ mem_llm/config_manager.py,sha256=QwkZz8qNBj5KI0h7t45PQmvJ7Orqnx3iOIUbU5yAVoo,7255
  mem_llm/conversation_summarizer.py,sha256=yCG2pKrAJf7xjaG6DPXL0i9eesMZnnzjKTpuyLHMTPQ,12509
  mem_llm/data_export_import.py,sha256=gQIdD0hBY23qcRvx139yE15RWHXPinL_EoRNY7iabj0,22592
  mem_llm/dynamic_prompt.py,sha256=8H99QVDRJSVtGb_o4sdEPnG1cJWuer3KiD-nuL1srTA,10244
@@ -11,19 +11,21 @@ mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2
  mem_llm/llm_client.py,sha256=3F04nlnRWRlhkQ3aZO-OfsxeajB2gwbIDfClu04cyb0,8709
  mem_llm/llm_client_factory.py,sha256=jite-4CkgFBd9e0b2cIaZzP-zTqA7tjNqXnJ5CQgcbs,9325
  mem_llm/logger.py,sha256=dZUmhGgFXtDsDBU_D4kZlJeMp6k-VNPaBcyTt7rZYKE,4507
- mem_llm/mem_agent.py,sha256=Y4qCHNtdPlOJssQLG1GJdy02FsztYe9sjnbh54qAWWU,37221
- mem_llm/memory_db.py,sha256=4HbxgfhPrijbBKsEv4ncmjZeK-RhtLkyWBrg-quCsNE,14715
- mem_llm/memory_manager.py,sha256=CZI3A8pFboHQIgeiXB1h2gZK7mgfbVSU3IxuqE-zXtc,9978
+ mem_llm/mem_agent.py,sha256=8R0oAtXzD_X99QVVsfMjZl_wkiCCHdKNWrTrsrbpzdY,52771
+ mem_llm/memory_db.py,sha256=yY_afim1Rpk3mOz-qI5WvDDAwWoVd-NucBMBLVUNpwg,21711
+ mem_llm/memory_manager.py,sha256=BtzI1o-NYZXMkZHtc36xEZizgNn9fAu6cBkGzNXa-uI,10373
  mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
  mem_llm/prompt_security.py,sha256=ehAi6aLiXj0gFFhpyjwEr8LentSTJwOQDLbINV7SaVM,9960
+ mem_llm/response_metrics.py,sha256=nMegWV7brNOmptjxGJfYEqRKvAj_302MIw8Ky1PzEy8,7912
  mem_llm/retry_handler.py,sha256=z5ZcSQKbvVeNK7plagTLorvOeoYgRpQcsX3PpNqUjKM,6389
  mem_llm/thread_safe_db.py,sha256=Fq-wSn4ua1qiR6M4ZTIy7UT1IlFj5xODNExgub1blbU,10328
+ mem_llm/vector_store.py,sha256=7fzvxLjfJrspN1Tcety4JtcKksxnkM0E5es0UtBgI-c,10816
  mem_llm/clients/__init__.py,sha256=Nvr4NuL9ZlDF_dUjr-ZMFxRRrBdHoUOjqncZs3n5Wow,475
  mem_llm/clients/gemini_client.py,sha256=dmRZRd8f-x6J2W7luzcB1BOx_4UpXpCF4YiPGUccWCw,14432
  mem_llm/clients/lmstudio_client.py,sha256=IxUX3sVRfXN46hfEUTCrspGTOeqsn4YAu9WzFuGh940,10156
  mem_llm/clients/ollama_client.py,sha256=2BfYSBiOowhFg9UiCXkILlBG9_4Vri3-Iny_gH6-um0,9710
- mem_llm-1.3.0.dist-info/METADATA,sha256=Ov-FBPV2qYjgWWYv9l0WidhSmr7vGx-NW1uIqxAToi4,15518
- mem_llm-1.3.0.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
- mem_llm-1.3.0.dist-info/entry_points.txt,sha256=z9bg6xgNroIobvCMtnSXeFPc-vI1nMen8gejHCdnl0U,45
- mem_llm-1.3.0.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
- mem_llm-1.3.0.dist-info/RECORD,,
+ mem_llm-1.3.2.dist-info/METADATA,sha256=6KYvn0Y00gcxzyU45tkckwW991IqSDpLoAvSfubeypY,18774
+ mem_llm-1.3.2.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+ mem_llm-1.3.2.dist-info/entry_points.txt,sha256=z9bg6xgNroIobvCMtnSXeFPc-vI1nMen8gejHCdnl0U,45
+ mem_llm-1.3.2.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
+ mem_llm-1.3.2.dist-info/RECORD,,