mem-llm 1.3.1.tar.gz → 1.3.2.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mem-llm has been flagged as potentially problematic.
- {mem_llm-1.3.1 → mem_llm-1.3.2}/CHANGELOG.md +62 -0
- {mem_llm-1.3.1/mem_llm.egg-info → mem_llm-1.3.2}/PKG-INFO +12 -3
- {mem_llm-1.3.1 → mem_llm-1.3.2}/README.md +11 -2
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/__init__.py +9 -2
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/config_manager.py +3 -1
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/mem_agent.py +400 -16
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/memory_db.py +186 -4
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/memory_manager.py +10 -1
- mem_llm-1.3.2/mem_llm/response_metrics.py +221 -0
- mem_llm-1.3.2/mem_llm/vector_store.py +278 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2/mem_llm.egg-info}/PKG-INFO +12 -3
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm.egg-info/SOURCES.txt +2 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/pyproject.toml +1 -1
- {mem_llm-1.3.1 → mem_llm-1.3.2}/requirements-optional.txt +4 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/MANIFEST.in +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/base_llm_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/cli.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/clients/__init__.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/clients/gemini_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/clients/lmstudio_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/clients/ollama_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/config.yaml.example +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/config_from_docs.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/conversation_summarizer.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/data_export_import.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/dynamic_prompt.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/knowledge_loader.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/llm_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/llm_client_factory.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/logger.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/memory_tools.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/prompt_security.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/retry_handler.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm/thread_safe_db.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm.egg-info/dependency_links.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm.egg-info/entry_points.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm.egg-info/requires.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/mem_llm.egg-info/top_level.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/requirements-dev.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/requirements.txt +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/setup.cfg +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_advanced_coverage.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_backward_compatibility.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_conversation_summarizer.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_data_export_import.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_improvements.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_integration.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_llm_backends.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_llm_client.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_mem_agent.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_memory_manager.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_memory_tools.py +0 -0
- {mem_llm-1.3.1 → mem_llm-1.3.2}/tests/test_qwen3_model.py +0 -0
```diff
--- mem_llm-1.3.1/CHANGELOG.md
+++ mem_llm-1.3.2/CHANGELOG.md
@@ -5,6 +5,68 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.3.2] - 2025-11-02
+
+### 🎉 Major Features
+
+- 📊 **Response Metrics & Quality Analytics** (v1.3.1+)
+  - `ChatResponse` dataclass: Comprehensive response tracking
+  - `ResponseMetricsAnalyzer`: Aggregate analytics and monitoring
+  - Confidence scoring: Based on KB usage, memory, temperature, and length
+  - Real-time latency tracking: Monitor response performance
+  - Quality labels: High/Medium/Low classification
+  - Export metrics: JSON and summary formats for dashboards
+  - Production monitoring: Health checks and SLA tracking
+
+- 🔍 **Vector Search & Semantic Knowledge Base** (v1.3.2+)
+  - ChromaDB integration: Semantic search with embeddings
+  - Sentence-transformers support: `all-MiniLM-L6-v2` default model
+  - Cross-lingual search: Understands meaning across languages
+  - Hybrid search: Vector + keyword search combination
+  - Better relevancy: Semantic understanding vs keyword matching
+  - Optional feature: Install with `pip install chromadb sentence-transformers`
+
+### 🆕 New Components
+
+- `response_metrics.py`: `ChatResponse`, `ResponseMetricsAnalyzer`, `calculate_confidence`
+- `vector_store.py`: `VectorStore`, `ChromaVectorStore`, `create_vector_store`
+- Enhanced `SQLMemoryManager`: Vector search integration
+- Enhanced `MemAgent`: Response metrics and vector search support
+
+### 🔄 Enhanced Features
+
+- **MemAgent.chat()**: New `return_metrics` parameter for detailed response analysis
+- **Memory Metadata**: Automatic saving of response metrics in conversations
+- **User Profile**: Improved preferences and summary extraction/parsing
+- **Knowledge Base Search**: Optional vector search with `use_vector_search=True`
+- **ChromaDB Sync**: `sync_all_kb_to_vector_store()` method for existing KB entries
+
+### 📚 New Examples
+
+- `15_response_metrics.py`: Response quality metrics and analytics
+- `16_vector_search.py`: Semantic/vector search demonstration
+
+### 🐛 Bug Fixes
+
+- Fixed metadata not being saved in conversation history
+- Fixed preferences parsing from JSON string to dict
+- Fixed summary generation for existing users
+- Fixed `get_user_profile()` SQL/JSON memory detection logic
+- Fixed ChromaDB embedding function compatibility
+
+### 📝 Documentation
+
+- Updated all examples: Simplified and more readable
+- Enhanced README with new features
+- Vector search usage guide
+
+### ⚡ Improved
+
+- Better error handling for ChromaDB initialization
+- Fallback mechanism for embedding function loading
+- Enhanced similarity score calculation for vector search
+- Improved conversation metadata tracking
+
 ## [1.3.1] - 2025-10-31
 
 ### 📝 Documentation
```
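The metrics flow described above lends itself to a short sketch. Everything below that is not named in the changelog is an assumption for illustration, not the package's confirmed API: the constructor arguments, the `user_id` parameter, the `text`/`confidence`/`latency` fields, and the `add_response`/`get_summary` methods are all guesses; `15_response_metrics.py` in the release shows the real usage.

```python
# Hedged sketch only: names marked "assumed" are illustrative, not confirmed API.
from mem_llm import MemAgent, ResponseMetricsAnalyzer

agent = MemAgent(model="llama3.2")  # assumed constructor signature and model name
analyzer = ResponseMetricsAnalyzer()

# Per the changelog, return_metrics=True makes chat() return a ChatResponse
# dataclass instead of a plain string.
response = agent.chat(
    "What is your refund policy?",
    user_id="alice",        # assumed parameter name
    return_metrics=True,    # flag named in the changelog
)

print(response.text)        # assumed field: the reply itself
print(response.confidence)  # scored from KB usage, memory, temperature, length
print(response.latency)     # assumed field: per-call latency

# Aggregate responses for quality monitoring and dashboard export.
analyzer.add_response(response)  # assumed method name
print(analyzer.get_summary())    # assumed: the changelog's summary-format export
```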
```diff
--- mem_llm-1.3.1/mem_llm.egg-info/PKG-INFO
+++ mem_llm-1.3.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: mem-llm
-Version: 1.3.1
+Version: 1.3.2
 Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
 Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
 License: MIT
@@ -70,7 +70,14 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
 - **Documentation**: See examples/ directory
 
-## 🆕 What's New in v1.3.1
+## 🆕 What's New in v1.3.2
+
+- 📊 **Response Metrics** (v1.3.1+) – Track confidence, latency, KB usage, and quality analytics
+- 🔍 **Vector Search** (v1.3.2+) – Semantic search with ChromaDB, cross-lingual support
+- 🎯 **Quality Monitoring** – Production-ready metrics for response quality
+- 🌐 **Semantic Understanding** – Understands meaning, not just keywords
+
+## What's New in v1.3.0
 
 - 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
 - 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
@@ -79,12 +86,14 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - 📚 **New Examples**: 4 additional examples showing multi-backend usage
 - 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
 
-[See full changelog](CHANGELOG.md
+[See full changelog](CHANGELOG.md)
 
 ## ✨ Key Features
 
 - 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
 - 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
+- 📊 **Response Metrics** (v1.3.1+) - Track confidence, latency, KB usage, and quality analytics
+- 🔍 **Vector Search** (v1.3.2+) - Semantic search with ChromaDB, cross-lingual support
 - 🧠 **Persistent Memory** - Remembers conversations across sessions
 - 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
 - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
```
```diff
--- mem_llm-1.3.1/README.md
+++ mem_llm-1.3.2/README.md
@@ -15,7 +15,14 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - **Issues**: https://github.com/emredeveloper/Mem-LLM/issues
 - **Documentation**: See examples/ directory
 
-## 🆕 What's New in v1.3.1
+## 🆕 What's New in v1.3.2
+
+- 📊 **Response Metrics** (v1.3.1+) – Track confidence, latency, KB usage, and quality analytics
+- 🔍 **Vector Search** (v1.3.2+) – Semantic search with ChromaDB, cross-lingual support
+- 🎯 **Quality Monitoring** – Production-ready metrics for response quality
+- 🌐 **Semantic Understanding** – Understands meaning, not just keywords
+
+## What's New in v1.3.0
 
 - 🔌 **Multi-Backend Support**: Choose between Ollama (local), LM Studio (local), or Google Gemini (cloud)
 - 🏗️ **Factory Pattern**: Clean, extensible architecture for easy backend switching
@@ -24,12 +31,14 @@ Mem-LLM is a powerful Python library that brings persistent memory capabilities
 - 📚 **New Examples**: 4 additional examples showing multi-backend usage
 - 🎯 **Backward Compatible**: All v1.2.0 code still works without changes
 
-[See full changelog](CHANGELOG.md
+[See full changelog](CHANGELOG.md)
 
 ## ✨ Key Features
 
 - 🔌 **Multi-Backend Support** (v1.3.0+) - Choose Ollama, LM Studio, or Gemini with unified API
 - 🔍 **Auto-Detection** (v1.3.0+) - Automatically find and use available LLM services
+- 📊 **Response Metrics** (v1.3.1+) - Track confidence, latency, KB usage, and quality analytics
+- 🔍 **Vector Search** (v1.3.2+) - Semantic search with ChromaDB, cross-lingual support
 - 🧠 **Persistent Memory** - Remembers conversations across sessions
 - 🤖 **Universal Model Support** - Works with 100+ Ollama models, LM Studio models, and Gemini
 - 💾 **Dual Storage Modes** - JSON (simple) or SQLite (advanced) memory backends
```
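The vector-search bullets repeated in the README suggest usage roughly like the following. `sync_all_kb_to_vector_store()` and `use_vector_search=True` are named in the changelog; which object owns the sync method, the `search_knowledge_base` method name, and the constructor arguments are all assumptions to be checked against `16_vector_search.py`.

```python
# Hedged sketch: requires the optional extras
#   pip install chromadb sentence-transformers
from mem_llm import MemAgent

agent = MemAgent(model="llama3.2", enable_vector_search=True)  # assumed kwargs

# Changelog: backfill existing knowledge-base entries into ChromaDB.
# Calling it on the agent is an assumption; it may live on the memory manager.
agent.sync_all_kb_to_vector_store()

# Semantic lookup matches on meaning rather than exact keywords, so a
# paraphrased (or cross-lingual) query can still hit the right KB entry.
results = agent.search_knowledge_base(  # assumed method name
    "how long do refunds take?",
    use_vector_search=True,             # flag named in the changelog
)
for entry in results:
    print(entry)
```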
```diff
--- mem_llm-1.3.1/mem_llm/__init__.py
+++ mem_llm-1.3.2/mem_llm/__init__.py
@@ -63,7 +63,14 @@ try:
 except ImportError:
     __all_export_import__ = []
 
-__version__ = "1.3.1"
+# Response Metrics (v1.3.1+)
+try:
+    from .response_metrics import ChatResponse, ResponseMetricsAnalyzer, calculate_confidence
+    __all_metrics__ = ["ChatResponse", "ResponseMetricsAnalyzer", "calculate_confidence"]
+except ImportError:
+    __all_metrics__ = []
+
+__version__ = "1.3.2"
 __author__ = "C. Emre Karataş"
 
 # Multi-backend LLM support (v1.3.0+)
@@ -80,4 +87,4 @@ __all__ = [
     "MemAgent",
     "MemoryManager",
     "OllamaClient",
-] + __all_llm_backends__ + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__
+] + __all_llm_backends__ + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__ + __all_metrics__
```
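The hunk above extends the package's existing optional-dependency pattern: each feature group is imported inside `try`/`except ImportError`, and its names are appended to `__all__` only when the import succeeds, so a missing extra degrades silently rather than breaking `import mem_llm`. Downstream code can mirror the same guard; a minimal sketch using only names visible in this diff:

```python
# Degrade gracefully if the metrics module (or one of its dependencies) is absent.
try:
    from mem_llm import ChatResponse, ResponseMetricsAnalyzer, calculate_confidence
    METRICS_AVAILABLE = True
except ImportError:
    METRICS_AVAILABLE = False

def make_analyzer():
    """Return an analyzer when metrics are installed, else None."""
    return ResponseMetricsAnalyzer() if METRICS_AVAILABLE else None
```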
```diff
--- mem_llm-1.3.1/mem_llm/config_manager.py
+++ mem_llm-1.3.2/mem_llm/config_manager.py
@@ -62,7 +62,9 @@ class ConfigManager:
             "default_kb": "ecommerce",
             "custom_kb_file": None,
             "search_limit": 5,
-            "min_relevance_score": 0.3
+            "min_relevance_score": 0.3,
+            "enable_vector_search": False,  # v1.3.2+ - Optional semantic search
+            "embedding_model": "all-MiniLM-L6-v2"  # Sentence transformers model
         },
         "response": {
             "use_knowledge_base": True,
```