mem-llm 1.2.0.tar.gz → 1.3.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (51)
  1. {mem_llm-1.2.0 → mem_llm-1.3.0}/CHANGELOG.md +59 -0
  2. {mem_llm-1.2.0/mem_llm.egg-info → mem_llm-1.3.0}/PKG-INFO +4 -3
  3. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/__init__.py +12 -3
  4. mem_llm-1.3.0/mem_llm/base_llm_client.py +175 -0
  5. mem_llm-1.3.0/mem_llm/clients/__init__.py +25 -0
  6. mem_llm-1.3.0/mem_llm/clients/gemini_client.py +381 -0
  7. mem_llm-1.3.0/mem_llm/clients/lmstudio_client.py +280 -0
  8. mem_llm-1.3.0/mem_llm/clients/ollama_client.py +268 -0
  9. mem_llm-1.3.0/mem_llm/llm_client_factory.py +277 -0
  10. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/mem_agent.py +123 -37
  11. {mem_llm-1.2.0 → mem_llm-1.3.0/mem_llm.egg-info}/PKG-INFO +4 -3
  12. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm.egg-info/SOURCES.txt +7 -0
  13. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm.egg-info/requires.txt +1 -0
  14. {mem_llm-1.2.0 → mem_llm-1.3.0}/pyproject.toml +5 -4
  15. {mem_llm-1.2.0 → mem_llm-1.3.0}/requirements.txt +1 -0
  16. mem_llm-1.3.0/tests/test_llm_backends.py +352 -0
  17. {mem_llm-1.2.0 → mem_llm-1.3.0}/MANIFEST.in +0 -0
  18. {mem_llm-1.2.0 → mem_llm-1.3.0}/README.md +0 -0
  19. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/cli.py +0 -0
  20. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/config.yaml.example +0 -0
  21. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/config_from_docs.py +0 -0
  22. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/config_manager.py +0 -0
  23. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/conversation_summarizer.py +0 -0
  24. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/data_export_import.py +0 -0
  25. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/dynamic_prompt.py +0 -0
  26. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/knowledge_loader.py +0 -0
  27. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/llm_client.py +0 -0
  28. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/logger.py +0 -0
  29. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/memory_db.py +0 -0
  30. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/memory_manager.py +0 -0
  31. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/memory_tools.py +0 -0
  32. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/prompt_security.py +0 -0
  33. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/retry_handler.py +0 -0
  34. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm/thread_safe_db.py +0 -0
  35. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm.egg-info/dependency_links.txt +0 -0
  36. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm.egg-info/entry_points.txt +0 -0
  37. {mem_llm-1.2.0 → mem_llm-1.3.0}/mem_llm.egg-info/top_level.txt +0 -0
  38. {mem_llm-1.2.0 → mem_llm-1.3.0}/requirements-dev.txt +0 -0
  39. {mem_llm-1.2.0 → mem_llm-1.3.0}/requirements-optional.txt +0 -0
  40. {mem_llm-1.2.0 → mem_llm-1.3.0}/setup.cfg +0 -0
  41. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_advanced_coverage.py +0 -0
  42. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_backward_compatibility.py +0 -0
  43. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_conversation_summarizer.py +0 -0
  44. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_data_export_import.py +0 -0
  45. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_improvements.py +0 -0
  46. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_integration.py +0 -0
  47. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_llm_client.py +0 -0
  48. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_mem_agent.py +0 -0
  49. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_memory_manager.py +0 -0
  50. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_memory_tools.py +0 -0
  51. {mem_llm-1.2.0 → mem_llm-1.3.0}/tests/test_qwen3_model.py +0 -0
@@ -5,6 +5,65 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+ ## [1.3.0] - 2025-10-31
+
+ ### 🎉 Major Features
+
+ - 🔌 **Multi-Backend LLM Support**: Choose your preferred LLM backend
+   - **Ollama**: Local, privacy-first, 100+ models
+   - **LM Studio**: Fast local inference with easy GUI
+   - **Google Gemini**: Powerful cloud models (gemini-2.5-flash)
+   - Unified API across all backends
+   - Seamless switching between backends
+
+ - 🏗️ **Factory Pattern Architecture**: Clean, extensible design
+   - `LLMClientFactory`: Central backend management
+   - `BaseLLMClient`: Abstract interface for all backends
+   - Easy to add new backends in the future
+
+ - 🔍 **Auto-Detection**: Automatically find available LLM service
+   - `auto_detect_backend=True` parameter
+   - Checks Ollama → LM Studio → other local services
+   - No manual configuration needed
+
+ ### 🆕 New Components
+
+ - `BaseLLMClient`: Abstract base class for all LLM backends
+ - `LLMClientFactory`: Factory pattern for backend creation
+ - `OllamaClient` (refactored): Now inherits from BaseLLMClient
+ - `LMStudioClient`: OpenAI-compatible local inference
+ - `GeminiClient`: Google Gemini API integration
+
+ ### 📚 New Examples
+
+ - `11_lmstudio_example.py`: Using LM Studio backend
+ - `12_gemini_example.py`: Using Google Gemini API
+ - `13_multi_backend_comparison.py`: Compare backend performance
+ - `14_auto_detect_backend.py`: Auto-detection feature
+
+ ### 📖 New Documentation
+
+ - `MULTI_BACKEND_GUIDE.md`: Comprehensive guide for multi-backend setup
+
+ ### 🔄 Changed
+
+ - **MemAgent**: Now supports multiple backends (backward compatible)
+ - **Examples**: All simplified for clarity
+ - **Package structure**: Better organized with `clients/` subdirectory
+
+ ### ⚡ Improved
+
+ - **Backward Compatibility**: All v1.2.0 code still works
+ - **Error Messages**: Backend-specific troubleshooting
+ - **Connection Checks**: Improved availability detection
+
+ ### 🧪 Testing
+
+ - 16+ new tests for multi-backend support
+ - Factory pattern tests
+ - Backend availability checks
+ - MemAgent integration tests
+
  ## [1.2.0] - 2025-10-21

  ### Added
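
To make the feature list above concrete, here is a minimal usage sketch based only on what the changelog states. The `auto_detect_backend=True` parameter and the backend names come from the entries above; the explicit `backend=`/`model=` keyword names and the `chat()` call on `MemAgent` are illustrative assumptions, not confirmed by this diff.

```python
# Hypothetical sketch of the 1.3.0 multi-backend API described in the changelog.
# Only auto_detect_backend=True is named there; other argument names are assumed.
from mem_llm import MemAgent

# Let the agent probe local services on its own (Ollama -> LM Studio -> ...)
agent = MemAgent(auto_detect_backend=True)

# Or pin a specific backend explicitly (keyword names assumed for illustration):
# agent = MemAgent(backend="gemini", model="gemini-2.5-flash")

print(agent.chat("Remember that I prefer local models."))  # method name assumed
```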
@@ -1,13 +1,13 @@
  Metadata-Version: 2.2
  Name: mem-llm
- Version: 1.2.0
- Summary: Memory-enabled AI assistant with local LLM support - Now with data import/export and multi-database support
+ Version: 1.3.0
+ Summary: Memory-enabled AI assistant with multi-backend LLM support (Ollama, LM Studio, Gemini) - Local and cloud ready
  Author-email: "C. Emre Karataş" <karatasqemre@gmail.com>
  License: MIT
  Project-URL: Homepage, https://github.com/emredeveloper/Mem-LLM
  Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
  Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
- Keywords: llm,ai,memory,agent,chatbot,ollama,local
+ Keywords: llm,ai,memory,agent,chatbot,ollama,lmstudio,gemini,multi-backend,local
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Developers
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -22,6 +22,7 @@ Description-Content-Type: text/markdown
  Requires-Dist: requests>=2.31.0
  Requires-Dist: pyyaml>=6.0.1
  Requires-Dist: click>=8.1.0
+ Requires-Dist: google-generativeai>=0.3.0
  Provides-Extra: dev
  Requires-Dist: pytest>=7.4.0; extra == "dev"
  Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
@@ -5,7 +5,13 @@ AI library that remembers user interactions

  from .mem_agent import MemAgent
  from .memory_manager import MemoryManager
- from .llm_client import OllamaClient
+ from .llm_client import OllamaClient  # Backward compatibility
+ from .base_llm_client import BaseLLMClient
+ from .llm_client_factory import LLMClientFactory
+
+ # New multi-backend support (v1.3.0+)
+ from .clients import OllamaClient as OllamaClientNew
+ from .clients import LMStudioClient, GeminiClient

  # Tools (optional)
  try:
@@ -57,9 +63,12 @@ try:
  except ImportError:
      __all_export_import__ = []

- __version__ = "1.2.0"
+ __version__ = "1.3.0"
  __author__ = "C. Emre Karataş"

+ # Multi-backend LLM support (v1.3.0+)
+ __all_llm_backends__ = ["BaseLLMClient", "LLMClientFactory", "OllamaClientNew", "LMStudioClient", "GeminiClient"]
+
  # CLI
  try:
      from .cli import cli
@@ -71,4 +80,4 @@ __all__ = [
      "MemAgent",
      "MemoryManager",
      "OllamaClient",
- ] + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__
+ ] + __all_llm_backends__ + __all_tools__ + __all_pro__ + __all_cli__ + __all_security__ + __all_enhanced__ + __all_summarizer__ + __all_export_import__
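
The `__init__.py` changes above keep the old import path working while exposing the new names at package level. A short sketch of the import surface they imply follows; only names visible in this diff (or stated in the changelog) are used, and nothing is constructed:

```python
# Import surface implied by the mem_llm/__init__.py diff above (names only).
import mem_llm
from mem_llm import OllamaClient                      # legacy client, kept for backward compatibility
from mem_llm import BaseLLMClient, LLMClientFactory   # new abstractions (v1.3.0+)
from mem_llm import OllamaClientNew, LMStudioClient, GeminiClient
from mem_llm.clients import OllamaClient as RefactoredOllama

print(mem_llm.__version__)                         # "1.3.0"
print(OllamaClientNew is RefactoredOllama)         # True: OllamaClientNew aliases mem_llm.clients.OllamaClient
print(issubclass(OllamaClientNew, BaseLLMClient))  # True per the changelog: the refactored client inherits the base
```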
@@ -0,0 +1,175 @@
+ """
+ Base LLM Client Interface
+ ==========================
+
+ Abstract base class for all LLM client implementations.
+ Ensures consistent interface across different backends (Ollama, LM Studio, Gemini, etc.)
+
+ Author: C. Emre Karataş
+ Version: 1.3.0
+ """
+
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Optional, Any
+ import logging
+
+
+ class BaseLLMClient(ABC):
+     """
+     Abstract base class for LLM clients
+
+     All LLM backends must implement these methods to ensure
+     compatibility with MemAgent and other components.
+     """
+
+     def __init__(self, model: str = None, **kwargs):
+         """
+         Initialize LLM client
+
+         Args:
+             model: Model name/identifier
+             **kwargs: Backend-specific configuration
+         """
+         self.model = model
+         self.logger = logging.getLogger(self.__class__.__name__)
+
+     @abstractmethod
+     def chat(self, messages: List[Dict[str, str]],
+              temperature: float = 0.7,
+              max_tokens: int = 2000,
+              **kwargs) -> str:
+         """
+         Send chat request and return response
+
+         Args:
+             messages: List of messages in format:
+                 [{"role": "system/user/assistant", "content": "..."}]
+             temperature: Sampling temperature (0.0-1.0)
+             max_tokens: Maximum tokens in response
+             **kwargs: Additional backend-specific parameters
+
+         Returns:
+             Model response text
+
+         Raises:
+             ConnectionError: If cannot connect to service
+             ValueError: If invalid parameters
+         """
+         pass
+
+     @abstractmethod
+     def check_connection(self) -> bool:
+         """
+         Check if LLM service is available and responding
+
+         Returns:
+             True if service is available, False otherwise
+         """
+         pass
+
+     def generate(self, prompt: str,
+                  system_prompt: Optional[str] = None,
+                  temperature: float = 0.7,
+                  max_tokens: int = 500,
+                  **kwargs) -> str:
+         """
+         Generate text from a simple prompt (convenience method)
+
+         Args:
+             prompt: User prompt
+             system_prompt: Optional system prompt
+             temperature: Sampling temperature
+             max_tokens: Maximum tokens
+             **kwargs: Additional parameters
+
+         Returns:
+             Generated text
+         """
+         # Convert to chat format
+         messages = []
+         if system_prompt:
+             messages.append({"role": "system", "content": system_prompt})
+         messages.append({"role": "user", "content": prompt})
+
+         return self.chat(messages, temperature, max_tokens, **kwargs)
+
+     def list_models(self) -> List[str]:
+         """
+         List available models (optional, not all backends support this)
+
+         Returns:
+             List of model names
+         """
+         return [self.model] if self.model else []
+
+     def _format_messages_to_text(self, messages: List[Dict]) -> str:
+         """
+         Helper: Convert message list to text format
+
+         Useful for backends that don't support chat format natively.
+
+         Args:
+             messages: Message list
+
+         Returns:
+             Formatted text prompt
+         """
+         result = []
+         for msg in messages:
+             role = msg.get('role', 'user').upper()
+             content = msg.get('content', '').strip()
+             if content:
+                 result.append(f"{role}: {content}")
+         return "\n\n".join(result)
+
+     def _validate_messages(self, messages: List[Dict]) -> bool:
+         """
+         Validate message format
+
+         Args:
+             messages: Messages to validate
+
+         Returns:
+             True if valid
+
+         Raises:
+             ValueError: If invalid format
+         """
+         if not isinstance(messages, list):
+             raise ValueError("Messages must be a list")
+
+         if not messages:
+             raise ValueError("Messages list cannot be empty")
+
+         for i, msg in enumerate(messages):
+             if not isinstance(msg, dict):
+                 raise ValueError(f"Message {i} must be a dictionary")
+
+             if 'role' not in msg:
+                 raise ValueError(f"Message {i} missing 'role' field")
+
+             if 'content' not in msg:
+                 raise ValueError(f"Message {i} missing 'content' field")
+
+             if msg['role'] not in ['system', 'user', 'assistant']:
+                 raise ValueError(f"Message {i} has invalid role: {msg['role']}")
+
+         return True
+
+     def get_info(self) -> Dict[str, Any]:
+         """
+         Get client information
+
+         Returns:
+             Dictionary with client metadata
+         """
+         return {
+             'backend': self.__class__.__name__,
+             'model': self.model,
+             'available': self.check_connection()
+         }
+
+     def __repr__(self) -> str:
+         """String representation"""
+         return f"{self.__class__.__name__}(model='{self.model}')"
+
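
Since `BaseLLMClient` above only marks `chat()` and `check_connection()` as abstract, adding a backend amounts to a small subclass. The toy echo backend below is purely illustrative (not part of the package) and exercises the inherited `generate()`, `_validate_messages()`, and `get_info()` helpers shown in the diff:

```python
from typing import Dict, List

from mem_llm import BaseLLMClient  # exposed at package level per the __init__.py diff


class EchoClient(BaseLLMClient):
    """Toy backend for illustration: echoes the conversation instead of calling an LLM."""

    def chat(self, messages: List[Dict[str, str]],
             temperature: float = 0.7,
             max_tokens: int = 2000,
             **kwargs) -> str:
        # Reuse the validation/formatting helpers inherited from BaseLLMClient
        self._validate_messages(messages)
        return f"[echo] {self._format_messages_to_text(messages)}"

    def check_connection(self) -> bool:
        return True  # nothing external to connect to


client = EchoClient(model="echo-1")
print(client)             # EchoClient(model='echo-1')
print(client.get_info())  # {'backend': 'EchoClient', 'model': 'echo-1', 'available': True}
print(client.generate("Hello", system_prompt="Be brief"))
```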
@@ -0,0 +1,25 @@
+ """
+ LLM Clients Package
+ ===================
+
+ Multiple LLM backend support for Mem-LLM.
+
+ Available Backends:
+ - OllamaClient: Local Ollama service
+ - LMStudioClient: LM Studio (OpenAI-compatible)
+ - GeminiClient: Google Gemini API
+
+ Author: C. Emre Karataş
+ Version: 1.3.0
+ """
+
+ from .ollama_client import OllamaClient
+ from .lmstudio_client import LMStudioClient
+ from .gemini_client import GeminiClient
+
+ __all__ = [
+     'OllamaClient',
+     'LMStudioClient',
+     'GeminiClient',
+ ]
+