mem0ai-azure-mysql 0.1.115.2__py3-none-any.whl → 0.1.116.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. mem0/client/main.py +20 -17
  2. mem0/configs/llms/anthropic.py +56 -0
  3. mem0/configs/llms/aws_bedrock.py +191 -0
  4. mem0/configs/llms/azure.py +57 -0
  5. mem0/configs/llms/base.py +29 -119
  6. mem0/configs/llms/deepseek.py +56 -0
  7. mem0/configs/llms/lmstudio.py +59 -0
  8. mem0/configs/llms/ollama.py +56 -0
  9. mem0/configs/llms/openai.py +76 -0
  10. mem0/configs/llms/vllm.py +56 -0
  11. mem0/configs/vector_stores/databricks.py +63 -0
  12. mem0/configs/vector_stores/elasticsearch.py +18 -0
  13. mem0/configs/vector_stores/milvus.py +1 -0
  14. mem0/configs/vector_stores/pgvector.py +15 -2
  15. mem0/configs/vector_stores/pinecone.py +1 -0
  16. mem0/embeddings/azure_openai.py +0 -3
  17. mem0/embeddings/ollama.py +1 -1
  18. mem0/graphs/configs.py +26 -2
  19. mem0/graphs/neptune/main.py +1 -0
  20. mem0/graphs/tools.py +6 -6
  21. mem0/llms/anthropic.py +33 -10
  22. mem0/llms/aws_bedrock.py +484 -154
  23. mem0/llms/azure_openai.py +30 -19
  24. mem0/llms/azure_openai_structured.py +19 -4
  25. mem0/llms/base.py +105 -6
  26. mem0/llms/deepseek.py +31 -9
  27. mem0/llms/lmstudio.py +75 -14
  28. mem0/llms/ollama.py +44 -32
  29. mem0/llms/openai.py +39 -22
  30. mem0/llms/vllm.py +32 -14
  31. mem0/memory/base.py +2 -2
  32. mem0/memory/graph_memory.py +166 -54
  33. mem0/memory/kuzu_memory.py +710 -0
  34. mem0/memory/main.py +59 -37
  35. mem0/memory/memgraph_memory.py +43 -35
  36. mem0/memory/utils.py +51 -0
  37. mem0/proxy/main.py +5 -10
  38. mem0/utils/factory.py +132 -25
  39. mem0/vector_stores/azure_ai_search.py +0 -3
  40. mem0/vector_stores/chroma.py +27 -2
  41. mem0/vector_stores/configs.py +1 -0
  42. mem0/vector_stores/databricks.py +759 -0
  43. mem0/vector_stores/elasticsearch.py +2 -0
  44. mem0/vector_stores/langchain.py +3 -2
  45. mem0/vector_stores/milvus.py +3 -1
  46. mem0/vector_stores/mongodb.py +20 -1
  47. mem0/vector_stores/pgvector.py +83 -9
  48. mem0/vector_stores/pinecone.py +17 -8
  49. mem0/vector_stores/qdrant.py +30 -0
  50. mem0ai_azure_mysql-0.1.116.2.data/data/README.md +24 -0
  51. mem0ai_azure_mysql-0.1.116.2.dist-info/METADATA +88 -0
  52. {mem0ai_azure_mysql-0.1.115.2.dist-info → mem0ai_azure_mysql-0.1.116.2.dist-info}/RECORD +53 -43
  53. mem0ai_azure_mysql-0.1.115.2.data/data/README.md +0 -169
  54. mem0ai_azure_mysql-0.1.115.2.dist-info/METADATA +0 -224
  55. mem0ai_azure_mysql-0.1.115.2.dist-info/licenses/LICENSE +0 -201
  56. {mem0ai_azure_mysql-0.1.115.2.dist-info → mem0ai_azure_mysql-0.1.116.2.dist-info}/WHEEL +0 -0
mem0/client/main.py CHANGED
@@ -267,10 +267,14 @@ class MemoryClient:
         Update a memory by ID.
         Args:
             memory_id (str): Memory ID.
-            text (str, optional): Data to update in the memory.
+            text (str, optional): New content to update the memory with.
             metadata (dict, optional): Metadata to update in the memory.
+
         Returns:
             Dict[str, Any]: The response from the server.
+
+        Example:
+            >>> client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
         """
         if text is None and metadata is None:
             raise ValueError("Either text or metadata must be provided for update.")
@@ -447,16 +451,13 @@ class MemoryClient:
         """Batch update memories.
 
         Args:
-            memories: List of memory dictionaries to update. Each dictionary
-                must contain:
+            memories: List of memory dictionaries to update. Each dictionary must contain:
                 - memory_id (str): ID of the memory to update
-                - text (str): New text content for the memory
+                - text (str, optional): New text content for the memory
+                - metadata (dict, optional): New metadata for the memory
 
         Returns:
-            str: Message indicating the success of the batch update.
-
-        Raises:
-            APIError: If the API request fails.
+            Dict[str, Any]: The response from the server.
         """
         response = self.client.put("/v1/batch/", json={"memories": memories})
         response.raise_for_status()
@@ -1057,13 +1058,18 @@ class AsyncMemoryClient:
         self, memory_id: str, text: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None
     ) -> Dict[str, Any]:
         """
-        Update a memory by ID.
+        Update a memory by ID asynchronously.
+
         Args:
             memory_id (str): Memory ID.
-            text (str, optional): Data to update in the memory.
+            text (str, optional): New content to update the memory with.
             metadata (dict, optional): Metadata to update in the memory.
+
         Returns:
             Dict[str, Any]: The response from the server.
+
+        Example:
+            >>> await client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
         """
         if text is None and metadata is None:
             raise ValueError("Either text or metadata must be provided for update.")
@@ -1232,16 +1238,13 @@ class AsyncMemoryClient:
         """Batch update memories.
 
         Args:
-            memories: List of memory dictionaries to update. Each dictionary
-                must contain:
+            memories: List of memory dictionaries to update. Each dictionary must contain:
                 - memory_id (str): ID of the memory to update
-                - text (str): New text content for the memory
+                - text (str, optional): New text content for the memory
+                - metadata (dict, optional): New metadata for the memory
 
         Returns:
-            str: Message indicating the success of the batch update.
-
-        Raises:
-            APIError: If the API request fails.
+            Dict[str, Any]: The response from the server.
         """
         response = await self.async_client.put("/v1/batch/", json={"memories": memories})
         response.raise_for_status()
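
Taken together, these docstring changes align the sync and async clients on one contract: update and batch_update accept optional text and/or metadata and return the server response as a dict. A minimal usage sketch based only on the docstrings above (the IDs and values are placeholders, and batch_update is assumed to be the method these hunks belong to):

from mem0 import MemoryClient

client = MemoryClient(api_key="your-api-key")

# Single update: at least one of text/metadata is required,
# otherwise the client raises ValueError.
client.update(memory_id="mem_123", text="Likes to play tennis on weekends")

# Batch update: each dict carries memory_id plus optional text and/or
# metadata; the server response comes back as a dict.
client.batch_update(
    [
        {"memory_id": "mem_123", "text": "Prefers morning meetings"},
        {"memory_id": "mem_456", "metadata": {"category": "hobbies"}},
    ]
)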
mem0/configs/llms/anthropic.py ADDED
@@ -0,0 +1,56 @@
+from typing import Optional
+
+from mem0.configs.llms.base import BaseLlmConfig
+
+
+class AnthropicConfig(BaseLlmConfig):
+    """
+    Configuration class for Anthropic-specific parameters.
+    Inherits from BaseLlmConfig and adds Anthropic-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # Anthropic-specific parameters
+        anthropic_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize Anthropic configuration.
+
+        Args:
+            model: Anthropic model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: Anthropic API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            anthropic_base_url: Anthropic API base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # Anthropic-specific parameters
+        self.anthropic_base_url = anthropic_base_url
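
A minimal instantiation sketch for the new class, assuming it is importable from mem0.configs.llms.anthropic as listed in the files-changed table (the model string is illustrative):

from mem0.configs.llms.anthropic import AnthropicConfig

config = AnthropicConfig(
    model="claude-3-5-sonnet-20240620",  # illustrative model name
    api_key="sk-ant-...",
    anthropic_base_url=None,  # None leaves endpoint selection to the Anthropic client
)
print(config.temperature)  # 0.1, the default inherited from BaseLlmConfig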
mem0/configs/llms/aws_bedrock.py ADDED
@@ -0,0 +1,191 @@
+from typing import Optional, Dict, Any, List
+from mem0.configs.llms.base import BaseLlmConfig
+import os
+
+
+class AWSBedrockConfig(BaseLlmConfig):
+    """
+    Configuration class for AWS Bedrock LLM integration.
+
+    Supports all available Bedrock models with automatic provider detection.
+    """
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        max_tokens: int = 2000,
+        top_p: float = 0.9,
+        top_k: int = 1,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_region: str = "us-west-2",
+        aws_session_token: Optional[str] = None,
+        aws_profile: Optional[str] = None,
+        model_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ):
+        """
+        Initialize AWS Bedrock configuration.
+
+        Args:
+            model: Bedrock model identifier (e.g., "amazon.nova-3-mini-20241119-v1:0")
+            temperature: Controls randomness (0.0 to 2.0)
+            max_tokens: Maximum tokens to generate
+            top_p: Nucleus sampling parameter (0.0 to 1.0)
+            top_k: Top-k sampling parameter (1 to 40)
+            aws_access_key_id: AWS access key (optional, uses env vars if not provided)
+            aws_secret_access_key: AWS secret key (optional, uses env vars if not provided)
+            aws_region: AWS region for Bedrock service
+            aws_session_token: AWS session token for temporary credentials
+            aws_profile: AWS profile name for credentials
+            model_kwargs: Additional model-specific parameters
+            **kwargs: Additional arguments passed to base class
+        """
+        super().__init__(
+            model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
+            temperature=temperature,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            **kwargs,
+        )
+
+        self.aws_access_key_id = aws_access_key_id
+        self.aws_secret_access_key = aws_secret_access_key
+        self.aws_region = aws_region
+        self.aws_session_token = aws_session_token
+        self.aws_profile = aws_profile
+        self.model_kwargs = model_kwargs or {}
+
+    @property
+    def provider(self) -> str:
+        """Get the provider from the model identifier."""
+        if not self.model or "." not in self.model:
+            return "unknown"
+        return self.model.split(".")[0]
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name without provider prefix."""
+        if not self.model or "." not in self.model:
+            return self.model
+        return ".".join(self.model.split(".")[1:])
+
+    def get_model_config(self) -> Dict[str, Any]:
+        """Get model-specific configuration parameters."""
+        base_config = {
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+        }
+
+        # Add custom model kwargs
+        base_config.update(self.model_kwargs)
+
+        return base_config
+
+    def get_aws_config(self) -> Dict[str, Any]:
+        """Get AWS configuration parameters."""
+        config = {
+            "region_name": self.aws_region,
+        }
+
+        if self.aws_access_key_id:
+            config["aws_access_key_id"] = self.aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
+
+        if self.aws_secret_access_key:
+            config["aws_secret_access_key"] = self.aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY")
+
+        if self.aws_session_token:
+            config["aws_session_token"] = self.aws_session_token or os.getenv("AWS_SESSION_TOKEN")
+
+        if self.aws_profile:
+            config["profile_name"] = self.aws_profile or os.getenv("AWS_PROFILE")
+
+        return config
+
+    def validate_model_format(self) -> bool:
+        """
+        Validate that the model identifier follows Bedrock naming convention.
+
+        Returns:
+            True if valid, False otherwise
+        """
+        if not self.model:
+            return False
+
+        # Check if model follows provider.model-name format
+        if "." not in self.model:
+            return False
+
+        provider, model_name = self.model.split(".", 1)
+
+        # Validate provider
+        valid_providers = [
+            "ai21", "amazon", "anthropic", "cohere", "meta", "mistral",
+            "stability", "writer", "deepseek", "gpt-oss", "perplexity",
+            "snowflake", "titan", "command", "j2", "llama"
+        ]
+
+        if provider not in valid_providers:
+            return False
+
+        # Validate model name is not empty
+        if not model_name:
+            return False
+
+        return True
+
+    def get_supported_regions(self) -> List[str]:
+        """Get list of AWS regions that support Bedrock."""
+        return [
+            "us-east-1",
+            "us-west-2",
+            "us-east-2",
+            "eu-west-1",
+            "ap-southeast-1",
+            "ap-northeast-1",
+        ]
+
+    def get_model_capabilities(self) -> Dict[str, Any]:
+        """Get model capabilities based on provider."""
+        capabilities = {
+            "supports_tools": False,
+            "supports_vision": False,
+            "supports_streaming": False,
+            "supports_multimodal": False,
+        }
+
+        if self.provider == "anthropic":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_vision": True,
+                "supports_streaming": True,
+                "supports_multimodal": True,
+            })
+        elif self.provider == "amazon":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_vision": True,
+                "supports_streaming": True,
+                "supports_multimodal": True,
+            })
+        elif self.provider == "cohere":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_streaming": True,
+            })
+        elif self.provider == "meta":
+            capabilities.update({
+                "supports_vision": True,
+                "supports_streaming": True,
+            })
+        elif self.provider == "mistral":
+            capabilities.update({
+                "supports_vision": True,
+                "supports_streaming": True,
+            })
+
+        return capabilities
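
In this class the provider prefix before the first "." drives both validation and the capability map. A short sketch of that behavior, following the properties and methods defined above (the model string is the class's own default, used here for illustration):

from mem0.configs.llms.aws_bedrock import AWSBedrockConfig

config = AWSBedrockConfig(
    model="anthropic.claude-3-5-sonnet-20240620-v1:0",
    aws_region="us-east-1",
)

print(config.provider)                  # "anthropic" (prefix before the first ".")
print(config.model_name)                # "claude-3-5-sonnet-20240620-v1:0"
print(config.validate_model_format())   # True: known provider and non-empty model name
print(config.get_model_capabilities())  # tools/vision/streaming/multimodal all True for anthropic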
mem0/configs/llms/azure.py ADDED
@@ -0,0 +1,57 @@
+from typing import Any, Dict, Optional
+
+from mem0.configs.base import AzureConfig
+from mem0.configs.llms.base import BaseLlmConfig
+
+
+class AzureOpenAIConfig(BaseLlmConfig):
+    """
+    Configuration class for Azure OpenAI-specific parameters.
+    Inherits from BaseLlmConfig and adds Azure OpenAI-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # Azure OpenAI-specific parameters
+        azure_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Initialize Azure OpenAI configuration.
+
+        Args:
+            model: Azure OpenAI model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: Azure OpenAI API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            azure_kwargs: Azure-specific configuration, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # Azure OpenAI-specific parameters
+        self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
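
Unlike the other new config classes, this one wraps its provider-specific dict in mem0's AzureConfig at init time. A hedged sketch (the azure_kwargs keys shown are assumptions about mem0.configs.base.AzureConfig, not confirmed by this diff):

from mem0.configs.llms.azure import AzureOpenAIConfig

config = AzureOpenAIConfig(
    model="gpt-4o-mini",  # illustrative deployment model
    azure_kwargs={
        # assumed AzureConfig fields; check mem0.configs.base.AzureConfig for the real set
        "azure_deployment": "my-deployment",
        "azure_endpoint": "https://my-resource.openai.azure.com",
        "api_version": "2024-02-01",
    },
)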
mem0/configs/llms/base.py CHANGED
@@ -3,12 +3,14 @@ from typing import Dict, Optional, Union
 
 import httpx
 
-from mem0.configs.base import AzureConfig
-
 
 class BaseLlmConfig(ABC):
     """
-    Config for LLMs.
+    Base configuration for LLMs with only common parameters.
+    Provider-specific configurations should be handled by separate config classes.
+
+    This class contains only the parameters that are common across all LLM providers.
+    For provider-specific parameters, use the appropriate provider config class.
     """
 
     def __init__(
@@ -21,89 +23,34 @@ class BaseLlmConfig(ABC):
         top_k: int = 1,
         enable_vision: bool = False,
         vision_details: Optional[str] = "auto",
-        # Openrouter specific
-        models: Optional[list[str]] = None,
-        route: Optional[str] = "fallback",
-        openrouter_base_url: Optional[str] = None,
-        # Openai specific
-        openai_base_url: Optional[str] = None,
-        site_url: Optional[str] = None,
-        app_name: Optional[str] = None,
-        # Ollama specific
-        ollama_base_url: Optional[str] = None,
-        # AzureOpenAI specific
-        azure_kwargs: Optional[AzureConfig] = {},
-        # AzureOpenAI specific
         http_client_proxies: Optional[Union[Dict, str]] = None,
-        # DeepSeek specific
-        deepseek_base_url: Optional[str] = None,
-        # XAI specific
-        xai_base_url: Optional[str] = None,
-        # Sarvam specific
-        sarvam_base_url: Optional[str] = "https://api.sarvam.ai/v1",
-        # LM Studio specific
-        lmstudio_base_url: Optional[str] = "http://localhost:1234/v1",
-        lmstudio_response_format: dict = None,
-        # vLLM specific
-        vllm_base_url: Optional[str] = "http://localhost:8000/v1",
-        # AWS Bedrock specific
-        aws_access_key_id: Optional[str] = None,
-        aws_secret_access_key: Optional[str] = None,
-        aws_region: Optional[str] = "us-west-2",
     ):
         """
-        Initializes a configuration class instance for the LLM.
-
-        :param model: Controls the OpenAI model used, defaults to None
-        :type model: Optional[str], optional
-        :param temperature: Controls the randomness of the model's output.
-            Higher values (closer to 1) make output more random, lower values make it more deterministic, defaults to 0
-        :type temperature: float, optional
-        :param api_key: OpenAI API key to be use, defaults to None
-        :type api_key: Optional[str], optional
-        :param max_tokens: Controls how many tokens are generated, defaults to 2000
-        :type max_tokens: int, optional
-        :param top_p: Controls the diversity of words. Higher values (closer to 1) make word selection more diverse,
-            defaults to 1
-        :type top_p: float, optional
-        :param top_k: Controls the diversity of words. Higher values make word selection more diverse, defaults to 0
-        :type top_k: int, optional
-        :param enable_vision: Enable vision for the LLM, defaults to False
-        :type enable_vision: bool, optional
-        :param vision_details: Details of the vision to be used [low, high, auto], defaults to "auto"
-        :type vision_details: Optional[str], optional
-        :param models: Openrouter models to use, defaults to None
-        :type models: Optional[list[str]], optional
-        :param route: Openrouter route to be used, defaults to "fallback"
-        :type route: Optional[str], optional
-        :param openrouter_base_url: Openrouter base URL to be use, defaults to "https://openrouter.ai/api/v1"
-        :type openrouter_base_url: Optional[str], optional
-        :param site_url: Openrouter site URL to use, defaults to None
-        :type site_url: Optional[str], optional
-        :param app_name: Openrouter app name to use, defaults to None
-        :type app_name: Optional[str], optional
-        :param ollama_base_url: The base URL of the LLM, defaults to None
-        :type ollama_base_url: Optional[str], optional
-        :param openai_base_url: Openai base URL to be use, defaults to "https://api.openai.com/v1"
-        :type openai_base_url: Optional[str], optional
-        :param azure_kwargs: key-value arguments for the AzureOpenAI LLM model, defaults a dict inside init
-        :type azure_kwargs: Optional[Dict[str, Any]], defaults a dict inside init
-        :param http_client_proxies: The proxy server(s) settings used to create self.http_client, defaults to None
-        :type http_client_proxies: Optional[Dict | str], optional
-        :param deepseek_base_url: DeepSeek base URL to be use, defaults to None
-        :type deepseek_base_url: Optional[str], optional
-        :param xai_base_url: XAI base URL to be use, defaults to None
-        :type xai_base_url: Optional[str], optional
-        :param sarvam_base_url: Sarvam base URL to be use, defaults to "https://api.sarvam.ai/v1"
-        :type sarvam_base_url: Optional[str], optional
-        :param lmstudio_base_url: LM Studio base URL to be use, defaults to "http://localhost:1234/v1"
-        :type lmstudio_base_url: Optional[str], optional
-        :param lmstudio_response_format: LM Studio response format to be use, defaults to None
-        :type lmstudio_response_format: Optional[Dict], optional
-        :param vllm_base_url: vLLM base URL to be use, defaults to "http://localhost:8000/v1"
-        :type vllm_base_url: Optional[str], optional
+        Initialize a base configuration class instance for the LLM.
+
+        Args:
+            model: The model identifier to use (e.g., "gpt-4o-mini", "claude-3-5-sonnet-20240620")
+                Defaults to None (will be set by provider-specific configs)
+            temperature: Controls the randomness of the model's output.
+                Higher values (closer to 1) make output more random, lower values make it more deterministic.
+                Range: 0.0 to 2.0. Defaults to 0.1
+            api_key: API key for the LLM provider. If None, will try to get from environment variables.
+                Defaults to None
+            max_tokens: Maximum number of tokens to generate in the response.
+                Range: 1 to 4096 (varies by model). Defaults to 2000
+            top_p: Nucleus sampling parameter. Controls diversity via nucleus sampling.
+                Higher values (closer to 1) make word selection more diverse.
+                Range: 0.0 to 1.0. Defaults to 0.1
+            top_k: Top-k sampling parameter. Limits the number of tokens considered for each step.
+                Higher values make word selection more diverse.
+                Range: 1 to 40. Defaults to 1
+            enable_vision: Whether to enable vision capabilities for the model.
+                Only applicable to vision-enabled models. Defaults to False
+            vision_details: Level of detail for vision processing.
+                Options: "low", "high", "auto". Defaults to "auto"
+            http_client_proxies: Proxy settings for HTTP client.
+                Can be a dict or string. Defaults to None
         """
-
         self.model = model
         self.temperature = temperature
         self.api_key = api_key
@@ -112,41 +59,4 @@ class BaseLlmConfig(ABC):
         self.top_k = top_k
         self.enable_vision = enable_vision
         self.vision_details = vision_details
-
-        # AzureOpenAI specific
         self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
-
-        # Openrouter specific
-        self.models = models
-        self.route = route
-        self.openrouter_base_url = openrouter_base_url
-        self.openai_base_url = openai_base_url
-        self.site_url = site_url
-        self.app_name = app_name
-
-        # Ollama specific
-        self.ollama_base_url = ollama_base_url
-
-        # DeepSeek specific
-        self.deepseek_base_url = deepseek_base_url
-
-        # AzureOpenAI specific
-        self.azure_kwargs = AzureConfig(**azure_kwargs) or {}
-
-        # XAI specific
-        self.xai_base_url = xai_base_url
-
-        # Sarvam specific
-        self.sarvam_base_url = sarvam_base_url
-
-        # LM Studio specific
-        self.lmstudio_base_url = lmstudio_base_url
-        self.lmstudio_response_format = lmstudio_response_format
-
-        # vLLM specific
-        self.vllm_base_url = vllm_base_url
-
-        # AWS Bedrock specific
-        self.aws_access_key_id = aws_access_key_id
-        self.aws_secret_access_key = aws_secret_access_key
-        self.aws_region = aws_region
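
The practical effect of this refactor: provider-specific keys such as deepseek_base_url no longer exist on BaseLlmConfig and must go through the matching subclass, e.g. the DeepSeekConfig added below. A before/after sketch:

# Before (0.1.115.2): one class carried every provider's knobs.
# config = BaseLlmConfig(model="deepseek-chat", deepseek_base_url="https://api.deepseek.com")  # no longer valid

# After (0.1.116.2): common parameters stay on the base class,
# provider-specific ones move to the matching subclass.
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.deepseek import DeepSeekConfig

config = DeepSeekConfig(model="deepseek-chat", deepseek_base_url="https://api.deepseek.com")
assert isinstance(config, BaseLlmConfig)  # subclasses remain drop-in BaseLlmConfig instances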
mem0/configs/llms/deepseek.py ADDED
@@ -0,0 +1,56 @@
+from typing import Optional
+
+from mem0.configs.llms.base import BaseLlmConfig
+
+
+class DeepSeekConfig(BaseLlmConfig):
+    """
+    Configuration class for DeepSeek-specific parameters.
+    Inherits from BaseLlmConfig and adds DeepSeek-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # DeepSeek-specific parameters
+        deepseek_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize DeepSeek configuration.
+
+        Args:
+            model: DeepSeek model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: DeepSeek API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            deepseek_base_url: DeepSeek API base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # DeepSeek-specific parameters
+        self.deepseek_base_url = deepseek_base_url
mem0/configs/llms/lmstudio.py ADDED
@@ -0,0 +1,59 @@
+from typing import Any, Dict, Optional
+
+from mem0.configs.llms.base import BaseLlmConfig
+
+
+class LMStudioConfig(BaseLlmConfig):
+    """
+    Configuration class for LM Studio-specific parameters.
+    Inherits from BaseLlmConfig and adds LM Studio-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # LM Studio-specific parameters
+        lmstudio_base_url: Optional[str] = None,
+        lmstudio_response_format: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Initialize LM Studio configuration.
+
+        Args:
+            model: LM Studio model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: LM Studio API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            lmstudio_base_url: LM Studio base URL, defaults to None
+            lmstudio_response_format: LM Studio response format, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # LM Studio-specific parameters
+        self.lmstudio_base_url = lmstudio_base_url or "http://localhost:1234/v1"
+        self.lmstudio_response_format = lmstudio_response_format
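
Of the new config classes shown in this diff, LMStudioConfig is the only one that hard-codes a fallback for its base URL. A quick sketch of that behavior (the custom URL is a placeholder):

from mem0.configs.llms.lmstudio import LMStudioConfig

config = LMStudioConfig()
print(config.lmstudio_base_url)  # "http://localhost:1234/v1" via the `or` fallback

config = LMStudioConfig(lmstudio_base_url="http://192.168.1.10:1234/v1")
print(config.lmstudio_base_url)  # an explicit URL is kept as-is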