mem0ai-azure-mysql 0.1.115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. mem0/__init__.py +6 -0
  2. mem0/client/__init__.py +0 -0
  3. mem0/client/main.py +1535 -0
  4. mem0/client/project.py +860 -0
  5. mem0/client/utils.py +29 -0
  6. mem0/configs/__init__.py +0 -0
  7. mem0/configs/base.py +90 -0
  8. mem0/configs/dbs/__init__.py +4 -0
  9. mem0/configs/dbs/base.py +41 -0
  10. mem0/configs/dbs/mysql.py +25 -0
  11. mem0/configs/embeddings/__init__.py +0 -0
  12. mem0/configs/embeddings/base.py +108 -0
  13. mem0/configs/enums.py +7 -0
  14. mem0/configs/llms/__init__.py +0 -0
  15. mem0/configs/llms/base.py +152 -0
  16. mem0/configs/prompts.py +333 -0
  17. mem0/configs/vector_stores/__init__.py +0 -0
  18. mem0/configs/vector_stores/azure_ai_search.py +59 -0
  19. mem0/configs/vector_stores/baidu.py +29 -0
  20. mem0/configs/vector_stores/chroma.py +40 -0
  21. mem0/configs/vector_stores/elasticsearch.py +47 -0
  22. mem0/configs/vector_stores/faiss.py +39 -0
  23. mem0/configs/vector_stores/langchain.py +32 -0
  24. mem0/configs/vector_stores/milvus.py +43 -0
  25. mem0/configs/vector_stores/mongodb.py +25 -0
  26. mem0/configs/vector_stores/opensearch.py +41 -0
  27. mem0/configs/vector_stores/pgvector.py +37 -0
  28. mem0/configs/vector_stores/pinecone.py +56 -0
  29. mem0/configs/vector_stores/qdrant.py +49 -0
  30. mem0/configs/vector_stores/redis.py +26 -0
  31. mem0/configs/vector_stores/supabase.py +44 -0
  32. mem0/configs/vector_stores/upstash_vector.py +36 -0
  33. mem0/configs/vector_stores/vertex_ai_vector_search.py +27 -0
  34. mem0/configs/vector_stores/weaviate.py +43 -0
  35. mem0/dbs/__init__.py +4 -0
  36. mem0/dbs/base.py +68 -0
  37. mem0/dbs/configs.py +21 -0
  38. mem0/dbs/mysql.py +321 -0
  39. mem0/embeddings/__init__.py +0 -0
  40. mem0/embeddings/aws_bedrock.py +100 -0
  41. mem0/embeddings/azure_openai.py +43 -0
  42. mem0/embeddings/base.py +31 -0
  43. mem0/embeddings/configs.py +30 -0
  44. mem0/embeddings/gemini.py +39 -0
  45. mem0/embeddings/huggingface.py +41 -0
  46. mem0/embeddings/langchain.py +35 -0
  47. mem0/embeddings/lmstudio.py +29 -0
  48. mem0/embeddings/mock.py +11 -0
  49. mem0/embeddings/ollama.py +53 -0
  50. mem0/embeddings/openai.py +49 -0
  51. mem0/embeddings/together.py +31 -0
  52. mem0/embeddings/vertexai.py +54 -0
  53. mem0/graphs/__init__.py +0 -0
  54. mem0/graphs/configs.py +96 -0
  55. mem0/graphs/neptune/__init__.py +0 -0
  56. mem0/graphs/neptune/base.py +410 -0
  57. mem0/graphs/neptune/main.py +372 -0
  58. mem0/graphs/tools.py +371 -0
  59. mem0/graphs/utils.py +97 -0
  60. mem0/llms/__init__.py +0 -0
  61. mem0/llms/anthropic.py +64 -0
  62. mem0/llms/aws_bedrock.py +270 -0
  63. mem0/llms/azure_openai.py +114 -0
  64. mem0/llms/azure_openai_structured.py +76 -0
  65. mem0/llms/base.py +32 -0
  66. mem0/llms/configs.py +34 -0
  67. mem0/llms/deepseek.py +85 -0
  68. mem0/llms/gemini.py +201 -0
  69. mem0/llms/groq.py +88 -0
  70. mem0/llms/langchain.py +65 -0
  71. mem0/llms/litellm.py +87 -0
  72. mem0/llms/lmstudio.py +53 -0
  73. mem0/llms/ollama.py +94 -0
  74. mem0/llms/openai.py +124 -0
  75. mem0/llms/openai_structured.py +52 -0
  76. mem0/llms/sarvam.py +89 -0
  77. mem0/llms/together.py +88 -0
  78. mem0/llms/vllm.py +89 -0
  79. mem0/llms/xai.py +52 -0
  80. mem0/memory/__init__.py +0 -0
  81. mem0/memory/base.py +63 -0
  82. mem0/memory/graph_memory.py +632 -0
  83. mem0/memory/main.py +1843 -0
  84. mem0/memory/memgraph_memory.py +630 -0
  85. mem0/memory/setup.py +56 -0
  86. mem0/memory/storage.py +218 -0
  87. mem0/memory/telemetry.py +90 -0
  88. mem0/memory/utils.py +133 -0
  89. mem0/proxy/__init__.py +0 -0
  90. mem0/proxy/main.py +194 -0
  91. mem0/utils/factory.py +132 -0
  92. mem0/vector_stores/__init__.py +0 -0
  93. mem0/vector_stores/azure_ai_search.py +383 -0
  94. mem0/vector_stores/baidu.py +368 -0
  95. mem0/vector_stores/base.py +58 -0
  96. mem0/vector_stores/chroma.py +229 -0
  97. mem0/vector_stores/configs.py +60 -0
  98. mem0/vector_stores/elasticsearch.py +235 -0
  99. mem0/vector_stores/faiss.py +473 -0
  100. mem0/vector_stores/langchain.py +179 -0
  101. mem0/vector_stores/milvus.py +245 -0
  102. mem0/vector_stores/mongodb.py +293 -0
  103. mem0/vector_stores/opensearch.py +281 -0
  104. mem0/vector_stores/pgvector.py +294 -0
  105. mem0/vector_stores/pinecone.py +373 -0
  106. mem0/vector_stores/qdrant.py +240 -0
  107. mem0/vector_stores/redis.py +295 -0
  108. mem0/vector_stores/supabase.py +237 -0
  109. mem0/vector_stores/upstash_vector.py +293 -0
  110. mem0/vector_stores/vertex_ai_vector_search.py +629 -0
  111. mem0/vector_stores/weaviate.py +316 -0
  112. mem0ai_azure_mysql-0.1.115.data/data/README.md +169 -0
  113. mem0ai_azure_mysql-0.1.115.dist-info/METADATA +224 -0
  114. mem0ai_azure_mysql-0.1.115.dist-info/RECORD +116 -0
  115. mem0ai_azure_mysql-0.1.115.dist-info/WHEEL +4 -0
  116. mem0ai_azure_mysql-0.1.115.dist-info/licenses/LICENSE +201 -0
mem0/llms/aws_bedrock.py ADDED
@@ -0,0 +1,270 @@
+ import json
+ import os
+ import re
+ from typing import Any, Dict, List, Optional
+
+ try:
+     import boto3
+ except ImportError:
+     raise ImportError("The 'boto3' library is required. Please install it using 'pip install boto3'.")
+
+ from mem0.configs.llms.base import BaseLlmConfig
+ from mem0.llms.base import LLMBase
+
+ PROVIDERS = ["ai21", "amazon", "anthropic", "cohere", "meta", "mistral", "stability", "writer"]
+
+
+ def extract_provider(model: str) -> str:
+     for provider in PROVIDERS:
+         if re.search(rf"\b{re.escape(provider)}\b", model):
+             return provider
+     raise ValueError(f"Unknown provider in model: {model}")
+
+
+ class AWSBedrockLLM(LLMBase):
+     def __init__(self, config: Optional[BaseLlmConfig] = None):
+         super().__init__(config)
+
+         if not self.config.model:
+             self.config.model = "anthropic.claude-3-5-sonnet-20240620-v1:0"
+
+         # Get AWS config from environment variables or use defaults
+         aws_access_key = os.environ.get("AWS_ACCESS_KEY_ID", "")
+         aws_secret_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
+         aws_region = os.environ.get("AWS_REGION", "us-west-2")
+
+         # AWS settings provided on the config object take precedence
+         if hasattr(self.config, "aws_access_key_id"):
+             aws_access_key = self.config.aws_access_key_id
+         if hasattr(self.config, "aws_secret_access_key"):
+             aws_secret_key = self.config.aws_secret_access_key
+         if hasattr(self.config, "aws_region"):
+             aws_region = self.config.aws_region
+
+         self.client = boto3.client(
+             "bedrock-runtime",
+             region_name=aws_region,
+             aws_access_key_id=aws_access_key if aws_access_key else None,
+             aws_secret_access_key=aws_secret_key if aws_secret_key else None,
+         )
+
+         self.model_kwargs = {
+             "temperature": self.config.temperature,
+             "max_tokens_to_sample": self.config.max_tokens,
+             "top_p": self.config.top_p,
+         }
+
+     def _format_messages(self, messages: List[Dict[str, str]]) -> str:
+         """
+         Formats a list of messages into the required prompt structure for the model.
+
+         Args:
+             messages (List[Dict[str, str]]): A list of dictionaries where each dictionary represents a message.
+                 Each dictionary contains 'role' and 'content' keys.
+
+         Returns:
+             str: A formatted string combining all messages, structured with roles capitalized and separated by newlines.
+         """
+         formatted_messages = []
+         for message in messages:
+             role = message["role"].capitalize()
+             content = message["content"]
+             formatted_messages.append(f"\n\n{role}: {content}")
+
+         return "\n\nHuman: " + "".join(formatted_messages) + "\n\nAssistant:"
+
+     def _parse_response(self, response, tools) -> str:
+         """
+         Process the response based on whether tools are used or not.
+
+         Args:
+             response: The raw response from the API.
+             tools: The list of tools provided in the request.
+
+         Returns:
+             str or dict: The processed response.
+         """
+         if tools:
+             processed_response = {"tool_calls": []}
+
+             if response["output"]["message"]["content"]:
+                 for item in response["output"]["message"]["content"]:
+                     if "toolUse" in item:
+                         processed_response["tool_calls"].append(
+                             {
+                                 "name": item["toolUse"]["name"],
+                                 "arguments": item["toolUse"]["input"],
+                             }
+                         )
+
+             return processed_response
+
+         response_body = response.get("body").read().decode()
+         response_json = json.loads(response_body)
+         return response_json.get("content", [{"text": ""}])[0].get("text", "")
+
+     def _prepare_input(
+         self,
+         provider: str,
+         model: str,
+         prompt: str,
+         model_kwargs: Optional[Dict[str, Any]] = None,
+     ) -> Dict[str, Any]:
+         """
+         Prepares the input dictionary for the specified provider's model by mapping and renaming
+         keys in the input based on the provider's requirements.
+
+         Args:
+             provider (str): The name of the service provider (e.g., "meta", "ai21", "mistral", "cohere", "amazon").
+             model (str): The name or identifier of the model being used.
+             prompt (str): The text prompt to be processed by the model.
+             model_kwargs (Dict[str, Any], optional): Additional keyword arguments specific to the model's requirements.
+
+         Returns:
+             Dict[str, Any]: The prepared input dictionary with the correct keys and values for the specified provider.
+         """
+         input_body = {"prompt": prompt, **(model_kwargs or {})}
+
+         provider_mappings = {
+             "meta": {"max_tokens_to_sample": "max_gen_len"},
+             "ai21": {"max_tokens_to_sample": "maxTokens", "top_p": "topP"},
+             "mistral": {"max_tokens_to_sample": "max_tokens"},
+             "cohere": {"max_tokens_to_sample": "max_tokens", "top_p": "p"},
+         }
+
+         if provider in provider_mappings:
+             for old_key, new_key in provider_mappings[provider].items():
+                 if old_key in input_body:
+                     input_body[new_key] = input_body.pop(old_key)
+
+         if provider == "cohere" and "cohere.command-r" in model:
+             input_body["message"] = input_body.pop("prompt")
+
+         if provider == "amazon":
+             input_body = {
+                 "inputText": prompt,
+                 "textGenerationConfig": {
+                     # .get() avoids a KeyError: self.model_kwargs defines
+                     # "max_tokens_to_sample", not "max_tokens".
+                     "maxTokenCount": self.model_kwargs.get("max_tokens_to_sample")
+                     or self.model_kwargs.get("max_tokens")
+                     or 5000,
+                     "topP": self.model_kwargs["top_p"] or 0.9,
+                     "temperature": self.model_kwargs["temperature"] or 0.1,
+                 },
+             }
+             input_body["textGenerationConfig"] = {
+                 k: v for k, v in input_body["textGenerationConfig"].items() if v is not None
+             }
+
+         return input_body
+
+     def _convert_tool_format(self, original_tools):
+         """
+         Converts a list of tools from their original format to a new standardized format.
+
+         Args:
+             original_tools (list): A list of dictionaries representing the original tools, each containing a 'type' key and corresponding details.
+
+         Returns:
+             list: A list of dictionaries representing the tools in the new standardized format.
+         """
+         new_tools = []
+
+         for tool in original_tools:
+             if tool["type"] == "function":
+                 function = tool["function"]
+                 new_tool = {
+                     "toolSpec": {
+                         "name": function["name"],
+                         "description": function["description"],
+                         "inputSchema": {
+                             "json": {
+                                 "type": "object",
+                                 "properties": {},
+                                 "required": function["parameters"].get("required", []),
+                             }
+                         },
+                     }
+                 }
+
+                 for prop, details in function["parameters"].get("properties", {}).items():
+                     new_tool["toolSpec"]["inputSchema"]["json"]["properties"][prop] = details
+
+                 new_tools.append(new_tool)
+
+         return new_tools
+
+     def generate_response(
+         self,
+         messages: List[Dict[str, str]],
+         response_format=None,
+         tools: Optional[List[Dict]] = None,
+         tool_choice: str = "auto",
+     ):
+         """
+         Generate a response based on the given messages using AWS Bedrock.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             response_format (optional): Unused; kept for interface compatibility.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+         Returns:
+             str: The generated response.
+         """
+         if tools:
+             # Use the converse API when tools are provided
+             messages = [
+                 {
+                     "role": "user",
+                     "content": [{"text": message["content"]} for message in messages],
+                 }
+             ]
+             inference_config = {
+                 "temperature": self.model_kwargs["temperature"],
+                 "maxTokens": self.model_kwargs["max_tokens_to_sample"],
+                 "topP": self.model_kwargs["top_p"],
+             }
+             tools_config = {"tools": self._convert_tool_format(tools)}
+
+             response = self.client.converse(
+                 modelId=self.config.model,
+                 messages=messages,
+                 inferenceConfig=inference_config,
+                 toolConfig=tools_config,
+             )
+         else:
+             # Use the invoke_model API when no tools are provided
+             prompt = self._format_messages(messages)
+             provider = extract_provider(self.config.model)
+             input_body = self._prepare_input(provider, self.config.model, prompt, model_kwargs=self.model_kwargs)
+             body = json.dumps(input_body)
+
+             if provider in ("anthropic", "deepseek"):
+                 input_body = {
+                     "messages": [{"role": "user", "content": [{"type": "text", "text": prompt}]}],
+                     "max_tokens": self.model_kwargs.get("max_tokens_to_sample") or self.model_kwargs.get("max_tokens") or 5000,
+                     "temperature": self.model_kwargs["temperature"] or 0.1,
+                     "top_p": self.model_kwargs["top_p"] or 0.9,
+                     "anthropic_version": "bedrock-2023-05-31",
+                 }
+                 body = json.dumps(input_body)
+
+             # The invoke_model call is identical in both cases, so it is made once.
+             response = self.client.invoke_model(
+                 body=body,
+                 modelId=self.config.model,
+                 accept="application/json",
+                 contentType="application/json",
+             )
+
+         return self._parse_response(response, tools)
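A minimal usage sketch for the class above, not part of the package: it assumes valid AWS credentials are available in the environment and that BaseLlmConfig accepts the model/temperature/max_tokens/top_p fields read in __init__.

from mem0.configs.llms.base import BaseLlmConfig
from mem0.llms.aws_bedrock import AWSBedrockLLM

config = BaseLlmConfig(
    model="anthropic.claude-3-5-sonnet-20240620-v1:0",  # the class default
    temperature=0.2,
    max_tokens=512,
    top_p=0.9,
)
llm = AWSBedrockLLM(config)

# No tools, so this takes the invoke_model path: _format_messages flattens the
# conversation into a single "Human:/Assistant:" prompt before the call.
print(llm.generate_response([{"role": "user", "content": "Summarize mem0 in one line."}]))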
mem0/llms/azure_openai.py ADDED
@@ -0,0 +1,114 @@
+ import json
+ import os
+ from typing import Dict, List, Optional
+
+ from openai import AzureOpenAI
+ from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
+ from mem0.configs.llms.base import BaseLlmConfig
+ from mem0.llms.base import LLMBase
+ from mem0.memory.utils import extract_json
+
+
+ class AzureOpenAILLM(LLMBase):
+     def __init__(self, config: Optional[BaseLlmConfig] = None):
+         super().__init__(config)
+
+         # Model name should match the custom deployment name chosen for it.
+         if not self.config.model:
+             self.config.model = "gpt-4o"
+
+         azure_deployment = self.config.azure_kwargs.azure_deployment or os.getenv("LLM_AZURE_DEPLOYMENT")
+         azure_endpoint = self.config.azure_kwargs.azure_endpoint or os.getenv("LLM_AZURE_ENDPOINT")
+         api_version = self.config.azure_kwargs.api_version or os.getenv("LLM_AZURE_API_VERSION")
+         default_headers = self.config.azure_kwargs.default_headers
+
+         # Authenticate via Entra ID (no API key); tokens are fetched on demand.
+         credential = DefaultAzureCredential()
+         token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
+         self.client = AzureOpenAI(
+             azure_deployment=azure_deployment,
+             azure_endpoint=azure_endpoint,
+             api_version=api_version,
+             azure_ad_token_provider=token_provider,
+             http_client=self.config.http_client,
+             default_headers=default_headers,
+         )
+
+     def _parse_response(self, response, tools):
+         """
+         Process the response based on whether tools are used or not.
+
+         Args:
+             response: The raw response from the API.
+             tools: The list of tools provided in the request.
+
+         Returns:
+             str or dict: The processed response.
+         """
+         if tools:
+             processed_response = {
+                 "content": response.choices[0].message.content,
+                 "tool_calls": [],
+             }
+
+             if response.choices[0].message.tool_calls:
+                 for tool_call in response.choices[0].message.tool_calls:
+                     processed_response["tool_calls"].append(
+                         {
+                             "name": tool_call.function.name,
+                             "arguments": json.loads(extract_json(tool_call.function.arguments)),
+                         }
+                     )
+
+             return processed_response
+         else:
+             return response.choices[0].message.content
+
+     def generate_response(
+         self,
+         messages: List[Dict[str, str]],
+         response_format=None,
+         tools: Optional[List[Dict]] = None,
+         tool_choice: str = "auto",
+     ):
+         """
+         Generate a response based on the given messages using Azure OpenAI.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             response_format (str or object, optional): Format of the response. Defaults to None.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+         Returns:
+             str: The generated response.
+         """
+         user_prompt = messages[-1]["content"]
+         user_prompt = user_prompt.replace("assistant", "ai")
+         messages[-1]["content"] = user_prompt
+
+         common_params = {
+             "model": self.config.model,
+             "messages": messages,
+         }
+
+         # Reasoning models do not accept sampling parameters such as temperature/top_p.
+         if self.config.model in {"o3-mini", "o1-preview", "o1"}:
+             params = common_params
+         else:
+             params = {
+                 **common_params,
+                 "temperature": self.config.temperature,
+                 "max_tokens": self.config.max_tokens,
+                 "top_p": self.config.top_p,
+             }
+         if response_format:
+             params["response_format"] = response_format
+         if tools:  # TODO: Remove tools if no issues found with new memory addition logic
+             params["tools"] = tools
+             params["tool_choice"] = tool_choice
+
+         response = self.client.chat.completions.create(**params)
+         return self._parse_response(response, tools)
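Unlike the Bedrock class, AzureOpenAILLM carries no API key: DefaultAzureCredential picks up whatever identity is available (az CLI login, managed identity, service principal) and get_bearer_token_provider scopes tokens to cognitiveservices.azure.com. A sketch with hypothetical resource names, assuming the default BaseLlmConfig leaves azure_kwargs unset so the LLM_AZURE_* environment variables are used:

import os
from mem0.llms.azure_openai import AzureOpenAILLM

os.environ["LLM_AZURE_DEPLOYMENT"] = "my-gpt4o-deployment"                  # hypothetical
os.environ["LLM_AZURE_ENDPOINT"] = "https://my-resource.openai.azure.com"   # hypothetical
os.environ["LLM_AZURE_API_VERSION"] = "2024-02-01"

llm = AzureOpenAILLM()  # config.model falls back to the "gpt-4o" deployment name
print(llm.generate_response([{"role": "user", "content": "Hello"}]))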
mem0/llms/azure_openai_structured.py ADDED
@@ -0,0 +1,76 @@
+ import json
+ import os
+ from typing import Dict, List, Optional
+
+ from openai import AzureOpenAI
+
+ from mem0.configs.llms.base import BaseLlmConfig
+ from mem0.llms.base import LLMBase
+ from mem0.memory.utils import extract_json
+
+
+ class AzureOpenAIStructuredLLM(LLMBase):
+     def __init__(self, config: Optional[BaseLlmConfig] = None):
+         super().__init__(config)
+
+         # Model name should match the custom deployment name chosen for it.
+         if not self.config.model:
+             self.config.model = "gpt-4o-2024-08-06"
+
+         api_key = os.getenv("LLM_AZURE_OPENAI_API_KEY") or self.config.azure_kwargs.api_key
+         azure_deployment = os.getenv("LLM_AZURE_DEPLOYMENT") or self.config.azure_kwargs.azure_deployment
+         azure_endpoint = os.getenv("LLM_AZURE_ENDPOINT") or self.config.azure_kwargs.azure_endpoint
+         api_version = os.getenv("LLM_AZURE_API_VERSION") or self.config.azure_kwargs.api_version
+         default_headers = self.config.azure_kwargs.default_headers
+
+         # TODO: Could warn here when the model and api-version are mismatched.
+         self.client = AzureOpenAI(
+             azure_deployment=azure_deployment,
+             azure_endpoint=azure_endpoint,
+             api_version=api_version,
+             api_key=api_key,
+             http_client=self.config.http_client,
+             default_headers=default_headers,
+         )
+
+     def _parse_response(self, response, tools):
+         """
+         Process the response based on whether tools are used or not.
+
+         Args:
+             response: The raw response from the API.
+             tools: The list of tools provided in the request.
+
+         Returns:
+             str or dict: The processed response.
+         """
+         if tools:
+             processed_response = {
+                 "content": response.choices[0].message.content,
+                 "tool_calls": [],
+             }
+
+             if response.choices[0].message.tool_calls:
+                 for tool_call in response.choices[0].message.tool_calls:
+                     processed_response["tool_calls"].append(
+                         {
+                             "name": tool_call.function.name,
+                             "arguments": json.loads(extract_json(tool_call.function.arguments)),
+                         }
+                     )
+
+             return processed_response
+         return response.choices[0].message.content
+
+     def generate_response(
+         self,
+         messages: List[Dict[str, str]],
+         response_format: Optional[str] = None,
+         tools: Optional[List[Dict]] = None,
+         tool_choice: str = "auto",
+     ) -> str:
+         """
+         Generate a response based on the given messages using Azure OpenAI.
+
+         Args:
+             messages (List[Dict[str, str]]): A list of dictionaries, each containing a 'role' and 'content' key.
+             response_format (Optional[str]): The desired format of the response. Defaults to None.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+         Returns:
+             str: The generated response.
+         """
+         user_prompt = messages[-1]["content"]
+         user_prompt = user_prompt.replace("assistant", "ai")
+         messages[-1]["content"] = user_prompt
+
+         params = {
+             "model": self.config.model,
+             "messages": messages,
+             "temperature": self.config.temperature,
+             "max_tokens": self.config.max_tokens,
+             "top_p": self.config.top_p,
+         }
+         if response_format:
+             params["response_format"] = response_format
+         if tools:
+             params["tools"] = tools
+             params["tool_choice"] = tool_choice
+
+         response = self.client.chat.completions.create(**params)
+         return self._parse_response(response, tools)
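The structured variant swaps Entra ID auth for an API key and is intended for response_format-constrained calls. A sketch, assuming LLM_AZURE_OPENAI_API_KEY and the LLM_AZURE_* variables above are set and the deployment supports JSON mode:

from mem0.llms.azure_openai_structured import AzureOpenAIStructuredLLM

llm = AzureOpenAIStructuredLLM()  # reads credentials and endpoint from the environment
result = llm.generate_response(
    [{"role": "user", "content": 'Reply with {"ok": true} as JSON.'}],
    response_format={"type": "json_object"},
)
print(result)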
mem0/llms/base.py ADDED
@@ -0,0 +1,32 @@
+ from abc import ABC, abstractmethod
+ from typing import Dict, List, Optional
+
+ from mem0.configs.llms.base import BaseLlmConfig
+
+
+ class LLMBase(ABC):
+     def __init__(self, config: Optional[BaseLlmConfig] = None):
+         """Initialize a base LLM class
+
+         :param config: LLM configuration option class, defaults to None
+         :type config: Optional[BaseLlmConfig], optional
+         """
+         if config is None:
+             self.config = BaseLlmConfig()
+         else:
+             self.config = config
+
+     @abstractmethod
+     def generate_response(self, messages, tools: Optional[List[Dict]] = None, tool_choice: str = "auto"):
+         """
+         Generate a response based on the given messages.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+         Returns:
+             str: The generated response.
+         """
+         pass
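Every provider module in this package follows the same contract: subclass LLMBase, set config defaults in __init__, and implement generate_response. A toy subclass, purely illustrative and not from the package:

from typing import Dict, List, Optional

from mem0.llms.base import LLMBase


class EchoLLM(LLMBase):
    """Hypothetical provider that echoes the last user message (no API call)."""

    def generate_response(self, messages: List[Dict[str, str]], tools: Optional[List[Dict]] = None, tool_choice: str = "auto"):
        # Returns the most recent message content verbatim.
        return messages[-1]["content"]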
mem0/llms/configs.py ADDED
@@ -0,0 +1,34 @@
+ from typing import Optional
+
+ from pydantic import BaseModel, Field, field_validator
+
+
+ class LlmConfig(BaseModel):
+     provider: str = Field(description="Provider of the LLM (e.g., 'ollama', 'openai')", default="openai")
+     config: Optional[dict] = Field(description="Configuration for the specific LLM", default={})
+
+     @field_validator("config")
+     def validate_config(cls, v, values):
+         provider = values.data.get("provider")
+         if provider in (
+             "openai",
+             "ollama",
+             "anthropic",
+             "groq",
+             "together",
+             "aws_bedrock",
+             "litellm",
+             "azure_openai",
+             "openai_structured",
+             "azure_openai_structured",
+             "gemini",
+             "deepseek",
+             "xai",
+             "sarvam",
+             "lmstudio",
+             "vllm",
+             "langchain",
+         ):
+             return v
+         else:
+             raise ValueError(f"Unsupported LLM provider: {provider}")
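The validator only checks that the provider name is known; the config dict itself passes through untouched and is interpreted later by the concrete LLM class (presumably via mem0/utils/factory.py from the file list above). Two illustrative calls:

from mem0.llms.configs import LlmConfig

LlmConfig(provider="openai", config={"model": "gpt-4o-mini"})  # accepted; dict is passed through

# Raises a pydantic ValidationError wrapping "Unsupported LLM provider: nope".
# The validator is attached to the `config` field, so under pydantic v2 defaults
# it only fires when `config` is supplied explicitly.
LlmConfig(provider="nope", config={})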
mem0/llms/deepseek.py ADDED
@@ -0,0 +1,85 @@
+ import json
+ import os
+ from typing import Dict, List, Optional
+
+ from openai import OpenAI
+
+ from mem0.configs.llms.base import BaseLlmConfig
+ from mem0.llms.base import LLMBase
+ from mem0.memory.utils import extract_json
+
+
+ class DeepSeekLLM(LLMBase):
+     def __init__(self, config: Optional[BaseLlmConfig] = None):
+         super().__init__(config)
+
+         if not self.config.model:
+             self.config.model = "deepseek-chat"
+
+         api_key = self.config.api_key or os.getenv("DEEPSEEK_API_KEY")
+         base_url = self.config.deepseek_base_url or os.getenv("DEEPSEEK_API_BASE") or "https://api.deepseek.com"
+         self.client = OpenAI(api_key=api_key, base_url=base_url)
+
+     def _parse_response(self, response, tools):
+         """
+         Process the response based on whether tools are used or not.
+
+         Args:
+             response: The raw response from the API.
+             tools: The list of tools provided in the request.
+
+         Returns:
+             str or dict: The processed response.
+         """
+         if tools:
+             processed_response = {
+                 "content": response.choices[0].message.content,
+                 "tool_calls": [],
+             }
+
+             if response.choices[0].message.tool_calls:
+                 for tool_call in response.choices[0].message.tool_calls:
+                     processed_response["tool_calls"].append(
+                         {
+                             "name": tool_call.function.name,
+                             "arguments": json.loads(extract_json(tool_call.function.arguments)),
+                         }
+                     )
+
+             return processed_response
+         else:
+             return response.choices[0].message.content
+
+     def generate_response(
+         self,
+         messages: List[Dict[str, str]],
+         response_format=None,
+         tools: Optional[List[Dict]] = None,
+         tool_choice: str = "auto",
+     ):
+         """
+         Generate a response based on the given messages using DeepSeek.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             response_format (str or object, optional): Currently unused; kept for interface compatibility.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+         Returns:
+             str: The generated response.
+         """
+         params = {
+             "model": self.config.model,
+             "messages": messages,
+             "temperature": self.config.temperature,
+             "max_tokens": self.config.max_tokens,
+             "top_p": self.config.top_p,
+         }
+
+         if tools:
+             params["tools"] = tools
+             params["tool_choice"] = tool_choice
+
+         response = self.client.chat.completions.create(**params)
+         return self._parse_response(response, tools)
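Because DeepSeek exposes an OpenAI-compatible API, this class reuses the stock OpenAI client with a different base_url. A sketch, assuming DEEPSEEK_API_KEY is exported and the default config leaves api_key and deepseek_base_url unset:

from mem0.llms.deepseek import DeepSeekLLM

llm = DeepSeekLLM()  # defaults: model "deepseek-chat", base_url https://api.deepseek.com
print(llm.generate_response([{"role": "user", "content": "One sentence on what mem0 does."}]))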