mem0ai-azure-mysql 0.1.115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116) hide show
  1. mem0/__init__.py +6 -0
  2. mem0/client/__init__.py +0 -0
  3. mem0/client/main.py +1535 -0
  4. mem0/client/project.py +860 -0
  5. mem0/client/utils.py +29 -0
  6. mem0/configs/__init__.py +0 -0
  7. mem0/configs/base.py +90 -0
  8. mem0/configs/dbs/__init__.py +4 -0
  9. mem0/configs/dbs/base.py +41 -0
  10. mem0/configs/dbs/mysql.py +25 -0
  11. mem0/configs/embeddings/__init__.py +0 -0
  12. mem0/configs/embeddings/base.py +108 -0
  13. mem0/configs/enums.py +7 -0
  14. mem0/configs/llms/__init__.py +0 -0
  15. mem0/configs/llms/base.py +152 -0
  16. mem0/configs/prompts.py +333 -0
  17. mem0/configs/vector_stores/__init__.py +0 -0
  18. mem0/configs/vector_stores/azure_ai_search.py +59 -0
  19. mem0/configs/vector_stores/baidu.py +29 -0
  20. mem0/configs/vector_stores/chroma.py +40 -0
  21. mem0/configs/vector_stores/elasticsearch.py +47 -0
  22. mem0/configs/vector_stores/faiss.py +39 -0
  23. mem0/configs/vector_stores/langchain.py +32 -0
  24. mem0/configs/vector_stores/milvus.py +43 -0
  25. mem0/configs/vector_stores/mongodb.py +25 -0
  26. mem0/configs/vector_stores/opensearch.py +41 -0
  27. mem0/configs/vector_stores/pgvector.py +37 -0
  28. mem0/configs/vector_stores/pinecone.py +56 -0
  29. mem0/configs/vector_stores/qdrant.py +49 -0
  30. mem0/configs/vector_stores/redis.py +26 -0
  31. mem0/configs/vector_stores/supabase.py +44 -0
  32. mem0/configs/vector_stores/upstash_vector.py +36 -0
  33. mem0/configs/vector_stores/vertex_ai_vector_search.py +27 -0
  34. mem0/configs/vector_stores/weaviate.py +43 -0
  35. mem0/dbs/__init__.py +4 -0
  36. mem0/dbs/base.py +68 -0
  37. mem0/dbs/configs.py +21 -0
  38. mem0/dbs/mysql.py +321 -0
  39. mem0/embeddings/__init__.py +0 -0
  40. mem0/embeddings/aws_bedrock.py +100 -0
  41. mem0/embeddings/azure_openai.py +43 -0
  42. mem0/embeddings/base.py +31 -0
  43. mem0/embeddings/configs.py +30 -0
  44. mem0/embeddings/gemini.py +39 -0
  45. mem0/embeddings/huggingface.py +41 -0
  46. mem0/embeddings/langchain.py +35 -0
  47. mem0/embeddings/lmstudio.py +29 -0
  48. mem0/embeddings/mock.py +11 -0
  49. mem0/embeddings/ollama.py +53 -0
  50. mem0/embeddings/openai.py +49 -0
  51. mem0/embeddings/together.py +31 -0
  52. mem0/embeddings/vertexai.py +54 -0
  53. mem0/graphs/__init__.py +0 -0
  54. mem0/graphs/configs.py +96 -0
  55. mem0/graphs/neptune/__init__.py +0 -0
  56. mem0/graphs/neptune/base.py +410 -0
  57. mem0/graphs/neptune/main.py +372 -0
  58. mem0/graphs/tools.py +371 -0
  59. mem0/graphs/utils.py +97 -0
  60. mem0/llms/__init__.py +0 -0
  61. mem0/llms/anthropic.py +64 -0
  62. mem0/llms/aws_bedrock.py +270 -0
  63. mem0/llms/azure_openai.py +114 -0
  64. mem0/llms/azure_openai_structured.py +76 -0
  65. mem0/llms/base.py +32 -0
  66. mem0/llms/configs.py +34 -0
  67. mem0/llms/deepseek.py +85 -0
  68. mem0/llms/gemini.py +201 -0
  69. mem0/llms/groq.py +88 -0
  70. mem0/llms/langchain.py +65 -0
  71. mem0/llms/litellm.py +87 -0
  72. mem0/llms/lmstudio.py +53 -0
  73. mem0/llms/ollama.py +94 -0
  74. mem0/llms/openai.py +124 -0
  75. mem0/llms/openai_structured.py +52 -0
  76. mem0/llms/sarvam.py +89 -0
  77. mem0/llms/together.py +88 -0
  78. mem0/llms/vllm.py +89 -0
  79. mem0/llms/xai.py +52 -0
  80. mem0/memory/__init__.py +0 -0
  81. mem0/memory/base.py +63 -0
  82. mem0/memory/graph_memory.py +632 -0
  83. mem0/memory/main.py +1843 -0
  84. mem0/memory/memgraph_memory.py +630 -0
  85. mem0/memory/setup.py +56 -0
  86. mem0/memory/storage.py +218 -0
  87. mem0/memory/telemetry.py +90 -0
  88. mem0/memory/utils.py +133 -0
  89. mem0/proxy/__init__.py +0 -0
  90. mem0/proxy/main.py +194 -0
  91. mem0/utils/factory.py +132 -0
  92. mem0/vector_stores/__init__.py +0 -0
  93. mem0/vector_stores/azure_ai_search.py +383 -0
  94. mem0/vector_stores/baidu.py +368 -0
  95. mem0/vector_stores/base.py +58 -0
  96. mem0/vector_stores/chroma.py +229 -0
  97. mem0/vector_stores/configs.py +60 -0
  98. mem0/vector_stores/elasticsearch.py +235 -0
  99. mem0/vector_stores/faiss.py +473 -0
  100. mem0/vector_stores/langchain.py +179 -0
  101. mem0/vector_stores/milvus.py +245 -0
  102. mem0/vector_stores/mongodb.py +293 -0
  103. mem0/vector_stores/opensearch.py +281 -0
  104. mem0/vector_stores/pgvector.py +294 -0
  105. mem0/vector_stores/pinecone.py +373 -0
  106. mem0/vector_stores/qdrant.py +240 -0
  107. mem0/vector_stores/redis.py +295 -0
  108. mem0/vector_stores/supabase.py +237 -0
  109. mem0/vector_stores/upstash_vector.py +293 -0
  110. mem0/vector_stores/vertex_ai_vector_search.py +629 -0
  111. mem0/vector_stores/weaviate.py +316 -0
  112. mem0ai_azure_mysql-0.1.115.data/data/README.md +169 -0
  113. mem0ai_azure_mysql-0.1.115.dist-info/METADATA +224 -0
  114. mem0ai_azure_mysql-0.1.115.dist-info/RECORD +116 -0
  115. mem0ai_azure_mysql-0.1.115.dist-info/WHEEL +4 -0
  116. mem0ai_azure_mysql-0.1.115.dist-info/licenses/LICENSE +201 -0
mem0/llms/openai.py ADDED
@@ -0,0 +1,124 @@
1
+ import json
2
+ import os
3
+ import warnings
4
+ from typing import Dict, List, Optional
5
+
6
+ from openai import OpenAI
7
+
8
+ from mem0.configs.llms.base import BaseLlmConfig
9
+ from mem0.llms.base import LLMBase
10
+ from mem0.memory.utils import extract_json
11
+
12
+
13
class OpenAILLM(LLMBase):
    """Chat LLM backed by OpenAI, or by OpenRouter when OPENROUTER_API_KEY is set."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        # Default model when the config leaves it unset.
        if not self.config.model:
            self.config.model = "gpt-4o-mini"

        if os.environ.get("OPENROUTER_API_KEY"):  # Use OpenRouter
            router_base = (
                self.config.openrouter_base_url
                or os.getenv("OPENROUTER_API_BASE")
                or "https://openrouter.ai/api/v1"
            )
            self.client = OpenAI(api_key=os.environ.get("OPENROUTER_API_KEY"), base_url=router_base)
        else:
            key = self.config.api_key or os.getenv("OPENAI_API_KEY")
            endpoint = (
                self.config.openai_base_url
                or os.getenv("OPENAI_API_BASE")
                or os.getenv("OPENAI_BASE_URL")
                or "https://api.openai.com/v1"
            )
            if os.environ.get("OPENAI_API_BASE"):
                warnings.warn(
                    "The environment variable 'OPENAI_API_BASE' is deprecated and will be removed in the 0.1.80. "
                    "Please use 'OPENAI_BASE_URL' instead.",
                    DeprecationWarning,
                )
            self.client = OpenAI(api_key=key, base_url=endpoint)

    def _parse_response(self, response, tools):
        """
        Normalize the raw completion into the shape callers expect.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: Plain message content when no tools were supplied,
            otherwise a dict with 'content' plus decoded 'tool_calls'.
        """
        if not tools:
            return response.choices[0].message.content

        message = response.choices[0].message
        parsed = {"content": message.content, "tool_calls": []}
        for call in message.tool_calls or []:
            parsed["tool_calls"].append(
                {
                    "name": call.function.name,
                    "arguments": json.loads(extract_json(call.function.arguments)),
                }
            )
        return parsed

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a JSON response based on the given messages using OpenAI.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            json: The generated response.
        """
        request = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }

        if os.getenv("OPENROUTER_API_KEY"):
            router_extras = {}
            # Multi-model routing replaces the single 'model' entry entirely.
            if self.config.models:
                router_extras["models"] = self.config.models
                router_extras["route"] = self.config.route
                request.pop("model")
            if self.config.site_url and self.config.app_name:
                router_extras["extra_headers"] = {
                    "HTTP-Referer": self.config.site_url,
                    "X-Title": self.config.app_name,
                }
            request.update(**router_extras)

        if response_format:
            request["response_format"] = response_format
        if tools:  # TODO: Remove tools if no issues found with new memory addition logic
            request["tools"] = tools
            request["tool_choice"] = tool_choice

        return self._parse_response(self.client.chat.completions.create(**request), tools)
@@ -0,0 +1,52 @@
1
+ import os
2
+ from typing import Dict, List, Optional
3
+
4
+ from openai import OpenAI
5
+
6
+ from mem0.configs.llms.base import BaseLlmConfig
7
+ from mem0.llms.base import LLMBase
8
+
9
+
10
class OpenAIStructuredLLM(LLMBase):
    """OpenAI-backed LLM that uses the structured-output (parse) endpoint."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        # Default to a snapshot that supports structured outputs.
        if not self.config.model:
            self.config.model = "gpt-4o-2024-08-06"

        key = self.config.api_key or os.getenv("OPENAI_API_KEY")
        endpoint = self.config.openai_base_url or os.getenv("OPENAI_API_BASE") or "https://api.openai.com/v1"
        self.client = OpenAI(api_key=key, base_url=endpoint)

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format: Optional[str] = None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ) -> str:
        """
        Generate a response based on the given messages using OpenAI's parse API.

        Args:
            messages (List[Dict[str, str]]): A list of dictionaries, each containing
                a 'role' and 'content' key.
            response_format (Optional[str]): The desired format of the response.
                Defaults to None.
            tools (Optional[List[Dict]]): Tools the model may call. Defaults to None.
            tool_choice (str): Tool selection strategy. Defaults to "auto".

        Returns:
            str: The generated response content.
        """
        request = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
        }
        if response_format:
            request["response_format"] = response_format
        if tools:
            request["tools"] = tools
            request["tool_choice"] = tool_choice

        completion = self.client.beta.chat.completions.parse(**request)
        return completion.choices[0].message.content
mem0/llms/sarvam.py ADDED
@@ -0,0 +1,89 @@
1
+ import os
2
+ from typing import Dict, List, Optional
3
+
4
+ import requests
5
+
6
+ from mem0.configs.llms.base import BaseLlmConfig
7
+ from mem0.llms.base import LLMBase
8
+
9
+
10
class SarvamLLM(LLMBase):
    """LLM backend for Sarvam AI's OpenAI-compatible chat-completions API (via HTTP)."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        # Set default model if not provided
        if not self.config.model:
            self.config.model = "sarvam-m"

        # Get API key from config or environment variable
        self.api_key = self.config.api_key or os.getenv("SARVAM_API_KEY")

        if not self.api_key:
            raise ValueError(
                "Sarvam API key is required. Set SARVAM_API_KEY environment variable or provide api_key in config."
            )

        # Set base URL - use config value or environment or default
        self.base_url = (
            getattr(self.config, "sarvam_base_url", None) or os.getenv("SARVAM_API_BASE") or "https://api.sarvam.ai/v1"
        )

    def generate_response(self, messages: List[Dict[str, str]], response_format=None) -> str:
        """
        Generate a response based on the given messages using Sarvam-M.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response.
                Currently not used by Sarvam API.

        Returns:
            str: The generated response.

        Raises:
            RuntimeError: If the HTTP request to the Sarvam API fails.
            ValueError: If the response body does not have the expected shape.
        """
        url = f"{self.base_url}/chat/completions"

        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # Prepare the request payload
        params = {
            "messages": messages,
            "model": self.config.model if isinstance(self.config.model, str) else "sarvam-m",
        }

        # Add standard parameters that already exist in BaseLlmConfig
        if self.config.temperature is not None:
            params["temperature"] = self.config.temperature

        if self.config.max_tokens is not None:
            params["max_tokens"] = self.config.max_tokens

        if self.config.top_p is not None:
            params["top_p"] = self.config.top_p

        # Handle Sarvam-specific parameters if model is passed as dict
        if isinstance(self.config.model, dict):
            # Extract model name
            params["model"] = self.config.model.get("name", "sarvam-m")

            # Add Sarvam-specific parameters
            sarvam_specific_params = ["reasoning_effort", "frequency_penalty", "presence_penalty", "seed", "stop", "n"]

            for param in sarvam_specific_params:
                if param in self.config.model:
                    params[param] = self.config.model[param]

        # Keep the try body limited to the network call; chain the original
        # exception (`from e`) so the underlying HTTP failure is preserved
        # in the traceback instead of being silently replaced.
        try:
            response = requests.post(url, headers=headers, json=params, timeout=30)
            response.raise_for_status()
            result = response.json()
        except requests.exceptions.RequestException as e:
            raise RuntimeError(f"Sarvam API request failed: {e}") from e

        if "choices" in result and len(result["choices"]) > 0:
            try:
                return result["choices"][0]["message"]["content"]
            except KeyError as e:
                raise ValueError(f"Unexpected response format from Sarvam API: {e}") from e

        raise ValueError("No response choices found in Sarvam API response")
mem0/llms/together.py ADDED
@@ -0,0 +1,88 @@
1
+ import json
2
+ import os
3
+ from typing import Dict, List, Optional
4
+
5
+ try:
6
+ from together import Together
7
+ except ImportError:
8
+ raise ImportError("The 'together' library is required. Please install it using 'pip install together'.")
9
+
10
+ from mem0.configs.llms.base import BaseLlmConfig
11
+ from mem0.llms.base import LLMBase
12
+ from mem0.memory.utils import extract_json
13
+
14
+
15
class TogetherLLM(LLMBase):
    """LLM backend for Together AI's OpenAI-compatible chat API."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        # Default to Mixtral when no model is configured.
        if not self.config.model:
            self.config.model = "mistralai/Mixtral-8x7B-Instruct-v0.1"

        self.client = Together(api_key=self.config.api_key or os.getenv("TOGETHER_API_KEY"))

    def _parse_response(self, response, tools):
        """
        Normalize the raw API response.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: Plain message content when no tools were supplied,
            otherwise a dict with 'content' plus decoded 'tool_calls'.
        """
        if not tools:
            return response.choices[0].message.content

        message = response.choices[0].message
        parsed = {"content": message.content, "tool_calls": []}
        for call in message.tool_calls or []:
            parsed["tool_calls"].append(
                {
                    "name": call.function.name,
                    "arguments": json.loads(extract_json(call.function.arguments)),
                }
            )
        return parsed

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using TogetherAI.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        request = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }
        if response_format:
            request["response_format"] = response_format
        if tools:  # TODO: Remove tools if no issues found with new memory addition logic
            request["tools"] = tools
            request["tool_choice"] = tool_choice

        return self._parse_response(self.client.chat.completions.create(**request), tools)
mem0/llms/vllm.py ADDED
@@ -0,0 +1,89 @@
1
+ import json
2
+ import os
3
+ from typing import Dict, List, Optional
4
+
5
+ from openai import OpenAI
6
+
7
+ from mem0.configs.llms.base import BaseLlmConfig
8
+ from mem0.llms.base import LLMBase
9
+ from mem0.memory.utils import extract_json
10
+
11
+
12
class VllmLLM(LLMBase):
    """LLM backend for a vLLM server exposing the OpenAI-compatible API."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "Qwen/Qwen2.5-32B-Instruct"

        # vLLM servers typically don't validate the key; keep a placeholder default.
        self.config.api_key = self.config.api_key or os.getenv("VLLM_API_KEY") or "vllm-api-key"
        # Bug fix: without an explicit fallback, base_url would be None when
        # VLLM_BASE_URL is unset, and the OpenAI client would silently default
        # to https://api.openai.com/v1 — sending vLLM traffic to OpenAI.
        # Default to the conventional local vLLM endpoint instead.
        base_url = self.config.vllm_base_url or os.getenv("VLLM_BASE_URL") or "http://localhost:8000/v1"

        self.client = OpenAI(base_url=base_url, api_key=self.config.api_key)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: Plain message content when no tools were supplied,
            otherwise a dict with 'content' plus decoded 'tool_calls'.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(extract_json(tool_call.function.arguments)),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using vLLM.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        params = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }

        if response_format:
            params["response_format"] = response_format

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
mem0/llms/xai.py ADDED
@@ -0,0 +1,52 @@
1
+ import os
2
+ from typing import Dict, List, Optional
3
+
4
+ from openai import OpenAI
5
+
6
+ from mem0.configs.llms.base import BaseLlmConfig
7
+ from mem0.llms.base import LLMBase
8
+
9
+
10
class XAILLM(LLMBase):
    """LLM backend for xAI's Grok models via the OpenAI-compatible API."""

    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "grok-2-latest"

        self.client = OpenAI(
            api_key=self.config.api_key or os.getenv("XAI_API_KEY"),
            base_url=self.config.xai_base_url or os.getenv("XAI_API_BASE") or "https://api.x.ai/v1",
        )

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using XAI.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): Accepted for interface compatibility.
                NOTE(review): not forwarded to the API by this implementation — confirm intended.
            tool_choice (str, optional): Accepted for interface compatibility; not forwarded.

        Returns:
            str: The generated response.
        """
        request = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }

        if response_format:
            request["response_format"] = response_format

        completion = self.client.chat.completions.create(**request)
        return completion.choices[0].message.content
File without changes
mem0/memory/base.py ADDED
@@ -0,0 +1,63 @@
1
+ from abc import ABC, abstractmethod
2
+
3
+
4
class MemoryBase(ABC):
    """Abstract interface that every memory store implementation must satisfy."""

    @abstractmethod
    def get(self, memory_id):
        """Retrieve a single memory.

        Args:
            memory_id (str): ID of the memory to retrieve.

        Returns:
            dict: Retrieved memory.
        """

    @abstractmethod
    def get_all(self):
        """Return every stored memory.

        Returns:
            list: List of all memories.
        """

    @abstractmethod
    def update(self, memory_id, data):
        """Update an existing memory.

        Args:
            memory_id (str): ID of the memory to update.
            data (dict): Data to update the memory with.

        Returns:
            dict: Updated memory.
        """

    @abstractmethod
    def delete(self, memory_id):
        """Remove a memory.

        Args:
            memory_id (str): ID of the memory to delete.
        """

    @abstractmethod
    def history(self, memory_id):
        """Return the history of changes for a memory.

        Args:
            memory_id (str): ID of the memory to get history for.

        Returns:
            list: List of changes for the memory.
        """