agentrun_mem0ai-0.0.11-py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentrun_mem0/__init__.py +6 -0
- agentrun_mem0/client/__init__.py +0 -0
- agentrun_mem0/client/main.py +1747 -0
- agentrun_mem0/client/project.py +931 -0
- agentrun_mem0/client/utils.py +115 -0
- agentrun_mem0/configs/__init__.py +0 -0
- agentrun_mem0/configs/base.py +90 -0
- agentrun_mem0/configs/embeddings/__init__.py +0 -0
- agentrun_mem0/configs/embeddings/base.py +110 -0
- agentrun_mem0/configs/enums.py +7 -0
- agentrun_mem0/configs/llms/__init__.py +0 -0
- agentrun_mem0/configs/llms/anthropic.py +56 -0
- agentrun_mem0/configs/llms/aws_bedrock.py +192 -0
- agentrun_mem0/configs/llms/azure.py +57 -0
- agentrun_mem0/configs/llms/base.py +62 -0
- agentrun_mem0/configs/llms/deepseek.py +56 -0
- agentrun_mem0/configs/llms/lmstudio.py +59 -0
- agentrun_mem0/configs/llms/ollama.py +56 -0
- agentrun_mem0/configs/llms/openai.py +79 -0
- agentrun_mem0/configs/llms/vllm.py +56 -0
- agentrun_mem0/configs/prompts.py +459 -0
- agentrun_mem0/configs/rerankers/__init__.py +0 -0
- agentrun_mem0/configs/rerankers/base.py +17 -0
- agentrun_mem0/configs/rerankers/cohere.py +15 -0
- agentrun_mem0/configs/rerankers/config.py +12 -0
- agentrun_mem0/configs/rerankers/huggingface.py +17 -0
- agentrun_mem0/configs/rerankers/llm.py +48 -0
- agentrun_mem0/configs/rerankers/sentence_transformer.py +16 -0
- agentrun_mem0/configs/rerankers/zero_entropy.py +28 -0
- agentrun_mem0/configs/vector_stores/__init__.py +0 -0
- agentrun_mem0/configs/vector_stores/alibabacloud_mysql.py +64 -0
- agentrun_mem0/configs/vector_stores/aliyun_tablestore.py +32 -0
- agentrun_mem0/configs/vector_stores/azure_ai_search.py +57 -0
- agentrun_mem0/configs/vector_stores/azure_mysql.py +84 -0
- agentrun_mem0/configs/vector_stores/baidu.py +27 -0
- agentrun_mem0/configs/vector_stores/chroma.py +58 -0
- agentrun_mem0/configs/vector_stores/databricks.py +61 -0
- agentrun_mem0/configs/vector_stores/elasticsearch.py +65 -0
- agentrun_mem0/configs/vector_stores/faiss.py +37 -0
- agentrun_mem0/configs/vector_stores/langchain.py +30 -0
- agentrun_mem0/configs/vector_stores/milvus.py +42 -0
- agentrun_mem0/configs/vector_stores/mongodb.py +25 -0
- agentrun_mem0/configs/vector_stores/neptune.py +27 -0
- agentrun_mem0/configs/vector_stores/opensearch.py +41 -0
- agentrun_mem0/configs/vector_stores/pgvector.py +52 -0
- agentrun_mem0/configs/vector_stores/pinecone.py +55 -0
- agentrun_mem0/configs/vector_stores/qdrant.py +47 -0
- agentrun_mem0/configs/vector_stores/redis.py +24 -0
- agentrun_mem0/configs/vector_stores/s3_vectors.py +28 -0
- agentrun_mem0/configs/vector_stores/supabase.py +44 -0
- agentrun_mem0/configs/vector_stores/upstash_vector.py +34 -0
- agentrun_mem0/configs/vector_stores/valkey.py +15 -0
- agentrun_mem0/configs/vector_stores/vertex_ai_vector_search.py +28 -0
- agentrun_mem0/configs/vector_stores/weaviate.py +41 -0
- agentrun_mem0/embeddings/__init__.py +0 -0
- agentrun_mem0/embeddings/aws_bedrock.py +100 -0
- agentrun_mem0/embeddings/azure_openai.py +55 -0
- agentrun_mem0/embeddings/base.py +31 -0
- agentrun_mem0/embeddings/configs.py +30 -0
- agentrun_mem0/embeddings/gemini.py +39 -0
- agentrun_mem0/embeddings/huggingface.py +44 -0
- agentrun_mem0/embeddings/langchain.py +35 -0
- agentrun_mem0/embeddings/lmstudio.py +29 -0
- agentrun_mem0/embeddings/mock.py +11 -0
- agentrun_mem0/embeddings/ollama.py +53 -0
- agentrun_mem0/embeddings/openai.py +49 -0
- agentrun_mem0/embeddings/together.py +31 -0
- agentrun_mem0/embeddings/vertexai.py +64 -0
- agentrun_mem0/exceptions.py +503 -0
- agentrun_mem0/graphs/__init__.py +0 -0
- agentrun_mem0/graphs/configs.py +105 -0
- agentrun_mem0/graphs/neptune/__init__.py +0 -0
- agentrun_mem0/graphs/neptune/base.py +497 -0
- agentrun_mem0/graphs/neptune/neptunedb.py +511 -0
- agentrun_mem0/graphs/neptune/neptunegraph.py +474 -0
- agentrun_mem0/graphs/tools.py +371 -0
- agentrun_mem0/graphs/utils.py +97 -0
- agentrun_mem0/llms/__init__.py +0 -0
- agentrun_mem0/llms/anthropic.py +87 -0
- agentrun_mem0/llms/aws_bedrock.py +665 -0
- agentrun_mem0/llms/azure_openai.py +141 -0
- agentrun_mem0/llms/azure_openai_structured.py +91 -0
- agentrun_mem0/llms/base.py +131 -0
- agentrun_mem0/llms/configs.py +34 -0
- agentrun_mem0/llms/deepseek.py +107 -0
- agentrun_mem0/llms/gemini.py +201 -0
- agentrun_mem0/llms/groq.py +88 -0
- agentrun_mem0/llms/langchain.py +94 -0
- agentrun_mem0/llms/litellm.py +87 -0
- agentrun_mem0/llms/lmstudio.py +114 -0
- agentrun_mem0/llms/ollama.py +117 -0
- agentrun_mem0/llms/openai.py +147 -0
- agentrun_mem0/llms/openai_structured.py +52 -0
- agentrun_mem0/llms/sarvam.py +89 -0
- agentrun_mem0/llms/together.py +88 -0
- agentrun_mem0/llms/vllm.py +107 -0
- agentrun_mem0/llms/xai.py +52 -0
- agentrun_mem0/memory/__init__.py +0 -0
- agentrun_mem0/memory/base.py +63 -0
- agentrun_mem0/memory/graph_memory.py +698 -0
- agentrun_mem0/memory/kuzu_memory.py +713 -0
- agentrun_mem0/memory/main.py +2229 -0
- agentrun_mem0/memory/memgraph_memory.py +689 -0
- agentrun_mem0/memory/setup.py +56 -0
- agentrun_mem0/memory/storage.py +218 -0
- agentrun_mem0/memory/telemetry.py +90 -0
- agentrun_mem0/memory/utils.py +208 -0
- agentrun_mem0/proxy/__init__.py +0 -0
- agentrun_mem0/proxy/main.py +189 -0
- agentrun_mem0/reranker/__init__.py +9 -0
- agentrun_mem0/reranker/base.py +20 -0
- agentrun_mem0/reranker/cohere_reranker.py +85 -0
- agentrun_mem0/reranker/huggingface_reranker.py +147 -0
- agentrun_mem0/reranker/llm_reranker.py +142 -0
- agentrun_mem0/reranker/sentence_transformer_reranker.py +107 -0
- agentrun_mem0/reranker/zero_entropy_reranker.py +96 -0
- agentrun_mem0/utils/factory.py +283 -0
- agentrun_mem0/utils/gcp_auth.py +167 -0
- agentrun_mem0/vector_stores/__init__.py +0 -0
- agentrun_mem0/vector_stores/alibabacloud_mysql.py +547 -0
- agentrun_mem0/vector_stores/aliyun_tablestore.py +252 -0
- agentrun_mem0/vector_stores/azure_ai_search.py +396 -0
- agentrun_mem0/vector_stores/azure_mysql.py +463 -0
- agentrun_mem0/vector_stores/baidu.py +368 -0
- agentrun_mem0/vector_stores/base.py +58 -0
- agentrun_mem0/vector_stores/chroma.py +332 -0
- agentrun_mem0/vector_stores/configs.py +67 -0
- agentrun_mem0/vector_stores/databricks.py +761 -0
- agentrun_mem0/vector_stores/elasticsearch.py +237 -0
- agentrun_mem0/vector_stores/faiss.py +479 -0
- agentrun_mem0/vector_stores/langchain.py +180 -0
- agentrun_mem0/vector_stores/milvus.py +250 -0
- agentrun_mem0/vector_stores/mongodb.py +310 -0
- agentrun_mem0/vector_stores/neptune_analytics.py +467 -0
- agentrun_mem0/vector_stores/opensearch.py +292 -0
- agentrun_mem0/vector_stores/pgvector.py +404 -0
- agentrun_mem0/vector_stores/pinecone.py +382 -0
- agentrun_mem0/vector_stores/qdrant.py +270 -0
- agentrun_mem0/vector_stores/redis.py +295 -0
- agentrun_mem0/vector_stores/s3_vectors.py +176 -0
- agentrun_mem0/vector_stores/supabase.py +237 -0
- agentrun_mem0/vector_stores/upstash_vector.py +293 -0
- agentrun_mem0/vector_stores/valkey.py +824 -0
- agentrun_mem0/vector_stores/vertex_ai_vector_search.py +635 -0
- agentrun_mem0/vector_stores/weaviate.py +343 -0
- agentrun_mem0ai-0.0.11.data/data/README.md +205 -0
- agentrun_mem0ai-0.0.11.dist-info/METADATA +277 -0
- agentrun_mem0ai-0.0.11.dist-info/RECORD +150 -0
- agentrun_mem0ai-0.0.11.dist-info/WHEEL +4 -0
- agentrun_mem0ai-0.0.11.dist-info/licenses/LICENSE +201 -0

agentrun_mem0/llms/gemini.py
@@ -0,0 +1,201 @@
import os
from typing import Dict, List, Optional

try:
    from google import genai
    from google.genai import types
except ImportError:
    raise ImportError("The 'google-genai' library is required. Please install it using 'pip install google-genai'.")

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.base import LLMBase


class GeminiLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "gemini-2.0-flash"

        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
        self.client = genai.Client(api_key=api_key)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": None,
                "tool_calls": [],
            }

            # Extract content from the first candidate
            if response.candidates and response.candidates[0].content.parts:
                for part in response.candidates[0].content.parts:
                    if hasattr(part, "text") and part.text:
                        processed_response["content"] = part.text
                        break

            # Extract function calls
            if response.candidates and response.candidates[0].content.parts:
                for part in response.candidates[0].content.parts:
                    if hasattr(part, "function_call") and part.function_call:
                        fn = part.function_call
                        processed_response["tool_calls"].append(
                            {
                                "name": fn.name,
                                "arguments": dict(fn.args) if fn.args else {},
                            }
                        )

            return processed_response
        else:
            if response.candidates and response.candidates[0].content.parts:
                for part in response.candidates[0].content.parts:
                    if hasattr(part, "text") and part.text:
                        return part.text
            return ""

    def _reformat_messages(self, messages: List[Dict[str, str]]):
        """
        Reformat messages for Gemini.

        Args:
            messages: The list of messages provided in the request.

        Returns:
            tuple: (system_instruction, contents_list)
        """
        system_instruction = None
        contents = []

        for message in messages:
            if message["role"] == "system":
                system_instruction = message["content"]
            else:
                content = types.Content(
                    parts=[types.Part(text=message["content"])],
                    role=message["role"],
                )
                contents.append(content)

        return system_instruction, contents

    def _reformat_tools(self, tools: Optional[List[Dict]]):
        """
        Reformat tools for Gemini.

        Args:
            tools: The list of tools provided in the request.

        Returns:
            list: The list of tools in the required format.
        """

        def remove_additional_properties(data):
            """Recursively removes 'additionalProperties' from nested dictionaries."""
            if isinstance(data, dict):
                filtered_dict = {
                    key: remove_additional_properties(value)
                    for key, value in data.items()
                    if not (key == "additionalProperties")
                }
                return filtered_dict
            else:
                return data

        if tools:
            function_declarations = []
            for tool in tools:
                func = tool["function"].copy()
                cleaned_func = remove_additional_properties(func)

                function_declaration = types.FunctionDeclaration(
                    name=cleaned_func["name"],
                    description=cleaned_func.get("description", ""),
                    parameters=cleaned_func.get("parameters", {}),
                )
                function_declarations.append(function_declaration)

            tool_obj = types.Tool(function_declarations=function_declarations)
            return [tool_obj]
        else:
            return None

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using Gemini.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format for the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """

        # Extract system instruction and reformat messages
        system_instruction, contents = self._reformat_messages(messages)

        # Prepare generation config
        config_params = {
            "temperature": self.config.temperature,
            "max_output_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }

        # Add system instruction to config if present
        if system_instruction:
            config_params["system_instruction"] = system_instruction

        if response_format is not None and response_format["type"] == "json_object":
            config_params["response_mime_type"] = "application/json"
            if "schema" in response_format:
                config_params["response_schema"] = response_format["schema"]

        if tools:
            formatted_tools = self._reformat_tools(tools)
            config_params["tools"] = formatted_tools

            if tool_choice:
                if tool_choice == "auto":
                    mode = types.FunctionCallingConfigMode.AUTO
                elif tool_choice == "any":
                    mode = types.FunctionCallingConfigMode.ANY
                else:
                    mode = types.FunctionCallingConfigMode.NONE

                tool_config = types.ToolConfig(
                    function_calling_config=types.FunctionCallingConfig(
                        mode=mode,
                        allowed_function_names=(
                            [tool["function"]["name"] for tool in tools] if tool_choice == "any" else None
                        ),
                    )
                )
                config_params["tool_config"] = tool_config

        generation_config = types.GenerateContentConfig(**config_params)

        response = self.client.models.generate_content(
            model=self.config.model, contents=contents, config=generation_config
        )

        return self._parse_response(response, tools)
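
For context, a minimal usage sketch of the GeminiLLM adapter above. This is hypothetical and not shipped in the wheel: it assumes BaseLlmConfig accepts the model keyword used in this file, that GOOGLE_API_KEY is set, and the get_weather tool schema is invented for illustration.

# Hypothetical usage sketch -- not part of the package.
from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.gemini import GeminiLLM

llm = GeminiLLM(BaseLlmConfig(model="gemini-2.0-flash"))  # api_key falls back to GOOGLE_API_KEY

# OpenAI-style tool schema; _reformat_tools strips "additionalProperties"
# and converts it into a google.genai types.FunctionDeclaration.
tools = [
    {
        "function": {
            "name": "get_weather",  # invented example tool
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        }
    }
]

result = llm.generate_response(
    messages=[
        {"role": "system", "content": "You are a weather assistant."},
        {"role": "user", "content": "What is the weather in Paris?"},
    ],
    tools=tools,
    tool_choice="any",  # maps to FunctionCallingConfigMode.ANY and restricts allowed_function_names
)
# With tools set, _parse_response returns a dict:
# {"content": ..., "tool_calls": [{"name": "get_weather", "arguments": {...}}]}
print(result["tool_calls"])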

agentrun_mem0/llms/groq.py
@@ -0,0 +1,88 @@
import json
import os
from typing import Dict, List, Optional

try:
    from groq import Groq
except ImportError:
    raise ImportError("The 'groq' library is required. Please install it using 'pip install groq'.")

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.base import LLMBase
from agentrun_mem0.memory.utils import extract_json


class GroqLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "llama3-70b-8192"

        api_key = self.config.api_key or os.getenv("GROQ_API_KEY")
        self.client = Groq(api_key=api_key)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(extract_json(tool_call.function.arguments)),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using Groq.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        params = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }
        if response_format:
            params["response_format"] = response_format
        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
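
A similarly hedged sketch for the Groq adapter, assuming GROQ_API_KEY is set; it shows response_format being forwarded verbatim to Groq's OpenAI-compatible chat.completions endpoint, so JSON mode works as it does with the OpenAI SDK. The prompt and model choice are illustrative only.

# Hypothetical usage sketch -- not part of the package.
import json

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.groq import GroqLLM

llm = GroqLLM(BaseLlmConfig(model="llama3-70b-8192"))  # api_key falls back to GROQ_API_KEY

raw = llm.generate_response(
    messages=[
        {"role": "system", "content": 'Reply with a JSON object with keys "city" and "country".'},
        {"role": "user", "content": "Where is the Eiffel Tower?"},
    ],
    response_format={"type": "json_object"},  # passed straight through to the Groq API
)
print(json.loads(raw))  # without tools, _parse_response returns the raw message content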

agentrun_mem0/llms/langchain.py
@@ -0,0 +1,94 @@
from typing import Dict, List, Optional

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.base import LLMBase

try:
    from langchain.chat_models.base import BaseChatModel
    from langchain_core.messages import AIMessage
except ImportError:
    raise ImportError("langchain is not installed. Please install it using `pip install langchain`")


class LangchainLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if self.config.model is None:
            raise ValueError("`model` parameter is required")

        if not isinstance(self.config.model, BaseChatModel):
            raise ValueError("`model` must be an instance of BaseChatModel")

        self.langchain_model = self.config.model

    def _parse_response(self, response: AIMessage, tools: Optional[List[Dict]]):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: AI Message.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if not tools:
            return response.content

        processed_response = {
            "content": response.content,
            "tool_calls": [],
        }

        for tool_call in response.tool_calls:
            processed_response["tool_calls"].append(
                {
                    "name": tool_call["name"],
                    "arguments": tool_call["args"],
                }
            )

        return processed_response

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using langchain_community.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Not used in Langchain.
            tools (list, optional): List of tools that the model can call.
            tool_choice (str, optional): Tool choice method.

        Returns:
            str: The generated response.
        """
        # Convert the messages to LangChain's tuple format
        langchain_messages = []
        for message in messages:
            role = message["role"]
            content = message["content"]

            if role == "system":
                langchain_messages.append(("system", content))
            elif role == "user":
                langchain_messages.append(("human", content))
            elif role == "assistant":
                langchain_messages.append(("ai", content))

        if not langchain_messages:
            raise ValueError("No valid messages found in the messages list")

        langchain_model = self.langchain_model
        if tools:
            langchain_model = langchain_model.bind_tools(tools=tools, tool_choice=tool_choice)

        response: AIMessage = langchain_model.invoke(langchain_messages)
        return self._parse_response(response, tools)
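
This adapter is unusual in that config.model carries a live BaseChatModel instance rather than a model name, which __init__ enforces with the isinstance check above. A minimal sketch, assuming langchain-openai is installed, OPENAI_API_KEY is set, and BaseLlmConfig tolerates an arbitrary object in its model field:

# Hypothetical usage sketch -- not part of the package.
from langchain_openai import ChatOpenAI

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.langchain import LangchainLLM

# The wrapped object must be a BaseChatModel; __init__ raises ValueError otherwise.
chat_model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
llm = LangchainLLM(BaseLlmConfig(model=chat_model))

# Messages are converted to ("system"|"human"|"ai", content) tuples before .invoke().
print(llm.generate_response(messages=[
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Name the capital of Japan."},
]))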

agentrun_mem0/llms/litellm.py
@@ -0,0 +1,87 @@
import json
from typing import Dict, List, Optional

try:
    import litellm
except ImportError:
    raise ImportError("The 'litellm' library is required. Please install it using 'pip install litellm'.")

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.base import LLMBase
from agentrun_mem0.memory.utils import extract_json


class LiteLLM(LLMBase):
    def __init__(self, config: Optional[BaseLlmConfig] = None):
        super().__init__(config)

        if not self.config.model:
            self.config.model = "gpt-4.1-nano-2025-04-14"

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(extract_json(tool_call.function.arguments)),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
    ):
        """
        Generate a response based on the given messages using Litellm.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".

        Returns:
            str: The generated response.
        """
        if not litellm.supports_function_calling(self.config.model):
            raise ValueError(f"Model '{self.config.model}' in litellm does not support function calling.")

        params = {
            "model": self.config.model,
            "messages": messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
        }
        if response_format:
            params["response_format"] = response_format
        if tools:  # TODO: Remove tools if no issues found with new memory addition logic
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = litellm.completion(**params)
        return self._parse_response(response, tools)
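
Note that generate_response calls litellm.supports_function_calling() before every request, even when no tools are passed, so the adapter only accepts models LiteLLM knows to support tool use. A hedged sketch, assuming provider credentials are configured the usual LiteLLM way (environment variables such as OPENAI_API_KEY) and with an illustrative model name:

# Hypothetical usage sketch -- not part of the package.
from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.llms.litellm import LiteLLM

# Any model string LiteLLM can route works here, but only if
# litellm.supports_function_calling() returns True for it -- the guard
# runs even on plain text requests like this one.
llm = LiteLLM(BaseLlmConfig(model="gpt-4o-mini"))

print(llm.generate_response(messages=[
    {"role": "user", "content": "Summarize what LiteLLM does in one sentence."},
]))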

agentrun_mem0/llms/lmstudio.py
@@ -0,0 +1,114 @@
import json
from typing import Dict, List, Optional, Union

from openai import OpenAI

from agentrun_mem0.configs.llms.base import BaseLlmConfig
from agentrun_mem0.configs.llms.lmstudio import LMStudioConfig
from agentrun_mem0.llms.base import LLMBase
from agentrun_mem0.memory.utils import extract_json


class LMStudioLLM(LLMBase):
    def __init__(self, config: Optional[Union[BaseLlmConfig, LMStudioConfig, Dict]] = None):
        # Convert to LMStudioConfig if needed
        if config is None:
            config = LMStudioConfig()
        elif isinstance(config, dict):
            config = LMStudioConfig(**config)
        elif isinstance(config, BaseLlmConfig) and not isinstance(config, LMStudioConfig):
            # Convert BaseLlmConfig to LMStudioConfig
            config = LMStudioConfig(
                model=config.model,
                temperature=config.temperature,
                api_key=config.api_key,
                max_tokens=config.max_tokens,
                top_p=config.top_p,
                top_k=config.top_k,
                enable_vision=config.enable_vision,
                vision_details=config.vision_details,
                http_client_proxies=config.http_client,
            )

        super().__init__(config)

        self.config.model = (
            self.config.model
            or "lmstudio-community/Meta-Llama-3.1-70B-Instruct-GGUF/Meta-Llama-3.1-70B-Instruct-IQ2_M.gguf"
        )
        self.config.api_key = self.config.api_key or "lm-studio"

        self.client = OpenAI(base_url=self.config.lmstudio_base_url, api_key=self.config.api_key)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: The processed response.
        """
        if tools:
            processed_response = {
                "content": response.choices[0].message.content,
                "tool_calls": [],
            }

            if response.choices[0].message.tool_calls:
                for tool_call in response.choices[0].message.tool_calls:
                    processed_response["tool_calls"].append(
                        {
                            "name": tool_call.function.name,
                            "arguments": json.loads(extract_json(tool_call.function.arguments)),
                        }
                    )

            return processed_response
        else:
            return response.choices[0].message.content

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
        **kwargs,
    ):
        """
        Generate a response based on the given messages using LM Studio.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to "text".
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".
            **kwargs: Additional LM Studio-specific parameters.

        Returns:
            str: The generated response.
        """
        params = self._get_supported_params(messages=messages, **kwargs)
        params.update(
            {
                "model": self.config.model,
                "messages": messages,
            }
        )

        if self.config.lmstudio_response_format:
            params["response_format"] = self.config.lmstudio_response_format
        elif response_format:
            params["response_format"] = response_format
        else:
            params["response_format"] = {"type": "json_object"}

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
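
Because LM Studio exposes an OpenAI-compatible server, this adapter simply points the OpenAI client at lmstudio_base_url. Note that when neither lmstudio_response_format nor response_format is given, the request defaults to {"type": "json_object"}, so the loaded model must be able to emit JSON. A sketch, assuming LMStudioConfig accepts these fields as constructor keywords (the dict branch of __init__ implies it does), a local server on LM Studio's usual default port, and an invented model name:

# Hypothetical usage sketch -- not part of the package.
from agentrun_mem0.llms.lmstudio import LMStudioLLM

# A plain dict is converted to LMStudioConfig by __init__.
llm = LMStudioLLM({
    "model": "qwen2.5-7b-instruct",               # whatever model is loaded in LM Studio
    "lmstudio_base_url": "http://localhost:1234/v1",  # assumed local default
})

# With no response_format argument, the request falls back to JSON mode.
print(llm.generate_response(messages=[
    {"role": "user", "content": 'Return {"ok": true} as JSON.'},
]))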