agentrun-mem0ai 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentrun_mem0/__init__.py +6 -0
- agentrun_mem0/client/__init__.py +0 -0
- agentrun_mem0/client/main.py +1747 -0
- agentrun_mem0/client/project.py +931 -0
- agentrun_mem0/client/utils.py +115 -0
- agentrun_mem0/configs/__init__.py +0 -0
- agentrun_mem0/configs/base.py +90 -0
- agentrun_mem0/configs/embeddings/__init__.py +0 -0
- agentrun_mem0/configs/embeddings/base.py +110 -0
- agentrun_mem0/configs/enums.py +7 -0
- agentrun_mem0/configs/llms/__init__.py +0 -0
- agentrun_mem0/configs/llms/anthropic.py +56 -0
- agentrun_mem0/configs/llms/aws_bedrock.py +192 -0
- agentrun_mem0/configs/llms/azure.py +57 -0
- agentrun_mem0/configs/llms/base.py +62 -0
- agentrun_mem0/configs/llms/deepseek.py +56 -0
- agentrun_mem0/configs/llms/lmstudio.py +59 -0
- agentrun_mem0/configs/llms/ollama.py +56 -0
- agentrun_mem0/configs/llms/openai.py +79 -0
- agentrun_mem0/configs/llms/vllm.py +56 -0
- agentrun_mem0/configs/prompts.py +459 -0
- agentrun_mem0/configs/rerankers/__init__.py +0 -0
- agentrun_mem0/configs/rerankers/base.py +17 -0
- agentrun_mem0/configs/rerankers/cohere.py +15 -0
- agentrun_mem0/configs/rerankers/config.py +12 -0
- agentrun_mem0/configs/rerankers/huggingface.py +17 -0
- agentrun_mem0/configs/rerankers/llm.py +48 -0
- agentrun_mem0/configs/rerankers/sentence_transformer.py +16 -0
- agentrun_mem0/configs/rerankers/zero_entropy.py +28 -0
- agentrun_mem0/configs/vector_stores/__init__.py +0 -0
- agentrun_mem0/configs/vector_stores/alibabacloud_mysql.py +64 -0
- agentrun_mem0/configs/vector_stores/aliyun_tablestore.py +32 -0
- agentrun_mem0/configs/vector_stores/azure_ai_search.py +57 -0
- agentrun_mem0/configs/vector_stores/azure_mysql.py +84 -0
- agentrun_mem0/configs/vector_stores/baidu.py +27 -0
- agentrun_mem0/configs/vector_stores/chroma.py +58 -0
- agentrun_mem0/configs/vector_stores/databricks.py +61 -0
- agentrun_mem0/configs/vector_stores/elasticsearch.py +65 -0
- agentrun_mem0/configs/vector_stores/faiss.py +37 -0
- agentrun_mem0/configs/vector_stores/langchain.py +30 -0
- agentrun_mem0/configs/vector_stores/milvus.py +42 -0
- agentrun_mem0/configs/vector_stores/mongodb.py +25 -0
- agentrun_mem0/configs/vector_stores/neptune.py +27 -0
- agentrun_mem0/configs/vector_stores/opensearch.py +41 -0
- agentrun_mem0/configs/vector_stores/pgvector.py +52 -0
- agentrun_mem0/configs/vector_stores/pinecone.py +55 -0
- agentrun_mem0/configs/vector_stores/qdrant.py +47 -0
- agentrun_mem0/configs/vector_stores/redis.py +24 -0
- agentrun_mem0/configs/vector_stores/s3_vectors.py +28 -0
- agentrun_mem0/configs/vector_stores/supabase.py +44 -0
- agentrun_mem0/configs/vector_stores/upstash_vector.py +34 -0
- agentrun_mem0/configs/vector_stores/valkey.py +15 -0
- agentrun_mem0/configs/vector_stores/vertex_ai_vector_search.py +28 -0
- agentrun_mem0/configs/vector_stores/weaviate.py +41 -0
- agentrun_mem0/embeddings/__init__.py +0 -0
- agentrun_mem0/embeddings/aws_bedrock.py +100 -0
- agentrun_mem0/embeddings/azure_openai.py +55 -0
- agentrun_mem0/embeddings/base.py +31 -0
- agentrun_mem0/embeddings/configs.py +30 -0
- agentrun_mem0/embeddings/gemini.py +39 -0
- agentrun_mem0/embeddings/huggingface.py +44 -0
- agentrun_mem0/embeddings/langchain.py +35 -0
- agentrun_mem0/embeddings/lmstudio.py +29 -0
- agentrun_mem0/embeddings/mock.py +11 -0
- agentrun_mem0/embeddings/ollama.py +53 -0
- agentrun_mem0/embeddings/openai.py +49 -0
- agentrun_mem0/embeddings/together.py +31 -0
- agentrun_mem0/embeddings/vertexai.py +64 -0
- agentrun_mem0/exceptions.py +503 -0
- agentrun_mem0/graphs/__init__.py +0 -0
- agentrun_mem0/graphs/configs.py +105 -0
- agentrun_mem0/graphs/neptune/__init__.py +0 -0
- agentrun_mem0/graphs/neptune/base.py +497 -0
- agentrun_mem0/graphs/neptune/neptunedb.py +511 -0
- agentrun_mem0/graphs/neptune/neptunegraph.py +474 -0
- agentrun_mem0/graphs/tools.py +371 -0
- agentrun_mem0/graphs/utils.py +97 -0
- agentrun_mem0/llms/__init__.py +0 -0
- agentrun_mem0/llms/anthropic.py +87 -0
- agentrun_mem0/llms/aws_bedrock.py +665 -0
- agentrun_mem0/llms/azure_openai.py +141 -0
- agentrun_mem0/llms/azure_openai_structured.py +91 -0
- agentrun_mem0/llms/base.py +131 -0
- agentrun_mem0/llms/configs.py +34 -0
- agentrun_mem0/llms/deepseek.py +107 -0
- agentrun_mem0/llms/gemini.py +201 -0
- agentrun_mem0/llms/groq.py +88 -0
- agentrun_mem0/llms/langchain.py +94 -0
- agentrun_mem0/llms/litellm.py +87 -0
- agentrun_mem0/llms/lmstudio.py +114 -0
- agentrun_mem0/llms/ollama.py +117 -0
- agentrun_mem0/llms/openai.py +147 -0
- agentrun_mem0/llms/openai_structured.py +52 -0
- agentrun_mem0/llms/sarvam.py +89 -0
- agentrun_mem0/llms/together.py +88 -0
- agentrun_mem0/llms/vllm.py +107 -0
- agentrun_mem0/llms/xai.py +52 -0
- agentrun_mem0/memory/__init__.py +0 -0
- agentrun_mem0/memory/base.py +63 -0
- agentrun_mem0/memory/graph_memory.py +698 -0
- agentrun_mem0/memory/kuzu_memory.py +713 -0
- agentrun_mem0/memory/main.py +2229 -0
- agentrun_mem0/memory/memgraph_memory.py +689 -0
- agentrun_mem0/memory/setup.py +56 -0
- agentrun_mem0/memory/storage.py +218 -0
- agentrun_mem0/memory/telemetry.py +90 -0
- agentrun_mem0/memory/utils.py +208 -0
- agentrun_mem0/proxy/__init__.py +0 -0
- agentrun_mem0/proxy/main.py +189 -0
- agentrun_mem0/reranker/__init__.py +9 -0
- agentrun_mem0/reranker/base.py +20 -0
- agentrun_mem0/reranker/cohere_reranker.py +85 -0
- agentrun_mem0/reranker/huggingface_reranker.py +147 -0
- agentrun_mem0/reranker/llm_reranker.py +142 -0
- agentrun_mem0/reranker/sentence_transformer_reranker.py +107 -0
- agentrun_mem0/reranker/zero_entropy_reranker.py +96 -0
- agentrun_mem0/utils/factory.py +283 -0
- agentrun_mem0/utils/gcp_auth.py +167 -0
- agentrun_mem0/vector_stores/__init__.py +0 -0
- agentrun_mem0/vector_stores/alibabacloud_mysql.py +547 -0
- agentrun_mem0/vector_stores/aliyun_tablestore.py +252 -0
- agentrun_mem0/vector_stores/azure_ai_search.py +396 -0
- agentrun_mem0/vector_stores/azure_mysql.py +463 -0
- agentrun_mem0/vector_stores/baidu.py +368 -0
- agentrun_mem0/vector_stores/base.py +58 -0
- agentrun_mem0/vector_stores/chroma.py +332 -0
- agentrun_mem0/vector_stores/configs.py +67 -0
- agentrun_mem0/vector_stores/databricks.py +761 -0
- agentrun_mem0/vector_stores/elasticsearch.py +237 -0
- agentrun_mem0/vector_stores/faiss.py +479 -0
- agentrun_mem0/vector_stores/langchain.py +180 -0
- agentrun_mem0/vector_stores/milvus.py +250 -0
- agentrun_mem0/vector_stores/mongodb.py +310 -0
- agentrun_mem0/vector_stores/neptune_analytics.py +467 -0
- agentrun_mem0/vector_stores/opensearch.py +292 -0
- agentrun_mem0/vector_stores/pgvector.py +404 -0
- agentrun_mem0/vector_stores/pinecone.py +382 -0
- agentrun_mem0/vector_stores/qdrant.py +270 -0
- agentrun_mem0/vector_stores/redis.py +295 -0
- agentrun_mem0/vector_stores/s3_vectors.py +176 -0
- agentrun_mem0/vector_stores/supabase.py +237 -0
- agentrun_mem0/vector_stores/upstash_vector.py +293 -0
- agentrun_mem0/vector_stores/valkey.py +824 -0
- agentrun_mem0/vector_stores/vertex_ai_vector_search.py +635 -0
- agentrun_mem0/vector_stores/weaviate.py +343 -0
- agentrun_mem0ai-0.0.11.data/data/README.md +205 -0
- agentrun_mem0ai-0.0.11.dist-info/METADATA +277 -0
- agentrun_mem0ai-0.0.11.dist-info/RECORD +150 -0
- agentrun_mem0ai-0.0.11.dist-info/WHEEL +4 -0
- agentrun_mem0ai-0.0.11.dist-info/licenses/LICENSE +201 -0
agentrun_mem0/client/utils.py
@@ -0,0 +1,115 @@
+import json
+import logging
+import httpx
+
+from agentrun_mem0.exceptions import (
+    NetworkError,
+    create_exception_from_response,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class APIError(Exception):
+    """Exception raised for errors in the API.
+
+    Deprecated: Use specific exception classes from agentrun_mem0.exceptions instead.
+    This class is maintained for backward compatibility.
+    """
+
+    pass
+
+
+def api_error_handler(func):
+    """Decorator to handle API errors consistently.
+
+    This decorator catches HTTP and request errors and converts them to
+    appropriate structured exception classes with detailed error information.
+
+    The decorator analyzes HTTP status codes and response content to create
+    the most specific exception type with helpful error messages, suggestions,
+    and debug information.
+    """
+    from functools import wraps
+
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except httpx.HTTPStatusError as e:
+            logger.error(f"HTTP error occurred: {e}")
+
+            # Extract error details from response
+            response_text = ""
+            error_details = {}
+            debug_info = {
+                "status_code": e.response.status_code,
+                "url": str(e.request.url),
+                "method": e.request.method,
+            }
+
+            try:
+                response_text = e.response.text
+                # Try to parse JSON response for additional error details
+                if e.response.headers.get("content-type", "").startswith("application/json"):
+                    error_data = json.loads(response_text)
+                    if isinstance(error_data, dict):
+                        error_details = error_data
+                        response_text = error_data.get("detail", response_text)
+            except (json.JSONDecodeError, AttributeError):
+                # Fallback to plain text response
+                pass
+
+            # Add rate limit information if available
+            if e.response.status_code == 429:
+                retry_after = e.response.headers.get("Retry-After")
+                if retry_after:
+                    try:
+                        debug_info["retry_after"] = int(retry_after)
+                    except ValueError:
+                        pass
+
+                # Add rate limit headers if available
+                for header in ["X-RateLimit-Limit", "X-RateLimit-Remaining", "X-RateLimit-Reset"]:
+                    value = e.response.headers.get(header)
+                    if value:
+                        debug_info[header.lower().replace("-", "_")] = value
+
+            # Create specific exception based on status code
+            exception = create_exception_from_response(
+                status_code=e.response.status_code,
+                response_text=response_text,
+                details=error_details,
+                debug_info=debug_info,
+            )
+
+            raise exception
+
+        except httpx.RequestError as e:
+            logger.error(f"Request error occurred: {e}")
+
+            # Determine the appropriate exception type based on error type
+            if isinstance(e, httpx.TimeoutException):
+                raise NetworkError(
+                    message=f"Request timed out: {str(e)}",
+                    error_code="NET_TIMEOUT",
+                    suggestion="Please check your internet connection and try again",
+                    debug_info={"error_type": "timeout", "original_error": str(e)},
+                )
+            elif isinstance(e, httpx.ConnectError):
+                raise NetworkError(
+                    message=f"Connection failed: {str(e)}",
+                    error_code="NET_CONNECT",
+                    suggestion="Please check your internet connection and try again",
+                    debug_info={"error_type": "connection", "original_error": str(e)},
+                )
+            else:
+                # Generic network error for other request errors
+                raise NetworkError(
+                    message=f"Network request failed: {str(e)}",
+                    error_code="NET_GENERIC",
+                    suggestion="Please check your internet connection and try again",
+                    debug_info={"error_type": "request", "original_error": str(e)},
+                )

+    return wrapper
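
Usage sketch (not part of the package diff): a minimal illustration of how the decorator above is meant to wrap a client call. The fetch_memory function and its URL are hypothetical; the point is that httpx errors raised inside become the structured exceptions from agentrun_mem0.exceptions.

import httpx

from agentrun_mem0.client.utils import api_error_handler

@api_error_handler
def fetch_memory(memory_id: str) -> dict:
    # Hypothetical endpoint. raise_for_status() raises httpx.HTTPStatusError
    # on 4xx/5xx, which the decorator converts via create_exception_from_response;
    # timeouts and connection failures are converted to NetworkError instead.
    response = httpx.get(f"https://api.example.com/v1/memories/{memory_id}")
    response.raise_for_status()
    return response.json()
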
agentrun_mem0/configs/__init__.py (file without changes)
agentrun_mem0/configs/base.py
@@ -0,0 +1,90 @@
+import os
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, Field
+
+from agentrun_mem0.embeddings.configs import EmbedderConfig
+from agentrun_mem0.graphs.configs import GraphStoreConfig
+from agentrun_mem0.llms.configs import LlmConfig
+from agentrun_mem0.vector_stores.configs import VectorStoreConfig
+from agentrun_mem0.configs.rerankers.config import RerankerConfig
+
+# Set up the directory path
+home_dir = os.path.expanduser("~")
+mem0_dir = os.environ.get("MEM0_DIR") or os.path.join(home_dir, ".mem0")
+
+
+class MemoryItem(BaseModel):
+    id: str = Field(..., description="The unique identifier for the text data")
+    memory: str = Field(
+        ..., description="The memory deduced from the text data"
+    )  # TODO After prompt changes from platform, update this
+    hash: Optional[str] = Field(None, description="The hash of the memory")
+    # The metadata value can be anything and not just string. Fix it
+    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata for the text data")
+    score: Optional[float] = Field(None, description="The score associated with the text data")
+    created_at: Optional[str] = Field(None, description="The timestamp when the memory was created")
+    updated_at: Optional[str] = Field(None, description="The timestamp when the memory was updated")
+
+
+class MemoryConfig(BaseModel):
+    vector_store: VectorStoreConfig = Field(
+        description="Configuration for the vector store",
+        default_factory=VectorStoreConfig,
+    )
+    llm: LlmConfig = Field(
+        description="Configuration for the language model",
+        default_factory=LlmConfig,
+    )
+    embedder: EmbedderConfig = Field(
+        description="Configuration for the embedding model",
+        default_factory=EmbedderConfig,
+    )
+    history_db_path: str = Field(
+        description="Path to the history database",
+        default=os.path.join(mem0_dir, "history.db"),
+    )
+    graph_store: GraphStoreConfig = Field(
+        description="Configuration for the graph",
+        default_factory=GraphStoreConfig,
+    )
+    reranker: Optional[RerankerConfig] = Field(
+        description="Configuration for the reranker",
+        default=None,
+    )
+    version: str = Field(
+        description="The version of the API",
+        default="v1.1",
+    )
+    custom_fact_extraction_prompt: Optional[str] = Field(
+        description="Custom prompt for the fact extraction",
+        default=None,
+    )
+    custom_update_memory_prompt: Optional[str] = Field(
+        description="Custom prompt for the update memory",
+        default=None,
+    )
+
+
+class AzureConfig(BaseModel):
+    """
+    Configuration settings for Azure.
+
+    Args:
+        api_key (str): The API key used for authenticating with the Azure service.
+        azure_deployment (str): The name of the Azure deployment.
+        azure_endpoint (str): The endpoint URL for the Azure service.
+        api_version (str): The version of the Azure API being used.
+        default_headers (Dict[str, str]): Headers to include in requests to the Azure API.
+    """
+
+    api_key: str = Field(
+        description="The API key used for authenticating with the Azure service.",
+        default=None,
+    )
+    azure_deployment: str = Field(description="The name of the Azure deployment.", default=None)
+    azure_endpoint: str = Field(description="The endpoint URL for the Azure service.", default=None)
+    api_version: str = Field(description="The version of the Azure API being used.", default=None)
+    default_headers: Optional[Dict[str, str]] = Field(
+        description="Headers to include in requests to the Azure API.", default=None
+    )
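
Usage sketch (not part of the package diff): every section of MemoryConfig above carries a default_factory or default, so an empty call already yields a fully populated config; you override only the pieces you need. The path below is a placeholder.

from agentrun_mem0.configs.base import MemoryConfig

# Only history_db_path is overridden; vector_store, llm, embedder, and
# graph_store fall back to their default_factory instances.
config = MemoryConfig(history_db_path="/tmp/mem0/history.db")
print(config.version)  # "v1.1"
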
agentrun_mem0/configs/embeddings/__init__.py (file without changes)
agentrun_mem0/configs/embeddings/base.py
@@ -0,0 +1,110 @@
+import os
+from abc import ABC
+from typing import Dict, Optional, Union
+
+import httpx
+
+from agentrun_mem0.configs.base import AzureConfig
+
+
+class BaseEmbedderConfig(ABC):
+    """
+    Config for Embeddings.
+    """
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        embedding_dims: Optional[int] = None,
+        # Ollama specific
+        ollama_base_url: Optional[str] = None,
+        # Openai specific
+        openai_base_url: Optional[str] = None,
+        # Huggingface specific
+        model_kwargs: Optional[dict] = None,
+        huggingface_base_url: Optional[str] = None,
+        # AzureOpenAI specific
+        azure_kwargs: Optional[AzureConfig] = {},
+        http_client_proxies: Optional[Union[Dict, str]] = None,
+        # VertexAI specific
+        vertex_credentials_json: Optional[str] = None,
+        memory_add_embedding_type: Optional[str] = None,
+        memory_update_embedding_type: Optional[str] = None,
+        memory_search_embedding_type: Optional[str] = None,
+        # Gemini specific
+        output_dimensionality: Optional[str] = None,
+        # LM Studio specific
+        lmstudio_base_url: Optional[str] = "http://localhost:1234/v1",
+        # AWS Bedrock specific
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_region: Optional[str] = None,
+    ):
+        """
+        Initializes a configuration class instance for the Embeddings.
+
+        :param model: Embedding model to use, defaults to None
+        :type model: Optional[str], optional
+        :param api_key: API key to be used, defaults to None
+        :type api_key: Optional[str], optional
+        :param embedding_dims: The number of dimensions in the embedding, defaults to None
+        :type embedding_dims: Optional[int], optional
+        :param ollama_base_url: Base URL for the Ollama API, defaults to None
+        :type ollama_base_url: Optional[str], optional
+        :param model_kwargs: key-value arguments for the huggingface embedding model, defaults to a dict inside init
+        :type model_kwargs: Optional[Dict[str, Any]], optional
+        :param huggingface_base_url: Huggingface base URL to be used, defaults to None
+        :type huggingface_base_url: Optional[str], optional
+        :param openai_base_url: Openai base URL to be used, defaults to "https://api.openai.com/v1"
+        :type openai_base_url: Optional[str], optional
+        :param azure_kwargs: key-value arguments for the AzureOpenAI embedding model, defaults to a dict inside init
+        :type azure_kwargs: Optional[Dict[str, Any]], optional
+        :param http_client_proxies: The proxy server settings used to create self.http_client, defaults to None
+        :type http_client_proxies: Optional[Dict | str], optional
+        :param vertex_credentials_json: The path to the Vertex AI credentials JSON file, defaults to None
+        :type vertex_credentials_json: Optional[str], optional
+        :param memory_add_embedding_type: The type of embedding to use for the add memory action, defaults to None
+        :type memory_add_embedding_type: Optional[str], optional
+        :param memory_update_embedding_type: The type of embedding to use for the update memory action, defaults to None
+        :type memory_update_embedding_type: Optional[str], optional
+        :param memory_search_embedding_type: The type of embedding to use for the search memory action, defaults to None
+        :type memory_search_embedding_type: Optional[str], optional
+        :param lmstudio_base_url: LM Studio base URL to be used, defaults to "http://localhost:1234/v1"
+        :type lmstudio_base_url: Optional[str], optional
+        """
+
+        self.model = model
+        self.api_key = api_key
+        self.openai_base_url = openai_base_url
+        self.embedding_dims = embedding_dims
+
+        # AzureOpenAI specific
+        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
+
+        # Ollama specific
+        self.ollama_base_url = ollama_base_url
+
+        # Huggingface specific
+        self.model_kwargs = model_kwargs or {}
+        self.huggingface_base_url = huggingface_base_url
+        # AzureOpenAI specific
+        self.azure_kwargs = AzureConfig(**azure_kwargs) or {}
+
+        # VertexAI specific
+        self.vertex_credentials_json = vertex_credentials_json
+        self.memory_add_embedding_type = memory_add_embedding_type
+        self.memory_update_embedding_type = memory_update_embedding_type
+        self.memory_search_embedding_type = memory_search_embedding_type
+
+        # Gemini specific
+        self.output_dimensionality = output_dimensionality
+
+        # LM Studio specific
+        self.lmstudio_base_url = lmstudio_base_url
+
+        # AWS Bedrock specific
+        self.aws_access_key_id = aws_access_key_id
+        self.aws_secret_access_key = aws_secret_access_key
+        self.aws_region = aws_region or os.environ.get("AWS_REGION") or "us-west-2"
+
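
Construction sketch (not part of the package diff): the class above is a plain keyword bag, so a minimal instantiation looks like the following. The model id and dimensions are illustrative placeholders; note how azure_kwargs defaults to {} and is coerced into an AzureConfig, and aws_region falls back to the AWS_REGION environment variable and then "us-west-2".

from agentrun_mem0.configs.embeddings.base import BaseEmbedderConfig

cfg = BaseEmbedderConfig(
    model="text-embedding-3-small",  # placeholder model id
    embedding_dims=1536,             # illustrative dimension count
)
print(cfg.aws_region)   # AWS_REGION env var if set, otherwise "us-west-2"
print(cfg.azure_kwargs) # AzureConfig populated from the empty default dict
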
agentrun_mem0/configs/llms/__init__.py (file without changes)
agentrun_mem0/configs/llms/anthropic.py
@@ -0,0 +1,56 @@
+from typing import Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class AnthropicConfig(BaseLlmConfig):
+    """
+    Configuration class for Anthropic-specific parameters.
+    Inherits from BaseLlmConfig and adds Anthropic-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # Anthropic-specific parameters
+        anthropic_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize Anthropic configuration.
+
+        Args:
+            model: Anthropic model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: Anthropic API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            anthropic_base_url: Anthropic API base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # Anthropic-specific parameters
+        self.anthropic_base_url = anthropic_base_url
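
Usage sketch (not part of the package diff): the model id and key below are placeholders, and the prints assume BaseLlmConfig stores the base parameters as attributes, which the super().__init__ call above suggests.

from agentrun_mem0.configs.llms.anthropic import AnthropicConfig

cfg = AnthropicConfig(
    model="claude-3-5-sonnet-20240620",  # illustrative model id
    api_key="sk-ant-...",                # placeholder
    anthropic_base_url=None,             # None keeps the default Anthropic endpoint
)
print(cfg.anthropic_base_url)  # None
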
agentrun_mem0/configs/llms/aws_bedrock.py
@@ -0,0 +1,192 @@
+import os
+from typing import Any, Dict, List, Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class AWSBedrockConfig(BaseLlmConfig):
+    """
+    Configuration class for AWS Bedrock LLM integration.
+
+    Supports all available Bedrock models with automatic provider detection.
+    """
+
+    def __init__(
+        self,
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        max_tokens: int = 2000,
+        top_p: float = 0.9,
+        top_k: int = 1,
+        aws_access_key_id: Optional[str] = None,
+        aws_secret_access_key: Optional[str] = None,
+        aws_region: str = "",
+        aws_session_token: Optional[str] = None,
+        aws_profile: Optional[str] = None,
+        model_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ):
+        """
+        Initialize AWS Bedrock configuration.
+
+        Args:
+            model: Bedrock model identifier (e.g., "amazon.nova-3-mini-20241119-v1:0")
+            temperature: Controls randomness (0.0 to 2.0)
+            max_tokens: Maximum tokens to generate
+            top_p: Nucleus sampling parameter (0.0 to 1.0)
+            top_k: Top-k sampling parameter (1 to 40)
+            aws_access_key_id: AWS access key (optional, uses env vars if not provided)
+            aws_secret_access_key: AWS secret key (optional, uses env vars if not provided)
+            aws_region: AWS region for Bedrock service
+            aws_session_token: AWS session token for temporary credentials
+            aws_profile: AWS profile name for credentials
+            model_kwargs: Additional model-specific parameters
+            **kwargs: Additional arguments passed to base class
+        """
+        super().__init__(
+            model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
+            temperature=temperature,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            **kwargs,
+        )
+
+        self.aws_access_key_id = aws_access_key_id
+        self.aws_secret_access_key = aws_secret_access_key
+        self.aws_region = aws_region or os.getenv("AWS_REGION", "us-west-2")
+        self.aws_session_token = aws_session_token
+        self.aws_profile = aws_profile
+        self.model_kwargs = model_kwargs or {}
+
+    @property
+    def provider(self) -> str:
+        """Get the provider from the model identifier."""
+        if not self.model or "." not in self.model:
+            return "unknown"
+        return self.model.split(".")[0]
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name without provider prefix."""
+        if not self.model or "." not in self.model:
+            return self.model
+        return ".".join(self.model.split(".")[1:])
+
+    def get_model_config(self) -> Dict[str, Any]:
+        """Get model-specific configuration parameters."""
+        base_config = {
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+        }
+
+        # Add custom model kwargs
+        base_config.update(self.model_kwargs)
+
+        return base_config
+
+    def get_aws_config(self) -> Dict[str, Any]:
+        """Get AWS configuration parameters."""
+        config = {
+            "region_name": self.aws_region,
+        }
+
+        if self.aws_access_key_id:
+            config["aws_access_key_id"] = self.aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
+
+        if self.aws_secret_access_key:
+            config["aws_secret_access_key"] = self.aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY")
+
+        if self.aws_session_token:
+            config["aws_session_token"] = self.aws_session_token or os.getenv("AWS_SESSION_TOKEN")
+
+        if self.aws_profile:
+            config["profile_name"] = self.aws_profile or os.getenv("AWS_PROFILE")
+
+        return config
+
+    def validate_model_format(self) -> bool:
+        """
+        Validate that the model identifier follows Bedrock naming convention.
+
+        Returns:
+            True if valid, False otherwise
+        """
+        if not self.model:
+            return False
+
+        # Check if model follows provider.model-name format
+        if "." not in self.model:
+            return False
+
+        provider, model_name = self.model.split(".", 1)
+
+        # Validate provider
+        valid_providers = [
+            "ai21", "amazon", "anthropic", "cohere", "meta", "mistral",
+            "stability", "writer", "deepseek", "gpt-oss", "perplexity",
+            "snowflake", "titan", "command", "j2", "llama"
+        ]
+
+        if provider not in valid_providers:
+            return False
+
+        # Validate model name is not empty
+        if not model_name:
+            return False
+
+        return True
+
+    def get_supported_regions(self) -> List[str]:
+        """Get list of AWS regions that support Bedrock."""
+        return [
+            "us-east-1",
+            "us-west-2",
+            "us-east-2",
+            "eu-west-1",
+            "ap-southeast-1",
+            "ap-northeast-1",
+        ]
+
+    def get_model_capabilities(self) -> Dict[str, Any]:
+        """Get model capabilities based on provider."""
+        capabilities = {
+            "supports_tools": False,
+            "supports_vision": False,
+            "supports_streaming": False,
+            "supports_multimodal": False,
+        }
+
+        if self.provider == "anthropic":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_vision": True,
+                "supports_streaming": True,
+                "supports_multimodal": True,
+            })
+        elif self.provider == "amazon":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_vision": True,
+                "supports_streaming": True,
+                "supports_multimodal": True,
+            })
+        elif self.provider == "cohere":
+            capabilities.update({
+                "supports_tools": True,
+                "supports_streaming": True,
+            })
+        elif self.provider == "meta":
+            capabilities.update({
+                "supports_vision": True,
+                "supports_streaming": True,
+            })
+        elif self.provider == "mistral":
+            capabilities.update({
+                "supports_vision": True,
+                "supports_streaming": True,
+            })
+
+        return capabilities
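
Usage sketch (not part of the package diff): a short run through the helpers defined above, relying only on the class's own default model id and the region fallback shown in __init__.

from agentrun_mem0.configs.llms.aws_bedrock import AWSBedrockConfig

cfg = AWSBedrockConfig(aws_region="us-east-1")
# model defaults to "anthropic.claude-3-5-sonnet-20240620-v1:0"
print(cfg.provider)                 # "anthropic" (text before the first ".")
print(cfg.model_name)               # "claude-3-5-sonnet-20240620-v1:0"
print(cfg.validate_model_format())  # True: known provider, non-empty model name
print(cfg.get_aws_config())         # {"region_name": "us-east-1"} when no keys are set
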
agentrun_mem0/configs/llms/azure.py
@@ -0,0 +1,57 @@
+from typing import Any, Dict, Optional
+
+from agentrun_mem0.configs.base import AzureConfig
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class AzureOpenAIConfig(BaseLlmConfig):
+    """
+    Configuration class for Azure OpenAI-specific parameters.
+    Inherits from BaseLlmConfig and adds Azure OpenAI-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # Azure OpenAI-specific parameters
+        azure_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Initialize Azure OpenAI configuration.
+
+        Args:
+            model: Azure OpenAI model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: Azure OpenAI API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            azure_kwargs: Azure-specific configuration, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # Azure OpenAI-specific parameters
+        self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
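
Usage sketch (not part of the package diff): azure_kwargs is a plain dict that gets unpacked into the AzureConfig model from agentrun_mem0/configs/base.py above. All values below are hypothetical placeholders, not real endpoints or versions.

from agentrun_mem0.configs.llms.azure import AzureOpenAIConfig

cfg = AzureOpenAIConfig(
    model="gpt-4o",  # illustrative deployment model
    azure_kwargs={
        "api_key": "...",                                    # placeholder
        "azure_deployment": "my-deployment",                 # hypothetical name
        "azure_endpoint": "https://example.openai.azure.com",
        "api_version": "2024-02-01",                         # illustrative
    },
)
print(cfg.azure_kwargs.azure_deployment)  # "my-deployment"
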