agentrun-mem0ai 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentrun_mem0/__init__.py +6 -0
- agentrun_mem0/client/__init__.py +0 -0
- agentrun_mem0/client/main.py +1747 -0
- agentrun_mem0/client/project.py +931 -0
- agentrun_mem0/client/utils.py +115 -0
- agentrun_mem0/configs/__init__.py +0 -0
- agentrun_mem0/configs/base.py +90 -0
- agentrun_mem0/configs/embeddings/__init__.py +0 -0
- agentrun_mem0/configs/embeddings/base.py +110 -0
- agentrun_mem0/configs/enums.py +7 -0
- agentrun_mem0/configs/llms/__init__.py +0 -0
- agentrun_mem0/configs/llms/anthropic.py +56 -0
- agentrun_mem0/configs/llms/aws_bedrock.py +192 -0
- agentrun_mem0/configs/llms/azure.py +57 -0
- agentrun_mem0/configs/llms/base.py +62 -0
- agentrun_mem0/configs/llms/deepseek.py +56 -0
- agentrun_mem0/configs/llms/lmstudio.py +59 -0
- agentrun_mem0/configs/llms/ollama.py +56 -0
- agentrun_mem0/configs/llms/openai.py +79 -0
- agentrun_mem0/configs/llms/vllm.py +56 -0
- agentrun_mem0/configs/prompts.py +459 -0
- agentrun_mem0/configs/rerankers/__init__.py +0 -0
- agentrun_mem0/configs/rerankers/base.py +17 -0
- agentrun_mem0/configs/rerankers/cohere.py +15 -0
- agentrun_mem0/configs/rerankers/config.py +12 -0
- agentrun_mem0/configs/rerankers/huggingface.py +17 -0
- agentrun_mem0/configs/rerankers/llm.py +48 -0
- agentrun_mem0/configs/rerankers/sentence_transformer.py +16 -0
- agentrun_mem0/configs/rerankers/zero_entropy.py +28 -0
- agentrun_mem0/configs/vector_stores/__init__.py +0 -0
- agentrun_mem0/configs/vector_stores/alibabacloud_mysql.py +64 -0
- agentrun_mem0/configs/vector_stores/aliyun_tablestore.py +32 -0
- agentrun_mem0/configs/vector_stores/azure_ai_search.py +57 -0
- agentrun_mem0/configs/vector_stores/azure_mysql.py +84 -0
- agentrun_mem0/configs/vector_stores/baidu.py +27 -0
- agentrun_mem0/configs/vector_stores/chroma.py +58 -0
- agentrun_mem0/configs/vector_stores/databricks.py +61 -0
- agentrun_mem0/configs/vector_stores/elasticsearch.py +65 -0
- agentrun_mem0/configs/vector_stores/faiss.py +37 -0
- agentrun_mem0/configs/vector_stores/langchain.py +30 -0
- agentrun_mem0/configs/vector_stores/milvus.py +42 -0
- agentrun_mem0/configs/vector_stores/mongodb.py +25 -0
- agentrun_mem0/configs/vector_stores/neptune.py +27 -0
- agentrun_mem0/configs/vector_stores/opensearch.py +41 -0
- agentrun_mem0/configs/vector_stores/pgvector.py +52 -0
- agentrun_mem0/configs/vector_stores/pinecone.py +55 -0
- agentrun_mem0/configs/vector_stores/qdrant.py +47 -0
- agentrun_mem0/configs/vector_stores/redis.py +24 -0
- agentrun_mem0/configs/vector_stores/s3_vectors.py +28 -0
- agentrun_mem0/configs/vector_stores/supabase.py +44 -0
- agentrun_mem0/configs/vector_stores/upstash_vector.py +34 -0
- agentrun_mem0/configs/vector_stores/valkey.py +15 -0
- agentrun_mem0/configs/vector_stores/vertex_ai_vector_search.py +28 -0
- agentrun_mem0/configs/vector_stores/weaviate.py +41 -0
- agentrun_mem0/embeddings/__init__.py +0 -0
- agentrun_mem0/embeddings/aws_bedrock.py +100 -0
- agentrun_mem0/embeddings/azure_openai.py +55 -0
- agentrun_mem0/embeddings/base.py +31 -0
- agentrun_mem0/embeddings/configs.py +30 -0
- agentrun_mem0/embeddings/gemini.py +39 -0
- agentrun_mem0/embeddings/huggingface.py +44 -0
- agentrun_mem0/embeddings/langchain.py +35 -0
- agentrun_mem0/embeddings/lmstudio.py +29 -0
- agentrun_mem0/embeddings/mock.py +11 -0
- agentrun_mem0/embeddings/ollama.py +53 -0
- agentrun_mem0/embeddings/openai.py +49 -0
- agentrun_mem0/embeddings/together.py +31 -0
- agentrun_mem0/embeddings/vertexai.py +64 -0
- agentrun_mem0/exceptions.py +503 -0
- agentrun_mem0/graphs/__init__.py +0 -0
- agentrun_mem0/graphs/configs.py +105 -0
- agentrun_mem0/graphs/neptune/__init__.py +0 -0
- agentrun_mem0/graphs/neptune/base.py +497 -0
- agentrun_mem0/graphs/neptune/neptunedb.py +511 -0
- agentrun_mem0/graphs/neptune/neptunegraph.py +474 -0
- agentrun_mem0/graphs/tools.py +371 -0
- agentrun_mem0/graphs/utils.py +97 -0
- agentrun_mem0/llms/__init__.py +0 -0
- agentrun_mem0/llms/anthropic.py +87 -0
- agentrun_mem0/llms/aws_bedrock.py +665 -0
- agentrun_mem0/llms/azure_openai.py +141 -0
- agentrun_mem0/llms/azure_openai_structured.py +91 -0
- agentrun_mem0/llms/base.py +131 -0
- agentrun_mem0/llms/configs.py +34 -0
- agentrun_mem0/llms/deepseek.py +107 -0
- agentrun_mem0/llms/gemini.py +201 -0
- agentrun_mem0/llms/groq.py +88 -0
- agentrun_mem0/llms/langchain.py +94 -0
- agentrun_mem0/llms/litellm.py +87 -0
- agentrun_mem0/llms/lmstudio.py +114 -0
- agentrun_mem0/llms/ollama.py +117 -0
- agentrun_mem0/llms/openai.py +147 -0
- agentrun_mem0/llms/openai_structured.py +52 -0
- agentrun_mem0/llms/sarvam.py +89 -0
- agentrun_mem0/llms/together.py +88 -0
- agentrun_mem0/llms/vllm.py +107 -0
- agentrun_mem0/llms/xai.py +52 -0
- agentrun_mem0/memory/__init__.py +0 -0
- agentrun_mem0/memory/base.py +63 -0
- agentrun_mem0/memory/graph_memory.py +698 -0
- agentrun_mem0/memory/kuzu_memory.py +713 -0
- agentrun_mem0/memory/main.py +2229 -0
- agentrun_mem0/memory/memgraph_memory.py +689 -0
- agentrun_mem0/memory/setup.py +56 -0
- agentrun_mem0/memory/storage.py +218 -0
- agentrun_mem0/memory/telemetry.py +90 -0
- agentrun_mem0/memory/utils.py +208 -0
- agentrun_mem0/proxy/__init__.py +0 -0
- agentrun_mem0/proxy/main.py +189 -0
- agentrun_mem0/reranker/__init__.py +9 -0
- agentrun_mem0/reranker/base.py +20 -0
- agentrun_mem0/reranker/cohere_reranker.py +85 -0
- agentrun_mem0/reranker/huggingface_reranker.py +147 -0
- agentrun_mem0/reranker/llm_reranker.py +142 -0
- agentrun_mem0/reranker/sentence_transformer_reranker.py +107 -0
- agentrun_mem0/reranker/zero_entropy_reranker.py +96 -0
- agentrun_mem0/utils/factory.py +283 -0
- agentrun_mem0/utils/gcp_auth.py +167 -0
- agentrun_mem0/vector_stores/__init__.py +0 -0
- agentrun_mem0/vector_stores/alibabacloud_mysql.py +547 -0
- agentrun_mem0/vector_stores/aliyun_tablestore.py +252 -0
- agentrun_mem0/vector_stores/azure_ai_search.py +396 -0
- agentrun_mem0/vector_stores/azure_mysql.py +463 -0
- agentrun_mem0/vector_stores/baidu.py +368 -0
- agentrun_mem0/vector_stores/base.py +58 -0
- agentrun_mem0/vector_stores/chroma.py +332 -0
- agentrun_mem0/vector_stores/configs.py +67 -0
- agentrun_mem0/vector_stores/databricks.py +761 -0
- agentrun_mem0/vector_stores/elasticsearch.py +237 -0
- agentrun_mem0/vector_stores/faiss.py +479 -0
- agentrun_mem0/vector_stores/langchain.py +180 -0
- agentrun_mem0/vector_stores/milvus.py +250 -0
- agentrun_mem0/vector_stores/mongodb.py +310 -0
- agentrun_mem0/vector_stores/neptune_analytics.py +467 -0
- agentrun_mem0/vector_stores/opensearch.py +292 -0
- agentrun_mem0/vector_stores/pgvector.py +404 -0
- agentrun_mem0/vector_stores/pinecone.py +382 -0
- agentrun_mem0/vector_stores/qdrant.py +270 -0
- agentrun_mem0/vector_stores/redis.py +295 -0
- agentrun_mem0/vector_stores/s3_vectors.py +176 -0
- agentrun_mem0/vector_stores/supabase.py +237 -0
- agentrun_mem0/vector_stores/upstash_vector.py +293 -0
- agentrun_mem0/vector_stores/valkey.py +824 -0
- agentrun_mem0/vector_stores/vertex_ai_vector_search.py +635 -0
- agentrun_mem0/vector_stores/weaviate.py +343 -0
- agentrun_mem0ai-0.0.11.data/data/README.md +205 -0
- agentrun_mem0ai-0.0.11.dist-info/METADATA +277 -0
- agentrun_mem0ai-0.0.11.dist-info/RECORD +150 -0
- agentrun_mem0ai-0.0.11.dist-info/WHEEL +4 -0
- agentrun_mem0ai-0.0.11.dist-info/licenses/LICENSE +201 -0
agentrun_mem0/configs/llms/base.py

@@ -0,0 +1,62 @@
+from abc import ABC
+from typing import Dict, Optional, Union
+
+import httpx
+
+
+class BaseLlmConfig(ABC):
+    """
+    Base configuration for LLMs with only common parameters.
+    Provider-specific configurations should be handled by separate config classes.
+
+    This class contains only the parameters that are common across all LLM providers.
+    For provider-specific parameters, use the appropriate provider config class.
+    """
+
+    def __init__(
+        self,
+        model: Optional[Union[str, Dict]] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[Union[Dict, str]] = None,
+    ):
+        """
+        Initialize a base configuration class instance for the LLM.
+
+        Args:
+            model: The model identifier to use (e.g., "gpt-4.1-nano-2025-04-14", "claude-3-5-sonnet-20240620").
+                Defaults to None (will be set by provider-specific configs).
+            temperature: Controls the randomness of the model's output.
+                Higher values (closer to 1) make output more random; lower values make it more deterministic.
+                Range: 0.0 to 2.0. Defaults to 0.1.
+            api_key: API key for the LLM provider. If None, will try to read it from environment variables.
+                Defaults to None.
+            max_tokens: Maximum number of tokens to generate in the response.
+                Range: 1 to 4096 (varies by model). Defaults to 2000.
+            top_p: Nucleus sampling parameter. Controls diversity via nucleus sampling.
+                Higher values (closer to 1) make word selection more diverse.
+                Range: 0.0 to 1.0. Defaults to 0.1.
+            top_k: Top-k sampling parameter. Limits the number of tokens considered at each step.
+                Higher values make word selection more diverse.
+                Range: 1 to 40. Defaults to 1.
+            enable_vision: Whether to enable vision capabilities for the model.
+                Only applicable to vision-enabled models. Defaults to False.
+            vision_details: Level of detail for vision processing.
+                Options: "low", "high", "auto". Defaults to "auto".
+            http_client_proxies: Proxy settings for the HTTP client.
+                Can be a dict or a string. Defaults to None.
+        """
+        self.model = model
+        self.temperature = temperature
+        self.api_key = api_key
+        self.max_tokens = max_tokens
+        self.top_p = top_p
+        self.top_k = top_k
+        self.enable_vision = enable_vision
+        self.vision_details = vision_details
+        self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
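
The last added line is worth flagging: httpx deprecated the proxies argument in 0.26 and removed it in 0.28 (replaced by proxy/mounts), so this code assumes an older httpx release. A minimal usage sketch follows; it is illustrative rather than shipped with the package, and the model, parameter values, and proxy URL are placeholders:

from agentrun_mem0.configs.llms.base import BaseLlmConfig

# Common parameters map one-to-one onto attributes; http_client_proxies
# triggers construction of an httpx.Client (pre-0.28 httpx API).
config = BaseLlmConfig(
    model="gpt-4.1-nano-2025-04-14",
    temperature=0.2,
    max_tokens=1000,
    http_client_proxies="http://localhost:3128",  # placeholder proxy URL
)
print(config.model, config.temperature, config.http_client)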

agentrun_mem0/configs/llms/deepseek.py

@@ -0,0 +1,56 @@
+from typing import Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class DeepSeekConfig(BaseLlmConfig):
+    """
+    Configuration class for DeepSeek-specific parameters.
+    Inherits from BaseLlmConfig and adds DeepSeek-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # DeepSeek-specific parameters
+        deepseek_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize DeepSeek configuration.
+
+        Args:
+            model: DeepSeek model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: DeepSeek API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            deepseek_base_url: DeepSeek API base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # DeepSeek-specific parameters
+        self.deepseek_base_url = deepseek_base_url
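
A hypothetical instantiation, not from the package: note that deepseek_base_url defaults to None here, so the endpoint below is an assumption supplied by the caller, and the model name and key are placeholders:

from agentrun_mem0.configs.llms.deepseek import DeepSeekConfig

config = DeepSeekConfig(
    model="deepseek-chat",                         # placeholder model name
    api_key="sk-...",                              # placeholder key
    deepseek_base_url="https://api.deepseek.com",  # assumed endpoint, not a package default
)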

agentrun_mem0/configs/llms/lmstudio.py

@@ -0,0 +1,59 @@
+from typing import Any, Dict, Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class LMStudioConfig(BaseLlmConfig):
+    """
+    Configuration class for LM Studio-specific parameters.
+    Inherits from BaseLlmConfig and adds LM Studio-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # LM Studio-specific parameters
+        lmstudio_base_url: Optional[str] = None,
+        lmstudio_response_format: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        Initialize LM Studio configuration.
+
+        Args:
+            model: LM Studio model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: LM Studio API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            lmstudio_base_url: LM Studio base URL, defaults to None
+            lmstudio_response_format: LM Studio response format, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # LM Studio-specific parameters
+        self.lmstudio_base_url = lmstudio_base_url or "http://localhost:1234/v1"
+        self.lmstudio_response_format = lmstudio_response_format
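
Unlike DeepSeekConfig, this class falls back to a concrete local default via the `or` expression in __init__. A short sketch (the model name and response format are placeholders, not package values):

from agentrun_mem0.configs.llms.lmstudio import LMStudioConfig

config = LMStudioConfig(
    model="llama-3.2-1b-instruct",                     # placeholder local model
    lmstudio_response_format={"type": "json_object"},  # hypothetical response format
)
# With no URL supplied, the `or` fallback fills in the default server address.
assert config.lmstudio_base_url == "http://localhost:1234/v1"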

agentrun_mem0/configs/llms/ollama.py

@@ -0,0 +1,56 @@
+from typing import Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class OllamaConfig(BaseLlmConfig):
+    """
+    Configuration class for Ollama-specific parameters.
+    Inherits from BaseLlmConfig and adds Ollama-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # Ollama-specific parameters
+        ollama_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize Ollama configuration.
+
+        Args:
+            model: Ollama model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: Ollama API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            ollama_base_url: Ollama base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # Ollama-specific parameters
+        self.ollama_base_url = ollama_base_url
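
Here ollama_base_url stays None unless supplied; there is no `or` fallback as in the LM Studio and vLLM configs. The sketch below passes Ollama's conventional local address explicitly (an assumption on the caller's part, not a package default), with a placeholder model name:

from agentrun_mem0.configs.llms.ollama import OllamaConfig

config = OllamaConfig(
    model="llama3.1",                          # placeholder local model
    ollama_base_url="http://localhost:11434",  # conventional Ollama port, assumed
)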

agentrun_mem0/configs/llms/openai.py

@@ -0,0 +1,79 @@
+from typing import Any, Callable, List, Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class OpenAIConfig(BaseLlmConfig):
+    """
+    Configuration class for OpenAI and OpenRouter-specific parameters.
+    Inherits from BaseLlmConfig and adds OpenAI-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # OpenAI-specific parameters
+        openai_base_url: Optional[str] = None,
+        models: Optional[List[str]] = None,
+        route: Optional[str] = "fallback",
+        openrouter_base_url: Optional[str] = None,
+        site_url: Optional[str] = None,
+        app_name: Optional[str] = None,
+        store: bool = False,
+        # Response monitoring callback
+        response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
+    ):
+        """
+        Initialize OpenAI configuration.
+
+        Args:
+            model: OpenAI model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: OpenAI API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            openai_base_url: OpenAI API base URL, defaults to None
+            models: List of models for OpenRouter, defaults to None
+            route: OpenRouter route strategy, defaults to "fallback"
+            openrouter_base_url: OpenRouter base URL, defaults to None
+            site_url: Site URL for OpenRouter, defaults to None
+            app_name: Application name for OpenRouter, defaults to None
+            response_callback: Optional callback for monitoring LLM responses.
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # OpenAI-specific parameters
+        self.openai_base_url = openai_base_url
+        self.models = models
+        self.route = route
+        self.openrouter_base_url = openrouter_base_url
+        self.site_url = site_url
+        self.app_name = app_name
+        self.store = store
+
+        # Response monitoring
+        self.response_callback = response_callback
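
Note that store is accepted and stored but has no entry in the Args docstring. A hypothetical setup showing the monitoring hook; the callback must match Callable[[Any, dict, dict], None], though what each dict carries is not documented in this file, and the model and endpoint below are placeholders:

from typing import Any

from agentrun_mem0.configs.llms.openai import OpenAIConfig

def log_response(response: Any, call_args: dict, call_kwargs: dict) -> None:
    # Hypothetical monitoring hook matching the declared signature.
    print("LLM call completed")

config = OpenAIConfig(
    model="gpt-4.1-nano-2025-04-14",
    openai_base_url="https://api.openai.com/v1",  # assumed endpoint; config defaults to None
    response_callback=log_response,
)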

agentrun_mem0/configs/llms/vllm.py

@@ -0,0 +1,56 @@
+from typing import Optional
+
+from agentrun_mem0.configs.llms.base import BaseLlmConfig
+
+
+class VllmConfig(BaseLlmConfig):
+    """
+    Configuration class for vLLM-specific parameters.
+    Inherits from BaseLlmConfig and adds vLLM-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # vLLM-specific parameters
+        vllm_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize vLLM configuration.
+
+        Args:
+            model: vLLM model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: vLLM API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            vllm_base_url: vLLM base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # vLLM-specific parameters
+        self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1"
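
Like LMStudioConfig, VllmConfig falls back to a local default through the `or` expression. A minimal sketch (the served model name is a placeholder):

from agentrun_mem0.configs.llms.vllm import VllmConfig

config = VllmConfig(model="meta-llama/Llama-3.1-8B-Instruct")  # placeholder served model
# With no URL supplied, the `or` fallback yields the default shown in __init__.
assert config.vllm_base_url == "http://localhost:8000/v1"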