powermem-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. powermem/__init__.py +103 -0
  2. powermem/agent/__init__.py +35 -0
  3. powermem/agent/abstract/__init__.py +22 -0
  4. powermem/agent/abstract/collaboration.py +259 -0
  5. powermem/agent/abstract/context.py +187 -0
  6. powermem/agent/abstract/manager.py +232 -0
  7. powermem/agent/abstract/permission.py +217 -0
  8. powermem/agent/abstract/privacy.py +267 -0
  9. powermem/agent/abstract/scope.py +199 -0
  10. powermem/agent/agent.py +791 -0
  11. powermem/agent/components/__init__.py +18 -0
  12. powermem/agent/components/collaboration_coordinator.py +645 -0
  13. powermem/agent/components/permission_controller.py +586 -0
  14. powermem/agent/components/privacy_protector.py +767 -0
  15. powermem/agent/components/scope_controller.py +685 -0
  16. powermem/agent/factories/__init__.py +16 -0
  17. powermem/agent/factories/agent_factory.py +266 -0
  18. powermem/agent/factories/config_factory.py +308 -0
  19. powermem/agent/factories/memory_factory.py +229 -0
  20. powermem/agent/implementations/__init__.py +16 -0
  21. powermem/agent/implementations/hybrid.py +728 -0
  22. powermem/agent/implementations/multi_agent.py +1040 -0
  23. powermem/agent/implementations/multi_user.py +1020 -0
  24. powermem/agent/types.py +53 -0
  25. powermem/agent/wrappers/__init__.py +14 -0
  26. powermem/agent/wrappers/agent_memory_wrapper.py +427 -0
  27. powermem/agent/wrappers/compatibility_wrapper.py +520 -0
  28. powermem/config_loader.py +318 -0
  29. powermem/configs.py +249 -0
  30. powermem/core/__init__.py +19 -0
  31. powermem/core/async_memory.py +1493 -0
  32. powermem/core/audit.py +258 -0
  33. powermem/core/base.py +165 -0
  34. powermem/core/memory.py +1567 -0
  35. powermem/core/setup.py +162 -0
  36. powermem/core/telemetry.py +215 -0
  37. powermem/integrations/__init__.py +17 -0
  38. powermem/integrations/embeddings/__init__.py +13 -0
  39. powermem/integrations/embeddings/aws_bedrock.py +100 -0
  40. powermem/integrations/embeddings/azure_openai.py +55 -0
  41. powermem/integrations/embeddings/base.py +31 -0
  42. powermem/integrations/embeddings/config/base.py +132 -0
  43. powermem/integrations/embeddings/configs.py +31 -0
  44. powermem/integrations/embeddings/factory.py +48 -0
  45. powermem/integrations/embeddings/gemini.py +39 -0
  46. powermem/integrations/embeddings/huggingface.py +41 -0
  47. powermem/integrations/embeddings/langchain.py +35 -0
  48. powermem/integrations/embeddings/lmstudio.py +29 -0
  49. powermem/integrations/embeddings/mock.py +11 -0
  50. powermem/integrations/embeddings/ollama.py +53 -0
  51. powermem/integrations/embeddings/openai.py +49 -0
  52. powermem/integrations/embeddings/qwen.py +102 -0
  53. powermem/integrations/embeddings/together.py +31 -0
  54. powermem/integrations/embeddings/vertexai.py +54 -0
  55. powermem/integrations/llm/__init__.py +18 -0
  56. powermem/integrations/llm/anthropic.py +87 -0
  57. powermem/integrations/llm/base.py +132 -0
  58. powermem/integrations/llm/config/anthropic.py +56 -0
  59. powermem/integrations/llm/config/azure.py +56 -0
  60. powermem/integrations/llm/config/base.py +62 -0
  61. powermem/integrations/llm/config/deepseek.py +56 -0
  62. powermem/integrations/llm/config/ollama.py +56 -0
  63. powermem/integrations/llm/config/openai.py +79 -0
  64. powermem/integrations/llm/config/qwen.py +68 -0
  65. powermem/integrations/llm/config/qwen_asr.py +46 -0
  66. powermem/integrations/llm/config/vllm.py +56 -0
  67. powermem/integrations/llm/configs.py +26 -0
  68. powermem/integrations/llm/deepseek.py +106 -0
  69. powermem/integrations/llm/factory.py +118 -0
  70. powermem/integrations/llm/gemini.py +201 -0
  71. powermem/integrations/llm/langchain.py +65 -0
  72. powermem/integrations/llm/ollama.py +106 -0
  73. powermem/integrations/llm/openai.py +166 -0
  74. powermem/integrations/llm/openai_structured.py +80 -0
  75. powermem/integrations/llm/qwen.py +207 -0
  76. powermem/integrations/llm/qwen_asr.py +171 -0
  77. powermem/integrations/llm/vllm.py +106 -0
  78. powermem/integrations/rerank/__init__.py +20 -0
  79. powermem/integrations/rerank/base.py +43 -0
  80. powermem/integrations/rerank/config/__init__.py +7 -0
  81. powermem/integrations/rerank/config/base.py +27 -0
  82. powermem/integrations/rerank/configs.py +23 -0
  83. powermem/integrations/rerank/factory.py +68 -0
  84. powermem/integrations/rerank/qwen.py +159 -0
  85. powermem/intelligence/__init__.py +17 -0
  86. powermem/intelligence/ebbinghaus_algorithm.py +354 -0
  87. powermem/intelligence/importance_evaluator.py +361 -0
  88. powermem/intelligence/intelligent_memory_manager.py +284 -0
  89. powermem/intelligence/manager.py +148 -0
  90. powermem/intelligence/plugin.py +229 -0
  91. powermem/prompts/__init__.py +29 -0
  92. powermem/prompts/graph/graph_prompts.py +217 -0
  93. powermem/prompts/graph/graph_tools_prompts.py +469 -0
  94. powermem/prompts/importance_evaluation.py +246 -0
  95. powermem/prompts/intelligent_memory_prompts.py +163 -0
  96. powermem/prompts/templates.py +193 -0
  97. powermem/storage/__init__.py +14 -0
  98. powermem/storage/adapter.py +896 -0
  99. powermem/storage/base.py +109 -0
  100. powermem/storage/config/base.py +13 -0
  101. powermem/storage/config/oceanbase.py +58 -0
  102. powermem/storage/config/pgvector.py +52 -0
  103. powermem/storage/config/sqlite.py +27 -0
  104. powermem/storage/configs.py +159 -0
  105. powermem/storage/factory.py +59 -0
  106. powermem/storage/migration_manager.py +438 -0
  107. powermem/storage/oceanbase/__init__.py +8 -0
  108. powermem/storage/oceanbase/constants.py +162 -0
  109. powermem/storage/oceanbase/oceanbase.py +1384 -0
  110. powermem/storage/oceanbase/oceanbase_graph.py +1441 -0
  111. powermem/storage/pgvector/__init__.py +7 -0
  112. powermem/storage/pgvector/pgvector.py +420 -0
  113. powermem/storage/sqlite/__init__.py +0 -0
  114. powermem/storage/sqlite/sqlite.py +218 -0
  115. powermem/storage/sqlite/sqlite_vector_store.py +311 -0
  116. powermem/utils/__init__.py +35 -0
  117. powermem/utils/utils.py +605 -0
  118. powermem/version.py +23 -0
  119. powermem-0.1.0.dist-info/METADATA +187 -0
  120. powermem-0.1.0.dist-info/RECORD +123 -0
  121. powermem-0.1.0.dist-info/WHEEL +5 -0
  122. powermem-0.1.0.dist-info/licenses/LICENSE +206 -0
  123. powermem-0.1.0.dist-info/top_level.txt +1 -0
powermem/integrations/llm/__init__.py
@@ -0,0 +1,18 @@
+ """
+ LLM integration module
+
+ This module provides the LLM integrations and their factory.
+ """
+ from .base import LLMBase
+ from .configs import LLMConfig
+ from .factory import LLMFactory
+
+ # Provider alias names (same objects, alternate capitalization)
+ LlmFactory = LLMFactory
+ LlmConfig = LLMConfig
+
+ __all__ = [
+     "LLMBase",
+     "LlmFactory",
+     "LlmConfig"
+ ]
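
For orientation, a minimal sketch of how these exports behave (assuming the wheel is installed as powermem; factory.py is listed in the index above but its body is not shown in this section):

from powermem.integrations.llm import LlmConfig, LlmFactory
from powermem.integrations.llm.configs import LLMConfig
from powermem.integrations.llm.factory import LLMFactory

# The mixed-case names are plain aliases, not subclasses:
assert LlmFactory is LLMFactory
assert LlmConfig is LLMConfig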
powermem/integrations/llm/anthropic.py
@@ -0,0 +1,87 @@
+ import os
+ from typing import Dict, List, Optional, Union
+
+ from powermem.integrations.llm import LLMBase
+ from powermem.integrations.llm.config.anthropic import AnthropicConfig
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+ try:
+     import anthropic
+ except ImportError:
+     raise ImportError("The 'anthropic' library is required. Please install it using 'pip install anthropic'.")
+
+
+ class AnthropicLLM(LLMBase):
+     def __init__(self, config: Optional[Union[BaseLLMConfig, AnthropicConfig, Dict]] = None):
+         # Convert to AnthropicConfig if needed
+         if config is None:
+             config = AnthropicConfig()
+         elif isinstance(config, dict):
+             config = AnthropicConfig(**config)
+         elif isinstance(config, BaseLLMConfig) and not isinstance(config, AnthropicConfig):
+             # Convert BaseLLMConfig to AnthropicConfig
+             config = AnthropicConfig(
+                 model=config.model,
+                 temperature=config.temperature,
+                 api_key=config.api_key,
+                 max_tokens=config.max_tokens,
+                 top_p=config.top_p,
+                 top_k=config.top_k,
+                 enable_vision=config.enable_vision,
+                 vision_details=config.vision_details,
+                 http_client_proxies=config.http_client,  # NOTE: passes the built httpx.Client where proxy settings are expected
+             )
+
+         super().__init__(config)
+
+         if not self.config.model:
+             self.config.model = "claude-3-5-sonnet-20240620"
+
+         api_key = self.config.api_key or os.getenv("ANTHROPIC_API_KEY")
+         self.client = anthropic.Anthropic(api_key=api_key)
+
+     def generate_response(
+         self,
+         messages: List[Dict[str, str]],
+         response_format=None,
+         tools: Optional[List[Dict]] = None,
+         tool_choice: str = "auto",
+         **kwargs,
+     ):
+         """
+         Generate a response based on the given messages using Anthropic.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             response_format (str or object, optional): Format of the response. Defaults to None.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+             **kwargs: Additional Anthropic-specific parameters.
+
+         Returns:
+             str: The generated response.
+         """
+         # Separate the system message from the conversation; Anthropic takes it
+         # as a dedicated `system` field rather than a message entry.
+         system_message = ""
+         filtered_messages = []
+         for message in messages:
+             if message["role"] == "system":
+                 system_message = message["content"]
+             else:
+                 filtered_messages.append(message)
+
+         params = self._get_supported_params(messages=messages, **kwargs)
+         params.update(
+             {
+                 "model": self.config.model,
+                 "messages": filtered_messages,
+                 "system": system_message,
+             }
+         )
+
+         if tools:  # TODO: Remove tools if no issues found with new memory addition logic
+             params["tools"] = tools
+             # NOTE: the Anthropic SDK expects tool_choice as an object such as
+             # {"type": "auto"}; a bare string may be rejected.
+             params["tool_choice"] = tool_choice
+
+         response = self.client.messages.create(**params)
+         return response.content[0].text
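
A hedged sketch of driving this class (assumes the anthropic package is installed and ANTHROPIC_API_KEY is set; the model string is just the fallback default from __init__ above):

from powermem.integrations.llm.anthropic import AnthropicLLM

# Dict configs are converted to AnthropicConfig in __init__;
# with no model given, it falls back to claude-3-5-sonnet-20240620.
llm = AnthropicLLM(config={"max_tokens": 512})

messages = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Name one use of a vector store."},
]

# The system entry is stripped from the list and sent as the `system` field.
print(llm.generate_response(messages))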
powermem/integrations/llm/base.py
@@ -0,0 +1,132 @@
+ from abc import ABC, abstractmethod
+ from typing import Dict, List, Optional, Union
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class LLMBase(ABC):
+     """
+     Base class for all LLM providers.
+     Handles common functionality and delegates provider-specific logic to subclasses.
+     """
+
+     def __init__(self, config: Optional[Union[BaseLLMConfig, Dict]] = None):
+         """Initialize a base LLM class
+
+         :param config: LLM configuration option class or dict, defaults to None
+         :type config: Optional[Union[BaseLLMConfig, Dict]], optional
+         """
+         if config is None:
+             self.config = BaseLLMConfig()
+         elif isinstance(config, dict):
+             # Handle dict-based configuration (backward compatibility)
+             self.config = BaseLLMConfig(**config)
+         else:
+             self.config = config
+
+         # Validate configuration
+         self._validate_config()
+
+     def _validate_config(self):
+         """
+         Validate the configuration.
+         Override in subclasses to add provider-specific validation.
+         """
+         if not hasattr(self.config, "model"):
+             raise ValueError("Configuration must have a 'model' attribute")
+
+         if not hasattr(self.config, "api_key"):
+             # Check if API key is available via environment variable
+             # This will be handled by individual providers
+             pass
+
+     def _is_reasoning_model(self, model: str) -> bool:
+         """
+         Check if the model is a reasoning model or GPT-5 series that doesn't support certain parameters.
+
+         Args:
+             model: The model name to check
+
+         Returns:
+             bool: True if the model is a reasoning model or GPT-5 series
+         """
+         reasoning_models = {
+             "o1", "o1-preview", "o3-mini", "o3",
+             "gpt-5", "gpt-5o", "gpt-5o-mini", "gpt-5o-micro",
+         }
+
+         if model.lower() in reasoning_models:
+             return True
+
+         # Substring match catches dated or suffixed variants (e.g. "o1-2024-12-17")
+         model_lower = model.lower()
+         if any(reasoning_model in model_lower for reasoning_model in ["gpt-5", "o1", "o3"]):
+             return True
+
+         return False
+
+     def _get_supported_params(self, **kwargs) -> Dict:
+         """
+         Get parameters that are supported by the current model.
+         Filters out unsupported parameters for reasoning models and GPT-5 series.
+
+         Args:
+             **kwargs: Additional parameters to include
+
+         Returns:
+             Dict: Filtered parameters dictionary
+         """
+         model = getattr(self.config, 'model', '')
+
+         if self._is_reasoning_model(model):
+             # Reasoning models reject sampling knobs such as temperature and
+             # top_p, so only the structural parameters are passed through.
+             supported_params = {}
+
+             if "messages" in kwargs:
+                 supported_params["messages"] = kwargs["messages"]
+             if "response_format" in kwargs:
+                 supported_params["response_format"] = kwargs["response_format"]
+             if "tools" in kwargs:
+                 supported_params["tools"] = kwargs["tools"]
+             if "tool_choice" in kwargs:
+                 supported_params["tool_choice"] = kwargs["tool_choice"]
+
+             return supported_params
+         else:
+             # For regular models, include all common parameters
+             return self._get_common_params(**kwargs)
+
+     @abstractmethod
+     def generate_response(
+         self, messages: List[Dict[str, str]], tools: Optional[List[Dict]] = None, tool_choice: str = "auto",
+         **kwargs
+     ):
+         """
+         Generate a response based on the given messages.
+
+         Args:
+             messages (list): List of message dicts containing 'role' and 'content'.
+             tools (list, optional): List of tools that the model can call. Defaults to None.
+             tool_choice (str, optional): Tool choice method. Defaults to "auto".
+             **kwargs: Additional provider-specific parameters.
+
+         Returns:
+             str or dict: The generated response.
+         """
+         pass
+
+     def _get_common_params(self, **kwargs) -> Dict:
+         """
+         Get common parameters that most providers use.
+
+         Returns:
+             Dict: Common parameters dictionary.
+         """
+         params = {
+             "temperature": self.config.temperature,
+             "max_tokens": self.config.max_tokens,
+             "top_p": self.config.top_p,
+         }
+
+         # Add provider-specific parameters from kwargs
+         params.update(kwargs)
+
+         return params
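
Since generate_response is the only abstract method, a provider can be stubbed in a few lines. A toy subclass (not part of the package) that exercises the parameter filtering above:

from typing import Dict, List, Optional

from powermem.integrations.llm.base import LLMBase
from powermem.integrations.llm.config.base import BaseLLMConfig


class EchoLLM(LLMBase):
    """Toy provider: returns the last user message, useful for offline tests."""

    def generate_response(self, messages: List[Dict[str, str]],
                          tools: Optional[List[Dict]] = None,
                          tool_choice: str = "auto", **kwargs):
        # For reasoning-model names like "o1" or "gpt-5", _get_supported_params
        # would drop temperature/max_tokens/top_p; for "echo-1" it keeps them.
        params = self._get_supported_params(messages=messages, **kwargs)
        users = [m["content"] for m in params["messages"] if m["role"] == "user"]
        return users[-1] if users else ""


llm = EchoLLM(BaseLLMConfig(model="echo-1"))
print(llm.generate_response([{"role": "user", "content": "hello"}]))  # -> "hello"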
powermem/integrations/llm/config/anthropic.py
@@ -0,0 +1,56 @@
+ from typing import Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class AnthropicConfig(BaseLLMConfig):
+     """
+     Configuration class for Anthropic-specific parameters.
+     Inherits from BaseLLMConfig and adds Anthropic-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # Anthropic-specific parameters
+         anthropic_base_url: Optional[str] = None,
+     ):
+         """
+         Initialize Anthropic configuration.
+
+         Args:
+             model: Anthropic model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: Anthropic API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             anthropic_base_url: Anthropic API base URL, defaults to None
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # Anthropic-specific parameters
+         self.anthropic_base_url = anthropic_base_url
powermem/integrations/llm/config/azure.py
@@ -0,0 +1,56 @@
+ from typing import Any, Dict, Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class AzureOpenAIConfig(BaseLLMConfig):
+     """
+     Configuration class for Azure OpenAI-specific parameters.
+     Inherits from BaseLLMConfig and adds Azure OpenAI-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # Azure OpenAI-specific parameters
+         azure_kwargs: Optional[Dict[str, Any]] = None,
+     ):
+         """
+         Initialize Azure OpenAI configuration.
+
+         Args:
+             model: Azure OpenAI model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: Azure OpenAI API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             azure_kwargs: Azure-specific configuration, defaults to None
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # Azure OpenAI-specific parameters
+         # NOTE: AzureConfig is referenced below but never imported in this file
+         # as released, so constructing AzureOpenAIConfig would raise a NameError.
+         self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
powermem/integrations/llm/config/base.py
@@ -0,0 +1,62 @@
+ from abc import ABC
+ from typing import Dict, Optional, Union
+
+ import httpx
+
+
+ class BaseLLMConfig(ABC):
+     """
+     Base configuration for LLMs with only common parameters.
+     Provider-specific configurations should be handled by separate config classes.
+
+     This class contains only the parameters that are common across all LLM providers.
+     For provider-specific parameters, use the appropriate provider config class.
+     """
+
+     def __init__(
+         self,
+         model: Optional[Union[str, Dict]] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[Union[Dict, str]] = None,
+     ):
+         """
+         Initialize a base configuration class instance for the LLM.
+
+         Args:
+             model: The model identifier to use (e.g., "gpt-4o-mini", "claude-3-5-sonnet-20240620").
+                 Defaults to None (will be set by provider-specific configs)
+             temperature: Controls the randomness of the model's output.
+                 Higher values (closer to 1) make output more random, lower values make it more deterministic.
+                 Range: 0.0 to 2.0. Defaults to 0.1
+             api_key: API key for the LLM provider. If None, will try to get from environment variables.
+                 Defaults to None
+             max_tokens: Maximum number of tokens to generate in the response.
+                 Range: 1 to 4096 (varies by model). Defaults to 2000
+             top_p: Nucleus sampling parameter. Controls diversity via nucleus sampling.
+                 Higher values (closer to 1) make word selection more diverse.
+                 Range: 0.0 to 1.0. Defaults to 0.1
+             top_k: Top-k sampling parameter. Limits the number of tokens considered for each step.
+                 Higher values make word selection more diverse.
+                 Range: 1 to 40. Defaults to 1
+             enable_vision: Whether to enable vision capabilities for the model.
+                 Only applicable to vision-enabled models. Defaults to False
+             vision_details: Level of detail for vision processing.
+                 Options: "low", "high", "auto". Defaults to "auto"
+             http_client_proxies: Proxy settings for HTTP client.
+                 Can be a dict or string. Defaults to None
+         """
+         self.model = model
+         self.temperature = temperature
+         self.api_key = api_key
+         self.max_tokens = max_tokens
+         self.top_p = top_p
+         self.top_k = top_k
+         self.enable_vision = enable_vision
+         self.vision_details = vision_details
+         # NOTE: `proxies` was deprecated in httpx 0.26 and removed in 0.28,
+         # so this line requires an older httpx release.
+         self.http_client = httpx.Client(proxies=http_client_proxies) if http_client_proxies else None
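
Two details of this constructor are worth noting: the httpx.Client is built eagerly, and only when proxy settings are passed. A small sketch (the proxy URL is a placeholder, and the second call assumes an httpx release that still accepts `proxies`):

from powermem.integrations.llm.config.base import BaseLLMConfig

cfg = BaseLLMConfig(model="gpt-4o-mini", temperature=0.0)
assert cfg.http_client is None  # no proxies given, so no client is built

# httpx accepts a mounts-style dict here; requires httpx < 0.28.
proxied = BaseLLMConfig(
    model="gpt-4o-mini",
    http_client_proxies={"http://": "http://127.0.0.1:8080"},
)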
powermem/integrations/llm/config/deepseek.py
@@ -0,0 +1,56 @@
+ from typing import Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class DeepSeekConfig(BaseLLMConfig):
+     """
+     Configuration class for DeepSeek-specific parameters.
+     Inherits from BaseLLMConfig and adds DeepSeek-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # DeepSeek-specific parameters
+         deepseek_base_url: Optional[str] = None,
+     ):
+         """
+         Initialize DeepSeek configuration.
+
+         Args:
+             model: DeepSeek model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: DeepSeek API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             deepseek_base_url: DeepSeek API base URL, defaults to None
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # DeepSeek-specific parameters
+         self.deepseek_base_url = deepseek_base_url
powermem/integrations/llm/config/ollama.py
@@ -0,0 +1,56 @@
+ from typing import Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class OllamaConfig(BaseLLMConfig):
+     """
+     Configuration class for Ollama-specific parameters.
+     Inherits from BaseLLMConfig and adds Ollama-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # Ollama-specific parameters
+         ollama_base_url: Optional[str] = None,
+     ):
+         """
+         Initialize Ollama configuration.
+
+         Args:
+             model: Ollama model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: Ollama API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             ollama_base_url: Ollama base URL, defaults to None
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # Ollama-specific parameters
+         self.ollama_base_url = ollama_base_url
powermem/integrations/llm/config/openai.py
@@ -0,0 +1,79 @@
+ from typing import Any, Callable, List, Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class OpenAIConfig(BaseLLMConfig):
+     """
+     Configuration class for OpenAI and OpenRouter-specific parameters.
+     Inherits from BaseLLMConfig and adds OpenAI-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # OpenAI-specific parameters
+         openai_base_url: Optional[str] = None,
+         models: Optional[List[str]] = None,
+         route: Optional[str] = "fallback",
+         openrouter_base_url: Optional[str] = None,
+         site_url: Optional[str] = None,
+         app_name: Optional[str] = None,
+         store: bool = False,
+         # Response monitoring callback
+         response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
+     ):
+         """
+         Initialize OpenAI configuration.
+
+         Args:
+             model: OpenAI model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: OpenAI API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             openai_base_url: OpenAI API base URL, defaults to None
+             models: List of models for OpenRouter, defaults to None
+             route: OpenRouter route strategy, defaults to "fallback"
+             openrouter_base_url: OpenRouter base URL, defaults to None
+             site_url: Site URL for OpenRouter, defaults to None
+             app_name: Application name for OpenRouter, defaults to None
+             store: Whether the provider should store the completion (OpenAI's "store" flag), defaults to False
+             response_callback: Optional callback for monitoring LLM responses.
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # OpenAI-specific parameters
+         self.openai_base_url = openai_base_url
+         self.models = models
+         self.route = route
+         self.openrouter_base_url = openrouter_base_url
+         self.site_url = site_url
+         self.app_name = app_name
+         self.store = store
+
+         # Response monitoring
+         self.response_callback = response_callback
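
The OpenRouter fields suggest this one config class double-duties for OpenRouter routing. A hedged sketch (how the LLM class consumes these fields is not shown in this hunk; the model slugs are illustrative):

from powermem.integrations.llm.config.openai import OpenAIConfig

# Plain OpenAI usage:
openai_cfg = OpenAIConfig(model="gpt-4o-mini", store=False)

# OpenRouter-style usage: a model list plus a route strategy
# ("fallback" tries the listed models in order).
router_cfg = OpenAIConfig(
    models=["openai/gpt-4o-mini", "anthropic/claude-3.5-sonnet"],
    route="fallback",
    openrouter_base_url="https://openrouter.ai/api/v1",
    site_url="https://example.com",
    app_name="powermem-demo",
)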
powermem/integrations/llm/config/qwen.py
@@ -0,0 +1,68 @@
+ from typing import Any, Callable, Optional
+
+ from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+ class QwenConfig(BaseLLMConfig):
+     """
+     Configuration class for Qwen-specific parameters.
+     Inherits from BaseLLMConfig and adds Qwen-specific settings.
+     """
+
+     def __init__(
+         self,
+         # Base parameters
+         model: Optional[str] = None,
+         temperature: float = 0.1,
+         api_key: Optional[str] = None,
+         max_tokens: int = 2000,
+         top_p: float = 0.1,
+         top_k: int = 1,
+         enable_vision: bool = False,
+         vision_details: Optional[str] = "auto",
+         http_client_proxies: Optional[dict] = None,
+         # Qwen-specific parameters
+         dashscope_base_url: Optional[str] = None,
+         enable_search: bool = False,
+         search_params: Optional[dict] = None,
+         # Response monitoring callback
+         response_callback: Optional[Callable[[Any, dict, dict], None]] = None,
+     ):
+         """
+         Initialize Qwen configuration.
+
+         Args:
+             model: Qwen model to use, defaults to None
+             temperature: Controls randomness, defaults to 0.1
+             api_key: DashScope API key, defaults to None
+             max_tokens: Maximum tokens to generate, defaults to 2000
+             top_p: Nucleus sampling parameter, defaults to 0.1
+             top_k: Top-k sampling parameter, defaults to 1
+             enable_vision: Enable vision capabilities, defaults to False
+             vision_details: Vision detail level, defaults to "auto"
+             http_client_proxies: HTTP client proxy settings, defaults to None
+             dashscope_base_url: DashScope API base URL, defaults to None
+             enable_search: Enable web search capability, defaults to False
+             search_params: Parameters for web search, defaults to None
+             response_callback: Optional callback for monitoring LLM responses.
+         """
+         # Initialize base parameters
+         super().__init__(
+             model=model,
+             temperature=temperature,
+             api_key=api_key,
+             max_tokens=max_tokens,
+             top_p=top_p,
+             top_k=top_k,
+             enable_vision=enable_vision,
+             vision_details=vision_details,
+             http_client_proxies=http_client_proxies,
+         )
+
+         # Qwen-specific parameters
+         self.dashscope_base_url = dashscope_base_url
+         self.enable_search = enable_search
+         self.search_params = search_params or {}
+
+         # Response monitoring
+         self.response_callback = response_callback
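
The response_callback field only fixes a signature here; its call site is elsewhere in the package, so the argument meanings in this sketch are an assumption:

from typing import Any

from powermem.integrations.llm.config.qwen import QwenConfig


def log_response(response: Any, request_info: dict, extra: dict) -> None:
    # Argument roles are assumed from the Callable[[Any, dict, dict], None]
    # annotation; the diff does not show how the callback is invoked.
    print(type(response).__name__, request_info, extra)


cfg = QwenConfig(
    model="qwen-plus",
    enable_search=True,                              # Qwen-specific web-search switch
    search_params={"search_strategy": "standard"},   # hypothetical keys
    response_callback=log_response,
)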