powermem-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. powermem/__init__.py +103 -0
  2. powermem/agent/__init__.py +35 -0
  3. powermem/agent/abstract/__init__.py +22 -0
  4. powermem/agent/abstract/collaboration.py +259 -0
  5. powermem/agent/abstract/context.py +187 -0
  6. powermem/agent/abstract/manager.py +232 -0
  7. powermem/agent/abstract/permission.py +217 -0
  8. powermem/agent/abstract/privacy.py +267 -0
  9. powermem/agent/abstract/scope.py +199 -0
  10. powermem/agent/agent.py +791 -0
  11. powermem/agent/components/__init__.py +18 -0
  12. powermem/agent/components/collaboration_coordinator.py +645 -0
  13. powermem/agent/components/permission_controller.py +586 -0
  14. powermem/agent/components/privacy_protector.py +767 -0
  15. powermem/agent/components/scope_controller.py +685 -0
  16. powermem/agent/factories/__init__.py +16 -0
  17. powermem/agent/factories/agent_factory.py +266 -0
  18. powermem/agent/factories/config_factory.py +308 -0
  19. powermem/agent/factories/memory_factory.py +229 -0
  20. powermem/agent/implementations/__init__.py +16 -0
  21. powermem/agent/implementations/hybrid.py +728 -0
  22. powermem/agent/implementations/multi_agent.py +1040 -0
  23. powermem/agent/implementations/multi_user.py +1020 -0
  24. powermem/agent/types.py +53 -0
  25. powermem/agent/wrappers/__init__.py +14 -0
  26. powermem/agent/wrappers/agent_memory_wrapper.py +427 -0
  27. powermem/agent/wrappers/compatibility_wrapper.py +520 -0
  28. powermem/config_loader.py +318 -0
  29. powermem/configs.py +249 -0
  30. powermem/core/__init__.py +19 -0
  31. powermem/core/async_memory.py +1493 -0
  32. powermem/core/audit.py +258 -0
  33. powermem/core/base.py +165 -0
  34. powermem/core/memory.py +1567 -0
  35. powermem/core/setup.py +162 -0
  36. powermem/core/telemetry.py +215 -0
  37. powermem/integrations/__init__.py +17 -0
  38. powermem/integrations/embeddings/__init__.py +13 -0
  39. powermem/integrations/embeddings/aws_bedrock.py +100 -0
  40. powermem/integrations/embeddings/azure_openai.py +55 -0
  41. powermem/integrations/embeddings/base.py +31 -0
  42. powermem/integrations/embeddings/config/base.py +132 -0
  43. powermem/integrations/embeddings/configs.py +31 -0
  44. powermem/integrations/embeddings/factory.py +48 -0
  45. powermem/integrations/embeddings/gemini.py +39 -0
  46. powermem/integrations/embeddings/huggingface.py +41 -0
  47. powermem/integrations/embeddings/langchain.py +35 -0
  48. powermem/integrations/embeddings/lmstudio.py +29 -0
  49. powermem/integrations/embeddings/mock.py +11 -0
  50. powermem/integrations/embeddings/ollama.py +53 -0
  51. powermem/integrations/embeddings/openai.py +49 -0
  52. powermem/integrations/embeddings/qwen.py +102 -0
  53. powermem/integrations/embeddings/together.py +31 -0
  54. powermem/integrations/embeddings/vertexai.py +54 -0
  55. powermem/integrations/llm/__init__.py +18 -0
  56. powermem/integrations/llm/anthropic.py +87 -0
  57. powermem/integrations/llm/base.py +132 -0
  58. powermem/integrations/llm/config/anthropic.py +56 -0
  59. powermem/integrations/llm/config/azure.py +56 -0
  60. powermem/integrations/llm/config/base.py +62 -0
  61. powermem/integrations/llm/config/deepseek.py +56 -0
  62. powermem/integrations/llm/config/ollama.py +56 -0
  63. powermem/integrations/llm/config/openai.py +79 -0
  64. powermem/integrations/llm/config/qwen.py +68 -0
  65. powermem/integrations/llm/config/qwen_asr.py +46 -0
  66. powermem/integrations/llm/config/vllm.py +56 -0
  67. powermem/integrations/llm/configs.py +26 -0
  68. powermem/integrations/llm/deepseek.py +106 -0
  69. powermem/integrations/llm/factory.py +118 -0
  70. powermem/integrations/llm/gemini.py +201 -0
  71. powermem/integrations/llm/langchain.py +65 -0
  72. powermem/integrations/llm/ollama.py +106 -0
  73. powermem/integrations/llm/openai.py +166 -0
  74. powermem/integrations/llm/openai_structured.py +80 -0
  75. powermem/integrations/llm/qwen.py +207 -0
  76. powermem/integrations/llm/qwen_asr.py +171 -0
  77. powermem/integrations/llm/vllm.py +106 -0
  78. powermem/integrations/rerank/__init__.py +20 -0
  79. powermem/integrations/rerank/base.py +43 -0
  80. powermem/integrations/rerank/config/__init__.py +7 -0
  81. powermem/integrations/rerank/config/base.py +27 -0
  82. powermem/integrations/rerank/configs.py +23 -0
  83. powermem/integrations/rerank/factory.py +68 -0
  84. powermem/integrations/rerank/qwen.py +159 -0
  85. powermem/intelligence/__init__.py +17 -0
  86. powermem/intelligence/ebbinghaus_algorithm.py +354 -0
  87. powermem/intelligence/importance_evaluator.py +361 -0
  88. powermem/intelligence/intelligent_memory_manager.py +284 -0
  89. powermem/intelligence/manager.py +148 -0
  90. powermem/intelligence/plugin.py +229 -0
  91. powermem/prompts/__init__.py +29 -0
  92. powermem/prompts/graph/graph_prompts.py +217 -0
  93. powermem/prompts/graph/graph_tools_prompts.py +469 -0
  94. powermem/prompts/importance_evaluation.py +246 -0
  95. powermem/prompts/intelligent_memory_prompts.py +163 -0
  96. powermem/prompts/templates.py +193 -0
  97. powermem/storage/__init__.py +14 -0
  98. powermem/storage/adapter.py +896 -0
  99. powermem/storage/base.py +109 -0
  100. powermem/storage/config/base.py +13 -0
  101. powermem/storage/config/oceanbase.py +58 -0
  102. powermem/storage/config/pgvector.py +52 -0
  103. powermem/storage/config/sqlite.py +27 -0
  104. powermem/storage/configs.py +159 -0
  105. powermem/storage/factory.py +59 -0
  106. powermem/storage/migration_manager.py +438 -0
  107. powermem/storage/oceanbase/__init__.py +8 -0
  108. powermem/storage/oceanbase/constants.py +162 -0
  109. powermem/storage/oceanbase/oceanbase.py +1384 -0
  110. powermem/storage/oceanbase/oceanbase_graph.py +1441 -0
  111. powermem/storage/pgvector/__init__.py +7 -0
  112. powermem/storage/pgvector/pgvector.py +420 -0
  113. powermem/storage/sqlite/__init__.py +0 -0
  114. powermem/storage/sqlite/sqlite.py +218 -0
  115. powermem/storage/sqlite/sqlite_vector_store.py +311 -0
  116. powermem/utils/__init__.py +35 -0
  117. powermem/utils/utils.py +605 -0
  118. powermem/version.py +23 -0
  119. powermem-0.1.0.dist-info/METADATA +187 -0
  120. powermem-0.1.0.dist-info/RECORD +123 -0
  121. powermem-0.1.0.dist-info/WHEEL +5 -0
  122. powermem-0.1.0.dist-info/licenses/LICENSE +206 -0
  123. powermem-0.1.0.dist-info/top_level.txt +1 -0
powermem/integrations/llm/config/qwen_asr.py
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+class QwenASRConfig(BaseLLMConfig):
+    """
+    Configuration class for Qwen ASR-specific parameters.
+    Inherits from BaseLLMConfig and adds ASR-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters (only model and api_key are used for ASR)
+        model: Optional[str] = None,
+        api_key: Optional[str] = None,
+        # ASR-specific parameters
+        dashscope_base_url: Optional[str] = None,
+        asr_options: Optional[dict] = None,
+        result_format: str = "message",
+    ):
+        """
+        Initialize Qwen ASR configuration.
+
+        Args:
+            model: Qwen ASR model to use, defaults to "qwen3-asr-flash"
+            api_key: DashScope API key, defaults to None
+            dashscope_base_url: DashScope API base URL, defaults to None
+            asr_options: ASR-specific options (e.g., language, enable_itn), defaults to {"enable_itn": True}
+            result_format: Result format for ASR response, defaults to "message"
+        """
+        # Initialize base parameters with defaults (ASR doesn't use these parameters)
+        super().__init__(
+            model=model,
+            api_key=api_key,
+        )
+
+        # ASR-specific parameters
+        self.dashscope_base_url = dashscope_base_url
+        # Default asr_options with enable_itn enabled
+        if asr_options is None:
+            self.asr_options = {"enable_itn": True}
+        else:
+            self.asr_options = asr_options
+        self.result_format = result_format
+
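For context, a minimal usage sketch of this config (not part of the published diff); the API key value is a placeholder, and the "qwen3-asr-flash" default mentioned in the docstring is applied by the ASR class itself, not here:

    from powermem.integrations.llm.config.qwen_asr import QwenASRConfig

    # Explicit options replace the {"enable_itn": True} default entirely
    cfg = QwenASRConfig(api_key="<DASHSCOPE_API_KEY>", asr_options={"language": "en"})
    assert cfg.asr_options == {"language": "en"}

    # With asr_options omitted, the default kicks in
    assert QwenASRConfig().asr_options == {"enable_itn": True}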
powermem/integrations/llm/config/vllm.py
@@ -0,0 +1,56 @@
+from typing import Optional
+
+from powermem.integrations.llm.config.base import BaseLLMConfig
+
+
+class VllmConfig(BaseLLMConfig):
+    """
+    Configuration class for vLLM-specific parameters.
+    Inherits from BaseLLMConfig and adds vLLM-specific settings.
+    """
+
+    def __init__(
+        self,
+        # Base parameters
+        model: Optional[str] = None,
+        temperature: float = 0.1,
+        api_key: Optional[str] = None,
+        max_tokens: int = 2000,
+        top_p: float = 0.1,
+        top_k: int = 1,
+        enable_vision: bool = False,
+        vision_details: Optional[str] = "auto",
+        http_client_proxies: Optional[dict] = None,
+        # vLLM-specific parameters
+        vllm_base_url: Optional[str] = None,
+    ):
+        """
+        Initialize vLLM configuration.
+
+        Args:
+            model: vLLM model to use, defaults to None
+            temperature: Controls randomness, defaults to 0.1
+            api_key: vLLM API key, defaults to None
+            max_tokens: Maximum tokens to generate, defaults to 2000
+            top_p: Nucleus sampling parameter, defaults to 0.1
+            top_k: Top-k sampling parameter, defaults to 1
+            enable_vision: Enable vision capabilities, defaults to False
+            vision_details: Vision detail level, defaults to "auto"
+            http_client_proxies: HTTP client proxy settings, defaults to None
+            vllm_base_url: vLLM base URL, defaults to None
+        """
+        # Initialize base parameters
+        super().__init__(
+            model=model,
+            temperature=temperature,
+            api_key=api_key,
+            max_tokens=max_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            enable_vision=enable_vision,
+            vision_details=vision_details,
+            http_client_proxies=http_client_proxies,
+        )
+
+        # vLLM-specific parameters
+        self.vllm_base_url = vllm_base_url or "http://localhost:8000/v1"
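The only vLLM-specific field is the base URL, which falls back to a local server. A short sketch (the model name and host are placeholders):

    from powermem.integrations.llm.config.vllm import VllmConfig

    cfg = VllmConfig(model="my-served-model")
    assert cfg.vllm_base_url == "http://localhost:8000/v1"    # default fallback

    cfg = VllmConfig(model="my-served-model", vllm_base_url="http://gpu-host:8000/v1")
    assert cfg.vllm_base_url == "http://gpu-host:8000/v1"     # explicit URL wins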
powermem/integrations/llm/configs.py
@@ -0,0 +1,26 @@
+from typing import Optional
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class LLMConfig(BaseModel):
+    provider: str = Field(description="Provider of the LLM (e.g., 'ollama', 'openai')", default="openai")
+    config: Optional[dict] = Field(description="Configuration for the specific LLM", default={})
+
+    @field_validator("config")
+    def validate_config(cls, v, info):
+        provider = info.data.get("provider")
+        if provider in (
+            "openai",
+            "ollama",
+            "anthropic",
+            "openai_structured",
+            "gemini",
+            "deepseek",
+            "vllm",
+            "langchain",
+            "qwen",
+        ):
+            return v
+        else:
+            raise ValueError(f"Unsupported LLM provider: {provider}")
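Note that the validator is attached to the config field, so under Pydantic v2 it only fires when config is passed explicitly (field defaults are skipped unless validate_default=True). A sketch of both paths, with placeholder values:

    from pydantic import ValidationError
    from powermem.integrations.llm.configs import LLMConfig

    LLMConfig(provider="openai", config={"model": "gpt-4o-mini"})   # passes validation

    try:
        LLMConfig(provider="no-such-provider", config={})
    except ValidationError as err:
        print(err)   # wraps "Unsupported LLM provider: no-such-provider"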
powermem/integrations/llm/deepseek.py
@@ -0,0 +1,106 @@
+import json
+import os
+from typing import Dict, List, Optional, Union
+
+from openai import OpenAI
+from powermem.integrations.llm import LLMBase
+from powermem.integrations.llm.config.base import BaseLLMConfig
+from powermem.integrations.llm.config.deepseek import DeepSeekConfig
+from powermem.utils.utils import extract_json
+
+
+class DeepSeekLLM(LLMBase):
+    def __init__(self, config: Optional[Union[BaseLLMConfig, DeepSeekConfig, Dict]] = None):
+        # Convert to DeepSeekConfig if needed
+        if config is None:
+            config = DeepSeekConfig()
+        elif isinstance(config, dict):
+            config = DeepSeekConfig(**config)
+        elif isinstance(config, BaseLLMConfig) and not isinstance(config, DeepSeekConfig):
+            # Convert BaseLLMConfig to DeepSeekConfig
+            config = DeepSeekConfig(
+                model=config.model,
+                temperature=config.temperature,
+                api_key=config.api_key,
+                max_tokens=config.max_tokens,
+                top_p=config.top_p,
+                top_k=config.top_k,
+                enable_vision=config.enable_vision,
+                vision_details=config.vision_details,
+                http_client_proxies=config.http_client,
+            )
+
+        super().__init__(config)
+
+        if not self.config.model:
+            self.config.model = "deepseek-chat"
+
+        api_key = self.config.api_key or os.getenv("DEEPSEEK_API_KEY")
+        base_url = self.config.deepseek_base_url or os.getenv("DEEPSEEK_API_BASE") or "https://api.deepseek.com"
+        self.client = OpenAI(api_key=api_key, base_url=base_url)
+
+    def _parse_response(self, response, tools):
+        """
+        Process the response based on whether tools are used or not.
+
+        Args:
+            response: The raw response from API.
+            tools: The list of tools provided in the request.
+
+        Returns:
+            str or dict: The processed response.
+        """
+        if tools:
+            processed_response = {
+                "content": response.choices[0].message.content,
+                "tool_calls": [],
+            }
+
+            if response.choices[0].message.tool_calls:
+                for tool_call in response.choices[0].message.tool_calls:
+                    processed_response["tool_calls"].append(
+                        {
+                            "name": tool_call.function.name,
+                            "arguments": json.loads(extract_json(tool_call.function.arguments)),
+                        }
+                    )
+
+            return processed_response
+        else:
+            return response.choices[0].message.content
+
+    def generate_response(
+        self,
+        messages: List[Dict[str, str]],
+        response_format=None,
+        tools: Optional[List[Dict]] = None,
+        tool_choice: str = "auto",
+        **kwargs,
+    ):
+        """
+        Generate a response based on the given messages using DeepSeek.
+
+        Args:
+            messages (list): List of message dicts containing 'role' and 'content'.
+            response_format (str or object, optional): Format of the response. Defaults to "text".
+            tools (list, optional): List of tools that the model can call. Defaults to None.
+            tool_choice (str, optional): Tool choice method. Defaults to "auto".
+            **kwargs: Additional DeepSeek-specific parameters.
+
+        Returns:
+            str: The generated response.
+        """
+        params = self._get_supported_params(messages=messages, **kwargs)
+        params.update(
+            {
+                "model": self.config.model,
+                "messages": messages,
+            }
+        )
+
+        if tools:
+            params["tools"] = tools
+            params["tool_choice"] = tool_choice
+
+        response = self.client.chat.completions.create(**params)
+        return self._parse_response(response, tools)
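Because the client is the OpenAI SDK pointed at DeepSeek's endpoint, usage mirrors the other chat backends. A hedged sketch, assuming DEEPSEEK_API_KEY is exported and that _get_supported_params (inherited from LLMBase) passes through standard sampling parameters:

    from powermem.integrations.llm.deepseek import DeepSeekLLM

    llm = DeepSeekLLM({"temperature": 0.0})   # dict input is coerced to DeepSeekConfig
    text = llm.generate_response([{"role": "user", "content": "Say hello in one word."}])
    print(text)   # a plain string with no tools; a dict with "tool_calls" when tools are passed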
powermem/integrations/llm/factory.py
@@ -0,0 +1,118 @@
+import importlib
+from typing import Dict, Optional, Union
+
+from powermem.integrations.llm.config.anthropic import AnthropicConfig
+from powermem.integrations.llm.config.base import BaseLLMConfig
+from powermem.integrations.llm.config.deepseek import DeepSeekConfig
+from powermem.integrations.llm.config.ollama import OllamaConfig
+from powermem.integrations.llm.config.openai import OpenAIConfig
+from powermem.integrations.llm.config.qwen import QwenConfig
+from powermem.integrations.llm.config.qwen_asr import QwenASRConfig
+from powermem.integrations.llm.config.vllm import VllmConfig
+
+
+def load_class(class_type):
+    module_path, class_name = class_type.rsplit(".", 1)
+    module = importlib.import_module(module_path)
+    return getattr(module, class_name)
+
+
+class LLMFactory:
+    """
+    Factory for creating LLM instances with appropriate configurations.
+    Supports both old-style BaseLLMConfig and new provider-specific configs.
+    """
+
+    # Provider mappings with their config classes
+    provider_to_class = {
+        "ollama": ("powermem.integrations.llm.ollama.OllamaLLM", OllamaConfig),
+        "openai": ("powermem.integrations.llm.openai.OpenAILLM", OpenAIConfig),
+        "openai_structured": ("powermem.integrations.llm.openai_structured.OpenAIStructuredLLM", OpenAIConfig),
+        "anthropic": ("powermem.integrations.llm.anthropic.AnthropicLLM", AnthropicConfig),
+        "gemini": ("powermem.integrations.llm.gemini.GeminiLLM", BaseLLMConfig),
+        "deepseek": ("powermem.integrations.llm.deepseek.DeepSeekLLM", DeepSeekConfig),
+        "vllm": ("powermem.integrations.llm.vllm.VllmLLM", VllmConfig),
+        "langchain": ("powermem.integrations.llm.langchain.LangchainLLM", BaseLLMConfig),
+        "qwen": ("powermem.integrations.llm.qwen.QwenLLM", QwenConfig),
+        "qwen_asr": ("powermem.integrations.llm.qwen_asr.QwenASR", QwenASRConfig),
+    }
+
+    @classmethod
+    def create(cls, provider_name: str, config: Optional[Union[BaseLLMConfig, Dict]] = None, **kwargs):
+        """
+        Create an LLM instance with the appropriate configuration.
+
+        Args:
+            provider_name (str): The provider name (e.g., 'openai', 'anthropic')
+            config: Configuration object or dict. If None, will create default config
+            **kwargs: Additional configuration parameters
+
+        Returns:
+            Configured LLM instance
+
+        Raises:
+            ValueError: If provider is not supported
+        """
+        if provider_name not in cls.provider_to_class:
+            raise ValueError(f"Unsupported LLM provider: {provider_name}")
+
+        class_type, config_class = cls.provider_to_class[provider_name]
+        llm_class = load_class(class_type)
+
+        # Handle configuration
+        if config is None:
+            # Create default config with kwargs
+            config = config_class(**kwargs)
+        elif isinstance(config, dict):
+            # Merge dict config with kwargs
+            config.update(kwargs)
+            config = config_class(**config)
+        elif isinstance(config, BaseLLMConfig):
+            # Convert base config to provider-specific config if needed
+            if config_class != BaseLLMConfig:
+                # Convert to provider-specific config
+                config_dict = {
+                    "model": config.model,
+                    "temperature": config.temperature,
+                    "api_key": config.api_key,
+                    "max_tokens": config.max_tokens,
+                    "top_p": config.top_p,
+                    "top_k": config.top_k,
+                    "enable_vision": config.enable_vision,
+                    "vision_details": config.vision_details,
+                    "http_client_proxies": config.http_client,
+                }
+                config_dict.update(kwargs)
+                config = config_class(**config_dict)
+            else:
+                # Use base config as-is
+                pass
+        else:
+            # Assume it's already the correct config type
+            pass
+
+        return llm_class(config)
+
+    @classmethod
+    def register_provider(cls, name: str, class_path: str, config_class=None):
+        """
+        Register a new provider.
+
+        Args:
+            name (str): Provider name
+            class_path (str): Full path to LLM class
+            config_class: Configuration class for the provider (defaults to BaseLLMConfig)
+        """
+        if config_class is None:
+            config_class = BaseLLMConfig
+        cls.provider_to_class[name] = (class_path, config_class)
+
+    @classmethod
+    def get_supported_providers(cls) -> list:
+        """
+        Get list of supported providers.
+
+        Returns:
+            list: List of supported provider names
+        """
+        return list(cls.provider_to_class.keys())
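A short sketch of the factory's three entry points, following the code above; the custom class path in register_provider is hypothetical:

    from powermem.integrations.llm.factory import LLMFactory

    print(LLMFactory.get_supported_providers())        # ['ollama', 'openai', ...]

    # kwargs alone: a default provider-specific config is built from them
    llm = LLMFactory.create("deepseek", temperature=0.2)

    # Registering a provider not shipped with the package (path is hypothetical)
    LLMFactory.register_provider("my_llm", "my_pkg.llms.MyLLM")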
powermem/integrations/llm/gemini.py
@@ -0,0 +1,201 @@
+import os
+from typing import Dict, List, Optional
+
+from powermem.integrations.llm import LLMBase
+from powermem.integrations.llm.config.base import BaseLLMConfig
+
+try:
+    from google import genai
+    from google.genai import types
+except ImportError:
+    raise ImportError("The 'google-genai' library is required. Please install it using 'pip install google-genai'.")
+
+
+class GeminiLLM(LLMBase):
+    def __init__(self, config: Optional[BaseLLMConfig] = None):
+        super().__init__(config)
+
+        if not self.config.model:
+            self.config.model = "gemini-2.0-flash"
+
+        api_key = self.config.api_key or os.getenv("GOOGLE_API_KEY")
+        self.client = genai.Client(api_key=api_key)
+
+    def _parse_response(self, response, tools):
+        """
+        Process the response based on whether tools are used or not.
+
+        Args:
+            response: The raw response from API.
+            tools: The list of tools provided in the request.
+
+        Returns:
+            str or dict: The processed response.
+        """
+        if tools:
+            processed_response = {
+                "content": None,
+                "tool_calls": [],
+            }
+
+            # Extract content from the first candidate
+            if response.candidates and response.candidates[0].content.parts:
+                for part in response.candidates[0].content.parts:
+                    if hasattr(part, "text") and part.text:
+                        processed_response["content"] = part.text
+                        break
+
+            # Extract function calls
+            if response.candidates and response.candidates[0].content.parts:
+                for part in response.candidates[0].content.parts:
+                    if hasattr(part, "function_call") and part.function_call:
+                        fn = part.function_call
+                        processed_response["tool_calls"].append(
+                            {
+                                "name": fn.name,
+                                "arguments": dict(fn.args) if fn.args else {},
+                            }
+                        )
+
+            return processed_response
+        else:
+            if response.candidates and response.candidates[0].content.parts:
+                for part in response.candidates[0].content.parts:
+                    if hasattr(part, "text") and part.text:
+                        return part.text
+            return ""
+
+    def _reformat_messages(self, messages: List[Dict[str, str]]):
+        """
+        Reformat messages for Gemini.
+
+        Args:
+            messages: The list of messages provided in the request.
+
+        Returns:
+            tuple: (system_instruction, contents_list)
+        """
+        system_instruction = None
+        contents = []
+
+        for message in messages:
+            if message["role"] == "system":
+                system_instruction = message["content"]
+            else:
+                content = types.Content(
+                    parts=[types.Part(text=message["content"])],
+                    role=message["role"],
+                )
+                contents.append(content)
+
+        return system_instruction, contents
+
+    def _reformat_tools(self, tools: Optional[List[Dict]]):
+        """
+        Reformat tools for Gemini.
+
+        Args:
+            tools: The list of tools provided in the request.
+
+        Returns:
+            list: The list of tools in the required format.
+        """
+
+        def remove_additional_properties(data):
+            """Recursively removes 'additionalProperties' from nested dictionaries."""
+            if isinstance(data, dict):
+                filtered_dict = {
+                    key: remove_additional_properties(value)
+                    for key, value in data.items()
+                    if not (key == "additionalProperties")
+                }
+                return filtered_dict
+            else:
+                return data
+
+        if tools:
+            function_declarations = []
+            for tool in tools:
+                func = tool["function"].copy()
+                cleaned_func = remove_additional_properties(func)
+
+                function_declaration = types.FunctionDeclaration(
+                    name=cleaned_func["name"],
+                    description=cleaned_func.get("description", ""),
+                    parameters=cleaned_func.get("parameters", {}),
+                )
+                function_declarations.append(function_declaration)
+
+            tool_obj = types.Tool(function_declarations=function_declarations)
+            return [tool_obj]
+        else:
+            return None
+
+    def generate_response(
+        self,
+        messages: List[Dict[str, str]],
+        response_format=None,
+        tools: Optional[List[Dict]] = None,
+        tool_choice: str = "auto",
+    ):
+        """
+        Generate a response based on the given messages using Gemini.
+
+        Args:
+            messages (list): List of message dicts containing 'role' and 'content'.
+            response_format (str or object, optional): Format for the response. Defaults to "text".
+            tools (list, optional): List of tools that the model can call. Defaults to None.
+            tool_choice (str, optional): Tool choice method. Defaults to "auto".
+
+        Returns:
+            str: The generated response.
+        """
+
+        # Extract system instruction and reformat messages
+        system_instruction, contents = self._reformat_messages(messages)
+
+        # Prepare generation config
+        config_params = {
+            "temperature": self.config.temperature,
+            "max_output_tokens": self.config.max_tokens,
+            "top_p": self.config.top_p,
+        }
+
+        # Add system instruction to config if present
+        if system_instruction:
+            config_params["system_instruction"] = system_instruction
+
+        if response_format is not None and response_format["type"] == "json_object":
+            config_params["response_mime_type"] = "application/json"
+            if "schema" in response_format:
+                config_params["response_schema"] = response_format["schema"]
+
+        if tools:
+            formatted_tools = self._reformat_tools(tools)
+            config_params["tools"] = formatted_tools
+
+            if tool_choice:
+                if tool_choice == "auto":
+                    mode = types.FunctionCallingConfigMode.AUTO
+                elif tool_choice == "any":
+                    mode = types.FunctionCallingConfigMode.ANY
+                else:
+                    mode = types.FunctionCallingConfigMode.NONE
+
+                tool_config = types.ToolConfig(
+                    function_calling_config=types.FunctionCallingConfig(
+                        mode=mode,
+                        allowed_function_names=(
+                            [tool["function"]["name"] for tool in tools] if tool_choice == "any" else None
+                        ),
+                    )
+                )
+                config_params["tool_config"] = tool_config
+
+        generation_config = types.GenerateContentConfig(**config_params)
+
+        response = self.client.models.generate_content(
+            model=self.config.model, contents=contents, config=generation_config
+        )
+
+        return self._parse_response(response, tools)
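The dict returned by the tools branch of _parse_response is the contract callers rely on. A sketch of a tool-calling round trip, assuming GOOGLE_API_KEY is set; the get_weather tool is purely illustrative:

    from powermem.integrations.llm.gemini import GeminiLLM

    tools = [{
        "function": {
            "name": "get_weather",
            "description": "Look up current weather",
            "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
        }
    }]

    llm = GeminiLLM()
    out = llm.generate_response(
        [{"role": "user", "content": "What's the weather in Paris?"}],
        tools=tools,
        tool_choice="any",   # forces a call restricted to the listed function names
    )
    print(out["tool_calls"])   # e.g. [{'name': 'get_weather', 'arguments': {'city': 'Paris'}}]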
powermem/integrations/llm/langchain.py
@@ -0,0 +1,65 @@
+from typing import Dict, List, Optional
+
+from powermem.integrations.llm import LLMBase
+from powermem.integrations.llm.config.base import BaseLLMConfig
+
+try:
+    from langchain.chat_models.base import BaseChatModel
+except ImportError:
+    raise ImportError("langchain is not installed. Please install it using `pip install langchain`")
+
+
+class LangchainLLM(LLMBase):
+    def __init__(self, config: Optional[BaseLLMConfig] = None):
+        super().__init__(config)
+
+        if self.config.model is None:
+            raise ValueError("`model` parameter is required")
+
+        if not isinstance(self.config.model, BaseChatModel):
+            raise ValueError("`model` must be an instance of BaseChatModel")
+
+        self.langchain_model = self.config.model
+
+    def generate_response(
+        self,
+        messages: List[Dict[str, str]],
+        response_format=None,
+        tools: Optional[List[Dict]] = None,
+        tool_choice: str = "auto",
+    ):
+        """
+        Generate a response based on the given messages using LangChain.
+
+        Args:
+            messages (list): List of message dicts containing 'role' and 'content'.
+            response_format (str or object, optional): Format of the response. Not used in LangChain.
+            tools (list, optional): List of tools that the model can call. Not used in LangChain.
+            tool_choice (str, optional): Tool choice method. Not used in LangChain.
+
+        Returns:
+            str: The generated response.
+        """
+        try:
+            # Convert the messages to LangChain's tuple format
+            langchain_messages = []
+            for message in messages:
+                role = message["role"]
+                content = message["content"]
+
+                if role == "system":
+                    langchain_messages.append(("system", content))
+                elif role == "user":
+                    langchain_messages.append(("human", content))
+                elif role == "assistant":
+                    langchain_messages.append(("ai", content))
+
+            if not langchain_messages:
+                raise ValueError("No valid messages found in the messages list")
+
+            ai_message = self.langchain_model.invoke(langchain_messages)
+
+            return ai_message.content
+
+        except Exception as e:
+            raise Exception(f"Error generating response using langchain model: {str(e)}")
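Here config.model carries a LangChain chat-model instance rather than a model name string. A sketch under the assumption that langchain-openai is installed; ChatOpenAI is just one BaseChatModel implementation, any other would work the same way:

    from langchain_openai import ChatOpenAI
    from powermem.integrations.llm.config.base import BaseLLMConfig
    from powermem.integrations.llm.langchain import LangchainLLM

    config = BaseLLMConfig(model=ChatOpenAI(model="gpt-4o-mini"))
    llm = LangchainLLM(config)
    print(llm.generate_response([{"role": "user", "content": "Hello!"}]))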