dm-aioaiagent 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff shows the content changes between publicly released versions of this package. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
dm_aioaiagent/__init__.py CHANGED
@@ -2,5 +2,5 @@ from dotenv import load_dotenv
2
2
  load_dotenv()
3
3
  from .ai_agent import DMAIAgent
4
4
  from .async_ai_agent import DMAioAIAgent
5
- from .image_message_content_builder import ImageMessageContentBuilder
5
+ from .openai_image_message_content import OpenAIImageMessageContent
6
6
  from .types import Message
dm_aioaiagent/ai_agent.py CHANGED
@@ -1,7 +1,7 @@
1
1
  import os
2
+ from pydantic import SecretStr
2
3
  from itertools import dropwhile
3
4
  from threading import Thread
4
- from langchain_openai import ChatOpenAI
5
5
  from langchain_core.tools import BaseTool
6
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
7
7
  from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
@@ -14,11 +14,11 @@ __all__ = ["DMAIAgent"]
14
14
 
15
15
 
16
16
  class DMAIAgent:
17
- agent_name = "AIAgent"
18
- _allowed_roles = ("user", "ai")
19
- _response_if_request_fail = "I can't provide a response right now. Please try again later."
20
- _response_if_invalid_image = "The image is unavailable or the link is incorrect."
17
+ AGENT_NAME = "AIAgent"
21
18
  MAX_MEMORY_MESSAGES = 20 # Only INT greater than 0
19
+ RESPONSE_IF_REQUEST_FAIL = "I can't provide a response right now. Please try again later."
20
+ RESPONSE_IF_INVALID_IMAGE = "The image is unavailable or the link is incorrect."
21
+ _ALLOWED_ROLES = ("user", "ai")
22
22
 
23
23
  def __init__(
24
24
  self,
@@ -30,44 +30,30 @@ class DMAIAgent:
30
30
  agent_name: str = None,
31
31
  input_output_logging: bool = True,
32
32
  is_memory_enabled: bool = True,
33
+ save_tools_responses_in_memory: bool = True,
33
34
  max_memory_messages: int = None,
35
+ llm_provider_api_key: str = "",
34
36
  response_if_request_fail: str = None,
35
37
  response_if_invalid_image: str = None
36
38
  ):
37
- if not os.getenv("OPENAI_API_KEY"):
38
- raise EnvironmentError("'OPENAI_API_KEY' environment variable is not set!")
39
+ self._logger = DMLogger(agent_name or self.AGENT_NAME)
40
+ self._input_output_logging = bool(input_output_logging)
39
41
 
40
- self._logger = DMLogger(agent_name or self.agent_name)
42
+ self._system_message = str(system_message)
43
+ self._tools = tools or []
41
44
  self._is_tools_exists = bool(tools)
42
- self._input_output_logging = bool(input_output_logging)
45
+ self._model = str(model)
46
+ self._temperature = int(temperature)
47
+ self._llm_provider_api_key = str(llm_provider_api_key)
48
+
43
49
  self._is_memory_enabled = bool(is_memory_enabled)
50
+ self._save_tools_responses_in_memory = bool(save_tools_responses_in_memory)
44
51
  self._max_memory_messages = self._validate_max_memory_messages(max_memory_messages)
45
- self._response_if_request_fail = str(response_if_request_fail or self._response_if_request_fail)
46
- self._response_if_invalid_image = str(response_if_invalid_image or self._response_if_invalid_image)
52
+ self._response_if_request_fail = str(response_if_request_fail or self.RESPONSE_IF_REQUEST_FAIL)
53
+ self._response_if_invalid_image = str(response_if_invalid_image or self.RESPONSE_IF_INVALID_IMAGE)
47
54
 
48
- prompt = ChatPromptTemplate.from_messages([SystemMessage(content=system_message),
49
- MessagesPlaceholder(variable_name="messages")])
50
- llm = ChatOpenAI(model=str(model), temperature=int(temperature))
51
- if self._is_tools_exists:
52
- self._tool_map = {t.name: t for t in tools}
53
- llm = llm.bind_tools(tools)
54
- self._agent = prompt | llm
55
- self._memory = {}
56
-
57
- workflow = StateGraph(State)
58
- workflow.add_node("Prepare messages", self._prepare_messages_node)
59
- workflow.add_node("Invoke LLM", self._invoke_llm_node)
60
- workflow.add_node("Execute tool", self._execute_tool_node)
61
- workflow.add_node("Exit", self._exit_node)
62
-
63
- workflow.add_edge("Prepare messages", "Invoke LLM")
64
- workflow.add_conditional_edges(source="Invoke LLM",
65
- path=self._messages_router,
66
- path_map={"execute_tool": "Execute tool", "exit": "Exit"})
67
- workflow.add_edge("Execute tool", "Invoke LLM")
68
- workflow.set_entry_point("Prepare messages")
69
- workflow.set_finish_point("Exit")
70
- self._graph = workflow.compile()
55
+ self._init_agent()
56
+ self._init_graph()
71
57
 
72
58
  def run(self, input_messages: InputMessagesType, memory_id: str = None) -> ResponseType:
73
59
  state = self._graph.invoke({"input_messages": input_messages, "memory_id": memory_id})
@@ -96,7 +82,7 @@ class DMAIAgent:
96
82
  if isinstance(item, dict):
97
83
  role = item.get("role")
98
84
  content = item.get("content")
99
- if not role or role not in self._allowed_roles or not content:
85
+ if not role or role not in self._ALLOWED_ROLES or not content:
100
86
  continue
101
87
  if role == "ai":
102
88
  MessageClass = AIMessage
@@ -168,8 +154,15 @@ class DMAIAgent:
168
154
  if self._is_memory_enabled:
169
155
  memory_id = self._validate_memory_id(state.memory_id)
170
156
  messages_to_memory = state.messages[-self._max_memory_messages:]
171
- # drop ToolsMessages from start of list
172
- self._memory[memory_id] = list(dropwhile(lambda x: isinstance(x, ToolMessage), messages_to_memory))
157
+ if self._save_tools_responses_in_memory:
158
+ # drop ToolsMessages from start of list
159
+ self._memory[memory_id] = list(dropwhile(lambda x: isinstance(x, ToolMessage), messages_to_memory))
160
+ else:
161
+ self._memory[memory_id] = []
162
+ for mes in messages_to_memory:
163
+ if isinstance(mes, ToolMessage) or (isinstance(mes, AIMessage) and mes.tool_calls):
164
+ continue
165
+ self._memory[memory_id].append(mes)
173
166
  state.response = answer
174
167
  else:
175
168
  state.response = state.messages[len(state.input_messages):]
@@ -182,6 +175,49 @@ class DMAIAgent:
182
175
  route = "exit"
183
176
  return route
184
177
 
178
+ def _init_agent(self) -> None:
179
+ if self._llm_provider_api_key:
180
+ self._llm_provider_api_key = SecretStr(self._llm_provider_api_key)
181
+
182
+ if self._model.startswith("gpt"):
183
+ from langchain_openai import ChatOpenAI
184
+
185
+ api_key = SecretStr(self._llm_provider_api_key or os.getenv("OPENAI_API_KEY"))
186
+ llm = ChatOpenAI(model_name=self._model, temperature=self._temperature, openai_api_key=api_key)
187
+ elif self._model.startswith("claude"):
188
+ from langchain_anthropic import ChatAnthropic
189
+
190
+ api_key = SecretStr(self._llm_provider_api_key or os.getenv("ANTHROPIC_API_KEY"))
191
+ llm = ChatAnthropic(model=self._model, temperature=self._temperature, anthropic_api_key=api_key)
192
+ else:
193
+ raise ValueError(f"{self.__class__.__name__} not support this model: '{self._model}'")
194
+
195
+ if self._is_tools_exists:
196
+ self._tool_map = {t.name: t for t in self._tools}
197
+ llm = llm.bind_tools(self._tools)
198
+
199
+ prompt = ChatPromptTemplate.from_messages([SystemMessage(content=self._system_message),
200
+ MessagesPlaceholder(variable_name="messages")])
201
+
202
+ self._agent = prompt | llm
203
+ self._memory = {}
204
+
205
+ def _init_graph(self) -> None:
206
+ workflow = StateGraph(State)
207
+ workflow.add_node("Prepare messages", self._prepare_messages_node)
208
+ workflow.add_node("Invoke LLM", self._invoke_llm_node)
209
+ workflow.add_node("Execute tool", self._execute_tool_node)
210
+ workflow.add_node("Exit", self._exit_node)
211
+
212
+ workflow.add_edge("Prepare messages", "Invoke LLM")
213
+ workflow.add_conditional_edges(source="Invoke LLM",
214
+ path=self._messages_router,
215
+ path_map={"execute_tool": "Execute tool", "exit": "Exit"})
216
+ workflow.add_edge("Execute tool", "Invoke LLM")
217
+ workflow.set_entry_point("Prepare messages")
218
+ workflow.set_finish_point("Exit")
219
+ self._graph = workflow.compile()
220
+
185
221
  @staticmethod
186
222
  def _validate_memory_id(memory_id: Union[str, None]) -> Union[str, int]:
187
223
  return str(memory_id) if memory_id else 0
@@ -1,4 +1,4 @@
1
- class ImageMessageContentBuilder(list):
1
+ class OpenAIImageMessageContent(list):
2
2
  def __init__(self, image_url: str, text: str = None):
3
3
  content = []
4
4
  if isinstance(text, str):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dm-aioaiagent
3
- Version: 0.3.3
3
+ Version: 0.3.5
4
4
  Summary: This is my custom aioaiagent client
5
5
  Home-page: https://pypi.org/project/dm-aioaiagent
6
6
  Author: dimka4621
@@ -141,7 +141,8 @@ if __name__ == "__main__":
141
141
  ### Image vision
142
142
 
143
143
  ```python
144
- from dm_aioaiagent import DMAIAgent, ImageMessageContentBuilder
144
+ from dm_aioaiagent import DMAIAgent, OpenAIImageMessageContent
145
+
145
146
 
146
147
  def main():
147
148
  # create an agent
@@ -149,8 +150,8 @@ def main():
149
150
 
150
151
  # create an image message content
151
152
  # NOTE: text argument is optional
152
- img_content = ImageMessageContentBuilder(image_url="https://your.domain/image",
153
- text="Hello, what is shown in the photo?")
153
+ img_content = OpenAIImageMessageContent(image_url="https://your.domain/image",
154
+ text="Hello, what is shown in the photo?")
154
155
 
155
156
  # define the conversation message
156
157
  messages = [
@@ -163,7 +164,7 @@ def main():
163
164
 
164
165
 
165
166
  if __name__ == "__main__":
166
- main()
167
+ main()
167
168
  ```
168
169
 
169
170
  ### Set custom logger
@@ -0,0 +1,9 @@
1
+ dm_aioaiagent/__init__.py,sha256=ffLO0jGxiY40VYptRjJXCh7t0XKAccGtBXW5FafbFgA,213
2
+ dm_aioaiagent/ai_agent.py,sha256=ZW85hzBpWVFgnnaOlMcpayFyxtH9mtIrc43ZVzdKBBY,10815
3
+ dm_aioaiagent/async_ai_agent.py,sha256=qg7LZRxC7MSzjjburEX09T5EF15ZXAWAd_9RB8vA0oE,2599
4
+ dm_aioaiagent/openai_image_message_content.py,sha256=EP_i0ERCz7c4KOM8UXp2-AI91ntGC7PGQBc5MMNspcs,434
5
+ dm_aioaiagent/types.py,sha256=H2_iICmWr6u9d1-6BtSxdt6qV6Jm8v7Zh8das5kV6I4,989
6
+ dm_aioaiagent-0.3.5.dist-info/METADATA,sha256=B9lKnewVEzH_XCegUY8LsbQqHBWSW7eu1icdstFbH7o,5031
7
+ dm_aioaiagent-0.3.5.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
8
+ dm_aioaiagent-0.3.5.dist-info/top_level.txt,sha256=CbasLH0KI7zA77XwT6JDCnmRascxKNGvUVV9MgYjHAU,14
9
+ dm_aioaiagent-0.3.5.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.3.0)
2
+ Generator: setuptools (75.6.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,9 +0,0 @@
1
- dm_aioaiagent/__init__.py,sha256=pq9gL6E1VN4Tkx9PD83AIW9e_-N5REqJeC0TE8bZua4,215
2
- dm_aioaiagent/ai_agent.py,sha256=TTMSL2B-fdv_q7ytO38kC5fd8uB1Vk71ev7tS5tJMwE,9284
3
- dm_aioaiagent/async_ai_agent.py,sha256=qg7LZRxC7MSzjjburEX09T5EF15ZXAWAd_9RB8vA0oE,2599
4
- dm_aioaiagent/image_message_content_builder.py,sha256=mAJnsWdnpYpaFAdcCsVwgWH480Q4WfVEpM-Y6KdjyEc,435
5
- dm_aioaiagent/types.py,sha256=H2_iICmWr6u9d1-6BtSxdt6qV6Jm8v7Zh8das5kV6I4,989
6
- dm_aioaiagent-0.3.3.dist-info/METADATA,sha256=i06u5vC73yDmx73r5lUCr0Okw1vtf1FwKJgjpuZiKY0,5032
7
- dm_aioaiagent-0.3.3.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
8
- dm_aioaiagent-0.3.3.dist-info/top_level.txt,sha256=CbasLH0KI7zA77XwT6JDCnmRascxKNGvUVV9MgYjHAU,14
9
- dm_aioaiagent-0.3.3.dist-info/RECORD,,