dm-aioaiagent 0.2.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dm_aioaiagent/__init__.py CHANGED
@@ -2,3 +2,4 @@ from dotenv import load_dotenv
2
2
  load_dotenv()
3
3
  from .ai_agent import DMAIAgent
4
4
  from .async_ai_agent import DMAioAIAgent
5
+ from .types import Message
dm_aioaiagent/ai_agent.py CHANGED
@@ -1,42 +1,22 @@
1
- import json
2
1
  import os
3
- from typing import Optional, Literal, Union
4
- from typing_extensions import TypedDict
5
- from pydantic import BaseModel, Field
2
+ from itertools import dropwhile
6
3
  from threading import Thread
7
4
  from langchain_openai import ChatOpenAI
8
5
  from langchain_core.tools import BaseTool
9
6
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
10
- from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage, AIMessage, ToolMessage
7
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
11
8
  from langgraph.graph import StateGraph
12
9
  from dm_logger import DMLogger
13
10
 
14
- __all__ = ["DMAIAgent"]
15
-
16
-
17
- class Message(TypedDict):
18
- role: Literal["user", "ai"]
19
- content: str
20
-
21
-
22
- class InnerState(BaseModel):
23
- messages: list[BaseMessage] = Field(default=[])
24
- context: list[Message] = Field(default=[])
25
-
26
-
27
- class InputState(BaseModel):
28
- messages: list[Message]
29
- inner_state: Optional[InnerState] = Field(default=InnerState())
30
-
11
+ from .types import *
31
12
 
32
- class OutputState(TypedDict):
33
- answer: str
34
- context: list[Message]
13
+ __all__ = ["DMAIAgent"]
35
14
 
36
15
 
37
16
  class DMAIAgent:
38
17
  agent_name = "AIAgent"
39
18
  _allowed_roles = ("user", "ai")
19
+ MAX_MEMORY_MESSAGES = 20 # Only INT greater than 0
40
20
 
41
21
  def __init__(
42
22
  self,
@@ -47,15 +27,17 @@ class DMAIAgent:
47
27
  temperature: int = 1,
48
28
  agent_name: str = None,
49
29
  input_output_logging: bool = True,
50
- return_context: bool = False
30
+ is_memory_enabled: bool = True,
31
+ max_memory_messages: int = None,
51
32
  ):
52
33
  if not os.getenv("OPENAI_API_KEY"):
53
- raise EnvironmentError("OPENAI_API_KEY environment variable is not set!")
34
+ raise EnvironmentError("'OPENAI_API_KEY' environment variable is not set!")
54
35
 
55
36
  self._logger = DMLogger(agent_name or self.agent_name)
56
- self._input_output_logging = input_output_logging
57
- self._return_context = return_context
58
37
  self._is_tools_exists = bool(tools)
38
+ self._input_output_logging = bool(input_output_logging)
39
+ self._is_memory_enabled = bool(is_memory_enabled)
40
+ self._max_memory_messages = self._validate_max_memory_messages(max_memory_messages)
59
41
 
60
42
  prompt = ChatPromptTemplate.from_messages([SystemMessage(content=system_message),
61
43
  MessagesPlaceholder(variable_name="messages")])
@@ -64,8 +46,9 @@ class DMAIAgent:
64
46
  self._tool_map = {t.name: t for t in tools}
65
47
  llm = llm.bind_tools(tools)
66
48
  self._agent = prompt | llm
49
+ self._memory = {}
67
50
 
68
- workflow = StateGraph(input=InputState, output=OutputState)
51
+ workflow = StateGraph(State)
69
52
  workflow.add_node("Prepare messages", self._prepare_messages_node)
70
53
  workflow.add_node("Invoke LLM", self._invoke_llm_node)
71
54
  workflow.add_node("Execute tool", self._execute_tool_node)
@@ -80,40 +63,49 @@ class DMAIAgent:
80
63
  workflow.set_finish_point("Exit")
81
64
  self._graph = workflow.compile()
82
65
 
83
- def run(self, messages: list[Message]) -> Union[str, OutputState]:
84
- state = self._graph.invoke({"messages": messages})
85
- if self._return_context:
86
- return state
87
- return state["answer"]
66
+ def run(self, input_messages: InputMessagesType, memory_id: str = None) -> ResponseType:
67
+ state = self._graph.invoke({"input_messages": input_messages, "memory_id": memory_id})
68
+ return state["response"]
69
+
70
+ def get_memory_messages(self, memory_id: str = None) -> list[BaseMessage]:
71
+ return self._memory.get(self._validate_memory_id(memory_id), [])
72
+
73
+ def clear_memory(self, memory_id: str = None) -> None:
74
+ self._memory[self._validate_memory_id(memory_id)] = []
75
+
76
+ def _prepare_messages_node(self, state: State) -> State:
77
+ state.memory_id = self._validate_memory_id(state.memory_id)
78
+ state.input_messages = state.input_messages or [{"role": "user", "content": ""}]
79
+ for item in state.input_messages:
80
+ if isinstance(item, dict):
81
+ role = item.get("role")
82
+ content = item.get("content")
83
+ if not role or role not in self._allowed_roles or not content:
84
+ continue
85
+ if role == "ai":
86
+ MessageClass = AIMessage
87
+ else:
88
+ MessageClass = HumanMessage
89
+ state.messages.append(MessageClass(content))
90
+ elif isinstance(item, BaseMessage):
91
+ state.messages.append(item)
88
92
 
89
- def _prepare_messages_node(self, state: InputState) -> InputState:
90
- state.messages = state.messages or [{"role": "user", "content": "Привіт"}]
91
- state.inner_state = InnerState()
92
93
  if self._input_output_logging:
93
- self._logger.debug(input_messages=state.messages)
94
-
95
- for item in state.messages:
96
- role = item.get("role")
97
- content = item.get("content")
98
- if not role or role not in self._allowed_roles or not content:
99
- continue
100
- if role == "ai":
101
- MessageClass = AIMessage
102
- else:
103
- MessageClass = HumanMessage
104
- state.inner_state.messages.append(MessageClass(content))
94
+ self._logger.debug(f"Query:\n{state.messages[-1].content}", memory_id=state.memory_id)
95
+ if self._is_memory_enabled:
96
+ state.messages = self.get_memory_messages(state.memory_id) + state.messages
105
97
  return state
106
98
 
107
- def _invoke_llm_node(self, state: InputState) -> InputState:
99
+ def _invoke_llm_node(self, state: State) -> State:
108
100
  self._logger.debug("Run node: Invoke LLM")
109
- ai_response = self._agent.invoke({"messages": state.inner_state.messages})
110
- state.inner_state.messages.append(ai_response)
101
+ ai_response = self._agent.invoke({"messages": state.messages})
102
+ state.messages.append(ai_response)
111
103
  return state
112
104
 
113
- def _execute_tool_node(self, state: InputState) -> InputState:
105
+ def _execute_tool_node(self, state: State) -> State:
114
106
  self._logger.debug("Run node: Execute tool")
115
107
  threads = []
116
- for tool_call in state.inner_state.messages[-1].tool_calls:
108
+ for tool_call in state.messages[-1].tool_calls:
117
109
  tool_id = tool_call["id"]
118
110
  tool_name = tool_call["name"]
119
111
  tool_args = tool_call["args"]
@@ -130,11 +122,8 @@ class DMAIAgent:
130
122
  tool_response = f"Tool not found!"
131
123
  self._logger.debug(f"Tool response:\n{tool_response}", tool_id=tool_id)
132
124
 
133
- state.inner_state.context.append({"tool_name": tool_name,
134
- "tool_args": json.dumps(tool_args, ensure_ascii=False),
135
- "tool_response": tool_response})
136
125
  tool_message = ToolMessage(content=str(tool_response), name=tool_name, tool_call_id=tool_id)
137
- state.inner_state.messages.append(tool_message)
126
+ state.messages.append(tool_message)
138
127
 
139
128
  threads.append(Thread(target=tool_callback, daemon=True))
140
129
 
@@ -145,19 +134,37 @@ class DMAIAgent:
145
134
 
146
135
  return state
147
136
 
148
- def _exit_node(self, state: InputState) -> OutputState:
149
- answer = state.inner_state.messages[-1].content if state.inner_state.messages else ""
137
+ def _exit_node(self, state: State) -> State:
138
+ answer = state.messages[-1].content
150
139
  if self._input_output_logging:
151
- self._logger.debug(f"Answer:\n{answer}")
152
- return OutputState(answer=answer, context=state.inner_state.context)
140
+ self._logger.debug(f"Answer:\n{answer}", memory_id=state.memory_id)
153
141
 
154
- def _messages_router(self, state: InputState) -> str:
155
- if self._is_tools_exists and state.inner_state.messages[-1].tool_calls:
142
+ if self._is_memory_enabled:
143
+ messages_to_memory = state.messages[-self._max_memory_messages:]
144
+ # drop ToolMessages from the start of the list
145
+ self._memory[state.memory_id] = list(dropwhile(lambda x: isinstance(x, ToolMessage), messages_to_memory))
146
+ state.response = answer
147
+ else:
148
+ state.response = state.messages[len(state.input_messages):]
149
+ return state
150
+
151
+ def _messages_router(self, state: State) -> str:
152
+ if self._is_tools_exists and state.messages[-1].tool_calls:
156
153
  route = "execute_tool"
157
154
  else:
158
155
  route = "exit"
159
156
  return route
160
157
 
158
+ @staticmethod
159
+ def _validate_memory_id(memory_id: Union[str, None]) -> Union[str, int]:
160
+ return str(memory_id) if memory_id else 0
161
+
162
+ @classmethod
163
+ def _validate_max_memory_messages(cls, max_messages_in_memory: int) -> int:
164
+ if isinstance(max_messages_in_memory, int) and max_messages_in_memory > 0:
165
+ return max_messages_in_memory
166
+ return cls.MAX_MEMORY_MESSAGES
167
+
161
168
  def print_graph(self) -> None:
162
169
  self._graph.get_graph().print_ascii()
163
170
 
@@ -1,10 +1,9 @@
1
- import json
2
1
  import sys
3
2
  import asyncio
4
- from typing import Union
5
3
  from langchain_core.messages import ToolMessage
6
4
 
7
- from .ai_agent import DMAIAgent, InputState, OutputState, Message
5
+ from .ai_agent import DMAIAgent
6
+ from .types import *
8
7
 
9
8
  if sys.platform == "win32":
10
9
  asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@@ -15,22 +14,20 @@ __all__ = ["DMAioAIAgent"]
15
14
  class DMAioAIAgent(DMAIAgent):
16
15
  agent_name = "AsyncAIAgent"
17
16
 
18
- async def run(self, messages: list[Message]) -> Union[str, OutputState]:
19
- state = await self._graph.ainvoke({"messages": messages})
20
- if self._return_context:
21
- return state
22
- return state["answer"]
17
+ async def run(self, input_messages: InputMessagesType, memory_id: str = None) -> ResponseType:
18
+ state = await self._graph.ainvoke({"input_messages": input_messages, "memory_id": memory_id})
19
+ return state["response"]
23
20
 
24
- async def _invoke_llm_node(self, state: InputState) -> InputState:
21
+ async def _invoke_llm_node(self, state: State) -> State:
25
22
  self._logger.debug("Run node: Invoke LLM")
26
- ai_response = await self._agent.ainvoke({"messages": state.inner_state.messages})
27
- state.inner_state.messages.append(ai_response)
23
+ ai_response = await self._agent.ainvoke({"messages": state.messages})
24
+ state.messages.append(ai_response)
28
25
  return state
29
26
 
30
- async def _execute_tool_node(self, state: InputState) -> InputState:
27
+ async def _execute_tool_node(self, state: State) -> State:
31
28
  self._logger.debug("Run node: Execute tool")
32
29
  tasks = []
33
- for tool_call in state.inner_state.messages[-1].tool_calls:
30
+ for tool_call in state.messages[-1].tool_calls:
34
31
  tool_id = tool_call["id"]
35
32
  tool_name = tool_call["name"]
36
33
  tool_args = tool_call["args"]
@@ -47,11 +44,8 @@ class DMAioAIAgent(DMAIAgent):
47
44
  tool_response = f"Tool '{tool_name}' not found!"
48
45
  self._logger.debug(f"Tool response:\n{tool_response}", tool_id=tool_id)
49
46
 
50
- state.inner_state.context.append({"tool_name": tool_name,
51
- "tool_args": json.dumps(tool_args, ensure_ascii=False),
52
- "tool_response": tool_response})
53
47
  tool_message = ToolMessage(content=str(tool_response), name=tool_name, tool_call_id=tool_id)
54
- state.inner_state.messages.append(tool_message)
48
+ state.messages.append(tool_message)
55
49
 
56
50
  tasks.append(asyncio.create_task(tool_callback()))
57
51
 
dm_aioaiagent/types.py ADDED
@@ -0,0 +1,20 @@
1
+ from typing import Optional, Literal, Union
2
+ from typing_extensions import TypedDict
3
+ from pydantic import BaseModel, Field
4
+ from langchain_core.messages import BaseMessage
5
+
6
+
7
+ class Message(TypedDict):
8
+ role: Literal["user", "ai"]
9
+ content: str
10
+
11
+
12
+ InputMessagesType = list[Union[Message, BaseMessage]]
13
+ ResponseType = Union[str, list[BaseMessage]]
14
+
15
+
16
+ class State(BaseModel):
17
+ input_messages: InputMessagesType
18
+ memory_id: Union[str, int, None] = Field(default=0)
19
+ messages: Optional[list[BaseMessage]] = Field(default_factory=list)
20
+ response: ResponseType = Field(default="")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dm-aioaiagent
3
- Version: 0.2.0
3
+ Version: 0.3.1
4
4
  Summary: This is my custom aioaiagent client
5
5
  Home-page: https://pypi.org/project/dm-aioaiagent
6
6
  Author: dimka4621
@@ -12,14 +12,14 @@ Classifier: License :: OSI Approved :: MIT License
12
12
  Classifier: Operating System :: OS Independent
13
13
  Requires-Python: >=3.9
14
14
  Description-Content-Type: text/markdown
15
- Requires-Dist: dm-logger==0.5.2
16
- Requires-Dist: python-dotenv==1.0.1
17
- Requires-Dist: pydantic==2.9.2
18
- Requires-Dist: langchain==0.3.0
19
- Requires-Dist: langchain-core==0.3.5
20
- Requires-Dist: langgraph==0.2.23
21
- Requires-Dist: langchain-community==0.3.0
22
- Requires-Dist: langchain-openai==0.2.0
15
+ Requires-Dist: dm-logger~=0.5.2
16
+ Requires-Dist: python-dotenv>=1.0.0
17
+ Requires-Dist: pydantic<3.0.0,>=2.9.2
18
+ Requires-Dist: langchain~=0.3.0
19
+ Requires-Dist: langchain-core~=0.3.5
20
+ Requires-Dist: langgraph~=0.2.23
21
+ Requires-Dist: langchain-community~=0.3.0
22
+ Requires-Dist: langchain-openai~=0.2.0
23
23
 
24
24
  # DM-aioaiagent
25
25
 
@@ -34,6 +34,12 @@ Requires-Dist: langchain-openai==0.2.0
34
34
 
35
35
  Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
36
36
 
37
+ ### Use agent *with* inner memory
38
+
39
+ By default, the agent uses inner memory to store the conversation history.
40
+
41
+ (You can set the *maximum number of messages kept in memory* via the `max_memory_messages` init argument)
42
+
37
43
  ```python
38
44
  import asyncio
39
45
  from dm_aioaiagent import DMAioAIAgent
@@ -52,24 +58,40 @@ async def main():
52
58
  # create an agent
53
59
  ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
54
60
  # if you don't want to see the input and output messages from agent
55
- # you can set input_output_logging=False
61
+ # you can set `input_output_logging=False` init argument
56
62
 
57
- # define the conversation messages
58
- messages = [
59
- {"role": "user", "content": "Hello!"},
60
- {"role": "ai", "content": "How can I help you?"},
61
- {"role": "user", "content": "I want to know the weather in Kyiv"},
63
+ # define the conversation message
64
+ input_messages = [
65
+ {"role": "user", "content": "Hello!"},
66
+ ]
67
+
68
+ # call an agent
69
+ # specify `memory_id` argument to store the conversation history by your custom id
70
+ answer = await ai_agent.run(input_messages)
71
+
72
+ # define the next conversation message
73
+ input_messages = [
74
+ {"role": "user", "content": "I want to know the weather in Kyiv"}
62
75
  ]
63
76
 
64
77
  # call an agent
65
- answer = await ai_agent.run(messages)
78
+ answer = await ai_agent.run(input_messages)
79
+
80
+ # get full conversation history
81
+ conversation_history = ai_agent.get_memory_messages()
82
+
83
+ # clear conversation history
84
+ ai_agent.clear_memory()
66
85
 
67
86
 
68
87
  if __name__ == "__main__":
69
88
  asyncio.run(main())
70
89
  ```
71
90
 
72
- ### Return context of the tools with answer
91
+ ### Use agent *without* inner memory
92
+
93
+ If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
94
+
73
95
  ```python
74
96
  import asyncio
75
97
  from dm_aioaiagent import DMAioAIAgent
@@ -79,25 +101,36 @@ async def main():
79
101
  # define a system message
80
102
  system_message = "Your custom system message with role, backstory and goal"
81
103
 
82
- # define a list of tools, if you want to use them
104
+ # (optional) define a list of tools, if you want to use them
83
105
  tools = [...]
84
106
 
107
+ # define an OpenAI model, default is "gpt-4o-mini"
108
+ model_name = "gpt-4o"
109
+
85
110
  # create an agent
86
- ai_agent = DMAioAIAgent(system_message, tools, return_context=True)
111
+ ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
112
+ is_memory_enabled=False)
113
+ # if you don't want to see the input and output messages from agent
114
+ # you can set input_output_logging=False
87
115
 
88
- # define the conversation messages
116
+ # define the conversation message
89
117
  messages = [
90
- {"role": "user", "content": "Hello!"},
91
- {"role": "ai", "content": "How can I help you?"},
92
- {"role": "user", "content": "I want to know the weather in Kyiv"},
118
+ {"role": "user", "content": "Hello!"}
93
119
  ]
94
120
 
95
- # start the agent
96
- state = await ai_agent.run(messages)
121
+ # call an agent
122
+ new_messages = await ai_agent.run(messages)
123
+
124
+ # add new_messages to messages
125
+ messages.extend(new_messages)
97
126
 
98
- # if you define tools, you can see the context of the tools
99
- answer = state["answer"]
100
- context = state["context"]
127
+ # define the next conversation message
128
+ messages.append(
129
+ {"role": "user", "content": "I want to know the weather in Kyiv"}
130
+ )
131
+
132
+ # call an agent
133
+ new_messages = await ai_agent.run(messages)
101
134
 
102
135
 
103
136
  if __name__ == "__main__":
@@ -0,0 +1,8 @@
1
+ dm_aioaiagent/__init__.py,sha256=8B0XQR-XE2VieU7-LOHCjqDYv0gSYqkgAkj_eq6XAhI,145
2
+ dm_aioaiagent/ai_agent.py,sha256=YqM2KpLZVMqFVWBpmqEzgJSLQSycvB1g9ndxdg2aL4k,7846
3
+ dm_aioaiagent/async_ai_agent.py,sha256=n7OfJUHRwjk92gae9NkpPKQ21Xost2-JsQ0JyzYTXT4,2146
4
+ dm_aioaiagent/types.py,sha256=Xvx0x1GxLAvCot_3CZGgD8BFWwnTEc1lg7moLxkkJoc,587
5
+ dm_aioaiagent-0.3.1.dist-info/METADATA,sha256=BeQ8eugmPWTmAmL7WJwWUc64URWW-5IB6Qg8Fovkq1Q,4299
6
+ dm_aioaiagent-0.3.1.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
7
+ dm_aioaiagent-0.3.1.dist-info/top_level.txt,sha256=CbasLH0KI7zA77XwT6JDCnmRascxKNGvUVV9MgYjHAU,14
8
+ dm_aioaiagent-0.3.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.1.0)
2
+ Generator: setuptools (75.2.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,7 +0,0 @@
1
- dm_aioaiagent/__init__.py,sha256=Hhgz18uMAnR0ezLOukoWCe1sFS4Q4LC26xbRKdu9FFw,118
2
- dm_aioaiagent/ai_agent.py,sha256=SuCOLx8n2rLCOnpilZ5k7KM6KqD4lU-BGy3ZN43gvA8,7149
3
- dm_aioaiagent/async_ai_agent.py,sha256=vrQLNWdvAGQhSZDNMffe9VFT5BPd2WQQhlnx4f3u-UY,2525
4
- dm_aioaiagent-0.2.0.dist-info/METADATA,sha256=PxBpBaUMp2Hgig-wRkpIvXq-EGot7mjfQK7s38-mRIE,3354
5
- dm_aioaiagent-0.2.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
6
- dm_aioaiagent-0.2.0.dist-info/top_level.txt,sha256=CbasLH0KI7zA77XwT6JDCnmRascxKNGvUVV9MgYjHAU,14
7
- dm_aioaiagent-0.2.0.dist-info/RECORD,,