dm-aioaiagent 0.1.3__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dm_aioaiagent-0.3.0/PKG-INFO +168 -0
- dm_aioaiagent-0.3.0/README.md +145 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent/__init__.py +1 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent/ai_agent.py +73 -61
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent/async_ai_agent.py +13 -15
- dm_aioaiagent-0.3.0/dm_aioaiagent/types.py +20 -0
- dm_aioaiagent-0.3.0/dm_aioaiagent.egg-info/PKG-INFO +168 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent.egg-info/SOURCES.txt +1 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/setup.py +1 -1
- dm_aioaiagent-0.1.3/PKG-INFO +0 -103
- dm_aioaiagent-0.1.3/README.md +0 -80
- dm_aioaiagent-0.1.3/dm_aioaiagent.egg-info/PKG-INFO +0 -103
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent.egg-info/dependency_links.txt +0 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent.egg-info/requires.txt +0 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/dm_aioaiagent.egg-info/top_level.txt +0 -0
- {dm_aioaiagent-0.1.3 → dm_aioaiagent-0.3.0}/setup.cfg +0 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: dm-aioaiagent
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: This is my custom aioaiagent client
|
|
5
|
+
Home-page: https://pypi.org/project/dm-aioaiagent
|
|
6
|
+
Author: dimka4621
|
|
7
|
+
Author-email: mismartconfig@gmail.com
|
|
8
|
+
Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
|
|
9
|
+
Keywords: dm aioaiagent
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
11
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
12
|
+
Classifier: Operating System :: OS Independent
|
|
13
|
+
Requires-Python: >=3.9
|
|
14
|
+
Description-Content-Type: text/markdown
|
|
15
|
+
Requires-Dist: dm-logger==0.5.2
|
|
16
|
+
Requires-Dist: python-dotenv==1.0.1
|
|
17
|
+
Requires-Dist: pydantic==2.9.2
|
|
18
|
+
Requires-Dist: langchain==0.3.0
|
|
19
|
+
Requires-Dist: langchain-core==0.3.5
|
|
20
|
+
Requires-Dist: langgraph==0.2.23
|
|
21
|
+
Requires-Dist: langchain-community==0.3.0
|
|
22
|
+
Requires-Dist: langchain-openai==0.2.0
|
|
23
|
+
|
|
24
|
+
# DM-aioaiagent
|
|
25
|
+
|
|
26
|
+
## Urls
|
|
27
|
+
|
|
28
|
+
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
29
|
+
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
30
|
+
|
|
31
|
+
### * Package contains both `asynchronous` and `synchronous` clients
|
|
32
|
+
|
|
33
|
+
## Usage
|
|
34
|
+
|
|
35
|
+
Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
|
|
36
|
+
|
|
37
|
+
### Use agent *with* inner memory
|
|
38
|
+
|
|
39
|
+
By default, agent use inner memory to store the conversation history.
|
|
40
|
+
|
|
41
|
+
(You can set *max count messages in memory* by `max_memory_messages` init argument)
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
import asyncio
|
|
45
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
async def main():
|
|
49
|
+
# define a system message
|
|
50
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
51
|
+
|
|
52
|
+
# (optional) define a list of tools, if you want to use them
|
|
53
|
+
tools = [...]
|
|
54
|
+
|
|
55
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
56
|
+
model_name = "gpt-4o"
|
|
57
|
+
|
|
58
|
+
# create an agent
|
|
59
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
60
|
+
# if you don't want to see the input and output messages from agent
|
|
61
|
+
# you can set `input_output_logging=False` init argument
|
|
62
|
+
|
|
63
|
+
# define the conversation message
|
|
64
|
+
input_messages = [
|
|
65
|
+
{"role": "user", "content": "Hello!"},
|
|
66
|
+
]
|
|
67
|
+
|
|
68
|
+
# call an agent
|
|
69
|
+
# specify `memory_id` argument to store the conversation history by your custom id
|
|
70
|
+
answer = await ai_agent.run(input_messages)
|
|
71
|
+
|
|
72
|
+
# define the next conversation message
|
|
73
|
+
input_messages = [
|
|
74
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
75
|
+
]
|
|
76
|
+
|
|
77
|
+
# call an agent
|
|
78
|
+
answer = await ai_agent.run(input_messages)
|
|
79
|
+
|
|
80
|
+
# get full conversation history
|
|
81
|
+
conversation_history = ai_agent.get_memory_messages()
|
|
82
|
+
|
|
83
|
+
# clear conversation history
|
|
84
|
+
ai_agent.clear_memory()
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
if __name__ == "__main__":
|
|
88
|
+
asyncio.run(main())
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Use agent *without* inner memory
|
|
92
|
+
|
|
93
|
+
If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
|
|
94
|
+
|
|
95
|
+
```python
|
|
96
|
+
import asyncio
|
|
97
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
async def main():
|
|
101
|
+
# define a system message
|
|
102
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
103
|
+
|
|
104
|
+
# (optional) define a list of tools, if you want to use them
|
|
105
|
+
tools = [...]
|
|
106
|
+
|
|
107
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
108
|
+
model_name = "gpt-4o"
|
|
109
|
+
|
|
110
|
+
# create an agent
|
|
111
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
|
|
112
|
+
is_memory_enabled=False)
|
|
113
|
+
# if you don't want to see the input and output messages from agent
|
|
114
|
+
# you can set input_output_logging=False
|
|
115
|
+
|
|
116
|
+
# define the conversation message
|
|
117
|
+
messages = [
|
|
118
|
+
{"role": "user", "content": "Hello!"}
|
|
119
|
+
]
|
|
120
|
+
|
|
121
|
+
# call an agent
|
|
122
|
+
new_messages = await ai_agent.run(messages)
|
|
123
|
+
|
|
124
|
+
# add new_messages to messages
|
|
125
|
+
messages.extend(new_messages)
|
|
126
|
+
|
|
127
|
+
# define the next conversation message
|
|
128
|
+
messages.append(
|
|
129
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
130
|
+
)
|
|
131
|
+
|
|
132
|
+
# call an agent
|
|
133
|
+
new_messages = await ai_agent.run(messages)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
if __name__ == "__main__":
|
|
137
|
+
asyncio.run(main())
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Set custom logger
|
|
141
|
+
|
|
142
|
+
_If you want set up custom logger_
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# create custom logger
|
|
149
|
+
class MyLogger:
|
|
150
|
+
def debug(self, message):
|
|
151
|
+
pass
|
|
152
|
+
|
|
153
|
+
def info(self, message):
|
|
154
|
+
pass
|
|
155
|
+
|
|
156
|
+
def warning(self, message):
|
|
157
|
+
print(message)
|
|
158
|
+
|
|
159
|
+
def error(self, message):
|
|
160
|
+
print(message)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# create an agent
|
|
164
|
+
ai_agent = DMAioAIAgent()
|
|
165
|
+
|
|
166
|
+
# set up custom logger for this agent
|
|
167
|
+
ai_agent.set_logger(MyLogger())
|
|
168
|
+
```
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# DM-aioaiagent
|
|
2
|
+
|
|
3
|
+
## Urls
|
|
4
|
+
|
|
5
|
+
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
6
|
+
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
7
|
+
|
|
8
|
+
### * Package contains both `asynchronous` and `synchronous` clients
|
|
9
|
+
|
|
10
|
+
## Usage
|
|
11
|
+
|
|
12
|
+
Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
|
|
13
|
+
|
|
14
|
+
### Use agent *with* inner memory
|
|
15
|
+
|
|
16
|
+
By default, agent use inner memory to store the conversation history.
|
|
17
|
+
|
|
18
|
+
(You can set *max count messages in memory* by `max_memory_messages` init argument)
|
|
19
|
+
|
|
20
|
+
```python
|
|
21
|
+
import asyncio
|
|
22
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
async def main():
|
|
26
|
+
# define a system message
|
|
27
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
28
|
+
|
|
29
|
+
# (optional) define a list of tools, if you want to use them
|
|
30
|
+
tools = [...]
|
|
31
|
+
|
|
32
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
33
|
+
model_name = "gpt-4o"
|
|
34
|
+
|
|
35
|
+
# create an agent
|
|
36
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
37
|
+
# if you don't want to see the input and output messages from agent
|
|
38
|
+
# you can set `input_output_logging=False` init argument
|
|
39
|
+
|
|
40
|
+
# define the conversation message
|
|
41
|
+
input_messages = [
|
|
42
|
+
{"role": "user", "content": "Hello!"},
|
|
43
|
+
]
|
|
44
|
+
|
|
45
|
+
# call an agent
|
|
46
|
+
# specify `memory_id` argument to store the conversation history by your custom id
|
|
47
|
+
answer = await ai_agent.run(input_messages)
|
|
48
|
+
|
|
49
|
+
# define the next conversation message
|
|
50
|
+
input_messages = [
|
|
51
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
52
|
+
]
|
|
53
|
+
|
|
54
|
+
# call an agent
|
|
55
|
+
answer = await ai_agent.run(input_messages)
|
|
56
|
+
|
|
57
|
+
# get full conversation history
|
|
58
|
+
conversation_history = ai_agent.get_memory_messages()
|
|
59
|
+
|
|
60
|
+
# clear conversation history
|
|
61
|
+
ai_agent.clear_memory()
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
if __name__ == "__main__":
|
|
65
|
+
asyncio.run(main())
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Use agent *without* inner memory
|
|
69
|
+
|
|
70
|
+
If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
|
|
71
|
+
|
|
72
|
+
```python
|
|
73
|
+
import asyncio
|
|
74
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
async def main():
|
|
78
|
+
# define a system message
|
|
79
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
80
|
+
|
|
81
|
+
# (optional) define a list of tools, if you want to use them
|
|
82
|
+
tools = [...]
|
|
83
|
+
|
|
84
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
85
|
+
model_name = "gpt-4o"
|
|
86
|
+
|
|
87
|
+
# create an agent
|
|
88
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
|
|
89
|
+
is_memory_enabled=False)
|
|
90
|
+
# if you don't want to see the input and output messages from agent
|
|
91
|
+
# you can set input_output_logging=False
|
|
92
|
+
|
|
93
|
+
# define the conversation message
|
|
94
|
+
messages = [
|
|
95
|
+
{"role": "user", "content": "Hello!"}
|
|
96
|
+
]
|
|
97
|
+
|
|
98
|
+
# call an agent
|
|
99
|
+
new_messages = await ai_agent.run(messages)
|
|
100
|
+
|
|
101
|
+
# add new_messages to messages
|
|
102
|
+
messages.extend(new_messages)
|
|
103
|
+
|
|
104
|
+
# define the next conversation message
|
|
105
|
+
messages.append(
|
|
106
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
# call an agent
|
|
110
|
+
new_messages = await ai_agent.run(messages)
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
if __name__ == "__main__":
|
|
114
|
+
asyncio.run(main())
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
### Set custom logger
|
|
118
|
+
|
|
119
|
+
_If you want set up custom logger_
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
# create custom logger
|
|
126
|
+
class MyLogger:
|
|
127
|
+
def debug(self, message):
|
|
128
|
+
pass
|
|
129
|
+
|
|
130
|
+
def info(self, message):
|
|
131
|
+
pass
|
|
132
|
+
|
|
133
|
+
def warning(self, message):
|
|
134
|
+
print(message)
|
|
135
|
+
|
|
136
|
+
def error(self, message):
|
|
137
|
+
print(message)
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
# create an agent
|
|
141
|
+
ai_agent = DMAioAIAgent()
|
|
142
|
+
|
|
143
|
+
# set up custom logger for this agent
|
|
144
|
+
ai_agent.set_logger(MyLogger())
|
|
145
|
+
```
|
|
@@ -1,42 +1,22 @@
|
|
|
1
|
-
import json
|
|
2
1
|
import os
|
|
3
|
-
from
|
|
4
|
-
from typing_extensions import TypedDict
|
|
5
|
-
from pydantic import BaseModel, Field
|
|
2
|
+
from itertools import dropwhile
|
|
6
3
|
from threading import Thread
|
|
7
4
|
from langchain_openai import ChatOpenAI
|
|
8
5
|
from langchain_core.tools import BaseTool
|
|
9
6
|
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
|
10
|
-
from langchain_core.messages import
|
|
7
|
+
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
|
|
11
8
|
from langgraph.graph import StateGraph
|
|
12
9
|
from dm_logger import DMLogger
|
|
13
10
|
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
class Message(TypedDict):
|
|
18
|
-
role: Literal["user", "ai"]
|
|
19
|
-
content: str
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
class InnerState(BaseModel):
|
|
23
|
-
messages: list[BaseMessage] = Field(default=[])
|
|
24
|
-
context: list[Message] = Field(default=[])
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
class InputState(BaseModel):
|
|
28
|
-
messages: list[Message]
|
|
29
|
-
inner_state: Optional[InnerState] = Field(default=InnerState())
|
|
30
|
-
|
|
11
|
+
from .types import *
|
|
31
12
|
|
|
32
|
-
|
|
33
|
-
answer: str
|
|
34
|
-
context: list[Message]
|
|
13
|
+
__all__ = ["DMAIAgent"]
|
|
35
14
|
|
|
36
15
|
|
|
37
16
|
class DMAIAgent:
|
|
38
17
|
agent_name = "AIAgent"
|
|
39
18
|
_allowed_roles = ("user", "ai")
|
|
19
|
+
MAX_MEMORY_MESSAGES = 20 # Only INT greater than 0
|
|
40
20
|
|
|
41
21
|
def __init__(
|
|
42
22
|
self,
|
|
@@ -46,14 +26,18 @@ class DMAIAgent:
|
|
|
46
26
|
model: str = "gpt-4o-mini",
|
|
47
27
|
temperature: int = 1,
|
|
48
28
|
agent_name: str = None,
|
|
49
|
-
input_output_logging: bool = True
|
|
29
|
+
input_output_logging: bool = True,
|
|
30
|
+
is_memory_enabled: bool = True,
|
|
31
|
+
max_memory_messages: int = None,
|
|
50
32
|
):
|
|
51
33
|
if not os.getenv("OPENAI_API_KEY"):
|
|
52
|
-
raise EnvironmentError("OPENAI_API_KEY environment variable is not set!")
|
|
34
|
+
raise EnvironmentError("'OPENAI_API_KEY' environment variable is not set!")
|
|
53
35
|
|
|
54
36
|
self._logger = DMLogger(agent_name or self.agent_name)
|
|
55
|
-
self._input_output_logging = input_output_logging
|
|
56
37
|
self._is_tools_exists = bool(tools)
|
|
38
|
+
self._input_output_logging = bool(input_output_logging)
|
|
39
|
+
self._is_memory_enabled = bool(is_memory_enabled)
|
|
40
|
+
self._max_memory_messages = self._validate_max_memory_messages(max_memory_messages)
|
|
57
41
|
|
|
58
42
|
prompt = ChatPromptTemplate.from_messages([SystemMessage(content=system_message),
|
|
59
43
|
MessagesPlaceholder(variable_name="messages")])
|
|
@@ -62,8 +46,9 @@ class DMAIAgent:
|
|
|
62
46
|
self._tool_map = {t.name: t for t in tools}
|
|
63
47
|
llm = llm.bind_tools(tools)
|
|
64
48
|
self._agent = prompt | llm
|
|
49
|
+
self._memory = {}
|
|
65
50
|
|
|
66
|
-
workflow = StateGraph(
|
|
51
|
+
workflow = StateGraph(State)
|
|
67
52
|
workflow.add_node("Prepare messages", self._prepare_messages_node)
|
|
68
53
|
workflow.add_node("Invoke LLM", self._invoke_llm_node)
|
|
69
54
|
workflow.add_node("Execute tool", self._execute_tool_node)
|
|
@@ -78,37 +63,49 @@ class DMAIAgent:
|
|
|
78
63
|
workflow.set_finish_point("Exit")
|
|
79
64
|
self._graph = workflow.compile()
|
|
80
65
|
|
|
81
|
-
def run(self,
|
|
82
|
-
|
|
66
|
+
def run(self, input_messages: InputMessagesType, memory_id: str = None) -> ResponseType:
|
|
67
|
+
state = self._graph.invoke({"input_messages": input_messages, "memory_id": memory_id})
|
|
68
|
+
return state["response"]
|
|
69
|
+
|
|
70
|
+
def get_memory_messages(self, memory_id: str = None) -> list[BaseMessage]:
|
|
71
|
+
return self._memory.get(self._validate_memory_id(memory_id), [])
|
|
72
|
+
|
|
73
|
+
def clear_memory(self, memory_id: str = None) -> None:
|
|
74
|
+
self._memory[self._validate_memory_id(memory_id)] = []
|
|
75
|
+
|
|
76
|
+
def _prepare_messages_node(self, state: State) -> State:
|
|
77
|
+
state.memory_id = self._validate_memory_id(state.memory_id)
|
|
78
|
+
state.input_messages = state.input_messages or [{"role": "user", "content": ""}]
|
|
79
|
+
for item in state.input_messages:
|
|
80
|
+
if isinstance(item, dict):
|
|
81
|
+
role = item.get("role")
|
|
82
|
+
content = item.get("content")
|
|
83
|
+
if not role or role not in self._allowed_roles or not content:
|
|
84
|
+
continue
|
|
85
|
+
if role == "ai":
|
|
86
|
+
MessageClass = AIMessage
|
|
87
|
+
else:
|
|
88
|
+
MessageClass = HumanMessage
|
|
89
|
+
state.messages.append(MessageClass(content))
|
|
90
|
+
elif isinstance(item, BaseMessage):
|
|
91
|
+
state.messages.append(item)
|
|
83
92
|
|
|
84
|
-
def _prepare_messages_node(self, state: InputState) -> InputState:
|
|
85
|
-
state.messages = state.messages or [{"role": "user", "content": "Привіт"}]
|
|
86
|
-
state.inner_state = InnerState()
|
|
87
93
|
if self._input_output_logging:
|
|
88
|
-
self._logger.debug(
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
role = item.get("role")
|
|
92
|
-
content = item.get("content")
|
|
93
|
-
if not role or role not in self._allowed_roles or not content:
|
|
94
|
-
continue
|
|
95
|
-
if role == "ai":
|
|
96
|
-
MessageClass = AIMessage
|
|
97
|
-
else:
|
|
98
|
-
MessageClass = HumanMessage
|
|
99
|
-
state.inner_state.messages.append(MessageClass(content))
|
|
94
|
+
self._logger.debug(f"Query:\n{state.messages[-1].content}", memory_id=state.memory_id)
|
|
95
|
+
if self._is_memory_enabled:
|
|
96
|
+
state.messages = self.get_memory_messages(state.memory_id) + state.messages
|
|
100
97
|
return state
|
|
101
98
|
|
|
102
|
-
def _invoke_llm_node(self, state:
|
|
99
|
+
def _invoke_llm_node(self, state: State) -> State:
|
|
103
100
|
self._logger.debug("Run node: Invoke LLM")
|
|
104
|
-
ai_response = self._agent.invoke({"messages": state.
|
|
105
|
-
state.
|
|
101
|
+
ai_response = self._agent.invoke({"messages": state.messages})
|
|
102
|
+
state.messages.append(ai_response)
|
|
106
103
|
return state
|
|
107
104
|
|
|
108
|
-
def _execute_tool_node(self, state:
|
|
105
|
+
def _execute_tool_node(self, state: State) -> State:
|
|
109
106
|
self._logger.debug("Run node: Execute tool")
|
|
110
107
|
threads = []
|
|
111
|
-
for tool_call in state.
|
|
108
|
+
for tool_call in state.messages[-1].tool_calls:
|
|
112
109
|
tool_id = tool_call["id"]
|
|
113
110
|
tool_name = tool_call["name"]
|
|
114
111
|
tool_args = tool_call["args"]
|
|
@@ -125,11 +122,8 @@ class DMAIAgent:
|
|
|
125
122
|
tool_response = f"Tool not found!"
|
|
126
123
|
self._logger.debug(f"Tool response:\n{tool_response}", tool_id=tool_id)
|
|
127
124
|
|
|
128
|
-
state.inner_state.context.append({"tool_name": tool_name,
|
|
129
|
-
"tool_args": json.dumps(tool_args, ensure_ascii=False),
|
|
130
|
-
"tool_response": tool_response})
|
|
131
125
|
tool_message = ToolMessage(content=str(tool_response), name=tool_name, tool_call_id=tool_id)
|
|
132
|
-
state.
|
|
126
|
+
state.messages.append(tool_message)
|
|
133
127
|
|
|
134
128
|
threads.append(Thread(target=tool_callback, daemon=True))
|
|
135
129
|
|
|
@@ -140,19 +134,37 @@ class DMAIAgent:
|
|
|
140
134
|
|
|
141
135
|
return state
|
|
142
136
|
|
|
143
|
-
def _exit_node(self, state:
|
|
144
|
-
answer = state.
|
|
137
|
+
def _exit_node(self, state: State) -> State:
|
|
138
|
+
answer = state.messages[-1].content
|
|
145
139
|
if self._input_output_logging:
|
|
146
|
-
self._logger.debug(f"Answer:\n{answer}")
|
|
147
|
-
return OutputState(answer=answer, context=state.inner_state.context)
|
|
140
|
+
self._logger.debug(f"Answer:\n{answer}", memory_id=state.memory_id)
|
|
148
141
|
|
|
149
|
-
|
|
150
|
-
|
|
142
|
+
if self._is_memory_enabled:
|
|
143
|
+
messages_to_memory = state.messages[-self._max_memory_messages:]
|
|
144
|
+
# drop ToolsMessages from start of list
|
|
145
|
+
self._memory[state.memory_id] = list(dropwhile(lambda x: isinstance(x, ToolMessage), messages_to_memory))
|
|
146
|
+
state.response = answer
|
|
147
|
+
else:
|
|
148
|
+
state.response = state.messages[len(state.input_messages):]
|
|
149
|
+
return state
|
|
150
|
+
|
|
151
|
+
def _messages_router(self, state: State) -> str:
|
|
152
|
+
if self._is_tools_exists and state.messages[-1].tool_calls:
|
|
151
153
|
route = "execute_tool"
|
|
152
154
|
else:
|
|
153
155
|
route = "exit"
|
|
154
156
|
return route
|
|
155
157
|
|
|
158
|
+
@staticmethod
|
|
159
|
+
def _validate_memory_id(memory_id: Union[str, None]) -> Union[str, int]:
|
|
160
|
+
return str(memory_id) if memory_id else 0
|
|
161
|
+
|
|
162
|
+
@classmethod
|
|
163
|
+
def _validate_max_memory_messages(cls, max_messages_in_memory: int) -> int:
|
|
164
|
+
if isinstance(max_messages_in_memory, int) and max_messages_in_memory > 0:
|
|
165
|
+
return max_messages_in_memory
|
|
166
|
+
return cls.MAX_MEMORY_MESSAGES
|
|
167
|
+
|
|
156
168
|
def print_graph(self) -> None:
|
|
157
169
|
self._graph.get_graph().print_ascii()
|
|
158
170
|
|
|
@@ -1,32 +1,33 @@
|
|
|
1
|
-
import json
|
|
2
1
|
import sys
|
|
3
2
|
import asyncio
|
|
4
3
|
from langchain_core.messages import ToolMessage
|
|
5
4
|
|
|
6
|
-
from .ai_agent import DMAIAgent
|
|
7
|
-
|
|
8
|
-
__all__ = ["DMAioAIAgent"]
|
|
5
|
+
from .ai_agent import DMAIAgent
|
|
6
|
+
from .types import *
|
|
9
7
|
|
|
10
8
|
if sys.platform == "win32":
|
|
11
9
|
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
|
|
12
10
|
|
|
11
|
+
__all__ = ["DMAioAIAgent"]
|
|
12
|
+
|
|
13
13
|
|
|
14
14
|
class DMAioAIAgent(DMAIAgent):
|
|
15
15
|
agent_name = "AsyncAIAgent"
|
|
16
16
|
|
|
17
|
-
async def run(self,
|
|
18
|
-
|
|
17
|
+
async def run(self, input_messages: InputMessagesType, memory_id: str = None) -> ResponseType:
|
|
18
|
+
state = await self._graph.ainvoke({"input_messages": input_messages, "memory_id": memory_id})
|
|
19
|
+
return state["response"]
|
|
19
20
|
|
|
20
|
-
async def _invoke_llm_node(self, state:
|
|
21
|
+
async def _invoke_llm_node(self, state: State) -> State:
|
|
21
22
|
self._logger.debug("Run node: Invoke LLM")
|
|
22
|
-
ai_response = await self._agent.ainvoke({"messages": state.
|
|
23
|
-
state.
|
|
23
|
+
ai_response = await self._agent.ainvoke({"messages": state.messages})
|
|
24
|
+
state.messages.append(ai_response)
|
|
24
25
|
return state
|
|
25
26
|
|
|
26
|
-
async def _execute_tool_node(self, state:
|
|
27
|
+
async def _execute_tool_node(self, state: State) -> State:
|
|
27
28
|
self._logger.debug("Run node: Execute tool")
|
|
28
29
|
tasks = []
|
|
29
|
-
for tool_call in state.
|
|
30
|
+
for tool_call in state.messages[-1].tool_calls:
|
|
30
31
|
tool_id = tool_call["id"]
|
|
31
32
|
tool_name = tool_call["name"]
|
|
32
33
|
tool_args = tool_call["args"]
|
|
@@ -43,11 +44,8 @@ class DMAioAIAgent(DMAIAgent):
|
|
|
43
44
|
tool_response = f"Tool '{tool_name}' not found!"
|
|
44
45
|
self._logger.debug(f"Tool response:\n{tool_response}", tool_id=tool_id)
|
|
45
46
|
|
|
46
|
-
state.inner_state.context.append({"tool_name": tool_name,
|
|
47
|
-
"tool_args": json.dumps(tool_args, ensure_ascii=False),
|
|
48
|
-
"tool_response": tool_response})
|
|
49
47
|
tool_message = ToolMessage(content=str(tool_response), name=tool_name, tool_call_id=tool_id)
|
|
50
|
-
state.
|
|
48
|
+
state.messages.append(tool_message)
|
|
51
49
|
|
|
52
50
|
tasks.append(asyncio.create_task(tool_callback()))
|
|
53
51
|
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from typing import Optional, Literal, Union
|
|
2
|
+
from typing_extensions import TypedDict
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
from langchain_core.messages import BaseMessage
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class Message(TypedDict):
|
|
8
|
+
role: Literal["user", "ai"]
|
|
9
|
+
content: str
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
InputMessagesType = list[Union[Message, BaseMessage]]
|
|
13
|
+
ResponseType = Union[str, list[BaseMessage]]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class State(BaseModel):
|
|
17
|
+
input_messages: InputMessagesType
|
|
18
|
+
memory_id: Union[str, int, None] = Field(default=0)
|
|
19
|
+
messages: Optional[list[BaseMessage]] = Field(default_factory=list)
|
|
20
|
+
response: ResponseType = Field(default="")
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: dm-aioaiagent
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: This is my custom aioaiagent client
|
|
5
|
+
Home-page: https://pypi.org/project/dm-aioaiagent
|
|
6
|
+
Author: dimka4621
|
|
7
|
+
Author-email: mismartconfig@gmail.com
|
|
8
|
+
Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
|
|
9
|
+
Keywords: dm aioaiagent
|
|
10
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
11
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
12
|
+
Classifier: Operating System :: OS Independent
|
|
13
|
+
Requires-Python: >=3.9
|
|
14
|
+
Description-Content-Type: text/markdown
|
|
15
|
+
Requires-Dist: dm-logger==0.5.2
|
|
16
|
+
Requires-Dist: python-dotenv==1.0.1
|
|
17
|
+
Requires-Dist: pydantic==2.9.2
|
|
18
|
+
Requires-Dist: langchain==0.3.0
|
|
19
|
+
Requires-Dist: langchain-core==0.3.5
|
|
20
|
+
Requires-Dist: langgraph==0.2.23
|
|
21
|
+
Requires-Dist: langchain-community==0.3.0
|
|
22
|
+
Requires-Dist: langchain-openai==0.2.0
|
|
23
|
+
|
|
24
|
+
# DM-aioaiagent
|
|
25
|
+
|
|
26
|
+
## Urls
|
|
27
|
+
|
|
28
|
+
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
29
|
+
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
30
|
+
|
|
31
|
+
### * Package contains both `asynchronous` and `synchronous` clients
|
|
32
|
+
|
|
33
|
+
## Usage
|
|
34
|
+
|
|
35
|
+
Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
|
|
36
|
+
|
|
37
|
+
### Use agent *with* inner memory
|
|
38
|
+
|
|
39
|
+
By default, agent use inner memory to store the conversation history.
|
|
40
|
+
|
|
41
|
+
(You can set *max count messages in memory* by `max_memory_messages` init argument)
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
import asyncio
|
|
45
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
async def main():
|
|
49
|
+
# define a system message
|
|
50
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
51
|
+
|
|
52
|
+
# (optional) define a list of tools, if you want to use them
|
|
53
|
+
tools = [...]
|
|
54
|
+
|
|
55
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
56
|
+
model_name = "gpt-4o"
|
|
57
|
+
|
|
58
|
+
# create an agent
|
|
59
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
60
|
+
# if you don't want to see the input and output messages from agent
|
|
61
|
+
# you can set `input_output_logging=False` init argument
|
|
62
|
+
|
|
63
|
+
# define the conversation message
|
|
64
|
+
input_messages = [
|
|
65
|
+
{"role": "user", "content": "Hello!"},
|
|
66
|
+
]
|
|
67
|
+
|
|
68
|
+
# call an agent
|
|
69
|
+
# specify `memory_id` argument to store the conversation history by your custom id
|
|
70
|
+
answer = await ai_agent.run(input_messages)
|
|
71
|
+
|
|
72
|
+
# define the next conversation message
|
|
73
|
+
input_messages = [
|
|
74
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
75
|
+
]
|
|
76
|
+
|
|
77
|
+
# call an agent
|
|
78
|
+
answer = await ai_agent.run(input_messages)
|
|
79
|
+
|
|
80
|
+
# get full conversation history
|
|
81
|
+
conversation_history = ai_agent.get_memory_messages()
|
|
82
|
+
|
|
83
|
+
# clear conversation history
|
|
84
|
+
ai_agent.clear_memory()
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
if __name__ == "__main__":
|
|
88
|
+
asyncio.run(main())
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Use agent *without* inner memory
|
|
92
|
+
|
|
93
|
+
If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
|
|
94
|
+
|
|
95
|
+
```python
|
|
96
|
+
import asyncio
|
|
97
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
async def main():
|
|
101
|
+
# define a system message
|
|
102
|
+
system_message = "Your custom system message with role, backstory and goal"
|
|
103
|
+
|
|
104
|
+
# (optional) define a list of tools, if you want to use them
|
|
105
|
+
tools = [...]
|
|
106
|
+
|
|
107
|
+
# define a openai model, default is "gpt-4o-mini"
|
|
108
|
+
model_name = "gpt-4o"
|
|
109
|
+
|
|
110
|
+
# create an agent
|
|
111
|
+
ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
|
|
112
|
+
is_memory_enabled=False)
|
|
113
|
+
# if you don't want to see the input and output messages from agent
|
|
114
|
+
# you can set input_output_logging=False
|
|
115
|
+
|
|
116
|
+
# define the conversation message
|
|
117
|
+
messages = [
|
|
118
|
+
{"role": "user", "content": "Hello!"}
|
|
119
|
+
]
|
|
120
|
+
|
|
121
|
+
# call an agent
|
|
122
|
+
new_messages = await ai_agent.run(messages)
|
|
123
|
+
|
|
124
|
+
# add new_messages to messages
|
|
125
|
+
messages.extend(new_messages)
|
|
126
|
+
|
|
127
|
+
# define the next conversation message
|
|
128
|
+
messages.append(
|
|
129
|
+
{"role": "user", "content": "I want to know the weather in Kyiv"}
|
|
130
|
+
)
|
|
131
|
+
|
|
132
|
+
# call an agent
|
|
133
|
+
new_messages = await ai_agent.run(messages)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
if __name__ == "__main__":
|
|
137
|
+
asyncio.run(main())
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Set custom logger
|
|
141
|
+
|
|
142
|
+
_If you want set up custom logger_
|
|
143
|
+
|
|
144
|
+
```python
|
|
145
|
+
from dm_aioaiagent import DMAioAIAgent
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
# create custom logger
|
|
149
|
+
class MyLogger:
|
|
150
|
+
def debug(self, message):
|
|
151
|
+
pass
|
|
152
|
+
|
|
153
|
+
def info(self, message):
|
|
154
|
+
pass
|
|
155
|
+
|
|
156
|
+
def warning(self, message):
|
|
157
|
+
print(message)
|
|
158
|
+
|
|
159
|
+
def error(self, message):
|
|
160
|
+
print(message)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# create an agent
|
|
164
|
+
ai_agent = DMAioAIAgent()
|
|
165
|
+
|
|
166
|
+
# set up custom logger for this agent
|
|
167
|
+
ai_agent.set_logger(MyLogger())
|
|
168
|
+
```
|
dm_aioaiagent-0.1.3/PKG-INFO
DELETED
|
@@ -1,103 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.1
|
|
2
|
-
Name: dm-aioaiagent
|
|
3
|
-
Version: 0.1.3
|
|
4
|
-
Summary: This is my custom aioaiagent client
|
|
5
|
-
Home-page: https://pypi.org/project/dm-aioaiagent
|
|
6
|
-
Author: dimka4621
|
|
7
|
-
Author-email: mismartconfig@gmail.com
|
|
8
|
-
Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
|
|
9
|
-
Keywords: dm aioaiagent
|
|
10
|
-
Classifier: Programming Language :: Python :: 3.8
|
|
11
|
-
Classifier: License :: OSI Approved :: MIT License
|
|
12
|
-
Classifier: Operating System :: OS Independent
|
|
13
|
-
Requires-Python: >=3.9
|
|
14
|
-
Description-Content-Type: text/markdown
|
|
15
|
-
Requires-Dist: dm-logger==0.5.2
|
|
16
|
-
Requires-Dist: python-dotenv==1.0.1
|
|
17
|
-
Requires-Dist: pydantic==2.9.2
|
|
18
|
-
Requires-Dist: langchain==0.3.0
|
|
19
|
-
Requires-Dist: langchain-core==0.3.5
|
|
20
|
-
Requires-Dist: langgraph==0.2.23
|
|
21
|
-
Requires-Dist: langchain-community==0.3.0
|
|
22
|
-
Requires-Dist: langchain-openai==0.2.0
|
|
23
|
-
|
|
24
|
-
# DM-aioaiagent
|
|
25
|
-
|
|
26
|
-
## Urls
|
|
27
|
-
|
|
28
|
-
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
29
|
-
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
30
|
-
|
|
31
|
-
### * Package contains both `asynchronous` and `synchronous` clients
|
|
32
|
-
|
|
33
|
-
## Usage
|
|
34
|
-
|
|
35
|
-
Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
|
|
36
|
-
|
|
37
|
-
```python
|
|
38
|
-
import asyncio
|
|
39
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
async def main():
|
|
43
|
-
# define a system message
|
|
44
|
-
system_message = "Your custom system message with role, backstory and goal"
|
|
45
|
-
|
|
46
|
-
# define a list of tools, if you want to use them
|
|
47
|
-
tools = [...]
|
|
48
|
-
|
|
49
|
-
# define an OpenAI model, default is "gpt-4o-mini"
|
|
50
|
-
model_name = "gpt-4o"
|
|
51
|
-
|
|
52
|
-
# create an agent
|
|
53
|
-
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
54
|
-
# you can set input_output_logging=False, if you don't want to see the input and output messages from agent
|
|
55
|
-
|
|
56
|
-
# define the conversation messages
|
|
57
|
-
messages = [
|
|
58
|
-
{"role": "user", "content": "Hello!"},
|
|
59
|
-
{"role": "ai", "content": "How can I help you?"},
|
|
60
|
-
{"role": "user", "content": "I want to know the weather in Kyiv"},
|
|
61
|
-
]
|
|
62
|
-
|
|
63
|
-
# start the agent
|
|
64
|
-
state = await ai_agent.run(messages)
|
|
65
|
-
|
|
66
|
-
# if you define tools, you can see the context of the tools
|
|
67
|
-
answer = state["answer"]
|
|
68
|
-
print(state["context"])
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
if __name__ == "__main__":
|
|
72
|
-
asyncio.run(main())
|
|
73
|
-
```
|
|
74
|
-
|
|
75
|
-
### Set custom logger
|
|
76
|
-
|
|
77
|
-
_If you want to set up a custom logger_
|
|
78
|
-
|
|
79
|
-
```python
|
|
80
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
# create custom logger
|
|
84
|
-
class MyLogger:
|
|
85
|
-
def debug(self, message):
|
|
86
|
-
pass
|
|
87
|
-
|
|
88
|
-
def info(self, message):
|
|
89
|
-
pass
|
|
90
|
-
|
|
91
|
-
def warning(self, message):
|
|
92
|
-
print(message)
|
|
93
|
-
|
|
94
|
-
def error(self, message):
|
|
95
|
-
print(message)
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
# create agent
|
|
99
|
-
ai_agent = DMAioAIAgent()
|
|
100
|
-
|
|
101
|
-
# set up custom logger for this agent
|
|
102
|
-
ai_agent.set_logger(MyLogger())
|
|
103
|
-
```
|
dm_aioaiagent-0.1.3/README.md
DELETED
|
@@ -1,80 +0,0 @@
|
|
|
1
|
-
# DM-aioaiagent
|
|
2
|
-
|
|
3
|
-
## Urls
|
|
4
|
-
|
|
5
|
-
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
6
|
-
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
7
|
-
|
|
8
|
-
### * Package contains both `asynchronous` and `synchronous` clients
|
|
9
|
-
|
|
10
|
-
## Usage
|
|
11
|
-
|
|
12
|
-
`DMAIAgent` is the synchronous analogue of the asynchronous client `DMAioAIAgent`.
|
|
13
|
-
|
|
14
|
-
```python
|
|
15
|
-
import asyncio
|
|
16
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
async def main():
|
|
20
|
-
# define a system message
|
|
21
|
-
system_message = "Your custom system message with role, backstory and goal"
|
|
22
|
-
|
|
23
|
-
# define a list of tools, if you want to use them
|
|
24
|
-
tools = [...]
|
|
25
|
-
|
|
26
|
-
# define an OpenAI model, default is "gpt-4o-mini"
|
|
27
|
-
model_name = "gpt-4o"
|
|
28
|
-
|
|
29
|
-
# create an agent
|
|
30
|
-
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
31
|
-
# you can set input_output_logging=False, if you don't want to see the input and output messages from agent
|
|
32
|
-
|
|
33
|
-
# define the conversation messages
|
|
34
|
-
messages = [
|
|
35
|
-
{"role": "user", "content": "Hello!"},
|
|
36
|
-
{"role": "ai", "content": "How can I help you?"},
|
|
37
|
-
{"role": "user", "content": "I want to know the weather in Kyiv"},
|
|
38
|
-
]
|
|
39
|
-
|
|
40
|
-
# start the agent
|
|
41
|
-
state = await ai_agent.run(messages)
|
|
42
|
-
|
|
43
|
-
# if you define tools, you can see the context of the tools
|
|
44
|
-
answer = state["answer"]
|
|
45
|
-
print(state["context"])
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
if __name__ == "__main__":
|
|
49
|
-
asyncio.run(main())
|
|
50
|
-
```
|
|
51
|
-
|
|
52
|
-
### Set custom logger
|
|
53
|
-
|
|
54
|
-
_If you want to set up a custom logger_
|
|
55
|
-
|
|
56
|
-
```python
|
|
57
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
# create custom logger
|
|
61
|
-
class MyLogger:
|
|
62
|
-
def debug(self, message):
|
|
63
|
-
pass
|
|
64
|
-
|
|
65
|
-
def info(self, message):
|
|
66
|
-
pass
|
|
67
|
-
|
|
68
|
-
def warning(self, message):
|
|
69
|
-
print(message)
|
|
70
|
-
|
|
71
|
-
def error(self, message):
|
|
72
|
-
print(message)
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
# create agent
|
|
76
|
-
ai_agent = DMAioAIAgent()
|
|
77
|
-
|
|
78
|
-
# set up custom logger for this agent
|
|
79
|
-
ai_agent.set_logger(MyLogger())
|
|
80
|
-
```
|
|
@@ -1,103 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.1
|
|
2
|
-
Name: dm-aioaiagent
|
|
3
|
-
Version: 0.1.3
|
|
4
|
-
Summary: This is my custom aioaiagent client
|
|
5
|
-
Home-page: https://pypi.org/project/dm-aioaiagent
|
|
6
|
-
Author: dimka4621
|
|
7
|
-
Author-email: mismartconfig@gmail.com
|
|
8
|
-
Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
|
|
9
|
-
Keywords: dm aioaiagent
|
|
10
|
-
Classifier: Programming Language :: Python :: 3.8
|
|
11
|
-
Classifier: License :: OSI Approved :: MIT License
|
|
12
|
-
Classifier: Operating System :: OS Independent
|
|
13
|
-
Requires-Python: >=3.9
|
|
14
|
-
Description-Content-Type: text/markdown
|
|
15
|
-
Requires-Dist: dm-logger==0.5.2
|
|
16
|
-
Requires-Dist: python-dotenv==1.0.1
|
|
17
|
-
Requires-Dist: pydantic==2.9.2
|
|
18
|
-
Requires-Dist: langchain==0.3.0
|
|
19
|
-
Requires-Dist: langchain-core==0.3.5
|
|
20
|
-
Requires-Dist: langgraph==0.2.23
|
|
21
|
-
Requires-Dist: langchain-community==0.3.0
|
|
22
|
-
Requires-Dist: langchain-openai==0.2.0
|
|
23
|
-
|
|
24
|
-
# DM-aioaiagent
|
|
25
|
-
|
|
26
|
-
## Urls
|
|
27
|
-
|
|
28
|
-
* [PyPI](https://pypi.org/project/dm-aioaiagent)
|
|
29
|
-
* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
|
|
30
|
-
|
|
31
|
-
### * Package contains both `asynchronous` and `synchronous` clients
|
|
32
|
-
|
|
33
|
-
## Usage
|
|
34
|
-
|
|
35
|
-
`DMAIAgent` is the synchronous analogue of the asynchronous client `DMAioAIAgent`.
|
|
36
|
-
|
|
37
|
-
```python
|
|
38
|
-
import asyncio
|
|
39
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
async def main():
|
|
43
|
-
# define a system message
|
|
44
|
-
system_message = "Your custom system message with role, backstory and goal"
|
|
45
|
-
|
|
46
|
-
# define a list of tools, if you want to use them
|
|
47
|
-
tools = [...]
|
|
48
|
-
|
|
49
|
-
# define an OpenAI model, default is "gpt-4o-mini"
|
|
50
|
-
model_name = "gpt-4o"
|
|
51
|
-
|
|
52
|
-
# create an agent
|
|
53
|
-
ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
|
|
54
|
-
# you can set input_output_logging=False, if you don't want to see the input and output messages from agent
|
|
55
|
-
|
|
56
|
-
# define the conversation messages
|
|
57
|
-
messages = [
|
|
58
|
-
{"role": "user", "content": "Hello!"},
|
|
59
|
-
{"role": "ai", "content": "How can I help you?"},
|
|
60
|
-
{"role": "user", "content": "I want to know the weather in Kyiv"},
|
|
61
|
-
]
|
|
62
|
-
|
|
63
|
-
# start the agent
|
|
64
|
-
state = await ai_agent.run(messages)
|
|
65
|
-
|
|
66
|
-
# if you define tools, you can see the context of the tools
|
|
67
|
-
answer = state["answer"]
|
|
68
|
-
print(state["context"])
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
if __name__ == "__main__":
|
|
72
|
-
asyncio.run(main())
|
|
73
|
-
```
|
|
74
|
-
|
|
75
|
-
### Set custom logger
|
|
76
|
-
|
|
77
|
-
_If you want to set up a custom logger_
|
|
78
|
-
|
|
79
|
-
```python
|
|
80
|
-
from dm_aioaiagent import DMAioAIAgent
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
# create custom logger
|
|
84
|
-
class MyLogger:
|
|
85
|
-
def debug(self, message):
|
|
86
|
-
pass
|
|
87
|
-
|
|
88
|
-
def info(self, message):
|
|
89
|
-
pass
|
|
90
|
-
|
|
91
|
-
def warning(self, message):
|
|
92
|
-
print(message)
|
|
93
|
-
|
|
94
|
-
def error(self, message):
|
|
95
|
-
print(message)
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
# create agent
|
|
99
|
-
ai_agent = DMAioAIAgent()
|
|
100
|
-
|
|
101
|
-
# set up custom logger for this agent
|
|
102
|
-
ai_agent.set_logger(MyLogger())
|
|
103
|
-
```
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|