dm-aioaiagent 0.4.8__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.2
1
+ Metadata-Version: 2.4
2
2
  Name: dm-aioaiagent
3
- Version: 0.4.8
3
+ Version: 0.5.0
4
4
  Summary: This is my custom aioaiagent client
5
5
  Home-page: https://pypi.org/project/dm-aioaiagent
6
6
  Author: dimka4621
@@ -12,7 +12,7 @@ Classifier: License :: OSI Approved :: MIT License
12
12
  Classifier: Operating System :: OS Independent
13
13
  Requires-Python: >=3.9
14
14
  Description-Content-Type: text/markdown
15
- Requires-Dist: dm-logger~=0.5.2
15
+ Requires-Dist: dm-logger~=0.6.2
16
16
  Requires-Dist: python-dotenv>=1.0.0
17
17
  Requires-Dist: pydantic<3.0.0,>=2.9.2
18
18
  Requires-Dist: langchain~=0.3.0
@@ -48,6 +48,27 @@ Dynamic: summary
48
48
 
49
49
  `DMAIAgent` is the synchronous analogue of `DMAioAIAgent`.
50
50
 
51
+ ### Windows Setup
52
+
53
+ ```python
54
+ import asyncio
55
+ import sys
56
+
57
+ if sys.platform == "win32":
58
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
59
+ ```
60
+
61
+ ### API Key Setup
62
+
63
+ You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
64
+
65
+ **Use load_dotenv to load the `.env` file.**
66
+
67
+ ```python
68
+ from dotenv import load_dotenv
69
+ load_dotenv()
70
+ ```
71
+
51
72
  ### Use agent *with* inner memory and run *single* message
52
73
 
53
74
  By default, the agent uses inner memory to store the conversation history.
@@ -169,33 +190,3 @@ def main():
169
190
  if __name__ == "__main__":
170
191
  main()
171
192
  ```
172
-
173
- ### Set custom logger
174
-
175
- _If you want set up custom logger_
176
-
177
- ```python
178
- from dm_aioaiagent import DMAioAIAgent
179
-
180
-
181
- # create custom logger
182
- class MyLogger:
183
- def debug(self, message):
184
- pass
185
-
186
- def info(self, message):
187
- pass
188
-
189
- def warning(self, message):
190
- print(message)
191
-
192
- def error(self, message):
193
- print(message)
194
-
195
-
196
- # create an agent
197
- ai_agent = DMAioAIAgent()
198
-
199
- # set up custom logger for this agent
200
- ai_agent.set_logger(MyLogger())
201
- ```
@@ -11,6 +11,27 @@
11
11
 
12
12
  `DMAIAgent` is the synchronous analogue of `DMAioAIAgent`.
13
13
 
14
+ ### Windows Setup
15
+
16
+ ```python
17
+ import asyncio
18
+ import sys
19
+
20
+ if sys.platform == "win32":
21
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
22
+ ```
23
+
24
+ ### API Key Setup
25
+
26
+ You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
27
+
28
+ **Use load_dotenv to load the `.env` file.**
29
+
30
+ ```python
31
+ from dotenv import load_dotenv
32
+ load_dotenv()
33
+ ```
34
+
14
35
  ### Use agent *with* inner memory and run *single* message
15
36
 
16
37
  By default, the agent uses inner memory to store the conversation history.
@@ -132,33 +153,3 @@ def main():
132
153
  if __name__ == "__main__":
133
154
  main()
134
155
  ```
135
-
136
- ### Set custom logger
137
-
138
- _If you want set up custom logger_
139
-
140
- ```python
141
- from dm_aioaiagent import DMAioAIAgent
142
-
143
-
144
- # create custom logger
145
- class MyLogger:
146
- def debug(self, message):
147
- pass
148
-
149
- def info(self, message):
150
- pass
151
-
152
- def warning(self, message):
153
- print(message)
154
-
155
- def error(self, message):
156
- print(message)
157
-
158
-
159
- # create an agent
160
- ai_agent = DMAioAIAgent()
161
-
162
- # set up custom logger for this agent
163
- ai_agent.set_logger(MyLogger())
164
- ```
@@ -1,5 +1,3 @@
1
- from dotenv import load_dotenv
2
- load_dotenv()
3
1
  from .ai_agent import DMAIAgent
4
2
  from .async_ai_agent import DMAioAIAgent
5
3
  from .openai_image_message_content import OpenAIImageMessageContent
@@ -12,8 +12,6 @@ from dm_logger import DMLogger
12
12
 
13
13
  from .types import *
14
14
 
15
- __all__ = ["DMAIAgent"]
16
-
17
15
 
18
16
  class DMAIAgent:
19
17
  MAX_MEMORY_MESSAGES = 20 # Only INT greater than 0
@@ -21,48 +19,66 @@ class DMAIAgent:
21
19
 
22
20
  def __init__(
23
21
  self,
22
+ # general
24
23
  system_message: str = "You are a helpful assistant.",
25
- tools: list[BaseTool] = None,
26
- *,
27
24
  model: str = "gpt-4o-mini",
28
25
  temperature: float = None,
29
- parallel_tool_calls: bool = None,
26
+ output_schema: OutputSchemaType = None, # IF you set output_schema, tools and memory will be disabled
30
27
  agent_name: str = "AIAgent",
31
- input_output_logging: bool = True,
28
+ # tools
29
+ tools: list[BaseTool] = None,
30
+ parallel_tool_calls: bool = None,
31
+ # memory
32
32
  is_memory_enabled: bool = True,
33
- max_memory_messages: int = MAX_MEMORY_MESSAGES,
34
33
  save_tools_responses_in_memory: bool = True,
35
- llm_provider_api_key: str = "",
36
- llm_provider_base_url: str = "",
34
+ max_memory_messages: int = MAX_MEMORY_MESSAGES,
35
+ # other
36
+ input_output_logging: bool = True,
37
37
  response_if_request_fail: str = "I can't provide a response right now. Please try again later.",
38
- response_if_invalid_image: str = "The image is unavailable or the link is incorrect."
38
+ response_if_invalid_image: str = "The image is unavailable or the link is incorrect.",
39
+ # llm provider
40
+ llm_provider_api_key: str = "",
41
+ llm_provider_base_url: str = ""
39
42
  ):
40
43
  self._logger = DMLogger(agent_name)
41
- self._input_output_logging = bool(input_output_logging)
42
44
 
45
+ # general
43
46
  self._system_message = str(system_message)
44
- self._tools = tools or []
45
- self._is_tools_exists = bool(tools)
46
47
  self._model = str(model)
47
48
  self._temperature = temperature
49
+ self._output_schema = self._validate_output_schema(output_schema)
50
+ if self._output_schema:
51
+ tools = []
52
+ parallel_tool_calls = None
53
+ is_memory_enabled = False
54
+ # tools
55
+ self._tools = tools or []
56
+ self._is_tools_exists = bool(tools)
48
57
  self._parallel_tool_calls = parallel_tool_calls
49
- self._llm_provider_api_key = str(llm_provider_api_key)
50
- self._llm_provider_base_url = str(llm_provider_base_url)
51
-
58
+ # memory
52
59
  self._memory_messages = []
53
60
  self._is_memory_enabled = bool(is_memory_enabled)
54
61
  self._save_tools_responses_in_memory = bool(save_tools_responses_in_memory)
55
62
  self._max_memory_messages = self._validate_max_memory_messages(max_memory_messages)
63
+ # other
64
+ self._input_output_logging = bool(input_output_logging)
56
65
  self._response_if_request_fail = str(response_if_request_fail)
57
66
  self._response_if_invalid_image = str(response_if_invalid_image)
67
+ # llm provider
68
+ self._llm_provider_api_key = str(llm_provider_api_key)
69
+ self._llm_provider_base_url = str(llm_provider_base_url)
58
70
 
59
71
  self._check_langsmith_envs()
60
72
  self._init_agent()
61
73
  self._init_graph()
62
74
 
63
- def run(self, query: str, **kwargs) -> str:
64
- new_messages = self.run_messages(messages=[{"role": "user", "content": query}], **kwargs)
65
- return new_messages[-1].content
75
+ def run(self, query: str, **kwargs) -> Union[str, TypedDict, BaseModel]:
76
+ new_messages = self.run_messages(messages=[TextMessage(role="user", content=query)], **kwargs)
77
+
78
+ last_message = new_messages[-1]
79
+ if isinstance(last_message, AIMessage):
80
+ return last_message.content
81
+ return last_message
66
82
 
67
83
  def run_messages(
68
84
  self,
@@ -85,8 +101,10 @@ class DMAIAgent:
85
101
  "tags": ls_tags,
86
102
  "run_id": ls_run_id
87
103
  }
88
- state = self._graph.invoke(input={"messages": messages, "new_messages": []},
89
- config={k: v for k, v in config_data.items() if v})
104
+ state = self._graph.invoke(
105
+ input={"messages": messages, "new_messages": []},
106
+ config={k: v for k, v in config_data.items() if v}
107
+ )
90
108
  return state["new_messages"]
91
109
 
92
110
  @property
@@ -126,12 +144,18 @@ class DMAIAgent:
126
144
  except Exception as e:
127
145
  self._logger.error(e)
128
146
  if second_attempt:
129
- response = self._response_if_invalid_image if "invalid_image_url" in str(e) else self._response_if_request_fail
147
+ if "invalid_image_url" in str(e):
148
+ response = self._response_if_invalid_image
149
+ else:
150
+ response = self._response_if_request_fail
151
+
130
152
  ai_response = AIMessage(content=response)
131
153
  state["messages"].append(ai_response)
132
154
  state["new_messages"].append(ai_response)
133
155
  return state
156
+
134
157
  return self._invoke_llm_node(state, second_attempt=True)
158
+
135
159
  state["messages"].append(ai_response)
136
160
  state["new_messages"].append(ai_response)
137
161
  return state
@@ -139,6 +163,7 @@ class DMAIAgent:
139
163
  def _execute_tool_node(self, state: State) -> State:
140
164
  self._logger.debug("Run node: Execute tool")
141
165
  threads = []
166
+
142
167
  for tool_call in state["messages"][-1].tool_calls:
143
168
  tool_id = tool_call["id"]
144
169
  tool_name = tool_call["name"]
@@ -170,7 +195,10 @@ class DMAIAgent:
170
195
 
171
196
  def _exit_node(self, state: State) -> State:
172
197
  if self._input_output_logging:
173
- self._logger.debug(f'Answer:\n{state["messages"][-1].content}')
198
+ answer = state["messages"][-1]
199
+ if isinstance(answer, AIMessage):
200
+ answer = answer.content
201
+ self._logger.debug(f'Answer:\n{answer}')
174
202
 
175
203
  if self._is_memory_enabled:
176
204
  messages_to_memory = state["messages"][-self._max_memory_messages:]
@@ -186,16 +214,18 @@ class DMAIAgent:
186
214
  return state
187
215
 
188
216
  def _messages_router(self, state: State) -> str:
189
- if self._is_tools_exists and state["messages"][-1].tool_calls:
190
- route = "execute_tool"
191
- else:
192
- route = "exit"
193
- return route
217
+ if self._output_schema:
218
+ return "exit"
219
+ elif self._is_tools_exists and state["messages"][-1].tool_calls:
220
+ return "execute_tool"
221
+ return "exit"
194
222
 
195
223
  def _init_agent(self) -> None:
196
224
  base_kwargs = {"model": self._model}
197
225
  if isinstance(self._temperature, float):
198
226
  base_kwargs["temperature"] = self._temperature
227
+ else:
228
+ ValueError("Temperature must be a float value.")
199
229
  if self._llm_provider_api_key:
200
230
  base_kwargs["api_key"] = SecretStr(self._llm_provider_api_key)
201
231
  if self._llm_provider_base_url:
@@ -220,6 +250,9 @@ class DMAIAgent:
220
250
  self._tool_map = {t.name: t for t in self._tools}
221
251
  llm = llm.bind_tools(self._tools, **bind_tool_kwargs)
222
252
 
253
+ if self._output_schema:
254
+ llm = llm.with_structured_output(self._output_schema)
255
+
223
256
  prompt = ChatPromptTemplate.from_messages([SystemMessage(content=self._system_message),
224
257
  MessagesPlaceholder(variable_name="messages")])
225
258
  self._agent = prompt | llm
@@ -254,6 +287,15 @@ class DMAIAgent:
254
287
  return max_messages_in_memory
255
288
  return cls.MAX_MEMORY_MESSAGES
256
289
 
290
+ @staticmethod
291
+ def _validate_output_schema(schema: OutputSchemaType) -> OutputSchemaType:
292
+ if schema is None:
293
+ return None
294
+ if isinstance(schema, type) and \
295
+ (type(schema).__name__ == "_TypedDictMeta" or issubclass(schema, BaseModel)):
296
+ return schema
297
+ raise ValueError("Output schema must be a TypedDict or BaseModel type, or None.")
298
+
257
299
  def print_graph(self) -> None:
258
300
  self._graph.get_graph().print_ascii()
259
301
 
@@ -264,14 +306,3 @@ class DMAIAgent:
264
306
  f.write(image)
265
307
  except Exception as e:
266
308
  self._logger.error(e)
267
-
268
- def set_logger(self, logger) -> None:
269
- if (
270
- hasattr(logger, "debug") and callable(logger.debug) and
271
- hasattr(logger, "info") and callable(logger.info) and
272
- hasattr(logger, "warning") and callable(logger.warning) and
273
- hasattr(logger, "error") and callable(logger.error)
274
- ):
275
- self._logger = logger
276
- else:
277
- print("Invalid logger")
@@ -6,13 +6,18 @@ from langchain_core.messages import AIMessage, ToolMessage
6
6
  from .ai_agent import DMAIAgent
7
7
  from .types import *
8
8
 
9
- __all__ = ["DMAioAIAgent"]
10
-
11
9
 
12
10
  class DMAioAIAgent(DMAIAgent):
13
- async def run(self, query: str, *args, **kwargs) -> str:
14
- new_messages = await self.run_messages(messages=[{"role": "user", "content": query}], *args, **kwargs)
15
- return new_messages[-1].content
11
+ def __init__(self, *args, **kwargs):
12
+ super().__init__(*args, **kwargs)
13
+
14
+ async def run(self, query: str, *args, **kwargs) -> Union[str, TypedDict, BaseModel]:
15
+ new_messages = await self.run_messages(messages=[TextMessage(role="user", content=query)], *args, **kwargs)
16
+
17
+ last_message = new_messages[-1]
18
+ if isinstance(last_message, AIMessage):
19
+ return last_message.content
20
+ return last_message
16
21
 
17
22
  async def run_messages(
18
23
  self,
@@ -35,8 +40,10 @@ class DMAioAIAgent(DMAIAgent):
35
40
  "tags": ls_tags,
36
41
  "run_id": ls_run_id
37
42
  }
38
- state = await self._graph.ainvoke(input={"messages": messages, "new_messages": []},
39
- config={k: v for k, v in config_data.items() if v})
43
+ state = await self._graph.ainvoke(
44
+ input={"messages": messages, "new_messages": []},
45
+ config={k: v for k, v in config_data.items() if v}
46
+ )
40
47
  return state["new_messages"]
41
48
 
42
49
  async def _invoke_llm_node(self, state: State, second_attempt: bool = False) -> State:
@@ -46,12 +53,18 @@ class DMAioAIAgent(DMAIAgent):
46
53
  except Exception as e:
47
54
  self._logger.error(e)
48
55
  if second_attempt:
49
- response = self._response_if_invalid_image if "invalid_image_url" in str(e) else self._response_if_request_fail
56
+ if "invalid_image_url" in str(e):
57
+ response = self._response_if_invalid_image
58
+ else:
59
+ response = self._response_if_request_fail
60
+
50
61
  ai_response = AIMessage(content=response)
51
62
  state["messages"].append(ai_response)
52
63
  state["new_messages"].append(ai_response)
53
64
  return state
65
+
54
66
  return await self._invoke_llm_node(state, second_attempt=True)
67
+
55
68
  state["messages"].append(ai_response)
56
69
  state["new_messages"].append(ai_response)
57
70
  return state
@@ -59,6 +72,7 @@ class DMAioAIAgent(DMAIAgent):
59
72
  async def _execute_tool_node(self, state: State) -> State:
60
73
  self._logger.debug("Run node: Execute tool")
61
74
  tasks = []
75
+
62
76
  for tool_call in state["messages"][-1].tool_calls:
63
77
  tool_id = tool_call["id"]
64
78
  tool_name = tool_call["name"]
@@ -1,7 +1,10 @@
1
- from typing import Literal, Union
1
+ from typing import Literal, Union, Type
2
2
  from typing_extensions import TypedDict
3
+ from pydantic import BaseModel
3
4
  from langchain_core.messages import BaseMessage
4
5
 
6
+ OutputSchemaType = Union[Type[TypedDict], Type[BaseModel], None]
7
+
5
8
 
6
9
  class ImageMessageTextItem(TypedDict):
7
10
  type: Literal['text']
@@ -25,6 +28,7 @@ class TextMessage(TypedDict):
25
28
 
26
29
  InputMessage = Union[TextMessage, ImageMessage, BaseMessage]
27
30
 
31
+
28
32
  class State(TypedDict):
29
33
  messages: list[InputMessage]
30
34
  new_messages: list[BaseMessage]
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.2
1
+ Metadata-Version: 2.4
2
2
  Name: dm-aioaiagent
3
- Version: 0.4.8
3
+ Version: 0.5.0
4
4
  Summary: This is my custom aioaiagent client
5
5
  Home-page: https://pypi.org/project/dm-aioaiagent
6
6
  Author: dimka4621
@@ -12,7 +12,7 @@ Classifier: License :: OSI Approved :: MIT License
12
12
  Classifier: Operating System :: OS Independent
13
13
  Requires-Python: >=3.9
14
14
  Description-Content-Type: text/markdown
15
- Requires-Dist: dm-logger~=0.5.2
15
+ Requires-Dist: dm-logger~=0.6.2
16
16
  Requires-Dist: python-dotenv>=1.0.0
17
17
  Requires-Dist: pydantic<3.0.0,>=2.9.2
18
18
  Requires-Dist: langchain~=0.3.0
@@ -48,6 +48,27 @@ Dynamic: summary
48
48
 
49
49
  `DMAIAgent` is the synchronous analogue of `DMAioAIAgent`.
50
50
 
51
+ ### Windows Setup
52
+
53
+ ```python
54
+ import asyncio
55
+ import sys
56
+
57
+ if sys.platform == "win32":
58
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
59
+ ```
60
+
61
+ ### API Key Setup
62
+
63
+ You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
64
+
65
+ **Use load_dotenv to load the `.env` file.**
66
+
67
+ ```python
68
+ from dotenv import load_dotenv
69
+ load_dotenv()
70
+ ```
71
+
51
72
  ### Use agent *with* inner memory and run *single* message
52
73
 
53
74
  By default, the agent uses inner memory to store the conversation history.
@@ -169,33 +190,3 @@ def main():
169
190
  if __name__ == "__main__":
170
191
  main()
171
192
  ```
172
-
173
- ### Set custom logger
174
-
175
- _If you want set up custom logger_
176
-
177
- ```python
178
- from dm_aioaiagent import DMAioAIAgent
179
-
180
-
181
- # create custom logger
182
- class MyLogger:
183
- def debug(self, message):
184
- pass
185
-
186
- def info(self, message):
187
- pass
188
-
189
- def warning(self, message):
190
- print(message)
191
-
192
- def error(self, message):
193
- print(message)
194
-
195
-
196
- # create an agent
197
- ai_agent = DMAioAIAgent()
198
-
199
- # set up custom logger for this agent
200
- ai_agent.set_logger(MyLogger())
201
- ```
@@ -1,4 +1,4 @@
1
- dm-logger~=0.5.2
1
+ dm-logger~=0.6.2
2
2
  python-dotenv>=1.0.0
3
3
  pydantic<3.0.0,>=2.9.2
4
4
  langchain~=0.3.0
@@ -8,7 +8,7 @@ def readme():
8
8
 
9
9
  setup(
10
10
  name='dm-aioaiagent',
11
- version='v0.4.8',
11
+ version='v0.5.0',
12
12
  author='dimka4621',
13
13
  author_email='mismartconfig@gmail.com',
14
14
  description='This is my custom aioaiagent client',
@@ -17,7 +17,7 @@ setup(
17
17
  url='https://pypi.org/project/dm-aioaiagent',
18
18
  packages=find_packages(),
19
19
  install_requires=[
20
- 'dm-logger~=0.5.2',
20
+ 'dm-logger~=0.6.2',
21
21
  'python-dotenv>=1.0.0',
22
22
  'pydantic>=2.9.2, < 3.0.0',
23
23
  'langchain~=0.3.0',
File without changes