dm-aioaiagent 0.5.6__tar.gz → 0.6.0__tar.gz
This diff compares the contents of publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {dm_aioaiagent-0.5.6/dm_aioaiagent.egg-info → dm_aioaiagent-0.6.0}/PKG-INFO +77 -5
- dm_aioaiagent-0.5.6/PKG-INFO → dm_aioaiagent-0.6.0/README.md +54 -38
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/ai_agent.py +21 -11
- dm_aioaiagent-0.6.0/dm_aioaiagent.egg-info/PKG-INFO +264 -0
- dm_aioaiagent-0.6.0/dm_aioaiagent.egg-info/requires.txt +36 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/setup.py +19 -4
- dm_aioaiagent-0.5.6/README.md +0 -155
- dm_aioaiagent-0.5.6/dm_aioaiagent.egg-info/requires.txt +0 -11
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/__init__.py +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/async_ai_agent.py +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/openai_image_message_content.py +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/types.py +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/SOURCES.txt +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/dependency_links.txt +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/top_level.txt +0 -0
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/setup.cfg +0 -0
{dm_aioaiagent-0.5.6/dm_aioaiagent.egg-info → dm_aioaiagent-0.6.0}/PKG-INFO

````diff
@@ -1,28 +1,46 @@
 Metadata-Version: 2.4
 Name: dm-aioaiagent
-Version: 0.5.6
+Version: 0.6.0
 Summary: This is my custom aioaiagent client
 Home-page: https://pypi.org/project/dm-aioaiagent
 Author: dimka4621
 Author-email: mismartconfig@gmail.com
 Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
 Keywords: dm aioaiagent
-Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 Requires-Dist: dm-logger<0.7.0,>=0.6.6
-Requires-Dist: python-dotenv<1.1.0,>=1.0.0
+Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: pydantic<3.0.0,>=2.9.2
 Requires-Dist: langchain<0.4.0,>=0.3.0
 Requires-Dist: langchain-core<0.4.0,>=0.3.5
 Requires-Dist: langchain-community<0.4.0,>=0.3.0
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0
-Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0
 Requires-Dist: langgraph<0.4.0,>=0.3.23
 Requires-Dist: langsmith<0.4.0,>=0.3.45
 Requires-Dist: grandalf<0.9.0,>=0.8.0
+Provides-Extra: anthropic
+Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0; extra == "anthropic"
+Provides-Extra: gemini
+Requires-Dist: langchain-google-genai<3.0.0,>=2.1.0; extra == "gemini"
+Provides-Extra: groq
+Requires-Dist: langchain-groq<0.4.0,>=0.3.0; extra == "groq"
+Provides-Extra: mistral
+Requires-Dist: langchain-mistralai<0.3.0,>=0.2.0; extra == "mistral"
+Provides-Extra: deepseek
+Requires-Dist: langchain-deepseek<0.2.0,>=0.1.0; extra == "deepseek"
+Provides-Extra: ollama
+Requires-Dist: langchain-ollama<0.4.0,>=0.3.0; extra == "ollama"
+Provides-Extra: all
+Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0; extra == "all"
+Requires-Dist: langchain-google-genai<3.0.0,>=2.1.0; extra == "all"
+Requires-Dist: langchain-groq<0.4.0,>=0.3.0; extra == "all"
+Requires-Dist: langchain-mistralai<0.3.0,>=0.2.0; extra == "all"
+Requires-Dist: langchain-deepseek<0.2.0,>=0.1.0; extra == "all"
+Requires-Dist: langchain-ollama<0.4.0,>=0.3.0; extra == "all"
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
@@ -31,6 +49,7 @@ Dynamic: description-content-type
 Dynamic: home-page
 Dynamic: keywords
 Dynamic: project-url
+Dynamic: provides-extra
 Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary
@@ -44,6 +63,59 @@ Dynamic: summary
 
 ### * Package contains both `asynchronous` and `synchronous` clients
 
+## Installation
+
+By default, the package ships with **OpenAI** support. Other providers are optional extras:
+
+```bash
+pip install dm-aioaiagent                    # OpenAI only
+pip install dm-aioaiagent[anthropic]         # + Anthropic
+pip install dm-aioaiagent[anthropic,gemini]  # several at once
+pip install dm-aioaiagent[all]               # every supported provider
+```
+
+Available extras: `anthropic`, `gemini`, `groq`, `mistral`, `deepseek`, `ollama`, `all`.
+
+If you call a model from a provider whose package is not installed, `init_chat_model` will raise an `ImportError` with the exact `pip install` command you need.
+
+## Providers
+
+Provider resolution is delegated to LangChain's [`init_chat_model`](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) — the agent picks the provider automatically by model name prefix when possible. For everything else, use the `"provider:model"` mask.
+
+```python
+# Auto-detected from model prefix (rules come from LangChain's init_chat_model)
+agent = DMAioAIAgent(model="gpt-4o-mini")               # → openai
+agent = DMAioAIAgent(model="claude-3-5-sonnet-latest")  # → anthropic
+agent = DMAioAIAgent(model="gemini-2.0-flash")          # → google_vertexai (see note below)
+
+# Explicit provider via "provider:model" mask
+agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+agent = DMAioAIAgent(model="groq:llama-3.1-70b-versatile")
+agent = DMAioAIAgent(model="mistralai:mistral-large-latest")
+agent = DMAioAIAgent(model="deepseek:deepseek-chat")
+agent = DMAioAIAgent(model="ollama:llama3.1")
+
+# OpenAI-compatible gateway (OpenRouter, Together, vLLM, LiteLLM proxy, ...)
+# Works without installing any extra — just point to the OpenAI-compatible URL.
+agent = DMAioAIAgent(
+    model="meta-llama/llama-3.1-70b-instruct",
+    llm_provider_base_url="https://openrouter.ai/api/v1",
+    llm_provider_api_key="sk-or-...",
+)
+```
+
+> **Note about Gemini.** LangChain's auto-detect maps the `gemini*` prefix to **`google_vertexai`** (Google Cloud Vertex AI, requires a GCP service account). If you have a regular **Google AI Studio** API key (`GOOGLE_API_KEY`), use the `google_genai:` mask explicitly:
+>
+> ```python
+> agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+> ```
+
+Supported provider keys for the `"provider:model"` mask (list inherited from LangChain): `openai`, `anthropic`, `azure_openai`, `azure_ai`, `google_vertexai`, `google_genai`, `bedrock`, `bedrock_converse`, `cohere`, `fireworks`, `together`, `mistralai`, `huggingface`, `groq`, `ollama`, `google_anthropic_vertex`, `deepseek`, `ibm`, `nvidia`, `xai`, `perplexity`.
+
+### Note about parallel tool calls
+
+`parallel_tool_calls` is currently mapped only for **OpenAI** and **Anthropic** (their APIs use different formats). For other providers the parameter is silently ignored — extend per-provider mapping if you need it.
+
 ## Usage
 
 Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
@@ -60,7 +132,7 @@ if sys.platform == "win32":
 
 ### Api Key Setup
 
-You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
+Each provider reads its API key from a dedicated environment variable, e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`, `GROQ_API_KEY`, `MISTRAL_API_KEY`, etc. Alternatively, pass the key explicitly via the `llm_provider_api_key` argument — useful for multi-tenant setups, custom gateways, or runtime key rotation.
 
 **Use load_dotenv to load the `.env` file.**
 
````
dm_aioaiagent-0.5.6/PKG-INFO → dm_aioaiagent-0.6.0/README.md

````diff
@@ -1,40 +1,3 @@
-Metadata-Version: 2.4
-Name: dm-aioaiagent
-Version: 0.5.6
-Summary: This is my custom aioaiagent client
-Home-page: https://pypi.org/project/dm-aioaiagent
-Author: dimka4621
-Author-email: mismartconfig@gmail.com
-Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
-Keywords: dm aioaiagent
-Classifier: Programming Language :: Python :: 3.8
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.9
-Description-Content-Type: text/markdown
-Requires-Dist: dm-logger<0.7.0,>=0.6.6
-Requires-Dist: python-dotenv<1.1.0,>=1.0.0
-Requires-Dist: pydantic<3.0.0,>=2.9.2
-Requires-Dist: langchain<0.4.0,>=0.3.0
-Requires-Dist: langchain-core<0.4.0,>=0.3.5
-Requires-Dist: langchain-community<0.4.0,>=0.3.0
-Requires-Dist: langchain-openai<0.4.0,>=0.3.0
-Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0
-Requires-Dist: langgraph<0.4.0,>=0.3.23
-Requires-Dist: langsmith<0.4.0,>=0.3.45
-Requires-Dist: grandalf<0.9.0,>=0.8.0
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: description
-Dynamic: description-content-type
-Dynamic: home-page
-Dynamic: keywords
-Dynamic: project-url
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary
-
 # DM-aioaiagent
 
 ## Urls
@@ -44,6 +7,59 @@ Dynamic: summary
 
 ### * Package contains both `asynchronous` and `synchronous` clients
 
+## Installation
+
+By default, the package ships with **OpenAI** support. Other providers are optional extras:
+
+```bash
+pip install dm-aioaiagent                    # OpenAI only
+pip install dm-aioaiagent[anthropic]         # + Anthropic
+pip install dm-aioaiagent[anthropic,gemini]  # several at once
+pip install dm-aioaiagent[all]               # every supported provider
+```
+
+Available extras: `anthropic`, `gemini`, `groq`, `mistral`, `deepseek`, `ollama`, `all`.
+
+If you call a model from a provider whose package is not installed, `init_chat_model` will raise an `ImportError` with the exact `pip install` command you need.
+
+## Providers
+
+Provider resolution is delegated to LangChain's [`init_chat_model`](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) — the agent picks the provider automatically by model name prefix when possible. For everything else, use the `"provider:model"` mask.
+
+```python
+# Auto-detected from model prefix (rules come from LangChain's init_chat_model)
+agent = DMAioAIAgent(model="gpt-4o-mini")               # → openai
+agent = DMAioAIAgent(model="claude-3-5-sonnet-latest")  # → anthropic
+agent = DMAioAIAgent(model="gemini-2.0-flash")          # → google_vertexai (see note below)
+
+# Explicit provider via "provider:model" mask
+agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+agent = DMAioAIAgent(model="groq:llama-3.1-70b-versatile")
+agent = DMAioAIAgent(model="mistralai:mistral-large-latest")
+agent = DMAioAIAgent(model="deepseek:deepseek-chat")
+agent = DMAioAIAgent(model="ollama:llama3.1")
+
+# OpenAI-compatible gateway (OpenRouter, Together, vLLM, LiteLLM proxy, ...)
+# Works without installing any extra — just point to the OpenAI-compatible URL.
+agent = DMAioAIAgent(
+    model="meta-llama/llama-3.1-70b-instruct",
+    llm_provider_base_url="https://openrouter.ai/api/v1",
+    llm_provider_api_key="sk-or-...",
+)
+```
+
+> **Note about Gemini.** LangChain's auto-detect maps the `gemini*` prefix to **`google_vertexai`** (Google Cloud Vertex AI, requires a GCP service account). If you have a regular **Google AI Studio** API key (`GOOGLE_API_KEY`), use the `google_genai:` mask explicitly:
+>
+> ```python
+> agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+> ```
+
+Supported provider keys for the `"provider:model"` mask (list inherited from LangChain): `openai`, `anthropic`, `azure_openai`, `azure_ai`, `google_vertexai`, `google_genai`, `bedrock`, `bedrock_converse`, `cohere`, `fireworks`, `together`, `mistralai`, `huggingface`, `groq`, `ollama`, `google_anthropic_vertex`, `deepseek`, `ibm`, `nvidia`, `xai`, `perplexity`.
+
+### Note about parallel tool calls
+
+`parallel_tool_calls` is currently mapped only for **OpenAI** and **Anthropic** (their APIs use different formats). For other providers the parameter is silently ignored — extend per-provider mapping if you need it.
+
 ## Usage
 
 Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
@@ -60,7 +76,7 @@ if sys.platform == "win32":
 
 ### Api Key Setup
 
-You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
+Each provider reads its API key from a dedicated environment variable, e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`, `GROQ_API_KEY`, `MISTRAL_API_KEY`, etc. Alternatively, pass the key explicitly via the `llm_provider_api_key` argument — useful for multi-tenant setups, custom gateways, or runtime key rotation.
 
 **Use load_dotenv to load the `.env` file.**
 
````
{dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/ai_agent.py

````diff
@@ -4,6 +4,7 @@ from typing import Any
 from pydantic import SecretStr
 from itertools import dropwhile
 from threading import Thread
+from langchain.chat_models import init_chat_model
 from langchain_core.tools import BaseTool
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
@@ -242,29 +243,28 @@ class DMAIAgent:
 
     def _init_agent(self) -> None:
        base_kwargs = {"model": self._model}
-        if
-
-
-
+        if self._temperature is not None:
+            if not isinstance(self._temperature, (int, float)):
+                raise ValueError("Temperature must be a float value.")
+            base_kwargs["temperature"] = float(self._temperature)
         if self._llm_provider_api_key:
             base_kwargs["api_key"] = SecretStr(self._llm_provider_api_key)
         if self._llm_provider_base_url:
             base_kwargs["base_url"] = self._llm_provider_base_url
 
-
-            from langchain_anthropic import ChatAnthropic
+        llm = init_chat_model(**base_kwargs)
 
-
+        provider = self._detect_provider(self._model)
+        if provider == "anthropic":
             bind_tool_kwargs = {"tool_choice": {"type": "auto"}}
             if isinstance(self._parallel_tool_calls, bool):
                 bind_tool_kwargs["tool_choice"]["disable_parallel_tool_use"] = not self._parallel_tool_calls
-
-            from langchain_openai import ChatOpenAI
-
-            llm = ChatOpenAI(**base_kwargs)
+        elif provider == "openai":
             bind_tool_kwargs = {}
             if isinstance(self._parallel_tool_calls, bool):
                 bind_tool_kwargs["parallel_tool_calls"] = self._parallel_tool_calls
+        else:
+            bind_tool_kwargs = {}
 
         if self._is_tools_exists:
             self._tool_map = {t.name: t for t in self._tools}
@@ -277,6 +277,16 @@ class DMAIAgent:
                                            MessagesPlaceholder(variable_name="messages")])
         self._agent = prompt | llm
 
+    @staticmethod
+    def _detect_provider(model: str) -> str:
+        if ":" in model:
+            return model.split(":", 1)[0].replace("-", "_").lower()
+        if model.startswith(("gpt-", "o1", "o3")):
+            return "openai"
+        if model.startswith("claude"):
+            return "anthropic"
+        return ""
+
     def _init_graph(self) -> None:
         workflow = StateGraph(State)
         workflow.add_node("Prepare messages", self._prepare_messages_node)
````
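To make the rewired `_init_agent` easier to follow outside the diff: the hardcoded `ChatOpenAI`/`ChatAnthropic` imports are gone, model construction goes through LangChain's `init_chat_model`, and only the tool-binding kwargs stay provider-specific. A minimal standalone sketch of that flow (the helper names here are illustrative, not part of the package API):

```python
from typing import Optional
from langchain.chat_models import init_chat_model  # LangChain helper adopted in 0.6.0


def detect_provider(model: str) -> str:
    # Mirrors _detect_provider above: an explicit "provider:model" mask wins,
    # otherwise guess from a couple of well-known model-name prefixes.
    if ":" in model:
        return model.split(":", 1)[0].replace("-", "_").lower()
    if model.startswith(("gpt-", "o1", "o3")):
        return "openai"
    if model.startswith("claude"):
        return "anthropic"
    return ""


def tool_bind_kwargs(provider: str, parallel_tool_calls: Optional[bool]) -> dict:
    # parallel_tool_calls is translated only for the two providers whose APIs
    # expose it (in different shapes); every other provider gets no extra kwargs.
    if provider == "anthropic":
        kwargs = {"tool_choice": {"type": "auto"}}
        if isinstance(parallel_tool_calls, bool):
            kwargs["tool_choice"]["disable_parallel_tool_use"] = not parallel_tool_calls
        return kwargs
    if provider == "openai" and isinstance(parallel_tool_calls, bool):
        return {"parallel_tool_calls": parallel_tool_calls}
    return {}


# Provider resolution is LangChain's job; the tool-call mapping stays local.
# Constructing the model requires the provider's langchain-* package and API key.
llm = init_chat_model(model="gpt-4o-mini")
bind_kwargs = tool_bind_kwargs(detect_provider("gpt-4o-mini"), True)
# llm = llm.bind_tools(tools, **bind_kwargs)  # `tools` would come from the caller
```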
dm_aioaiagent-0.6.0/dm_aioaiagent.egg-info/PKG-INFO
ADDED

````diff
@@ -0,0 +1,264 @@
+Metadata-Version: 2.4
+Name: dm-aioaiagent
+Version: 0.6.0
+Summary: This is my custom aioaiagent client
+Home-page: https://pypi.org/project/dm-aioaiagent
+Author: dimka4621
+Author-email: mismartconfig@gmail.com
+Project-URL: GitHub, https://github.com/MykhLibs/dm-aioaiagent
+Keywords: dm aioaiagent
+Classifier: Programming Language :: Python :: 3.9
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+Requires-Dist: dm-logger<0.7.0,>=0.6.6
+Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: pydantic<3.0.0,>=2.9.2
+Requires-Dist: langchain<0.4.0,>=0.3.0
+Requires-Dist: langchain-core<0.4.0,>=0.3.5
+Requires-Dist: langchain-community<0.4.0,>=0.3.0
+Requires-Dist: langchain-openai<0.4.0,>=0.3.0
+Requires-Dist: langgraph<0.4.0,>=0.3.23
+Requires-Dist: langsmith<0.4.0,>=0.3.45
+Requires-Dist: grandalf<0.9.0,>=0.8.0
+Provides-Extra: anthropic
+Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0; extra == "anthropic"
+Provides-Extra: gemini
+Requires-Dist: langchain-google-genai<3.0.0,>=2.1.0; extra == "gemini"
+Provides-Extra: groq
+Requires-Dist: langchain-groq<0.4.0,>=0.3.0; extra == "groq"
+Provides-Extra: mistral
+Requires-Dist: langchain-mistralai<0.3.0,>=0.2.0; extra == "mistral"
+Provides-Extra: deepseek
+Requires-Dist: langchain-deepseek<0.2.0,>=0.1.0; extra == "deepseek"
+Provides-Extra: ollama
+Requires-Dist: langchain-ollama<0.4.0,>=0.3.0; extra == "ollama"
+Provides-Extra: all
+Requires-Dist: langchain-anthropic<0.4.0,>=0.3.0; extra == "all"
+Requires-Dist: langchain-google-genai<3.0.0,>=2.1.0; extra == "all"
+Requires-Dist: langchain-groq<0.4.0,>=0.3.0; extra == "all"
+Requires-Dist: langchain-mistralai<0.3.0,>=0.2.0; extra == "all"
+Requires-Dist: langchain-deepseek<0.2.0,>=0.1.0; extra == "all"
+Requires-Dist: langchain-ollama<0.4.0,>=0.3.0; extra == "all"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: project-url
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# DM-aioaiagent
+
+## Urls
+
+* [PyPI](https://pypi.org/project/dm-aioaiagent)
+* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
+
+### * Package contains both `asynchronous` and `synchronous` clients
+
+## Installation
+
+By default, the package ships with **OpenAI** support. Other providers are optional extras:
+
+```bash
+pip install dm-aioaiagent                    # OpenAI only
+pip install dm-aioaiagent[anthropic]         # + Anthropic
+pip install dm-aioaiagent[anthropic,gemini]  # several at once
+pip install dm-aioaiagent[all]               # every supported provider
+```
+
+Available extras: `anthropic`, `gemini`, `groq`, `mistral`, `deepseek`, `ollama`, `all`.
+
+If you call a model from a provider whose package is not installed, `init_chat_model` will raise an `ImportError` with the exact `pip install` command you need.
+
+## Providers
+
+Provider resolution is delegated to LangChain's [`init_chat_model`](https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html) — the agent picks the provider automatically by model name prefix when possible. For everything else, use the `"provider:model"` mask.
+
+```python
+# Auto-detected from model prefix (rules come from LangChain's init_chat_model)
+agent = DMAioAIAgent(model="gpt-4o-mini")               # → openai
+agent = DMAioAIAgent(model="claude-3-5-sonnet-latest")  # → anthropic
+agent = DMAioAIAgent(model="gemini-2.0-flash")          # → google_vertexai (see note below)
+
+# Explicit provider via "provider:model" mask
+agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+agent = DMAioAIAgent(model="groq:llama-3.1-70b-versatile")
+agent = DMAioAIAgent(model="mistralai:mistral-large-latest")
+agent = DMAioAIAgent(model="deepseek:deepseek-chat")
+agent = DMAioAIAgent(model="ollama:llama3.1")
+
+# OpenAI-compatible gateway (OpenRouter, Together, vLLM, LiteLLM proxy, ...)
+# Works without installing any extra — just point to the OpenAI-compatible URL.
+agent = DMAioAIAgent(
+    model="meta-llama/llama-3.1-70b-instruct",
+    llm_provider_base_url="https://openrouter.ai/api/v1",
+    llm_provider_api_key="sk-or-...",
+)
+```
+
+> **Note about Gemini.** LangChain's auto-detect maps the `gemini*` prefix to **`google_vertexai`** (Google Cloud Vertex AI, requires a GCP service account). If you have a regular **Google AI Studio** API key (`GOOGLE_API_KEY`), use the `google_genai:` mask explicitly:
+>
+> ```python
+> agent = DMAioAIAgent(model="google_genai:gemini-2.0-flash")
+> ```
+
+Supported provider keys for the `"provider:model"` mask (list inherited from LangChain): `openai`, `anthropic`, `azure_openai`, `azure_ai`, `google_vertexai`, `google_genai`, `bedrock`, `bedrock_converse`, `cohere`, `fireworks`, `together`, `mistralai`, `huggingface`, `groq`, `ollama`, `google_anthropic_vertex`, `deepseek`, `ibm`, `nvidia`, `xai`, `perplexity`.
+
+### Note about parallel tool calls
+
+`parallel_tool_calls` is currently mapped only for **OpenAI** and **Anthropic** (their APIs use different formats). For other providers the parameter is silently ignored — extend per-provider mapping if you need it.
+
+## Usage
+
+Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
+
+### Windows Setup
+
+```python
+import asyncio
+import sys
+
+if sys.platform == "win32":
+    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+```
+
+### Api Key Setup
+
+Each provider reads its API key from a dedicated environment variable, e.g. `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`, `GROQ_API_KEY`, `MISTRAL_API_KEY`, etc. Alternatively, pass the key explicitly via the `llm_provider_api_key` argument — useful for multi-tenant setups, custom gateways, or runtime key rotation.
+
+**Use load_dotenv to load the `.env` file.**
+
+```python
+from dotenv import load_dotenv
+load_dotenv()
+```
+
+### Use agent *with* inner memory and run *single* message
+
+By default, agent use inner memory to store the conversation history.
+
+(You can set *max count messages in memory* by `max_memory_messages` init argument)
+
+```python
+import asyncio
+from dm_aioaiagent import DMAioAIAgent
+
+
+async def main():
+    # define a system message
+    system_message = "Your custom system message with role, backstory and goal"
+
+    # (optional) define a list of tools, if you want to use them
+    tools = [...]
+
+    # define a openai model, default is "gpt-4o-mini"
+    model_name = "gpt-4o"
+
+    # create an agent
+    ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
+    # if you don't want to see the input and output messages from agent
+    # you can set `input_output_logging=False` init argument
+
+    # call an agent
+    answer = await ai_agent.run("Hello!")
+
+    # call an agent
+    answer = await ai_agent.run("I want to know the weather in Kyiv")
+
+    # get full conversation history
+    conversation_history = ai_agent.memory_messages
+
+    # clear conversation history
+    ai_agent.clear_memory_messages()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+### Use agent *without* inner memory and run *multiple* messages
+
+If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
+
+```python
+import asyncio
+from dm_aioaiagent import DMAioAIAgent
+
+
+async def main():
+    # define a system message
+    system_message = "Your custom system message with role, backstory and goal"
+
+    # (optional) define a list of tools, if you want to use them
+    tools = [...]
+
+    # define a openai model, default is "gpt-4o-mini"
+    model_name = "gpt-4o"
+
+    # create an agent
+    ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
+                            is_memory_enabled=False)
+    # if you don't want to see the input and output messages from agent
+    # you can set input_output_logging=False
+
+    # define the conversation message(s)
+    messages = [
+        {"role": "user", "content": "Hello!"}
+    ]
+
+    # call an agent
+    new_messages = await ai_agent.run_messages(messages)
+
+    # add new_messages to messages
+    messages.extend(new_messages)
+
+    # define the next conversation message
+    messages.append(
+        {"role": "user", "content": "I want to know the weather in Kyiv"}
+    )
+
+    # call an agent
+    new_messages = await ai_agent.run_messages(messages)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+### Image vision
+
+```python
+from dm_aioaiagent import DMAIAgent, OpenAIImageMessageContent
+
+
+def main():
+    # create an agent
+    ai_agent = DMAIAgent(agent_name="image_vision", model="gpt-4o")
+
+    # create an image message content
+    # NOTE: text argument is optional
+    img_content = OpenAIImageMessageContent(image_url="https://your.domain/image",
+                                            text="Hello, what is shown in the photo?")
+
+    # define the conversation messages
+    messages = [
+        {"role": "user", "content": "Hello!"},
+        {"role": "user", "content": img_content},
+    ]
+
+    # call an agent
+    new_messages = ai_agent.run_messages(messages)
+    answer = new_messages[-1].content
+
+
+if __name__ == "__main__":
+    main()
+```
````
dm_aioaiagent-0.6.0/dm_aioaiagent.egg-info/requires.txt
ADDED

````diff
@@ -0,0 +1,36 @@
+dm-logger<0.7.0,>=0.6.6
+python-dotenv>=1.0.0
+pydantic<3.0.0,>=2.9.2
+langchain<0.4.0,>=0.3.0
+langchain-core<0.4.0,>=0.3.5
+langchain-community<0.4.0,>=0.3.0
+langchain-openai<0.4.0,>=0.3.0
+langgraph<0.4.0,>=0.3.23
+langsmith<0.4.0,>=0.3.45
+grandalf<0.9.0,>=0.8.0
+
+[all]
+langchain-anthropic<0.4.0,>=0.3.0
+langchain-google-genai<3.0.0,>=2.1.0
+langchain-groq<0.4.0,>=0.3.0
+langchain-mistralai<0.3.0,>=0.2.0
+langchain-deepseek<0.2.0,>=0.1.0
+langchain-ollama<0.4.0,>=0.3.0
+
+[anthropic]
+langchain-anthropic<0.4.0,>=0.3.0
+
+[deepseek]
+langchain-deepseek<0.2.0,>=0.1.0
+
+[gemini]
+langchain-google-genai<3.0.0,>=2.1.0
+
+[groq]
+langchain-groq<0.4.0,>=0.3.0
+
+[mistral]
+langchain-mistralai<0.3.0,>=0.2.0
+
+[ollama]
+langchain-ollama<0.4.0,>=0.3.0
````
{dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/setup.py

````diff
@@ -8,7 +8,7 @@ def readme():
 
 setup(
     name='dm-aioaiagent',
-    version='v0.5.6',
+    version='v0.6.0',
     author='dimka4621',
     author_email='mismartconfig@gmail.com',
     description='This is my custom aioaiagent client',
@@ -18,19 +18,34 @@ setup(
     packages=find_packages(),
     install_requires=[
         'dm-logger>=0.6.6, <0.7.0',
-        'python-dotenv>=1.0.0, <1.1.0',
+        'python-dotenv>=1.0.0',
         'pydantic>=2.9.2, <3.0.0',
         'langchain>=0.3.0, <0.4.0',
         'langchain-core>=0.3.5, <0.4.0',
         'langchain-community>=0.3.0, <0.4.0',
         'langchain-openai>=0.3.0, <0.4.0',
-        'langchain-anthropic>=0.3.0, <0.4.0',
         'langgraph>=0.3.23, <0.4.0',
         'langsmith>=0.3.45, <0.4.0',
         'grandalf>=0.8.0, <0.9.0',
     ],
+    extras_require={
+        'anthropic': ['langchain-anthropic>=0.3.0, <0.4.0'],
+        'gemini': ['langchain-google-genai>=2.1.0, <3.0.0'],
+        'groq': ['langchain-groq>=0.3.0, <0.4.0'],
+        'mistral': ['langchain-mistralai>=0.2.0, <0.3.0'],
+        'deepseek': ['langchain-deepseek>=0.1.0, <0.2.0'],
+        'ollama': ['langchain-ollama>=0.3.0, <0.4.0'],
+        'all': [
+            'langchain-anthropic>=0.3.0, <0.4.0',
+            'langchain-google-genai>=2.1.0, <3.0.0',
+            'langchain-groq>=0.3.0, <0.4.0',
+            'langchain-mistralai>=0.2.0, <0.3.0',
+            'langchain-deepseek>=0.1.0, <0.2.0',
+            'langchain-ollama>=0.3.0, <0.4.0',
+        ],
+    },
     classifiers=[
-        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
         'License :: OSI Approved :: MIT License',
         'Operating System :: OS Independent'
     ],
````
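One practical consequence of moving providers to extras: constructing an agent for a provider whose `langchain-*` integration is absent fails with an `ImportError` from `init_chat_model`, whose message names the missing package. A hedged sketch of surfacing that to users, assuming the constructor initializes the model eagerly as the diff's `_init_agent` suggests (the constructor call mirrors the README; the wrapper itself is illustrative):

```python
from dm_aioaiagent import DMAIAgent  # synchronous client from this package


def build_agent(model: str) -> DMAIAgent:
    """Create an agent, turning a missing optional extra into a clearer error."""
    try:
        # Mirrors the README usage: system message first, model as a keyword.
        return DMAIAgent("You are a helpful assistant", model=model)
    except ImportError as e:
        # init_chat_model raises ImportError when the provider's
        # langchain-* integration package is not installed.
        raise RuntimeError(
            f"Model {model!r} needs an optional extra of dm-aioaiagent, "
            f"e.g. pip install 'dm-aioaiagent[anthropic]'. Original error: {e}"
        ) from e


agent = build_agent("gpt-4o-mini")  # OpenAI support ships with the default install
# agent = build_agent("claude-3-5-sonnet-latest")  # needs the [anthropic] extra
```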
dm_aioaiagent-0.5.6/README.md
DELETED

````diff
@@ -1,155 +0,0 @@
-# DM-aioaiagent
-
-## Urls
-
-* [PyPI](https://pypi.org/project/dm-aioaiagent)
-* [GitHub](https://github.com/MykhLibs/dm-aioaiagent)
-
-### * Package contains both `asynchronous` and `synchronous` clients
-
-## Usage
-
-Analogue to `DMAioAIAgent` is the synchronous client `DMAIAgent`.
-
-### Windows Setup
-
-```python
-import asyncio
-import sys
-
-if sys.platform == "win32":
-    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-```
-
-### Api Key Setup
-
-You can set your OpenAI API key in the environment variable `OPENAI_API_KEY` or pass it as an argument to the agent.
-
-**Use load_dotenv to load the `.env` file.**
-
-```python
-from dotenv import load_dotenv
-load_dotenv()
-```
-
-### Use agent *with* inner memory and run *single* message
-
-By default, agent use inner memory to store the conversation history.
-
-(You can set *max count messages in memory* by `max_memory_messages` init argument)
-
-```python
-import asyncio
-from dm_aioaiagent import DMAioAIAgent
-
-
-async def main():
-    # define a system message
-    system_message = "Your custom system message with role, backstory and goal"
-
-    # (optional) define a list of tools, if you want to use them
-    tools = [...]
-
-    # define a openai model, default is "gpt-4o-mini"
-    model_name = "gpt-4o"
-
-    # create an agent
-    ai_agent = DMAioAIAgent(system_message, tools, model=model_name)
-    # if you don't want to see the input and output messages from agent
-    # you can set `input_output_logging=False` init argument
-
-    # call an agent
-    answer = await ai_agent.run("Hello!")
-
-    # call an agent
-    answer = await ai_agent.run("I want to know the weather in Kyiv")
-
-    # get full conversation history
-    conversation_history = ai_agent.memory_messages
-
-    # clear conversation history
-    ai_agent.clear_memory_messages()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
-```
-
-### Use agent *without* inner memory and run *multiple* messages
-
-If you want to control the memory of the agent, you can disable it by setting `is_memory_enabled=False`
-
-```python
-import asyncio
-from dm_aioaiagent import DMAioAIAgent
-
-
-async def main():
-    # define a system message
-    system_message = "Your custom system message with role, backstory and goal"
-
-    # (optional) define a list of tools, if you want to use them
-    tools = [...]
-
-    # define a openai model, default is "gpt-4o-mini"
-    model_name = "gpt-4o"
-
-    # create an agent
-    ai_agent = DMAioAIAgent(system_message, tools, model=model_name,
-                            is_memory_enabled=False)
-    # if you don't want to see the input and output messages from agent
-    # you can set input_output_logging=False
-
-    # define the conversation message(s)
-    messages = [
-        {"role": "user", "content": "Hello!"}
-    ]
-
-    # call an agent
-    new_messages = await ai_agent.run_messages(messages)
-
-    # add new_messages to messages
-    messages.extend(new_messages)
-
-    # define the next conversation message
-    messages.append(
-        {"role": "user", "content": "I want to know the weather in Kyiv"}
-    )
-
-    # call an agent
-    new_messages = await ai_agent.run_messages(messages)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
-```
-
-### Image vision
-
-```python
-from dm_aioaiagent import DMAIAgent, OpenAIImageMessageContent
-
-
-def main():
-    # create an agent
-    ai_agent = DMAIAgent(agent_name="image_vision", model="gpt-4o")
-
-    # create an image message content
-    # NOTE: text argument is optional
-    img_content = OpenAIImageMessageContent(image_url="https://your.domain/image",
-                                            text="Hello, what is shown in the photo?")
-
-    # define the conversation messages
-    messages = [
-        {"role": "user", "content": "Hello!"},
-        {"role": "user", "content": img_content},
-    ]
-
-    # call an agent
-    new_messages = ai_agent.run_messages(messages)
-    answer = new_messages[-1].content
-
-
-if __name__ == "__main__":
-    main()
-```
````
dm_aioaiagent-0.5.6/dm_aioaiagent.egg-info/requires.txt
DELETED

````diff
@@ -1,11 +0,0 @@
-dm-logger<0.7.0,>=0.6.6
-python-dotenv<1.1.0,>=1.0.0
-pydantic<3.0.0,>=2.9.2
-langchain<0.4.0,>=0.3.0
-langchain-core<0.4.0,>=0.3.5
-langchain-community<0.4.0,>=0.3.0
-langchain-openai<0.4.0,>=0.3.0
-langchain-anthropic<0.4.0,>=0.3.0
-langgraph<0.4.0,>=0.3.23
-langsmith<0.4.0,>=0.3.45
-grandalf<0.9.0,>=0.8.0
````
The remaining 8 files are without changes:

- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/__init__.py
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/async_ai_agent.py
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/openai_image_message_content.py
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent/types.py
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/SOURCES.txt
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/dependency_links.txt
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/dm_aioaiagent.egg-info/top_level.txt
- {dm_aioaiagent-0.5.6 → dm_aioaiagent-0.6.0}/setup.cfg