flowtic 1.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowtic-1.6.0/.gitignore +11 -0
- flowtic-1.6.0/.python-version +1 -0
- flowtic-1.6.0/PKG-INFO +203 -0
- flowtic-1.6.0/README.md +171 -0
- flowtic-1.6.0/pyproject.toml +55 -0
- flowtic-1.6.0/src/flowtic/__init__.py +14 -0
- flowtic-1.6.0/src/flowtic/agents/__init__.py +4 -0
- flowtic-1.6.0/src/flowtic/agents/base.py +141 -0
- flowtic-1.6.0/src/flowtic/agents/core.py +226 -0
- flowtic-1.6.0/src/flowtic/agents/tools.py +40 -0
- flowtic-1.6.0/src/flowtic/communication/__init__.py +4 -0
- flowtic-1.6.0/src/flowtic/communication/callbacks.py +11 -0
- flowtic-1.6.0/src/flowtic/communication/channel/__init__.py +1 -0
- flowtic-1.6.0/src/flowtic/communication/channel/core.py +222 -0
- flowtic-1.6.0/src/flowtic/session/__init__.py +1 -0
- flowtic-1.6.0/src/flowtic/session/base.py +43 -0
- flowtic-1.6.0/src/flowtic/session/core.py +101 -0
flowtic-1.6.0/.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
3.12
|
flowtic-1.6.0/PKG-INFO
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: flowtic
|
|
3
|
+
Version: 1.6.0
|
|
4
|
+
Summary: A modern Python library for building multi-agent communication systems with LLM integration
|
|
5
|
+
Project-URL: Homepage, https://github.com/PrAsAnNaRePo/Flowtic
|
|
6
|
+
Project-URL: Repository, https://github.com/PrAsAnNaRePo/Flowtic
|
|
7
|
+
Project-URL: Issues, https://github.com/PrAsAnNaRePo/Flowtic/issues
|
|
8
|
+
Project-URL: Documentation, https://github.com/PrAsAnNaRePo/Flowtic#readme
|
|
9
|
+
Author-email: PrAsAnNaRePo <prasannatwenty@gmail.com>
|
|
10
|
+
Maintainer-email: PrAsAnNaRePo <prasannatwenty@gmail.com>
|
|
11
|
+
License: MIT
|
|
12
|
+
Keywords: agents,ai,communication,llm,multi-agent
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Requires-Python: <3.14,>=3.11
|
|
22
|
+
Requires-Dist: litellm>=1.74.7
|
|
23
|
+
Requires-Dist: pillow>=11.3.0
|
|
24
|
+
Provides-Extra: dev
|
|
25
|
+
Requires-Dist: build>=1.0.0; extra == 'dev'
|
|
26
|
+
Requires-Dist: mypy>=1.8.0; extra == 'dev'
|
|
27
|
+
Requires-Dist: pytest-asyncio>=0.23.0; extra == 'dev'
|
|
28
|
+
Requires-Dist: pytest>=8.0.0; extra == 'dev'
|
|
29
|
+
Requires-Dist: ruff>=0.1.0; extra == 'dev'
|
|
30
|
+
Requires-Dist: twine>=4.0.0; extra == 'dev'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# Flowtic
|
|
34
|
+
|
|
35
|
+
Build agent workflows that actually talk to each other. No complex orchestration, just simple communication patterns that work.
|
|
36
|
+
|
|
37
|
+
## What it does
|
|
38
|
+
|
|
39
|
+
- **Agent sessions**: Each agent keeps its own conversation history (text + images)
|
|
40
|
+
- **Tool system**: Write functions once, agents use them automatically
|
|
41
|
+
- **Communication graph**: Tell agents who can talk to whom with simple syntax
|
|
42
|
+
- **Callbacks**: Hook into conversations for logging, user input, whatever you need
|
|
43
|
+
|
|
44
|
+
## Install
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install git+https://github.com/PrAsAnNaRePo/Flowtic.git
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Quick start
|
|
51
|
+
|
|
52
|
+
### Single agent with tools
|
|
53
|
+
|
|
54
|
+
```python
|
|
55
|
+
from flowtic import Agent, Tool, Tools
|
|
56
|
+
|
|
57
|
+
def calculate(expression: str):
|
|
58
|
+
return str(eval(expression)), None
|
|
59
|
+
|
|
60
|
+
calc_tool = Tool(
|
|
61
|
+
tool_definition={
|
|
62
|
+
"type": "function",
|
|
63
|
+
"function": {
|
|
64
|
+
"name": "calculate",
|
|
65
|
+
"description": "Calculate math expressions",
|
|
66
|
+
"parameters": {
|
|
67
|
+
"type": "object",
|
|
68
|
+
"properties": {
|
|
69
|
+
"expression": {"type": "string", "description": "Math expression"}
|
|
70
|
+
},
|
|
71
|
+
"required": ["expression"]
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
},
|
|
75
|
+
tool_execution=calculate
|
|
76
|
+
)
|
|
77
|
+
|
|
78
|
+
agent = Agent(
|
|
79
|
+
agent_name="calculator",
|
|
80
|
+
model_name="gpt-4o", # any litellm model
|
|
81
|
+
instructions="You help with math problems.",
|
|
82
|
+
tools=Tools([calc_tool])
|
|
83
|
+
)
|
|
84
|
+
|
|
85
|
+
result = agent("What's 15 * 23?")
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
### Multi-agent workflow
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
from flowtic import Agent, CommunicationProtocol
|
|
92
|
+
|
|
93
|
+
# Create agents
|
|
94
|
+
analyst = Agent(
|
|
95
|
+
agent_name="analyst",
|
|
96
|
+
model_name="gpt-4o",
|
|
97
|
+
instructions="Analyze requirements and ask clarifying questions.",
|
|
98
|
+
allow_user_input=True # can talk to user
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
coder = Agent(
|
|
102
|
+
agent_name="coder",
|
|
103
|
+
model_name="gpt-4o",
|
|
104
|
+
instructions="Write code based on requirements.",
|
|
105
|
+
allow_user_input=False # only talks to other agents
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Set up who talks to whom
|
|
109
|
+
protocol = CommunicationProtocol(
|
|
110
|
+
"analyst<->coder", # bidirectional
|
|
111
|
+
[analyst, coder]
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
# Start the workflow
|
|
115
|
+
protocol.execute("Build a simple todo app")
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
## Communication patterns
|
|
119
|
+
|
|
120
|
+
The syntax is dead simple:
|
|
121
|
+
|
|
122
|
+
- `A->B` means A can send messages to B
|
|
123
|
+
- `A<->B` means they can talk both ways
|
|
124
|
+
- `A->B, B->C` chains them together
|
|
125
|
+
- `A<->B, A->C` means A talks to both B and C
|
|
126
|
+
|
|
127
|
+
When agents communicate, they automatically get tools to message each other. No setup needed.
|
|
128
|
+
|
|
129
|
+
## Images and multimodal
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
# Agent automatically handles images
|
|
133
|
+
agent("Here's a screenshot", images=["path/to/image.png"])
|
|
134
|
+
|
|
135
|
+
# Works with URLs and base64 too
|
|
136
|
+
agent("Analyze this", images=["https://example.com/chart.png"])
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
## Custom callbacks
|
|
140
|
+
|
|
141
|
+
```python
|
|
142
|
+
from flowtic import Callback
|
|
143
|
+
|
|
144
|
+
class MyCallbacks(Callback):
|
|
145
|
+
def on_user_loop(self, agent_name, message):
|
|
146
|
+
return input(f"{agent_name}: {message}\n> ")
|
|
147
|
+
|
|
148
|
+
def on_tool_call(self, agent_name, tool_name, args):
|
|
149
|
+
print(f"{agent_name} is using {tool_name}")
|
|
150
|
+
|
|
151
|
+
agent = Agent(
|
|
152
|
+
agent_name="helper",
|
|
153
|
+
model_name="gpt-4o",
|
|
154
|
+
callbacks=MyCallbacks()
|
|
155
|
+
)
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
## Session management
|
|
159
|
+
|
|
160
|
+
Each agent keeps its own conversation buffer. Even if multiple agents reuse the same `SessionManager`, their histories stay isolated by agent name:
|
|
161
|
+
|
|
162
|
+
```python
|
|
163
|
+
from flowtic import SessionManager
|
|
164
|
+
|
|
165
|
+
session_store = SessionManager()
|
|
166
|
+
|
|
167
|
+
agent1 = Agent(
|
|
168
|
+
agent_name="researcher",
|
|
169
|
+
model_name="gpt-4o",
|
|
170
|
+
session=session_store
|
|
171
|
+
)
|
|
172
|
+
|
|
173
|
+
agent2 = Agent(
|
|
174
|
+
agent_name="writer",
|
|
175
|
+
model_name="gpt-4o",
|
|
176
|
+
session=session_store
|
|
177
|
+
)
|
|
178
|
+
|
|
179
|
+
# The writer still has its own memory
|
|
180
|
+
agent1("Find info about climate change")
|
|
181
|
+
agent2("Write a summary based on what the researcher found")
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
If you want agents to share context, use agent-to-agent communication. `CommunicationProtocol` injects `_spin_into`, so one agent can explicitly hand context to another instead of silently sharing history.
|
|
185
|
+
|
|
186
|
+
Sessions handle images automatically - no extra work needed:
|
|
187
|
+
|
|
188
|
+
```python
|
|
189
|
+
session = SessionManager()
|
|
190
|
+
|
|
191
|
+
# Add context manually if needed
|
|
192
|
+
session.add_user_context("my_agent",
|
|
193
|
+
text="Here's the data",
|
|
194
|
+
images=["chart1.png", "chart2.png"]
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
# Get the full conversation
|
|
198
|
+
history = session.get_buffer_memory("my_agent")
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
## That's it
|
|
202
|
+
|
|
203
|
+
Three main pieces: agents that remember conversations, tools they can use, and simple rules for who talks to whom. Everything else just works.
|
flowtic-1.6.0/README.md
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
# Flowtic
|
|
2
|
+
|
|
3
|
+
Build agent workflows that actually talk to each other. No complex orchestration, just simple communication patterns that work.
|
|
4
|
+
|
|
5
|
+
## What it does
|
|
6
|
+
|
|
7
|
+
- **Agent sessions**: Each agent keeps its own conversation history (text + images)
|
|
8
|
+
- **Tool system**: Write functions once, agents use them automatically
|
|
9
|
+
- **Communication graph**: Tell agents who can talk to whom with simple syntax
|
|
10
|
+
- **Callbacks**: Hook into conversations for logging, user input, whatever you need
|
|
11
|
+
|
|
12
|
+
## Install
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
pip install git+https://github.com/PrAsAnNaRePo/Flowtic.git
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Quick start
|
|
19
|
+
|
|
20
|
+
### Single agent with tools
|
|
21
|
+
|
|
22
|
+
```python
|
|
23
|
+
from flowtic import Agent, Tool, Tools
|
|
24
|
+
|
|
25
|
+
def calculate(expression: str):
|
|
26
|
+
return str(eval(expression)), None
|
|
27
|
+
|
|
28
|
+
calc_tool = Tool(
|
|
29
|
+
tool_definition={
|
|
30
|
+
"type": "function",
|
|
31
|
+
"function": {
|
|
32
|
+
"name": "calculate",
|
|
33
|
+
"description": "Calculate math expressions",
|
|
34
|
+
"parameters": {
|
|
35
|
+
"type": "object",
|
|
36
|
+
"properties": {
|
|
37
|
+
"expression": {"type": "string", "description": "Math expression"}
|
|
38
|
+
},
|
|
39
|
+
"required": ["expression"]
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
},
|
|
43
|
+
tool_execution=calculate
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
agent = Agent(
|
|
47
|
+
agent_name="calculator",
|
|
48
|
+
model_name="gpt-4o", # any litellm model
|
|
49
|
+
instructions="You help with math problems.",
|
|
50
|
+
tools=Tools([calc_tool])
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
result = agent("What's 15 * 23?")
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### Multi-agent workflow
|
|
57
|
+
|
|
58
|
+
```python
|
|
59
|
+
from flowtic import Agent, CommunicationProtocol
|
|
60
|
+
|
|
61
|
+
# Create agents
|
|
62
|
+
analyst = Agent(
|
|
63
|
+
agent_name="analyst",
|
|
64
|
+
model_name="gpt-4o",
|
|
65
|
+
instructions="Analyze requirements and ask clarifying questions.",
|
|
66
|
+
allow_user_input=True # can talk to user
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
coder = Agent(
|
|
70
|
+
agent_name="coder",
|
|
71
|
+
model_name="gpt-4o",
|
|
72
|
+
instructions="Write code based on requirements.",
|
|
73
|
+
allow_user_input=False # only talks to other agents
|
|
74
|
+
)
|
|
75
|
+
|
|
76
|
+
# Set up who talks to whom
|
|
77
|
+
protocol = CommunicationProtocol(
|
|
78
|
+
"analyst<->coder", # bidirectional
|
|
79
|
+
[analyst, coder]
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
# Start the workflow
|
|
83
|
+
protocol.execute("Build a simple todo app")
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Communication patterns
|
|
87
|
+
|
|
88
|
+
The syntax is dead simple:
|
|
89
|
+
|
|
90
|
+
- `A->B` means A can send messages to B
|
|
91
|
+
- `A<->B` means they can talk both ways
|
|
92
|
+
- `A->B, B->C` chains them together
|
|
93
|
+
- `A<->B, A->C` means A talks to both B and C
|
|
94
|
+
|
|
95
|
+
When agents communicate, they automatically get tools to message each other. No setup needed.
|
|
96
|
+
|
|
97
|
+
## Images and multimodal
|
|
98
|
+
|
|
99
|
+
```python
|
|
100
|
+
# Agent automatically handles images
|
|
101
|
+
agent("Here's a screenshot", images=["path/to/image.png"])
|
|
102
|
+
|
|
103
|
+
# Works with URLs and base64 too
|
|
104
|
+
agent("Analyze this", images=["https://example.com/chart.png"])
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
## Custom callbacks
|
|
108
|
+
|
|
109
|
+
```python
|
|
110
|
+
from flowtic import Callback
|
|
111
|
+
|
|
112
|
+
class MyCallbacks(Callback):
|
|
113
|
+
def on_user_loop(self, agent_name, message):
|
|
114
|
+
return input(f"{agent_name}: {message}\n> ")
|
|
115
|
+
|
|
116
|
+
def on_tool_call(self, agent_name, tool_name, args):
|
|
117
|
+
print(f"{agent_name} is using {tool_name}")
|
|
118
|
+
|
|
119
|
+
agent = Agent(
|
|
120
|
+
agent_name="helper",
|
|
121
|
+
model_name="gpt-4o",
|
|
122
|
+
callbacks=MyCallbacks()
|
|
123
|
+
)
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Session management
|
|
127
|
+
|
|
128
|
+
Each agent keeps its own conversation buffer. Even if multiple agents reuse the same `SessionManager`, their histories stay isolated by agent name:
|
|
129
|
+
|
|
130
|
+
```python
|
|
131
|
+
from flowtic import SessionManager
|
|
132
|
+
|
|
133
|
+
session_store = SessionManager()
|
|
134
|
+
|
|
135
|
+
agent1 = Agent(
|
|
136
|
+
agent_name="researcher",
|
|
137
|
+
model_name="gpt-4o",
|
|
138
|
+
session=session_store
|
|
139
|
+
)
|
|
140
|
+
|
|
141
|
+
agent2 = Agent(
|
|
142
|
+
agent_name="writer",
|
|
143
|
+
model_name="gpt-4o",
|
|
144
|
+
session=session_store
|
|
145
|
+
)
|
|
146
|
+
|
|
147
|
+
# The writer still has its own memory
|
|
148
|
+
agent1("Find info about climate change")
|
|
149
|
+
agent2("Write a summary based on what the researcher found")
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
If you want agents to share context, use agent-to-agent communication. `CommunicationProtocol` injects `_spin_into`, so one agent can explicitly hand context to another instead of silently sharing history.
|
|
153
|
+
|
|
154
|
+
Sessions handle images automatically - no extra work needed:
|
|
155
|
+
|
|
156
|
+
```python
|
|
157
|
+
session = SessionManager()
|
|
158
|
+
|
|
159
|
+
# Add context manually if needed
|
|
160
|
+
session.add_user_context("my_agent",
|
|
161
|
+
text="Here's the data",
|
|
162
|
+
images=["chart1.png", "chart2.png"]
|
|
163
|
+
)
|
|
164
|
+
|
|
165
|
+
# Get the full conversation
|
|
166
|
+
history = session.get_buffer_memory("my_agent")
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
## That's it
|
|
170
|
+
|
|
171
|
+
Three main pieces: agents that remember conversations, tools they can use, and simple rules for who talks to whom. Everything else just works.
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "flowtic"
|
|
3
|
+
version = "1.6.0"
|
|
4
|
+
description = "A modern Python library for building multi-agent communication systems with LLM integration"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
license = { text = "MIT" }
|
|
7
|
+
authors = [
|
|
8
|
+
{ name = "PrAsAnNaRePo", email = "prasannatwenty@gmail.com" }
|
|
9
|
+
]
|
|
10
|
+
maintainers = [
|
|
11
|
+
{ name = "PrAsAnNaRePo", email = "prasannatwenty@gmail.com" }
|
|
12
|
+
]
|
|
13
|
+
requires-python = ">=3.11,<3.14"
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.12",
|
|
20
|
+
"Programming Language :: Python :: 3.13",
|
|
21
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
22
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
23
|
+
]
|
|
24
|
+
keywords = ["llm", "agents", "communication", "ai", "multi-agent"]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"litellm>=1.74.7",
|
|
27
|
+
"pillow>=11.3.0",
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.urls]
|
|
31
|
+
Homepage = "https://github.com/PrAsAnNaRePo/Flowtic"
|
|
32
|
+
Repository = "https://github.com/PrAsAnNaRePo/Flowtic"
|
|
33
|
+
Issues = "https://github.com/PrAsAnNaRePo/Flowtic/issues"
|
|
34
|
+
Documentation = "https://github.com/PrAsAnNaRePo/Flowtic#readme"
|
|
35
|
+
|
|
36
|
+
[project.optional-dependencies]
|
|
37
|
+
dev = [
|
|
38
|
+
"pytest>=8.0.0",
|
|
39
|
+
"pytest-asyncio>=0.23.0",
|
|
40
|
+
"ruff>=0.1.0",
|
|
41
|
+
"mypy>=1.8.0",
|
|
42
|
+
"twine>=4.0.0",
|
|
43
|
+
"build>=1.0.0",
|
|
44
|
+
]
|
|
45
|
+
|
|
46
|
+
[build-system]
|
|
47
|
+
requires = ["hatchling"]
|
|
48
|
+
build-backend = "hatchling.build"
|
|
49
|
+
|
|
50
|
+
[tool.hatch.build.targets.sdist]
|
|
51
|
+
exclude = [
|
|
52
|
+
"/tests",
|
|
53
|
+
"/dist",
|
|
54
|
+
"/uv.lock",
|
|
55
|
+
]
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from .agents import Agent
|
|
2
|
+
from .agents.tools import Tool, Tools
|
|
3
|
+
from .session import SessionManager
|
|
4
|
+
from .communication import CommunicationProtocol
|
|
5
|
+
from .communication.callbacks import Callback
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"Agent",
|
|
9
|
+
"SessionManager",
|
|
10
|
+
"CommunicationProtocol",
|
|
11
|
+
"Callback",
|
|
12
|
+
"Tool",
|
|
13
|
+
"Tools",
|
|
14
|
+
]
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
from abc import ABC
|
|
2
|
+
import inspect
|
|
3
|
+
from typing import Any, Dict, Optional
|
|
4
|
+
from flowtic.session import SessionManager
|
|
5
|
+
from flowtic.agents.tools import Tool, Tools
|
|
6
|
+
from litellm import completion, acompletion
|
|
7
|
+
from flowtic.communication import Callback
|
|
8
|
+
|
|
9
|
+
class AgentInterface(ABC):
    """Shared state and plumbing for Agent / AsyncAgent implementations.

    Handles session registration, system-instruction assembly, litellm
    completion calls, and flexible dispatch into user-supplied callbacks.
    """

    def __init__(
        self,
        agent_name: str,
        model_name: str,
        instructions: str | None = "You are a helpful assistant.",
        tools: Tools | None = None,
        tool_choice: str | None = None,
        session: Optional[SessionManager] = None,
        *,
        allow_user_input: bool = True,
        max_turns: int = -1,
        callbacks: Callback | None = None,
        temperature: float = 1,
        reasoning_effort=None,
        verbose: bool = False
    ):
        """
        Args:
            agent_name: Unique name; also the tag under which this agent's
                history is stored in the session.
            model_name: A litellm-compatible model identifier.
            instructions: System prompt; falls back to a generic assistant prompt.
            tools: Optional Tools registry available to the model.
            tool_choice: Forwarded to litellm when tools are set.
            session: Shared SessionManager; a private one is created if omitted.
            allow_user_input: If True, `callbacks` must be provided (the user
                loop needs a way to collect input).
            max_turns: Maximum loop turns; <= 0 means unlimited.
            callbacks: Hook object; a no-op Callback is used when omitted.
            temperature: Sampling temperature forwarded to litellm.
            reasoning_effort: Forwarded to litellm for reasoning-capable models.
            verbose: Print diagnostic messages when True.

        Raises:
            ValueError: If allow_user_input is True but no callbacks are given.
        """
        self.agent_name = agent_name
        self.model_name = model_name
        self.instructions = instructions or "You are a helpful assistant."
        self.tools = tools
        self.tool_choice = tool_choice
        self.session = session
        self.allow_user_input = allow_user_input
        self.max_turns = max_turns
        self.callbacks = callbacks
        self.temperature = temperature
        self.reasoning_effort = reasoning_effort
        self.verbose = verbose

        if not self.session:
            if self.verbose:
                print("Session not provided, creating a new one...")
            self.session = SessionManager()

        if self.allow_user_input and self.callbacks is None:
            raise ValueError("Callbacks should be provided if allow_user_input is True")

        if not self.callbacks:
            self.callbacks = Callback()

        if not self.allow_user_input:
            if self.verbose:
                print(f"Restricting user input to Agent {self.name}")
            self.instructions += '\nYou are STRICTLY NOT allowed to communicate to user directly, contact to any other agents if you are allowed to.'

        if self.agent_name:
            # Identity line goes first so the model sees its name before the task.
            self.instructions = f'\nYou are {self.agent_name}. ' + self.instructions

        self._register_session()
        self.session.add_sys_ins(self.name, self.instructions)

    @property
    def name(self) -> str:
        """The agent's name (alias for ``agent_name``)."""
        return self.agent_name

    def completion(self, **kwargs) -> Any:
        """Run a synchronous litellm completion over this agent's session buffer."""
        return completion(
            model=self.model_name,
            messages=self.session.get_buffer_memory(tag=self.name),
            tools=self.tools.get_definitions() if self.tools else None,
            tool_choice=self.tool_choice if self.tools else None,
            temperature=self.temperature,
            reasoning_effort=self.reasoning_effort,
            **kwargs
        )

    def acompletion(self, **kwargs) -> Any:
        """Start an asynchronous litellm completion (returns an awaitable)."""
        return acompletion(
            model=self.model_name,
            messages=self.session.get_buffer_memory(tag=self.name),
            tools=self.tools.get_definitions() if self.tools else None,
            tool_choice=self.tool_choice if self.tools else None,
            temperature=self.temperature,
            reasoning_effort=self.reasoning_effort,
            **kwargs
        )

    def _register_session(self) -> None:
        """Create this agent's buffer inside the session manager."""
        self.session._register_buffer(self.name)

    def add_context(
        self,
        input: Optional[Dict[str, Any]] = None,
        assistant_output: Optional[Any] = None,
        tool_output: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Append exactly one kind of message to this agent's session buffer.

        Exactly one of the three arguments should be truthy; they are checked
        in order (input, assistant_output, tool_output).

        Raises:
            ValueError: If no (truthy) argument is provided.
        """
        if input:
            self.session.add_user_context(self.name, **input)
        elif assistant_output:
            self.session.add_assistant_context(self.name, assistant_output)
        elif tool_output:
            self.session.add_tool_context(self.name, **tool_output)
        else:
            raise ValueError("No input provided")

    def add_tool(self, tool: Tool) -> None:
        """Register *tool*, creating the Tools registry on first use."""
        if not self.tools:
            self.tools = Tools([tool])
        else:
            self.tools.register_tool(tool)

    def _dispatch_callback(self, method, *payload):
        """Call a callback method, passing the agent name only when the
        callback's signature has room for it.

        Supports both ``cb(message)`` and ``cb(agent_name, message)`` style
        overrides; a ``*args`` signature always receives the agent name.
        """
        signature = inspect.signature(method)
        parameters = list(signature.parameters.values())

        if any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in parameters):
            return method(self.name, *payload)

        positional = [
            p for p in parameters
            if p.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
        ]
        if len(positional) <= len(payload):
            return method(*payload)
        return method(self.name, *payload)

    def _call_user_loop(self, assistant_message: str):
        """Forward *assistant_message* to the user-input callback."""
        return self._dispatch_callback(self.callbacks.on_user_loop, assistant_message)

    def _call_tool_callback(self, function_name: str, arguments: Dict[str, Any]):
        """Notify the tool-call callback before executing a tool."""
        return self._dispatch_callback(self.callbacks.on_tool_call, function_name, arguments)
|
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
from typing import Any, List, Optional
|
|
4
|
+
|
|
5
|
+
from flowtic.agents.base import AgentInterface
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def _message_content_to_text(content: Any) -> Optional[str]:
|
|
9
|
+
if content is None:
|
|
10
|
+
return None
|
|
11
|
+
|
|
12
|
+
if isinstance(content, str):
|
|
13
|
+
return content
|
|
14
|
+
|
|
15
|
+
if isinstance(content, list):
|
|
16
|
+
text_parts = []
|
|
17
|
+
for item in content:
|
|
18
|
+
if isinstance(item, dict) and item.get("type") == "text":
|
|
19
|
+
text_parts.append(item.get("text", ""))
|
|
20
|
+
text_output = "\n".join(part for part in text_parts if part)
|
|
21
|
+
return text_output or str(content)
|
|
22
|
+
|
|
23
|
+
return str(content)
|
|
24
|
+
|
|
25
|
+
class Agent(AgentInterface):
    """Synchronous agent: runs a completion/tool loop until a final answer."""

    def __init__(self, **kwargs):
        """
        Initialize the agent.

        Args:
            agent_name (str): Unique name for this agent (session tag).
            model_name (str): The litellm compatible model name.
            instructions (str | None, optional): The instructions for the agent. Defaults to "You are a helpful assistant."
            tools (Tools | None, optional): The tools available to the agent. Defaults to None.
            tool_choice (str | None, optional): The tool choice for the agent. Defaults to None.
            session (Optional[SessionManager], optional): The session for the agent to keep context. Defaults to None.
            allow_user_input (bool, optional): Whether to allow the model to take user input. Defaults to True.
            max_turns (int, optional): The maximum number of turns. Defaults to -1 (unlimited).
        """
        super().__init__(**kwargs)

    def __call__(self, input: str, images: Optional[List] = None):
        """
        Run the agent loop on *input* and return the final textual output.

        Args:
            input (str): The input to the agent.
            images (Optional[List], optional): List of images as a local file
                path or url or base64 encoded string. Defaults to None.

        Returns:
            The last assistant (or tool) text produced, or None if the model
            never emitted any text.
        """
        if self.verbose:
            print(f">> Starting {self.name} agent execution")  # typo fix: was "Staring"

        self.add_context(input={'text': input, 'images': images})

        turn_count = 0
        final_output = None
        while True:
            # max_turns <= 0 means "unlimited".
            if self.max_turns > 0 and turn_count >= self.max_turns:
                break

            response = self.completion()
            response_message = response.choices[0].message
            self.add_context(assistant_output=response_message)
            turn_count += 1

            message_text = _message_content_to_text(response_message.content)
            if message_text is not None:
                final_output = message_text

            tool_calls = response_message.tool_calls or []

            if tool_calls:
                communication_occurred = False
                for tool_call in tool_calls:
                    function_name = tool_call.function.name
                    function_args = json.loads(tool_call.function.arguments)
                    self._call_tool_callback(function_name, function_args)

                    # '_spin_into' is the agent-to-agent handoff tool injected by
                    # CommunicationProtocol; it additionally receives our name.
                    if function_name == '_spin_into':
                        tool_output = self.tools.get_callable(function_name)(self.name, **function_args)
                        communication_occurred = True
                    else:
                        tool_output = self.tools.get_callable(function_name)(**function_args)

                    # Explicit check instead of `assert` so the contract is still
                    # enforced when Python runs with -O (asserts stripped).
                    if not isinstance(tool_output, tuple):
                        raise TypeError(
                            "Tool output should return a tuple of (text, images (none if no images))"
                        )
                    if tool_output[0] is not None:
                        final_output = str(tool_output[0])

                    self.add_context(tool_output={
                        'fn_name': function_name,
                        'tool_call_id': tool_call.id,
                        'output': str(tool_output[0]),
                    })
                    if tool_output[1]:
                        tool_images = tool_output[1] if isinstance(tool_output[1], list) else [tool_output[1]]
                        self.add_context(input={'text': 'Here are the tool output images:\n', 'images': tool_images})

                # If agent communicated to another agent and doesn't allow user input, stop
                if communication_occurred and not self.allow_user_input:
                    break
            else:
                if self.allow_user_input:
                    try:
                        user_input = self._call_user_loop(message_text or "")
                        self.add_context(input={'text': user_input})
                    except NotImplementedError:
                        # Callback opted out of user interaction: end the loop.
                        break
                else:
                    break

        return final_output
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
class AsyncAgent(AgentInterface):
    """Asynchronous agent: awaits completions and runs tool calls concurrently."""

    def __init__(self, **kwargs):
        """
        Initialize the asynchronous agent.

        Args:
            agent_name (str): Unique name for this agent (session tag).
            model_name (str): The litellm compatible model name.
            instructions (str | None, optional): The instructions for the agent. Defaults to "You are a helpful assistant."
            tools (Tools | None, optional): The tools available to the agent. Defaults to None.
            tool_choice (str | None, optional): The tool choice for the agent. Defaults to None.
            session (Optional[SessionManager], optional): The session for the agent to keep context. Defaults to None.
            allow_user_input (bool, optional): Whether to allow the model to take user input. Defaults to True.
            max_turns (int, optional): The maximum number of turns. Defaults to -1 (unlimited).
        """
        super().__init__(**kwargs)

    async def run_async_or_sync(self, func, *args, **kwargs):
        """Invoke *func* and await its result only if it is a coroutine."""
        result = func(*args, **kwargs)
        if asyncio.iscoroutine(result):
            return await result
        return result

    async def __call__(self, input: str, images: Optional[List] = None):
        """
        Run the agent loop on *input* and return the final textual output.

        Args:
            input (str): The input to the agent.
            images (Optional[List], optional): List of images as a local file
                path or url or base64 encoded string. Defaults to None.

        Returns:
            The last assistant (or tool) text produced, or None if the model
            never emitted any text.
        """
        if self.verbose:
            print(f">> Starting {self.name} agent execution")  # typo fix: was "Staring"

        self.add_context(input={'text': input, 'images': images})

        turn_count = 0
        final_output = None
        while True:
            # max_turns <= 0 means "unlimited".
            if self.max_turns > 0 and turn_count >= self.max_turns:
                break
            if self.verbose:
                print("Session Buffer:")
                print(self.session.get_buffer_memory(self.name))

            response = await self.acompletion()
            response_message = response.choices[0].message

            if self.verbose:
                print(response_message)

            self.add_context(assistant_output=response_message)
            turn_count += 1
            message_text = _message_content_to_text(response_message.content)
            if message_text is not None:
                final_output = message_text

            tool_calls = response_message.tool_calls or []

            if tool_calls:
                communication_occurred = False

                # Schedule all tool calls concurrently, keeping per-call
                # metadata so outputs can be written back in order.
                tasks = []
                tool_metadata = []
                for tool_call in tool_calls:
                    callable_func = self.tools.get_callable(tool_call.function.name)
                    args = json.loads(tool_call.function.arguments)
                    self._call_tool_callback(tool_call.function.name, args)

                    # '_async_spin_into' is the agent-to-agent handoff tool;
                    # it additionally receives our name as the caller.
                    if tool_call.function.name == '_async_spin_into':
                        communication_occurred = True
                        task = asyncio.create_task(self.run_async_or_sync(callable_func, self.name, **args))
                    else:
                        task = asyncio.create_task(self.run_async_or_sync(callable_func, **args))

                    tasks.append(task)
                    tool_metadata.append({
                        'function_name': tool_call.function.name,
                        'tool_call': tool_call
                    })

                tool_outputs = await asyncio.gather(*tasks)

                for tool_output, metadata in zip(tool_outputs, tool_metadata):
                    if self.verbose:
                        print("TOOL OUTPUT: ")
                        print(tool_output)

                    # Explicit check instead of `assert` so the contract is
                    # still enforced when Python runs with -O.
                    if not isinstance(tool_output, tuple):
                        raise TypeError(
                            "Tool output should return a tuple of (text, images (none if no images))"
                        )
                    if tool_output[0] is not None:
                        final_output = str(tool_output[0])
                    self.add_context(tool_output={
                        'fn_name': metadata['function_name'],
                        'tool_call_id': metadata['tool_call'].id,
                        'output': str(tool_output[0])
                    })

                    if tool_output[1]:
                        tool_images = tool_output[1] if isinstance(tool_output[1], list) else [tool_output[1]]
                        self.add_context(input={'text': 'Here are the tool output images:\n', 'images': tool_images})

                # If agent communicated to another agent and doesn't allow user input, stop
                if communication_occurred and not self.allow_user_input:
                    break
            else:
                if self.allow_user_input:
                    try:
                        user_input = self._call_user_loop(message_text or "")
                        self.add_context(input={'text': user_input})
                    except NotImplementedError:
                        # Callback opted out of user interaction: end the loop.
                        break
                else:
                    break

        return final_output
|
|
226
|
+
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from typing import Dict, Callable, List
|
|
2
|
+
|
|
3
|
+
class Tool():
    """Pairs an LLM tool-call JSON definition with the callable that executes it.

    The definition must follow the OpenAI function-calling schema: a dict with
    a ``function`` object whose ``name`` matches ``tool_execution.__name__``.
    """

    def __init__(
        self,
        tool_definition: Dict,
        tool_execution: Callable,
    ) -> None:
        """Validate and store the definition/executor pair.

        Args:
            tool_definition: JSON-schema dict describing the tool;
                ``tool_definition['function']['name']`` is the tool name.
            tool_execution: Callable invoked when the model requests the tool.

        Raises:
            ValueError: If the declared name and the callable's ``__name__``
                differ. (Previously an ``assert``, but asserts are stripped
                under ``python -O`` so the check must be an explicit raise.)
        """
        self.tool_definition = tool_definition
        self.tool_execution = tool_execution

        declared_name = tool_definition['function']['name']
        if declared_name != tool_execution.__name__:
            raise ValueError(
                f"Tool name mismatch: definition declares {declared_name!r} "
                f"but callable is named {tool_execution.__name__!r}"
            )

    def get_name(self) -> str:
        """Return the tool name declared in the definition."""
        return self.tool_definition['function']['name']
|
|
16
|
+
|
|
17
|
+
class Tools():
    """A registry of ``Tool`` objects with name-based lookup."""

    def __init__(
        self,
        tools: List[Tool]
    ):
        """Store the tool list and build the name -> callable index."""
        self.tools = tools
        self._map = None
        self._create_map()

    def _create_map(self):
        # Rebuild the lookup index from the current tool list.
        index = {}
        for registered in self.tools:
            index[registered.get_name()] = registered.tool_execution
        self._map = index

    def get_callable(self, tool_name: str) -> Callable:
        """Return the executor registered under *tool_name*.

        Raises:
            ValueError: If no tool with that name is registered.
        """
        if tool_name in self._map:
            return self._map[tool_name]
        raise ValueError(f"Tool {tool_name} not found")

    def get_definitions(self) -> List[Dict]:
        """Return the JSON definitions of all registered tools."""
        return [registered.tool_definition for registered in self.tools]

    def register_tool(self, tool: Tool) -> None:
        """Add *tool*, replacing any previously registered tool of the same name."""
        incoming_name = tool.get_name()
        kept = [
            existing_tool
            for existing_tool in self.tools
            if existing_tool.get_name() != incoming_name
        ]
        kept.append(tool)
        self.tools = kept
        self._create_map()
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from typing import Dict, Any
|
|
2
|
+
|
|
3
|
+
class Callback:
    """Hook points an embedding application can override to observe an agent run."""

    def __init__(self) -> None:
        pass

    def on_user_loop(self, agent_name: str, assistant_message: str):
        """Invoked when the agent wants a reply from the user.

        The base class has no user-input channel, so it always raises;
        subclasses override this to supply the user's answer.

        Raises:
            NotImplementedError: Always, in the base implementation.
        """
        raise NotImplementedError

    def on_tool_call(self, agent_name: str, fn_name: str, arguments: Dict[str, Any]):
        """Invoked around a tool call; the base implementation is a no-op."""
        return None
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .core import CommunicationProtocol as CommunicationProtocol
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
from typing import List, Optional, TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
if TYPE_CHECKING:
|
|
4
|
+
from flowtic.agents import Agent
|
|
5
|
+
import re
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
from flowtic.agents.tools import Tool
|
|
8
|
+
|
|
9
|
+
class CommunicationProtocol:
    """Routes messages between agents according to a small handoff DSL.

    ``logic_str`` is a comma-separated list of edges such as ``"a -> b"``
    (a may hand off to b) or ``"a <-> b"`` (both directions). On
    construction every sender gets a "spin into" tool injected so the LLM
    can hand work off to its allowed receivers.
    """

    def __init__(
        self,
        logic_str: str,
        agents: List['Agent'],
        async_run_type = False,
        *,
        verbose: bool = False,
    ) -> None:
        """Parse and validate the protocol, then inject handoff tools.

        Args:
            logic_str: The routing DSL (see class docstring).
            agents: Participating agents; their ``name`` attributes must
                match the names in ``logic_str`` exactly (no more, no less).
            async_run_type: When truthy, inject the async handoff tool
                (``_async_spin_into``) instead of the sync one.
            verbose: Print the communication graph as a tree.

        Raises:
            ValueError: Empty or un-parsable protocol, or an agent-name
                mismatch between ``logic_str`` and ``agents``.
        """
        self.logic_str = logic_str
        self.agents = agents
        self.verbose = verbose

        self.mapping = self._parse_communication(logic_str)
        if not self.mapping:
            raise ValueError("Communication protocol cannot be empty")

        self.async_run_type = async_run_type
        self.agent_map = {agents[i].name: agents[i] for i in range(len(agents))}
        self._communication_validation()
        if self.verbose:
            self.print_graph_as_tree()
        # Side effect: every sender agent gets a handoff tool limited to
        # the receivers that the parsed mapping allows for it.
        for item in self.mapping.items():
            self._inject_handsoff(item[0], item[1])


    def _parse_communication(self, sequence: str):
        """Parse the DSL into an adjacency mapping ``{sender: [receivers]}``.

        Raises:
            ValueError: For any fragment that is not ``name -> name`` or
                ``name <-> name``.
        """
        pattern = re.compile(r'\s*(\w+)\s*(<->|->)\s*(\w+)\s*')
        graph = defaultdict(list)

        for fragment in sequence.split(','):
            m = pattern.fullmatch(fragment.strip())
            if not m:
                raise ValueError(f"Un-parsable fragment: {fragment!r}")
            src, arrow, dst = m.groups()

            graph[src].append(dst)
            if arrow == '<->':
                # Bidirectional edge: also register the reverse direction.
                graph[dst].append(src)

        # dict.fromkeys de-duplicates receivers while preserving order.
        return {n: list(dict.fromkeys(neigh)) for n, neigh in graph.items()}

    def parse_agents(self):
        """Return every agent name mentioned in the protocol, de-duplicated in order."""
        agents = []
        agents.extend(list(self.mapping.keys()))
        _vals = [j for i in list(self.mapping.values()) for j in i]
        agents.extend(_vals)
        return list(dict.fromkeys(agents))

    def _communication_validation(self):
        """Raise ValueError unless the protocol names and provided agents match exactly."""
        protocol_agents = set(self.parse_agents())
        provided_agents = set(self.agent_map)

        if protocol_agents != provided_agents:
            missing_agents = sorted(protocol_agents - provided_agents)
            extra_agents = sorted(provided_agents - protocol_agents)
            errors = []
            if missing_agents:
                errors.append(f"Missing agents: {missing_agents}")
            if extra_agents:
                errors.append(f"Unexpected agents: {extra_agents}")
            raise ValueError(". ".join(errors))

    def print_graph_as_tree(self):
        """Pretty-print the communication graph as an ASCII tree (cycles marked with ↺)."""
        # Roots are nodes nobody points at; if the graph is fully cyclic,
        # fall back to every sender as a root.
        targets = {t for vals in self.mapping.values() for t in vals}
        roots = [n for n in self.mapping if n not in targets] or list(self.mapping.keys())

        def dfs(node, prefix="", visited=None, is_last=True):
            # Depth-first walk; a revisited node is printed once more with a
            # loop marker and then not expanded again.
            if visited is None:
                visited = set()
            looped = node in visited
            branch = "└── " if is_last else "├── "
            print(prefix + branch + node + (" (↺)" if looped else ""))
            if looped:
                return
            visited.add(node)

            children = self.mapping.get(node, [])
            for i, child in enumerate(children):
                dfs(child,
                    prefix + (" " if is_last else "│ "),
                    visited,
                    i == len(children) - 1)

        visited = set()
        for i, root in enumerate(roots):
            dfs(root, "", visited, i == len(roots) - 1)
            if i != len(roots) - 1:
                print()

    def get_connected_agents(self, agent_name: str):
        """Return the receivers *agent_name* is allowed to message ([] if none)."""
        return self.mapping.get(agent_name, [])

    def _format_handoff_message(self, sender: str, receiver: str, message: str, context: str) -> str:
        """Render the handoff text shown to the receiving agent.

        The prefix explicitly names the sender so the receiver's LLM does
        not mistake the handoff for a user message.
        """
        prefix = f"Hey {receiver}, It's {sender} here (not the user, don't get confused)."
        cleaned_context = context.strip()
        cleaned_message = message.strip()

        if cleaned_context:
            return f"{prefix} {cleaned_context}\n\n{cleaned_message}"

        return f"{prefix}\n\n{cleaned_message}"

    def _validate_receiver(self, sender: str, receiver: str) -> None:
        """Raise ValueError when the protocol has no sender -> receiver edge."""
        allowed_receivers = self.get_connected_agents(sender)
        if receiver not in allowed_receivers:
            raise ValueError(f"Agent {sender} cannot communicate with {receiver}. Allowed receivers: {allowed_receivers}")

    def _inject_handsoff(self, agent: str, recievers: List[str]):
        """Attach a handoff tool to *agent*, advertising its allowed receivers.

        The tool name/executor pair is picked by ``self.async_run_type`` so
        that it matches the hosting agent's run mode.
        """
        self.agent_map.get(agent).add_tool(
            Tool(
                tool_definition={
                    "type": "function",
                    "function": {
                        # Tool name must equal the executor's __name__ (Tool enforces this).
                        "name": f"{self._async_spin_into.__name__ if self.async_run_type else self._spin_into.__name__}",
                        "description": "Use this tool to communicate with other agents",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "receiver": {
                                    "type": "string",
                                    "description": f"you must have to mention receiver name clearly as it is. Only allowed to {','.join([i for i in recievers])}",
                                },
                                "message": {
                                    "type": "string",
                                    "description": "This includes the main task, goal or whatever the important information you want to pass to the receiver",
                                },
                                "context": {
                                    "type": "string",
                                    "description": "This includes the additional context, your reasoning, etc. of the conversation so far",
                                },
                            },
                            "required": ["receiver", "message", "context"],
                        },
                    },
                },
                tool_execution=self._async_spin_into if self.async_run_type else self._spin_into,
            )
        )

    def _collect_output(self, messages):
        """Return the last meaningful content from *messages*, or None.

        Considers tool-role dicts (skipping empty and literal 'None'
        content) and assistant message objects that carry content.
        """
        output_parts = []

        for msg in messages:
            if isinstance(msg, dict) and msg.get('role') == 'tool':
                content = msg.get('content', '')
                if content and content != 'None':
                    output_parts.append(content)
            elif hasattr(msg, 'role') and msg.role == 'assistant' and msg.content:
                output_parts.append(msg.content)

        if output_parts:
            return output_parts[-1]

        return None

    def _spin_up(self, agent_name: str, input: str, images: Optional[List] = None):
        """Run *agent_name* on *input* and return a textual result.

        If the agent call itself returns None, fall back to the last
        meaningful message the run appended to the agent's session buffer
        (buffer length is snapshotted before the run to isolate new messages).

        Raises:
            ValueError: If no agent with that name exists.
        """
        agent = self.agent_map.get(agent_name)
        if agent is None:
            raise ValueError(f"No agent found called {agent_name}")

        original_buffer = agent.session.get_buffer_memory(tag=agent.name)
        original_length = len(original_buffer)

        output = agent(input, images=images)
        if output is not None:
            return output

        updated_buffer = agent.session.get_buffer_memory(tag=agent.name)
        new_messages = updated_buffer[original_length:]

        return self._collect_output(new_messages) or f"{agent_name} completed the request"

    async def _async_spin_up(self, agent_name: str, input: str, images: Optional[List] = None):
        """Async twin of ``_spin_up``; must stay behaviorally in sync with it.

        Raises:
            ValueError: If no agent with that name exists.
        """
        agent = self.agent_map.get(agent_name)

        if agent is None:
            raise ValueError(f"No agent found called {agent_name}")

        original_buffer = agent.session.get_buffer_memory(tag=agent.name)
        original_length = len(original_buffer)

        output = await agent(input, images=images)
        if output is not None:
            return output

        updated_buffer = agent.session.get_buffer_memory(tag=agent.name)
        new_messages = updated_buffer[original_length:]

        return self._collect_output(new_messages) or f"{agent_name} completed the request"

    def _spin_into(self, sender: str, receiver: str, message: str, context: str):
        """Handoff tool executor: validate the edge, run the receiver.

        Returns a ``(text, images)`` tuple as required by the agent
        tool-output contract; handoffs never carry images (None).
        """
        self._validate_receiver(sender, receiver)
        return self._spin_up(receiver, self._format_handoff_message(sender, receiver, message, context)), None

    async def _async_spin_into(self, sender: str, receiver: str, message: str, context: str):
        """Async twin of ``_spin_into``; same ``(text, None)`` contract."""
        self._validate_receiver(sender, receiver)
        return await self._async_spin_up(
            receiver,
            self._format_handoff_message(sender, receiver, message, context),
        ), None

    def execute(self, input: str, images: Optional[List] = None, start_agent: Optional[str] = None):
        """Run the protocol synchronously from *start_agent*.

        Defaults to the first sender in the mapping (dicts preserve
        insertion order, so this is the first edge in ``logic_str``).
        """
        prior_agent_name = start_agent or list(self.mapping.keys())[0]

        return self._spin_up(prior_agent_name, input, images=images)

    async def async_execute(self, input: str, images: Optional[List] = None, start_agent: Optional[str] = None):
        """Async twin of ``execute``."""
        prior_agent_name = start_agent or list(self.mapping.keys())[0]

        return await self._async_spin_up(prior_agent_name, input, images=images)

    async def asyn_execute(self, input: str, images: Optional[List] = None, start_agent: Optional[str] = None):
        # NOTE(review): misspelled alias of async_execute; kept for
        # backward compatibility with existing callers.
        return await self.async_execute(input, images=images, start_agent=start_agent)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .core import SessionManager as SessionManager
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from typing import Any, List, Optional
|
|
5
|
+
|
|
6
|
+
class SessionInterface(ABC):
    """Abstract per-agent conversation store.

    Each agent owns one message buffer keyed by a unique ``tag``. Concrete
    subclasses decide how user, assistant and tool messages are serialized
    into the buffer.
    """

    def __init__(self, ctx_size: int = 4):
        # tag -> list of chat messages belonging to that agent
        self._buffer_memory = {}
        self._ctx_size = ctx_size

    @property
    def ctx_size(self) -> int:
        """Configured context-window size."""
        return self._ctx_size

    @ctx_size.setter
    def ctx_size(self, value: int):
        self._ctx_size = value

    def get_buffer_memory(self, tag: str) -> List:
        """Return the live (mutable) message list for *tag*; KeyError if unknown."""
        return self._buffer_memory[tag]

    def add_sys_ins(self, tag: str, instruction: str):
        """Append a system-role instruction message to *tag*'s buffer."""
        entry = {
            'role': 'system',
            'content': instruction
        }
        self._buffer_memory[tag].append(entry)

    @abstractmethod
    def add_user_context(self, tag: str, text: Optional[str] = None, images: Optional[List] = None): ...

    @abstractmethod
    def add_assistant_context(self, tag: str, ass_out: Any): ...

    @abstractmethod
    def add_tool_context(self, tag: str, fn_name: str, tool_call_id: str, output: Any): ...

    def _register_buffer(self, tag: str):
        """Create an empty buffer for *tag*; each tag may be registered only once.

        Raises:
            ValueError: If *tag* was already registered.
        """
        if tag in self._buffer_memory:
            raise ValueError(f"Tag {tag} already exists")
        self._buffer_memory[tag] = []
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import binascii
|
|
3
|
+
import io
|
|
4
|
+
import mimetypes
|
|
5
|
+
import os
|
|
6
|
+
from typing import Any, List, Optional
|
|
7
|
+
|
|
8
|
+
from PIL import Image
|
|
9
|
+
|
|
10
|
+
from flowtic.session.base import SessionInterface
|
|
11
|
+
|
|
12
|
+
class SessionManager(SessionInterface):
    """Concrete session store that normalizes image inputs into data-URL strings."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _encode_image_bytes(self, image_bytes: bytes, mime_type: str) -> str:
        """Base64-encode raw image bytes as an inline ``data:`` URL."""
        payload = base64.b64encode(image_bytes).decode("utf-8")
        return f"data:{mime_type};base64,{payload}"

    def _handle_image(self, image: Any):
        """Normalize a PIL image / bytes / path / URL / base64 string to a URL string.

        Raises:
            TypeError: For unsupported input types.
            ValueError: For strings that are neither existing paths, URLs,
                data URLs, nor valid base64 payloads.
        """
        if isinstance(image, Image.Image):
            sink = io.BytesIO()
            image_format = (image.format or "PNG").upper()
            image.save(sink, format=image_format)
            return self._encode_image_bytes(sink.getvalue(), f"image/{image_format.lower()}")

        if isinstance(image, bytes):
            # Raw bytes carry no format info; assumes JPEG — TODO confirm upstream.
            return self._encode_image_bytes(image, "image/jpeg")

        if not isinstance(image, str):
            raise TypeError("Images must be file paths, URLs, base64 strings, bytes, or PIL images")

        normalized_image = image.strip()

        # Remote URLs and inline data URLs pass through untouched.
        if normalized_image.startswith(("http://", "https://", "data:image/")):
            return normalized_image

        if os.path.exists(normalized_image):
            mime_type = mimetypes.guess_type(normalized_image)[0] or "image/jpeg"
            with open(normalized_image, "rb") as file_handle:
                return self._encode_image_bytes(file_handle.read(), mime_type)

        # Last resort: the string must be a raw base64 payload.
        try:
            base64.b64decode(normalized_image, validate=True)
        except (binascii.Error, ValueError) as exc:
            raise ValueError(
                "Images must be valid local paths, URLs, data URLs, or raw base64 strings"
            ) from exc

        return f"data:image/jpeg;base64,{normalized_image}"

    def add_user_context(self, tag: str, text: Optional[str] = None, images: Optional[List] = None):
        """Append a user message with text and/or images to *tag*'s buffer.

        Raises:
            ValueError: When neither text nor images are supplied.
        """
        if text and images:
            # The message is appended first and then grown in place, so the
            # text part lands in the buffer before images are processed.
            message = {
                'role': 'user',
                'content': [
                    {'type': 'text', 'text': text},
                ]
            }
            self._buffer_memory[tag].append(message)
            for img in images:
                message['content'].append(
                    {'type': 'image_url', 'image_url': {'url': self._handle_image(img)}}
                )
        elif text:
            self._buffer_memory[tag].append(
                {
                    'role': 'user',
                    'content': [
                        {'type': 'text', 'text': text},
                    ]
                }
            )
        elif images:
            image_entries = [
                {'type': 'image_url', 'image_url': {'url': self._handle_image(img)}}
                for img in images
            ]
            self._buffer_memory[tag].append({'role': 'user', 'content': image_entries})
        else:
            raise ValueError("No input provided")

    def add_assistant_context(self, tag: str, ass_out: Any):
        """Append the raw assistant message object to *tag*'s buffer."""
        self._buffer_memory[tag].append(ass_out)

    def add_tool_context(self, tag: str, fn_name, tool_call_id, output):
        """Append a tool-role result message to *tag*'s buffer."""
        tool_message = {
            'tool_call_id': tool_call_id,
            'role': 'tool',
            'name': fn_name,
            'content': output
        }
        self._buffer_memory[tag].append(tool_message)
|