agentflowkit 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentflow/__init__.py +25 -0
- agentflow/agent.py +97 -0
- agentflow/events.py +32 -0
- agentflow/exceptions.py +21 -0
- agentflow/llm.py +96 -0
- agentflow/pipeline.py +170 -0
- agentflow/types.py +40 -0
- agentflowkit-0.1.0.dist-info/METADATA +177 -0
- agentflowkit-0.1.0.dist-info/RECORD +11 -0
- agentflowkit-0.1.0.dist-info/WHEEL +4 -0
- agentflowkit-0.1.0.dist-info/licenses/LICENSE +21 -0
agentflow/__init__.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""agentflow - Lightweight multi-agent AI pipeline framework."""
|
|
2
|
+
|
|
3
|
+
__version__ = "0.1.0"
|
|
4
|
+
|
|
5
|
+
from .agent import Agent, BaseAgent
|
|
6
|
+
from .llm import LLM
|
|
7
|
+
from .pipeline import Pipeline
|
|
8
|
+
from .types import AgentResult, PipelineResult, Event
|
|
9
|
+
from .events import EventEmitter
|
|
10
|
+
from .exceptions import AgentFlowError, AgentError, PipelineError, LLMError
|
|
11
|
+
|
|
12
|
+
__all__ = [
|
|
13
|
+
"Agent",
|
|
14
|
+
"BaseAgent",
|
|
15
|
+
"LLM",
|
|
16
|
+
"Pipeline",
|
|
17
|
+
"AgentResult",
|
|
18
|
+
"PipelineResult",
|
|
19
|
+
"Event",
|
|
20
|
+
"EventEmitter",
|
|
21
|
+
"AgentFlowError",
|
|
22
|
+
"AgentError",
|
|
23
|
+
"PipelineError",
|
|
24
|
+
"LLMError",
|
|
25
|
+
]
|
agentflow/agent.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""Agent definition via decorators and base class."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import time
|
|
6
|
+
from abc import ABC, abstractmethod
|
|
7
|
+
from typing import Any, Callable, Awaitable
|
|
8
|
+
|
|
9
|
+
from .llm import LLM
|
|
10
|
+
from .types import AgentResult
|
|
11
|
+
from .exceptions import AgentError
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class BaseAgent(ABC):
    """Abstract base for fully custom agents.

    Derive from this class when the @Agent decorator API is not flexible
    enough and you need complete control over how the LLM is called.
    """

    # Unique identifier for the agent within a pipeline.
    name: str
    # Short description of the agent's persona/function.
    role: str

    def __init__(self, name: str, role: str):
        self.name = name
        self.role = role

    @abstractmethod
    async def execute(self, task: str, context: dict[str, str], llm: LLM) -> AgentResult:
        """Run this agent against *task*.

        Args:
            task: The task/topic string handed to the pipeline.
            context: Mapping of agent name -> output of previously run agents.
            llm: Provider used for any model calls.

        Returns:
            An AgentResult describing this agent's output.
        """
        ...
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class _DecoratorAgent:
|
|
43
|
+
"""Agent created via the @Agent decorator."""
|
|
44
|
+
|
|
45
|
+
def __init__(self, name: str, role: str, prompt_fn: Callable[..., Awaitable[str]]):
|
|
46
|
+
self.name = name
|
|
47
|
+
self.role = role
|
|
48
|
+
self._prompt_fn = prompt_fn
|
|
49
|
+
|
|
50
|
+
async def execute(self, task: str, context: dict[str, str], llm: LLM) -> AgentResult:
|
|
51
|
+
start = time.perf_counter()
|
|
52
|
+
try:
|
|
53
|
+
user_message = await self._prompt_fn(task, context)
|
|
54
|
+
except Exception as e:
|
|
55
|
+
raise AgentError(self.name, f"Prompt function failed: {e}") from e
|
|
56
|
+
|
|
57
|
+
system_prompt = f"You are a {self.role}. Provide clear, thorough, well-structured responses."
|
|
58
|
+
|
|
59
|
+
try:
|
|
60
|
+
response = await llm.generate([
|
|
61
|
+
{"role": "system", "content": system_prompt},
|
|
62
|
+
{"role": "user", "content": user_message},
|
|
63
|
+
])
|
|
64
|
+
except Exception as e:
|
|
65
|
+
raise AgentError(self.name, str(e)) from e
|
|
66
|
+
|
|
67
|
+
duration = time.perf_counter() - start
|
|
68
|
+
return AgentResult(
|
|
69
|
+
agent=self.name,
|
|
70
|
+
output=response["content"],
|
|
71
|
+
tokens_used=response["tokens"],
|
|
72
|
+
duration=round(duration, 3),
|
|
73
|
+
metadata={"model": response["model"]},
|
|
74
|
+
)
|
|
75
|
+
|
|
76
|
+
def __repr__(self) -> str:
|
|
77
|
+
return f"Agent(name={self.name!r}, role={self.role!r})"
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class Agent:
    """Decorator that turns an async function into a pipeline agent.

    The wrapped function is called with (task, context) and must return
    the user message that will be sent to the LLM.

    Usage:
        @Agent(name="researcher", role="Research Analyst")
        async def researcher(task: str, context: dict) -> str:
            return f"Research this topic: {task}"
    """

    def __init__(self, name: str, role: str):
        self.name = name
        self.role = role

    def __call__(self, fn: Callable[..., Awaitable[str]]) -> _DecoratorAgent:
        # Decorating replaces the function with a ready-to-use agent object.
        return _DecoratorAgent(self.name, self.role, fn)
|
agentflow/events.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""Event system for pipeline streaming."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from typing import Any, AsyncGenerator
|
|
7
|
+
|
|
8
|
+
from .types import Event
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class EventEmitter:
    """Queue-backed async emitter used to stream pipeline progress."""

    def __init__(self):
        # A None sentinel on the queue marks the end of the stream.
        self._queue: asyncio.Queue[Event | None] = asyncio.Queue()

    def emit(self, event_type: str, agent: str = "", **data: Any) -> None:
        """Enqueue an event without blocking the caller."""
        self._queue.put_nowait(Event(type=event_type, agent=agent, data=data))

    def done(self) -> None:
        """Mark the stream as finished; stream() stops after this."""
        self._queue.put_nowait(None)

    async def stream(self) -> AsyncGenerator[Event, None]:
        """Yield queued events until done() has been signalled."""
        while (item := await self._queue.get()) is not None:
            yield item
|
agentflow/exceptions.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""agentflow - Lightweight multi-agent AI pipeline framework."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class AgentFlowError(Exception):
    """Base exception for agentflow.

    Catch this to handle any error raised by the framework.
    """
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class LLMError(AgentFlowError):
    """Raised when an LLM call fails after all retry attempts are exhausted."""
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class AgentError(AgentFlowError):
    """Raised when a single agent's execution fails."""

    # Name of the failing agent, kept for programmatic inspection.
    agent_name: str

    def __init__(self, agent_name: str, message: str):
        self.agent_name = agent_name
        detail = f"Agent '{agent_name}' failed: {message}"
        super().__init__(detail)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class PipelineError(AgentFlowError):
    """Raised when pipeline orchestration fails.

    E.g. adding a duplicate agent name, or declaring a dependency on an
    agent that has not been added yet.
    """
|
agentflow/llm.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
"""LLM provider abstraction for agentflow."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import time
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from openai import AsyncOpenAI, APIError, RateLimitError
|
|
10
|
+
|
|
11
|
+
from .exceptions import LLMError
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class LLM:
    """OpenAI-compatible LLM provider.

    Works with any provider that exposes an OpenAI-compatible API:
    OpenAI, Groq, Together, Ollama, vLLM, etc.

    Args:
        model: Model name (e.g. "gpt-4o", "llama-3.3-70b-versatile").
        api_key: API key for the provider.
        base_url: Base URL for the API (default: OpenAI).
        temperature: Sampling temperature (0.0-2.0).
        max_tokens: Maximum tokens in response.
        max_retries: Number of retries on transient failures.
    """

    def __init__(
        self,
        model: str = "gpt-4o-mini",
        api_key: str | None = None,
        base_url: str | None = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
        max_retries: int = 2,
    ):
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.max_retries = max_retries

        # Forward only the options that were explicitly given; when omitted,
        # the openai client presumably falls back to its own defaults
        # (e.g. the OPENAI_API_KEY environment variable) — TODO confirm.
        kwargs: dict[str, Any] = {}
        if api_key:
            kwargs["api_key"] = api_key
        if base_url:
            kwargs["base_url"] = base_url

        self._client = AsyncOpenAI(**kwargs)

    async def generate(
        self,
        messages: list[dict[str, str]],
        model: str | None = None,
        temperature: float | None = None,
        max_tokens: int | None = None,
    ) -> dict[str, Any]:
        """Generate a completion from the LLM.

        Args:
            messages: Chat messages, each a {"role": ..., "content": ...} dict.
            model: Optional per-call override of the default model.
            temperature: Optional per-call override (0.0 is honoured).
            max_tokens: Optional per-call override.

        Returns:
            Dict with keys: content, tokens, duration, model.

        Raises:
            LLMError: when every attempt (1 + max_retries) has failed.
        """
        # Duration is measured across all retry attempts, not per call.
        start = time.perf_counter()
        last_error: Exception | None = None

        for attempt in range(self.max_retries + 1):
            try:
                response = await self._client.chat.completions.create(
                    model=model or self.model,
                    messages=messages,
                    # "is not None" (rather than "or") so temperature=0.0
                    # is a valid override.
                    temperature=temperature if temperature is not None else self.temperature,
                    max_tokens=max_tokens or self.max_tokens,
                )
                duration = time.perf_counter() - start
                choice = response.choices[0]
                usage = response.usage

                return {
                    "content": choice.message.content or "",
                    "tokens": usage.total_tokens if usage else 0,
                    "duration": round(duration, 3),
                    "model": response.model,
                }

            except RateLimitError as e:
                # Rate limits back off exponentially: 1s, 2s, 4s, ...
                last_error = e
                if attempt < self.max_retries:
                    await asyncio.sleep(2 ** attempt)
                    continue
            except APIError as e:
                # Other API errors retry after a flat 1-second pause.
                last_error = e
                if attempt < self.max_retries:
                    await asyncio.sleep(1)
                    continue

        # Only reached when every attempt raised; surface the last error.
        raise LLMError(f"LLM call failed after {self.max_retries + 1} attempts: {last_error}")
|
agentflow/pipeline.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
"""Pipeline orchestrator for multi-agent execution."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from typing import Any, AsyncGenerator
|
|
7
|
+
|
|
8
|
+
from .agent import _DecoratorAgent, BaseAgent
|
|
9
|
+
from .events import EventEmitter
|
|
10
|
+
from .exceptions import PipelineError, AgentError
|
|
11
|
+
from .llm import LLM
|
|
12
|
+
from .types import AgentResult, PipelineResult, Event
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Either flavour of agent the pipeline accepts: an @Agent-decorated
# function (_DecoratorAgent) or a BaseAgent subclass instance.
AgentLike = _DecoratorAgent | BaseAgent
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class _PipelineNode:
    """Internal node in the pipeline graph: an agent plus its dependencies."""

    def __init__(self, agent: AgentLike, depends_on: list[str]):
        # The wrapped agent instance (decorator- or class-based).
        self.agent = agent
        # Names of agents that must run before this one.
        self.depends_on = depends_on
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class Pipeline:
    """Multi-agent pipeline with dependency resolution.

    Agents run sequentially in dependency order; each agent receives the
    outputs of all previously run agents via the ``context`` dict.

    Args:
        llm: The LLM provider for all agents.

    Usage:
        pipe = Pipeline(llm=llm)
        pipe.add(researcher)
        pipe.add(writer, depends_on=["researcher"])
        result = await pipe.run("AI in Healthcare")
    """

    def __init__(self, llm: LLM):
        self._llm = llm
        # Insertion-ordered nodes; insertion order also seeds the topo sort.
        self._nodes: list[_PipelineNode] = []
        # Names already registered, for duplicate/dependency validation.
        self._agent_names: set[str] = set()

    def add(self, agent: AgentLike, depends_on: list[str] | None = None) -> "Pipeline":
        """Add an agent to the pipeline.

        Args:
            agent: An @Agent-decorated function or BaseAgent subclass instance.
            depends_on: List of agent names this agent depends on.

        Returns:
            self (for chaining).

        Raises:
            PipelineError: on a duplicate name, or a dependency that has not
                been added yet (which also rules out dependency cycles).
        """
        name = agent.name
        if name in self._agent_names:
            raise PipelineError(f"Duplicate agent name: '{name}'")

        deps = depends_on or []
        for dep in deps:
            if dep not in self._agent_names:
                raise PipelineError(
                    f"Agent '{name}' depends on '{dep}', but '{dep}' hasn't been added yet"
                )

        self._nodes.append(_PipelineNode(agent, deps))
        self._agent_names.add(name)
        return self

    def _resolve_order(self) -> list[_PipelineNode]:
        """Topological sort of the pipeline graph.

        Depth-first post-order walk. No cycle detection is needed: add()
        requires every dependency to exist before its dependent, so the
        graph is acyclic by construction.
        """
        resolved: list[_PipelineNode] = []
        seen: set[str] = set()
        node_map = {n.agent.name: n for n in self._nodes}

        def visit(name: str) -> None:
            if name in seen:
                return
            node = node_map[name]
            for dep in node.depends_on:
                if dep not in seen:
                    visit(dep)
            # Post-order: all dependencies are appended before this node.
            seen.add(name)
            resolved.append(node)

        for node in self._nodes:
            visit(node.agent.name)

        return resolved

    async def run(self, task: str) -> PipelineResult:
        """Execute the pipeline sequentially.

        Args:
            task: The task string passed to each agent.

        Returns:
            PipelineResult with all agent results.

        Raises:
            AgentError: propagated unchanged from a failing agent.
        """
        ordered = self._resolve_order()
        results: dict[str, AgentResult] = {}
        context: dict[str, str] = {}

        for node in ordered:
            agent = node.agent
            result = await agent.execute(task, context, self._llm)
            results[agent.name] = result
            # Later agents see this agent's output under its name.
            context[agent.name] = result.output

        last_output = results[ordered[-1].agent.name].output if ordered else ""
        total_tokens = sum(r.tokens_used for r in results.values())
        total_duration = sum(r.duration for r in results.values())

        return PipelineResult(
            output=last_output,
            results=results,
            total_tokens=total_tokens,
            total_duration=round(total_duration, 3),
        )

    async def stream(self, task: str) -> AsyncGenerator[Event, None]:
        """Execute the pipeline and yield events.

        Yields Event objects as agents start, complete, or error.
        The final event has type "pipeline_complete".

        A producer task runs the agents and pushes events onto the
        emitter's queue while this generator drains it.

        NOTE(review): if the consumer abandons this generator before it is
        exhausted, the producer task keeps running in the background.
        """
        emitter = EventEmitter()
        ordered = self._resolve_order()
        results: dict[str, AgentResult] = {}
        context: dict[str, str] = {}

        async def _run() -> None:
            # Producer: executes agents in order and emits progress events.
            try:
                for node in ordered:
                    agent = node.agent
                    emitter.emit("agent_start", agent=agent.name, role=agent.role)

                    try:
                        result = await agent.execute(task, context, self._llm)
                        results[agent.name] = result
                        context[agent.name] = result.output
                        emitter.emit(
                            "agent_complete",
                            agent=agent.name,
                            tokens=result.tokens_used,
                            duration=result.duration,
                            output_preview=result.output[:200],
                        )
                    except AgentError as e:
                        emitter.emit("agent_error", agent=agent.name, error=str(e))
                        raise

                total_tokens = sum(r.tokens_used for r in results.values())
                total_duration = sum(r.duration for r in results.values())
                emitter.emit(
                    "pipeline_complete",
                    total_tokens=total_tokens,
                    total_duration=round(total_duration, 3),
                    agents_completed=len(results),
                )
            except Exception as e:
                # Failures become a "pipeline_error" event rather than an
                # exception escaping the generator.
                emitter.emit("pipeline_error", error=str(e))
            finally:
                # Always unblock the consumer, even on failure.
                emitter.done()

        run_task = asyncio.create_task(_run())

        async for event in emitter.stream():
            yield event

        # _run() swallows its own exceptions, so this await just ensures the
        # producer task has fully finished before the generator ends.
        await run_task
|
agentflow/types.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""Core data models for agentflow."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from datetime import datetime, timezone
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from pydantic import BaseModel, Field
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class AgentResult(BaseModel):
    """Result from a single agent execution."""

    # Name of the agent that produced this result.
    agent: str
    # The agent's output text (the LLM response content).
    output: str
    # Tokens consumed by the underlying LLM call; 0 when unknown.
    tokens_used: int = 0
    # Wall-clock execution time in seconds.
    duration: float = 0.0
    # Free-form extras, e.g. {"model": ...} for decorator agents.
    metadata: dict[str, Any] = Field(default_factory=dict)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class PipelineResult(BaseModel):
    """Result from a full pipeline execution."""

    output: str  # Last agent's output — the pipeline's final answer
    # Per-agent results, keyed by agent name.
    results: dict[str, AgentResult] = Field(default_factory=dict)
    # Sum of tokens over all agents.
    total_tokens: int = 0
    # Sum of all agent durations, in seconds.
    total_duration: float = 0.0

    def get(self, agent_name: str) -> AgentResult | None:
        """Get a specific agent's result, or None if that agent didn't run."""
        return self.results.get(agent_name)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class Event(BaseModel):
    """Pipeline event for streaming."""

    # One of: agent_start, agent_complete, agent_error,
    # pipeline_complete, pipeline_error.
    type: str
    # Name of the agent the event relates to; empty for pipeline-level events.
    agent: str = ""
    # Event-specific payload (tokens, duration, error text, ...).
    data: dict[str, Any] = Field(default_factory=dict)
    # UTC ISO-8601 creation time of the event.
    timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: agentflowkit
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Lightweight multi-agent AI pipeline framework with decorator-based API
|
|
5
|
+
Project-URL: Homepage, https://github.com/KaramQ6/agentflow
|
|
6
|
+
Project-URL: Repository, https://github.com/KaramQ6/agentflow
|
|
7
|
+
Author: KaramQ6
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: agents,ai,async,llm,multi-agent,pipeline
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Framework :: AsyncIO
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Requires-Dist: openai>=1.0.0
|
|
22
|
+
Requires-Dist: pydantic>=2.0.0
|
|
23
|
+
Provides-Extra: dev
|
|
24
|
+
Requires-Dist: build; extra == 'dev'
|
|
25
|
+
Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
|
|
26
|
+
Requires-Dist: pytest>=8.0; extra == 'dev'
|
|
27
|
+
Description-Content-Type: text/markdown
|
|
28
|
+
|
|
29
|
+
# agentflow
|
|
30
|
+
|
|
31
|
+
[](https://pypi.org/project/agentflowkit/)
|
|
32
|
+
[](https://www.python.org/downloads/)
|
|
33
|
+
[](https://opensource.org/licenses/MIT)
|
|
34
|
+
|
|
35
|
+
Lightweight multi-agent AI pipeline framework. Define agents with decorators, wire them into pipelines, stream events in real-time.
|
|
36
|
+
|
|
37
|
+
- **Decorator-based** - Define agents as simple async functions
|
|
38
|
+
- **Async-first** - Built on asyncio, no sync bottlenecks
|
|
39
|
+
- **Event streaming** - Real-time pipeline monitoring via async generators
|
|
40
|
+
- **Provider agnostic** - Works with any OpenAI-compatible API (OpenAI, Groq, Together, Ollama, etc.)
|
|
41
|
+
- **Minimal deps** - Just `openai` + `pydantic`
|
|
42
|
+
|
|
43
|
+
## Install
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
pip install agentflowkit
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## Quick Start
|
|
50
|
+
|
|
51
|
+
```python
|
|
52
|
+
import asyncio
|
|
53
|
+
from agentflow import Agent, Pipeline, LLM
|
|
54
|
+
|
|
55
|
+
# 1. Configure LLM (any OpenAI-compatible provider)
|
|
56
|
+
llm = LLM(
|
|
57
|
+
model="llama-3.3-70b-versatile",
|
|
58
|
+
base_url="https://api.groq.com/openai/v1",
|
|
59
|
+
api_key="your-api-key",
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
# 2. Define agents with decorators
|
|
63
|
+
@Agent(name="researcher", role="Research Analyst")
|
|
64
|
+
async def researcher(task: str, context: dict) -> str:
|
|
65
|
+
return f"Research this topic thoroughly: {task}"
|
|
66
|
+
|
|
67
|
+
@Agent(name="writer", role="Content Writer")
|
|
68
|
+
async def writer(task: str, context: dict) -> str:
|
|
69
|
+
research = context["researcher"]
|
|
70
|
+
return f"Write an article based on:\n{research}"
|
|
71
|
+
|
|
72
|
+
# 3. Build pipeline
|
|
73
|
+
pipe = Pipeline(llm=llm)
|
|
74
|
+
pipe.add(researcher)
|
|
75
|
+
pipe.add(writer, depends_on=["researcher"])
|
|
76
|
+
|
|
77
|
+
# 4. Run
|
|
78
|
+
async def main():
|
|
79
|
+
result = await pipe.run("AI in Healthcare")
|
|
80
|
+
print(result.output)
|
|
81
|
+
print(f"Tokens: {result.total_tokens}")
|
|
82
|
+
|
|
83
|
+
asyncio.run(main())
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## Event Streaming
|
|
87
|
+
|
|
88
|
+
Stream real-time events as agents execute:
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
async for event in pipe.stream("AI in Healthcare"):
|
|
92
|
+
if event.type == "agent_start":
|
|
93
|
+
print(f"{event.agent} started...")
|
|
94
|
+
elif event.type == "agent_complete":
|
|
95
|
+
print(f"{event.agent} done ({event.data['tokens']} tokens)")
|
|
96
|
+
elif event.type == "pipeline_complete":
|
|
97
|
+
print(f"Total: {event.data['total_tokens']} tokens")
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Pipeline Results
|
|
101
|
+
|
|
102
|
+
Access individual agent results:
|
|
103
|
+
|
|
104
|
+
```python
|
|
105
|
+
result = await pipe.run("AI in Healthcare")
|
|
106
|
+
|
|
107
|
+
# Final output (last agent)
|
|
108
|
+
print(result.output)
|
|
109
|
+
|
|
110
|
+
# Individual agent results
|
|
111
|
+
research = result.get("researcher")
|
|
112
|
+
print(research.output)
|
|
113
|
+
print(research.tokens_used)
|
|
114
|
+
print(research.duration)
|
|
115
|
+
|
|
116
|
+
# Totals
|
|
117
|
+
print(result.total_tokens)
|
|
118
|
+
print(result.total_duration)
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
## Advanced: Class-Based Agents
|
|
122
|
+
|
|
123
|
+
For complex agents that need custom logic:
|
|
124
|
+
|
|
125
|
+
```python
|
|
126
|
+
from agentflow import BaseAgent, AgentResult
|
|
127
|
+
|
|
128
|
+
class CustomAgent(BaseAgent):
|
|
129
|
+
def __init__(self):
|
|
130
|
+
super().__init__(name="custom", role="Custom Processor")
|
|
131
|
+
|
|
132
|
+
async def execute(self, task, context, llm):
|
|
133
|
+
# Custom logic here
|
|
134
|
+
response = await llm.generate([
|
|
135
|
+
{"role": "system", "content": f"You are a {self.role}."},
|
|
136
|
+
{"role": "user", "content": task},
|
|
137
|
+
])
|
|
138
|
+
return AgentResult(
|
|
139
|
+
agent=self.name,
|
|
140
|
+
output=response["content"],
|
|
141
|
+
tokens_used=response["tokens"],
|
|
142
|
+
duration=response["duration"],
|
|
143
|
+
)
|
|
144
|
+
|
|
145
|
+
pipe.add(CustomAgent())
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
## Supported Providers
|
|
149
|
+
|
|
150
|
+
Any OpenAI-compatible API works:
|
|
151
|
+
|
|
152
|
+
```python
|
|
153
|
+
# OpenAI
|
|
154
|
+
llm = LLM(model="gpt-4o-mini", api_key="sk-...")
|
|
155
|
+
|
|
156
|
+
# Groq (free tier)
|
|
157
|
+
llm = LLM(model="llama-3.3-70b-versatile",
|
|
158
|
+
base_url="https://api.groq.com/openai/v1",
|
|
159
|
+
api_key="gsk_...")
|
|
160
|
+
|
|
161
|
+
# Ollama (local)
|
|
162
|
+
llm = LLM(model="llama3", base_url="http://localhost:11434/v1")
|
|
163
|
+
|
|
164
|
+
# Together AI
|
|
165
|
+
llm = LLM(model="meta-llama/Llama-3-70b-chat-hf",
|
|
166
|
+
base_url="https://api.together.xyz/v1",
|
|
167
|
+
api_key="...")
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
## Examples
|
|
171
|
+
|
|
172
|
+
- [`examples/research_crew.py`](examples/research_crew.py) - Multi-agent research pipeline
|
|
173
|
+
- [`examples/code_reviewer.py`](examples/code_reviewer.py) - AI code review pipeline
|
|
174
|
+
|
|
175
|
+
## License
|
|
176
|
+
|
|
177
|
+
MIT
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
agentflow/__init__.py,sha256=rcexChol8voXcAacVHhd9oaThKS0cZEPZk3iQV3ZAUo,563
|
|
2
|
+
agentflow/agent.py,sha256=Gy5oLL-tD3ep7rLsWX_tiRzCvdwcymZhBJIAUcDU6Vw,2893
|
|
3
|
+
agentflow/events.py,sha256=i_rVWlg4r2v3pU46Aupv1NLJb_4c-PkXJKL0cc5MkgI,933
|
|
4
|
+
agentflow/exceptions.py,sha256=oNmZALxuHE0YqwAL6BXLlGRj_Sw4LAvxnhjgd3FEC90,551
|
|
5
|
+
agentflow/llm.py,sha256=R-UAYae9eNG_YkpkqChGVOGzmOs7pxWTWjKM9mlUDnE,3048
|
|
6
|
+
agentflow/pipeline.py,sha256=61fIeKEMsD_shFOebfLUzXWkg9EsmP8564XvLonS5sA,5552
|
|
7
|
+
agentflow/types.py,sha256=VLa-e_Bq_G0LEqFjPI3nl_59tmam06QlPVPJo2TilZA,1105
|
|
8
|
+
agentflowkit-0.1.0.dist-info/METADATA,sha256=9DeoONIEqtFf6QX88rSLsU3fQ0kLkbC416l4qJjglvk,5101
|
|
9
|
+
agentflowkit-0.1.0.dist-info/WHEEL,sha256=QccIxa26bgl1E6uMy58deGWi-0aeIkkangHcxk2kWfw,87
|
|
10
|
+
agentflowkit-0.1.0.dist-info/licenses/LICENSE,sha256=hRbBgprHDfoNgRcvsejDAkGyaR4XuKztjvO1ic93iK8,1064
|
|
11
|
+
agentflowkit-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 KaramQ6
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|