agentflowkit 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. agentflowkit-0.1.0/.gitattributes +2 -0
  2. agentflowkit-0.1.0/.gitignore +11 -0
  3. agentflowkit-0.1.0/LICENSE +21 -0
  4. agentflowkit-0.1.0/PKG-INFO +177 -0
  5. agentflowkit-0.1.0/README.md +149 -0
  6. agentflowkit-0.1.0/examples/code_reviewer.py +77 -0
  7. agentflowkit-0.1.0/examples/research_crew.py +85 -0
  8. agentflowkit-0.1.0/pyproject.toml +46 -0
  9. agentflowkit-0.1.0/src/agentflow/__init__.py +25 -0
  10. agentflowkit-0.1.0/src/agentflow/agent.py +97 -0
  11. agentflowkit-0.1.0/src/agentflow/events.py +32 -0
  12. agentflowkit-0.1.0/src/agentflow/exceptions.py +21 -0
  13. agentflowkit-0.1.0/src/agentflow/llm.py +96 -0
  14. agentflowkit-0.1.0/src/agentflow/pipeline.py +170 -0
  15. agentflowkit-0.1.0/src/agentflow/types.py +40 -0
  16. agentflowkit-0.1.0/tests/test_agent.py +54 -0
  17. agentflowkit-0.1.0/tests/test_llm.py +33 -0
  18. agentflowkit-0.1.0/tests/test_pipeline.py +161 -0
  19. agentflowkit-0.1.0/tmpclaude-0006-cwd +1 -0
  20. agentflowkit-0.1.0/tmpclaude-07af-cwd +1 -0
  21. agentflowkit-0.1.0/tmpclaude-153f-cwd +1 -0
  22. agentflowkit-0.1.0/tmpclaude-186c-cwd +1 -0
  23. agentflowkit-0.1.0/tmpclaude-1ebb-cwd +1 -0
  24. agentflowkit-0.1.0/tmpclaude-2838-cwd +1 -0
  25. agentflowkit-0.1.0/tmpclaude-2ebe-cwd +1 -0
  26. agentflowkit-0.1.0/tmpclaude-6962-cwd +1 -0
  27. agentflowkit-0.1.0/tmpclaude-6b68-cwd +1 -0
  28. agentflowkit-0.1.0/tmpclaude-8083-cwd +1 -0
  29. agentflowkit-0.1.0/tmpclaude-80d4-cwd +1 -0
  30. agentflowkit-0.1.0/tmpclaude-8eb0-cwd +1 -0
  31. agentflowkit-0.1.0/tmpclaude-94d9-cwd +1 -0
  32. agentflowkit-0.1.0/tmpclaude-a787-cwd +1 -0
  33. agentflowkit-0.1.0/tmpclaude-aea4-cwd +1 -0
  34. agentflowkit-0.1.0/tmpclaude-ba42-cwd +1 -0
  35. agentflowkit-0.1.0/tmpclaude-c025-cwd +1 -0
  36. agentflowkit-0.1.0/tmpclaude-c7b8-cwd +1 -0
  37. agentflowkit-0.1.0/tmpclaude-de78-cwd +1 -0
  38. agentflowkit-0.1.0/tmpclaude-de86-cwd +1 -0
  39. agentflowkit-0.1.0/tmpclaude-e628-cwd +1 -0
  40. agentflowkit-0.1.0/tmpclaude-e792-cwd +1 -0
  41. agentflowkit-0.1.0/tmpclaude-f142-cwd +1 -0
  42. agentflowkit-0.1.0/tmpclaude-f2e4-cwd +1 -0
  43. agentflowkit-0.1.0/tmpclaude-f501-cwd +1 -0
  44. agentflowkit-0.1.0/tmpclaude-fd1d-cwd +1 -0
@@ -0,0 +1,2 @@
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
@@ -0,0 +1,11 @@
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.egg-info/
5
+ dist/
6
+ build/
7
+ .venv/
8
+ venv/
9
+ .env
10
+ .pytest_cache/
11
+ *.egg
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 KaramQ6
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,177 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentflowkit
3
+ Version: 0.1.0
4
+ Summary: Lightweight multi-agent AI pipeline framework with decorator-based API
5
+ Project-URL: Homepage, https://github.com/KaramQ6/agentflow
6
+ Project-URL: Repository, https://github.com/KaramQ6/agentflow
7
+ Author: KaramQ6
8
+ License-Expression: MIT
9
+ License-File: LICENSE
10
+ Keywords: agents,ai,async,llm,multi-agent,pipeline
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Framework :: AsyncIO
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Python: >=3.10
21
+ Requires-Dist: openai>=1.0.0
22
+ Requires-Dist: pydantic>=2.0.0
23
+ Provides-Extra: dev
24
+ Requires-Dist: build; extra == 'dev'
25
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
26
+ Requires-Dist: pytest>=8.0; extra == 'dev'
27
+ Description-Content-Type: text/markdown
28
+
29
+ # agentflow
30
+
31
+ [![PyPI version](https://badge.fury.io/py/agentflowkit.svg)](https://pypi.org/project/agentflowkit/)
32
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
33
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
34
+
35
+ Lightweight multi-agent AI pipeline framework. Define agents with decorators, wire them into pipelines, stream events in real-time.
36
+
37
+ - **Decorator-based** - Define agents as simple async functions
38
+ - **Async-first** - Built on asyncio, no sync bottlenecks
39
+ - **Event streaming** - Real-time pipeline monitoring via async generators
40
+ - **Provider agnostic** - Works with any OpenAI-compatible API (OpenAI, Groq, Together, Ollama, etc.)
41
+ - **Minimal deps** - Just `openai` + `pydantic`
42
+
43
+ ## Install
44
+
45
+ ```bash
46
+ pip install agentflowkit
47
+ ```
48
+
49
+ ## Quick Start
50
+
51
+ ```python
52
+ import asyncio
53
+ from agentflow import Agent, Pipeline, LLM
54
+
55
+ # 1. Configure LLM (any OpenAI-compatible provider)
56
+ llm = LLM(
57
+ model="llama-3.3-70b-versatile",
58
+ base_url="https://api.groq.com/openai/v1",
59
+ api_key="your-api-key",
60
+ )
61
+
62
+ # 2. Define agents with decorators
63
+ @Agent(name="researcher", role="Research Analyst")
64
+ async def researcher(task: str, context: dict) -> str:
65
+ return f"Research this topic thoroughly: {task}"
66
+
67
+ @Agent(name="writer", role="Content Writer")
68
+ async def writer(task: str, context: dict) -> str:
69
+ research = context["researcher"]
70
+ return f"Write an article based on:\n{research}"
71
+
72
+ # 3. Build pipeline
73
+ pipe = Pipeline(llm=llm)
74
+ pipe.add(researcher)
75
+ pipe.add(writer, depends_on=["researcher"])
76
+
77
+ # 4. Run
78
+ async def main():
79
+ result = await pipe.run("AI in Healthcare")
80
+ print(result.output)
81
+ print(f"Tokens: {result.total_tokens}")
82
+
83
+ asyncio.run(main())
84
+ ```
85
+
86
+ ## Event Streaming
87
+
88
+ Stream real-time events as agents execute:
89
+
90
+ ```python
91
+ async for event in pipe.stream("AI in Healthcare"):
92
+ if event.type == "agent_start":
93
+ print(f"{event.agent} started...")
94
+ elif event.type == "agent_complete":
95
+ print(f"{event.agent} done ({event.data['tokens']} tokens)")
96
+ elif event.type == "pipeline_complete":
97
+ print(f"Total: {event.data['total_tokens']} tokens")
98
+ ```
99
+
100
+ ## Pipeline Results
101
+
102
+ Access individual agent results:
103
+
104
+ ```python
105
+ result = await pipe.run("AI in Healthcare")
106
+
107
+ # Final output (last agent)
108
+ print(result.output)
109
+
110
+ # Individual agent results
111
+ research = result.get("researcher")
112
+ print(research.output)
113
+ print(research.tokens_used)
114
+ print(research.duration)
115
+
116
+ # Totals
117
+ print(result.total_tokens)
118
+ print(result.total_duration)
119
+ ```
120
+
121
+ ## Advanced: Class-Based Agents
122
+
123
+ For complex agents that need custom logic:
124
+
125
+ ```python
126
+ from agentflow import BaseAgent, AgentResult
127
+
128
+ class CustomAgent(BaseAgent):
129
+ def __init__(self):
130
+ super().__init__(name="custom", role="Custom Processor")
131
+
132
+ async def execute(self, task, context, llm):
133
+ # Custom logic here
134
+ response = await llm.generate([
135
+ {"role": "system", "content": f"You are a {self.role}."},
136
+ {"role": "user", "content": task},
137
+ ])
138
+ return AgentResult(
139
+ agent=self.name,
140
+ output=response["content"],
141
+ tokens_used=response["tokens"],
142
+ duration=response["duration"],
143
+ )
144
+
145
+ pipe.add(CustomAgent())
146
+ ```
147
+
148
+ ## Supported Providers
149
+
150
+ Any OpenAI-compatible API works:
151
+
152
+ ```python
153
+ # OpenAI
154
+ llm = LLM(model="gpt-4o-mini", api_key="sk-...")
155
+
156
+ # Groq (free tier)
157
+ llm = LLM(model="llama-3.3-70b-versatile",
158
+ base_url="https://api.groq.com/openai/v1",
159
+ api_key="gsk_...")
160
+
161
+ # Ollama (local)
162
+ llm = LLM(model="llama3", base_url="http://localhost:11434/v1")
163
+
164
+ # Together AI
165
+ llm = LLM(model="meta-llama/Llama-3-70b-chat-hf",
166
+ base_url="https://api.together.xyz/v1",
167
+ api_key="...")
168
+ ```
169
+
170
+ ## Examples
171
+
172
+ - [`examples/research_crew.py`](examples/research_crew.py) - Multi-agent research pipeline
173
+ - [`examples/code_reviewer.py`](examples/code_reviewer.py) - AI code review pipeline
174
+
175
+ ## License
176
+
177
+ MIT
@@ -0,0 +1,149 @@
1
+ # agentflow
2
+
3
+ [![PyPI version](https://badge.fury.io/py/agentflowkit.svg)](https://pypi.org/project/agentflowkit/)
4
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
5
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
6
+
7
+ Lightweight multi-agent AI pipeline framework. Define agents with decorators, wire them into pipelines, stream events in real-time.
8
+
9
+ - **Decorator-based** - Define agents as simple async functions
10
+ - **Async-first** - Built on asyncio, no sync bottlenecks
11
+ - **Event streaming** - Real-time pipeline monitoring via async generators
12
+ - **Provider agnostic** - Works with any OpenAI-compatible API (OpenAI, Groq, Together, Ollama, etc.)
13
+ - **Minimal deps** - Just `openai` + `pydantic`
14
+
15
+ ## Install
16
+
17
+ ```bash
18
+ pip install agentflowkit
19
+ ```
20
+
21
+ ## Quick Start
22
+
23
+ ```python
24
+ import asyncio
25
+ from agentflow import Agent, Pipeline, LLM
26
+
27
+ # 1. Configure LLM (any OpenAI-compatible provider)
28
+ llm = LLM(
29
+ model="llama-3.3-70b-versatile",
30
+ base_url="https://api.groq.com/openai/v1",
31
+ api_key="your-api-key",
32
+ )
33
+
34
+ # 2. Define agents with decorators
35
+ @Agent(name="researcher", role="Research Analyst")
36
+ async def researcher(task: str, context: dict) -> str:
37
+ return f"Research this topic thoroughly: {task}"
38
+
39
+ @Agent(name="writer", role="Content Writer")
40
+ async def writer(task: str, context: dict) -> str:
41
+ research = context["researcher"]
42
+ return f"Write an article based on:\n{research}"
43
+
44
+ # 3. Build pipeline
45
+ pipe = Pipeline(llm=llm)
46
+ pipe.add(researcher)
47
+ pipe.add(writer, depends_on=["researcher"])
48
+
49
+ # 4. Run
50
+ async def main():
51
+ result = await pipe.run("AI in Healthcare")
52
+ print(result.output)
53
+ print(f"Tokens: {result.total_tokens}")
54
+
55
+ asyncio.run(main())
56
+ ```
57
+
58
+ ## Event Streaming
59
+
60
+ Stream real-time events as agents execute:
61
+
62
+ ```python
63
+ async for event in pipe.stream("AI in Healthcare"):
64
+ if event.type == "agent_start":
65
+ print(f"{event.agent} started...")
66
+ elif event.type == "agent_complete":
67
+ print(f"{event.agent} done ({event.data['tokens']} tokens)")
68
+ elif event.type == "pipeline_complete":
69
+ print(f"Total: {event.data['total_tokens']} tokens")
70
+ ```
71
+
72
+ ## Pipeline Results
73
+
74
+ Access individual agent results:
75
+
76
+ ```python
77
+ result = await pipe.run("AI in Healthcare")
78
+
79
+ # Final output (last agent)
80
+ print(result.output)
81
+
82
+ # Individual agent results
83
+ research = result.get("researcher")
84
+ print(research.output)
85
+ print(research.tokens_used)
86
+ print(research.duration)
87
+
88
+ # Totals
89
+ print(result.total_tokens)
90
+ print(result.total_duration)
91
+ ```
92
+
93
+ ## Advanced: Class-Based Agents
94
+
95
+ For complex agents that need custom logic:
96
+
97
+ ```python
98
+ from agentflow import BaseAgent, AgentResult
99
+
100
+ class CustomAgent(BaseAgent):
101
+ def __init__(self):
102
+ super().__init__(name="custom", role="Custom Processor")
103
+
104
+ async def execute(self, task, context, llm):
105
+ # Custom logic here
106
+ response = await llm.generate([
107
+ {"role": "system", "content": f"You are a {self.role}."},
108
+ {"role": "user", "content": task},
109
+ ])
110
+ return AgentResult(
111
+ agent=self.name,
112
+ output=response["content"],
113
+ tokens_used=response["tokens"],
114
+ duration=response["duration"],
115
+ )
116
+
117
+ pipe.add(CustomAgent())
118
+ ```
119
+
120
+ ## Supported Providers
121
+
122
+ Any OpenAI-compatible API works:
123
+
124
+ ```python
125
+ # OpenAI
126
+ llm = LLM(model="gpt-4o-mini", api_key="sk-...")
127
+
128
+ # Groq (free tier)
129
+ llm = LLM(model="llama-3.3-70b-versatile",
130
+ base_url="https://api.groq.com/openai/v1",
131
+ api_key="gsk_...")
132
+
133
+ # Ollama (local)
134
+ llm = LLM(model="llama3", base_url="http://localhost:11434/v1")
135
+
136
+ # Together AI
137
+ llm = LLM(model="meta-llama/Llama-3-70b-chat-hf",
138
+ base_url="https://api.together.xyz/v1",
139
+ api_key="...")
140
+ ```
141
+
142
+ ## Examples
143
+
144
+ - [`examples/research_crew.py`](examples/research_crew.py) - Multi-agent research pipeline
145
+ - [`examples/code_reviewer.py`](examples/code_reviewer.py) - AI code review pipeline
146
+
147
+ ## License
148
+
149
+ MIT
@@ -0,0 +1,77 @@
1
+ """Code reviewer example - AI code review pipeline.
2
+
3
+ Run: python examples/code_reviewer.py
4
+
5
+ Requires GROQ_API_KEY environment variable.
6
+ """
7
+
8
+ import asyncio
9
+ import os
10
+
11
+ from agentflow import Agent, Pipeline, LLM
12
+
13
+
14
# LLM configuration: Groq's OpenAI-compatible endpoint.
# NOTE(review): falls back to an empty api_key when GROQ_API_KEY is unset;
# requests will then fail at call time (see module docstring).
llm = LLM(
    model="llama-3.3-70b-versatile",
    base_url="https://api.groq.com/openai/v1",
    api_key=os.environ.get("GROQ_API_KEY", ""),
)
19
+
20
+
21
+ @Agent(name="analyzer", role="Code Analyzer")
22
+ async def analyzer(task: str, context: dict) -> str:
23
+ return (
24
+ f"Analyze the following code for potential issues including bugs, "
25
+ f"security vulnerabilities, performance problems, and code smells. "
26
+ f"List each issue with its severity (critical/warning/info).\n\n"
27
+ f"Code:\n```\n{task}\n```"
28
+ )
29
+
30
+
31
+ @Agent(name="suggester", role="Code Improvement Specialist")
32
+ async def suggester(task: str, context: dict) -> str:
33
+ analysis = context["analyzer"]
34
+ return (
35
+ f"Based on the following code analysis, provide specific code "
36
+ f"improvements. For each issue found, show the exact fix with "
37
+ f"before/after code snippets.\n\n"
38
+ f"Original code:\n```\n{task}\n```\n\n"
39
+ f"Analysis:\n{analysis}"
40
+ )
41
+
42
+
43
# Deliberately flawed snippet fed to the review pipeline: SQL injection via
# f-string interpolation, plus leaking the password field to the caller.
SAMPLE_CODE = """
def get_user(user_id):
    query = f"SELECT * FROM users WHERE id = {user_id}"
    result = db.execute(query)
    data = result.fetchone()
    password = data['password']
    return {"id": data['id'], "name": data['name'], "password": password}
"""
51
+
52
+
53
async def main():
    """Run the two-stage code-review pipeline on SAMPLE_CODE.

    Streams progress events first, then re-runs the pipeline to obtain the
    final result object. Requires the GROQ_API_KEY environment variable.
    """
    # Fail fast with a clear message instead of an opaque HTTP auth error
    # deep inside the first LLM call.
    if not os.environ.get("GROQ_API_KEY"):
        raise SystemExit("GROQ_API_KEY environment variable is not set")

    pipe = Pipeline(llm=llm)
    pipe.add(analyzer)
    pipe.add(suggester, depends_on=["analyzer"])

    # Strip once; both passes below must see the same input.
    code = SAMPLE_CODE.strip()

    print("Code Review Pipeline")
    print("=" * 60)

    # Streaming pass: report per-agent progress in real time.
    async for event in pipe.stream(code):
        if event.type == "agent_start":
            print(f"\n>> {event.agent} reviewing...")
        elif event.type == "agent_complete":
            print(f"   Done ({event.data.get('tokens', 0)} tokens)")
        elif event.type == "pipeline_complete":
            print(f"\nReview complete in {event.data.get('total_duration', 0):.1f}s")

    # Second, non-streaming pass to collect the aggregated result object.
    # NOTE(review): this executes the agents (and bills tokens) a second
    # time; if Pipeline ever exposes the result from stream(), reuse it.
    result = await pipe.run(code)
    print(f"\n{'=' * 60}")
    print("REVIEW RESULT:")
    print("=" * 60)
    print(result.output)


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,85 @@
1
+ """Research crew example - multi-agent research pipeline.
2
+
3
+ Run: python examples/research_crew.py
4
+
5
+ Requires GROQ_API_KEY environment variable or replace the api_key below.
6
+ """
7
+
8
+ import asyncio
9
+ import os
10
+
11
+ from agentflow import Agent, Pipeline, LLM
12
+
13
+
14
# LLM configuration (Groq free tier, OpenAI-compatible endpoint).
# NOTE(review): an unset GROQ_API_KEY yields an empty key and a failure at
# request time — see module docstring.
llm = LLM(
    model="llama-3.3-70b-versatile",
    base_url="https://api.groq.com/openai/v1",
    api_key=os.environ.get("GROQ_API_KEY", ""),
)
20
+
21
+
22
+ @Agent(name="researcher", role="Research Analyst")
23
+ async def researcher(task: str, context: dict) -> str:
24
+ return (
25
+ f"Research the following topic and provide a comprehensive analysis "
26
+ f"with key findings, statistics, and expert opinions:\n\n{task}"
27
+ )
28
+
29
+
30
+ @Agent(name="writer", role="Content Writer")
31
+ async def writer(task: str, context: dict) -> str:
32
+ research = context["researcher"]
33
+ return (
34
+ f"Based on the following research, write a well-structured article "
35
+ f"with an introduction, 3-4 main sections, and a conclusion.\n\n"
36
+ f"Topic: {task}\n\nResearch:\n{research}"
37
+ )
38
+
39
+
40
+ @Agent(name="editor", role="Editor and Fact-Checker")
41
+ async def editor(task: str, context: dict) -> str:
42
+ article = context["writer"]
43
+ return (
44
+ f"Review and improve the following article. Fix any issues with "
45
+ f"clarity, structure, grammar, and factual accuracy. Return the "
46
+ f"final polished version.\n\nArticle:\n{article}"
47
+ )
48
+
49
+
50
async def main():
    """Drive the researcher -> writer -> editor pipeline with live progress."""
    topic = "The Impact of Large Language Models on Software Development in 2025"

    # Wire up the three-stage crew: each stage consumes its predecessor.
    pipe = Pipeline(llm=llm)
    pipe.add(researcher)
    pipe.add(writer, depends_on=["researcher"])
    pipe.add(editor, depends_on=["writer"])

    print(f"Running research crew on: {topic}\n")
    print("=" * 60)

    # First pass: stream events for real-time progress reporting.
    async for event in pipe.stream(topic):
        if event.type == "agent_start":
            print(f"\n>> {event.agent} started...")
        elif event.type == "agent_complete":
            tokens = event.data.get("tokens", 0)
            duration = event.data.get("duration", 0)
            print(f"   {event.agent} done ({tokens} tokens, {duration:.1f}s)")
        elif event.type == "pipeline_complete":
            print(f"\n{'=' * 60}")
            print("Pipeline complete!")
            print(f"  Total tokens: {event.data.get('total_tokens', 0)}")
            print(f"  Total time: {event.data.get('total_duration', 0):.1f}s")

    # Second, non-streaming pass to obtain the result object.
    result = await pipe.run(topic)
    print(f"\n{'=' * 60}")
    print("FINAL ARTICLE:")
    print("=" * 60)
    print(result.output[:2000])  # truncate for terminal readability


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,46 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "agentflowkit"
7
+ version = "0.1.0"
8
+ description = "Lightweight multi-agent AI pipeline framework with decorator-based API"
9
+ readme = "README.md"
10
+ license = "MIT"
11
+ requires-python = ">=3.10"
12
+ authors = [{ name = "KaramQ6" }]
13
+ keywords = ["ai", "agents", "multi-agent", "llm", "pipeline", "async"]
14
+ classifiers = [
15
+ "Development Status :: 3 - Alpha",
16
+ "Intended Audience :: Developers",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Programming Language :: Python :: 3",
19
+ "Programming Language :: Python :: 3.10",
20
+ "Programming Language :: Python :: 3.11",
21
+ "Programming Language :: Python :: 3.12",
22
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
23
+ "Framework :: AsyncIO",
24
+ ]
25
+ dependencies = [
26
+ "openai>=1.0.0",
27
+ "pydantic>=2.0.0",
28
+ ]
29
+
30
+ [project.optional-dependencies]
31
+ dev = [
32
+ "pytest>=8.0",
33
+ "pytest-asyncio>=0.23",
34
+ "build",
35
+ ]
36
+
37
+ [project.urls]
38
+ Homepage = "https://github.com/KaramQ6/agentflow"
39
+ Repository = "https://github.com/KaramQ6/agentflow"
40
+
41
+ [tool.hatch.build.targets.wheel]
42
+ packages = ["src/agentflow"]
43
+
44
+ [tool.pytest.ini_options]
45
+ asyncio_mode = "auto"
46
+ testpaths = ["tests"]
@@ -0,0 +1,25 @@
1
+ """agentflow - Lightweight multi-agent AI pipeline framework."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from .agent import Agent, BaseAgent
6
+ from .llm import LLM
7
+ from .pipeline import Pipeline
8
+ from .types import AgentResult, PipelineResult, Event
9
+ from .events import EventEmitter
10
+ from .exceptions import AgentFlowError, AgentError, PipelineError, LLMError
11
+
12
+ __all__ = [
13
+ "Agent",
14
+ "BaseAgent",
15
+ "LLM",
16
+ "Pipeline",
17
+ "AgentResult",
18
+ "PipelineResult",
19
+ "Event",
20
+ "EventEmitter",
21
+ "AgentFlowError",
22
+ "AgentError",
23
+ "PipelineError",
24
+ "LLMError",
25
+ ]
@@ -0,0 +1,97 @@
1
+ """Agent definition via decorators and base class."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import time
6
+ from abc import ABC, abstractmethod
7
+ from typing import Any, Callable, Awaitable
8
+
9
+ from .llm import LLM
10
+ from .types import AgentResult
11
+ from .exceptions import AgentError
12
+
13
+
14
class BaseAgent(ABC):
    """Abstract base class for agents that need full control.

    Subclass this when the @Agent decorator API is not flexible enough and
    the agent requires custom execution logic.
    """

    # Identity used by the pipeline for wiring and event reporting.
    name: str
    role: str

    def __init__(self, name: str, role: str):
        self.name = name
        self.role = role

    @abstractmethod
    async def execute(self, task: str, context: dict[str, str], llm: LLM) -> AgentResult:
        """Run the agent's task and return its result.

        Args:
            task: The task/topic string.
            context: Dict mapping agent_name -> output from previous agents.
            llm: The LLM provider to use.

        Returns:
            AgentResult with the agent's output.
        """
        ...
40
+
41
+
42
class _DecoratorAgent:
    """Concrete agent produced by the @Agent decorator.

    Wraps a user-supplied async prompt function: the function builds the
    user message, and this class handles the LLM round-trip, timing, and
    error attribution.
    """

    def __init__(self, name: str, role: str, prompt_fn: Callable[..., Awaitable[str]]):
        self.name = name
        self.role = role
        self._prompt_fn = prompt_fn

    async def execute(self, task: str, context: dict[str, str], llm: LLM) -> AgentResult:
        """Build the user message via the wrapped function, then query the LLM."""
        started_at = time.perf_counter()

        # Surface failures in the user's prompt builder as AgentError so the
        # pipeline can attribute them to this agent.
        try:
            user_message = await self._prompt_fn(task, context)
        except Exception as e:
            raise AgentError(self.name, f"Prompt function failed: {e}") from e

        messages = [
            {
                "role": "system",
                "content": f"You are a {self.role}. Provide clear, thorough, well-structured responses.",
            },
            {"role": "user", "content": user_message},
        ]

        try:
            response = await llm.generate(messages)
        except Exception as e:
            raise AgentError(self.name, str(e)) from e

        elapsed = time.perf_counter() - started_at
        return AgentResult(
            agent=self.name,
            output=response["content"],
            tokens_used=response["tokens"],
            duration=round(elapsed, 3),
            metadata={"model": response["model"]},
        )

    def __repr__(self) -> str:
        return f"Agent(name={self.name!r}, role={self.role!r})"
78
+
79
+
80
class Agent:
    """Decorator that turns an async prompt function into an agent.

    The decorated function receives (task, context) and returns the user
    message to send to the LLM.

    Usage:
        @Agent(name="researcher", role="Research Analyst")
        async def researcher(task: str, context: dict) -> str:
            return f"Research this topic: {task}"
    """

    def __init__(self, name: str, role: str):
        self.name = name
        self.role = role

    def __call__(self, fn: Callable[..., Awaitable[str]]) -> _DecoratorAgent:
        # All behavior lives in _DecoratorAgent; this class only captures
        # the (name, role) configuration at decoration time.
        return _DecoratorAgent(self.name, self.role, fn)