devhive 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devhive-0.1.6.dist-info/METADATA +5 -0
- devhive-0.1.6.dist-info/RECORD +19 -0
- devhive-0.1.6.dist-info/WHEEL +5 -0
- devhive-0.1.6.dist-info/entry_points.txt +3 -0
- devhive-0.1.6.dist-info/top_level.txt +1 -0
- mcp_server/__init__.py +1 -0
- mcp_server/agents/__init__.py +10 -0
- mcp_server/agents/architect.py +28 -0
- mcp_server/agents/archivist.py +19 -0
- mcp_server/agents/auditor.py +28 -0
- mcp_server/agents/base_agent.py +104 -0
- mcp_server/agents/ceo.py +171 -0
- mcp_server/agents/developer.py +44 -0
- mcp_server/agents/explorer.py +36 -0
- mcp_server/agents/proposal.py +30 -0
- mcp_server/agents/qa.py +46 -0
- mcp_server/agents/task_agent.py +27 -0
- mcp_server/cli.py +314 -0
- mcp_server/server.py +801 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
mcp_server/__init__.py,sha256=fZsU_7U8cOD1nFGAGfDkQkbkqTGP6MIfyQu3V_avYWM,21
|
|
2
|
+
mcp_server/cli.py,sha256=dWs1_BnnU6K2I8DCLG-jOyGRyBDP0IC2m1RzLskEa9w,12039
|
|
3
|
+
mcp_server/server.py,sha256=2KpBwiM3fXvj_FhF_S8Cz1P4riGVWMxBFgXCx7v7pNo,28365
|
|
4
|
+
mcp_server/agents/__init__.py,sha256=twIY6ejPe7goNxSg6PmG34uuK_WTRfGnH5qwlODLnY0,508
|
|
5
|
+
mcp_server/agents/architect.py,sha256=RaiTBu14Yaksxsw-J-vrWM3OPwdORgM7qxggta-r1as,1147
|
|
6
|
+
mcp_server/agents/archivist.py,sha256=8WSPLBlsdbXnqJ1DHTTTYvmlJV_NTsXT0UTW36mbiZc,828
|
|
7
|
+
mcp_server/agents/auditor.py,sha256=tmVoEoC_Sgy6xbkBqH655gK8hMnyZz_KdLdo853G984,1244
|
|
8
|
+
mcp_server/agents/base_agent.py,sha256=204h_k95ctt5nfuhdyRREJQqJfChc318dAPsJi6NAS0,3992
|
|
9
|
+
mcp_server/agents/ceo.py,sha256=TNQ4wn6QzMlC17hzT3lzTPTXfsFG_MBZJBplWEE7DaE,5628
|
|
10
|
+
mcp_server/agents/developer.py,sha256=4LfQWnjLARv0K3RGU4lTEi_ITjykrOUPtjwPmZpGpKs,1829
|
|
11
|
+
mcp_server/agents/explorer.py,sha256=YX3MnI_futq812m4UR9RZ8ldV_YUKyCX9edMKI_nvBc,1452
|
|
12
|
+
mcp_server/agents/proposal.py,sha256=45ODWvQaOP8mueZAzdZ9IwLX1c1ayOZU4PRBkOkoXHI,1242
|
|
13
|
+
mcp_server/agents/qa.py,sha256=zEF5TQ-Ll7Dr3g8BOwijp1IvRBJRyJyCtsqITrqBqe0,1850
|
|
14
|
+
mcp_server/agents/task_agent.py,sha256=pLsgnu6CpXgM0Zw4t9imwzQvpx42sDi2RPOXDgwleuE,1151
|
|
15
|
+
devhive-0.1.6.dist-info/METADATA,sha256=8PM2B9c_m3YY4XrjbS6vBGmIFYjeVjJtlV5DsTa21f8,103
|
|
16
|
+
devhive-0.1.6.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
17
|
+
devhive-0.1.6.dist-info/entry_points.txt,sha256=6psqoP5HWwon8SMKeI2Pamsl_jLutNFxUmoV-AtXeyM,85
|
|
18
|
+
devhive-0.1.6.dist-info/top_level.txt,sha256=R49ZBwHkJvJ-AJ8o9wy6MuCs0rvZpyCEuvdwhI4PpHs,11
|
|
19
|
+
devhive-0.1.6.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
mcp_server
|
mcp_server/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# MCP Server Package
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
2
|
+
from mcp_server.agents.explorer import ExplorerAgent
|
|
3
|
+
from mcp_server.agents.proposal import ProposalAgent
|
|
4
|
+
from mcp_server.agents.architect import ArchitectAgent
|
|
5
|
+
from mcp_server.agents.task_agent import TaskAgent
|
|
6
|
+
from mcp_server.agents.developer import DeveloperAgent
|
|
7
|
+
from mcp_server.agents.qa import QAAgent
|
|
8
|
+
from mcp_server.agents.auditor import AuditorAgent
|
|
9
|
+
from mcp_server.agents.archivist import ArchivistAgent
|
|
10
|
+
from mcp_server.agents.ceo import CEOAgent
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class ArchitectAgent(BaseAgent):
    """Tech-lead agent: asks the LLM for an architecture design and stores it."""

    role = "Architect"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Run the architecture-design step and return the saved artifact id."""
        routed_context = self.get_context()
        system_prompt = "You are the Tech Lead. Output JSON only."
        prompt = f"""Design architecture.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: architecture_pattern, components, data_models, apis."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        design = self._parse_json(reply)
        return self.save_artifact("architecture", design)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Architect agent."""
        pattern = data.get("architecture_pattern", "N/A")
        n_components = len(data.get("components", []))
        n_apis = len(data.get("apis", []))
        return (
            f"Designed architecture using {pattern} pattern "
            f"with {n_components} components and {n_apis} APIs."
        )
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class ArchivistAgent(BaseAgent):
    """Final pipeline stage: marks the project completed and records the archive."""

    role = "Archivist"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Flag the project state as completed and return a success message."""
        state = self.state_manager.get_state()
        state["status"] = "completed"
        artifacts = state.setdefault("artifacts", {})
        artifacts["archive"] = "archived"
        self.state_manager.update_state(state)
        return "Project Archived Successfully"

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Archivist agent."""
        # Archivist doesn't use structured data, just returns success message
        return "Project archived successfully. All artifacts and state preserved."
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class AuditorAgent(BaseAgent):
    """Auditor agent: performs a final consistency and security review."""

    role = "Auditor"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Run the verification step and save the resulting artifact."""
        routed_context = self.get_context()
        system_prompt = "You are the Auditor. Output JSON only."
        prompt = f"""Verify the project.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: architecture_consistency (bool), missing_pieces (list), security_risks (list)."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        findings = self._parse_json(reply)
        return self.save_artifact("verification", findings)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Auditor agent."""
        is_consistent = data.get("architecture_consistency", False)
        n_missing = len(data.get("missing_pieces", []))
        n_security = len(data.get("security_risks", []))
        if is_consistent and n_missing == 0:
            status = "✓ Passed"
        else:
            status = "⚠ Issues Found"
        return (
            f"{status}. Missing pieces: {n_missing}, "
            f"Security risks: {n_security}"
        )
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Dict, Any, Optional
|
|
4
|
+
from mcp.types import SamplingMessage, TextContent
|
|
5
|
+
from mcp.server.fastmcp import Context
|
|
6
|
+
from mcp_server.core.project_state_manager import ProjectStateManager
|
|
7
|
+
from mcp_server.core.artifact_manager import ArtifactManager
|
|
8
|
+
from mcp_server.core.context_router import ContextRouter
|
|
9
|
+
from mcp_server.core.llm import LLM
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
class BaseAgent:
    """Shared base class for all pipeline agents.

    Wires up the per-project state, artifact and context managers, and
    provides common helpers for calling the LLM and parsing its JSON
    output. Subclasses set ``role`` and implement ``execute``.
    """

    # Role name used for context routing; overridden by each subclass.
    role: str = "Base"

    def __init__(self, project_name: str):
        self.project_name = project_name
        self.state_manager = ProjectStateManager(project_name)
        self.artifact_manager = ArtifactManager(project_name)
        self.context_router = ContextRouter(self.state_manager, self.artifact_manager)

    def get_context(self) -> Dict[str, Any]:
        """Return the context slice the router prepares for this agent's role."""
        return self.context_router.get_context(self.role)

    def save_artifact(self, step_name: str, content: Dict[str, Any]) -> str:
        """Persist *content* as an artifact and record its id in project state.

        Returns the artifact id assigned by the artifact manager.
        """
        aid = self.artifact_manager.save_artifact(step_name, content)
        self.state_manager.update_artifact(step_name, aid)
        return aid

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """
        Generate a 1-3 sentence executive summary of the agent's work.

        This default implementation provides a basic summary. Subclasses should
        override this method to provide more specific summaries based on their
        role and the data they produce.

        Args:
            data: The artifact data produced by the agent

        Returns:
            A concise 1-3 sentence summary suitable for orchestrator context
        """
        return f"{self.role} completed successfully."

    async def _call_llm(self, ctx: Context, system_prompt: str, user_prompt: str, max_tokens: int = 2000) -> str:
        """Call the LLM via the shared helper and return its decision as a JSON string.

        NOTE(review): ``max_tokens`` is accepted for interface compatibility
        but is not forwarded to ``LLM.generate_json`` — confirm whether the
        helper should honour it.
        """
        decision = await LLM.generate_json(
            ctx,
            system_prompt,
            user_prompt
        )
        return json.dumps(decision)

    async def _call_llm_old(self, ctx: Context, system_prompt: str, user_prompt: str, max_tokens: int = 2000) -> str:
        """Legacy helper that calls the LLM through MCP session sampling.

        Kept for reference; ``_call_llm`` is the current code path.
        Returns the model text, or a JSON error string on failure.
        """
        try:
            messages = [
                {"role": "user", "content": { "type": "text", "text": f"{system_prompt}\n\n{user_prompt}" }}
            ]

            # Using create_message which is standard
            if hasattr(ctx.session, 'create_message'):
                try:
                    result = await ctx.session.create_message(
                        messages=messages,
                        max_tokens=max_tokens,
                        system_prompt=system_prompt
                    )
                # Fix: was a bare ``except:`` which would also swallow
                # KeyboardInterrupt/SystemExit.
                except Exception:
                    # Retry with just user prompt if system prompt arg fails
                    result = await ctx.session.create_message(
                        messages=messages,
                        max_tokens=max_tokens
                    )

                if hasattr(result, 'content') and hasattr(result.content, 'text'):
                    return result.content.text
                return str(result)

            # Fallback for sessions exposing a plain `sample` API.
            if hasattr(ctx.session, 'sample'):
                return str(await ctx.session.sample(messages=messages, max_tokens=max_tokens))

            return '{"error": "No sampling capability found on session"}'

        except Exception as e:
            logger.error(f"LLM Call failed: {e}")
            return json.dumps({"error": str(e)})

    def _parse_json(self, text: str) -> Dict[str, Any]:
        """Best-effort JSON parser for LLM replies.

        Strips Markdown code fences before parsing; on parse failure the raw
        text is returned under a ``raw_text`` key instead of raising.
        """
        text = text.strip()
        if "```json" in text:
            text = text.split("```json")[1].split("```")[0]
        elif "```" in text:
            text = text.split("```")[1].split("```")[0]
        try:
            return json.loads(text)
        # Fix: was a bare ``except:``. json.JSONDecodeError subclasses
        # ValueError, so this catches parse failures without masking
        # KeyboardInterrupt/SystemExit.
        except ValueError:
            return {"raw_text": text}

    async def execute(self, ctx: Context, **kwargs) -> Any:
        """Run the agent's pipeline step. Subclasses must implement this."""
        raise NotImplementedError
|
mcp_server/agents/ceo.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
from mcp_server.core.project_state_manager import ProjectStateManager
|
|
6
|
+
|
|
7
|
+
class CEOAgent(BaseAgent):
    """Orchestrator agent that decides which pipeline role should run next."""

    role = "CEO"

    def get_next_agent_deterministic(self) -> Dict[str, Any]:
        """
        Determine next agent to run based on current state (no LLM needed).

        Rule-based routing over the pipeline, with stages skipped according
        to the complexity reported by the Explorer's artifact:

        - low:    Explorer -> Developer -> QA -> Archivist
        - medium: Explorer -> Proposal -> Developer -> QA -> Archivist
        - high:   Explorer -> Proposal -> Architect -> TaskPlanner ->
                  Developer -> QA -> Auditor -> Archivist

        Returns:
            Dict with 'agent' (role name) and 'reason' (explanation)
        """
        state = self.state_manager.get_state()
        artifacts = state.get("artifacts", {})

        # Exploration always runs first.
        if artifacts.get("exploration") is None:
            return {
                "agent": "Explorer",
                "reason": "Need initial feature analysis and exploration"
            }

        # Determine complexity from the exploration artifact. Default to
        # "high" so any error falls back to the full (safest) pipeline.
        complexity = "high"
        try:
            expl_id = artifacts.get("exploration")
            if expl_id:
                expl_data = self.artifact_manager.load_artifact(expl_id)
                complexity = expl_data.get("complexity", "high").lower()
        except Exception:
            pass  # Fallback to high complexity on error

        # Proposal is skipped for low-complexity work.
        if artifacts.get("proposal") is None and complexity != "low":
            return {
                "agent": "Proposal",
                "reason": "Exploration complete, need feature proposal"
            }

        # Architecture only runs for high-complexity work.
        if artifacts.get("architecture") is None and complexity not in ("low", "medium"):
            return {
                "agent": "Architect",
                "reason": "Proposal complete, need technical architecture design"
            }

        # Task planning only runs for high-complexity work.
        if artifacts.get("tasks") is None and complexity not in ("low", "medium"):
            return {
                "agent": "TaskPlanner",
                "reason": "Architecture complete, need task breakdown"
            }

        if artifacts.get("implementation") is None:
            return {
                "agent": "Developer",
                "reason": "Ready for implementation"
            }

        if artifacts.get("tests") is None:
            return {
                "agent": "QA",
                "reason": "Implementation complete, need tests"
            }

        # Final audit only runs for high-complexity work.
        if artifacts.get("verification") is None and complexity not in ("low", "medium"):
            return {
                "agent": "Auditor",
                "reason": "Tests complete, need final verification"
            }

        if artifacts.get("archive") is None:
            return {
                "agent": "Archivist",
                "reason": "All stages complete, ready to archive project"
            }

        # All done
        return {
            "agent": "Complete",
            "reason": "All pipeline stages finished, project is complete"
        }

    async def execute(self, ctx: Context, **kwargs) -> Dict[str, Any]:
        """
        Original LLM-based decision making (requires sampling support).
        Currently not used in manual workflow; see
        ``get_next_agent_deterministic`` for the rule-based path.
        """
        # Fix: reuse the state manager BaseAgent already constructed instead
        # of building a second ProjectStateManager for the same project.
        state = self.state_manager.get_state()

        context = {
            # Fix: the prompt below reads project_name and stage, which were
            # previously never put in this dict and always rendered as None.
            "project_name": self.project_name,
            "project_status": state.get('status'),
            # NOTE(review): assumes project state carries a "stage" key —
            # confirm against ProjectStateManager.
            "stage": state.get('stage'),
            "artifacts_summary": state.get("artifacts", {})
        }

        sys_prompt = """
You are the CEO orchestrating an AI software development pipeline.

The pipeline is strictly sequential:

1 Explorer
2 Proposal
3 Architect
4 TaskPlanner
5 Developer
6 QA
7 Auditor
8 Archivist

Rules:

- NEVER return Wait unless the pipeline is finished
- ALWAYS select the next missing role
- Each role produces an artifact

Explorer → exploration
Proposal → proposal
Architect → architecture
TaskPlanner → tasks
Developer → implementation
QA → tests
Auditor → verification
Archivist → archived

Return JSON only:
{ "decision": "Run <Role>", "reason": "..." }
"""
        user_prompt = f"""Decide next step.

Project: {context.get('project_name')}
Status: {context.get('project_status')}
Stage: {context.get('stage')}
Artifacts: {json.dumps(context.get('artifacts_summary', {}), indent=2)}

Available Roles: Explorer, Proposal, Architect, TaskPlanner, Developer, QA, Auditor, Archivist.
Return JSON: {{ "decision": "Run <Role>", "reason": "..." }}
If finished, "Run Archivist".
If stage is "initialization", "Run Explorer"
"""
        resp = await self._call_llm(ctx, sys_prompt, user_prompt)
        return self._parse_json(resp)
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class DeveloperAgent(BaseAgent):
    """Developer agent: asks the LLM for an implementation and writes the files."""

    role = "Developer"

    def write_files(self, data: Dict[str, Any]) -> list[str]:
        """Write implementation files to disk. Returns list of file paths."""
        from mcp_server.utils.filesystem import write_file

        written: list[str] = []
        for entry in data.get("files", []):
            if not isinstance(entry, dict):
                continue
            if "path" not in entry or "content" not in entry:
                continue
            write_file(entry["path"], entry["content"])
            written.append(entry["path"])
        return written

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Run the implementation step and save the resulting artifact."""
        routed_context = self.get_context()
        system_prompt = "You are the Developer. Output JSON only."
        prompt = f"""Implement the feature.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: implementation_strategy, file_structure, pseudocode, files (list of {{path, content}})."""

        reply = await self._call_llm(ctx, system_prompt, prompt, max_tokens=4000)
        data = self._parse_json(reply)

        # Persist the generated files and register them in project state.
        paths = self.write_files(data)
        self.state_manager.add_files(paths)
        return self.save_artifact("implementation", data)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Developer agent."""
        n_files = len(data.get("files", []))
        strategy = data.get("implementation_strategy", "N/A")
        # Truncate if too long
        if len(strategy) > 80:
            strategy = strategy[:80] + "..."
        return f"Implemented {n_files} files using strategy: {strategy}"
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class ExplorerAgent(BaseAgent):
    """Analyst agent: turns raw requirements into a structured exploration."""

    role = "Explorer"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Analyze the given requirements and save the exploration artifact."""
        requirements = kwargs.get("requirements", "")
        routed_context = self.get_context()
        system_prompt = """
You are the Analyst (Explorer). Remember always read AGENTS.md or GUIDELINES.md if exists.

Output JSON only.
"""
        prompt = f"""Analyze: {requirements}
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: user_needs, constraints, dependencies, risks."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        analysis = self._parse_json(reply)
        return self.save_artifact("exploration", analysis)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Explorer agent."""
        n_constraints = len(data.get("constraints", []))
        n_deps = len(data.get("dependencies", []))
        user_needs = data.get("user_needs", "N/A")
        # Truncate user_needs if too long
        if len(user_needs) > 80:
            user_needs = user_needs[:80] + "..."
        return (
            f"Analyzed requirements and identified {n_constraints} constraints "
            f"and {n_deps} dependencies. Key user need: {user_needs}"
        )
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class ProposalAgent(BaseAgent):
    """Product-manager agent: turns the exploration into a feature proposal."""

    role = "Proposal"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Draft the feature proposal and save it as an artifact."""
        routed_context = self.get_context()
        system_prompt = "You are the Product Manager. Output JSON only."
        prompt = f"""Create proposal based on exploration.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: feature_description, user_value, acceptance_criteria, scope."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        proposal = self._parse_json(reply)
        return self.save_artifact("proposal", proposal)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for Proposal agent."""
        n_criteria = len(data.get("acceptance_criteria", []))
        feature_desc = data.get("feature_description", "N/A")
        # Truncate if too long
        if len(feature_desc) > 80:
            feature_desc = feature_desc[:80] + "..."
        return (
            f"Created feature proposal with {n_criteria} acceptance criteria. "
            f"Feature: {feature_desc}"
        )
|
mcp_server/agents/qa.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class QAAgent(BaseAgent):
    """QA agent: generates a test suite and writes the test files to disk."""

    role = "QA"

    def write_test_files(self, data: Dict[str, Any]) -> list[str]:
        """Write test files to disk. Returns list of file paths."""
        from mcp_server.utils.filesystem import write_file

        written: list[str] = []
        for entry in data.get("files", []):
            if not isinstance(entry, dict):
                continue
            if "path" not in entry or "content" not in entry:
                continue
            write_file(entry["path"], entry["content"])
            written.append(entry["path"])
        return written

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Ask the LLM for tests, write them to disk, and save the artifact."""
        routed_context = self.get_context()
        system_prompt = "You are QA. Output JSON only."
        prompt = f"""Generate tests.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: test_strategy, unit_tests, validation_plan, files (list of {{path, content}})."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        data = self._parse_json(reply)

        # Persist the generated test files and register them in project state.
        paths = self.write_test_files(data)
        self.state_manager.add_files(paths)
        return self.save_artifact("tests", data)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for QA agent."""
        n_tests = len(data.get("unit_tests", []))
        n_files = len(data.get("files", []))
        strategy = data.get("test_strategy", "N/A")
        # Truncate if too long
        if len(strategy) > 60:
            strategy = strategy[:60] + "..."
        return (
            f"Generated {n_tests} unit tests across {n_files} test files. "
            f"Strategy: {strategy}"
        )
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, Any
|
|
3
|
+
from mcp.server.fastmcp import Context
|
|
4
|
+
from mcp_server.agents.base_agent import BaseAgent
|
|
5
|
+
|
|
6
|
+
class TaskAgent(BaseAgent):
    """Scrum-master agent: breaks the plan down into epics and tasks."""

    role = "TaskPlanner"

    async def execute(self, ctx: Context, **kwargs) -> str:
        """Produce a task breakdown and save it as the tasks artifact."""
        routed_context = self.get_context()
        system_prompt = "You are the Scrum Master. Output JSON only."
        prompt = f"""Break down tasks.
Context: {json.dumps(routed_context, default=str)}
Return JSON with keys: epics, tasks (list of dicts), estimated_complexity."""

        reply = await self._call_llm(ctx, system_prompt, prompt)
        breakdown = self._parse_json(reply)
        return self.save_artifact("tasks", breakdown)

    def generate_summary(self, data: Dict[str, Any]) -> str:
        """Generate executive summary for TaskPlanner agent."""
        n_tasks = len(data.get("tasks", []))
        complexity = data.get("estimated_complexity", "N/A")
        epics = data.get("epics")
        n_epics = len(epics) if isinstance(epics, list) else 0
        return (
            f"Created {n_tasks} tasks across {n_epics} epics. "
            f"Complexity: {complexity}"
        )
|