flowly-code 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. flowly_code/__init__.py +30 -0
  2. flowly_code/__main__.py +8 -0
  3. flowly_code/activity/__init__.py +1 -0
  4. flowly_code/activity/bus.py +91 -0
  5. flowly_code/activity/events.py +40 -0
  6. flowly_code/agent/__init__.py +8 -0
  7. flowly_code/agent/context.py +485 -0
  8. flowly_code/agent/loop.py +1349 -0
  9. flowly_code/agent/memory.py +109 -0
  10. flowly_code/agent/skills.py +259 -0
  11. flowly_code/agent/subagent.py +249 -0
  12. flowly_code/agent/tools/__init__.py +6 -0
  13. flowly_code/agent/tools/base.py +55 -0
  14. flowly_code/agent/tools/delegate.py +194 -0
  15. flowly_code/agent/tools/dispatch.py +840 -0
  16. flowly_code/agent/tools/docker.py +609 -0
  17. flowly_code/agent/tools/filesystem.py +280 -0
  18. flowly_code/agent/tools/mcp.py +85 -0
  19. flowly_code/agent/tools/message.py +235 -0
  20. flowly_code/agent/tools/registry.py +257 -0
  21. flowly_code/agent/tools/screenshot.py +444 -0
  22. flowly_code/agent/tools/shell.py +166 -0
  23. flowly_code/agent/tools/spawn.py +65 -0
  24. flowly_code/agent/tools/system.py +917 -0
  25. flowly_code/agent/tools/trello.py +420 -0
  26. flowly_code/agent/tools/web.py +139 -0
  27. flowly_code/agent/tools/x.py +399 -0
  28. flowly_code/bus/__init__.py +6 -0
  29. flowly_code/bus/events.py +37 -0
  30. flowly_code/bus/queue.py +81 -0
  31. flowly_code/channels/__init__.py +6 -0
  32. flowly_code/channels/base.py +121 -0
  33. flowly_code/channels/manager.py +135 -0
  34. flowly_code/channels/telegram.py +1132 -0
  35. flowly_code/cli/__init__.py +1 -0
  36. flowly_code/cli/commands.py +1831 -0
  37. flowly_code/cli/setup.py +1356 -0
  38. flowly_code/compaction/__init__.py +39 -0
  39. flowly_code/compaction/estimator.py +88 -0
  40. flowly_code/compaction/pruning.py +223 -0
  41. flowly_code/compaction/service.py +297 -0
  42. flowly_code/compaction/summarizer.py +384 -0
  43. flowly_code/compaction/types.py +71 -0
  44. flowly_code/config/__init__.py +6 -0
  45. flowly_code/config/loader.py +102 -0
  46. flowly_code/config/schema.py +324 -0
  47. flowly_code/exec/__init__.py +39 -0
  48. flowly_code/exec/approvals.py +288 -0
  49. flowly_code/exec/executor.py +184 -0
  50. flowly_code/exec/safety.py +247 -0
  51. flowly_code/exec/types.py +88 -0
  52. flowly_code/gateway/__init__.py +5 -0
  53. flowly_code/gateway/server.py +103 -0
  54. flowly_code/heartbeat/__init__.py +5 -0
  55. flowly_code/heartbeat/service.py +130 -0
  56. flowly_code/multiagent/README.md +248 -0
  57. flowly_code/multiagent/__init__.py +1 -0
  58. flowly_code/multiagent/invoke.py +210 -0
  59. flowly_code/multiagent/orchestrator.py +156 -0
  60. flowly_code/multiagent/router.py +156 -0
  61. flowly_code/multiagent/setup.py +171 -0
  62. flowly_code/pairing/__init__.py +21 -0
  63. flowly_code/pairing/store.py +343 -0
  64. flowly_code/providers/__init__.py +6 -0
  65. flowly_code/providers/base.py +69 -0
  66. flowly_code/providers/litellm_provider.py +178 -0
  67. flowly_code/providers/transcription.py +64 -0
  68. flowly_code/session/__init__.py +5 -0
  69. flowly_code/session/manager.py +249 -0
  70. flowly_code/skills/README.md +24 -0
  71. flowly_code/skills/compact/SKILL.md +27 -0
  72. flowly_code/skills/github/SKILL.md +48 -0
  73. flowly_code/skills/skill-creator/SKILL.md +371 -0
  74. flowly_code/skills/summarize/SKILL.md +67 -0
  75. flowly_code/skills/tmux/SKILL.md +121 -0
  76. flowly_code/skills/tmux/scripts/find-sessions.sh +112 -0
  77. flowly_code/skills/tmux/scripts/wait-for-text.sh +83 -0
  78. flowly_code/skills/weather/SKILL.md +49 -0
  79. flowly_code/utils/__init__.py +5 -0
  80. flowly_code/utils/helpers.py +91 -0
  81. flowly_code-1.0.0.dist-info/METADATA +724 -0
  82. flowly_code-1.0.0.dist-info/RECORD +86 -0
  83. flowly_code-1.0.0.dist-info/WHEEL +4 -0
  84. flowly_code-1.0.0.dist-info/entry_points.txt +2 -0
  85. flowly_code-1.0.0.dist-info/licenses/LICENSE +191 -0
  86. flowly_code-1.0.0.dist-info/licenses/NOTICE +74 -0
@@ -0,0 +1,130 @@
1
+ """Heartbeat service - periodic agent wake-up to check for tasks."""
2
+
3
+ import asyncio
4
+ from pathlib import Path
5
+ from typing import Any, Callable, Coroutine
6
+
7
+ from loguru import logger
8
+
9
# Default wake-up interval between heartbeat ticks: 30 minutes.
DEFAULT_HEARTBEAT_INTERVAL_S = 30 * 60

# The prompt sent to the agent on each heartbeat tick. It points the agent
# at HEARTBEAT.md and tells it to answer with the sentinel token below when
# there is nothing to do.
HEARTBEAT_PROMPT = """Read HEARTBEAT.md in your workspace (if it exists).
Follow any instructions or tasks listed there.
If nothing needs attention, reply with just: HEARTBEAT_OK"""

# Sentinel token in the agent's reply that indicates "nothing to do".
HEARTBEAT_OK_TOKEN = "HEARTBEAT_OK"
19
+
20
+
21
+ def _is_heartbeat_empty(content: str | None) -> bool:
22
+ """Check if HEARTBEAT.md has no actionable content."""
23
+ if not content:
24
+ return True
25
+
26
+ # Lines to skip: empty, headers, HTML comments, empty checkboxes
27
+ skip_patterns = {"- [ ]", "* [ ]", "- [x]", "* [x]"}
28
+
29
+ for line in content.split("\n"):
30
+ line = line.strip()
31
+ if not line or line.startswith("#") or line.startswith("<!--") or line in skip_patterns:
32
+ continue
33
+ return False # Found actionable content
34
+
35
+ return True
36
+
37
+
38
class HeartbeatService:
    """
    Periodic heartbeat service that wakes the agent to check for tasks.

    The agent reads HEARTBEAT.md from the workspace and executes any
    tasks listed there. If nothing needs attention, it replies HEARTBEAT_OK.
    """

    def __init__(
        self,
        workspace: Path,
        on_heartbeat: Callable[[str], Coroutine[Any, Any, str]] | None = None,
        interval_s: int = DEFAULT_HEARTBEAT_INTERVAL_S,
        enabled: bool = True,
    ):
        """Create the service.

        Args:
            workspace: Directory that contains HEARTBEAT.md.
            on_heartbeat: Async callback that sends the heartbeat prompt to
                the agent and returns its textual response.
            interval_s: Seconds between heartbeat ticks.
            enabled: When False, start() is a no-op.
        """
        self.workspace = workspace
        self.on_heartbeat = on_heartbeat
        self.interval_s = interval_s
        self.enabled = enabled
        self._running = False  # loop guard, cleared by stop()
        self._task: asyncio.Task | None = None  # background loop task

    @property
    def heartbeat_file(self) -> Path:
        """Path to HEARTBEAT.md inside the workspace."""
        return self.workspace / "HEARTBEAT.md"

    def _read_heartbeat_file(self) -> str | None:
        """Read HEARTBEAT.md content; None when missing or unreadable."""
        if self.heartbeat_file.exists():
            try:
                return self.heartbeat_file.read_text()
            except Exception:
                # Best-effort: treat an unreadable file like a missing one.
                return None
        return None

    async def start(self) -> None:
        """Start the heartbeat service (no-op when disabled)."""
        if not self.enabled:
            logger.info("Heartbeat disabled")
            return

        self._running = True
        self._task = asyncio.create_task(self._run_loop())
        logger.info(f"Heartbeat started (every {self.interval_s}s)")

    def stop(self) -> None:
        """Stop the heartbeat service and cancel the background loop."""
        self._running = False
        if self._task:
            self._task.cancel()
            self._task = None

    async def _run_loop(self) -> None:
        """Main heartbeat loop: sleep, then tick, until stopped."""
        while self._running:
            try:
                await asyncio.sleep(self.interval_s)
                # Re-check the flag: stop() may have run while we slept.
                if self._running:
                    await self._tick()
            except asyncio.CancelledError:
                break
            except Exception as e:
                # Keep the loop alive across tick failures.
                logger.error(f"Heartbeat error: {e}")

    async def _tick(self) -> None:
        """Execute a single heartbeat tick."""
        content = self._read_heartbeat_file()

        # Skip if HEARTBEAT.md is empty or doesn't exist
        if _is_heartbeat_empty(content):
            logger.debug("Heartbeat: no tasks (HEARTBEAT.md empty)")
            return

        logger.info("Heartbeat: checking for tasks...")

        if self.on_heartbeat:
            try:
                response = await self.on_heartbeat(HEARTBEAT_PROMPT)

                # Normalize BOTH sides before matching. The previous code
                # checked the underscored token against a response with
                # underscores stripped, so "HEARTBEAT_OK" could never match
                # and the OK branch was unreachable.
                normalized = response.upper().replace("_", "")
                if HEARTBEAT_OK_TOKEN.replace("_", "") in normalized:
                    logger.info("Heartbeat: OK (no action needed)")
                else:
                    logger.info("Heartbeat: completed task")

            except Exception as e:
                logger.error(f"Heartbeat execution failed: {e}")

    async def trigger_now(self) -> str | None:
        """Manually trigger a heartbeat, bypassing the HEARTBEAT.md check."""
        if self.on_heartbeat:
            return await self.on_heartbeat(HEARTBEAT_PROMPT)
        return None
@@ -0,0 +1,248 @@
1
+ # Multi-Agent Orchestration
2
+
3
+ Flowly's multi-agent system delegates tasks to external CLI-based AI agents (Claude Code, Codex, Gemini CLI, OpenCode, Droid) and orchestrates team collaboration through chain execution and fan-out patterns.
4
+
5
+ ## Architecture
6
+
7
+ ```
8
+ User message
9
+       │
10
+       ▼
11
+ ┌─────────────┐ @mention? ┌──────────────┐
12
+ │ AgentLoop │ ───────────────── │ AgentRouter │
13
+ │ (main LLM) │ │ (routing) │
14
+ └──────┬──────┘ └───────┬───────┘
15
+ │ │
16
+ │ delegate_to() │ route()
17
+ ▼ ▼
18
+ ┌──────────────┐ ┌───────────────────┐
19
+ │ DelegateTool │ │ TeamOrchestrator │
20
+ │ (fire&forget)│ │ (chain + fan-out) │
21
+ └──────┬───────┘ └─────────┬─────────┘
22
+ │ │
23
+ ▼ ▼
24
+ ┌──────────────────────────────────────────────┐
25
+ │ invoke_agent() │
26
+ │ Spawns CLI subprocess per provider: │
27
+ │ claude / codex / gemini / opencode / droid │
28
+ └──────────────────────────────────────────────┘
29
+ ```
30
+
31
+ ## Key Concepts
32
+
33
+ ### Agents vs AgentLoop
34
+
35
+ These are fundamentally different:
36
+
37
+ | | AgentLoop | Configured Agents |
38
+ |---|---|---|
39
+ | **What** | Main LLM processing engine | CLI subprocess instances |
40
+ | **Lifetime** | Long-lived, handles all messages | Short-lived, one task per invocation |
41
+ | **Provider** | Single LiteLLM provider | Each agent has its own provider/model |
42
+ | **Invocation** | Runs continuously in-process | Spawned as `claude -p "..."`, `codex exec "..."`, etc. |
43
+ | **Tools** | Full tool registry (web, file, exec, MCP, ...) | Only what the CLI tool provides |
44
+ | **Communication** | Message bus | `[@agent_id: message]` tag format in responses |
45
+
46
+ ### Execution Patterns
47
+
48
+ #### 1. Direct Delegation (fire-and-forget)
49
+
50
+ The main AgentLoop calls `delegate_to(agent_id, message)`. The subprocess runs in the background, and the result is delivered asynchronously via the message bus.
51
+
52
+ ```
53
+ User: "fix the login bug"
54
+ → AgentLoop calls delegate_to("coder", "fix the login bug")
55
+ → Returns immediately: "Task delegated to @coder..."
56
+ → Background: claude --model opus -p "fix the login bug"
57
+ → When done: result sent back through bus → AgentLoop summarizes
58
+ ```
59
+
60
+ #### 2. Team Chain (sequential handoff)
61
+
62
+ When routed via `@team`, the leader agent is invoked first. If its response contains a `[@teammate: message]` tag, the orchestrator detects it and invokes that teammate with the message. The chain continues until no teammate is mentioned or `MAX_CHAIN_DEPTH` (10) is reached.
63
+
64
+ ```
65
+ User: "@dev fix and review the auth module"
66
+ → Router: @dev → team leader "coder"
67
+ → Step 1: invoke coder → response contains [@reviewer: check this PR]
68
+ → Step 2: invoke reviewer with coder's message
69
+ → Step 3: reviewer responds (no mention) → chain ends
70
+ → Final: all step responses combined
71
+ ```
72
+
73
+ #### 3. Fan-out (parallel execution)
74
+
75
+ If an agent's response mentions multiple teammates (via tag format), all are invoked in parallel. The chain ends after fan-out.
76
+
77
+ ```
78
+ Step 1: invoke leader
79
+ → Response: [@coder: implement feature] [@tester: write tests]
80
+ Step 2: invoke coder AND tester in parallel (asyncio.gather)
81
+ → Both responses collected
82
+ → Chain ends
83
+ ```
84
+
85
+ ## Files
86
+
87
+ | File | Purpose |
88
+ |------|---------|
89
+ | `router.py` | `@mention` parsing, message routing, teammate validation |
90
+ | `orchestrator.py` | Team chain execution, fan-out, depth limiting |
91
+ | `invoke.py` | CLI subprocess spawning per provider (claude, codex, gemini, ...) |
92
+ | `setup.py` | Agent directory initialization, `AGENTS.md` / `CLAUDE.md` generation |
93
+
94
+ ## Module Details
95
+
96
+ ### router.py — AgentRouter
97
+
98
+ Parses `@agent_id` or `@team_id` prefixes from incoming messages and routes to the correct agent.
99
+
100
+ **Routing priority:**
101
+ 1. `@agent_id` → direct agent match
102
+ 2. `@team_id` → team leader agent
103
+ 3. `@agent_name` → case-insensitive name match
104
+ 4. `@team_name` → case-insensitive team name match
105
+ 5. No prefix → default agent (main AgentLoop handles it)
106
+
107
+ **Teammate mention extraction** from agent responses supports two formats:
108
+ - **Tag format** (preferred): `[@agent_id: message here]` — supports multiple mentions
109
+ - **Bare format** (fallback): `@agent_id` — first valid match only
110
+
111
+ ### orchestrator.py — TeamOrchestrator
112
+
113
+ Executes the chain/fan-out logic:
114
+
115
+ ```python
116
+ result = await orchestrator.execute(
117
+ message="fix the auth bug",
118
+ agent_id="coder",
119
+ team_context=TeamContext(team_id="dev", team=team_config),
120
+ agents=all_agents,
121
+ workspace=workspace_path,
122
+ )
123
+ # result.steps = [ChainStep("coder", "..."), ChainStep("reviewer", "...")]
124
+ # result.final_response = combined output
125
+ ```
126
+
127
+ **Chain rules:**
128
+ - Max depth: 10 steps (configurable via `MAX_CHAIN_DEPTH`)
129
+ - Single mention → sequential handoff (next agent gets `[Message from teammate @prev]:`)
130
+ - Multiple mentions → parallel fan-out (all invoked via `asyncio.gather`)
131
+ - Fan-out terminates the chain (no further handoffs)
132
+ - Errors are caught per-agent and included in results
133
+
134
+ ### invoke.py — CLI Subprocess Invocation
135
+
136
+ Each provider maps to a specific CLI tool:
137
+
138
+ | Provider | CLI Command | Key Flags |
139
+ |----------|-------------|-----------|
140
+ | `anthropic` | `claude` | `--dangerously-skip-permissions`, `--model`, `-c` (continue), `--append-system-prompt`, `--add-dir` |
141
+ | `openai` | `codex exec` | `--skip-git-repo-check`, `--dangerously-bypass-approvals-and-sandbox`, `--json` |
142
+ | `gemini` | `gemini` | `--model`, `-p` |
143
+ | `opencode` | `opencode run` | `--model` |
144
+ | `droid` | `droid exec` | `--auto high`, `--model` |
145
+
146
+ **Model resolution:** Short names are resolved to full IDs:
147
+ - `sonnet` → `claude-sonnet-4-5`
148
+ - `opus` → `claude-opus-4-6`
149
+ - `haiku` → `claude-haiku-4-5`
150
+ - `gpt-5.3-codex` → `gpt-5.3-codex`
151
+
152
+ **Timeout:** 1800 seconds (30 minutes) per invocation.
153
+
154
+ **Codex output:** Parsed from JSONL format — extracts the final `agent_message` from `item.completed` events.
155
+
156
+ ### setup.py — Agent Directory Setup
157
+
158
+ Creates `~/.flowly/workspace/agents/{agent_id}/` with:
159
+
160
+ ```
161
+ agents/{agent_id}/
162
+ ├── AGENTS.md # Team communication instructions
163
+ └── .claude/
164
+ └── CLAUDE.md # Teammate list for Claude Code
165
+ ```
166
+
167
+ **AGENTS.md** contains:
168
+ - Instructions explaining the `[@agent_id: message]` tag system
169
+ - List of teammates with their IDs and models
170
+ - Rules for single/parallel/chain communication
171
+
172
+ Teammate lists are updated between `<!-- TEAMMATES_START -->` / `<!-- TEAMMATES_END -->` markers, so manual edits outside those markers are preserved.
173
+
174
+ ## Configuration
175
+
176
+ ```json
177
+ {
178
+ "agents": {
179
+ "defaults": {
180
+ "model": "anthropic/claude-sonnet-4-5",
181
+ "workspace": "~/.flowly/workspace"
182
+ },
183
+ "agents": {
184
+ "coder": {
185
+ "name": "Code Assistant",
186
+ "provider": "anthropic",
187
+ "model": "opus",
188
+ "persona": ""
189
+ },
190
+ "reviewer": {
191
+ "name": "Code Reviewer",
192
+ "provider": "openai",
193
+ "model": "gpt-5.3-codex",
194
+ "working_directory": "~/projects"
195
+ }
196
+ },
197
+ "teams": {
198
+ "dev": {
199
+ "name": "Development Team",
200
+ "agents": ["coder", "reviewer"],
201
+ "leader_agent": "coder"
202
+ }
203
+ }
204
+ }
205
+ }
206
+ ```
207
+
208
+ ### Agent Fields
209
+
210
+ | Field | Description | Default |
211
+ |-------|-------------|---------|
212
+ | `name` | Display name | agent ID |
213
+ | `provider` | `anthropic`, `openai`, `gemini`, `opencode`, `droid` | `anthropic` |
214
+ | `model` | Short name or full model ID | — |
215
+ | `working_directory` | Where the CLI runs | `$HOME` |
216
+ | `persona` | Agent persona | — |
217
+
218
+ ### Team Fields
219
+
220
+ | Field | Description |
221
+ |-------|-------------|
222
+ | `name` | Team display name |
223
+ | `agents` | List of agent IDs in the team |
224
+ | `leader_agent` | Agent that receives `@team` messages first |
225
+
226
+ ## Integration with AgentLoop
227
+
228
+ The multi-agent system connects to the main AgentLoop through `DelegateTool`:
229
+
230
+ ```python
231
+ # In cli/commands.py gateway() setup:
232
+ from flowly_code.multiagent.router import AgentRouter
233
+ from flowly_code.multiagent.orchestrator import TeamOrchestrator
234
+ from flowly_code.agent.tools.delegate import DelegateTool
235
+
236
+ router = AgentRouter(agents, teams)
237
+ orchestrator = TeamOrchestrator(router)
238
+
239
+ # Setup agent directories with AGENTS.md
240
+ for agent_id, cfg in agents.items():
241
+ ensure_agent_directory(workspace / agent_id, agent_id, agents, teams)
242
+
243
+ # Register delegate_to tool on main agent
244
+ delegate_tool = DelegateTool(agents, teams, workspace, bus)
245
+ agent.tools.register(delegate_tool)
246
+ ```
247
+
248
+ When a `[DELEGATE_RESULT:agent_id]` message comes back through the bus, the routing layer temporarily removes `delegate_to` from the tool registry to prevent infinite re-delegation loops.
@@ -0,0 +1 @@
1
+ """Multi-agent orchestration for flowly."""
@@ -0,0 +1,210 @@
1
+ """Agent invocation via CLI subprocess delegation."""
2
+
3
+ import asyncio
4
+ import json
5
+ from pathlib import Path
6
+
7
+ from loguru import logger
8
+
9
+ from flowly_code.config.schema import MultiAgentConfig
10
+
11
+
12
+ # CLI install hints for error messages
13
+ INSTALL_HINTS = {
14
+ "claude": "npm install -g @anthropic-ai/claude-code",
15
+ "codex": "npm install -g @openai/codex",
16
+ "gemini": "npm install -g @anthropic-ai/gemini-cli",
17
+ "opencode": "brew install opencode OR go install github.com/opencode-ai/opencode@latest",
18
+ "droid": "npm install -g @anthropic-ai/factory",
19
+ }
20
+
21
+ # Short name → full model ID mappings
22
+ CLAUDE_MODELS = {
23
+ "sonnet": "claude-sonnet-4-5",
24
+ "opus": "claude-opus-4-6",
25
+ "haiku": "claude-haiku-4-5",
26
+ }
27
+
28
+ CODEX_MODELS = {
29
+ "gpt-5.3-codex": "gpt-5.3-codex",
30
+ "gpt-5.2": "gpt-5.2",
31
+ }
32
+
33
+
34
+ def resolve_claude_model(short_name: str) -> str:
35
+ """Resolve short model name to full Claude model ID."""
36
+ return CLAUDE_MODELS.get(short_name, short_name)
37
+
38
+
39
+ def resolve_codex_model(short_name: str) -> str:
40
+ """Resolve short model name to full Codex model ID."""
41
+ return CODEX_MODELS.get(short_name, short_name)
42
+
43
+
44
+ def parse_codex_jsonl(output: str) -> str:
45
+ """Parse Codex JSONL output and extract the final agent_message."""
46
+ response = ""
47
+ for line in output.strip().split("\n"):
48
+ try:
49
+ data = json.loads(line)
50
+ if data.get("type") == "item.completed" and data.get("item", {}).get("type") == "agent_message":
51
+ response = data["item"].get("text", "")
52
+ except (json.JSONDecodeError, KeyError):
53
+ continue
54
+ return response or "Sorry, I could not generate a response."
55
+
56
+
57
+ def _build_system_context(agent_id: str, workspace_path: Path) -> str:
58
+ """Build system prompt context from agent's AGENTS.md file.
59
+
60
+ Reads the AGENTS.md from the agent directory and returns it as
61
+ system prompt context for the subprocess.
62
+ """
63
+ agents_md = workspace_path / agent_id / "AGENTS.md"
64
+ if agents_md.exists():
65
+ return agents_md.read_text()
66
+ return ""
67
+
68
+
69
+ async def invoke_agent(
70
+ agent: MultiAgentConfig,
71
+ agent_id: str,
72
+ message: str,
73
+ workspace_path: Path,
74
+ continue_conversation: bool = True,
75
+ timeout: int = 1800,
76
+ ) -> str:
77
+ """Invoke an agent via CLI subprocess.
78
+
79
+ Args:
80
+ agent: Agent configuration.
81
+ agent_id: Agent identifier.
82
+ message: Message to send to the agent.
83
+ workspace_path: Base path for agent working directories.
84
+ continue_conversation: Whether to continue previous conversation.
85
+ timeout: Subprocess timeout in seconds.
86
+
87
+ Returns:
88
+ Agent response text.
89
+ """
90
+ provider = agent.provider or "anthropic"
91
+
92
+ # Working directory: explicit config path, or user's home directory.
93
+ # The agent dir (~/.flowly/workspace/agents/{id}/) only holds AGENTS.md
94
+ # and .claude/CLAUDE.md — the agent should work in the user's project.
95
+ if agent.working_directory:
96
+ working_dir = str(Path(agent.working_directory).expanduser())
97
+ else:
98
+ working_dir = str(Path.home())
99
+
100
+ # Ensure working directory exists
101
+ Path(working_dir).mkdir(parents=True, exist_ok=True)
102
+
103
+ # Agent config directory (holds AGENTS.md and .claude/CLAUDE.md)
104
+ agent_dir = str(workspace_path / agent_id)
105
+
106
+ if provider == "anthropic":
107
+ args = ["claude", "--dangerously-skip-permissions"]
108
+ model_id = resolve_claude_model(agent.model) if agent.model else None
109
+ if model_id:
110
+ args.extend(["--model", model_id])
111
+ if continue_conversation:
112
+ args.append("-c")
113
+
114
+ # Inject teammate context via --append-system-prompt
115
+ system_context = _build_system_context(agent_id, workspace_path)
116
+ if system_context:
117
+ args.extend(["--append-system-prompt", system_context])
118
+
119
+ # Add agent dir so Claude Code can read .claude/CLAUDE.md from there
120
+ args.extend(["--add-dir", agent_dir])
121
+
122
+ args.extend(["-p", message])
123
+
124
+ elif provider == "openai":
125
+ args = ["codex", "exec"]
126
+ if continue_conversation:
127
+ args.extend(["resume", "--last"])
128
+ model_id = resolve_codex_model(agent.model) if agent.model else None
129
+ if model_id:
130
+ args.extend(["--model", model_id])
131
+ args.extend([
132
+ "--skip-git-repo-check",
133
+ "--dangerously-bypass-approvals-and-sandbox",
134
+ "--json",
135
+ message,
136
+ ])
137
+
138
+ elif provider == "gemini":
139
+ args = ["gemini"]
140
+ if agent.model:
141
+ args.extend(["--model", agent.model])
142
+ args.extend(["-p", message])
143
+
144
+ elif provider == "opencode":
145
+ args = ["opencode", "run"]
146
+ if agent.model:
147
+ args.extend(["--model", agent.model])
148
+ args.append(message)
149
+
150
+ elif provider == "droid":
151
+ args = ["droid", "exec", "--auto", "high"]
152
+ if agent.model:
153
+ args.extend(["--model", agent.model])
154
+ args.append(message)
155
+
156
+ else:
157
+ raise ValueError(f"Unsupported provider: {provider}")
158
+
159
+ logger.info(f"Invoking agent @{agent_id} [{provider}/{agent.model}] in {working_dir}")
160
+ return await run_subprocess(args, cwd=working_dir, timeout=timeout, provider=provider)
161
+
162
+
163
+ async def run_subprocess(
164
+ args: list[str],
165
+ cwd: str,
166
+ timeout: int = 1800,
167
+ provider: str = "anthropic",
168
+ ) -> str:
169
+ """Run a CLI command as subprocess and return stdout.
170
+
171
+ Args:
172
+ args: Command and arguments.
173
+ cwd: Working directory.
174
+ timeout: Timeout in seconds.
175
+ provider: Provider name for output parsing.
176
+
177
+ Returns:
178
+ Command output (parsed for codex).
179
+
180
+ Raises:
181
+ RuntimeError: If command fails or times out.
182
+ """
183
+ try:
184
+ proc = await asyncio.create_subprocess_exec(
185
+ *args,
186
+ cwd=cwd,
187
+ stdout=asyncio.subprocess.PIPE,
188
+ stderr=asyncio.subprocess.PIPE,
189
+ )
190
+ except FileNotFoundError:
191
+ cmd = args[0]
192
+ hint = INSTALL_HINTS.get(cmd, f"install '{cmd}' and make sure it's in your PATH")
193
+ raise RuntimeError(f"Command '{cmd}' not found. Install it first: {hint}")
194
+
195
+ try:
196
+ stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout)
197
+ except asyncio.TimeoutError:
198
+ proc.kill()
199
+ raise RuntimeError(f"Agent subprocess timed out after {timeout}s")
200
+
201
+ if proc.returncode != 0:
202
+ error_msg = stderr.decode().strip() or f"Process exited with code {proc.returncode}"
203
+ raise RuntimeError(f"Agent process failed: {error_msg}")
204
+
205
+ output = stdout.decode()
206
+
207
+ if provider == "openai":
208
+ return parse_codex_jsonl(output)
209
+
210
+ return output.strip()