devrel_origin-0.2.14-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_origin/__init__.py +15 -0
- devrel_origin/cli/__init__.py +92 -0
- devrel_origin/cli/_common.py +243 -0
- devrel_origin/cli/analytics.py +28 -0
- devrel_origin/cli/argus.py +497 -0
- devrel_origin/cli/auth.py +227 -0
- devrel_origin/cli/config.py +108 -0
- devrel_origin/cli/content.py +259 -0
- devrel_origin/cli/cost.py +108 -0
- devrel_origin/cli/cro.py +298 -0
- devrel_origin/cli/deliverables.py +65 -0
- devrel_origin/cli/docs.py +91 -0
- devrel_origin/cli/doctor.py +178 -0
- devrel_origin/cli/experiment.py +29 -0
- devrel_origin/cli/growth.py +97 -0
- devrel_origin/cli/init.py +472 -0
- devrel_origin/cli/intel.py +27 -0
- devrel_origin/cli/kb.py +96 -0
- devrel_origin/cli/listen.py +31 -0
- devrel_origin/cli/marketing.py +66 -0
- devrel_origin/cli/migrate.py +45 -0
- devrel_origin/cli/run.py +46 -0
- devrel_origin/cli/sales.py +57 -0
- devrel_origin/cli/schedule.py +62 -0
- devrel_origin/cli/synthesize.py +28 -0
- devrel_origin/cli/triage.py +29 -0
- devrel_origin/cli/video.py +35 -0
- devrel_origin/core/__init__.py +58 -0
- devrel_origin/core/agent_config.py +75 -0
- devrel_origin/core/argus.py +964 -0
- devrel_origin/core/atlas.py +1450 -0
- devrel_origin/core/base.py +372 -0
- devrel_origin/core/cyra.py +563 -0
- devrel_origin/core/dex.py +708 -0
- devrel_origin/core/echo.py +614 -0
- devrel_origin/core/growth/__init__.py +27 -0
- devrel_origin/core/growth/recommendations.py +219 -0
- devrel_origin/core/growth/target_kinds.py +51 -0
- devrel_origin/core/iris.py +513 -0
- devrel_origin/core/kai.py +1367 -0
- devrel_origin/core/llm.py +542 -0
- devrel_origin/core/llm_backends.py +274 -0
- devrel_origin/core/mox.py +514 -0
- devrel_origin/core/nova.py +349 -0
- devrel_origin/core/pax.py +1205 -0
- devrel_origin/core/rex.py +532 -0
- devrel_origin/core/sage.py +486 -0
- devrel_origin/core/sentinel.py +385 -0
- devrel_origin/core/types.py +98 -0
- devrel_origin/core/video/__init__.py +22 -0
- devrel_origin/core/video/assembler.py +131 -0
- devrel_origin/core/video/browser_recorder.py +118 -0
- devrel_origin/core/video/desktop_recorder.py +254 -0
- devrel_origin/core/video/overlay_renderer.py +143 -0
- devrel_origin/core/video/script_parser.py +147 -0
- devrel_origin/core/video/tts_engine.py +82 -0
- devrel_origin/core/vox.py +268 -0
- devrel_origin/core/watchdog.py +321 -0
- devrel_origin/project/__init__.py +1 -0
- devrel_origin/project/config.py +75 -0
- devrel_origin/project/cost_sink.py +61 -0
- devrel_origin/project/init.py +104 -0
- devrel_origin/project/paths.py +75 -0
- devrel_origin/project/state.py +241 -0
- devrel_origin/project/templates/__init__.py +4 -0
- devrel_origin/project/templates/config.toml +24 -0
- devrel_origin/project/templates/devrel.gitignore +10 -0
- devrel_origin/project/templates/slop-blocklist.md +45 -0
- devrel_origin/project/templates/style.md +24 -0
- devrel_origin/project/templates/voice.md +29 -0
- devrel_origin/quality/__init__.py +66 -0
- devrel_origin/quality/editorial.py +357 -0
- devrel_origin/quality/persona.py +84 -0
- devrel_origin/quality/readability.py +148 -0
- devrel_origin/quality/slop.py +167 -0
- devrel_origin/quality/style.py +110 -0
- devrel_origin/quality/voice.py +15 -0
- devrel_origin/tools/__init__.py +9 -0
- devrel_origin/tools/analytics.py +304 -0
- devrel_origin/tools/api_client.py +393 -0
- devrel_origin/tools/apollo_client.py +305 -0
- devrel_origin/tools/code_validator.py +428 -0
- devrel_origin/tools/github_tools.py +297 -0
- devrel_origin/tools/instantly_client.py +412 -0
- devrel_origin/tools/kb_harvester.py +340 -0
- devrel_origin/tools/mcp_server.py +578 -0
- devrel_origin/tools/notifications.py +245 -0
- devrel_origin/tools/run_report.py +193 -0
- devrel_origin/tools/scheduler.py +231 -0
- devrel_origin/tools/search_tools.py +321 -0
- devrel_origin/tools/self_improve.py +168 -0
- devrel_origin/tools/sheets.py +236 -0
- devrel_origin-0.2.14.dist-info/METADATA +354 -0
- devrel_origin-0.2.14.dist-info/RECORD +98 -0
- devrel_origin-0.2.14.dist-info/WHEEL +5 -0
- devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
- devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
- devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1450 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Atlas — Orchestrator Agent
|
|
3
|
+
|
|
4
|
+
Coordinates the multi-agent system through task delegation, retry logic,
|
|
5
|
+
cross-agent context sharing, and weekly OKR tracking.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import os
|
|
12
|
+
import random
|
|
13
|
+
import re
|
|
14
|
+
import shutil
|
|
15
|
+
import subprocess
|
|
16
|
+
from contextlib import nullcontext as _nullcontext, suppress
|
|
17
|
+
from dataclasses import dataclass, field
|
|
18
|
+
from datetime import datetime, timedelta, timezone
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import TYPE_CHECKING, Any, Optional
|
|
21
|
+
|
|
22
|
+
if TYPE_CHECKING:
|
|
23
|
+
from devrel_origin.project.paths import ProjectPaths
|
|
24
|
+
from devrel_origin.tools.apollo_client import ApolloClient
|
|
25
|
+
|
|
26
|
+
from devrel_origin.core.agent_config import AgentConfig, load_config
|
|
27
|
+
from devrel_origin.core.argus import Argus
|
|
28
|
+
from devrel_origin.core.dex import Dex
|
|
29
|
+
from devrel_origin.core.echo import Echo
|
|
30
|
+
from devrel_origin.core.iris import Iris
|
|
31
|
+
from devrel_origin.core.kai import Kai
|
|
32
|
+
from devrel_origin.core.llm import LLMClient
|
|
33
|
+
from devrel_origin.core.mox import Mox
|
|
34
|
+
from devrel_origin.core.nova import Nova
|
|
35
|
+
from devrel_origin.core.pax import Pax
|
|
36
|
+
from devrel_origin.core.rex import Rex
|
|
37
|
+
from devrel_origin.core.sage import Sage
|
|
38
|
+
from devrel_origin.core.sentinel import Sentinel
|
|
39
|
+
from devrel_origin.core.vox import Vox
|
|
40
|
+
from devrel_origin.core.watchdog import Watchdog
|
|
41
|
+
from devrel_origin.tools.api_client import PostHogClient
|
|
42
|
+
from devrel_origin.tools.github_tools import GitHubTools
|
|
43
|
+
from devrel_origin.tools.instantly_client import InstantlyClient
|
|
44
|
+
from devrel_origin.tools.search_tools import SearchTools
|
|
45
|
+
|
|
46
|
+
logger = logging.getLogger(__name__)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@dataclass
|
|
50
|
+
class WeeklyMemory:
|
|
51
|
+
"""Summary of a previous week's output for trend detection and dedup."""
|
|
52
|
+
|
|
53
|
+
week_of: str = ""
|
|
54
|
+
content_titles: list[str] = field(default_factory=list)
|
|
55
|
+
pain_points_addressed: list[str] = field(default_factory=list)
|
|
56
|
+
competitors_tracked: list[str] = field(default_factory=list)
|
|
57
|
+
experiments_run: list[str] = field(default_factory=list)
|
|
58
|
+
top_themes: list[str] = field(default_factory=list)
|
|
59
|
+
okr_snapshot: dict[str, Any] = field(default_factory=dict)
|
|
60
|
+
|
|
61
|
+
def to_dict(self) -> dict[str, Any]:
|
|
62
|
+
"""Serialize to a plain dict for JSON and context propagation."""
|
|
63
|
+
return {
|
|
64
|
+
"week_of": self.week_of,
|
|
65
|
+
"content_titles": self.content_titles,
|
|
66
|
+
"pain_points_addressed": self.pain_points_addressed,
|
|
67
|
+
"competitors_tracked": self.competitors_tracked,
|
|
68
|
+
"experiments_run": self.experiments_run,
|
|
69
|
+
"top_themes": self.top_themes,
|
|
70
|
+
"okr_snapshot": self.okr_snapshot,
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
@classmethod
|
|
74
|
+
def from_context(cls, ctx: "SharedContext") -> "WeeklyMemory":
|
|
75
|
+
"""Extract a compact memory summary from a full SharedContext."""
|
|
76
|
+
content_titles = []
|
|
77
|
+
if isinstance(ctx.kai_content, dict):
|
|
78
|
+
title = ctx.kai_content.get("task", "")
|
|
79
|
+
if title:
|
|
80
|
+
content_titles.append(title)
|
|
81
|
+
|
|
82
|
+
pain_points = []
|
|
83
|
+
if isinstance(ctx.iris_themes, dict):
|
|
84
|
+
for t in ctx.iris_themes.get("themes", []):
|
|
85
|
+
if isinstance(t, dict):
|
|
86
|
+
pain_points.append(t.get("title", ""))
|
|
87
|
+
|
|
88
|
+
competitors = []
|
|
89
|
+
if isinstance(ctx.rex_competitive, dict):
|
|
90
|
+
competitors = ctx.rex_competitive.get("competitors_discovered", [])
|
|
91
|
+
|
|
92
|
+
experiments = []
|
|
93
|
+
if isinstance(ctx.nova_experiments, dict):
|
|
94
|
+
for e in ctx.nova_experiments.get("experiments", []):
|
|
95
|
+
if isinstance(e, dict):
|
|
96
|
+
experiments.append(e.get("name", ""))
|
|
97
|
+
|
|
98
|
+
return cls(
|
|
99
|
+
week_of=ctx.week_of,
|
|
100
|
+
content_titles=content_titles,
|
|
101
|
+
pain_points_addressed=pain_points[:10],
|
|
102
|
+
competitors_tracked=competitors[:10],
|
|
103
|
+
experiments_run=experiments[:5],
|
|
104
|
+
top_themes=list(pain_points[:5]),
|
|
105
|
+
okr_snapshot=ctx.okr_progress,
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
@dataclass
|
|
110
|
+
class SharedContext:
|
|
111
|
+
"""Cross-agent context object that flows between specialists."""
|
|
112
|
+
|
|
113
|
+
week_of: str = ""
|
|
114
|
+
sage_triage: dict[str, Any] = field(default_factory=dict)
|
|
115
|
+
echo_social: dict[str, Any] = field(default_factory=dict)
|
|
116
|
+
iris_themes: dict[str, Any] = field(default_factory=dict)
|
|
117
|
+
nova_experiments: dict[str, Any] = field(default_factory=dict)
|
|
118
|
+
kai_content: dict[str, Any] = field(default_factory=dict)
|
|
119
|
+
vox_video: dict[str, Any] = field(default_factory=dict)
|
|
120
|
+
dex_docs: dict[str, Any] = field(default_factory=dict)
|
|
121
|
+
rex_competitive: dict[str, Any] = field(default_factory=dict)
|
|
122
|
+
pax_sales: dict[str, Any] = field(default_factory=dict)
|
|
123
|
+
mox_campaigns: dict[str, Any] = field(default_factory=dict)
|
|
124
|
+
okr_progress: dict[str, Any] = field(default_factory=dict)
|
|
125
|
+
instantly_campaigns: dict[str, Any] = field(default_factory=dict)
|
|
126
|
+
instantly_analytics: dict[str, Any] = field(default_factory=dict)
|
|
127
|
+
instantly_replies: dict[str, Any] = field(default_factory=dict)
|
|
128
|
+
# Argus content performance report (Stage 5b output). Shape mirrors
|
|
129
|
+
# PerformanceReport.to_json() — keys: period_start, period_end,
|
|
130
|
+
# top_performers (list of metric dicts), bottom_performers, trend_signals
|
|
131
|
+
# (list of strings), recommendations (list of {action, target, target_type,
|
|
132
|
+
# rationale, evidence, confidence, source_ids}), sources_ok (dict[str, bool]),
|
|
133
|
+
# insufficient_data (bool), llm_error (str | None), all_primary
|
|
134
|
+
# (dict[content_id, primary_metric]). On Argus failure: {"error": "<reason>"}.
|
|
135
|
+
argus_report: dict[str, Any] = field(default_factory=dict)
|
|
136
|
+
# Cyra CRO report (Stage 5c output). Keys: period_end, funnel_id, sources_ok,
|
|
137
|
+
# dropoffs (list), recommendations (list). On Cyra failure: {"error": "<reason>"}.
|
|
138
|
+
cro_report: dict[str, Any] = field(default_factory=dict)
|
|
139
|
+
previous_weeks: list[WeeklyMemory] = field(default_factory=list)
|
|
140
|
+
|
|
141
|
+
def to_dict(self) -> dict[str, Any]:
|
|
142
|
+
d = {
|
|
143
|
+
"week_of": self.week_of,
|
|
144
|
+
"sage_triage": self.sage_triage,
|
|
145
|
+
"echo_social": self.echo_social,
|
|
146
|
+
"iris_themes": self.iris_themes,
|
|
147
|
+
"nova_experiments": self.nova_experiments,
|
|
148
|
+
"kai_content": self.kai_content,
|
|
149
|
+
"vox_video": self.vox_video,
|
|
150
|
+
"dex_docs": self.dex_docs,
|
|
151
|
+
"rex_competitive": self.rex_competitive,
|
|
152
|
+
"pax_sales": self.pax_sales,
|
|
153
|
+
"mox_campaigns": self.mox_campaigns,
|
|
154
|
+
"okr_progress": self.okr_progress,
|
|
155
|
+
"instantly_campaigns": self.instantly_campaigns,
|
|
156
|
+
"instantly_analytics": self.instantly_analytics,
|
|
157
|
+
"instantly_replies": self.instantly_replies,
|
|
158
|
+
"argus_report": self.argus_report,
|
|
159
|
+
"cro_report": self.cro_report,
|
|
160
|
+
}
|
|
161
|
+
# previous_weeks included as serialized dicts for downstream agents
|
|
162
|
+
# (not persisted into context archive — save() uses this dict minus previous_weeks)
|
|
163
|
+
if self.previous_weeks:
|
|
164
|
+
d["previous_weeks"] = [w.to_dict() for w in self.previous_weeks]
|
|
165
|
+
return d
|
|
166
|
+
|
|
167
|
+
def save(self, archive_dir: Path) -> None:
|
|
168
|
+
"""Persist weekly context to archive (excludes transient previous_weeks)."""
|
|
169
|
+
archive_dir.mkdir(parents=True, exist_ok=True)
|
|
170
|
+
filepath = archive_dir / f"context_{self.week_of}.json"
|
|
171
|
+
d = self.to_dict()
|
|
172
|
+
d.pop("previous_weeks", None) # Don't persist history into archive
|
|
173
|
+
filepath.write_text(json.dumps(d, indent=2, default=str))
|
|
174
|
+
logger.info(f"Archived context to {filepath}")
|
|
175
|
+
|
|
176
|
+
@classmethod
|
|
177
|
+
def load(cls, archive_dir: Path) -> "SharedContext":
|
|
178
|
+
"""Load the most recent archived context."""
|
|
179
|
+
ctx = cls(week_of=datetime.now().strftime("%Y-W%U"))
|
|
180
|
+
if not archive_dir.exists():
|
|
181
|
+
return ctx
|
|
182
|
+
files = sorted(archive_dir.glob("context_*.json"), reverse=True)
|
|
183
|
+
if not files:
|
|
184
|
+
return ctx
|
|
185
|
+
try:
|
|
186
|
+
data = json.loads(files[0].read_text())
|
|
187
|
+
for key, value in data.items():
|
|
188
|
+
if hasattr(ctx, key) and key != "previous_weeks":
|
|
189
|
+
setattr(ctx, key, value)
|
|
190
|
+
except (json.JSONDecodeError, OSError) as e:
|
|
191
|
+
logger.warning(f"Failed to load context from {files[0]}: {e}")
|
|
192
|
+
return ctx
|
|
193
|
+
|
|
194
|
+
@classmethod
|
|
195
|
+
def load_with_history(
|
|
196
|
+
cls,
|
|
197
|
+
archive_dir: Path,
|
|
198
|
+
history_weeks: int = 4,
|
|
199
|
+
) -> "SharedContext":
|
|
200
|
+
"""Load a fresh context with previous weeks' memory summaries.
|
|
201
|
+
|
|
202
|
+
Loads up to *history_weeks* archived contexts, extracts compact
|
|
203
|
+
WeeklyMemory summaries, and attaches them to the new context.
|
|
204
|
+
Downstream agents can use previous_weeks for trend detection,
|
|
205
|
+
content dedup, and continuity.
|
|
206
|
+
"""
|
|
207
|
+
ctx = cls(week_of=datetime.now().strftime("%Y-W%U"))
|
|
208
|
+
if not archive_dir.exists():
|
|
209
|
+
return ctx
|
|
210
|
+
|
|
211
|
+
files = sorted(archive_dir.glob("context_*.json"), reverse=True)
|
|
212
|
+
memories: list[WeeklyMemory] = []
|
|
213
|
+
for f in files[:history_weeks]:
|
|
214
|
+
try:
|
|
215
|
+
data = json.loads(f.read_text())
|
|
216
|
+
prev = cls()
|
|
217
|
+
for key, value in data.items():
|
|
218
|
+
if hasattr(prev, key):
|
|
219
|
+
setattr(prev, key, value)
|
|
220
|
+
memories.append(WeeklyMemory.from_context(prev))
|
|
221
|
+
except (json.JSONDecodeError, OSError) as e:
|
|
222
|
+
logger.warning(f"Failed to load history from {f}: {e}")
|
|
223
|
+
|
|
224
|
+
ctx.previous_weeks = memories
|
|
225
|
+
logger.info(f"Loaded {len(memories)} weeks of history")
|
|
226
|
+
return ctx
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
@dataclass
|
|
230
|
+
class DelegationResult:
|
|
231
|
+
"""Result of a delegated task."""
|
|
232
|
+
|
|
233
|
+
agent: str
|
|
234
|
+
task: str
|
|
235
|
+
success: bool
|
|
236
|
+
output: Any = None
|
|
237
|
+
error: Optional[str] = None
|
|
238
|
+
attempts: int = 1
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
class Atlas:
|
|
242
|
+
"""
|
|
243
|
+
Orchestrator agent that coordinates the multi-agent system.
|
|
244
|
+
|
|
245
|
+
Responsibilities:
|
|
246
|
+
- Delegate tasks to specialist agents
|
|
247
|
+
- Manage cross-agent context sharing
|
|
248
|
+
- Retry failed delegations with exponential backoff
|
|
249
|
+
- Track weekly OKR progress
|
|
250
|
+
- Archive historical context
|
|
251
|
+
"""
|
|
252
|
+
|
|
253
|
+
MAX_RETRIES = 2
|
|
254
|
+
BASE_DELAY = 2.0 # seconds
|
|
255
|
+
AGENT_TIMEOUT = 300.0 # default per-agent timeout (seconds); see DEFAULT_AGENT_TIMEOUTS
|
|
256
|
+
AGENT_CANCEL_GRACE = 5.0 # max seconds to wait for an agent to acknowledge cancellation
|
|
257
|
+
|
|
258
|
+
# Editorial-pipeline agents (Kai/Mox/Pax) run an 8-stage pipeline with revision
|
|
259
|
+
# loops on top of repo-scale prompts. Empirically, on a PostHog-scale codebase
|
|
260
|
+
# Kai exceeds 900s end-to-end (2026-05-08 user run). 1800s gives ~2x headroom
|
|
261
|
+
# for revision rounds; the cost-budget cap in config.toml is a better safeguard
|
|
262
|
+
# than a tight timeout. Override per-agent via config.agent_timeouts.
|
|
263
|
+
DEFAULT_AGENT_TIMEOUTS: dict[str, float] = {
|
|
264
|
+
"kai": 1800.0,
|
|
265
|
+
"mox": 1800.0,
|
|
266
|
+
"pax": 1800.0,
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
def __init__(
|
|
270
|
+
self,
|
|
271
|
+
api_client: PostHogClient,
|
|
272
|
+
knowledge_base_path: Path,
|
|
273
|
+
archive_dir: Path = Path("context_archive"),
|
|
274
|
+
llm_client: Optional[LLMClient] = None,
|
|
275
|
+
github_tools: Optional[GitHubTools] = None,
|
|
276
|
+
search_tools: Optional[SearchTools] = None,
|
|
277
|
+
config: Optional[AgentConfig] = None,
|
|
278
|
+
instantly_client: Optional[InstantlyClient] = None,
|
|
279
|
+
apollo_client: Optional["ApolloClient"] = None,
|
|
280
|
+
project_paths: Optional["ProjectPaths"] = None,
|
|
281
|
+
):
|
|
282
|
+
self.api_client = api_client
|
|
283
|
+
self.knowledge_base_path = knowledge_base_path
|
|
284
|
+
self.archive_dir = archive_dir
|
|
285
|
+
self.llm_client = llm_client
|
|
286
|
+
self.instantly_client = instantly_client
|
|
287
|
+
self.apollo_client = apollo_client
|
|
288
|
+
self.github_tools = github_tools
|
|
289
|
+
self.project_paths = project_paths
|
|
290
|
+
self.config = config or AgentConfig()
|
|
291
|
+
self.context = SharedContext(week_of=datetime.now().strftime("%Y-W%U"))
|
|
292
|
+
|
|
293
|
+
# If the caller passed a project_paths and the state DB exists, wire
|
|
294
|
+
# cost events from the LLMClient into the project's `costs` table.
|
|
295
|
+
if (
|
|
296
|
+
project_paths is not None
|
|
297
|
+
and self.llm_client is not None
|
|
298
|
+
and project_paths.state_db.is_file()
|
|
299
|
+
):
|
|
300
|
+
from devrel_origin.project.cost_sink import make_sqlite_sink
|
|
301
|
+
|
|
302
|
+
self.llm_client.set_cost_sink(make_sqlite_sink(project_paths.state_db))
|
|
303
|
+
|
|
304
|
+
# Apply config retry settings
|
|
305
|
+
self.MAX_RETRIES = self.config.retry_settings.get("max_retries", 2)
|
|
306
|
+
self.BASE_DELAY = self.config.retry_settings.get("initial_delay_seconds", 2.0)
|
|
307
|
+
|
|
308
|
+
# Per-agent timeouts: class defaults overlaid by config overrides
|
|
309
|
+
self.agent_timeouts: dict[str, float] = {
|
|
310
|
+
**self.DEFAULT_AGENT_TIMEOUTS,
|
|
311
|
+
**(self.config.agent_timeouts or {}),
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
# Initialize specialist agents with shared deps
|
|
315
|
+
self.kai = Kai(
|
|
316
|
+
api_client=api_client,
|
|
317
|
+
knowledge_base_path=knowledge_base_path,
|
|
318
|
+
llm_client=llm_client,
|
|
319
|
+
search_tools=search_tools,
|
|
320
|
+
)
|
|
321
|
+
self.sage = Sage(
|
|
322
|
+
api_client=api_client,
|
|
323
|
+
knowledge_base_path=knowledge_base_path,
|
|
324
|
+
github_tools=github_tools,
|
|
325
|
+
)
|
|
326
|
+
self.echo = Echo(
|
|
327
|
+
api_client=api_client,
|
|
328
|
+
knowledge_base_path=knowledge_base_path,
|
|
329
|
+
search_tools=search_tools,
|
|
330
|
+
llm_client=llm_client,
|
|
331
|
+
)
|
|
332
|
+
self.iris = Iris(
|
|
333
|
+
api_client=api_client,
|
|
334
|
+
knowledge_base_path=knowledge_base_path,
|
|
335
|
+
llm_client=llm_client,
|
|
336
|
+
)
|
|
337
|
+
self.nova = Nova(
|
|
338
|
+
api_client=api_client,
|
|
339
|
+
knowledge_base_path=knowledge_base_path,
|
|
340
|
+
)
|
|
341
|
+
self.vox = Vox(
|
|
342
|
+
api_client=api_client,
|
|
343
|
+
knowledge_base_path=knowledge_base_path,
|
|
344
|
+
search_tools=search_tools,
|
|
345
|
+
)
|
|
346
|
+
self.dex = Dex(
|
|
347
|
+
api_client=api_client,
|
|
348
|
+
knowledge_base_path=knowledge_base_path,
|
|
349
|
+
llm_client=llm_client,
|
|
350
|
+
)
|
|
351
|
+
product_name = self.config.product_name
|
|
352
|
+
self.rex = Rex(
|
|
353
|
+
api_client=api_client,
|
|
354
|
+
knowledge_base_path=knowledge_base_path,
|
|
355
|
+
llm_client=llm_client,
|
|
356
|
+
search_tools=search_tools,
|
|
357
|
+
apollo_client=apollo_client,
|
|
358
|
+
product_name=product_name,
|
|
359
|
+
)
|
|
360
|
+
self.pax = Pax(
|
|
361
|
+
api_client=api_client,
|
|
362
|
+
knowledge_base_path=knowledge_base_path,
|
|
363
|
+
llm_client=llm_client,
|
|
364
|
+
instantly_client=instantly_client,
|
|
365
|
+
apollo_client=apollo_client,
|
|
366
|
+
product_name=product_name,
|
|
367
|
+
)
|
|
368
|
+
self.mox = Mox(
|
|
369
|
+
api_client=api_client,
|
|
370
|
+
knowledge_base_path=knowledge_base_path,
|
|
371
|
+
llm_client=llm_client,
|
|
372
|
+
search_tools=search_tools,
|
|
373
|
+
instantly_client=instantly_client,
|
|
374
|
+
product_name=product_name,
|
|
375
|
+
)
|
|
376
|
+
|
|
377
|
+
self.watchdog = Watchdog(
|
|
378
|
+
archive_dir=archive_dir,
|
|
379
|
+
llm_client=llm_client,
|
|
380
|
+
)
|
|
381
|
+
self.sentinel = Sentinel(
|
|
382
|
+
api_client=api_client,
|
|
383
|
+
knowledge_base_path=knowledge_base_path,
|
|
384
|
+
llm_client=llm_client,
|
|
385
|
+
)
|
|
386
|
+
|
|
387
|
+
self._agents = {
|
|
388
|
+
"kai": self.kai,
|
|
389
|
+
"sage": self.sage,
|
|
390
|
+
"echo": self.echo,
|
|
391
|
+
"iris": self.iris,
|
|
392
|
+
"nova": self.nova,
|
|
393
|
+
"vox": self.vox,
|
|
394
|
+
"dex": self.dex,
|
|
395
|
+
"rex": self.rex,
|
|
396
|
+
"pax": self.pax,
|
|
397
|
+
"mox": self.mox,
|
|
398
|
+
"watchdog": self.watchdog,
|
|
399
|
+
"sentinel": self.sentinel,
|
|
400
|
+
}
|
|
401
|
+
|
|
402
|
+
def _resolve_timeout(self, agent_name: str) -> float:
|
|
403
|
+
"""Per-agent execution timeout.
|
|
404
|
+
|
|
405
|
+
Resolution order: config override → class default (DEFAULT_AGENT_TIMEOUTS) →
|
|
406
|
+
global AGENT_TIMEOUT (300s). Editorial-pipeline agents (Kai/Mox/Pax) default
|
|
407
|
+
to 1800s because their 8-stage revision-looped pipeline routinely exceeds 300s.
|
|
408
|
+
"""
|
|
409
|
+
return self.agent_timeouts.get(agent_name, self.AGENT_TIMEOUT)
|
|
410
|
+
|
|
411
|
+
@staticmethod
|
|
412
|
+
def _consume_task_exception(task: asyncio.Task) -> None:
|
|
413
|
+
"""Drain late task results so forced timeouts do not emit warnings."""
|
|
414
|
+
with suppress(asyncio.CancelledError, Exception):
|
|
415
|
+
task.result()
|
|
416
|
+
|
|
417
|
+
async def delegate(
|
|
418
|
+
self,
|
|
419
|
+
agent_name: str,
|
|
420
|
+
task: str,
|
|
421
|
+
context: Optional[dict[str, Any]] = None,
|
|
422
|
+
) -> DelegationResult:
|
|
423
|
+
"""
|
|
424
|
+
Delegate a task to a specialist agent with retry logic.
|
|
425
|
+
|
|
426
|
+
Uses exponential backoff with jitter on failure.
|
|
427
|
+
"""
|
|
428
|
+
agent = self._agents.get(agent_name)
|
|
429
|
+
if not agent:
|
|
430
|
+
return DelegationResult(
|
|
431
|
+
agent=agent_name,
|
|
432
|
+
task=task,
|
|
433
|
+
success=False,
|
|
434
|
+
error=f"Unknown agent: {agent_name}",
|
|
435
|
+
)
|
|
436
|
+
|
|
437
|
+
merged_context = {**self.context.to_dict(), **(context or {})}
|
|
438
|
+
last_error = None
|
|
439
|
+
timeout = self._resolve_timeout(agent_name)
|
|
440
|
+
|
|
441
|
+
# Tag LLM calls with the agent name for cost tracking
|
|
442
|
+
if self.llm_client:
|
|
443
|
+
self.llm_client.set_agent(agent_name) # legacy fallback for non-LLM call sites
|
|
444
|
+
|
|
445
|
+
for attempt in range(1, self.MAX_RETRIES + 2):
|
|
446
|
+
try:
|
|
447
|
+
logger.info(f"Delegating to {agent_name} (attempt {attempt}): {task[:80]}...")
|
|
448
|
+
ctx_mgr = (
|
|
449
|
+
self.llm_client.agent_context(agent_name) if self.llm_client else _nullcontext()
|
|
450
|
+
)
|
|
451
|
+
with ctx_mgr:
|
|
452
|
+
execution = asyncio.create_task(
|
|
453
|
+
agent.execute(task=task, context=merged_context),
|
|
454
|
+
name=f"devrel-{agent_name}-attempt-{attempt}",
|
|
455
|
+
)
|
|
456
|
+
done, _ = await asyncio.wait({execution}, timeout=timeout)
|
|
457
|
+
if not done:
|
|
458
|
+
execution.cancel()
|
|
459
|
+
cancelled, _ = await asyncio.wait(
|
|
460
|
+
{execution}, timeout=self.AGENT_CANCEL_GRACE
|
|
461
|
+
)
|
|
462
|
+
if not cancelled:
|
|
463
|
+
execution.add_done_callback(self._consume_task_exception)
|
|
464
|
+
logger.error(
|
|
465
|
+
"delegation_cancel_grace_exceeded",
|
|
466
|
+
extra={
|
|
467
|
+
"agent": agent_name,
|
|
468
|
+
"task": task[:80],
|
|
469
|
+
"timeout": timeout,
|
|
470
|
+
"cancel_grace": self.AGENT_CANCEL_GRACE,
|
|
471
|
+
},
|
|
472
|
+
)
|
|
473
|
+
else:
|
|
474
|
+
with suppress(asyncio.CancelledError):
|
|
475
|
+
execution.result()
|
|
476
|
+
raise asyncio.TimeoutError
|
|
477
|
+
result = execution.result()
|
|
478
|
+
logger.info(
|
|
479
|
+
"delegation_success",
|
|
480
|
+
extra={"agent": agent_name, "task": task[:80], "attempts": attempt},
|
|
481
|
+
)
|
|
482
|
+
return DelegationResult(
|
|
483
|
+
agent=agent_name,
|
|
484
|
+
task=task,
|
|
485
|
+
success=True,
|
|
486
|
+
output=result,
|
|
487
|
+
attempts=attempt,
|
|
488
|
+
)
|
|
489
|
+
except asyncio.TimeoutError:
|
|
490
|
+
# Don't retry on timeout: a retry would re-burn the same expensive
|
|
491
|
+
# tokens (often $0.30+ per failed editorial-pipeline attempt) without
|
|
492
|
+
# changing the outcome. Surface the failure immediately.
|
|
493
|
+
last_error = f"Agent {agent_name} timed out after {timeout}s"
|
|
494
|
+
logger.warning(last_error)
|
|
495
|
+
return DelegationResult(
|
|
496
|
+
agent=agent_name,
|
|
497
|
+
task=task,
|
|
498
|
+
success=False,
|
|
499
|
+
error=last_error,
|
|
500
|
+
attempts=attempt,
|
|
501
|
+
)
|
|
502
|
+
except Exception as e:
|
|
503
|
+
last_error = str(e)
|
|
504
|
+
logger.warning(
|
|
505
|
+
"delegation_failed",
|
|
506
|
+
extra={
|
|
507
|
+
"agent": agent_name,
|
|
508
|
+
"task": task[:80],
|
|
509
|
+
"attempt": attempt,
|
|
510
|
+
"error": last_error,
|
|
511
|
+
},
|
|
512
|
+
)
|
|
513
|
+
if attempt <= self.MAX_RETRIES:
|
|
514
|
+
delay = self.BASE_DELAY * (2 ** (attempt - 1))
|
|
515
|
+
# Add jitter: 50-150% of calculated delay
|
|
516
|
+
jittered_delay = delay * (0.5 + random.random())
|
|
517
|
+
await asyncio.sleep(jittered_delay)
|
|
518
|
+
|
|
519
|
+
return DelegationResult(
|
|
520
|
+
agent=agent_name,
|
|
521
|
+
task=task,
|
|
522
|
+
success=False,
|
|
523
|
+
error=last_error,
|
|
524
|
+
attempts=self.MAX_RETRIES + 1,
|
|
525
|
+
)
|
|
526
|
+
|
|
527
|
+
def _checkpoint(
|
|
528
|
+
self,
|
|
529
|
+
stage: int,
|
|
530
|
+
completed_agents: set[str] | None = None,
|
|
531
|
+
) -> None:
|
|
532
|
+
"""Save a partial checkpoint after completing a pipeline stage.
|
|
533
|
+
|
|
534
|
+
Checkpoints are named context_{week}_stage{N}.json and allow
|
|
535
|
+
resuming from the last completed stage on crash recovery.
|
|
536
|
+
|
|
537
|
+
``completed_agents`` is the optional set of agent names that
|
|
538
|
+
finished successfully within (or up to) the current stage —
|
|
539
|
+
used by parallel stages to allow partial-progress resume.
|
|
540
|
+
"""
|
|
541
|
+
self.archive_dir.mkdir(parents=True, exist_ok=True)
|
|
542
|
+
filepath = self.archive_dir / f"context_{self.context.week_of}_stage{stage}.json"
|
|
543
|
+
d = self.context.to_dict()
|
|
544
|
+
d.pop("previous_weeks", None)
|
|
545
|
+
d["_checkpoint_stage"] = stage
|
|
546
|
+
d["_completed_agents"] = sorted(completed_agents or [])
|
|
547
|
+
filepath.write_text(json.dumps(d, indent=2, default=str))
|
|
548
|
+
logger.info(f"Checkpoint saved: stage {stage}")
|
|
549
|
+
|
|
550
|
+
@classmethod
|
|
551
|
+
def _load_checkpoint(
|
|
552
|
+
cls, archive_dir: Path, week_of: str
|
|
553
|
+
) -> tuple[int, set[str], SharedContext] | None:
|
|
554
|
+
"""Load the latest checkpoint for the current week, if any.
|
|
555
|
+
|
|
556
|
+
Returns ``(resume_stage, completed_agents, ctx)`` or ``None``.
|
|
557
|
+
``completed_agents`` is the set of agents from the partially-
|
|
558
|
+
completed stage that already succeeded; on resume those are
|
|
559
|
+
skipped and only the failed agents are re-run.
|
|
560
|
+
"""
|
|
561
|
+
for stage in range(6, -1, -1):
|
|
562
|
+
filepath = archive_dir / f"context_{week_of}_stage{stage}.json"
|
|
563
|
+
if filepath.exists():
|
|
564
|
+
try:
|
|
565
|
+
data = json.loads(filepath.read_text())
|
|
566
|
+
ctx = SharedContext(week_of=week_of)
|
|
567
|
+
for key, value in data.items():
|
|
568
|
+
if hasattr(ctx, key) and key not in (
|
|
569
|
+
"_checkpoint_stage",
|
|
570
|
+
"_completed_agents",
|
|
571
|
+
):
|
|
572
|
+
setattr(ctx, key, value)
|
|
573
|
+
completed = set(data.get("_completed_agents", []))
|
|
574
|
+
return data.get("_checkpoint_stage", 0), completed, ctx
|
|
575
|
+
except (json.JSONDecodeError, OSError) as e:
|
|
576
|
+
logger.warning(f"Failed to load checkpoint {filepath}: {e}")
|
|
577
|
+
return None
|
|
578
|
+
|
|
579
|
+
def _cleanup_checkpoints(self) -> None:
|
|
580
|
+
"""Remove checkpoint files after successful completion."""
|
|
581
|
+
if not self.archive_dir.exists():
|
|
582
|
+
return
|
|
583
|
+
for f in self.archive_dir.glob(f"context_{self.context.week_of}_stage*.json"):
|
|
584
|
+
f.unlink(missing_ok=True)
|
|
585
|
+
logger.info("Cleaned up stage checkpoints")
|
|
586
|
+
|
|
587
|
+
def _build_content_brief(self) -> dict[str, Any]:
|
|
588
|
+
"""Create a compact evidence brief for Kai from upstream agents."""
|
|
589
|
+
|
|
590
|
+
def symbols_for(module: dict[str, Any], limit: int = 20) -> list[Any]:
|
|
591
|
+
symbols = module.get("symbols", [])
|
|
592
|
+
if isinstance(symbols, list):
|
|
593
|
+
return symbols[:limit]
|
|
594
|
+
if symbols:
|
|
595
|
+
return [symbols]
|
|
596
|
+
return []
|
|
597
|
+
|
|
598
|
+
themes = self.context.iris_themes.get("themes", [])
|
|
599
|
+
top_theme = themes[0] if themes and isinstance(themes[0], dict) else {}
|
|
600
|
+
issues = [i for i in self.context.sage_triage.get("issues", [])[:8] if isinstance(i, dict)]
|
|
601
|
+
modules = [
|
|
602
|
+
m
|
|
603
|
+
for m in self.context.dex_docs.get("modules", [])
|
|
604
|
+
if isinstance(m, dict) and m.get("path")
|
|
605
|
+
]
|
|
606
|
+
|
|
607
|
+
query_text = " ".join(
|
|
608
|
+
[
|
|
609
|
+
str(top_theme.get("title", "")),
|
|
610
|
+
str(top_theme.get("description", "")),
|
|
611
|
+
" ".join(str(i.get("title", "")) for i in issues),
|
|
612
|
+
]
|
|
613
|
+
).lower()
|
|
614
|
+
tokens = {
|
|
615
|
+
t
|
|
616
|
+
for t in re.findall(r"[a-z0-9_/-]{4,}", query_text)
|
|
617
|
+
if t not in {"issue", "docs", "user", "users", "with", "from"}
|
|
618
|
+
}
|
|
619
|
+
|
|
620
|
+
scored: list[tuple[int, dict[str, Any]]] = []
|
|
621
|
+
for module in modules:
|
|
622
|
+
haystack = " ".join(
|
|
623
|
+
[
|
|
624
|
+
str(module.get("path", "")),
|
|
625
|
+
str(module.get("language", "")),
|
|
626
|
+
str(module.get("docstring", "")),
|
|
627
|
+
" ".join(str(s) for s in symbols_for(module)),
|
|
628
|
+
]
|
|
629
|
+
).lower()
|
|
630
|
+
score = sum(1 for token in tokens if token in haystack)
|
|
631
|
+
if score:
|
|
632
|
+
scored.append((score, module))
|
|
633
|
+
scored.sort(key=lambda item: item[0], reverse=True)
|
|
634
|
+
selected_modules = [m for _, m in scored[:10]] or modules[:10]
|
|
635
|
+
|
|
636
|
+
github_issues = []
|
|
637
|
+
for issue in issues[:6]:
|
|
638
|
+
number = issue.get("number")
|
|
639
|
+
title = issue.get("title", "")
|
|
640
|
+
if number or title:
|
|
641
|
+
github_issues.append(
|
|
642
|
+
{
|
|
643
|
+
"number": number,
|
|
644
|
+
"title": title,
|
|
645
|
+
"product_area": issue.get("product_area", ""),
|
|
646
|
+
"category": issue.get("category", ""),
|
|
647
|
+
}
|
|
648
|
+
)
|
|
649
|
+
|
|
650
|
+
return {
|
|
651
|
+
"topic": top_theme.get("title") or "Top developer pain point",
|
|
652
|
+
"pain_point": {
|
|
653
|
+
"title": top_theme.get("title", ""),
|
|
654
|
+
"description": top_theme.get("description", ""),
|
|
655
|
+
"severity": top_theme.get("severity", 0),
|
|
656
|
+
"category": top_theme.get("category", ""),
|
|
657
|
+
},
|
|
658
|
+
"github_issues": github_issues,
|
|
659
|
+
"source_files": [
|
|
660
|
+
{
|
|
661
|
+
"path": m.get("path", ""),
|
|
662
|
+
"language": m.get("language", ""),
|
|
663
|
+
"symbols": symbols_for(m, limit=8),
|
|
664
|
+
}
|
|
665
|
+
for m in selected_modules
|
|
666
|
+
],
|
|
667
|
+
"allowed_claims": [
|
|
668
|
+
"Use only the listed source files, knowledge-base docs, official docs, and Dex architecture text as evidence.",
|
|
669
|
+
"Mention GitHub issues only when they appear in github_issues.",
|
|
670
|
+
"Treat missing file paths, commands, or APIs as a reason to say the context is insufficient.",
|
|
671
|
+
],
|
|
672
|
+
"forbidden_claims": [
|
|
673
|
+
"Do not invent SDK methods, endpoints, install commands, file paths, benchmarks, or issue numbers.",
|
|
674
|
+
"Do not cite repository internals unless a source file is listed.",
|
|
675
|
+
],
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
def _slug(self, value: str, fallback: str) -> str:
|
|
679
|
+
slug = re.sub(r"[^a-z0-9]+", "-", value.lower()).strip("-")
|
|
680
|
+
return slug[:80] or fallback
|
|
681
|
+
|
|
682
|
+
def _write_weekly_deliverables(self) -> list[str]:
|
|
683
|
+
"""Persist publishable outputs outside the transient context JSON."""
|
|
684
|
+
if not self.project_paths:
|
|
685
|
+
return []
|
|
686
|
+
|
|
687
|
+
out_dir = self.project_paths.deliverables_dir / self.context.week_of
|
|
688
|
+
out_dir.mkdir(parents=True, exist_ok=True)
|
|
689
|
+
written: list[str] = []
|
|
690
|
+
|
|
691
|
+
kai = self.context.kai_content
|
|
692
|
+
if isinstance(kai, dict) and kai.get("status") == "generated" and kai.get("content"):
|
|
693
|
+
slug = self._slug(str(kai.get("task", "kai-content")), "kai-content")
|
|
694
|
+
content_path = out_dir / f"{slug}.md"
|
|
695
|
+
content_path.write_text(str(kai["content"]))
|
|
696
|
+
written.append(str(content_path))
|
|
697
|
+
|
|
698
|
+
trace = {
|
|
699
|
+
"task": kai.get("task"),
|
|
700
|
+
"grounding_sources": kai.get("grounding_sources", []),
|
|
701
|
+
"pain_points_addressed": kai.get("pain_points_addressed", []),
|
|
702
|
+
"real_issues_referenced": kai.get("real_issues_referenced", []),
|
|
703
|
+
"revision": kai.get("revision", {}),
|
|
704
|
+
"code_validation": kai.get("code_validation", {}),
|
|
705
|
+
}
|
|
706
|
+
trace_path = out_dir / f"{slug}.trace.json"
|
|
707
|
+
trace_path.write_text(json.dumps(trace, indent=2, default=str))
|
|
708
|
+
written.append(str(trace_path))
|
|
709
|
+
|
|
710
|
+
dex = self.context.dex_docs
|
|
711
|
+
if isinstance(dex, dict) and (dex.get("llm_summary") or dex.get("architecture_doc")):
|
|
712
|
+
docs_path = out_dir / "dex-repository-summary.md"
|
|
713
|
+
docs_path.write_text(
|
|
714
|
+
"\n\n".join(
|
|
715
|
+
part
|
|
716
|
+
for part in [
|
|
717
|
+
"# Repository Summary",
|
|
718
|
+
str(dex.get("llm_summary", "")).strip(),
|
|
719
|
+
"## Architecture",
|
|
720
|
+
str(dex.get("architecture_doc", "")).strip()[:12000],
|
|
721
|
+
]
|
|
722
|
+
if part.strip()
|
|
723
|
+
)
|
|
724
|
+
)
|
|
725
|
+
written.append(str(docs_path))
|
|
726
|
+
|
|
727
|
+
return written
|
|
728
|
+
|
|
729
|
+
async def run_weekly_cycle(self) -> SharedContext:
|
|
730
|
+
"""
|
|
731
|
+
Execute the full weekly orchestration cycle with checkpointing.
|
|
732
|
+
|
|
733
|
+
Saves progress after each stage group. On restart, resumes from
|
|
734
|
+
the last completed checkpoint instead of re-running everything.
|
|
735
|
+
Produces a run report with timing, cost, and quality data.
|
|
736
|
+
"""
|
|
737
|
+
from devrel_origin.tools.run_report import RunReport
|
|
738
|
+
|
|
739
|
+
run_report = RunReport(
|
|
740
|
+
week_of=self.context.week_of,
|
|
741
|
+
started_at=datetime.now().isoformat(),
|
|
742
|
+
)
|
|
743
|
+
|
|
744
|
+
# Check for existing checkpoint to resume from
|
|
745
|
+
checkpoint = self._load_checkpoint(self.archive_dir, self.context.week_of)
|
|
746
|
+
resume_stage = 0
|
|
747
|
+
completed_agents: set[str] = set()
|
|
748
|
+
if checkpoint:
|
|
749
|
+
resume_stage, completed_agents, restored = checkpoint
|
|
750
|
+
self.context = restored
|
|
751
|
+
run_report.resumed_from_stage = resume_stage
|
|
752
|
+
logger.info(
|
|
753
|
+
f"Resuming from checkpoint: stage {resume_stage} "
|
|
754
|
+
f"(completed_agents={sorted(completed_agents)})"
|
|
755
|
+
)
|
|
756
|
+
|
|
757
|
+
# Load previous weeks' memory for trend detection and dedup
|
|
758
|
+
if resume_stage == 0:
|
|
759
|
+
history_ctx = SharedContext.load_with_history(self.archive_dir)
|
|
760
|
+
self.context.previous_weeks = history_ctx.previous_weeks
|
|
761
|
+
logger.info(
|
|
762
|
+
f"Starting weekly cycle for {self.context.week_of} "
|
|
763
|
+
f"(resume={resume_stage}, history={len(self.context.previous_weeks)} weeks)"
|
|
764
|
+
)
|
|
765
|
+
|
|
766
|
+
# Stage 0: Watchdog health check (pre-flight)
|
|
767
|
+
if resume_stage <= 0 and "watchdog" not in completed_agents:
|
|
768
|
+
watchdog_result = await self.delegate(
|
|
769
|
+
"watchdog",
|
|
770
|
+
"Run system health check. Verify all integrations are "
|
|
771
|
+
"reachable and check for stale agent outputs from last cycle.",
|
|
772
|
+
)
|
|
773
|
+
if watchdog_result.success:
|
|
774
|
+
self.context.okr_progress["pre_health"] = watchdog_result.output
|
|
775
|
+
completed_agents.add("watchdog")
|
|
776
|
+
|
|
777
|
+
# Stage 1: Sage + Echo + Dex in parallel (no cross-dependencies)
|
|
778
|
+
if resume_stage <= 1:
|
|
779
|
+
stage_1_agents = ["sage", "echo", "dex"]
|
|
780
|
+
stage_1_pending = [a for a in stage_1_agents if a not in completed_agents]
|
|
781
|
+
if stage_1_pending:
|
|
782
|
+
tasks_1 = {
|
|
783
|
+
"sage": (
|
|
784
|
+
"Triage GitHub issues from the past 7 days. Categorize by type, "
|
|
785
|
+
"analyze sentiment, flag churn risks, and identify community champions."
|
|
786
|
+
),
|
|
787
|
+
"echo": (
|
|
788
|
+
"Scan Reddit, Hacker News, and Twitter/X for OpenClaw mentions. "
|
|
789
|
+
"Identify engagement opportunities and flag reputation risks."
|
|
790
|
+
),
|
|
791
|
+
"dex": (
|
|
792
|
+
"Generate technical documentation for the repository. "
|
|
793
|
+
"Produce an architecture overview and API reference."
|
|
794
|
+
),
|
|
795
|
+
}
|
|
796
|
+
coros = [self.delegate(a, tasks_1[a]) for a in stage_1_pending]
|
|
797
|
+
results = await asyncio.gather(*coros)
|
|
798
|
+
for agent_name, res in zip(stage_1_pending, results, strict=True):
|
|
799
|
+
if res.success:
|
|
800
|
+
if agent_name == "sage":
|
|
801
|
+
self.context.sage_triage = res.output
|
|
802
|
+
elif agent_name == "echo":
|
|
803
|
+
self.context.echo_social = res.output
|
|
804
|
+
elif agent_name == "dex":
|
|
805
|
+
self.context.dex_docs = res.output
|
|
806
|
+
completed_agents.add(agent_name)
|
|
807
|
+
self._checkpoint(1, completed_agents=completed_agents)
|
|
808
|
+
|
|
809
|
+
# Stage 2: Rex + Iris in parallel (both use Sage + Echo, independent)
|
|
810
|
+
if resume_stage <= 2:
|
|
811
|
+
stage_2_agents = ["rex", "iris"]
|
|
812
|
+
stage_2_pending = [a for a in stage_2_agents if a not in completed_agents]
|
|
813
|
+
if stage_2_pending:
|
|
814
|
+
tasks_2 = {
|
|
815
|
+
"rex": (
|
|
816
|
+
"Analyze the competitive landscape. Discover competitors from the "
|
|
817
|
+
"knowledge base and web search. Identify threats and opportunities."
|
|
818
|
+
),
|
|
819
|
+
"iris": (
|
|
820
|
+
"Synthesize developer feedback themes from GitHub issues, Discourse "
|
|
821
|
+
"threads, and support channels. Rank pain points by frequency and "
|
|
822
|
+
"severity."
|
|
823
|
+
),
|
|
824
|
+
}
|
|
825
|
+
coros = [self.delegate(a, tasks_2[a]) for a in stage_2_pending]
|
|
826
|
+
results = await asyncio.gather(*coros)
|
|
827
|
+
for agent_name, res in zip(stage_2_pending, results, strict=True):
|
|
828
|
+
if res.success:
|
|
829
|
+
if agent_name == "rex":
|
|
830
|
+
self.context.rex_competitive = res.output
|
|
831
|
+
elif agent_name == "iris":
|
|
832
|
+
self.context.iris_themes = res.output
|
|
833
|
+
completed_agents.add(agent_name)
|
|
834
|
+
self._checkpoint(2, completed_agents=completed_agents)
|
|
835
|
+
|
|
836
|
+
# Stage 3: Nova + Kai in parallel (both use Iris themes, independent)
|
|
837
|
+
if resume_stage <= 3:
|
|
838
|
+
stage_3_agents = ["nova", "kai"]
|
|
839
|
+
stage_3_pending = [a for a in stage_3_agents if a not in completed_agents]
|
|
840
|
+
if stage_3_pending:
|
|
841
|
+
tasks_3 = {
|
|
842
|
+
"nova": (
|
|
843
|
+
"Design activation experiments based on the top pain points. "
|
|
844
|
+
"Include pre-registration, power analysis, and success criteria."
|
|
845
|
+
),
|
|
846
|
+
"kai": (
|
|
847
|
+
"Write a technical tutorial addressing the #1 developer pain point. "
|
|
848
|
+
"Ground the content in the knowledge base and Dex's architecture "
|
|
849
|
+
"analysis. Reference real GitHub issues from Sage's triage. "
|
|
850
|
+
"Use actual file paths, commands, and APIs from the source code."
|
|
851
|
+
),
|
|
852
|
+
}
|
|
853
|
+
content_brief = self._build_content_brief()
|
|
854
|
+
coros = [
|
|
855
|
+
self.delegate(
|
|
856
|
+
a,
|
|
857
|
+
tasks_3[a],
|
|
858
|
+
context={"content_brief": content_brief} if a == "kai" else None,
|
|
859
|
+
)
|
|
860
|
+
for a in stage_3_pending
|
|
861
|
+
]
|
|
862
|
+
results = await asyncio.gather(*coros)
|
|
863
|
+
for agent_name, res in zip(stage_3_pending, results, strict=True):
|
|
864
|
+
if res.success:
|
|
865
|
+
if agent_name == "nova":
|
|
866
|
+
self.context.nova_experiments = res.output
|
|
867
|
+
elif agent_name == "kai":
|
|
868
|
+
self.context.kai_content = res.output
|
|
869
|
+
completed_agents.add(agent_name)
|
|
870
|
+
self._checkpoint(3, completed_agents=completed_agents)
|
|
871
|
+
|
|
872
|
+
# Stage 4: Vox (uses Kai's content)
|
|
873
|
+
if resume_stage <= 4 and "vox" not in completed_agents:
|
|
874
|
+
video_result = await self.delegate(
|
|
875
|
+
"vox",
|
|
876
|
+
"Generate a video tutorial from Kai's written content. "
|
|
877
|
+
"Record screen walkthrough with narration and overlays.",
|
|
878
|
+
)
|
|
879
|
+
if video_result.success:
|
|
880
|
+
self.context.vox_video = video_result.output
|
|
881
|
+
completed_agents.add("vox")
|
|
882
|
+
self._checkpoint(4, completed_agents=completed_agents)
|
|
883
|
+
|
|
884
|
+
# Stage 5: Sentinel brand audit — audit all generated content
|
|
885
|
+
if resume_stage <= 5 and "sentinel" not in completed_agents:
|
|
886
|
+
sentinel_result = await self.delegate(
|
|
887
|
+
"sentinel",
|
|
888
|
+
"Audit all generated content for brand voice consistency, "
|
|
889
|
+
"ICP alignment, messaging coherence, and technical accuracy.",
|
|
890
|
+
)
|
|
891
|
+
if sentinel_result.success:
|
|
892
|
+
self.context.okr_progress["brand_audit"] = sentinel_result.output
|
|
893
|
+
completed_agents.add("sentinel")
|
|
894
|
+
self._checkpoint(5, completed_agents=completed_agents)
|
|
895
|
+
|
|
896
|
+
# Stage 5b: Argus content performance analyst (post-Sentinel, pre-OKR)
|
|
897
|
+
if resume_stage <= 5 and self.config.analytics_in_run and "argus" not in completed_agents:
|
|
898
|
+
try:
|
|
899
|
+
argus = self._build_argus()
|
|
900
|
+
end = datetime.now(timezone.utc)
|
|
901
|
+
start = end - timedelta(days=7)
|
|
902
|
+
argus_report = await argus.run(period_start=start, period_end=end)
|
|
903
|
+
self.context.argus_report = argus_report.to_json()
|
|
904
|
+
completed_agents.add("argus")
|
|
905
|
+
except Exception as exc: # noqa: BLE001
|
|
906
|
+
logger.warning("Argus stage failed (continuing): %s", exc)
|
|
907
|
+
self.context.argus_report = {"error": str(exc)}
|
|
908
|
+
self._checkpoint(5, completed_agents=completed_agents)
|
|
909
|
+
|
|
910
|
+
# Stage 5c: Growth pillars (Cyra in Wave 1; Vega + Selene added in Waves 2/3)
|
|
911
|
+
if resume_stage <= 5 and self.config.cro_in_run and "cyra" not in completed_agents:
|
|
912
|
+
try:
|
|
913
|
+
cyra = self._build_cyra()
|
|
914
|
+
period_end = datetime.now(timezone.utc).date().isoformat()
|
|
915
|
+
db_path = (
|
|
916
|
+
self.project_paths.state_db
|
|
917
|
+
if self.project_paths and self.project_paths.state_db.is_file()
|
|
918
|
+
else None
|
|
919
|
+
)
|
|
920
|
+
report_id = self._insert_cro_report_row(db_path, period_end)
|
|
921
|
+
deliverables_dir = (
|
|
922
|
+
self.project_paths.deliverables_dir if self.project_paths else None
|
|
923
|
+
)
|
|
924
|
+
cro_report = await cyra.execute(
|
|
925
|
+
period_end=period_end,
|
|
926
|
+
report_id=report_id,
|
|
927
|
+
page_html_by_url={},
|
|
928
|
+
iris_themes=self._extract_iris_themes(),
|
|
929
|
+
sage_friction=self._extract_sage_friction(),
|
|
930
|
+
deliverables_dir=deliverables_dir,
|
|
931
|
+
)
|
|
932
|
+
self.context.cro_report = {
|
|
933
|
+
"period_end": cro_report.period_end,
|
|
934
|
+
"funnel_id": cro_report.funnel_id,
|
|
935
|
+
"sources_ok": cro_report.sources_ok,
|
|
936
|
+
"dropoffs": [
|
|
937
|
+
{
|
|
938
|
+
"from_step": d.from_step,
|
|
939
|
+
"to_step": d.to_step,
|
|
940
|
+
"conversion_rate": d.conversion_rate,
|
|
941
|
+
"pp_delta_vs_prior": d.pp_delta_vs_prior,
|
|
942
|
+
}
|
|
943
|
+
for d in cro_report.dropoffs
|
|
944
|
+
],
|
|
945
|
+
"recommendations": [
|
|
946
|
+
{"action": r.action, "target": r.target, "confidence": r.confidence}
|
|
947
|
+
for r in cro_report.recommendations
|
|
948
|
+
],
|
|
949
|
+
}
|
|
950
|
+
completed_agents.add("cyra")
|
|
951
|
+
except Exception as exc: # noqa: BLE001
|
|
952
|
+
logger.warning("Atlas Stage 5c (Cyra) failed (continuing): %s", exc)
|
|
953
|
+
self.context.cro_report = {"error": str(exc)}
|
|
954
|
+
self._checkpoint(5, completed_agents=completed_agents)
|
|
955
|
+
|
|
956
|
+
# Stage 6: Instantly sync (analytics + reply triage)
|
|
957
|
+
if resume_stage <= 6 and self.instantly_client and "instantly_sync" not in completed_agents:
|
|
958
|
+
await self._run_instantly_sync()
|
|
959
|
+
completed_agents.add("instantly_sync")
|
|
960
|
+
self._checkpoint(6, completed_agents=completed_agents)
|
|
961
|
+
|
|
962
|
+
# Stage 7: OKR compilation (Atlas)
|
|
963
|
+
self.context.okr_progress = self._compile_okrs()
|
|
964
|
+
self.context.okr_progress["deliverables_written"] = self._write_weekly_deliverables()
|
|
965
|
+
|
|
966
|
+
# Archive the week's context and clean up checkpoints
|
|
967
|
+
self.context.save(self.archive_dir)
|
|
968
|
+
self._cleanup_checkpoints()
|
|
969
|
+
|
|
970
|
+
# Self-improvement: extract recurring issues and update agent prompts
|
|
971
|
+
try:
|
|
972
|
+
from devrel_origin.tools.self_improve import run_self_improvement
|
|
973
|
+
except ImportError as exc:
|
|
974
|
+
logger.warning("Self-improvement module not available; skipping: %s", exc)
|
|
975
|
+
else:
|
|
976
|
+
try:
|
|
977
|
+
improve_report = run_self_improvement(
|
|
978
|
+
self.archive_dir,
|
|
979
|
+
Path(__file__).parent.parent / "optimize",
|
|
980
|
+
)
|
|
981
|
+
if improve_report.get("recurring_issues"):
|
|
982
|
+
logger.info(
|
|
983
|
+
"self_improvement_complete",
|
|
984
|
+
extra={"agents_updated": list(improve_report["recurring_issues"].keys())},
|
|
985
|
+
)
|
|
986
|
+
except Exception:
|
|
987
|
+
logger.exception("Self-improvement step raised; continuing weekly cycle")
|
|
988
|
+
|
|
989
|
+
# Stage 8: Publish to content calendar + send notifications
|
|
990
|
+
await self._publish_and_notify()
|
|
991
|
+
|
|
992
|
+
# Generate run report
|
|
993
|
+
run_report.completed_at = datetime.now().isoformat()
|
|
994
|
+
started = datetime.fromisoformat(run_report.started_at)
|
|
995
|
+
run_report.duration_seconds = (datetime.now() - started).total_seconds()
|
|
996
|
+
run_report.stages_completed = 8
|
|
997
|
+
|
|
998
|
+
if self.llm_client:
|
|
999
|
+
run_report.cost = self.llm_client.usage.to_dict()
|
|
1000
|
+
|
|
1001
|
+
# Quality data from Sentinel and revision traces
|
|
1002
|
+
okr = self.context.okr_progress
|
|
1003
|
+
quality: dict[str, Any] = {}
|
|
1004
|
+
brand_audit = okr.get("brand_audit", {})
|
|
1005
|
+
if brand_audit:
|
|
1006
|
+
quality["sentinel_score"] = brand_audit.get("overall_score")
|
|
1007
|
+
revision_traces: dict[str, Any] = {}
|
|
1008
|
+
kai = self.context.kai_content
|
|
1009
|
+
if isinstance(kai, dict) and "revision" in kai:
|
|
1010
|
+
revision_traces["kai"] = kai["revision"]
|
|
1011
|
+
if revision_traces:
|
|
1012
|
+
quality["revision_traces"] = revision_traces
|
|
1013
|
+
run_report.quality = quality
|
|
1014
|
+
|
|
1015
|
+
health = okr.get("pre_health", {})
|
|
1016
|
+
if health:
|
|
1017
|
+
run_report.health = health
|
|
1018
|
+
|
|
1019
|
+
run_report.save(self.archive_dir)
|
|
1020
|
+
|
|
1021
|
+
logger.info(
|
|
1022
|
+
"weekly_cycle_complete",
|
|
1023
|
+
extra={
|
|
1024
|
+
"week": self.context.week_of,
|
|
1025
|
+
"duration_seconds": run_report.duration_seconds,
|
|
1026
|
+
"cost_usd": run_report.cost.get("total_cost_usd", 0),
|
|
1027
|
+
"sentinel_score": quality.get("sentinel_score"),
|
|
1028
|
+
},
|
|
1029
|
+
)
|
|
1030
|
+
return self.context
|
|
1031
|
+
|
|
1032
|
+
async def _publish_and_notify(self) -> None:
|
|
1033
|
+
"""Publish content to calendar and send notifications.
|
|
1034
|
+
|
|
1035
|
+
Gracefully skips if notification/sheets services aren't configured.
|
|
1036
|
+
"""
|
|
1037
|
+
import os
|
|
1038
|
+
|
|
1039
|
+
ctx_dict = self.context.to_dict()
|
|
1040
|
+
|
|
1041
|
+
# Google Sheets content calendar
|
|
1042
|
+
sheets_id = os.environ.get("SHEETS_SPREADSHEET_ID", "")
|
|
1043
|
+
sheets_token = os.environ.get("SHEETS_ACCESS_TOKEN", "")
|
|
1044
|
+
if sheets_id:
|
|
1045
|
+
try:
|
|
1046
|
+
from devrel_origin.tools.sheets import ContentCalendar, SheetsConfig
|
|
1047
|
+
|
|
1048
|
+
cal = ContentCalendar(
|
|
1049
|
+
SheetsConfig(
|
|
1050
|
+
spreadsheet_id=sheets_id,
|
|
1051
|
+
access_token=sheets_token,
|
|
1052
|
+
)
|
|
1053
|
+
)
|
|
1054
|
+
added = await cal.publish_content(ctx_dict)
|
|
1055
|
+
logger.info(f"Published to sheets: {added}")
|
|
1056
|
+
await cal.close()
|
|
1057
|
+
except Exception as exc:
|
|
1058
|
+
logger.warning(f"Sheets publish failed: {exc}")
|
|
1059
|
+
|
|
1060
|
+
# Telegram + email notifications
|
|
1061
|
+
tg_token = os.environ.get("TELEGRAM_BOT_TOKEN", "")
|
|
1062
|
+
email_sender = os.environ.get("EMAIL_SENDER", "")
|
|
1063
|
+
if tg_token or email_sender:
|
|
1064
|
+
try:
|
|
1065
|
+
from devrel_origin.tools.notifications import (
|
|
1066
|
+
NotificationConfig,
|
|
1067
|
+
NotificationService,
|
|
1068
|
+
)
|
|
1069
|
+
|
|
1070
|
+
svc = NotificationService(
|
|
1071
|
+
NotificationConfig(
|
|
1072
|
+
telegram_bot_token=tg_token,
|
|
1073
|
+
telegram_chat_id=os.environ.get("TELEGRAM_CHAT_ID", ""),
|
|
1074
|
+
email_sender=email_sender,
|
|
1075
|
+
email_password=os.environ.get("EMAIL_PASSWORD", ""),
|
|
1076
|
+
email_recipients=(
|
|
1077
|
+
os.environ.get("EMAIL_RECIPIENTS", "").split(",")
|
|
1078
|
+
if os.environ.get("EMAIL_RECIPIENTS")
|
|
1079
|
+
else None
|
|
1080
|
+
),
|
|
1081
|
+
)
|
|
1082
|
+
)
|
|
1083
|
+
result = await svc.send_digest(ctx_dict, mode="weekly")
|
|
1084
|
+
logger.info(f"Notifications sent: {result}")
|
|
1085
|
+
await svc.close()
|
|
1086
|
+
except Exception as exc:
|
|
1087
|
+
logger.warning(f"Notifications failed: {exc}")
|
|
1088
|
+
|
|
1089
|
+
async def _run_instantly_sync(self) -> None:
|
|
1090
|
+
"""Pull Instantly analytics and triage email replies."""
|
|
1091
|
+
analytics_result = await self.delegate(
|
|
1092
|
+
"mox",
|
|
1093
|
+
"Pull campaign analytics from Instantly for all active campaigns.",
|
|
1094
|
+
)
|
|
1095
|
+
if analytics_result.success:
|
|
1096
|
+
self.context.instantly_analytics = analytics_result.output
|
|
1097
|
+
|
|
1098
|
+
triage_result = await self.delegate(
|
|
1099
|
+
"pax",
|
|
1100
|
+
"Fetch new email replies from Instantly, triage them, "
|
|
1101
|
+
"and draft follow-ups for interested leads.",
|
|
1102
|
+
)
|
|
1103
|
+
if triage_result.success:
|
|
1104
|
+
self.context.instantly_replies = triage_result.output
|
|
1105
|
+
|
|
1106
|
+
def _build_argus(self) -> Argus:
|
|
1107
|
+
"""Construct an Argus instance for the optional Stage 5b call.
|
|
1108
|
+
|
|
1109
|
+
Uses the project state DB (from project_paths) for persistence and WoW
|
|
1110
|
+
baselines when available; otherwise runs without persistence.
|
|
1111
|
+
"""
|
|
1112
|
+
from devrel_origin.tools.analytics import (
|
|
1113
|
+
GitHubCollector,
|
|
1114
|
+
InstantlyCollector,
|
|
1115
|
+
PostHogCollector,
|
|
1116
|
+
SocialCollector,
|
|
1117
|
+
)
|
|
1118
|
+
|
|
1119
|
+
state_db = (
|
|
1120
|
+
self.project_paths.state_db
|
|
1121
|
+
if (self.project_paths and self.project_paths.state_db.is_file())
|
|
1122
|
+
else None
|
|
1123
|
+
)
|
|
1124
|
+
social_db = state_db if state_db else Path("/dev/null")
|
|
1125
|
+
|
|
1126
|
+
return Argus(
|
|
1127
|
+
posthog_collector=PostHogCollector(self.api_client),
|
|
1128
|
+
github_collector=GitHubCollector(self.github_tools or self._dummy_github_client()),
|
|
1129
|
+
instantly_collector=InstantlyCollector(
|
|
1130
|
+
self.instantly_client or self._dummy_instantly_client()
|
|
1131
|
+
),
|
|
1132
|
+
social_collector=SocialCollector(social_db),
|
|
1133
|
+
llm_client=self.llm_client,
|
|
1134
|
+
state_db_path=state_db,
|
|
1135
|
+
)
|
|
1136
|
+
|
|
1137
|
+
@staticmethod
|
|
1138
|
+
def _dummy_github_client():
|
|
1139
|
+
class _Dummy:
|
|
1140
|
+
repo_full_name = "unknown/unknown"
|
|
1141
|
+
|
|
1142
|
+
async def get_repo_stats(self):
|
|
1143
|
+
raise RuntimeError("github client not configured")
|
|
1144
|
+
|
|
1145
|
+
return _Dummy()
|
|
1146
|
+
|
|
1147
|
+
@staticmethod
|
|
1148
|
+
def _dummy_instantly_client():
|
|
1149
|
+
class _Dummy:
|
|
1150
|
+
async def list_campaigns_with_analytics(self):
|
|
1151
|
+
raise RuntimeError("instantly client not configured")
|
|
1152
|
+
|
|
1153
|
+
return _Dummy()
|
|
1154
|
+
|
|
1155
|
+
def _build_cyra(self):
|
|
1156
|
+
"""Construct a Cyra instance for the optional Stage 5c call.
|
|
1157
|
+
|
|
1158
|
+
Requires a real project_paths with an existing state DB (for FK integrity).
|
|
1159
|
+
If no project_paths or DB, falls back to a temp path and callers should
|
|
1160
|
+
expect _insert_cro_report_row to return 0.
|
|
1161
|
+
"""
|
|
1162
|
+
from devrel_origin.core.cyra import Cyra
|
|
1163
|
+
|
|
1164
|
+
db_path = (
|
|
1165
|
+
self.project_paths.state_db
|
|
1166
|
+
if (self.project_paths and self.project_paths.state_db.is_file())
|
|
1167
|
+
else Path("/dev/null")
|
|
1168
|
+
)
|
|
1169
|
+
return Cyra(
|
|
1170
|
+
posthog_client=self.api_client,
|
|
1171
|
+
llm_client=self.llm_client,
|
|
1172
|
+
db_path=db_path,
|
|
1173
|
+
)
|
|
1174
|
+
|
|
1175
|
+
@staticmethod
|
|
1176
|
+
def _insert_cro_report_row(db_path: Path | None, period_end: str) -> int:
|
|
1177
|
+
"""Get-or-insert an analytics_reports row and return its rowid for Cyra FK.
|
|
1178
|
+
|
|
1179
|
+
Reuses an existing row for the same period_end when one is already there
|
|
1180
|
+
(typically Argus's Stage 5b row from earlier in the same weekly cycle).
|
|
1181
|
+
This keeps the per-period row count at one regardless of how many
|
|
1182
|
+
pillars (Argus, Cyra, future Vega/Selene) ran. Returns 0 when no real
|
|
1183
|
+
DB is available (dev/null or None), which means Cyra's persist step
|
|
1184
|
+
will silently skip FK-linked inserts on missing DB.
|
|
1185
|
+
"""
|
|
1186
|
+
if db_path is None or not db_path.is_file():
|
|
1187
|
+
return 0
|
|
1188
|
+
import sqlite3
|
|
1189
|
+
|
|
1190
|
+
with sqlite3.connect(db_path) as conn:
|
|
1191
|
+
existing = conn.execute(
|
|
1192
|
+
"SELECT id FROM analytics_reports WHERE period_end = ? ORDER BY id DESC LIMIT 1",
|
|
1193
|
+
(period_end,),
|
|
1194
|
+
).fetchone()
|
|
1195
|
+
if existing:
|
|
1196
|
+
return existing[0]
|
|
1197
|
+
period_start = period_end # single-day placeholder; CLI uses a proper range
|
|
1198
|
+
cur = conn.execute(
|
|
1199
|
+
"INSERT INTO analytics_reports (period_start, period_end, report_json) "
|
|
1200
|
+
"VALUES (?, ?, ?)",
|
|
1201
|
+
(period_start, period_end, "{}"),
|
|
1202
|
+
)
|
|
1203
|
+
conn.commit()
|
|
1204
|
+
return cur.lastrowid or 0

    def _extract_iris_themes(self) -> list[str]:
        """Extract top-5 theme titles from Iris output for Cyra hypothesis context."""
        themes_data = self.context.iris_themes or {}
        if isinstance(themes_data, dict):
            return [t.get("title", "") for t in themes_data.get("themes", [])][:5]
        return []

    def _extract_sage_friction(self) -> list[str]:
        """Extract high/critical friction signals from Sage triage for Cyra hypothesis context."""
        triage_data = self.context.sage_triage or {}
        if isinstance(triage_data, dict):
            return [
                f"{i.get('title', '')}: {i.get('summary', '')}"
                for i in triage_data.get("issues", [])
                if i.get("priority") in {"high", "critical"}
            ][:5]
        return []
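
    # --- Illustrative sketch (not part of the original module) ---
    # Both extractors above tolerate missing or malformed upstream output by
    # returning []. The sample theme/issue shapes below are assumptions that
    # exercise the happy path and the priority filter:
    @staticmethod
    def _demo_extractors() -> None:
        from types import SimpleNamespace

        ctx = SimpleNamespace(
            iris_themes={"themes": [{"title": "onboarding friction"}]},
            sage_triage={
                "issues": [
                    {"title": "crash on login", "summary": "SDK 2.1",
                     "priority": "critical"},
                    {"title": "typo in docs", "summary": "", "priority": "low"},
                ]
            },
        )
        fake_self = SimpleNamespace(context=ctx)  # stand-in for an Atlas instance
        assert Atlas._extract_iris_themes(fake_self) == ["onboarding friction"]
        # Only high/critical issues survive the friction filter:
        assert len(Atlas._extract_sage_friction(fake_self)) == 1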

    def _compile_okrs(self) -> dict[str, Any]:
        """Compile weekly OKR progress from all agent outputs."""
        kai = self.context.kai_content if isinstance(self.context.kai_content, dict) else {}
        return {
            "week": self.context.week_of,
            "content_produced": kai.get("status") == "generated" and bool(kai.get("content")),
            "issues_triaged": len(self.context.sage_triage.get("issues", [])),
            "social_mentions_found": self.context.echo_social.get("total_mentions", 0),
            "themes_identified": len(self.context.iris_themes.get("themes", [])),
            "experiments_designed": len(self.context.nova_experiments.get("experiments", [])),
            "video_produced": bool(self.context.vox_video),
            "docs_generated": bool(self.context.dex_docs),
            "competitors_analyzed": len(
                self.context.rex_competitive.get("competitors_discovered", [])
            ),
            "emails_sent": self.context.instantly_analytics.get("total_sent", 0),
            "emails_opened": self.context.instantly_analytics.get("total_opened", 0),
            "emails_replied": self.context.instantly_analytics.get("total_replied", 0),
            "reply_rate": self.context.instantly_analytics.get("avg_reply_rate", 0),
            "followups_pending": len(self.context.instantly_replies.get("drafts", [])),
            "status": "complete",
        }
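
    # --- Illustrative sketch (not part of the original module) ---
    # _compile_okrs only reads dict/list shapes off the shared context, so a
    # SimpleNamespace stand-in (all field values assumed here) is enough to
    # see the weekly report's shape without running any pillar:
    @staticmethod
    def _demo_compile_okrs_shape() -> None:
        from types import SimpleNamespace

        ctx = SimpleNamespace(
            week_of="2025-W05",
            kai_content={"status": "generated", "content": "post"},
            sage_triage={"issues": [{"priority": "high"}]},
            echo_social={"total_mentions": 3},
            iris_themes={"themes": []},
            nova_experiments={"experiments": []},
            vox_video=None,
            dex_docs=None,
            rex_competitive={"competitors_discovered": []},
            instantly_analytics={"total_sent": 10, "total_opened": 4,
                                 "total_replied": 1, "avg_reply_rate": 0.1},
            instantly_replies={"drafts": []},
        )
        okrs = Atlas._compile_okrs(SimpleNamespace(context=ctx))
        assert okrs["content_produced"] and okrs["issues_triaged"] == 1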

    async def run_single_task(self, agent_name: str, task: str) -> DelegationResult:
        """Run a single task on a specific agent (for ad-hoc requests)."""
        return await self.delegate(agent_name, task)


async def process_draft(draft: dict, instantly_client: InstantlyClient) -> str:
    """Process a single follow-up draft interactively.

    Returns: 'approved', 'edited', 'skipped', or 'rejected'
    """
    print(f"\n{'=' * 60}")
    print(f"Category: {draft.get('category', 'unknown')}")
    print(f"To: {draft.get('lead_email', 'unknown')}")
    print(f"Subject: {draft.get('draft_subject', '')}")
    print(f"\n{draft.get('draft_body', '')}")
    print(f"{'=' * 60}")

    choice = input("[a]pprove / [e]dit / [s]kip / [r]eject: ").strip().lower()

    if choice == "a":
        await instantly_client.reply_to_email(
            email_id=draft["email_id"],
            campaign_id=draft.get("campaign_id", ""),
            body=draft["draft_body"],
            thread_id=draft.get("thread_id"),
        )
        draft["status"] = "sent"
        return "approved"
    elif choice == "e":
        import tempfile

        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
            f.write(draft["draft_body"])
            tmp_path = f.name
        editor = os.environ.get("EDITOR", "vi")
        editor_path = shutil.which(editor)
        if editor_path is None:
            logger.warning("EDITOR=%s not found on PATH; skipping interactive edit", editor)
            edited_body = draft["draft_body"]
        else:
            subprocess.run([editor_path, tmp_path], check=False)
            with open(tmp_path) as f:
                edited_body = f.read()
        os.unlink(tmp_path)
        await instantly_client.reply_to_email(
            email_id=draft["email_id"],
            campaign_id=draft.get("campaign_id", ""),
            body=edited_body,
            thread_id=draft.get("thread_id"),
        )
        draft["status"] = "sent"
        return "edited"
    elif choice == "r":
        draft["status"] = "rejected"
        return "rejected"
    else:
        return "skipped"


def _build_apollo_client(api_key: Optional[str]) -> Optional["ApolloClient"]:
    """Instantiate ApolloClient when the API key is available, else return None."""
    if not api_key:
        return None
    from devrel_origin.tools.apollo_client import ApolloClient  # noqa: PLC0415

    return ApolloClient(api_key=api_key)
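

# --- Illustrative sketch (not part of the original module) ---
# The builder above keeps the Apollo dependency optional: no key, no import.
# A quick check of the missing-key path (needs no network and no package):
def _demo_build_apollo_client_optional() -> None:
    # A falsy key short-circuits before the import line runs, so this works
    # even when apollo_client is not installed.
    assert _build_apollo_client(None) is None
    assert _build_apollo_client("") is None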


async def _run_review_replies(instantly_client: Optional[InstantlyClient]) -> None:
    """Handle the --review-replies CLI mode."""
    archive_dir = Path("context_archive")
    ctx = SharedContext.load(archive_dir)
    drafts = ctx.instantly_replies.get("drafts", [])
    pending = [d for d in drafts if d.get("status") == "pending_approval"]

    if not pending:
        print("No pending follow-up drafts to review.")
        return

    if not instantly_client:
        print("Error: INSTANTLY_API_KEY not set. Cannot send replies.")
        return

    print(f"\n{len(pending)} pending follow-up(s) to review:\n")
    stats: dict[str, int] = {"approved": 0, "edited": 0, "skipped": 0, "rejected": 0}

    for draft in pending:
        result = await process_draft(draft, instantly_client)
        stats[result] = stats.get(result, 0) + 1

    print(
        f"\nDone! {stats['approved']} approved, "
        f"{stats['edited']} edited, "
        f"{stats['skipped']} skipped, "
        f"{stats['rejected']} rejected"
    )


async def main():
    """CLI entry point for running the orchestrator."""
    import argparse
    import os

    from dotenv import load_dotenv

    load_dotenv()

    parser = argparse.ArgumentParser(description="Atlas Orchestrator Agent")
    parser.add_argument(
        "--weekly-cycle",
        action="store_true",
        help="Run the full weekly orchestration cycle",
    )
    parser.add_argument("--agent", type=str, help="Target agent for single task")
    parser.add_argument("--task", type=str, help="Task description")
    parser.add_argument(
        "--review-replies",
        action="store_true",
        help="Review and approve pending follow-up email drafts",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="config/agent_config.yaml",
        help="Path to agent config YAML",
    )
    args = parser.parse_args()

    config = load_config(Path(args.config))

    client = PostHogClient(
        api_key=os.environ.get("POSTHOG_API_KEY", ""),
        project_id=os.environ.get("POSTHOG_PROJECT_ID", ""),
    )
    kb_path = Path(__file__).parent.parent / "knowledge_base"

    llm_client = (
        LLMClient(
            api_key=os.environ.get("ANTHROPIC_API_KEY", ""),
            budget_limit_usd=config.budget_limit_usd,
        )
        if os.environ.get("ANTHROPIC_API_KEY")
        else None
    )

    github_tools = (
        GitHubTools(
            token=os.environ.get("GITHUB_TOKEN", ""),
        )
        if os.environ.get("GITHUB_TOKEN")
        else None
    )

    search = SearchTools(
        firecrawl_api_key=os.environ.get("FIRECRAWL_API_KEY", ""),
        brave_api_key=os.environ.get("BRAVE_API_KEY", ""),
    )

    instantly_client = (
        InstantlyClient(api_key=os.environ.get("INSTANTLY_API_KEY", ""))
        if os.environ.get("INSTANTLY_API_KEY")
        else None
    )

    apollo_client = _build_apollo_client(os.environ.get("APOLLO_API_KEY"))

    atlas = Atlas(
        api_client=client,
        knowledge_base_path=kb_path,
        llm_client=llm_client,
        github_tools=github_tools,
        search_tools=search,
        config=config,
        instantly_client=instantly_client,
        apollo_client=apollo_client,
    )

    try:
        if args.review_replies:
            await _run_review_replies(instantly_client)
            return
        elif args.weekly_cycle:
            context = await atlas.run_weekly_cycle()
            print(json.dumps(context.to_dict(), indent=2, default=str))
        elif args.agent and args.task:
            result = await atlas.run_single_task(args.agent, args.task)
            print(json.dumps(result.__dict__, indent=2, default=str))
        else:
            parser.print_help()
    finally:
        if llm_client:
            await llm_client.close()
        if github_tools:
            await github_tools.close()
        if apollo_client:
            await apollo_client.close()
        if instantly_client:
            await instantly_client.close()
        await search.close()
        await client.close()


if __name__ == "__main__":
    asyncio.run(main())
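

# --- Illustrative usage (not part of the original module) ---
# Invocations matching the argparse flags defined in main() above; the
# module path assumes the wheel's devrel_origin/core/atlas.py layout and the
# agent/task values are placeholders:
#
#   python -m devrel_origin.core.atlas --weekly-cycle
#   python -m devrel_origin.core.atlas --agent kai --task "draft a launch post"
#   python -m devrel_origin.core.atlas --review-replies
#   python -m devrel_origin.core.atlas --config config/agent_config.yaml --weekly-cycle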