devrel-swarm 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_swarm/__init__.py +8 -0
- devrel_swarm/cli/__init__.py +82 -0
- devrel_swarm/cli/_common.py +62 -0
- devrel_swarm/cli/analytics.py +497 -0
- devrel_swarm/cli/config.py +108 -0
- devrel_swarm/cli/content.py +164 -0
- devrel_swarm/cli/cost.py +108 -0
- devrel_swarm/cli/deliverables.py +65 -0
- devrel_swarm/cli/docs.py +34 -0
- devrel_swarm/cli/doctor.py +167 -0
- devrel_swarm/cli/experiment.py +29 -0
- devrel_swarm/cli/init.py +72 -0
- devrel_swarm/cli/intel.py +27 -0
- devrel_swarm/cli/kb.py +96 -0
- devrel_swarm/cli/listen.py +31 -0
- devrel_swarm/cli/marketing.py +66 -0
- devrel_swarm/cli/run.py +46 -0
- devrel_swarm/cli/sales.py +57 -0
- devrel_swarm/cli/schedule.py +62 -0
- devrel_swarm/cli/synthesize.py +28 -0
- devrel_swarm/cli/triage.py +29 -0
- devrel_swarm/cli/video.py +35 -0
- devrel_swarm/core/__init__.py +46 -0
- devrel_swarm/core/agent_config.py +67 -0
- devrel_swarm/core/argus.py +961 -0
- devrel_swarm/core/atlas.py +1108 -0
- devrel_swarm/core/base.py +372 -0
- devrel_swarm/core/dex.py +708 -0
- devrel_swarm/core/echo.py +614 -0
- devrel_swarm/core/iris.py +513 -0
- devrel_swarm/core/kai.py +402 -0
- devrel_swarm/core/llm.py +437 -0
- devrel_swarm/core/mox.py +514 -0
- devrel_swarm/core/nova.py +349 -0
- devrel_swarm/core/pax.py +1205 -0
- devrel_swarm/core/rex.py +532 -0
- devrel_swarm/core/sage.py +486 -0
- devrel_swarm/core/sentinel.py +362 -0
- devrel_swarm/core/types.py +98 -0
- devrel_swarm/core/video/__init__.py +22 -0
- devrel_swarm/core/video/assembler.py +131 -0
- devrel_swarm/core/video/browser_recorder.py +118 -0
- devrel_swarm/core/video/desktop_recorder.py +254 -0
- devrel_swarm/core/video/overlay_renderer.py +143 -0
- devrel_swarm/core/video/script_parser.py +147 -0
- devrel_swarm/core/video/tts_engine.py +82 -0
- devrel_swarm/core/vox.py +268 -0
- devrel_swarm/core/watchdog.py +321 -0
- devrel_swarm/project/__init__.py +1 -0
- devrel_swarm/project/config.py +75 -0
- devrel_swarm/project/cost_sink.py +61 -0
- devrel_swarm/project/init.py +104 -0
- devrel_swarm/project/paths.py +75 -0
- devrel_swarm/project/state.py +142 -0
- devrel_swarm/project/templates/__init__.py +4 -0
- devrel_swarm/project/templates/config.toml +24 -0
- devrel_swarm/project/templates/devrel.gitignore +10 -0
- devrel_swarm/project/templates/slop-blocklist.md +45 -0
- devrel_swarm/project/templates/style.md +24 -0
- devrel_swarm/project/templates/voice.md +29 -0
- devrel_swarm/quality/__init__.py +66 -0
- devrel_swarm/quality/editorial.py +357 -0
- devrel_swarm/quality/persona.py +84 -0
- devrel_swarm/quality/readability.py +148 -0
- devrel_swarm/quality/slop.py +119 -0
- devrel_swarm/quality/style.py +110 -0
- devrel_swarm/quality/voice.py +15 -0
- devrel_swarm/tools/__init__.py +9 -0
- devrel_swarm/tools/analytics.py +304 -0
- devrel_swarm/tools/api_client.py +357 -0
- devrel_swarm/tools/apollo_client.py +305 -0
- devrel_swarm/tools/code_validator.py +328 -0
- devrel_swarm/tools/github_tools.py +292 -0
- devrel_swarm/tools/instantly_client.py +412 -0
- devrel_swarm/tools/kb_harvester.py +340 -0
- devrel_swarm/tools/mcp_server.py +578 -0
- devrel_swarm/tools/notifications.py +245 -0
- devrel_swarm/tools/run_report.py +193 -0
- devrel_swarm/tools/scheduler.py +231 -0
- devrel_swarm/tools/search_tools.py +321 -0
- devrel_swarm/tools/self_improve.py +168 -0
- devrel_swarm/tools/sheets.py +236 -0
- devrel_swarm-0.2.4.dist-info/METADATA +308 -0
- devrel_swarm-0.2.4.dist-info/RECORD +88 -0
- devrel_swarm-0.2.4.dist-info/WHEEL +5 -0
- devrel_swarm-0.2.4.dist-info/entry_points.txt +2 -0
- devrel_swarm-0.2.4.dist-info/licenses/LICENSE +21 -0
- devrel_swarm-0.2.4.dist-info/top_level.txt +1 -0
devrel_swarm/__init__.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
"""Typer CLI app for devrel-swarm.
|
|
2
|
+
|
|
3
|
+
Phase 2 registers `init` and `doctor`. Later phases register additional
|
|
4
|
+
verb groups (run, content, sales, marketing, etc.).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import typer
|
|
10
|
+
|
|
11
|
+
from devrel_swarm import __version__
|
|
12
|
+
from devrel_swarm.cli.analytics import analytics_app
|
|
13
|
+
from devrel_swarm.cli.config import config_app
|
|
14
|
+
from devrel_swarm.cli.content import content_app
|
|
15
|
+
from devrel_swarm.cli.cost import cost_command
|
|
16
|
+
from devrel_swarm.cli.deliverables import deliverables_app
|
|
17
|
+
from devrel_swarm.cli.docs import docs_app
|
|
18
|
+
from devrel_swarm.cli.doctor import doctor_command
|
|
19
|
+
from devrel_swarm.cli.experiment import experiment_command
|
|
20
|
+
from devrel_swarm.cli.init import init_command
|
|
21
|
+
from devrel_swarm.cli.intel import intel_command
|
|
22
|
+
from devrel_swarm.cli.kb import kb_app
|
|
23
|
+
from devrel_swarm.cli.listen import listen_command
|
|
24
|
+
from devrel_swarm.cli.marketing import marketing_app
|
|
25
|
+
from devrel_swarm.cli.run import run_command
|
|
26
|
+
from devrel_swarm.cli.sales import sales_app
|
|
27
|
+
from devrel_swarm.cli.schedule import schedule_app
|
|
28
|
+
from devrel_swarm.cli.synthesize import synthesize_command
|
|
29
|
+
from devrel_swarm.cli.triage import triage_command
|
|
30
|
+
from devrel_swarm.cli.video import video_app
|
|
31
|
+
|
|
32
|
+
# Root Typer application.  `no_args_is_help` makes a bare `devrel` invocation
# print usage instead of erroring; shell completion is disabled.
app = typer.Typer(
    name="devrel",
    help="DevRel + Sales + Marketing agent system. Run from inside a project repo.",
    no_args_is_help=True,
    add_completion=False,
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _version_callback(value: bool) -> None:
|
|
41
|
+
if value:
|
|
42
|
+
typer.echo(f"devrel-swarm {__version__}")
|
|
43
|
+
raise typer.Exit()
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@app.callback()
def main(
    version: bool = typer.Option(
        False,
        "--version",
        callback=_version_callback,
        # Eager: the callback runs (and exits) before any subcommand parsing.
        is_eager=True,
        help="Show version and exit.",
    ),
) -> None:
    """Root callback. Subcommands are registered below."""
    return None
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# Verb registration.  Single-verb commands attach with app.command(); verb
# groups that carry their own subcommands mount as nested Typer apps via
# add_typer().  Registration order determines the order in `devrel --help`.
app.command(name="init")(init_command)
app.command(name="doctor")(doctor_command)
app.add_typer(content_app, name="content")
app.command(name="run")(run_command)
app.command(name="triage")(triage_command)
app.command(name="listen")(listen_command)
app.command(name="synthesize")(synthesize_command)
app.command(name="experiment")(experiment_command)
app.command(name="intel")(intel_command)
app.add_typer(sales_app, name="sales")
app.add_typer(marketing_app, name="marketing")
app.add_typer(kb_app, name="kb")
app.add_typer(schedule_app, name="schedule")
app.command(name="cost")(cost_command)
app.add_typer(deliverables_app, name="deliverables")
app.add_typer(config_app, name="config")
app.add_typer(docs_app, name="docs")
app.add_typer(video_app, name="video")
app.add_typer(analytics_app, name="analytics")


# Allow `python -m devrel_swarm.cli` style direct execution.
if __name__ == "__main__":
    app()
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""Shared CLI helpers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
|
|
8
|
+
import typer
|
|
9
|
+
from rich.console import Console
|
|
10
|
+
|
|
11
|
+
from devrel_swarm.core.atlas import Atlas, DelegationResult
|
|
12
|
+
from devrel_swarm.core.llm import LLMClient
|
|
13
|
+
from devrel_swarm.project.paths import ProjectNotFoundError, ProjectPaths, find_devrel_root
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def find_paths_or_exit(console: Console) -> ProjectPaths:
    """Resolve the enclosing .devrel project's paths, or exit(1) with an error.

    Prints the ProjectNotFoundError message in red when no project root can
    be located from the current working directory.
    """
    try:
        root = find_devrel_root()
        return ProjectPaths.from_root(root)
    except ProjectNotFoundError as exc:
        console.print(f"[red]{exc}[/red]")
        raise typer.Exit(code=1) from None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def build_atlas_or_exit(paths: ProjectPaths, console: Console) -> Atlas:
    """Build an Atlas orchestrator backed by the Anthropic API, or exit(1).

    Requires ANTHROPIC_API_KEY in the environment; older Atlas versions that
    do not accept a project_paths kwarg are still supported.
    """
    key = os.environ.get("ANTHROPIC_API_KEY")
    if not key:
        console.print("[red]ANTHROPIC_API_KEY is required.[/red]")
        raise typer.Exit(code=1)
    client = LLMClient(api_key=key)
    try:
        return Atlas(llm_client=client, project_paths=paths)
    except TypeError:
        # Atlas may not yet accept project_paths kwarg.
        return Atlas(llm_client=client)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def render_result(result: DelegationResult, console: Console, *, json_output: bool = False) -> None:
    """Render a DelegationResult as indented JSON or a short rich summary.

    JSON mode prints to stdout via typer.echo and returns.  Rich mode prints
    a one-line status; a failed delegation exits with code 1.
    """
    if json_output:
        # DelegationResult is a dataclass; convert via dict()/asdict.
        from dataclasses import asdict

        try:
            payload = asdict(result)
        except TypeError:
            # Not a dataclass instance — probe the expected attributes instead.
            payload = {
                field: getattr(result, field, fallback)
                for field, fallback in (
                    ("agent", "?"),
                    ("task", "?"),
                    ("success", False),
                    ("result", None),
                    ("error", None),
                )
            }
        typer.echo(json.dumps(payload, default=str, indent=2))
        return

    if not result.success:
        console.print(f"[red]✗[/red] {result.agent} failed: {result.error}")
        raise typer.Exit(code=1)

    console.print(f"[green]✓[/green] {result.agent} completed")
    body = result.result
    if isinstance(body, dict):
        # Show at most 8 entries, each value truncated, to keep output short.
        for key, value in list(body.items())[:8]:
            console.print(f"  [dim]{key}:[/dim] {str(value)[:120]}")
    elif body:
        console.print(f"  {str(body)[:300]}")
|
|
@@ -0,0 +1,497 @@
|
|
|
1
|
+
"""`devrel analytics report` — Argus performance report.
|
|
2
|
+
|
|
3
|
+
Pulls the last N days of metrics from PostHog, GitHub, Instantly, and
|
|
4
|
+
Echo's social_mentions; ranks deterministically; emits structured
|
|
5
|
+
recommendations via a single Sonnet call.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import json
|
|
12
|
+
import re
|
|
13
|
+
import sys
|
|
14
|
+
from datetime import datetime, timedelta, timezone
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
import typer
|
|
18
|
+
from rich.console import Console
|
|
19
|
+
|
|
20
|
+
from devrel_swarm.cli._common import find_paths_or_exit
|
|
21
|
+
from devrel_swarm.core.argus import (
|
|
22
|
+
Argus,
|
|
23
|
+
PerformanceReport,
|
|
24
|
+
compute_calibration,
|
|
25
|
+
write_recommendation_briefs,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
# Report/markdown output goes to stdout; progress and diagnostics go to the
# stderr console so piped `--format json` output stays machine-parseable.
console = Console()
err_console = Console(stderr=True)

analytics_app = typer.Typer(
    name="analytics",
    help="Content performance analysis (Argus).",
    no_args_is_help=True,
)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
_SINCE_RE = re.compile(r"^(\d+)([dwmy])$")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _parse_since(since: str) -> timedelta:
|
|
42
|
+
"""Accept '7d' / '30d' / '12w' / '3m' / '1y'."""
|
|
43
|
+
m = _SINCE_RE.match(since.strip())
|
|
44
|
+
if not m:
|
|
45
|
+
raise typer.BadParameter(f"--since must look like '7d', '30d', '12w': got {since!r}")
|
|
46
|
+
n, unit = int(m.group(1)), m.group(2)
|
|
47
|
+
days = {"d": 1, "w": 7, "m": 30, "y": 365}[unit]
|
|
48
|
+
return timedelta(days=n * days)
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _build_argus(state_db_path: Path) -> Argus:
    """Construct Argus with real collectors. Patched in unit tests."""
    import os

    from devrel_swarm.core.llm import LLMClient
    from devrel_swarm.tools.analytics import (
        GitHubCollector,
        InstantlyCollector,
        PostHogCollector,
        SocialCollector,
    )
    from devrel_swarm.tools.api_client import PostHogClient
    from devrel_swarm.tools.github_tools import GitHubTools
    from devrel_swarm.tools.instantly_client import InstantlyClient

    env = os.environ.get

    llm = LLMClient(api_key=env("ANTHROPIC_API_KEY", ""))
    llm.set_agent("argus")
    if state_db_path.is_file():
        # Only wire up cost accounting when a project state DB already exists.
        from devrel_swarm.project.cost_sink import make_sqlite_sink

        llm.set_cost_sink(make_sqlite_sink(state_db_path))

    posthog = PostHogClient(
        api_key=env("POSTHOG_API_KEY", ""),
        project_id=env("POSTHOG_PROJECT_ID", ""),
    )
    github = GitHubTools(token=env("GITHUB_TOKEN", ""))
    instantly = InstantlyClient(api_key=env("INSTANTLY_API_KEY", ""))

    return Argus(
        posthog_collector=PostHogCollector(posthog),
        github_collector=GitHubCollector(github),
        instantly_collector=InstantlyCollector(instantly),
        social_collector=SocialCollector(state_db_path),
        llm_client=llm,
        state_db_path=state_db_path,
    )
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _write_markdown_deliverable(
|
|
93
|
+
report: PerformanceReport,
|
|
94
|
+
deliverables_dir: Path,
|
|
95
|
+
) -> Path:
|
|
96
|
+
deliverables_dir.mkdir(parents=True, exist_ok=True)
|
|
97
|
+
out = deliverables_dir / f"analytics-{report.period_end.date().isoformat()}.md"
|
|
98
|
+
out.write_text(report.to_markdown(), encoding="utf-8")
|
|
99
|
+
return out
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@analytics_app.command("summary")
def summary_command(
    root: str = typer.Option(
        "~", "--root", help="Root to scan for .devrel/ directories. Default: $HOME."
    ),
    format_: str = typer.Option("md", "--format", help="md or json."),
    max_depth: int = typer.Option(
        4, "--max-depth", help="Max directory depth to descend (avoids slow $HOME walks)."
    ),
) -> None:
    """Aggregate Argus reports across every .devrel/ project under a root.

    Walks the filesystem looking for ``.devrel/state.db`` files (capped at
    --max-depth) and reports total spend, total recommendations, and the
    most recent report per project.
    """
    # Validate cheap inputs *before* the potentially slow filesystem walk.
    # (Previously an unknown --format was only rejected after scanning; the
    # other analytics subcommands all validate up front.)
    if format_ not in {"md", "json"}:
        raise typer.BadParameter("--format must be 'md' or 'json'")

    root_path = Path(root).expanduser().resolve()
    if not root_path.is_dir():
        console.print(f"[red]{root_path} is not a directory.[/red]")
        raise typer.Exit(code=1)

    # Collect a rollup dict per project that actually has Argus tables.
    projects: list[dict] = []
    for state_db in _walk_for_state_dbs(root_path, max_depth):
        info = _summarize_project_db(state_db)
        if info:
            projects.append(info)

    if format_ == "json":
        sys.stdout.write(json.dumps(projects, indent=2, default=str))
        sys.stdout.write("\n")
        return

    lines = [f"# Argus cross-project summary — {len(projects)} projects under {root_path}", ""]
    if not projects:
        lines.append("_No .devrel/state.db files found._")
        sys.stdout.write("\n".join(lines) + "\n")
        return
    lines.append("| project | last_report | total_recs | total_metrics | spend_usd |")
    lines.append("|---|---|---|---|---|")
    # Most recently reported projects first; projects without a report sort last.
    for p in sorted(projects, key=lambda x: x["last_report"] or "", reverse=True):
        lines.append(
            f"| {p['project']} | {(p['last_report'] or '—')[:10]} | "
            f"{p['total_recs']} | {p['total_metrics']} | ${p['spend_usd']:.2f} |"
        )
    sys.stdout.write("\n".join(lines) + "\n")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def _walk_for_state_dbs(root: Path, max_depth: int):
|
|
153
|
+
"""Yield every state.db at root/**/.devrel/state.db up to max_depth.
|
|
154
|
+
|
|
155
|
+
Skips dot-directories other than .devrel (so ~/.cache, ~/.config etc
|
|
156
|
+
don't slow the walk to a crawl)."""
|
|
157
|
+
|
|
158
|
+
def _walk(dir_: Path, depth: int):
|
|
159
|
+
if depth > max_depth:
|
|
160
|
+
return
|
|
161
|
+
try:
|
|
162
|
+
entries = list(dir_.iterdir())
|
|
163
|
+
except PermissionError:
|
|
164
|
+
return
|
|
165
|
+
for child in entries:
|
|
166
|
+
if not child.is_dir():
|
|
167
|
+
continue
|
|
168
|
+
if child.name == ".devrel":
|
|
169
|
+
state_db = child / "state.db"
|
|
170
|
+
if state_db.is_file():
|
|
171
|
+
yield state_db
|
|
172
|
+
continue
|
|
173
|
+
if child.name.startswith("."):
|
|
174
|
+
continue
|
|
175
|
+
yield from _walk(child, depth + 1)
|
|
176
|
+
|
|
177
|
+
yield from _walk(root, 0)
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def _summarize_project_db(state_db: Path) -> dict | None:
    """Return per-project rollup or None if the DB has no Argus tables."""
    try:
        from devrel_swarm.project.state import open_db

        with open_db(state_db) as conn:
            try:
                # One row each: latest report period, rec count, metric count.
                last_row = conn.execute(
                    "SELECT MAX(period_end) AS p FROM analytics_reports"
                ).fetchone()
                rec_row = conn.execute(
                    "SELECT COUNT(*) AS c FROM analytics_recommendations"
                ).fetchone()
                hist_row = conn.execute("SELECT COUNT(*) AS c FROM metric_history").fetchone()
            except Exception:  # noqa: BLE001 — table missing means not an Argus project
                return None
            cost_row = conn.execute(
                "SELECT COALESCE(SUM(cost_usd), 0.0) AS total FROM costs WHERE agent = 'argus'"
            ).fetchone()
    except Exception:  # noqa: BLE001
        return None

    # state_db is <project>/.devrel/state.db, so two parents up is the project.
    summary: dict = {"project": str(state_db.parent.parent)}
    summary["last_report"] = last_row["p"] if last_row else None
    summary["total_recs"] = int(rec_row["c"]) if rec_row else 0
    summary["total_metrics"] = int(hist_row["c"]) if hist_row else 0
    summary["spend_usd"] = float(cost_row["total"]) if cost_row else 0.0
    return summary
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
@analytics_app.command("history")
def history_command(
    content_id: str = typer.Argument(
        ..., help="Content ID to show history for (e.g., 'blog/cli-launch')."
    ),
    format_: str = typer.Option("md", "--format", help="md or json."),
) -> None:
    """Show the metric trajectory of one piece of content across reports."""
    paths = find_paths_or_exit(console)
    if format_ not in {"md", "json"}:
        raise typer.BadParameter("--format must be 'md' or 'json'")

    from devrel_swarm.project.state import open_db

    if not paths.state_db.is_file():
        console.print("[yellow]No state.db yet. Run 'devrel analytics report' first.[/yellow]")
        raise typer.Exit(code=1)

    # Oldest-first so the markdown table reads as a timeline.
    with open_db(paths.state_db) as conn:
        rows = conn.execute(
            "SELECT period_end, primary_metric, metric_name, content_type "
            "FROM metric_history WHERE content_id = ? ORDER BY period_end ASC",
            (content_id,),
        ).fetchall()

    if not rows:
        console.print(f"[yellow]No history for content_id={content_id}[/yellow]")
        raise typer.Exit(code=1)

    if format_ == "json":
        sys.stdout.write(
            json.dumps(
                [
                    {
                        "period_end": r["period_end"],
                        "primary_metric": r["primary_metric"],
                        "metric_name": r["metric_name"],
                        "content_type": r["content_type"],
                    }
                    for r in rows
                ],
                indent=2,
            )
        )
        sys.stdout.write("\n")
        return

    # Markdown
    lines = [f"# History for `{content_id}` ({rows[0]['content_type']})", ""]
    metric_name = rows[0]["metric_name"]
    lines.append(f"| period_end | {metric_name} | delta |")
    lines.append("|---|---|---|")
    prev = None
    for r in rows:
        v = r["primary_metric"]
        if prev is None:
            # First observation has no baseline to compare against.
            delta = "—"
        else:
            # Percent change vs the previous period; a zero baseline renders
            # as 0.0% rather than dividing by zero.
            d = ((v - prev) / prev * 100) if prev else 0.0
            delta = f"{d:+.1f}%"
        # period_end is sliced to its date portion (assumes ISO timestamps).
        lines.append(f"| {r['period_end'][:10]} | {v:g} | {delta} |")
        prev = v
    sys.stdout.write("\n".join(lines) + "\n")
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
@analytics_app.command("diff")
def diff_command(
    period_a: str = typer.Argument(..., help="Earlier period (YYYY-MM-DD or full ISO)."),
    period_b: str = typer.Argument(..., help="Later period (YYYY-MM-DD or full ISO)."),
    format_: str = typer.Option("md", "--format", help="md or json."),
    limit: int = typer.Option(20, "--limit", help="Top N changes by absolute delta."),
) -> None:
    """Compare two periods side-by-side. Shows the top movers (gainers + losers).

    Periods are matched against metric_history.period_end with a prefix
    match: '2026-04-25' matches any timestamp starting with that date.
    """
    paths = find_paths_or_exit(console)
    if format_ not in {"md", "json"}:
        raise typer.BadParameter("--format must be 'md' or 'json'")

    from devrel_swarm.project.state import open_db

    if not paths.state_db.is_file():
        console.print("[yellow]No state.db. Run 'devrel analytics report' first.[/yellow]")
        raise typer.Exit(code=1)

    # LIKE '<period>%' lets a bare date match full ISO timestamps.
    with open_db(paths.state_db) as conn:
        a_rows = conn.execute(
            "SELECT content_id, primary_metric, metric_name, content_type "
            "FROM metric_history WHERE period_end LIKE ?",
            (f"{period_a}%",),
        ).fetchall()
        b_rows = conn.execute(
            "SELECT content_id, primary_metric, metric_name, content_type "
            "FROM metric_history WHERE period_end LIKE ?",
            (f"{period_b}%",),
        ).fetchall()

    if not a_rows or not b_rows:
        console.print(
            f"[yellow]No history for one or both periods (a={len(a_rows)}, b={len(b_rows)}).[/yellow]"
        )
        raise typer.Exit(code=1)

    a_by_id = {r["content_id"]: r for r in a_rows}
    b_by_id = {r["content_id"]: r for r in b_rows}

    # Classify every content id seen in either period: new / gone / changed.
    rows: list[dict] = []
    for cid in set(a_by_id) | set(b_by_id):
        a_val = a_by_id.get(cid, {"primary_metric": None})["primary_metric"]
        b_val = b_by_id.get(cid, {"primary_metric": None})["primary_metric"]
        if a_val is None and b_val is not None:
            # Appeared in period B only; rank by its raw metric value.
            kind, delta_pct = "new", None
            sort_key = b_val
        elif a_val is not None and b_val is None:
            # Present in A but missing from B; rank by its last-seen value.
            kind, delta_pct = "gone", None
            sort_key = a_val
        else:
            kind = "changed"
            # Percent change; a zero baseline is reported as 0.0 to avoid
            # division by zero.
            delta_pct = ((b_val - a_val) / a_val * 100.0) if a_val else 0.0
            sort_key = abs(delta_pct)
        rows.append(
            {
                "content_id": cid,
                "kind": kind,
                "a": a_val,
                "b": b_val,
                "delta_pct": delta_pct,
                "_sort": sort_key,
                "metric_name": (a_by_id.get(cid) or b_by_id[cid])["metric_name"],
            }
        )

    # Biggest movers first; `or 0` guards against a None sort key.
    rows.sort(key=lambda r: r["_sort"] or 0, reverse=True)
    rows = rows[:limit]

    if format_ == "json":
        # Strip the private _sort helper key before emitting.
        payload = [{k: v for k, v in r.items() if not k.startswith("_")} for r in rows]
        sys.stdout.write(json.dumps(payload, indent=2))
        sys.stdout.write("\n")
        return

    lines = [f"# Diff: {period_a} → {period_b}", ""]
    lines.append("| content_id | kind | a | b | delta |")
    lines.append("|---|---|---|---|---|")
    for r in rows:
        a_disp = f"{r['a']:g}" if r["a"] is not None else "—"
        b_disp = f"{r['b']:g}" if r["b"] is not None else "—"
        d_disp = f"{r['delta_pct']:+.1f}%" if r["delta_pct"] is not None else "—"
        lines.append(f"| {r['content_id']} | {r['kind']} | {a_disp} | {b_disp} | {d_disp} |")
    sys.stdout.write("\n".join(lines) + "\n")
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
@analytics_app.command("calibration")
def calibration_command(
    format_: str = typer.Option("md", "--format", help="md or json."),
) -> None:
    """Show how well past Argus recommendations have actually panned out.

    Scores each historical double_down/retire recommendation against the
    metric_history observations recorded after first_seen_period. Other
    actions are counted as 'unscored' (no clean post-hoc test).
    """
    paths = find_paths_or_exit(console)
    if format_ not in {"md", "json"}:
        raise typer.BadParameter("--format must be 'md' or 'json'")

    if not paths.state_db.is_file():
        console.print("[yellow]No state.db. Run 'devrel analytics report' first.[/yellow]")
        raise typer.Exit(code=1)

    # All the scoring logic lives in core.argus; this command only renders.
    cal = compute_calibration(paths.state_db)
    if format_ == "json":
        sys.stdout.write(json.dumps(cal, indent=2))
        sys.stdout.write("\n")
        return

    lines = ["# Argus calibration", ""]
    lines.append(f"- scored recommendations: **{cal['scored_recs']}**")
    lines.append(
        f"- unscored (insufficient post-period data or non-scoreable action): {cal['unscored_recs']}"
    )
    # Confidence-bucket rates are optional keys — only shown when present.
    if cal.get("high_conf_rate") is not None:
        lines.append(f"- high-confidence (≥0.8) hit rate: {cal['high_conf_rate']:.0%}")
    if cal.get("low_conf_rate") is not None:
        lines.append(f"- low-confidence (<0.5) hit rate: {cal['low_conf_rate']:.0%}")
    lines.append("")
    if not cal["by_action"]:
        lines.append(
            "_No scored recommendations yet. Calibration needs at least one rec with metric_history rows after its first_seen_period._"
        )
    else:
        lines.append("| action | n | panned_out | rate | avg_conf | lift vs coin-flip |")
        lines.append("|---|---|---|---|---|---|")
        # Sorted by action name for stable, diff-friendly output.
        for action, stats in sorted(cal["by_action"].items()):
            lines.append(
                f"| {action} | {stats['n']} | {stats['panned_out']} | "
                f"{stats['rate']:.0%} | {stats['avg_confidence']:.2f} | "
                f"{stats['calibrated_lift']:+.2f} |"
            )
    sys.stdout.write("\n".join(lines) + "\n")
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
@analytics_app.command("report")
def report_command(
    since: str = typer.Option("7d", "--since", help="Lookback window (e.g., 7d, 30d, 12w)."),
    format_: str = typer.Option("md", "--format", help="stdout format: md or json."),
    push: bool = typer.Option(False, "--push", help="Push the report to configured Slack/email."),
    push_on_partial: bool = typer.Option(
        False,
        "--push-on-partial",
        help="Override the all-green push gate. Push even if some sources failed.",
    ),
) -> None:
    """Produce an Argus performance report for the last `--since` window."""
    paths = find_paths_or_exit(console)
    if format_ not in {"md", "json"}:
        raise typer.BadParameter("--format must be 'md' or 'json'")

    delta = _parse_since(since)
    end = datetime.now(timezone.utc)
    start = end - delta

    # Collect metrics + run the single Sonnet recommendation pass.
    argus = _build_argus(paths.state_db)
    report = asyncio.run(argus.run(period_start=start, period_end=end))

    # Deliverable path notices go to stderr so stdout stays pure report output.
    out_path = _write_markdown_deliverable(report, paths.deliverables_dir)
    err_console.print(f"[dim]Wrote deliverable: {out_path}[/dim]")

    brief_paths = write_recommendation_briefs(report, paths.deliverables_dir)
    if brief_paths:
        err_console.print(
            f"[dim]Wrote {len(brief_paths)} content brief(s) for actionable recs[/dim]"
        )

    if format_ == "json":
        sys.stdout.write(json.dumps(report.to_json(), indent=2, default=str))
        sys.stdout.write("\n")
    else:
        sys.stdout.write(report.to_markdown())

    if push:
        # All-green gate: don't push a report built from partial data unless
        # the operator explicitly opts in with --push-on-partial.
        failed_sources = [k for k, v in report.sources_ok.items() if not v]
        if failed_sources and not push_on_partial:
            err_console.print(
                f"[yellow]Skipping push: data is partial (failed sources: "
                f"{', '.join(failed_sources)}). Pass --push-on-partial to override.[/yellow]"
            )
        else:
            try:
                asyncio.run(_push_report(report, end))
            except Exception as exc:  # noqa: BLE001
                # A failed push should never tank a successfully produced report.
                err_console.print(f"[yellow]Push failed: {exc}[/yellow]")
|
|
465
|
+
|
|
466
|
+
|
|
467
|
+
async def _push_report(report: PerformanceReport, end: datetime) -> None:
    """Push the markdown report to Telegram + email if configured.

    Builds a fresh NotificationConfig from env vars; matches how
    devrel-swarm's other push paths construct the notification service.
    """
    import os

    from devrel_swarm.tools.notifications import (
        NotificationConfig,
        NotificationService,
    )

    env = os.environ.get
    recipients = [addr.strip() for addr in env("EMAIL_RECIPIENTS", "").split(",") if addr.strip()]
    config = NotificationConfig(
        telegram_bot_token=env("TELEGRAM_BOT_TOKEN", ""),
        telegram_chat_id=env("TELEGRAM_CHAT_ID", ""),
        email_sender=env("EMAIL_SENDER", ""),
        email_password=env("EMAIL_PASSWORD", ""),
        email_recipients=recipients or None,
    )
    service = NotificationService(config)
    try:
        body = report.to_markdown()
        subject = f"Argus report — {end.date().isoformat()}"
        # Truncate for Telegram's message-size limit rather than fail.
        await service.send_telegram(body[:4000])
        await service.send_email(subject, f"<pre>{body}</pre>")
    finally:
        # Always release the service's underlying connections.
        await service.close()
|