devrel-origin 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98) hide show
  1. devrel_origin/__init__.py +15 -0
  2. devrel_origin/cli/__init__.py +92 -0
  3. devrel_origin/cli/_common.py +243 -0
  4. devrel_origin/cli/analytics.py +28 -0
  5. devrel_origin/cli/argus.py +497 -0
  6. devrel_origin/cli/auth.py +227 -0
  7. devrel_origin/cli/config.py +108 -0
  8. devrel_origin/cli/content.py +259 -0
  9. devrel_origin/cli/cost.py +108 -0
  10. devrel_origin/cli/cro.py +298 -0
  11. devrel_origin/cli/deliverables.py +65 -0
  12. devrel_origin/cli/docs.py +91 -0
  13. devrel_origin/cli/doctor.py +178 -0
  14. devrel_origin/cli/experiment.py +29 -0
  15. devrel_origin/cli/growth.py +97 -0
  16. devrel_origin/cli/init.py +472 -0
  17. devrel_origin/cli/intel.py +27 -0
  18. devrel_origin/cli/kb.py +96 -0
  19. devrel_origin/cli/listen.py +31 -0
  20. devrel_origin/cli/marketing.py +66 -0
  21. devrel_origin/cli/migrate.py +45 -0
  22. devrel_origin/cli/run.py +46 -0
  23. devrel_origin/cli/sales.py +57 -0
  24. devrel_origin/cli/schedule.py +62 -0
  25. devrel_origin/cli/synthesize.py +28 -0
  26. devrel_origin/cli/triage.py +29 -0
  27. devrel_origin/cli/video.py +35 -0
  28. devrel_origin/core/__init__.py +58 -0
  29. devrel_origin/core/agent_config.py +75 -0
  30. devrel_origin/core/argus.py +964 -0
  31. devrel_origin/core/atlas.py +1450 -0
  32. devrel_origin/core/base.py +372 -0
  33. devrel_origin/core/cyra.py +563 -0
  34. devrel_origin/core/dex.py +708 -0
  35. devrel_origin/core/echo.py +614 -0
  36. devrel_origin/core/growth/__init__.py +27 -0
  37. devrel_origin/core/growth/recommendations.py +219 -0
  38. devrel_origin/core/growth/target_kinds.py +51 -0
  39. devrel_origin/core/iris.py +513 -0
  40. devrel_origin/core/kai.py +1367 -0
  41. devrel_origin/core/llm.py +542 -0
  42. devrel_origin/core/llm_backends.py +274 -0
  43. devrel_origin/core/mox.py +514 -0
  44. devrel_origin/core/nova.py +349 -0
  45. devrel_origin/core/pax.py +1205 -0
  46. devrel_origin/core/rex.py +532 -0
  47. devrel_origin/core/sage.py +486 -0
  48. devrel_origin/core/sentinel.py +385 -0
  49. devrel_origin/core/types.py +98 -0
  50. devrel_origin/core/video/__init__.py +22 -0
  51. devrel_origin/core/video/assembler.py +131 -0
  52. devrel_origin/core/video/browser_recorder.py +118 -0
  53. devrel_origin/core/video/desktop_recorder.py +254 -0
  54. devrel_origin/core/video/overlay_renderer.py +143 -0
  55. devrel_origin/core/video/script_parser.py +147 -0
  56. devrel_origin/core/video/tts_engine.py +82 -0
  57. devrel_origin/core/vox.py +268 -0
  58. devrel_origin/core/watchdog.py +321 -0
  59. devrel_origin/project/__init__.py +1 -0
  60. devrel_origin/project/config.py +75 -0
  61. devrel_origin/project/cost_sink.py +61 -0
  62. devrel_origin/project/init.py +104 -0
  63. devrel_origin/project/paths.py +75 -0
  64. devrel_origin/project/state.py +241 -0
  65. devrel_origin/project/templates/__init__.py +4 -0
  66. devrel_origin/project/templates/config.toml +24 -0
  67. devrel_origin/project/templates/devrel.gitignore +10 -0
  68. devrel_origin/project/templates/slop-blocklist.md +45 -0
  69. devrel_origin/project/templates/style.md +24 -0
  70. devrel_origin/project/templates/voice.md +29 -0
  71. devrel_origin/quality/__init__.py +66 -0
  72. devrel_origin/quality/editorial.py +357 -0
  73. devrel_origin/quality/persona.py +84 -0
  74. devrel_origin/quality/readability.py +148 -0
  75. devrel_origin/quality/slop.py +167 -0
  76. devrel_origin/quality/style.py +110 -0
  77. devrel_origin/quality/voice.py +15 -0
  78. devrel_origin/tools/__init__.py +9 -0
  79. devrel_origin/tools/analytics.py +304 -0
  80. devrel_origin/tools/api_client.py +393 -0
  81. devrel_origin/tools/apollo_client.py +305 -0
  82. devrel_origin/tools/code_validator.py +428 -0
  83. devrel_origin/tools/github_tools.py +297 -0
  84. devrel_origin/tools/instantly_client.py +412 -0
  85. devrel_origin/tools/kb_harvester.py +340 -0
  86. devrel_origin/tools/mcp_server.py +578 -0
  87. devrel_origin/tools/notifications.py +245 -0
  88. devrel_origin/tools/run_report.py +193 -0
  89. devrel_origin/tools/scheduler.py +231 -0
  90. devrel_origin/tools/search_tools.py +321 -0
  91. devrel_origin/tools/self_improve.py +168 -0
  92. devrel_origin/tools/sheets.py +236 -0
  93. devrel_origin-0.2.14.dist-info/METADATA +354 -0
  94. devrel_origin-0.2.14.dist-info/RECORD +98 -0
  95. devrel_origin-0.2.14.dist-info/WHEEL +5 -0
  96. devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
  97. devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
  98. devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
@@ -0,0 +1,321 @@
1
+ """
2
+ Search Tools — Web search, content retrieval, and documentation lookup.
3
+
4
+ Provides tools for grounding agent outputs in real-world data:
5
+ - OpenClaw documentation search
6
+ - General web search (via Firecrawl API)
7
+ - URL content extraction
8
+ """
9
+
10
+ import logging
11
+ import os
12
+ from dataclasses import dataclass
13
+
14
+ import httpx
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+ PRODUCT_DOCS_BASE = os.getenv("PRODUCT_URL", "https://openclaw.ai")
19
+ GITHUB_REPO = os.getenv("GITHUB_REPO", "openclaw/openclaw")
20
+ GITMCP_BASE = f"https://gitmcp.io/{GITHUB_REPO}"
21
+ FIRECRAWL_API = "https://api.firecrawl.dev/v1"
22
+ BRAVE_API = "https://api.search.brave.com/res/v1"
23
+ API_TIMEOUT = 20.0
24
+
25
+
26
@dataclass
class SearchResult:
    """A single search result from any of the supported providers."""

    # Human-readable result title (may be empty if the provider omits it).
    title: str
    # Absolute URL of the result.
    url: str
    # Short excerpt/description for the hit (may be empty).
    snippet: str
    # Originating provider: "product_docs", "web", or "discourse".
    source: str
    # Relevance score: set from the provider's own score where available,
    # or recomputed as keyword overlap by SearchTools.rank_results.
    relevance_score: float = 0.0
35
+
36
+
37
@dataclass
class DocSection:
    """A section from OpenClaw documentation."""

    # Section heading.
    title: str
    # Absolute URL of the section.
    url: str
    # Section body text.
    content: str
    # Navigation path to this section in the docs hierarchy
    # (presumably outermost level first — confirm at construction site).
    breadcrumb: list[str]
45
+
46
+
47
class SearchTools:
    """
    Search and retrieval tools for content grounding.

    Supports:
    - OpenClaw documentation search
    - Firecrawl web search API (primary), Brave Search API (fallback)
    - OpenClaw community forum search
    - URL content extraction (Firecrawl scrape with direct HTTP fallback)

    All network calls are best-effort: failures are logged at WARNING level
    and an empty result ("" or []) is returned instead of raising, so agent
    pipelines degrade gracefully when a provider is unavailable.

    Usage::

        search = SearchTools(firecrawl_api_key="fc-...", brave_api_key="BSA...")
        results = await search.search_devrel_ai_agents_docs("agent orchestration")
        web_results = await search.web_search("OpenClaw vs alternatives")
    """

    def __init__(self, firecrawl_api_key: str = "", brave_api_key: str = ""):
        # An empty key means "provider not configured"; web_search() skips it.
        self.firecrawl_api_key = firecrawl_api_key
        self.brave_api_key = brave_api_key
        # One shared async client for all requests (connection pooling);
        # callers are responsible for awaiting close() when finished.
        self._client = httpx.AsyncClient(timeout=API_TIMEOUT)

    async def close(self) -> None:
        """Close the shared HTTP client and release its connections."""
        await self._client.aclose()

    # -- OpenClaw Docs Search --------------------------------------

    async def search_devrel_ai_agents_docs(self, query: str, limit: int = 10) -> list[SearchResult]:
        """
        Search product documentation.

        Tries the docs site's /api/search endpoint first; on any exception
        or non-200 response, falls back to a site-scoped web search.

        Args:
            query: Free-text search query.
            limit: Maximum number of results to return.

        Returns:
            SearchResult list with source="product_docs" (direct hit) or
            source="web" (fallback path).
        """
        try:
            resp = await self._client.get(
                f"{PRODUCT_DOCS_BASE}/api/search",
                params={"q": query, "limit": limit},
            )
            if resp.status_code == 200:
                data = resp.json()
                return [
                    SearchResult(
                        title=hit.get("title", ""),
                        # Docs API is assumed to return site-relative URLs,
                        # so prepend the base — TODO confirm against the API.
                        url=f"{PRODUCT_DOCS_BASE}{hit.get('url', '')}",
                        snippet=hit.get("excerpt", ""),
                        source="product_docs",
                        relevance_score=hit.get("score", 0),
                    )
                    for hit in data.get("results", [])[:limit]
                ]
        except Exception as exc:
            logger.warning(f"Product docs search failed: {exc}")

        # Fallback: site-scoped web search (strip the scheme to build the
        # "site:" query filter).
        docs_domain = PRODUCT_DOCS_BASE.replace("https://", "").replace("http://", "")
        return await self.web_search(f"site:{docs_domain} {query}", limit=limit)

    # -- Official Docs (GitMCP) -------------------------------------------

    async def fetch_official_docs(self, topic: str, max_chars: int = 8000) -> str:
        """
        Fetch official OpenClaw documentation via GitMCP.

        Queries https://gitmcp.io/openclaw/openclaw for the given topic
        to ensure content agents produce accurate, up-to-date information.
        Returns raw documentation text for cross-referencing, or "" when
        every source fails.
        """
        # NOTE(review): `topic` is not used on this primary path — the GitMCP
        # index page is fetched as-is; confirm whether a topic-scoped URL was
        # intended here.
        url = f"{GITMCP_BASE}"
        try:
            # Fetch the repo README / docs index first
            content = await self.fetch_url_content(url, max_chars=max_chars)
            if content:
                logger.info(f"Fetched official docs from GitMCP ({len(content)} chars)")
                return content
        except Exception as exc:
            logger.warning(f"GitMCP fetch failed: {exc}")

        # Fallback: search official docs site and stitch top hits together.
        logger.info("Falling back to OpenClaw docs search for official reference")
        results = await self.search_devrel_ai_agents_docs(topic, limit=5)
        if results:
            sections = []
            # Cap at 3 pages to bound total size and latency.
            for r in results[:3]:
                section_content = await self.fetch_url_content(r.url, max_chars=2000)
                if section_content:
                    sections.append(f"## {r.title}\nSource: {r.url}\n\n{section_content}")
            return "\n\n---\n\n".join(sections)

        return ""

    # -- Community Search -------------------------------------------------

    async def search_discourse(self, query: str, limit: int = 10) -> list[SearchResult]:
        """Search product community forum (Discourse).

        Returns [] when COMMUNITY_URL is unset, the request fails, or the
        response is non-200.
        """
        # Read at call time rather than import time so configuration changes
        # take effect without a restart.
        community_url = os.getenv("COMMUNITY_URL", "")
        if not community_url:
            return []
        try:
            resp = await self._client.get(
                f"{community_url}/search.json",
                params={"q": query},
            )
            if resp.status_code == 200:
                data = resp.json()
                topics = data.get("topics", [])
                return [
                    SearchResult(
                        title=t.get("title", ""),
                        # Canonical Discourse topic URL shape: /t/<slug>/<id>
                        url=f"{community_url}/t/{t.get('slug', '')}/{t.get('id', '')}",
                        snippet=t.get("excerpt", ""),
                        source="discourse",
                    )
                    for t in topics[:limit]
                ]
        except Exception as exc:
            logger.warning(f"Discourse search failed: {exc}")

        return []

    # -- Web Search -------------------------------------------------------

    async def web_search(self, query: str, limit: int = 10) -> list[SearchResult]:
        """
        General web search. Tries Firecrawl first, falls back to Brave Search.

        Requires at least one API key (Firecrawl or Brave); returns [] with
        a warning when no provider produced results.
        """
        # Try Firecrawl first
        if self.firecrawl_api_key:
            results = await self._firecrawl_search(query, limit)
            if results:
                return results
            logger.info("Firecrawl returned no results, trying Brave fallback")

        # Fallback to Brave
        if self.brave_api_key:
            return await self._brave_search(query, limit)

        # Also reached when only Firecrawl is configured and it returned
        # nothing, despite what the message says.
        logger.warning("No search API keys configured — web search unavailable")
        return []

    async def _firecrawl_search(self, query: str, limit: int = 10) -> list[SearchResult]:
        """Search via Firecrawl API. Returns [] on any error."""
        try:
            resp = await self._client.post(
                f"{FIRECRAWL_API}/search",
                headers={
                    "Authorization": f"Bearer {self.firecrawl_api_key}",
                    "Content-Type": "application/json",
                },
                json={"query": query, "limit": limit},
            )
            resp.raise_for_status()
            data = resp.json()

            results = []
            for item in data.get("data", []):
                results.append(
                    SearchResult(
                        title=item.get("title", ""),
                        url=item.get("url", ""),
                        snippet=item.get("description", ""),
                        source="web",
                    )
                )
            # Defensive re-slice in case the API ignores the limit parameter.
            return results[:limit]

        except Exception as exc:
            logger.warning(f"Firecrawl web search failed: {exc}")
            return []

    async def _brave_search(self, query: str, limit: int = 10) -> list[SearchResult]:
        """Search via Brave Search API (fallback). Returns [] on any error."""
        try:
            resp = await self._client.get(
                f"{BRAVE_API}/web/search",
                headers={
                    "X-Subscription-Token": self.brave_api_key,
                    "Accept": "application/json",
                },
                params={"q": query, "count": limit},
            )
            resp.raise_for_status()
            data = resp.json()

            results = []
            for item in data.get("web", {}).get("results", []):
                results.append(
                    SearchResult(
                        title=item.get("title", ""),
                        url=item.get("url", ""),
                        snippet=item.get("description", ""),
                        source="web",
                    )
                )
            # Defensive re-slice in case the API ignores the count parameter.
            return results[:limit]

        except Exception as exc:
            logger.warning(f"Brave web search failed: {exc}")
            return []

    # -- URL Content Extraction -------------------------------------------

    async def fetch_url_content(self, url: str, max_chars: int = 10_000) -> str:
        """
        Fetch and extract text content from a URL.

        When a Firecrawl API key is available, uses the Firecrawl scrape endpoint
        for cleaner markdown output. Falls back to direct HTTP fetch with HTML
        stripping otherwise.

        Returns at most `max_chars` characters, or "" when both paths fail.
        """
        if self.firecrawl_api_key:
            try:
                resp = await self._client.post(
                    f"{FIRECRAWL_API}/scrape",
                    headers={
                        "Authorization": f"Bearer {self.firecrawl_api_key}",
                        "Content-Type": "application/json",
                    },
                    json={"url": url, "formats": ["markdown"]},
                )
                resp.raise_for_status()
                data = resp.json()
                if data.get("success"):
                    text = data.get("data", {}).get("markdown", "")
                    return text[:max_chars]
            except Exception as exc:
                logger.warning(
                    f"Firecrawl scrape failed for {url}: {exc}, falling back to direct fetch"
                )

        # Fallback: direct HTTP fetch with HTML stripping
        try:
            resp = await self._client.get(
                url,
                follow_redirects=True,
                headers={"User-Agent": "DevRelAIAgents/1.0"},
            )
            resp.raise_for_status()
            text = resp.text

            # Crude HTML stripping (production would use readability):
            # drop script/style bodies first so they don't leak into the text,
            # then strip remaining tags and collapse whitespace.
            import re

            text = re.sub(r"<script[^>]*>.*?</script>", "", text, flags=re.DOTALL)
            text = re.sub(r"<style[^>]*>.*?</style>", "", text, flags=re.DOTALL)
            text = re.sub(r"<[^>]+>", " ", text)
            text = re.sub(r"\s+", " ", text).strip()

            return text[:max_chars]

        except Exception as exc:
            logger.warning(f"URL fetch failed for {url}: {exc}")
            return ""

    # -- Knowledge Base Helpers -------------------------------------------

    @staticmethod
    def rank_results(
        results: list[SearchResult],
        query: str,
    ) -> list[SearchResult]:
        """
        Re-rank search results by keyword overlap with query.

        Simple TF-based scoring — production would use embeddings.

        Overwrites each result's relevance_score and sorts the given list
        in place (descending); the same list is also returned.
        """
        query_terms = set(query.lower().split())
        for result in results:
            # Substring match against title+snippet; score is the fraction
            # of query terms present. max(..., 1) guards an empty query.
            text = f"{result.title} {result.snippet}".lower()
            overlap = sum(1 for term in query_terms if term in text)
            result.relevance_score = overlap / max(len(query_terms), 1)

        results.sort(key=lambda r: r.relevance_score, reverse=True)
        return results
@@ -0,0 +1,168 @@
1
+ """
2
+ Self-Improvement — Extract recurring quality issues and feed back into agent prompts.
3
+
4
+ Analyzes Sentinel audit reports across recent weeks to find patterns.
5
+ Generates per-agent "known issues" addenda that are automatically appended
6
+ to agent system prompts via the optimize/ directory.
7
+ """
8
+
9
+ import json
10
+ import logging
11
+ from collections import Counter
12
+ from pathlib import Path
13
+ from typing import Any
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
def load_recent_audits(
    archive_dir: Path,
    weeks: int = 4,
) -> list[dict[str, Any]]:
    """Load Sentinel audit results from recent context archives.

    Scans ``archive_dir`` for ``context_*.json`` files (newest first by name),
    skips intermediate "_stage" snapshots, and returns up to ``weeks`` audits
    extracted from ``okr_progress.brand_audit``. Each returned audit gains a
    ``_week`` key (from the archive's ``week_of`` field, or the file stem).

    Unreadable or malformed archives are silently skipped (best effort).
    """
    audits: list[dict[str, Any]] = []
    # Filter out "_stage" snapshots BEFORE slicing: previously they were
    # skipped inside the loop but still consumed slots in files[:weeks],
    # so fewer than `weeks` real weekly archives could be analyzed.
    files = [
        f
        for f in sorted(archive_dir.glob("context_*.json"), reverse=True)
        if "_stage" not in f.name
    ]

    for f in files[:weeks]:
        try:
            data = json.loads(f.read_text())
            okr = data.get("okr_progress", {})
            audit = okr.get("brand_audit", {})
            if audit and "items" in audit:
                audit["_week"] = data.get("week_of", f.stem)
                audits.append(audit)
        except (json.JSONDecodeError, OSError):
            # Best-effort: a corrupt archive should not abort the whole scan.
            continue

    return audits
40
+
41
+
42
def extract_recurring_issues(
    audits: list[dict[str, Any]],
    min_occurrences: int = 2,
) -> dict[str, list[dict[str, Any]]]:
    """Find issues that recur across multiple weeks, grouped by agent.

    Issue text is taken from each issue's "detail" field (falling back to
    "description"), lowercased and stripped so repeats compare equal.

    Returns a dict of agent_name → list of recurring issues with counts
    (at most five per agent, most frequent first).
    """
    # Phase 1: flatten every audit item into normalized issue strings,
    # bucketed by the agent that produced them.
    per_agent: dict[str, list[str]] = {}
    for weekly_audit in audits:
        for entry in weekly_audit.get("items", []):
            agent_name = entry.get("agent", "unknown")
            for raw_issue in entry.get("issues", []):
                text = raw_issue.get("detail", raw_issue.get("description", ""))
                if not text:
                    continue
                per_agent.setdefault(agent_name, []).append(text.lower().strip())

    # Phase 2: keep only issues seen at least min_occurrences times,
    # capped at the top five per agent.
    result: dict[str, list[dict[str, Any]]] = {}
    for agent_name, issue_texts in per_agent.items():
        kept = []
        for text, n_seen in Counter(issue_texts).most_common():
            if n_seen >= min_occurrences:
                kept.append({"issue": text, "occurrences": n_seen})
        if kept:
            result[agent_name] = kept[:5]

    return result
73
+
74
+
75
def generate_prompt_addenda(
    recurring: dict[str, list[dict[str, Any]]],
    optimize_dir: Path,
) -> dict[str, Path]:
    """Write per-agent known-issues files to the optimize directory.

    These are picked up by load_agent_prompt() as supplementary context.
    Existing addenda are overwritten each cycle.

    Returns a dict of agent_name → file path written.
    """
    paths_by_agent: dict[str, Path] = {}

    for agent, issue_list in recurring.items():
        # Each agent gets its own subdirectory under optimize/.
        target_dir = optimize_dir / agent
        target_dir.mkdir(parents=True, exist_ok=True)
        target_file = target_dir / "known_issues.txt"

        body = [
            "## Known Quality Issues (auto-generated from Sentinel audits)\n",
            "Avoid these recurring problems in your output:\n",
        ]
        body.extend(
            f"- {entry['issue']} (flagged {entry['occurrences']} times)\n"
            for entry in issue_list
        )
        body.append(
            "\nThese issues have been identified across multiple weekly cycles. "
            "Actively work to avoid them.\n"
        )

        target_file.write_text("".join(body))
        paths_by_agent[agent] = target_file
        logger.info(f"Wrote known_issues addendum for {agent}: {target_file}")

    return paths_by_agent
109
+
110
+
111
def run_self_improvement(
    archive_dir: Path,
    optimize_dir: Path,
    weeks: int = 4,
    min_occurrences: int = 2,
) -> dict[str, Any]:
    """Full self-improvement cycle.

    1. Load recent Sentinel audits
    2. Extract recurring issues per agent
    3. Write prompt addenda to optimize/

    Returns a report of what was found and written.
    """
    recent = load_recent_audits(archive_dir, weeks)
    if not recent:
        # Nothing to learn from; return an empty-shaped report.
        logger.info("No recent audits found for self-improvement")
        return {"audits_analyzed": 0, "recurring_issues": {}, "files_written": {}}

    per_agent = extract_recurring_issues(recent, min_occurrences)
    addenda = generate_prompt_addenda(per_agent, optimize_dir)

    # Summarize just the issue texts (counts are internal detail).
    issue_summary: dict[str, list[str]] = {}
    for agent, found in per_agent.items():
        issue_summary[agent] = [entry["issue"] for entry in found]

    logger.info(
        f"Self-improvement: analyzed {len(recent)} audits, "
        f"found {sum(len(v) for v in per_agent.values())} recurring issues "
        f"across {len(per_agent)} agents"
    )
    return {
        "audits_analyzed": len(recent),
        "recurring_issues": issue_summary,
        "files_written": {agent: str(path) for agent, path in addenda.items()},
    }
147
+
148
+
149
def main() -> None:
    """CLI entry point: run one self-improvement cycle, print the JSON report."""
    import argparse

    cli = argparse.ArgumentParser(description="Agent self-improvement from Sentinel audits")
    cli.add_argument("--archive", default="context_archive", help="Archive directory")
    cli.add_argument("--optimize", default="optimize", help="Optimize directory")
    cli.add_argument("--weeks", type=int, default=4, help="Weeks to analyze")
    opts = cli.parse_args()

    result = run_self_improvement(Path(opts.archive), Path(opts.optimize), opts.weeks)
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()