devrel-origin 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_origin/__init__.py +15 -0
- devrel_origin/cli/__init__.py +92 -0
- devrel_origin/cli/_common.py +243 -0
- devrel_origin/cli/analytics.py +28 -0
- devrel_origin/cli/argus.py +497 -0
- devrel_origin/cli/auth.py +227 -0
- devrel_origin/cli/config.py +108 -0
- devrel_origin/cli/content.py +259 -0
- devrel_origin/cli/cost.py +108 -0
- devrel_origin/cli/cro.py +298 -0
- devrel_origin/cli/deliverables.py +65 -0
- devrel_origin/cli/docs.py +91 -0
- devrel_origin/cli/doctor.py +178 -0
- devrel_origin/cli/experiment.py +29 -0
- devrel_origin/cli/growth.py +97 -0
- devrel_origin/cli/init.py +472 -0
- devrel_origin/cli/intel.py +27 -0
- devrel_origin/cli/kb.py +96 -0
- devrel_origin/cli/listen.py +31 -0
- devrel_origin/cli/marketing.py +66 -0
- devrel_origin/cli/migrate.py +45 -0
- devrel_origin/cli/run.py +46 -0
- devrel_origin/cli/sales.py +57 -0
- devrel_origin/cli/schedule.py +62 -0
- devrel_origin/cli/synthesize.py +28 -0
- devrel_origin/cli/triage.py +29 -0
- devrel_origin/cli/video.py +35 -0
- devrel_origin/core/__init__.py +58 -0
- devrel_origin/core/agent_config.py +75 -0
- devrel_origin/core/argus.py +964 -0
- devrel_origin/core/atlas.py +1450 -0
- devrel_origin/core/base.py +372 -0
- devrel_origin/core/cyra.py +563 -0
- devrel_origin/core/dex.py +708 -0
- devrel_origin/core/echo.py +614 -0
- devrel_origin/core/growth/__init__.py +27 -0
- devrel_origin/core/growth/recommendations.py +219 -0
- devrel_origin/core/growth/target_kinds.py +51 -0
- devrel_origin/core/iris.py +513 -0
- devrel_origin/core/kai.py +1367 -0
- devrel_origin/core/llm.py +542 -0
- devrel_origin/core/llm_backends.py +274 -0
- devrel_origin/core/mox.py +514 -0
- devrel_origin/core/nova.py +349 -0
- devrel_origin/core/pax.py +1205 -0
- devrel_origin/core/rex.py +532 -0
- devrel_origin/core/sage.py +486 -0
- devrel_origin/core/sentinel.py +385 -0
- devrel_origin/core/types.py +98 -0
- devrel_origin/core/video/__init__.py +22 -0
- devrel_origin/core/video/assembler.py +131 -0
- devrel_origin/core/video/browser_recorder.py +118 -0
- devrel_origin/core/video/desktop_recorder.py +254 -0
- devrel_origin/core/video/overlay_renderer.py +143 -0
- devrel_origin/core/video/script_parser.py +147 -0
- devrel_origin/core/video/tts_engine.py +82 -0
- devrel_origin/core/vox.py +268 -0
- devrel_origin/core/watchdog.py +321 -0
- devrel_origin/project/__init__.py +1 -0
- devrel_origin/project/config.py +75 -0
- devrel_origin/project/cost_sink.py +61 -0
- devrel_origin/project/init.py +104 -0
- devrel_origin/project/paths.py +75 -0
- devrel_origin/project/state.py +241 -0
- devrel_origin/project/templates/__init__.py +4 -0
- devrel_origin/project/templates/config.toml +24 -0
- devrel_origin/project/templates/devrel.gitignore +10 -0
- devrel_origin/project/templates/slop-blocklist.md +45 -0
- devrel_origin/project/templates/style.md +24 -0
- devrel_origin/project/templates/voice.md +29 -0
- devrel_origin/quality/__init__.py +66 -0
- devrel_origin/quality/editorial.py +357 -0
- devrel_origin/quality/persona.py +84 -0
- devrel_origin/quality/readability.py +148 -0
- devrel_origin/quality/slop.py +167 -0
- devrel_origin/quality/style.py +110 -0
- devrel_origin/quality/voice.py +15 -0
- devrel_origin/tools/__init__.py +9 -0
- devrel_origin/tools/analytics.py +304 -0
- devrel_origin/tools/api_client.py +393 -0
- devrel_origin/tools/apollo_client.py +305 -0
- devrel_origin/tools/code_validator.py +428 -0
- devrel_origin/tools/github_tools.py +297 -0
- devrel_origin/tools/instantly_client.py +412 -0
- devrel_origin/tools/kb_harvester.py +340 -0
- devrel_origin/tools/mcp_server.py +578 -0
- devrel_origin/tools/notifications.py +245 -0
- devrel_origin/tools/run_report.py +193 -0
- devrel_origin/tools/scheduler.py +231 -0
- devrel_origin/tools/search_tools.py +321 -0
- devrel_origin/tools/self_improve.py +168 -0
- devrel_origin/tools/sheets.py +236 -0
- devrel_origin-0.2.14.dist-info/METADATA +354 -0
- devrel_origin-0.2.14.dist-info/RECORD +98 -0
- devrel_origin-0.2.14.dist-info/WHEEL +5 -0
- devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
- devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
- devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,372 @@
|
|
|
1
|
+
"""Shared base classes and utilities for all agents."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import math
|
|
5
|
+
import re
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
# Common stop words excluded from KB keyword matching.
# Kept as a frozenset for O(1) membership tests and hashability
# (it is combined with per-agent extras via set union).
STOP_WORDS = frozenset(
    """
    the a an is are was were be been being have has had do does did
    will would could should may might shall can need must ought
    i you he she it we they me him her us my your his its our their
    this that these those what which who whom when where why how
    all each every both few more most other some such no not only
    same so than too very just because as until while
    of at by for with about against between through during before after
    above below to from up down in out on off over under
    again further then once and but or nor if else
    """.split()
)
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def strip_markdown_fences(text: str) -> str:
    """Remove markdown code fences from LLM output.

    Handles any language-tagged fence (```json, ```python, ```yaml, ...)
    as well as bare ``` fences.

    Args:
        text: Raw LLM output, possibly wrapped in a fenced code block.

    Returns:
        The text with a single leading fence (and its language tag) and a
        single trailing fence removed, stripped of surrounding whitespace.
    """
    text = text.strip()
    # Match any language tag after the opening fence, not just a fixed list;
    # previously tags like "yaml" or "markdown" were left in the output.
    text = re.sub(r"^```[A-Za-z0-9_+.-]*\s*\n?", "", text, count=1)
    text = re.sub(r"\n?```\s*$", "", text)
    return text.strip()
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def _resolve_optimize_dir() -> Path | None:
|
|
143
|
+
"""Walk up from this file to find a repo with `optimize/` + `pyproject.toml`.
|
|
144
|
+
|
|
145
|
+
The repo-root `optimize/` tree carries per-agent prompt overrides and
|
|
146
|
+
self-improvement `known_issues.txt` files. It only exists in dev
|
|
147
|
+
checkouts; pipx-installed users never see it and always get the
|
|
148
|
+
inline defaults that callers pass to `load_agent_prompt`.
|
|
149
|
+
"""
|
|
150
|
+
candidate = Path(__file__).resolve().parent
|
|
151
|
+
for _ in range(6):
|
|
152
|
+
candidate = candidate.parent
|
|
153
|
+
if (candidate / "optimize").is_dir() and (candidate / "pyproject.toml").is_file():
|
|
154
|
+
return candidate / "optimize"
|
|
155
|
+
return None
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
# Resolved once at import time: dev checkouts get the repo's `optimize/`
# tree; installed packages get None and fall back to inline defaults.
_OPTIMIZE_DIR = _resolve_optimize_dir()
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def _find_prompt_dir(agent_name: str) -> Path | None:
    """Return the directory holding `{agent_name}`'s prompts, or None.

    Accepts both layouts the repo currently uses: top-level `optimize/{agent}/`
    (Argus) and nested `optimize/agents/{agent}/` (Kai/Echo/Iris/Nova/Rex/Vox/
    Dex/Sage/Mox).
    """
    if _OPTIMIZE_DIR is None:
        return None
    # Top-level layout is checked first, then the nested `agents/` layout.
    search_roots = (_OPTIMIZE_DIR, _OPTIMIZE_DIR / "agents")
    candidates = (root / agent_name for root in search_roots)
    return next((d for d in candidates if d.is_dir()), None)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def load_agent_prompt(agent_name: str, prompt_name: str, default: str) -> str:
    """Load a prompt from the repo's `optimize/` tree if present, else default.

    Searches `optimize/{agent}/{prompt_name}` and the legacy nested
    `optimize/agents/{agent}/{prompt_name}` layout. Appends `known_issues.txt`
    from the same dir when the self-improvement loop has produced one.

    Args:
        agent_name: Agent whose prompt directory should be searched.
        prompt_name: Filename of the prompt to load.
        default: Inline fallback used when no override file exists.

    Returns:
        The prompt text, possibly with known issues appended.
    """
    agent_dir = _find_prompt_dir(agent_name)

    # Candidate override locations, most specific first.
    agent_override = None if agent_dir is None else agent_dir / prompt_name
    shared_override = None if _OPTIMIZE_DIR is None else _OPTIMIZE_DIR / prompt_name

    if agent_override is not None and agent_override.exists():
        logger.info(f"Loaded optimized prompt: {agent_override}")
        prompt = agent_override.read_text(encoding="utf-8")
    elif shared_override is not None and shared_override.exists():
        prompt = shared_override.read_text(encoding="utf-8")
    else:
        prompt = default

    # The self-improvement loop accumulates failure notes per agent;
    # append them so the agent is reminded of its known issues.
    if agent_dir is not None:
        issues_path = agent_dir / "known_issues.txt"
        if issues_path.exists():
            prompt = prompt + "\n\n" + issues_path.read_text(encoding="utf-8")

    return prompt
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def _tokenize(text: str, stop_words: frozenset[str] = STOP_WORDS) -> list[str]:
    """Tokenize text into lowercase words, removing stop words and short tokens."""
    tokens: list[str] = []
    # Split on any run of non-word characters; keep tokens longer than
    # two characters whose lowercase form is not a stop word.
    for word in re.split(r"\W+", text):
        if len(word) <= 2:
            continue
        lowered = word.lower()
        if lowered not in stop_words:
            tokens.append(lowered)
    return tokens
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
# Process-wide cache of KB indexes keyed by (path string, extra stop words)
# so agents sharing a KB path reuse one index instead of re-reading disk.
_kb_cache: dict[tuple[str, frozenset[str] | None], "KnowledgeBaseSearch"] = {}
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def get_kb_search(
    knowledge_base_path: Path,
    extra_stop_words: frozenset[str] | None = None,
) -> "KnowledgeBaseSearch":
    """Return a cached KnowledgeBaseSearch instance.

    Multiple agents sharing the same KB path reuse a single index
    instead of each reading all files from disk independently.

    Args:
        knowledge_base_path: Root directory of the knowledge base.
        extra_stop_words: Optional extra stop words merged into the default set.

    Returns:
        A shared KnowledgeBaseSearch for the given (path, stop words) pair.
    """
    cache_key = (str(knowledge_base_path), extra_stop_words)
    try:
        return _kb_cache[cache_key]
    except KeyError:
        # Build lazily: the index is only constructed on first request.
        kb = KnowledgeBaseSearch(knowledge_base_path, extra_stop_words)
        _kb_cache[cache_key] = kb
        return kb
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
class KnowledgeBaseSearch:
    """Reusable knowledge base indexer and TF-IDF searcher.

    Uses TF-IDF scoring for relevance ranking instead of simple keyword
    overlap. Documents that contain rare, query-specific terms score higher
    than documents matching only common terms.

    Usage::

        kb = KnowledgeBaseSearch(knowledge_base_path)
        results = kb.search("feature flags setup", limit=5)
        text = kb.search_as_text("feature flags setup", limit=5)
    """

    def __init__(
        self,
        knowledge_base_path: Path,
        extra_stop_words: frozenset[str] | None = None,
    ):
        self.path = knowledge_base_path
        # Merge agent-specific stop words into the shared default set.
        self.stop_words = STOP_WORDS | (extra_stop_words or frozenset())
        self.index: dict[str, Path] = {}  # normalized filename stem -> file path
        self._doc_tokens: dict[str, list[str]] = {}  # relative path -> token list
        self._doc_contents: dict[str, str] = {}  # relative path -> raw content
        self._idf: dict[str, float] = {}  # term -> inverse document frequency
        self._build_index()

    def _build_index(self) -> None:
        """Index all markdown files and compute IDF weights."""
        if not self.path.exists():
            logger.info("KB path does not exist, index empty")
            return

        for md_file in self.path.rglob("*.md"):
            # Normalize the filename stem into a space-separated lookup key.
            key = md_file.stem.lower().replace("-", " ").replace("_", " ")
            self.index[key] = md_file
            try:
                text = md_file.read_text(encoding="utf-8")
            except OSError:
                continue  # unreadable file: keep the index entry, skip content
            rel = str(md_file.relative_to(self.path))
            self._doc_contents[rel] = text
            # Filename tokens appear twice so title words outweigh body words.
            self._doc_tokens[rel] = _tokenize(f"{key} {key} {text}", self.stop_words)

        # IDF = log(N / df): rarer terms receive larger weights.
        n_docs = max(len(self._doc_tokens), 1)
        doc_freq: dict[str, int] = {}
        for tokens in self._doc_tokens.values():
            for term in set(tokens):
                doc_freq[term] = doc_freq.get(term, 0) + 1

        self._idf = {term: math.log(n_docs / count) for term, count in doc_freq.items()}
        logger.info(f"KB indexed {len(self.index)} documents, {len(self._idf)} terms")

    def _tfidf_score(self, query_tokens: list[str], source: str) -> float:
        """Compute TF-IDF cosine-like score between query and document."""
        doc_tokens = self._doc_tokens.get(source, [])
        if not query_tokens or not doc_tokens:
            return 0.0

        # Raw term frequencies for document and query.
        doc_tf: dict[str, float] = {}
        for token in doc_tokens:
            doc_tf[token] = doc_tf.get(token, 0) + 1
        query_tf: dict[str, int] = {}
        for token in query_tokens:
            query_tf[token] = query_tf.get(token, 0) + 1

        doc_len = len(doc_tokens)
        # Score: sum of (query_tf * doc_tf/doc_len * idf^2) over shared terms.
        total = 0.0
        for term, qtf in query_tf.items():
            if term not in doc_tf:
                continue
            idf = self._idf.get(term, 1.0)  # unseen terms get a neutral weight
            tf_norm = doc_tf[term] / doc_len
            total += qtf * tf_norm * idf * idf

        return total

    def search(
        self,
        query: str,
        limit: int = 5,
        content_truncate: int = 3000,
        pad_with_remaining: bool = True,
    ) -> list[dict[str, Any]]:
        """Search the knowledge base using TF-IDF scoring.

        Args:
            query: Search query string.
            limit: Maximum number of results.
            content_truncate: Truncate content to this many characters.
            pad_with_remaining: If fewer than *limit* results match, pad with
                unmatched KB docs (preserves the fallback behaviour agents rely on).

        Returns:
            List of dicts with ``source`` (relative path), ``content``, and
            ``relevance`` (float score) keys, sorted by relevance desc.
        """
        query_tokens = _tokenize(query, self.stop_words)

        # Collect every document with a positive TF-IDF score.
        hits = [
            {
                "source": source,
                "content": content[:content_truncate],
                "relevance": round(score, 4),
            }
            for source, content in self._doc_contents.items()
            if (score := self._tfidf_score(query_tokens, source)) > 0
        ]
        hits.sort(key=lambda entry: entry["relevance"], reverse=True)

        # Fallback: pad with remaining KB docs so callers always get context.
        if pad_with_remaining and len(hits) < limit:
            matched = {entry["source"] for entry in hits}
            for source, content in self._doc_contents.items():
                if source in matched:
                    continue
                hits.append(
                    {
                        "source": source,
                        "content": content[:content_truncate],
                        "relevance": 0,
                    }
                )
                if len(hits) >= limit:
                    break

        return hits[:limit]

    def search_as_text(self, query: str, limit: int = 5) -> str:
        """Search and return results as concatenated text.

        Convenience wrapper used by agents (Pax, Mox) that pass KB context
        as a single string to the LLM prompt.
        """
        chunks = [
            f"[{hit['source']}]\n{hit['content']}"
            for hit in self.search(query, limit=limit, content_truncate=2000)
        ]
        return "\n\n".join(chunks)
|