devrel-origin 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. devrel_origin/__init__.py +15 -0
  2. devrel_origin/cli/__init__.py +92 -0
  3. devrel_origin/cli/_common.py +243 -0
  4. devrel_origin/cli/analytics.py +28 -0
  5. devrel_origin/cli/argus.py +497 -0
  6. devrel_origin/cli/auth.py +227 -0
  7. devrel_origin/cli/config.py +108 -0
  8. devrel_origin/cli/content.py +259 -0
  9. devrel_origin/cli/cost.py +108 -0
  10. devrel_origin/cli/cro.py +298 -0
  11. devrel_origin/cli/deliverables.py +65 -0
  12. devrel_origin/cli/docs.py +91 -0
  13. devrel_origin/cli/doctor.py +178 -0
  14. devrel_origin/cli/experiment.py +29 -0
  15. devrel_origin/cli/growth.py +97 -0
  16. devrel_origin/cli/init.py +472 -0
  17. devrel_origin/cli/intel.py +27 -0
  18. devrel_origin/cli/kb.py +96 -0
  19. devrel_origin/cli/listen.py +31 -0
  20. devrel_origin/cli/marketing.py +66 -0
  21. devrel_origin/cli/migrate.py +45 -0
  22. devrel_origin/cli/run.py +46 -0
  23. devrel_origin/cli/sales.py +57 -0
  24. devrel_origin/cli/schedule.py +62 -0
  25. devrel_origin/cli/synthesize.py +28 -0
  26. devrel_origin/cli/triage.py +29 -0
  27. devrel_origin/cli/video.py +35 -0
  28. devrel_origin/core/__init__.py +58 -0
  29. devrel_origin/core/agent_config.py +75 -0
  30. devrel_origin/core/argus.py +964 -0
  31. devrel_origin/core/atlas.py +1450 -0
  32. devrel_origin/core/base.py +372 -0
  33. devrel_origin/core/cyra.py +563 -0
  34. devrel_origin/core/dex.py +708 -0
  35. devrel_origin/core/echo.py +614 -0
  36. devrel_origin/core/growth/__init__.py +27 -0
  37. devrel_origin/core/growth/recommendations.py +219 -0
  38. devrel_origin/core/growth/target_kinds.py +51 -0
  39. devrel_origin/core/iris.py +513 -0
  40. devrel_origin/core/kai.py +1367 -0
  41. devrel_origin/core/llm.py +542 -0
  42. devrel_origin/core/llm_backends.py +274 -0
  43. devrel_origin/core/mox.py +514 -0
  44. devrel_origin/core/nova.py +349 -0
  45. devrel_origin/core/pax.py +1205 -0
  46. devrel_origin/core/rex.py +532 -0
  47. devrel_origin/core/sage.py +486 -0
  48. devrel_origin/core/sentinel.py +385 -0
  49. devrel_origin/core/types.py +98 -0
  50. devrel_origin/core/video/__init__.py +22 -0
  51. devrel_origin/core/video/assembler.py +131 -0
  52. devrel_origin/core/video/browser_recorder.py +118 -0
  53. devrel_origin/core/video/desktop_recorder.py +254 -0
  54. devrel_origin/core/video/overlay_renderer.py +143 -0
  55. devrel_origin/core/video/script_parser.py +147 -0
  56. devrel_origin/core/video/tts_engine.py +82 -0
  57. devrel_origin/core/vox.py +268 -0
  58. devrel_origin/core/watchdog.py +321 -0
  59. devrel_origin/project/__init__.py +1 -0
  60. devrel_origin/project/config.py +75 -0
  61. devrel_origin/project/cost_sink.py +61 -0
  62. devrel_origin/project/init.py +104 -0
  63. devrel_origin/project/paths.py +75 -0
  64. devrel_origin/project/state.py +241 -0
  65. devrel_origin/project/templates/__init__.py +4 -0
  66. devrel_origin/project/templates/config.toml +24 -0
  67. devrel_origin/project/templates/devrel.gitignore +10 -0
  68. devrel_origin/project/templates/slop-blocklist.md +45 -0
  69. devrel_origin/project/templates/style.md +24 -0
  70. devrel_origin/project/templates/voice.md +29 -0
  71. devrel_origin/quality/__init__.py +66 -0
  72. devrel_origin/quality/editorial.py +357 -0
  73. devrel_origin/quality/persona.py +84 -0
  74. devrel_origin/quality/readability.py +148 -0
  75. devrel_origin/quality/slop.py +167 -0
  76. devrel_origin/quality/style.py +110 -0
  77. devrel_origin/quality/voice.py +15 -0
  78. devrel_origin/tools/__init__.py +9 -0
  79. devrel_origin/tools/analytics.py +304 -0
  80. devrel_origin/tools/api_client.py +393 -0
  81. devrel_origin/tools/apollo_client.py +305 -0
  82. devrel_origin/tools/code_validator.py +428 -0
  83. devrel_origin/tools/github_tools.py +297 -0
  84. devrel_origin/tools/instantly_client.py +412 -0
  85. devrel_origin/tools/kb_harvester.py +340 -0
  86. devrel_origin/tools/mcp_server.py +578 -0
  87. devrel_origin/tools/notifications.py +245 -0
  88. devrel_origin/tools/run_report.py +193 -0
  89. devrel_origin/tools/scheduler.py +231 -0
  90. devrel_origin/tools/search_tools.py +321 -0
  91. devrel_origin/tools/self_improve.py +168 -0
  92. devrel_origin/tools/sheets.py +236 -0
  93. devrel_origin-0.2.14.dist-info/METADATA +354 -0
  94. devrel_origin-0.2.14.dist-info/RECORD +98 -0
  95. devrel_origin-0.2.14.dist-info/WHEEL +5 -0
  96. devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
  97. devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
  98. devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
@@ -0,0 +1,614 @@
1
+ """
2
+ Echo — Social Media Listener Agent
3
+
4
+ Monitors Reddit, Hacker News, and Twitter/X for brand mentions,
5
+ sentiment trends, and community conversations. Surfaces opportunities
6
+ for engagement and flags reputation risks.
7
+ """
8
+
9
+ import json
10
+ import logging
11
+ from dataclasses import dataclass
12
+ from datetime import datetime
13
+ from pathlib import Path
14
+ from typing import Any, Optional
15
+
16
+ from devrel_origin.core.base import strip_markdown_fences
17
+ from devrel_origin.core.llm import LLMClient
18
+ from devrel_origin.tools.api_client import PostHogClient
19
+ from devrel_origin.tools.search_tools import SearchTools
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
@dataclass
class SocialMention:
    """A single brand mention on social media.

    Built by ``Echo._parse_search_result`` from a web-search hit; fields the
    search backend cannot provide (``author``, ``engagement``) are filled with
    ""/0 there.
    """

    platform: str  # "reddit", "hackernews", "twitter"
    title: str
    url: str
    author: str  # may be "" — not available from search results
    content: str  # snippet text from the search result
    sentiment: str  # "positive", "neutral", "negative"
    engagement: int  # upvotes, likes, points
    posted_at: str  # "YYYY-MM-DD" (publication date, or scan date as fallback)
    subreddit: Optional[str] = None  # Reddit-specific
    is_question: bool = False  # a QUESTION_SIGNALS keyword matched the text
    requires_response: bool = False  # question, or negative sentiment
39
+
40
+
41
@dataclass
class PlatformSummary:
    """Aggregated stats for a single platform."""

    platform: str  # "reddit", "hackernews", or "twitter"
    total_mentions: int
    sentiment_breakdown: dict[str, int]  # sentiment label -> count
    top_posts: list[SocialMention]  # sorted by engagement, descending
    engagement_total: int  # sum of engagement across all platform mentions
    trending_topics: list[str]  # up to 5 keyword topics, most frequent first
51
+
52
+
53
@dataclass
class SocialListeningReport:
    """Complete social listening report across all platforms."""

    period: str  # e.g. "weekly"
    brand: str
    total_mentions: int
    platforms: list[PlatformSummary]
    sentiment_overall: dict[str, int]  # sentiment label -> count, all platforms
    engagement_opportunities: list[dict[str, str]]  # capped at 10 entries
    reputation_risks: list[dict[str, str]]  # capped at 10 entries
    top_mentions: list[SocialMention]  # top 10 by engagement
65
+
66
+
67
# Keywords that signal engagement opportunities. Substring-matched against the
# lowercased "title + content" of each mention (see
# Echo._find_engagement_opportunities), so short entries like "vs" can also
# match inside longer words.
ENGAGEMENT_SIGNALS = [
    "looking for",
    "recommend",
    "alternative to",
    "anyone using",
    "how to",
    "best tool",
    "which is better",
    "should I use",
    "migrating from",
    "vs",
    "comparison",
]

# Keywords that signal reputation risks. Two or more matches on one mention
# escalate its severity to "high" (see Echo._flag_reputation_risks).
RISK_SIGNALS = [
    "switching away",
    "terrible experience",
    "avoid",
    "broken",
    "data loss",
    "security issue",
    "not working",
    "worst",
    "cancelled",
    "refund",
    "scam",
    "regret",
]

# NOTE(review): not referenced anywhere in this module — presumably an input
# for a future/targeted Reddit scanner; confirm intended use before removing.
REDDIT_SUBREDDITS = [
    "devtools",
    "selfhosted",
    "SaaS",
    "opensource",
]

# Subset of ENGAGEMENT_SIGNALS that specifically indicates a question from a
# user. Maintained as its own constant so that reordering ENGAGEMENT_SIGNALS
# doesn't silently change is_question detection.
QUESTION_SIGNALS: tuple[str, ...] = (
    "?",
    "how do",
    "how to",
    "what is",
    "why does",
    "is there",
    "can someone",
    "anyone know",
)
118
+
119
+
120
+ def _parse_result_date(result: Any) -> Optional[datetime]:
121
+ """Best-effort parse of a search result's publication date.
122
+
123
+ Search backends use different field names — Firecrawl exposes
124
+ ``published_date``, Brave reports ``age``, others use ``date`` or
125
+ ``created_at``. Returns None if no parseable date is found; the
126
+ caller falls back to ``datetime.now()`` so downstream code never
127
+ sees None.
128
+ """
129
+ for fld in ("published_date", "posted_at", "date", "created_at"):
130
+ val = getattr(result, fld, None)
131
+ if not val:
132
+ continue
133
+ if isinstance(val, datetime):
134
+ return val
135
+ if isinstance(val, str):
136
+ try:
137
+ return datetime.fromisoformat(val.replace("Z", "+00:00"))
138
+ except ValueError:
139
+ continue
140
+ return None
141
+
142
+
143
# Platform-specific search query templates. Each value is a str.format
# template taking {brand}; Echo._scan_all_platforms issues one
# site-restricted web search per platform using these.
PLATFORM_QUERIES = {
    "reddit": "site:reddit.com {brand}",
    "hackernews": "site:news.ycombinator.com {brand}",
    "twitter": "site:twitter.com OR site:x.com {brand}",
}
149
+
150
+
151
class Echo:
    """
    Social Media Listener agent for brand monitoring across platforms.

    Capabilities:
    - Monitor Reddit, Hacker News, and Twitter/X for brand mentions
    - Classify sentiment of social media posts
    - Identify engagement opportunities (questions, comparisons, recommendations)
    - Flag reputation risks (negative sentiment, churn signals)
    - Track trending topics and conversations about the product
    - Produce weekly social listening reports

    Tools:
    1. reddit_scanner — Search Reddit for brand mentions across subreddits
    2. hackernews_scanner — Search HN for brand mentions and discussions
    3. twitter_scanner — Search Twitter/X for brand mentions
    4. sentiment_classifier — Classify post sentiment
    5. engagement_detector — Flag posts that warrant a response
    6. risk_detector — Flag posts indicating reputation risk
    7. topic_extractor — Extract trending topics from mentions
    8. report_compiler — Generate social listening report
    """

    # Known lowercase alias spellings per lowercased brand name, used to check
    # whether a search result actually mentions the brand.
    BRAND_ALIASES: dict[str, list[str]] = {
        "openclaw": ["openclaw", "open-claw", "open claw"],
    }

    def __init__(
        self,
        api_client: PostHogClient,
        knowledge_base_path: Path,
        search_tools: Optional[SearchTools] = None,
        llm_client: Optional[LLMClient] = None,
        search_limit: int = 20,
    ):
        self.api_client = api_client
        self.knowledge_base_path = knowledge_base_path
        self.search_tools = search_tools  # required for any platform scanning
        self.llm_client = llm_client  # optional: enables LLM sentiment pass
        self.search_limit = search_limit  # max results per platform query

    async def execute(
        self,
        task: str,
        context: Optional[dict[str, Any]] = None,
    ) -> dict[str, Any]:
        """
        Execute a social listening task.

        Scans social platforms for brand mentions, analyzes sentiment,
        and surfaces engagement opportunities and reputation risks.

        Args:
            task: Free-text task description (logged and echoed in the result).
            context: Optional settings; ``context["brand"]`` overrides the
                default brand name ("OpenClaw").

        Returns:
            A JSON-serializable dict with per-platform summaries, overall
            sentiment, engagement opportunities, reputation risks, and the
            top 10 mentions by engagement.
        """
        logger.info(f"Echo executing: {task[:80]}...")

        # Fix: ``context`` was previously accepted but ignored and the brand
        # hard-coded. Honor an explicit brand from the caller while keeping
        # the old default for backward compatibility.
        brand = str((context or {}).get("brand", "OpenClaw"))
        mentions = await self._scan_all_platforms(brand)

        # Reclassify sentiment with LLM if available (much more accurate)
        await self._batch_classify_sentiment(mentions)

        platform_summaries = self._build_platform_summaries(mentions)
        engagement_ops = self._find_engagement_opportunities(mentions)
        risks = self._flag_reputation_risks(mentions)
        overall_sentiment = self._aggregate_sentiment(mentions)

        return {
            "agent": "echo",
            "task": task,
            "brand": brand,
            "total_mentions": len(mentions),
            "platforms": {
                ps.platform: {
                    "total_mentions": ps.total_mentions,
                    "sentiment_breakdown": ps.sentiment_breakdown,
                    "engagement_total": ps.engagement_total,
                    "trending_topics": ps.trending_topics,
                    "top_posts": [
                        {
                            "title": m.title,
                            "url": m.url,
                            "sentiment": m.sentiment,
                            "engagement": m.engagement,
                        }
                        for m in ps.top_posts[:3]
                    ],
                }
                for ps in platform_summaries
            },
            "sentiment_overall": overall_sentiment,
            "engagement_opportunities": engagement_ops,
            "reputation_risks": risks,
            "top_mentions": [
                {
                    "platform": m.platform,
                    "title": m.title,
                    "url": m.url,
                    "author": m.author,
                    "sentiment": m.sentiment,
                    "engagement": m.engagement,
                }
                for m in sorted(mentions, key=lambda m: m.engagement, reverse=True)[:10]
            ],
            "status": "scanned",
        }

    async def scan_weekly(
        self,
        brand: str = "OpenClaw",
        aliases: Optional[list[str]] = None,
    ) -> SocialListeningReport:
        """Run a full weekly social listening scan.

        Args:
            brand: Brand name to search for.
            aliases: Extra alias spellings merged with BRAND_ALIASES.

        Returns:
            A SocialListeningReport covering all configured platforms.
        """
        mentions = await self._scan_all_platforms(brand, aliases)
        platform_summaries = self._build_platform_summaries(mentions)
        engagement_ops = self._find_engagement_opportunities(mentions)
        risks = self._flag_reputation_risks(mentions)
        overall_sentiment = self._aggregate_sentiment(mentions)

        top_mentions = sorted(mentions, key=lambda m: m.engagement, reverse=True)[:10]

        return SocialListeningReport(
            period="weekly",
            brand=brand,
            total_mentions=len(mentions),
            platforms=platform_summaries,
            sentiment_overall=overall_sentiment,
            engagement_opportunities=engagement_ops,
            reputation_risks=risks,
            top_mentions=top_mentions,
        )

    async def _scan_all_platforms(
        self,
        brand: str,
        aliases: Optional[list[str]] = None,
    ) -> list[SocialMention]:
        """Scan all social platforms for brand mentions.

        A failure on one platform is logged and skipped so the remaining
        platforms are still scanned. Returns an empty list when no search
        tools are configured.
        """
        all_mentions: list[SocialMention] = []

        if not self.search_tools:
            logger.warning("No search tools configured — social scanning unavailable")
            return all_mentions

        brand_lower = brand.lower()
        known_aliases = self.BRAND_ALIASES.get(brand_lower, [brand_lower])
        if aliases:
            # Merge caller-supplied aliases (lowercased, deduplicated).
            known_aliases = list(set(known_aliases + [a.lower() for a in aliases]))

        for platform, query_template in PLATFORM_QUERIES.items():
            try:
                query = query_template.format(brand=brand)
                results = await self.search_tools.web_search(query, limit=self.search_limit)
                for result in results:
                    mention = self._parse_search_result(result, platform, known_aliases)
                    if mention:
                        all_mentions.append(mention)
            except Exception as exc:
                logger.warning(f"Failed to scan {platform}: {exc}")

        return all_mentions

    def _parse_search_result(
        self,
        result: Any,
        platform: str,
        aliases: list[str],
    ) -> Optional[SocialMention]:
        """Parse a search result into a SocialMention if it mentions the brand.

        Returns None when no alias appears in the title/snippet text.
        """
        text = f"{result.title} {result.snippet}".lower()

        # Check if brand is actually mentioned
        if not any(alias in text for alias in aliases):
            return None

        sentiment = self._classify_sentiment(text)
        is_question = any(signal in text for signal in QUESTION_SIGNALS)
        requires_response = is_question or sentiment == "negative"

        # Extract subreddit from URL if Reddit
        subreddit = None
        if platform == "reddit" and "/r/" in result.url:
            parts = result.url.split("/r/")
            if len(parts) > 1:
                subreddit = parts[1].split("/")[0]

        # Use the actual publication date from the result when available;
        # otherwise fall back to "now" (preserves prior behavior). Trend
        # detection is broken if every mention is timestamped with the
        # scrape time.
        parsed_date = _parse_result_date(result)
        posted_at = (parsed_date or datetime.now()).strftime("%Y-%m-%d")

        return SocialMention(
            platform=platform,
            title=result.title,
            url=result.url,
            author="",  # Not available from search results
            content=result.snippet,
            sentiment=sentiment,
            engagement=0,  # Not available from search results
            posted_at=posted_at,
            subreddit=subreddit,
            is_question=is_question,
            requires_response=requires_response,
        )

    def _classify_sentiment_rule_based(self, text: str) -> str:
        """Rule-based sentiment classification fallback.

        Counts negative vs positive keyword hits in the lowercased text and
        returns whichever side dominates; ties (including zero hits) are
        "neutral".
        """
        text_lower = text.lower()

        negative_signals = [
            "terrible",
            "worst",
            "broken",
            "hate",
            "awful",
            "switching away",
            "avoid",
            "not working",
            "regret",
            "disappointed",
            "frustrated",
            "useless",
            "buggy",
        ]
        positive_signals = [
            "love",
            "great",
            "awesome",
            "amazing",
            "best",
            "recommend",
            "fantastic",
            "excellent",
            "solid",
            "impressed",
            "perfect",
            "incredible",
            "game changer",
        ]

        neg_count = sum(1 for s in negative_signals if s in text_lower)
        pos_count = sum(1 for s in positive_signals if s in text_lower)

        if neg_count > pos_count:
            return "negative"
        if pos_count > neg_count:
            return "positive"
        return "neutral"

    def _classify_sentiment(self, text: str) -> str:
        """Synchronous sentiment for single mentions (rule-based fallback)."""
        return self._classify_sentiment_rule_based(text)

    # Mentions per LLM call in _batch_classify_sentiment.
    _SENTIMENT_BATCH_SIZE = 40

    async def _batch_classify_sentiment(
        self,
        mentions: list["SocialMention"],
    ) -> None:
        """Reclassify sentiment for all mentions using LLM in batched calls.

        Processes mentions in chunks of _SENTIMENT_BATCH_SIZE. Mutates
        mention.sentiment in place. Falls back to keeping the rule-based
        classification if an LLM call fails for a chunk. No-op when no LLM
        client is configured or the mention list is empty.
        """
        if not self.llm_client or not mentions:
            return

        total_classified = 0
        for chunk_start in range(0, len(mentions), self._SENTIMENT_BATCH_SIZE):
            chunk = mentions[chunk_start : chunk_start + self._SENTIMENT_BATCH_SIZE]
            classified = await self._classify_sentiment_chunk(chunk)
            total_classified += classified

        if total_classified:
            logger.info(f"LLM sentiment classified {total_classified}/{len(mentions)} mentions")

    async def _classify_sentiment_chunk(
        self,
        chunk: list["SocialMention"],
    ) -> int:
        """Classify sentiment for a single chunk. Returns count of classified.

        Sends an indexed list of mention excerpts to the LLM and applies the
        returned {"index", "sentiment"} objects back onto the chunk. Any
        failure (LLM call, JSON parse) keeps the rule-based labels and
        returns 0.
        """
        items = []
        for i, m in enumerate(chunk):
            # Truncate title/content so a chunk stays well within max_tokens.
            items.append(f"{i}. [{m.platform}] {m.title[:100]} — {m.content[:150]}")
        items_text = "\n".join(items)

        prompt = f"""Classify the sentiment of each social media mention below.
Return a JSON array where each element is an object with "index" (int) and
"sentiment" (one of "positive", "neutral", "negative").

Mentions:
{items_text}

Consider nuance:
- Sarcasm: "love how broken this is" = negative
- Mixed sentiment: lean toward the dominant signal
- Developer frustration: "why doesn't it support X?" = neutral-negative
- Feature requests framed as complaints = neutral
- Content too short to determine = neutral

Return ONLY the JSON array, no commentary."""

        try:
            raw = await self.llm_client.generate(
                system_prompt=(
                    "You are a sentiment analyst specializing in developer community "
                    "discourse across Reddit, Hacker News, and Twitter/X."
                ),
                user_prompt=prompt,
                temperature=0.1,
                max_tokens=2048,
                model="haiku",
            )
            cleaned = strip_markdown_fences(raw)
            results = json.loads(cleaned)
            classified = 0
            for item in results:
                # Fix: a malformed reply (e.g. list of strings) previously
                # raised AttributeError on .get and discarded the whole
                # chunk; skip bad items instead.
                if not isinstance(item, dict):
                    continue
                idx = item.get("index", -1)
                sent = item.get("sentiment", "")
                if 0 <= idx < len(chunk) and sent in ("positive", "neutral", "negative"):
                    chunk[idx].sentiment = sent
                    classified += 1
            return classified
        except Exception as exc:
            logger.warning(f"LLM sentiment chunk failed, keeping rule-based: {exc}")
            return 0

    def _build_platform_summaries(
        self,
        mentions: list[SocialMention],
    ) -> list[PlatformSummary]:
        """Build per-platform summaries from all mentions."""
        by_platform: dict[str, list[SocialMention]] = {}
        for m in mentions:
            by_platform.setdefault(m.platform, []).append(m)

        summaries = []
        for platform, platform_mentions in by_platform.items():
            sentiment_breakdown: dict[str, int] = {}
            for m in platform_mentions:
                sentiment_breakdown[m.sentiment] = sentiment_breakdown.get(m.sentiment, 0) + 1

            engagement_total = sum(m.engagement for m in platform_mentions)
            top_posts = sorted(platform_mentions, key=lambda m: m.engagement, reverse=True)[:5]

            # Extract topics from titles
            topics = self._extract_topics(platform_mentions)

            summaries.append(
                PlatformSummary(
                    platform=platform,
                    total_mentions=len(platform_mentions),
                    sentiment_breakdown=sentiment_breakdown,
                    top_posts=top_posts,
                    engagement_total=engagement_total,
                    trending_topics=topics,
                )
            )

        return summaries

    def _find_engagement_opportunities(
        self,
        mentions: list[SocialMention],
    ) -> list[dict[str, str]]:
        """Find posts that represent engagement opportunities.

        A mention qualifies when any ENGAGEMENT_SIGNALS keyword appears in
        its title/content. Returns at most 10 opportunities.
        """
        opportunities = []
        for m in mentions:
            text = f"{m.title} {m.content}".lower()
            matched_signals = [s for s in ENGAGEMENT_SIGNALS if s in text]
            if matched_signals:
                opportunities.append(
                    {
                        "platform": m.platform,
                        "title": m.title,
                        "url": m.url,
                        "reason": f"Matches signals: {', '.join(matched_signals[:3])}",
                        "suggested_action": self._suggest_engagement_action(matched_signals),
                    }
                )
        return opportunities[:10]

    def _flag_reputation_risks(
        self,
        mentions: list[SocialMention],
    ) -> list[dict[str, str]]:
        """Flag posts that indicate reputation risks.

        Severity is "high" when two or more RISK_SIGNALS match, otherwise
        "medium". Returns at most 10 risks.
        """
        risks = []
        for m in mentions:
            text = f"{m.title} {m.content}".lower()
            matched_risks = [s for s in RISK_SIGNALS if s in text]
            if matched_risks:
                risks.append(
                    {
                        "platform": m.platform,
                        "title": m.title,
                        "url": m.url,
                        "severity": "high" if len(matched_risks) >= 2 else "medium",
                        "signals": matched_risks,
                    }
                )
        return risks[:10]

    def _aggregate_sentiment(
        self,
        mentions: list[SocialMention],
    ) -> dict[str, int]:
        """Aggregate sentiment across all mentions.

        Always returns all three keys ("positive"/"neutral"/"negative"),
        each defaulting to 0.
        """
        breakdown: dict[str, int] = {"positive": 0, "neutral": 0, "negative": 0}
        for m in mentions:
            breakdown[m.sentiment] = breakdown.get(m.sentiment, 0) + 1
        return breakdown

    def _extract_topics(
        self,
        mentions: list[SocialMention],
    ) -> list[str]:
        """Extract trending topics from mention titles.

        Counts hits of a fixed keyword list across title+content and returns
        the top 5 by frequency.
        """
        topic_keywords = [
            "devrel",
            "developer relations",
            "developer advocacy",
            "community",
            "open source",
            "self-hosted",
            "ai agents",
            "multi-agent",
            "devtools",
            "developer experience",
            "sdk",
            "api",
            "orbit",
            "common room",
            "devrev",
            "chatwoot",
            "integration",
            "automation",
            "performance",
            "pricing",
        ]

        topic_counts: dict[str, int] = {}
        for m in mentions:
            text = f"{m.title} {m.content}".lower()
            for topic in topic_keywords:
                if topic in text:
                    topic_counts[topic] = topic_counts.get(topic, 0) + 1

        sorted_topics = sorted(topic_counts.items(), key=lambda x: x[1], reverse=True)
        return [topic for topic, _ in sorted_topics[:5]]

    @staticmethod
    def _suggest_engagement_action(signals: list[str]) -> str:
        """Suggest what kind of engagement to do based on signals.

        The first matching signal family wins; order therefore encodes
        priority (need > comparison > how-to > migration).
        """
        if any(s in signals for s in ["looking for", "recommend", "best tool"]):
            return "Share how OpenClaw addresses their need with a helpful, non-salesy comment"
        if any(s in signals for s in ["alternative to", "vs", "comparison"]):
            return "Provide an honest comparison highlighting OpenClaw's strengths vs Orbit, Common Room, DevRev, or Chatwoot"
        if any(s in signals for s in ["how to", "anyone using"]):
            return "Share relevant documentation or tutorial link"
        if any(s in signals for s in ["migrating from", "should I use"]):
            return "Offer migration guidance and link to getting-started docs"
        return "Engage with helpful, relevant information"
@@ -0,0 +1,27 @@
1
+ """Shared helpers for the Growth pipeline (Selene/Vega/Cyra + Argus)."""
2
+
3
+ from devrel_origin.core.growth.recommendations import (
4
+ Recommendation,
5
+ calibrate,
6
+ find_open_by_target,
7
+ find_stale,
8
+ mark_applied,
9
+ persist_recommendation,
10
+ )
11
+ from devrel_origin.core.growth.target_kinds import (
12
+ Pillar,
13
+ TargetKind,
14
+ validate_target_kind_for_pillar,
15
+ )
16
+
17
+ __all__ = [
18
+ "Pillar",
19
+ "TargetKind",
20
+ "Recommendation",
21
+ "persist_recommendation",
22
+ "find_open_by_target",
23
+ "mark_applied",
24
+ "find_stale",
25
+ "calibrate",
26
+ "validate_target_kind_for_pillar",
27
+ ]