devrel-origin 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_origin/__init__.py +15 -0
- devrel_origin/cli/__init__.py +92 -0
- devrel_origin/cli/_common.py +243 -0
- devrel_origin/cli/analytics.py +28 -0
- devrel_origin/cli/argus.py +497 -0
- devrel_origin/cli/auth.py +227 -0
- devrel_origin/cli/config.py +108 -0
- devrel_origin/cli/content.py +259 -0
- devrel_origin/cli/cost.py +108 -0
- devrel_origin/cli/cro.py +298 -0
- devrel_origin/cli/deliverables.py +65 -0
- devrel_origin/cli/docs.py +91 -0
- devrel_origin/cli/doctor.py +178 -0
- devrel_origin/cli/experiment.py +29 -0
- devrel_origin/cli/growth.py +97 -0
- devrel_origin/cli/init.py +472 -0
- devrel_origin/cli/intel.py +27 -0
- devrel_origin/cli/kb.py +96 -0
- devrel_origin/cli/listen.py +31 -0
- devrel_origin/cli/marketing.py +66 -0
- devrel_origin/cli/migrate.py +45 -0
- devrel_origin/cli/run.py +46 -0
- devrel_origin/cli/sales.py +57 -0
- devrel_origin/cli/schedule.py +62 -0
- devrel_origin/cli/synthesize.py +28 -0
- devrel_origin/cli/triage.py +29 -0
- devrel_origin/cli/video.py +35 -0
- devrel_origin/core/__init__.py +58 -0
- devrel_origin/core/agent_config.py +75 -0
- devrel_origin/core/argus.py +964 -0
- devrel_origin/core/atlas.py +1450 -0
- devrel_origin/core/base.py +372 -0
- devrel_origin/core/cyra.py +563 -0
- devrel_origin/core/dex.py +708 -0
- devrel_origin/core/echo.py +614 -0
- devrel_origin/core/growth/__init__.py +27 -0
- devrel_origin/core/growth/recommendations.py +219 -0
- devrel_origin/core/growth/target_kinds.py +51 -0
- devrel_origin/core/iris.py +513 -0
- devrel_origin/core/kai.py +1367 -0
- devrel_origin/core/llm.py +542 -0
- devrel_origin/core/llm_backends.py +274 -0
- devrel_origin/core/mox.py +514 -0
- devrel_origin/core/nova.py +349 -0
- devrel_origin/core/pax.py +1205 -0
- devrel_origin/core/rex.py +532 -0
- devrel_origin/core/sage.py +486 -0
- devrel_origin/core/sentinel.py +385 -0
- devrel_origin/core/types.py +98 -0
- devrel_origin/core/video/__init__.py +22 -0
- devrel_origin/core/video/assembler.py +131 -0
- devrel_origin/core/video/browser_recorder.py +118 -0
- devrel_origin/core/video/desktop_recorder.py +254 -0
- devrel_origin/core/video/overlay_renderer.py +143 -0
- devrel_origin/core/video/script_parser.py +147 -0
- devrel_origin/core/video/tts_engine.py +82 -0
- devrel_origin/core/vox.py +268 -0
- devrel_origin/core/watchdog.py +321 -0
- devrel_origin/project/__init__.py +1 -0
- devrel_origin/project/config.py +75 -0
- devrel_origin/project/cost_sink.py +61 -0
- devrel_origin/project/init.py +104 -0
- devrel_origin/project/paths.py +75 -0
- devrel_origin/project/state.py +241 -0
- devrel_origin/project/templates/__init__.py +4 -0
- devrel_origin/project/templates/config.toml +24 -0
- devrel_origin/project/templates/devrel.gitignore +10 -0
- devrel_origin/project/templates/slop-blocklist.md +45 -0
- devrel_origin/project/templates/style.md +24 -0
- devrel_origin/project/templates/voice.md +29 -0
- devrel_origin/quality/__init__.py +66 -0
- devrel_origin/quality/editorial.py +357 -0
- devrel_origin/quality/persona.py +84 -0
- devrel_origin/quality/readability.py +148 -0
- devrel_origin/quality/slop.py +167 -0
- devrel_origin/quality/style.py +110 -0
- devrel_origin/quality/voice.py +15 -0
- devrel_origin/tools/__init__.py +9 -0
- devrel_origin/tools/analytics.py +304 -0
- devrel_origin/tools/api_client.py +393 -0
- devrel_origin/tools/apollo_client.py +305 -0
- devrel_origin/tools/code_validator.py +428 -0
- devrel_origin/tools/github_tools.py +297 -0
- devrel_origin/tools/instantly_client.py +412 -0
- devrel_origin/tools/kb_harvester.py +340 -0
- devrel_origin/tools/mcp_server.py +578 -0
- devrel_origin/tools/notifications.py +245 -0
- devrel_origin/tools/run_report.py +193 -0
- devrel_origin/tools/scheduler.py +231 -0
- devrel_origin/tools/search_tools.py +321 -0
- devrel_origin/tools/self_improve.py +168 -0
- devrel_origin/tools/sheets.py +236 -0
- devrel_origin-0.2.14.dist-info/METADATA +354 -0
- devrel_origin-0.2.14.dist-info/RECORD +98 -0
- devrel_origin-0.2.14.dist-info/WHEEL +5 -0
- devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
- devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
- devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,514 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Mox -- Campaign Marketing Agent
|
|
3
|
+
|
|
4
|
+
On-demand marketing content and campaign generation: SEO blog posts,
|
|
5
|
+
landing page copy, social media batches, launch campaigns, and press releases.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any, Optional
|
|
14
|
+
|
|
15
|
+
from devrel_origin.core.base import get_kb_search, load_agent_prompt, strip_markdown_fences
|
|
16
|
+
from devrel_origin.core.llm import LLMClient
|
|
17
|
+
from devrel_origin.quality import generate_with_pipeline
|
|
18
|
+
from devrel_origin.tools.api_client import PostHogClient
|
|
19
|
+
from devrel_origin.tools.code_validator import CodeValidator
|
|
20
|
+
from devrel_origin.tools.instantly_client import CampaignAnalytics, InstantlyClient
|
|
21
|
+
from devrel_origin.tools.search_tools import SearchTools
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# Translate Mox's internal content_type labels into the vocabulary the
# editorial pipeline understands.  Only "blog_post" and "landing_page" exist
# verbatim in DEFAULT_TARGETS; every other label borrows the closest
# neighbour.  Keep this table aligned with CONTENT_KEYWORDS so each routed
# type has an explicit entry — callers log a warning for unmapped types
# rather than silently defaulting.
PIPELINE_CONTENT_TYPE_MAP: dict[str, str] = dict(
    blog="blog_post",
    landing_page="landing_page",
    social="blog_post",
    campaign="blog_post",
    press_release="blog_post",
    case_study="blog_post",
    email_campaign="blog_post",
)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class BlogPost:
    """SEO-optimized marketing blog post."""

    # Post headline.
    title: str
    # Full post body text.
    body: str
    # SEO meta description for search snippets.
    meta_description: str
    # Keywords the post is optimized around.
    target_keywords: list[str]
    # Call-to-action text for the post.
    cta: str
    # Word count of the body.
    word_count: int
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class LandingPageCopy:
    """Full landing page copy structure."""

    # Main hero-section headline.
    hero_headline: str
    # Supporting subheadline under the hero.
    hero_subhead: str
    # Feature blocks as str->str dicts (key schema not fixed here).
    features: list[dict[str, str]]
    # Social-proof snippets (testimonials, quotes, etc.).
    social_proof: list[str]
    # Primary call-to-action text.
    cta_primary: str
    # Secondary call-to-action text.
    cta_secondary: str
    # SEO title for the page.
    seo_title: str
    # SEO meta description for the page.
    seo_description: str
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@dataclass
class SocialBatch:
    """A batch of platform-specific social media posts."""

    # Target platform name (e.g. values routed via CONTENT_KEYWORDS such as
    # twitter/linkedin/reddit — exact vocabulary not enforced here).
    platform: str
    # Campaign the batch belongs to.
    campaign_name: str
    # Individual posts as str->str dicts (key schema not fixed here).
    posts: list[dict[str, str]]
    # Hashtags shared across the batch.
    hashtags: list[str]
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@dataclass
class CampaignBrief:
    """Full product launch or marketing campaign brief."""

    # Campaign name.
    name: str
    # Campaign goal statement.
    goal: str
    # Positioning statement for the campaign.
    positioning: str
    # Key messages to communicate.
    messages: list[str]
    # Distribution channels.
    channels: list[str]
    # Timeline entries as str->str dicts (key schema not fixed here).
    timeline: list[dict[str, str]]
    # Draft asset descriptions or bodies.
    draft_assets: list[str]
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass
class PressRelease:
    """Structured press release."""

    # Release headline.
    headline: str
    # Supporting subheadline.
    subhead: str
    # Release body text.
    body: str
    # Attributed quotes as str->str dicts (key schema not fixed here).
    quotes: list[dict[str, str]]
    # Standard company boilerplate paragraph.
    boilerplate: str
    # Media contact information.
    contact: str
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class Mox:
    """
    Campaign Marketing agent for on-demand content generation.

    Capabilities:
    - SEO blog posts grounded in product knowledge and pain points
    - Landing page copy with features, social proof, and CTAs
    - Social media batches adapted to platform conventions
    - Product launch campaign briefs with timelines
    - Press releases for announcements
    """

    # Fallback system prompt used when no project override exists (see
    # __init__, which resolves the real prompt via load_agent_prompt).
    # "{product_name}" is substituted with str.format at call time.
    _DEFAULT_SYSTEM_PROMPT = """You are Mox, a campaign marketing specialist for {product_name}. \
Your role is to produce marketing content and campaigns that drive awareness, engagement, \
and conversion among developers and technical decision-makers.

Core Guidelines:
1. DEVELOPER-AUTHENTIC -- Write like a developer advocate, not a marketer. \
No buzzwords, no fluff. Technical audiences smell inauthenticity instantly.
2. SEO-AWARE -- Structure blog posts with clear H2/H3 hierarchy, include \
target keywords naturally, write compelling meta descriptions.
3. PAIN-POINT-DRIVEN -- Every piece of content should address a real developer \
frustration identified by upstream agents, not invented marketing problems.
4. DIFFERENTIATED -- Use competitive intelligence to position against \
alternatives. Show don't tell -- concrete features, not vague claims.
5. MULTI-FORMAT -- Adapt messaging for each platform's conventions. Twitter \
threads != LinkedIn posts != Reddit comments.

Copywriting Psychology:
6. SELL THE MOTIVE, NOT THE NEED -- People don't buy tools (Need); they buy \
peace of mind, competitive advantage, time back (Motive). Lead with the \
emotional payoff, then back it with technical evidence.
7. ONE NEXT STEP PER ASSET -- Every piece of content sells exactly one next \
step. A blog post sells a demo booking. A tweet sells a link click. A LinkedIn \
post sells a profile visit. Never try to close from content.
8. FRICTIONLESS READING -- Max 5 lines per paragraph (mobile-first). Replace \
subjective adjectives with hard data ("reduced triage from 12hrs to 30min" \
not "dramatically improved"). Never end with an open question -- end with a \
firm conclusion or direct CTA.
9. STORYTELLING -- Use the Fairytale Framework: "Once upon a time..." (old \
way/pain) -> "And then one day..." (discovery) -> "And now..." (dream \
outcome). Stories bypass critical thinking and build instant trust.
10. POST ARCHITECTURE -- Headlines use numbers, How-To, or extreme pain \
points. Body is hard facts, short paragraphs. Kicker is a direct command, \
never an open question.

Hormozi Offer Strategy:
11. LEAD MAGNET CONTENT -- Give away the information (architecture, workflow, \
strategy) openly and generously. Free content should be better than \
competitors' paid stuff. Sell the implementation (managed deployment, custom \
setup, done-for-you execution).
12. VALUE EQUATION IN COPY -- Frame benefits using: Value = (Dream Outcome x \
Perceived Likelihood) / (Time Delay x Effort). Show the dream outcome \
vividly, prove likelihood with data, emphasize speed and zero-effort.
13. PREMIUM POSITIONING -- Never position on price. Frame as "replace a \
$500K-$1M team" not "affordable tool". Cost comparison is a proof point, \
not a selling point.
14. SCARCITY & URGENCY -- When appropriate, use genuine constraints to drive \
action ("Only onboarding 4 companies this month", "Beta spots limited"). \
Never fake scarcity."""

    @property
    def SYSTEM_PROMPT(self) -> str:
        """The resolved system prompt (project override or the default)."""
        return self._system_prompt

    # Keyword routing table for _parse_content_type. Insertion order matters:
    # the first type whose keyword list matches wins, so "email campaign" /
    # "drip campaign" must precede the generic "campaign" entry.
    CONTENT_KEYWORDS: dict[str, list[str]] = {
        "blog": ["blog", "seo", "article"],
        "landing_page": ["landing page", "landing copy"],
        "social": ["social", "twitter", "linkedin", "reddit"],
        "email_campaign": ["email campaign", "cold email", "drip campaign"],
        "campaign": ["launch", "campaign"],
        "press_release": ["press release", "announcement"],
        "case_study": ["case study", "customer story"],
    }

    def __init__(
        self,
        api_client: PostHogClient,
        knowledge_base_path: Path,
        llm_client: Optional[LLMClient] = None,
        search_tools: Optional[SearchTools] = None,
        instantly_client: Optional[InstantlyClient] = None,
        product_name: str = "the target product",
    ):
        """
        Args:
            api_client: PostHog client (stored; not used directly in this class).
            knowledge_base_path: Root of the knowledge base searched for grounding.
            llm_client: Optional LLM backend; without it, execute() returns the
                prompt instead of generated content.
            search_tools: Optional search helper (stored; not used directly here).
            instantly_client: Optional Instantly client enabling the
                email-campaign push path in execute().
            product_name: Substituted into the system prompt's {product_name}.
        """
        self.api_client = api_client
        self.knowledge_base_path = knowledge_base_path
        self.llm_client = llm_client
        self.search_tools = search_tools
        self.instantly_client = instantly_client
        self.product_name = product_name
        self.code_validator = CodeValidator()
        # KB search with task-phrasing words suppressed so that e.g.
        # "write a blog post about X" retrieves docs about X, not about blogs.
        self._kb = get_kb_search(
            knowledge_base_path,
            extra_stop_words=frozenset(
                {
                    "write",
                    "generate",
                    "create",
                    "blog",
                    "post",
                    "landing",
                    "page",
                    "social",
                    "media",
                    "posts",
                    "campaign",
                    "press",
                    "release",
                }
            ),
        )
        # Project-level "mox/system_prompt.txt" overrides the built-in default.
        self._system_prompt = load_agent_prompt(
            "mox", "system_prompt.txt", self._DEFAULT_SYSTEM_PROMPT
        )

    def _parse_content_type(self, task: str) -> str:
        """Determine content type from task string via keyword matching.

        First CONTENT_KEYWORDS entry (in dict insertion order) with any
        matching keyword wins; unmatched tasks default to "blog".
        """
        task_lower = task.lower()
        for content_type, keywords in self.CONTENT_KEYWORDS.items():
            if any(kw in task_lower for kw in keywords):
                return content_type
        return "blog"  # default

    def _extract_upstream_context(
        self,
        context: dict[str, Any] | None,
    ) -> dict[str, Any]:
        """Extract marketing-relevant data from SharedContext.

        Returns a dict with keys "competitors", "pain_points", and
        "existing_content" — always present, possibly empty. Non-dict
        upstream payloads are ignored.
        """
        extracted: dict[str, Any] = {
            "competitors": [],
            "pain_points": [],
            "existing_content": "",
        }
        if not context:
            return extracted

        # Rex competitive data
        if "rex_competitive" in context:
            rex = context["rex_competitive"]
            if isinstance(rex, dict):
                extracted["competitors"] = rex.get("profiles", [])

        # Iris pain points
        if "iris_themes" in context:
            iris = context["iris_themes"]
            if isinstance(iris, dict):
                extracted["pain_points"] = iris.get("themes", [])

        # Kai's existing content for repurposing (truncated to cap prompt size)
        if "kai_content" in context:
            kai = context["kai_content"]
            if isinstance(kai, dict):
                extracted["existing_content"] = kai.get("content", "")[:2000]

        return extracted

    async def push_campaign(
        self,
        campaign_name: str,
        email_sequences: list[dict],
        accounts: list[str] | None = None,
    ) -> dict[str, Any]:
        """Create a campaign in Instantly with email sequences.

        Returns {"error": ...} when no Instantly client is configured,
        otherwise the created campaign's id, name, and status.
        """
        if not self.instantly_client:
            return {"error": "No Instantly client configured"}

        campaign = await self.instantly_client.create_campaign(
            name=campaign_name,
            sequences=email_sequences,
            accounts=accounts,
        )
        return {
            "campaign_id": campaign.id,
            "campaign_name": campaign.name,
            "status": campaign.status,
        }

    async def pull_campaign_stats(
        self,
        campaign_ids: list[str] | None = None,
    ) -> dict[str, Any]:
        """Fetch and aggregate analytics for active campaigns.

        When campaign_ids is omitted, all campaigns with status "active" are
        used. Per-campaign analytics are fetched concurrently; failed fetches
        are logged and excluded from the aggregates. Returns the zeroed
        "empty" shape when there is no client, no campaigns, or no analytics.
        """
        # Canonical zeroed result shape, reused for every early-out path.
        empty: dict[str, Any] = {
            "total_campaigns": 0,
            "total_sent": 0,
            "total_opened": 0,
            "total_replied": 0,
            "total_bounced": 0,
            "avg_open_rate": 0.0,
            "avg_reply_rate": 0.0,
            "avg_bounce_rate": 0.0,
            "per_campaign": [],
        }
        if not self.instantly_client:
            return empty

        if campaign_ids:
            ids = campaign_ids
        else:
            campaigns = await self.instantly_client.list_campaigns()
            ids = [c.id for c in campaigns if c.status == "active"]

        async def _fetch_analytics(cid: str) -> CampaignAnalytics | None:
            # Best-effort per-campaign fetch: one failing campaign must not
            # abort the whole aggregation.
            try:
                return await self.instantly_client.get_campaign_analytics(cid)
            except Exception as e:
                logger.warning(f"Failed to get analytics for {cid}: {e}")
                return None

        results = await asyncio.gather(*[_fetch_analytics(cid) for cid in ids])
        analytics: list[CampaignAnalytics] = [a for a in results if a is not None]

        # Guards the divisions by n below.
        if not analytics:
            return empty

        total_sent = sum(a.emails_sent for a in analytics)
        total_opened = sum(a.emails_opened for a in analytics)
        total_replied = sum(a.emails_replied for a in analytics)
        total_bounced = sum(a.emails_bounced for a in analytics)
        n = len(analytics)

        return {
            "total_campaigns": n,
            "total_sent": total_sent,
            "total_opened": total_opened,
            "total_replied": total_replied,
            "total_bounced": total_bounced,
            # NOTE: unweighted mean of per-campaign rates, not a rate computed
            # from the totals — small campaigns weigh the same as large ones.
            "avg_open_rate": sum(a.open_rate for a in analytics) / n,
            "avg_reply_rate": sum(a.reply_rate for a in analytics) / n,
            "avg_bounce_rate": sum(a.bounce_rate for a in analytics) / n,
            "per_campaign": [
                {
                    "campaign_id": a.campaign_id,
                    "campaign_name": a.campaign_name,
                    "emails_sent": a.emails_sent,
                    "reply_rate": a.reply_rate,
                }
                for a in analytics
            ],
        }

    def _build_content_prompt(
        self,
        task: str,
        content_type: str,
        upstream: dict[str, Any],
        kb_context: str,
    ) -> str:
        """Build the LLM prompt for content generation.

        Args:
            task: The raw user task string.
            content_type: Parsed type from _parse_content_type.
            upstream: Output of _extract_upstream_context.
            kb_context: Knowledge-base search results as text (may be empty).

        Returns:
            The full user prompt, with KB, competitive, and pain-point
            sections filled in (or placeholder text when data is missing).
        """
        competitive_section = ""
        if upstream["competitors"]:
            competitive_section = "Competitive landscape:\n"
            # Cap at 5 competitors to keep the prompt bounded.
            for c in upstream["competitors"][:5]:
                if isinstance(c, dict):
                    competitive_section += (
                        f"- {c.get('name', '?')}: strengths={c.get('strengths', [])}\n"
                    )

        pain_section = ""
        if upstream["pain_points"]:
            pain_section = "Developer pain points to address:\n"
            # Cap at 5 pain points to keep the prompt bounded.
            for pp in upstream["pain_points"][:5]:
                if isinstance(pp, dict):
                    pain_section += (
                        f"- {pp.get('title', '?')} (severity: {pp.get('severity', '?')})\n"
                    )

        existing_section = ""
        if upstream["existing_content"]:
            # Already capped to 2000 chars upstream; trim further to 1000 here.
            existing_section = (
                f"Existing tutorial content (for reference/repurposing):\n"
                f"{upstream['existing_content'][:1000]}"
            )

        return f"""Task: {task}
Content type: {content_type}

## Knowledge Base
{kb_context if kb_context else "No relevant KB docs found."}

## Competitive Intelligence
{competitive_section if competitive_section else "No competitive data available."}

## Developer Pain Points
{pain_section if pain_section else "No pain point data available."}

{existing_section}

## Instructions
Generate the requested marketing content ({content_type}). Ground all claims
in the knowledge base. Address real developer pain points. Position against
competitors where relevant. Do NOT invent capabilities not in the KB.

For blog posts: include H2/H3 hierarchy, meta description, target keywords.
For social: adapt to platform conventions.
For landing pages: include hero, features, social proof, CTAs.

Return the content as markdown."""

    async def execute(
        self,
        task: str,
        context: Optional[dict[str, Any]] = None,
    ) -> dict[str, Any]:
        """Execute a marketing content generation task.

        Routing:
        1. email_campaign with both Instantly and LLM clients available:
           generate a JSON email sequence and push it to Instantly
           (status "campaign_created").
        2. Otherwise — or when the email path raises — run the editorial
           pipeline and return generated markdown (status "generated").
        3. No LLM client: return the truncated prompt under "prompt_used".
        """
        logger.info(f"Mox executing: {task[:80]}...")

        content_type = self._parse_content_type(task)
        upstream = self._extract_upstream_context(context)
        kb_context = self._kb.search_as_text(task)
        # `prose_prompt` is the clean, human-readable prompt used by the
        # editorial pipeline path. `email_prompt` (built below) decorates it
        # with a JSON output contract for Instantly only — never pass the
        # JSON-decorated one into the editorial pipeline.
        prose_prompt = self._build_content_prompt(task, content_type, upstream, kb_context)
        prompt = prose_prompt  # backwards-compatible alias

        # Handle email_campaign type with Instantly push
        if content_type == "email_campaign" and self.instantly_client and self.llm_client:
            email_prompt = f"""{prose_prompt}

## Output Format
Return ONLY a JSON object with this structure:
{{
  "sequences": [
    {{"subject": "...", "body": "...", "delay_days": N}}
  ]
}}

Use pain points and competitive positioning from above to craft the sequence.
Each email should sell one next step. 3-5 emails in the sequence."""
            try:
                raw = await self.llm_client.generate(
                    system_prompt=self.SYSTEM_PROMPT.format(
                        product_name=self.product_name,
                    ),
                    user_prompt=email_prompt,
                    temperature=0.5,
                )
                # strip_markdown_fences tolerates a ```json-fenced reply;
                # json.loads raising lands us in the fallback below.
                data = json.loads(strip_markdown_fences(raw))
                sequences = data.get("sequences", [])
                campaign_result = await self.push_campaign(
                    campaign_name=f"{self.product_name} - {task[:50]}",
                    email_sequences=sequences,
                )
                return {
                    "agent": "mox",
                    "task": task,
                    "content_type": content_type,
                    "status": "campaign_created",
                    **campaign_result,
                }
            except Exception as exc:
                logger.warning(f"Email campaign creation failed: {exc}")
                # Fall through to normal generation. CRITICAL: continue with
                # `prose_prompt` (clean), NOT the JSON-decorated `email_prompt`.
                prompt = prose_prompt

        base_result: dict[str, Any] = {
            "agent": "mox",
            "task": task,
            "content_type": content_type,
            "status": "generated",
        }

        if self.llm_client:
            try:
                # Map Mox's parsed content_type to the pipeline's vocabulary.
                # See PIPELINE_CONTENT_TYPE_MAP for the full routing table.
                if content_type not in PIPELINE_CONTENT_TYPE_MAP:
                    logger.warning(
                        "Unmapped content_type %r — defaulting to blog_post pipeline",
                        content_type,
                    )
                pipeline_content_type = PIPELINE_CONTENT_TYPE_MAP.get(content_type, "blog_post")
                raw, strengths, issues = await generate_with_pipeline(
                    llm_client=self.llm_client,
                    system_prompt=self.SYSTEM_PROMPT.format(
                        product_name=self.product_name,
                    ),
                    user_prompt=prompt,
                    content_type=pipeline_content_type,
                    logger=logger,
                )
                base_result["content"] = raw
                # The pipeline returns issues either as dicts (with a
                # "severity" key) or plain strings — handle both shapes.
                if issues and isinstance(issues[0], dict):
                    remaining_issues = [
                        i for i in issues if isinstance(i, dict) and i.get("severity") == "high"
                    ]
                else:
                    remaining_issues = [i for i in issues if isinstance(i, str) and i.strip()]
                base_result["revision"] = {
                    "strengths": strengths,
                    "remaining_issues": remaining_issues,
                }

                # Validate code blocks in blog posts
                if content_type == "blog":
                    report = self.code_validator.validate_content(raw)
                    base_result["code_validation"] = {
                        "total_blocks": report.total_blocks,
                        "passed": report.passed,
                        "failed": report.failed,
                        "all_passed": report.all_passed,
                    }
            except Exception as exc:
                # Best-effort: surface the prompt instead of failing the task.
                logger.warning(f"LLM generation failed: {exc}")
                base_result["prompt_used"] = prompt[:500]
        else:
            # No LLM configured — return the prompt so callers can inspect it.
            base_result["prompt_used"] = prompt[:500]

        return base_result
|