devrel-origin 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devrel_origin/__init__.py +15 -0
- devrel_origin/cli/__init__.py +92 -0
- devrel_origin/cli/_common.py +243 -0
- devrel_origin/cli/analytics.py +28 -0
- devrel_origin/cli/argus.py +497 -0
- devrel_origin/cli/auth.py +227 -0
- devrel_origin/cli/config.py +108 -0
- devrel_origin/cli/content.py +259 -0
- devrel_origin/cli/cost.py +108 -0
- devrel_origin/cli/cro.py +298 -0
- devrel_origin/cli/deliverables.py +65 -0
- devrel_origin/cli/docs.py +91 -0
- devrel_origin/cli/doctor.py +178 -0
- devrel_origin/cli/experiment.py +29 -0
- devrel_origin/cli/growth.py +97 -0
- devrel_origin/cli/init.py +472 -0
- devrel_origin/cli/intel.py +27 -0
- devrel_origin/cli/kb.py +96 -0
- devrel_origin/cli/listen.py +31 -0
- devrel_origin/cli/marketing.py +66 -0
- devrel_origin/cli/migrate.py +45 -0
- devrel_origin/cli/run.py +46 -0
- devrel_origin/cli/sales.py +57 -0
- devrel_origin/cli/schedule.py +62 -0
- devrel_origin/cli/synthesize.py +28 -0
- devrel_origin/cli/triage.py +29 -0
- devrel_origin/cli/video.py +35 -0
- devrel_origin/core/__init__.py +58 -0
- devrel_origin/core/agent_config.py +75 -0
- devrel_origin/core/argus.py +964 -0
- devrel_origin/core/atlas.py +1450 -0
- devrel_origin/core/base.py +372 -0
- devrel_origin/core/cyra.py +563 -0
- devrel_origin/core/dex.py +708 -0
- devrel_origin/core/echo.py +614 -0
- devrel_origin/core/growth/__init__.py +27 -0
- devrel_origin/core/growth/recommendations.py +219 -0
- devrel_origin/core/growth/target_kinds.py +51 -0
- devrel_origin/core/iris.py +513 -0
- devrel_origin/core/kai.py +1367 -0
- devrel_origin/core/llm.py +542 -0
- devrel_origin/core/llm_backends.py +274 -0
- devrel_origin/core/mox.py +514 -0
- devrel_origin/core/nova.py +349 -0
- devrel_origin/core/pax.py +1205 -0
- devrel_origin/core/rex.py +532 -0
- devrel_origin/core/sage.py +486 -0
- devrel_origin/core/sentinel.py +385 -0
- devrel_origin/core/types.py +98 -0
- devrel_origin/core/video/__init__.py +22 -0
- devrel_origin/core/video/assembler.py +131 -0
- devrel_origin/core/video/browser_recorder.py +118 -0
- devrel_origin/core/video/desktop_recorder.py +254 -0
- devrel_origin/core/video/overlay_renderer.py +143 -0
- devrel_origin/core/video/script_parser.py +147 -0
- devrel_origin/core/video/tts_engine.py +82 -0
- devrel_origin/core/vox.py +268 -0
- devrel_origin/core/watchdog.py +321 -0
- devrel_origin/project/__init__.py +1 -0
- devrel_origin/project/config.py +75 -0
- devrel_origin/project/cost_sink.py +61 -0
- devrel_origin/project/init.py +104 -0
- devrel_origin/project/paths.py +75 -0
- devrel_origin/project/state.py +241 -0
- devrel_origin/project/templates/__init__.py +4 -0
- devrel_origin/project/templates/config.toml +24 -0
- devrel_origin/project/templates/devrel.gitignore +10 -0
- devrel_origin/project/templates/slop-blocklist.md +45 -0
- devrel_origin/project/templates/style.md +24 -0
- devrel_origin/project/templates/voice.md +29 -0
- devrel_origin/quality/__init__.py +66 -0
- devrel_origin/quality/editorial.py +357 -0
- devrel_origin/quality/persona.py +84 -0
- devrel_origin/quality/readability.py +148 -0
- devrel_origin/quality/slop.py +167 -0
- devrel_origin/quality/style.py +110 -0
- devrel_origin/quality/voice.py +15 -0
- devrel_origin/tools/__init__.py +9 -0
- devrel_origin/tools/analytics.py +304 -0
- devrel_origin/tools/api_client.py +393 -0
- devrel_origin/tools/apollo_client.py +305 -0
- devrel_origin/tools/code_validator.py +428 -0
- devrel_origin/tools/github_tools.py +297 -0
- devrel_origin/tools/instantly_client.py +412 -0
- devrel_origin/tools/kb_harvester.py +340 -0
- devrel_origin/tools/mcp_server.py +578 -0
- devrel_origin/tools/notifications.py +245 -0
- devrel_origin/tools/run_report.py +193 -0
- devrel_origin/tools/scheduler.py +231 -0
- devrel_origin/tools/search_tools.py +321 -0
- devrel_origin/tools/self_improve.py +168 -0
- devrel_origin/tools/sheets.py +236 -0
- devrel_origin-0.2.14.dist-info/METADATA +354 -0
- devrel_origin-0.2.14.dist-info/RECORD +98 -0
- devrel_origin-0.2.14.dist-info/WHEEL +5 -0
- devrel_origin-0.2.14.dist-info/entry_points.txt +2 -0
- devrel_origin-0.2.14.dist-info/licenses/LICENSE +21 -0
- devrel_origin-0.2.14.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Nova — Growth Strategist Agent
|
|
3
|
+
|
|
4
|
+
Designs activation experiments, analyzes funnels, segments cohorts,
|
|
5
|
+
and models LTV — all with statistical rigor.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import hashlib
|
|
9
|
+
import logging
|
|
10
|
+
import math
|
|
11
|
+
import os
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Any, Optional
|
|
15
|
+
|
|
16
|
+
from scipy import stats
|
|
17
|
+
|
|
18
|
+
from devrel_origin.tools.api_client import PostHogClient
|
|
19
|
+
# Module-level logger; handler/level configuration is left to the application.
logger = logging.getLogger(__name__)


# Default daily signups assumed when DAILY_SIGNUPS_ESTIMATE is unset.
DAILY_SIGNUPS_DEFAULT = 500
# Below this floor, experiment durations explode into multi-decade ranges
# and a value of 0 raises ZeroDivisionError. We clamp to this floor and
# warn rather than silently producing a 30-year duration.
DAILY_SIGNUPS_FLOOR = 10
|
29
|
+
|
|
30
|
+
|
|
@dataclass
class ExperimentDesign:
    """A pre-registered A/B test design with statistical rigor."""

    # Stable identifier for the experiment (derived from the hypothesis
    # text when produced by Nova.design_experiment).
    experiment_id: str
    hypothesis: str
    # Metric the experiment is powered to move.
    primary_metric: str
    secondary_metrics: list[str]
    control_description: str
    variant_description: str
    # Required participants per arm, from the power analysis.
    sample_size_per_arm: int
    minimum_detectable_effect: float  # e.g., 0.05 for 5% lift
    statistical_power: float  # typically 0.8
    significance_level: float  # typically 0.05
    expected_duration_days: int
    # Metrics watched for regressions while the experiment runs.
    guardrail_metrics: list[str]
    evaluation_method: str  # frequentist, bayesian
    # ISO date the design was registered; may be empty if not yet stamped.
    pre_registration_date: str
    success_criteria: str
|
50
|
+
|
|
51
|
+
|
|
@dataclass
class FunnelAnalysis:
    """Analysis of a conversion funnel."""

    funnel_name: str
    stages: list[dict[str, Any]]  # name, count, conversion_rate, drop_off
    # Fraction of users in the first stage who reach the last stage.
    overall_conversion: float
    # Name of the stage with the largest drop-off from its predecessor.
    biggest_drop_off_stage: str
    recommended_interventions: list[str]
61
|
+
|
|
62
|
+
|
|
@dataclass
class CohortSegment:
    """A user cohort defined by behavior and attributes."""

    segment_name: str
    # Human-readable description of how the cohort is defined.
    definition: str
    # Number of users in the segment.
    size: int
    activation_rate: float
    # Fraction of the cohort retained at day 7 / day 30.
    retention_d7: float
    retention_d30: float
    avg_events_per_user: float
    # Estimated lifetime value per user in this segment.
    ltv_estimate: float
|
75
|
+
|
|
76
|
+
|
|
class Nova:
    """
    Growth Strategist agent for experiment design and funnel optimization.

    Capabilities:
    - Design A/B experiments with proper power analysis and pre-registration
    - Analyze activation and conversion funnels
    - Segment users into behavioral cohorts
    - Model LTV by segment
    - Recommend growth interventions based on data

    Tools:
    1. analytics_query — Query analytics for trends, funnels, retention
    2. experiments_api — Create and read experiments via API
    3. cohorts_api — Define and query cohorts
    4. feature_flags_api — Manage feature flags for experiments
    5. power_calculator — Compute required sample size for experiments
    6. bayesian_evaluator — Evaluate experiment results with Bayesian methods
    7. funnel_analyzer — Decompose funnels into stage-by-stage metrics
    8. cohort_segmenter — Cluster users by behavior patterns
    9. ltv_modeler — Estimate lifetime value by segment
    10. intervention_recommender — Suggest growth actions based on funnel data
    11. experiment_pre_registrar — Generate pre-registration documents
    12. statistical_validator — Check experiment results for common pitfalls
    13. report_generator — Compile experiment results into reports
    14. alert_configurator — Set up metric alerts for guardrails
    """

    # System prompt handed to the LLM layer; runtime text — do not edit
    # casually, downstream behavior depends on its exact wording.
    SYSTEM_PROMPT = """You are Nova, a growth strategist for OpenClaw. You design
experiments and analyze data with statistical rigor to drive developer activation
and retention.

Growth principles:
1. PRE-REGISTER — Define hypothesis, metrics, and sample size BEFORE running
2. POWER ANALYSIS — Never run underpowered experiments. Calculate required n.
3. GUARDRAILS — Every experiment needs guardrail metrics to catch regressions
4. BAYESIAN WHEN POSSIBLE — Bayesian evaluation for faster, more intuitive results
5. SEGMENT FIRST — Different cohorts have different activation patterns

OpenClaw activation metrics:
- Time to repo cloned and dependencies installed (< 5 min = good)
- Time to first agent run completed (< 30 min = good)
- Knowledge base configured in first 7 days (>= 1 vertical = activated)
- Weekly cycle activated in first 7 days (>= 1 full cycle = activated)
- Team members onboarded in first 14 days (>= 1 additional = sticky)

Key funnel stages:
1. Signup → Repo cloned (target: 70%)
2. Repo cloned → First agent run (target: 85%)
3. First agent run → Knowledge base configured (target: 50%)
4. Knowledge base configured → Weekly cycle activated (target: 30%)
5. Weekly cycle activated → Team onboarded (target: 40%)

Power analysis formula:
n = (Z_alpha/2 + Z_beta)^2 * 2 * p * (1-p) / MDE^2
where MDE = minimum detectable effect, p = baseline conversion rate"""
|
133
|
+
|
|
    def __init__(
        self,
        api_client: PostHogClient,
        knowledge_base_path: Path,
    ):
        """
        Args:
            api_client: Analytics client; ``execute`` probes it for an
                optional ``get_funnel`` method to fetch real funnel stages.
            knowledge_base_path: Root of the project knowledge base. Stored
                for callers/tools; not read directly in this module.
        """
        self.api_client = api_client
        self.knowledge_base_path = knowledge_base_path
|
141
|
+
|
|
142
|
+
async def execute(
|
|
143
|
+
self,
|
|
144
|
+
task: str,
|
|
145
|
+
context: Optional[dict[str, Any]] = None,
|
|
146
|
+
) -> dict[str, Any]:
|
|
147
|
+
logger.info(f"Nova executing: {task[:80]}...")
|
|
148
|
+
|
|
149
|
+
themes = []
|
|
150
|
+
if context and "iris_themes" in context:
|
|
151
|
+
iris_data = context["iris_themes"]
|
|
152
|
+
if isinstance(iris_data, dict):
|
|
153
|
+
themes = iris_data.get("themes", [])
|
|
154
|
+
|
|
155
|
+
# Design experiments for top themes
|
|
156
|
+
experiments = []
|
|
157
|
+
for theme in themes[:3]:
|
|
158
|
+
title = theme.get("title", "Unknown")
|
|
159
|
+
severity = theme.get("severity", 5.0)
|
|
160
|
+
areas = theme.get("product_areas", ["general"])
|
|
161
|
+
# High-severity themes warrant detecting a smaller lift (3% MDE) — the
|
|
162
|
+
# downside of missing a real improvement is large because the underlying
|
|
163
|
+
# pain is hurting users. Lower-severity themes accept a larger MDE (5%)
|
|
164
|
+
# to ship faster; if the experiment is inconclusive, the cost of being
|
|
165
|
+
# wrong is bounded.
|
|
166
|
+
mde = 0.03 if severity >= 7 else 0.05
|
|
167
|
+
baseline = 0.15
|
|
168
|
+
|
|
169
|
+
exp = await self.design_experiment(
|
|
170
|
+
hypothesis=f"Addressing '{title}' will improve activation",
|
|
171
|
+
primary_metric=f"{areas[0]}_activation_rate",
|
|
172
|
+
baseline_rate=baseline,
|
|
173
|
+
minimum_detectable_effect=mde,
|
|
174
|
+
)
|
|
175
|
+
experiments.append(
|
|
176
|
+
{
|
|
177
|
+
"experiment_id": exp.experiment_id,
|
|
178
|
+
"hypothesis": exp.hypothesis,
|
|
179
|
+
"primary_metric": exp.primary_metric,
|
|
180
|
+
"sample_size_per_arm": exp.sample_size_per_arm,
|
|
181
|
+
"expected_duration_days": exp.expected_duration_days,
|
|
182
|
+
"success_criteria": exp.success_criteria,
|
|
183
|
+
"guardrail_metrics": exp.guardrail_metrics,
|
|
184
|
+
}
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
# Analyze the standard OpenClaw activation funnel.
|
|
188
|
+
# Attempt to source real funnel stages from the API client when
|
|
189
|
+
# available; otherwise fall back to default illustrative estimates
|
|
190
|
+
# and mark them as such so downstream consumers know not to trust
|
|
191
|
+
# the absolute counts.
|
|
192
|
+
funnel_result = None
|
|
193
|
+
if themes:
|
|
194
|
+
default_stages = [
|
|
195
|
+
{"name": "signup", "count": 1000},
|
|
196
|
+
{"name": "repo_cloned", "count": 700},
|
|
197
|
+
{"name": "first_agent_run", "count": 595},
|
|
198
|
+
{"name": "knowledge_base_configured", "count": 298},
|
|
199
|
+
{"name": "weekly_cycle_activated", "count": 89},
|
|
200
|
+
{"name": "team_onboarded", "count": 36},
|
|
201
|
+
]
|
|
202
|
+
stages = default_stages
|
|
203
|
+
funnel_data_source = "default_estimates"
|
|
204
|
+
if self.api_client is not None and hasattr(self.api_client, "get_funnel"):
|
|
205
|
+
try:
|
|
206
|
+
real_stages = await self.api_client.get_funnel()
|
|
207
|
+
if real_stages:
|
|
208
|
+
stages = real_stages
|
|
209
|
+
funnel_data_source = "api"
|
|
210
|
+
except Exception as exc:
|
|
211
|
+
logger.warning("Funnel API call failed; using default estimates: %s", exc)
|
|
212
|
+
funnel = await self.analyze_funnel(
|
|
213
|
+
funnel_name="devrel_ai_agents_activation",
|
|
214
|
+
stages=stages,
|
|
215
|
+
)
|
|
216
|
+
funnel_result = {
|
|
217
|
+
"funnel_name": funnel.funnel_name,
|
|
218
|
+
"data_source": funnel_data_source,
|
|
219
|
+
"overall_conversion": funnel.overall_conversion,
|
|
220
|
+
"biggest_drop_off_stage": funnel.biggest_drop_off_stage,
|
|
221
|
+
"recommended_interventions": funnel.recommended_interventions,
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
return {
|
|
225
|
+
"agent": "nova",
|
|
226
|
+
"task": task,
|
|
227
|
+
"experiments": experiments,
|
|
228
|
+
"funnel_analysis": funnel_result,
|
|
229
|
+
"cohort_segments": [],
|
|
230
|
+
"upstream_themes_used": len(themes),
|
|
231
|
+
"status": "designed",
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
def calculate_sample_size(
|
|
235
|
+
self,
|
|
236
|
+
baseline_rate: float,
|
|
237
|
+
minimum_detectable_effect: float,
|
|
238
|
+
power: float = 0.8,
|
|
239
|
+
significance_level: float = 0.05,
|
|
240
|
+
) -> int:
|
|
241
|
+
"""
|
|
242
|
+
Calculate required sample size per arm for a two-proportion z-test.
|
|
243
|
+
|
|
244
|
+
Args:
|
|
245
|
+
baseline_rate: Current conversion rate (e.g., 0.15 for 15%)
|
|
246
|
+
minimum_detectable_effect: Absolute change to detect (e.g., 0.03 for 3pp)
|
|
247
|
+
power: Statistical power (default 0.8)
|
|
248
|
+
significance_level: Alpha level (default 0.05)
|
|
249
|
+
|
|
250
|
+
Returns:
|
|
251
|
+
Required sample size per arm
|
|
252
|
+
"""
|
|
253
|
+
z_alpha = stats.norm.ppf(1 - significance_level / 2)
|
|
254
|
+
z_beta = stats.norm.ppf(power)
|
|
255
|
+
|
|
256
|
+
p1 = baseline_rate
|
|
257
|
+
p2 = baseline_rate + minimum_detectable_effect
|
|
258
|
+
p_avg = (p1 + p2) / 2
|
|
259
|
+
|
|
260
|
+
n = (
|
|
261
|
+
z_alpha * math.sqrt(2 * p_avg * (1 - p_avg))
|
|
262
|
+
+ z_beta * math.sqrt(p1 * (1 - p1) + p2 * (1 - p2))
|
|
263
|
+
) ** 2 / minimum_detectable_effect**2
|
|
264
|
+
|
|
265
|
+
return math.ceil(n)
|
|
266
|
+
|
|
267
|
+
async def design_experiment(
|
|
268
|
+
self,
|
|
269
|
+
hypothesis: str,
|
|
270
|
+
primary_metric: str,
|
|
271
|
+
baseline_rate: float,
|
|
272
|
+
minimum_detectable_effect: float,
|
|
273
|
+
context: Optional[dict[str, Any]] = None,
|
|
274
|
+
) -> ExperimentDesign:
|
|
275
|
+
"""Design a fully pre-registered A/B experiment."""
|
|
276
|
+
sample_size = self.calculate_sample_size(
|
|
277
|
+
baseline_rate=baseline_rate,
|
|
278
|
+
minimum_detectable_effect=minimum_detectable_effect,
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
# Estimate duration based on daily traffic volume. Clamp to a
|
|
282
|
+
# floor so misconfigured envs (or test environments setting 0)
|
|
283
|
+
# don't produce 30-year experiments or ZeroDivisionError.
|
|
284
|
+
raw_signups = int(os.environ.get("DAILY_SIGNUPS_ESTIMATE", str(DAILY_SIGNUPS_DEFAULT)))
|
|
285
|
+
if raw_signups < DAILY_SIGNUPS_FLOOR:
|
|
286
|
+
logger.warning(
|
|
287
|
+
"DAILY_SIGNUPS_ESTIMATE=%d is below floor %d; using floor instead",
|
|
288
|
+
raw_signups,
|
|
289
|
+
DAILY_SIGNUPS_FLOOR,
|
|
290
|
+
)
|
|
291
|
+
daily_signups = DAILY_SIGNUPS_FLOOR
|
|
292
|
+
else:
|
|
293
|
+
daily_signups = raw_signups
|
|
294
|
+
duration_days = math.ceil((sample_size * 2) / daily_signups)
|
|
295
|
+
|
|
296
|
+
return ExperimentDesign(
|
|
297
|
+
# sha256-based ID is stable across process restarts (Python's
|
|
298
|
+
# built-in hash() is randomized per-process, breaking
|
|
299
|
+
# pre-registration de-duplication).
|
|
300
|
+
experiment_id=f"exp_{hashlib.sha256(hypothesis.encode()).hexdigest()[:8]}",
|
|
301
|
+
hypothesis=hypothesis,
|
|
302
|
+
primary_metric=primary_metric,
|
|
303
|
+
secondary_metrics=["time_to_first_event", "d7_retention"],
|
|
304
|
+
control_description="Current experience (no changes)",
|
|
305
|
+
variant_description="See experiment hypothesis",
|
|
306
|
+
sample_size_per_arm=sample_size,
|
|
307
|
+
minimum_detectable_effect=minimum_detectable_effect,
|
|
308
|
+
statistical_power=0.8,
|
|
309
|
+
significance_level=0.05,
|
|
310
|
+
expected_duration_days=duration_days,
|
|
311
|
+
guardrail_metrics=["error_rate", "page_load_time", "sdk_init_time"],
|
|
312
|
+
evaluation_method="bayesian",
|
|
313
|
+
pre_registration_date="",
|
|
314
|
+
success_criteria=(
|
|
315
|
+
f"Reject null hypothesis at alpha=0.05 with "
|
|
316
|
+
f">={minimum_detectable_effect * 100:.1f}pp lift in {primary_metric}"
|
|
317
|
+
),
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
async def analyze_funnel(
|
|
321
|
+
self,
|
|
322
|
+
funnel_name: str,
|
|
323
|
+
stages: list[dict[str, Any]],
|
|
324
|
+
) -> FunnelAnalysis:
|
|
325
|
+
"""Analyze a conversion funnel and recommend interventions."""
|
|
326
|
+
# Calculate conversion rates and find biggest drop-off
|
|
327
|
+
for i, stage in enumerate(stages):
|
|
328
|
+
if i == 0:
|
|
329
|
+
stage["conversion_rate"] = 1.0
|
|
330
|
+
else:
|
|
331
|
+
prev_count = stages[i - 1]["count"]
|
|
332
|
+
stage["conversion_rate"] = stage["count"] / prev_count if prev_count > 0 else 0
|
|
333
|
+
stage["drop_off"] = 1 - stage["conversion_rate"]
|
|
334
|
+
|
|
335
|
+
biggest_drop = max(stages[1:], key=lambda s: s["drop_off"])
|
|
336
|
+
overall = stages[-1]["count"] / stages[0]["count"] if stages[0]["count"] > 0 else 0
|
|
337
|
+
|
|
338
|
+
return FunnelAnalysis(
|
|
339
|
+
funnel_name=funnel_name,
|
|
340
|
+
stages=stages,
|
|
341
|
+
overall_conversion=overall,
|
|
342
|
+
biggest_drop_off_stage=biggest_drop["name"],
|
|
343
|
+
recommended_interventions=[
|
|
344
|
+
f"Investigate drop-off at '{biggest_drop['name']}' stage "
|
|
345
|
+
f"({biggest_drop['drop_off'] * 100:.1f}% drop-off)",
|
|
346
|
+
"Run qualitative research with users who dropped off at this stage",
|
|
347
|
+
"Design an experiment to reduce friction at this stage",
|
|
348
|
+
],
|
|
349
|
+
)
|