make-mp-data 3.0.4 → 3.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +46 -0
- package/dungeons/array-of-object-lookup-schema.json +327 -0
- package/dungeons/array-of-object-lookup.js +28 -8
- package/dungeons/capstone/capstone-ic3.js +291 -0
- package/dungeons/capstone/capstone-ic4.js +598 -0
- package/dungeons/capstone/capstone-ic5.js +668 -0
- package/dungeons/capstone/generate-product-lookup.js +309 -0
- package/dungeons/ecommerce-schema.json +462 -0
- package/dungeons/{copilot.js → ecommerce.js} +77 -15
- package/dungeons/education-schema.json +2409 -0
- package/dungeons/education.js +206 -442
- package/dungeons/fintech-schema.json +14034 -0
- package/dungeons/fintech.js +110 -389
- package/dungeons/foobar-schema.json +403 -0
- package/dungeons/foobar.js +27 -4
- package/dungeons/food-delivery-schema.json +192 -0
- package/dungeons/food-delivery.js +602 -0
- package/dungeons/food-schema.json +1152 -0
- package/dungeons/food.js +150 -383
- package/dungeons/gaming-schema.json +1270 -0
- package/dungeons/gaming.js +143 -3
- package/dungeons/insurance-application-schema.json +204 -0
- package/dungeons/insurance-application.js +605 -0
- package/dungeons/media-schema.json +906 -0
- package/dungeons/media.js +221 -391
- package/dungeons/retention-cadence-schema.json +78 -0
- package/dungeons/retention-cadence.js +35 -1
- package/dungeons/rpg-schema.json +4526 -0
- package/dungeons/rpg.js +130 -388
- package/dungeons/sanity-schema.json +255 -0
- package/dungeons/sanity.js +21 -10
- package/dungeons/sass-schema.json +1291 -0
- package/dungeons/sass.js +210 -337
- package/dungeons/scd-schema.json +919 -0
- package/dungeons/scd.js +38 -10
- package/dungeons/simple-schema.json +608 -0
- package/dungeons/simple.js +48 -11
- package/dungeons/simplest-schema.json +1418 -0
- package/dungeons/simplest.js +392 -0
- package/dungeons/social-schema.json +1118 -0
- package/dungeons/social.js +124 -365
- package/dungeons/text-generation-schema.json +3096 -0
- package/dungeons/text-generation.js +71 -0
- package/index.js +6 -3
- package/lib/core/config-validator.js +18 -0
- package/lib/core/storage.js +5 -5
- package/lib/generators/events.js +4 -4
- package/lib/orchestrators/mixpanel-sender.js +12 -7
- package/lib/orchestrators/user-loop.js +14 -6
- package/lib/templates/soup-presets.js +188 -0
- package/lib/utils/utils.js +52 -6
- package/package.json +1 -1
- package/types.d.ts +20 -3
- package/dungeons/adspend.js +0 -117
- package/dungeons/anon.js +0 -128
- package/dungeons/benchmark-heavy.js +0 -240
- package/dungeons/benchmark-light.js +0 -126
- package/dungeons/big.js +0 -226
- package/dungeons/business.js +0 -391
- package/dungeons/complex.js +0 -428
- package/dungeons/experiments.js +0 -137
- package/dungeons/funnels.js +0 -309
- package/dungeons/mil.js +0 -323
- package/dungeons/mirror.js +0 -160
- package/dungeons/soup-test.js +0 -52
- package/dungeons/streaming.js +0 -372
- package/dungeons/strict-event-test.js +0 -30
- package/dungeons/student-teacher.js +0 -438
- package/dungeons/too-big-events.js +0 -203
- package/dungeons/user-agent.js +0 -209
package/dungeons/sass.js
CHANGED
|
@@ -10,9 +10,13 @@ const chance = u.initChance(SEED);
|
|
|
10
10
|
const num_users = 5_000;
|
|
11
11
|
const days = 100;
|
|
12
12
|
|
|
13
|
-
/** @typedef {import("
|
|
13
|
+
/** @typedef {import("../types.js").Dungeon} Config */
|
|
14
14
|
|
|
15
|
-
|
|
15
|
+
/*
|
|
16
|
+
* ═══════════════════════════════════════════════════════════════════════════════
|
|
17
|
+
* DATASET OVERVIEW
|
|
18
|
+
* ═══════════════════════════════════════════════════════════════════════════════
|
|
19
|
+
*
|
|
16
20
|
* CLOUDFORGE - B2B Cloud Infrastructure Monitoring & Deployment Platform
|
|
17
21
|
*
|
|
18
22
|
* CloudForge is a B2B SaaS platform that combines infrastructure monitoring (like Datadog)
|
|
@@ -20,6 +24,12 @@ const days = 100;
|
|
|
20
24
|
* of all sizes - from startups deploying their first microservice to enterprises managing
|
|
21
25
|
* thousands of services across multi-cloud environments.
|
|
22
26
|
*
|
|
27
|
+
* - 5,000 users over 100 days
|
|
28
|
+
* - 600K events across 18 event types (+ 1 hook-created event type)
|
|
29
|
+
* - 8 funnels (onboarding, monitoring, incident response, deployment, infra, team, docs, billing)
|
|
30
|
+
* - Group analytics (companies)
|
|
31
|
+
* - Desktop/browser only (B2B SaaS - no mobile devices)
|
|
32
|
+
*
|
|
23
33
|
* CORE PLATFORM:
|
|
24
34
|
* Teams create workspaces, deploy services across AWS/GCP/Azure, and monitor everything
|
|
25
35
|
* from a unified dashboard. The platform tracks uptime, latency, error rates, CPU/memory
|
|
@@ -27,42 +37,202 @@ const days = 100;
|
|
|
27
37
|
* PagerDuty/Slack integrations, and on-call engineers acknowledge and resolve incidents
|
|
28
38
|
* using automated runbooks.
|
|
29
39
|
*
|
|
30
|
-
*
|
|
31
|
-
*
|
|
32
|
-
*
|
|
33
|
-
*
|
|
34
|
-
|
|
40
|
+
* PRICING MODEL:
|
|
41
|
+
* Four tiers: Free, Team, Business, Enterprise. Enterprise customers get dedicated
|
|
42
|
+
* customer success managers and annual contracts. Pricing based on seat count and
|
|
43
|
+
* resource usage.
|
|
44
|
+
*/
|
|
45
|
+
|
|
46
|
+
/*
|
|
47
|
+
* ═══════════════════════════════════════════════════════════════════════════════
|
|
48
|
+
* ANALYTICS HOOKS
|
|
49
|
+
* ═══════════════════════════════════════════════════════════════════════════════
|
|
50
|
+
*
|
|
51
|
+
* 8 deliberately architected patterns hidden in the data, simulating real-world
|
|
52
|
+
* B2B SaaS behavior. Several hooks use event removal (splice), event replacement,
|
|
53
|
+
* and module-level closure state tracking via Map objects.
|
|
35
54
|
*
|
|
36
|
-
*
|
|
37
|
-
*
|
|
38
|
-
*
|
|
39
|
-
* classification, paging multiple teams. Teams with Slack + PagerDuty integrations
|
|
40
|
-
* respond and resolve incidents significantly faster than those without.
|
|
55
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
56
|
+
* 1. END-OF-QUARTER SPIKE (event hook)
|
|
57
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
41
58
|
*
|
|
42
|
-
*
|
|
43
|
-
*
|
|
44
|
-
* spike beyond budgets, teams react by scaling down infrastructure. End-of-quarter
|
|
45
|
-
* pushes drive plan upgrades and team expansion as companies rush to hit targets.
|
|
59
|
+
* Days 80-90: billing events shift toward plan upgrades 40% of the time, and team
|
|
60
|
+
* member invitations are duplicated 50% of the time. Tagged: quarter_end_push: true.
|
|
46
61
|
*
|
|
47
|
-
*
|
|
48
|
-
*
|
|
49
|
-
*
|
|
62
|
+
* Mixpanel Report — Plan Upgrades Over Time:
|
|
63
|
+
* • Insights line chart
|
|
64
|
+
* • Event: "billing event", filter "event_type" = "plan_upgraded"
|
|
65
|
+
* • Daily trend
|
|
66
|
+
* • Expected: Spike in plan upgrades during days 80-90 (4x normal volume)
|
|
50
67
|
*
|
|
51
|
-
*
|
|
52
|
-
*
|
|
53
|
-
*
|
|
54
|
-
*
|
|
55
|
-
*
|
|
56
|
-
*
|
|
57
|
-
*
|
|
58
|
-
*
|
|
59
|
-
*
|
|
60
|
-
*
|
|
61
|
-
*
|
|
62
|
-
*
|
|
63
|
-
*
|
|
64
|
-
*
|
|
65
|
-
*
|
|
68
|
+
* Mixpanel Report — Team Expansion Surge:
|
|
69
|
+
* • Insights line chart
|
|
70
|
+
* • Event: "team member invited", filter "quarter_end_push" = true
|
|
71
|
+
* • Daily trend
|
|
72
|
+
* • Expected: Clear volume spike in last 10 days with duplicate invites
|
|
73
|
+
*
|
|
74
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
75
|
+
* 2. CHURNED ACCOUNT SILENCING (everything hook)
|
|
76
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
77
|
+
*
|
|
78
|
+
* ~10% of users (hash of distinct_id, idHash % 5 === 0) go completely silent
|
|
79
|
+
* after day 30. ALL events after month 1 are removed via splice(). User profiles
|
|
80
|
+
* are tagged churned_account: true for discoverability.
|
|
81
|
+
*
|
|
82
|
+
* Mixpanel Report — Churned Account Retention:
|
|
83
|
+
* • Retention report
|
|
84
|
+
* • Event A/B: Any event
|
|
85
|
+
* • Breakdown: User profile "churned_account"
|
|
86
|
+
* • Expected: churned_account=true shows 0% retention after day 30
|
|
87
|
+
*
|
|
88
|
+
* Mixpanel Report — Churned Account Activity:
|
|
89
|
+
* • Insights line chart
|
|
90
|
+
* • Event: Any event, measure total per user
|
|
91
|
+
* • Breakdown: User profile "churned_account"
|
|
92
|
+
* • Weekly trend
|
|
93
|
+
* • Expected: churned_account=true flatlines after week 4
|
|
94
|
+
*
|
|
95
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
96
|
+
* 3. ALERT ESCALATION REPLACEMENT (event hook)
|
|
97
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
98
|
+
*
|
|
99
|
+
* 30% of critical/emergency "alert triggered" events are REPLACED with a new
|
|
100
|
+
* event type "incident created" (not in the events array — hook-only). Includes
|
|
101
|
+
* escalation_level (P1/P2), teams_paged, incident_id.
|
|
102
|
+
*
|
|
103
|
+
* Mixpanel Report — Incident Created Discovery:
|
|
104
|
+
* • Insights report
|
|
105
|
+
* • Event: "incident created"
|
|
106
|
+
* • Breakdown: "escalation_level"
|
|
107
|
+
* • Expected: P1 and P2 incidents, ~30% of critical/emergency alert volume
|
|
108
|
+
*
|
|
109
|
+
* Mixpanel Report — Alert vs Incident Ratio:
|
|
110
|
+
* • Insights report
|
|
111
|
+
* • Events: "alert triggered" AND "incident created"
|
|
112
|
+
* • Expected: incident created count ~ 30% of critical+emergency alerts
|
|
113
|
+
*
|
|
114
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
115
|
+
* 4. INTEGRATION USERS SUCCEED (everything hook)
|
|
116
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
117
|
+
*
|
|
118
|
+
* Users with BOTH Slack AND PagerDuty integrations resolve alerts faster:
|
|
119
|
+
* response_time_mins reduced 60%, resolution_time_mins reduced 50%.
|
|
120
|
+
* Tagged: integrated_team: true.
|
|
121
|
+
*
|
|
122
|
+
* Mixpanel Report — Integration Impact on Response Time:
|
|
123
|
+
* • Insights report
|
|
124
|
+
* • Event: "alert acknowledged", measure avg "response_time_mins"
|
|
125
|
+
* • Breakdown: "integrated_team"
|
|
126
|
+
* • Expected: integrated_team=true ~ 60% lower response time
|
|
127
|
+
*
|
|
128
|
+
* Mixpanel Report — Integration Impact on Resolution:
|
|
129
|
+
* • Insights report
|
|
130
|
+
* • Event: "alert resolved", measure avg "resolution_time_mins"
|
|
131
|
+
* • Breakdown: "integrated_team"
|
|
132
|
+
* • Expected: integrated_team=true ~ 50% faster resolution
|
|
133
|
+
*
|
|
134
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
135
|
+
* 5. DOCS READERS DEPLOY MORE (everything hook)
|
|
136
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
137
|
+
*
|
|
138
|
+
* Users with 3+ "best_practices" documentation views get 2-3 extra production
|
|
139
|
+
* deploys spliced into their event stream. Tagged: docs_informed: true.
|
|
140
|
+
*
|
|
141
|
+
* Mixpanel Report — Docs-Informed Deployments:
|
|
142
|
+
* • Insights report
|
|
143
|
+
* • Event: "service deployed", filter "environment" = "production"
|
|
144
|
+
* • Breakdown: "docs_informed"
|
|
145
|
+
* • Expected: docs_informed=true shows extra production deployments
|
|
146
|
+
*
|
|
147
|
+
* Mixpanel Report — Docs Readers vs Non-Readers:
|
|
148
|
+
* • Insights report
|
|
149
|
+
* • Event: "service deployed", measure total per user
|
|
150
|
+
* • Segment: Users with 3+ "documentation viewed" (doc_section = "best_practices")
|
|
151
|
+
* • Expected: ~1.8x more production deploys per user for docs readers
|
|
152
|
+
*
|
|
153
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
154
|
+
* 6. COST OVERRUN PATTERN (event hook — closure state)
|
|
155
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
156
|
+
*
|
|
157
|
+
* When cost_change_percent > 25 on a "cost report generated" event, the user
|
|
158
|
+
* is stored in a module-level Map. Their next "infrastructure scaled" event
|
|
159
|
+
* is forced to scale_direction: "down". Tagged: budget_exceeded, cost_reaction.
|
|
160
|
+
*
|
|
161
|
+
* Mixpanel Report — Cost Overrun to Scale Down:
|
|
162
|
+
* • Insights report
|
|
163
|
+
* • Event: "infrastructure scaled"
|
|
164
|
+
* • Breakdown: "cost_reaction"
|
|
165
|
+
* • Expected: cost_reaction=true events are 100% scale_direction="down"
|
|
166
|
+
*
|
|
167
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
168
|
+
* 7. FAILED DEPLOYMENT RECOVERY (event hook — closure state)
|
|
169
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
170
|
+
*
|
|
171
|
+
* After a failed pipeline run, the user's next successful deploy has
|
|
172
|
+
* duration_sec * 1.5 (recovery deploys are slower). Tagged: recovery_deployment.
|
|
173
|
+
* Uses module-level Map for cross-call state.
|
|
174
|
+
*
|
|
175
|
+
* Mixpanel Report — Recovery Deploy Duration:
|
|
176
|
+
* • Insights report
|
|
177
|
+
* • Event: "deployment pipeline run", measure avg "duration_sec"
|
|
178
|
+
* • Breakdown: "recovery_deployment"
|
|
179
|
+
* • Expected: recovery_deployment=true ~ 1.5x longer duration
|
|
180
|
+
*
|
|
181
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
182
|
+
* 8. ENTERPRISE VS STARTUP (user hook)
|
|
183
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
184
|
+
*
|
|
185
|
+
* Company size determines seat_count, annual_contract_value, and
|
|
186
|
+
* customer_success_manager (enterprise only). All users get customer_health_score.
|
|
187
|
+
*
|
|
188
|
+
* Mixpanel Report — ACV by Company Size:
|
|
189
|
+
* • Insights report
|
|
190
|
+
* • Event: Any, measure unique users
|
|
191
|
+
* • Breakdown: User profile "company_size"
|
|
192
|
+
* • Expected: startup ($0-3.6K), smb ($3.6K-12K), mid_market ($12K-50K),
|
|
193
|
+
* enterprise ($50K-500K)
|
|
194
|
+
*
|
|
195
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
196
|
+
* EXPECTED METRICS SUMMARY
|
|
197
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
198
|
+
*
|
|
199
|
+
* Hook | Metric | Baseline | Hook Effect | Ratio
|
|
200
|
+
* -------------------------|--------------------------|-----------|----------------|------
|
|
201
|
+
* End-of-Quarter Spike | Plan upgrades/day | ~2/day | ~8/day | 4x
|
|
202
|
+
* Churned Accounts | Users active month 2 | 100% | 90% | 0.9x
|
|
203
|
+
* Alert Escalation | Incidents from alerts | 0% | ~30% of crit | new
|
|
204
|
+
* Integration Users | MTTR (minutes) | ~300 | ~150 | 0.5x
|
|
205
|
+
* Docs Readers | Prod deploys/user | ~3 | ~5-6 | 1.8x
|
|
206
|
+
* Cost Overrun | Scale-down after overrun | 50% | 100% | 2x
|
|
207
|
+
* Failed Deploy Recovery | Deploy duration (sec) | ~500 | ~750 | 1.5x
|
|
208
|
+
* Enterprise vs Startup | ACV range | $0-3.6K | $50K-500K | 100x+
|
|
209
|
+
*
|
|
210
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
211
|
+
* ADVANCED ANALYSIS IDEAS
|
|
212
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
213
|
+
*
|
|
214
|
+
* CROSS-HOOK PATTERNS:
|
|
215
|
+
* - Churned + Enterprise: Do churned accounts skew toward startups or are
|
|
216
|
+
* enterprise accounts also silenced?
|
|
217
|
+
* - Integration + Cost: Do teams with full integrations manage costs better?
|
|
218
|
+
* - Docs + Deploys + Failures: Do docs readers have fewer failed deployments?
|
|
219
|
+
* - Quarter Spike + Churn: Are quarter-end upgrades correlated with later churn?
|
|
220
|
+
* - Enterprise Recovery: Do enterprise customers recover from failed deploys
|
|
221
|
+
* differently than startups?
|
|
222
|
+
*
|
|
223
|
+
* COHORT ANALYSIS:
|
|
224
|
+
* - By company_size: Compare all metrics across startup/smb/mid_market/enterprise
|
|
225
|
+
* - By plan_tier: Free vs. Team vs. Business vs. Enterprise engagement
|
|
226
|
+
* - By cloud_provider: AWS vs. GCP vs. Azure deployment and alert patterns
|
|
227
|
+
* - By primary_role: Engineer vs. SRE vs. DevOps vs. Manager behaviors
|
|
228
|
+
*
|
|
229
|
+
* KEY METRICS:
|
|
230
|
+
* - MTTR: alert triggered → alert resolved duration
|
|
231
|
+
* - Deployment Frequency: service deployed per user per week
|
|
232
|
+
* - Deployment Success Rate: pipeline success vs. failure ratio
|
|
233
|
+
* - Cost Efficiency: total_cost trend over time per company
|
|
234
|
+
* - Feature Adoption: integration configured events by type
|
|
235
|
+
* - Documentation Engagement: documentation viewed by section
|
|
66
236
|
*/
|
|
67
237
|
|
|
68
238
|
// Generate consistent IDs for lookup tables and event properties
|
|
@@ -486,14 +656,14 @@ const config = {
|
|
|
486
656
|
|
|
487
657
|
// ─────────────────────────────────────────────────────────────
|
|
488
658
|
// Hook #2: CHURNED ACCOUNT SILENCING (everything)
|
|
489
|
-
// ~
|
|
659
|
+
// ~20% targeted (hash % 5), yielding ~10% visible after accounting for invisible churned users
|
|
490
660
|
// ─────────────────────────────────────────────────────────────
|
|
491
661
|
if (type === "everything") {
|
|
492
662
|
const userEvents = record;
|
|
493
663
|
if (userEvents && userEvents.length > 0) {
|
|
494
664
|
const firstEvent = userEvents[0];
|
|
495
665
|
const idHash = String(firstEvent.user_id || firstEvent.device_id).split("").reduce((acc, char) => acc + char.charCodeAt(0), 0);
|
|
496
|
-
const isChurnedAccount = (idHash %
|
|
666
|
+
const isChurnedAccount = (idHash % 5) === 0;
|
|
497
667
|
|
|
498
668
|
if (isChurnedAccount) {
|
|
499
669
|
for (let i = userEvents.length - 1; i >= 0; i--) {
|
|
@@ -591,6 +761,10 @@ const config = {
|
|
|
591
761
|
// Company size determines seat count, ACV, and health score
|
|
592
762
|
// ─────────────────────────────────────────────────────────────
|
|
593
763
|
if (type === "user") {
|
|
764
|
+
// Hook #2 support: tag churned accounts on user profile for discoverability
|
|
765
|
+
const idHash = String(record.distinct_id || "").split("").reduce((acc, char) => acc + char.charCodeAt(0), 0);
|
|
766
|
+
record.churned_account = (idHash % 5) === 0;
|
|
767
|
+
|
|
594
768
|
const companySize = record.company_size;
|
|
595
769
|
|
|
596
770
|
if (companySize === "enterprise") {
|
|
@@ -619,304 +793,3 @@ const config = {
|
|
|
619
793
|
};
|
|
620
794
|
|
|
621
795
|
export default config;
|
|
622
|
-
|
|
623
|
-
/**
|
|
624
|
-
* =================================================================================
|
|
625
|
-
* NEEDLE IN A HAYSTACK - CLOUDFORGE B2B SAAS ANALYTICS
|
|
626
|
-
* =================================================================================
|
|
627
|
-
*
|
|
628
|
-
* A B2B cloud infrastructure monitoring and deployment platform dungeon with 8
|
|
629
|
-
* deliberately architected analytics insights hidden in the data. This dungeon
|
|
630
|
-
* simulates CloudForge - a Datadog + Terraform hybrid serving engineering teams
|
|
631
|
-
* across companies of all sizes.
|
|
632
|
-
*
|
|
633
|
-
* =================================================================================
|
|
634
|
-
* DATASET OVERVIEW
|
|
635
|
-
* =================================================================================
|
|
636
|
-
*
|
|
637
|
-
* - 5,000 users over 100 days
|
|
638
|
-
* - 360K events across 18 event types (+ 1 hook-created event type)
|
|
639
|
-
* - 3 funnels (onboarding, incident response, deployment pipeline)
|
|
640
|
-
* - Group analytics (companies)
|
|
641
|
-
* - Lookup tables (services, alerts)
|
|
642
|
-
* - Desktop/browser only (B2B SaaS - no mobile devices)
|
|
643
|
-
*
|
|
644
|
-
* =================================================================================
|
|
645
|
-
* THE 8 ARCHITECTED HOOKS
|
|
646
|
-
* =================================================================================
|
|
647
|
-
*
|
|
648
|
-
* Each hook creates a specific, discoverable analytics insight that simulates
|
|
649
|
-
* real-world B2B SaaS behavior patterns. Several hooks use techniques like
|
|
650
|
-
* event removal (splice), event replacement, and module-level closure
|
|
651
|
-
* state tracking via Map objects.
|
|
652
|
-
*
|
|
653
|
-
* ---------------------------------------------------------------------------------
|
|
654
|
-
* 1. END-OF-QUARTER SPIKE (event hook)
|
|
655
|
-
* ---------------------------------------------------------------------------------
|
|
656
|
-
*
|
|
657
|
-
* PATTERN: During days 80-90 of the dataset (end of quarter), billing events
|
|
658
|
-
* shift toward plan upgrades 40% of the time, and team member invitations are
|
|
659
|
-
* duplicated 50% of the time. All affected events are tagged with
|
|
660
|
-
* quarter_end_push: true.
|
|
661
|
-
*
|
|
662
|
-
* HOW TO FIND IT:
|
|
663
|
-
* - Chart "billing event" by event_type, broken down by week
|
|
664
|
-
* - Chart "team member invited" count by day
|
|
665
|
-
* - Filter: quarter_end_push = true
|
|
666
|
-
* - Compare: last 10 days vs. rest of dataset
|
|
667
|
-
*
|
|
668
|
-
* EXPECTED INSIGHT: Clear spike in plan_upgraded billing events and team
|
|
669
|
-
* invitations in the final 10 days. Duplicate invitations create an
|
|
670
|
-
* artificially inflated invite count.
|
|
671
|
-
*
|
|
672
|
-
* REAL-WORLD ANALOGUE: End-of-quarter sales pushes, budget utilization
|
|
673
|
-
* deadlines, and team expansion before fiscal year-end.
|
|
674
|
-
*
|
|
675
|
-
* ---------------------------------------------------------------------------------
|
|
676
|
-
* 2. CHURNED ACCOUNT SILENCING (everything hook)
|
|
677
|
-
* ---------------------------------------------------------------------------------
|
|
678
|
-
*
|
|
679
|
-
* PATTERN: ~10% of users (determined by hash of distinct_id) go completely
|
|
680
|
-
* silent after day 30. ALL of their events after month 1 are removed via
|
|
681
|
-
* splice() - they simply vanish from the dataset.
|
|
682
|
-
*
|
|
683
|
-
* HOW TO FIND IT:
|
|
684
|
-
* - Chart: unique users per week
|
|
685
|
-
* - Retention analysis: D30 retention by cohort
|
|
686
|
-
* - Compare: users active in month 1 vs. month 2
|
|
687
|
-
* - Look for users with events ONLY in the first 30 days
|
|
688
|
-
*
|
|
689
|
-
* EXPECTED INSIGHT: A distinct cohort of ~300 users with activity exclusively
|
|
690
|
-
* in the first month. No gradual decline - a hard cutoff at day 30.
|
|
691
|
-
*
|
|
692
|
-
* REAL-WORLD ANALOGUE: Trial users who never convert, accounts that churn
|
|
693
|
-
* after initial evaluation period, or companies that lose budget approval.
|
|
694
|
-
*
|
|
695
|
-
* ---------------------------------------------------------------------------------
|
|
696
|
-
* 3. ALERT ESCALATION REPLACEMENT (event hook)
|
|
697
|
-
* ---------------------------------------------------------------------------------
|
|
698
|
-
*
|
|
699
|
-
* PATTERN: When an "alert triggered" event has severity "critical" or
|
|
700
|
-
* "emergency", there is a 30% chance the event is REPLACED entirely with a
|
|
701
|
-
* new event type: "incident created". This event type does NOT exist in the
|
|
702
|
-
* events array - it only appears because of hooks.
|
|
703
|
-
*
|
|
704
|
-
* HOW TO FIND IT:
|
|
705
|
-
* - Look for "incident created" events in the dataset (surprise event type)
|
|
706
|
-
* - Correlate: incident created events have escalation_level (P1, P2),
|
|
707
|
-
* teams_paged, incident_id, and auto_escalated: true
|
|
708
|
-
* - Compare: ratio of critical/emergency alerts to incident creations
|
|
709
|
-
*
|
|
710
|
-
* EXPECTED INSIGHT: Approximately 30% of critical/emergency alerts escalate
|
|
711
|
-
* into formal incidents. The "incident created" event is a hidden event type
|
|
712
|
-
* that analysts must discover through exploration.
|
|
713
|
-
*
|
|
714
|
-
* REAL-WORLD ANALOGUE: Automated escalation systems that create incident
|
|
715
|
-
* records from high-severity alerts (PagerDuty, OpsGenie workflows).
|
|
716
|
-
*
|
|
717
|
-
* ---------------------------------------------------------------------------------
|
|
718
|
-
* 4. INTEGRATION USERS SUCCEED (everything hook)
|
|
719
|
-
* ---------------------------------------------------------------------------------
|
|
720
|
-
*
|
|
721
|
-
* PATTERN: Users who have configured BOTH Slack AND PagerDuty integrations
|
|
722
|
-
* respond to and resolve alerts significantly faster:
|
|
723
|
-
* - alert_acknowledged response_time_mins reduced by 60%
|
|
724
|
-
* - alert_resolved resolution_time_mins reduced by 50%
|
|
725
|
-
* - Affected events tagged with integrated_team: true
|
|
726
|
-
*
|
|
727
|
-
* HOW TO FIND IT:
|
|
728
|
-
* - Segment users by: has "integration configured" for both "slack" AND "pagerduty"
|
|
729
|
-
* - Compare: average response_time_mins on alert acknowledged
|
|
730
|
-
* - Compare: average resolution_time_mins on alert resolved
|
|
731
|
-
* - Filter: integrated_team = true
|
|
732
|
-
*
|
|
733
|
-
* EXPECTED INSIGHT: Users with both integrations have median response time
|
|
734
|
-
* ~60% lower than baseline. This is a two-feature combination effect.
|
|
735
|
-
*
|
|
736
|
-
* REAL-WORLD ANALOGUE: Tool integration stacks that compound productivity
|
|
737
|
-
* (e.g., CI/CD + monitoring + alerting creating faster MTTR).
|
|
738
|
-
*
|
|
739
|
-
* ---------------------------------------------------------------------------------
|
|
740
|
-
* 5. DOCS READERS DEPLOY MORE (everything hook)
|
|
741
|
-
* ---------------------------------------------------------------------------------
|
|
742
|
-
*
|
|
743
|
-
* PATTERN: Users who view "best_practices" documentation 3 or more times get
|
|
744
|
-
* 2-3 extra "service deployed" events with environment: "production" spliced
|
|
745
|
-
* into their event stream. Tagged with docs_informed: true.
|
|
746
|
-
*
|
|
747
|
-
* HOW TO FIND IT:
|
|
748
|
-
* - Segment users by: count of "documentation viewed" where doc_section = "best_practices" >= 3
|
|
749
|
-
* - Compare: count of "service deployed" where environment = "production"
|
|
750
|
-
* - Filter: docs_informed = true
|
|
751
|
-
*
|
|
752
|
-
* EXPECTED INSIGHT: Users who read best practices documentation 3+ times
|
|
753
|
-
* deploy more services to production, suggesting docs drive confidence
|
|
754
|
-
* and adoption.
|
|
755
|
-
*
|
|
756
|
-
* REAL-WORLD ANALOGUE: Documentation engagement as a leading indicator of
|
|
757
|
-
* product adoption (developer tools where docs reading predicts usage).
|
|
758
|
-
*
|
|
759
|
-
* ---------------------------------------------------------------------------------
|
|
760
|
-
* 6. COST OVERRUN PATTERN (event hook - closure state)
|
|
761
|
-
* ---------------------------------------------------------------------------------
|
|
762
|
-
*
|
|
763
|
-
* PATTERN: When a "cost report generated" event has cost_change_percent > 25,
|
|
764
|
-
* the user is stored in a module-level Map. When that same user later triggers
|
|
765
|
-
* an "infrastructure scaled" event, the scale_direction is forced to "down"
|
|
766
|
-
* (cost-cutting reaction). Uses closure-based state tracking across separate
|
|
767
|
-
* hook calls.
|
|
768
|
-
*
|
|
769
|
-
* HOW TO FIND IT:
|
|
770
|
-
* - Filter: cost_report_generated where budget_exceeded = true
|
|
771
|
-
* - Correlate: subsequent infrastructure_scaled where cost_reaction = true
|
|
772
|
-
* - Compare: scale_direction distribution for cost_reaction users vs. others
|
|
773
|
-
*
|
|
774
|
-
* EXPECTED INSIGHT: Users who experience cost overruns (>25% increase)
|
|
775
|
-
* consistently scale down their infrastructure afterward. The Map-based
|
|
776
|
-
* tracking creates a causal chain across separate events.
|
|
777
|
-
*
|
|
778
|
-
* REAL-WORLD ANALOGUE: Cloud cost management behavior - teams that exceed
|
|
779
|
-
* budgets immediately react by reducing resource allocation.
|
|
780
|
-
*
|
|
781
|
-
* ---------------------------------------------------------------------------------
|
|
782
|
-
* 7. FAILED DEPLOYMENT RECOVERY (event hook - closure state)
|
|
783
|
-
* ---------------------------------------------------------------------------------
|
|
784
|
-
*
|
|
785
|
-
* PATTERN: When a deployment pipeline fails, the user is stored in a
|
|
786
|
-
* module-level Map. Their next successful deployment has duration_sec
|
|
787
|
-
* multiplied by 1.5x (recovery deploys are slower/more careful). Tagged
|
|
788
|
-
* with recovery_deployment: true.
|
|
789
|
-
*
|
|
790
|
-
* HOW TO FIND IT:
|
|
791
|
-
* - Filter: deployment_pipeline_run where recovery_deployment = true
|
|
792
|
-
* - Compare: average duration_sec for recovery vs. normal deployments
|
|
793
|
-
* - Sequence: look for failed -> success pairs per user
|
|
794
|
-
*
|
|
795
|
-
* EXPECTED INSIGHT: Recovery deployments after failures take 50% longer
|
|
796
|
-
* than normal deployments, reflecting more cautious deployment practices.
|
|
797
|
-
*
|
|
798
|
-
* REAL-WORLD ANALOGUE: Post-incident deployment behavior - engineers take
|
|
799
|
-
* extra care after a failed deploy, adding more checks and review steps.
|
|
800
|
-
*
|
|
801
|
-
* ---------------------------------------------------------------------------------
|
|
802
|
-
* 8. ENTERPRISE VS STARTUP (user hook)
|
|
803
|
-
* ---------------------------------------------------------------------------------
|
|
804
|
-
*
|
|
805
|
-
* PATTERN: Based on company_size, users get additional profile properties:
|
|
806
|
-
* - enterprise: seat_count (50-500), annual_contract_value (50K-500K), customer_success_manager: true
|
|
807
|
-
* - mid_market: seat_count (10-50), annual_contract_value (12K-50K)
|
|
808
|
-
* - smb: seat_count (3-10), annual_contract_value (3.6K-12K)
|
|
809
|
-
* - startup: seat_count (1-5), annual_contract_value (0-3.6K)
|
|
810
|
-
* - All users get customer_health_score (1-100)
|
|
811
|
-
*
|
|
812
|
-
* HOW TO FIND IT:
|
|
813
|
-
* - Segment users by: company_size
|
|
814
|
-
* - Compare: annual_contract_value distribution
|
|
815
|
-
* - Compare: seat_count ranges
|
|
816
|
-
* - Filter: customer_success_manager = true (enterprise only)
|
|
817
|
-
*
|
|
818
|
-
* EXPECTED INSIGHT: Clear segmentation of user base by company size with
|
|
819
|
-
* corresponding ACV and seat count distributions. Enterprise customers
|
|
820
|
-
* uniquely have dedicated CSMs.
|
|
821
|
-
*
|
|
822
|
-
* REAL-WORLD ANALOGUE: B2B SaaS customer segmentation where company size
|
|
823
|
-
* directly determines contract value, support tier, and expansion potential.
|
|
824
|
-
*
|
|
825
|
-
 * =================================================================================
 * ADVANCED ANALYSIS IDEAS
 * =================================================================================
 *
 * CROSS-HOOK PATTERNS:
 *
 * 1. Churned + Enterprise: Do churned accounts (Hook #2) skew toward startups,
 *    or are enterprise accounts also silenced? Cross-reference company_size
 *    with the ~10% churn cohort.
 *
 * 2. Integration + Cost: Do teams with full integrations (Hook #4) also manage
 *    costs better (Hook #6)? Integrated teams may detect cost overruns faster.
 *
 * 3. Docs + Deploys + Failures: Do docs readers (Hook #5) have fewer failed
 *    deployments (Hook #7)? Best-practices readers may deploy more carefully.
 *
 * 4. Quarter Spike + Churn: Are quarter-end upgrades (Hook #1) correlated with
 *    accounts that later churn? False expansion before abandonment.
 *
 * 5. Enterprise Recovery: Do enterprise customers (Hook #8) recover from failed
 *    deployments (Hook #7) differently than startups?
 *
 * COHORT ANALYSIS:
 *
 * - Cohort by company_size: Compare all metrics across startup/smb/mid_market/enterprise
 * - Cohort by plan_tier: Free vs. Team vs. Business vs. Enterprise engagement
 * - Cohort by cloud_provider: AWS vs. GCP vs. Azure deployment and alert patterns
 * - Cohort by primary_role: Engineer vs. SRE vs. DevOps vs. Manager behaviors
 *
 * FUNNEL ANALYSIS:
 *
 * - Onboarding: workspace created -> service deployed -> dashboard viewed
 *   Compare by company_size and plan_tier
 * - Incident Response: alert triggered -> acknowledged -> resolved
 *   Compare integrated_team vs. non-integrated response times
 * - Deployment: pipeline run -> service deployed -> dashboard viewed
 *   Compare recovery_deployment vs. normal deployment success
 *
 * KEY METRICS TO TRACK:
 *
 * - MTTR (Mean Time To Resolve): alert triggered -> alert resolved duration
 * - Deployment Frequency: service deployed events per user per week
 * - Deployment Success Rate: pipeline success vs. failure ratio
 * - Cost Efficiency: total_cost trend over time per company
 * - Feature Adoption: integration configured events by type
 * - Documentation Engagement: documentation viewed by section
 *
 * =================================================================================
 * EXPECTED METRICS SUMMARY
 * =================================================================================
 *
 * Hook                     | Metric                   | Baseline  | Hook Effect    | Ratio
 * -------------------------|--------------------------|-----------|----------------|------
 * End-of-Quarter Spike     | Plan upgrades/day        | ~2/day    | ~8/day         | 4x
 * Churned Accounts         | Users active month 2     | 100%      | 90%            | 0.9x
 * Alert Escalation         | Incidents from alerts    | 0%        | ~30% of crit   | new
 * Integration Users        | MTTR (minutes)           | ~300      | ~150           | 0.5x
 * Docs Readers             | Prod deploys/user        | ~3        | ~5-6           | 1.8x
 * Cost Overrun             | Scale-down after overrun | 50%       | 100%           | 2x
 * Failed Deploy Recovery   | Deploy duration (sec)    | ~500      | ~750           | 1.5x
 * Enterprise vs Startup    | ACV range                | $0-3.6K   | $50K-500K      | 100x+
 *
 * =================================================================================
 * HOW TO RUN THIS DUNGEON
 * =================================================================================
 *
 * From the dm4 root directory:
 *
 *   npm start
 *
 * Or programmatically:
 *
 *   import generate from './index.js';
 *   import config from './dungeons/harness-sass.js';
 *   const results = await generate(config);
 *
 * =================================================================================
 * TECHNICAL NOTES
 * =================================================================================
 *
 * - Module-level Maps (costOverrunUsers, failedDeployUsers) provide closure-based
 *   state tracking across individual event hook calls. This is the key differentiator
 *   for this dungeon - hooks 6 and 7 maintain state between separate invocations.
 *
 * - Hook #3 creates "incident created" events that do NOT exist in the events array.
 *   This event type only appears because of hook-based event replacement.
 *
 * - Hook #2 uses splice() in the "everything" handler to remove events after day 30
 *   for ~10% of users. This creates accounts with a hard activity cutoff - complete silence.
 *
 * - The "everything" hooks (2, 4, and 5) operate on the full user event array, enabling
 *   two-pass analysis: first identify patterns, then modify events accordingly.
 *
 * - Desktop/browser only: hasAndroidDevices and hasIOSDevices are both false,
 *   reflecting the B2B SaaS reality that CloudForge is used from workstations.
 *
 * =================================================================================
 */