@elsahafy/ux-mcp-server 2.0.0 → 4.0.0
- package/README.md +159 -18
- package/dist/index.js +2130 -8
- package/knowledge/ai-ml-patterns.json +192 -0
- package/knowledge/analytics-metrics.json +521 -0
- package/knowledge/angular-patterns.json +347 -0
- package/knowledge/ar-vr-interfaces.json +139 -0
- package/knowledge/color-theory.json +499 -0
- package/knowledge/data-viz.json +527 -0
- package/knowledge/design-system-advanced.json +533 -0
- package/knowledge/ecommerce-patterns.json +616 -0
- package/knowledge/ethical-design.json +484 -0
- package/knowledge/finance-ux.json +208 -0
- package/knowledge/forms.json +641 -0
- package/knowledge/haptic-feedback.json +102 -0
- package/knowledge/healthcare-ux.json +209 -0
- package/knowledge/information-architecture.json +494 -0
- package/knowledge/microcopy.json +743 -0
- package/knowledge/mobile-patterns.json +537 -0
- package/knowledge/neurodiversity.json +228 -0
- package/knowledge/pwa-patterns.json +429 -0
- package/knowledge/saas-patterns.json +613 -0
- package/knowledge/testing-validation.json +561 -0
- package/knowledge/typography.json +509 -0
- package/knowledge/voice-ui.json +359 -0
- package/knowledge/vue-patterns.json +279 -0
- package/knowledge/web-components.json +148 -0
- package/package.json +1 -1
package/knowledge/testing-validation.json
@@ -0,0 +1,561 @@
+{
+  "name": "UX Testing & Validation",
+  "description": "Comprehensive guide to user experience testing methodologies, validation techniques, and research methods",
+  "testing_types": {
+    "usability_testing": {
+      "description": "Observing users attempting tasks to identify usability issues",
+      "when": "Throughout design process (early = wireframes, later = high-fidelity)",
+      "moderated": {
+        "description": "Researcher facilitates session in real-time",
+        "pros": ["Can probe deeper", "Clarify confusion", "Observe body language", "Ask follow-up questions"],
+        "cons": ["Time-intensive", "Expensive", "Moderator bias", "Limited sample size"],
+        "process": {
+          "1_prepare": {
+            "tasks": "Create 3-5 realistic tasks users would perform",
+            "scenario": "Provide context ('You want to...')",
+            "script": "Write facilitator script (intro, tasks, questions)"
+          },
+          "2_recruit": {
+            "participants": "5-8 users per user group (Nielsen: 5 users find 85% of issues)",
+            "screening": "Recruit target users (not just anyone)",
+            "incentive": "$50-150 depending on audience and length"
+          },
+          "3_conduct": {
+            "duration": "30-60 minutes per session",
+            "think_aloud": "Ask users to verbalize thoughts",
+            "no_leading": "Don't guide or explain (observe struggles)",
+            "observe": ["Where they click", "Confusion points", "Workarounds", "Errors", "Completion time"],
+            "record": "Screen + audio recording (with consent)"
+          },
+          "4_analyze": {
+            "rainbow_spreadsheet": "Track issues per participant in color-coded grid",
+            "severity": "1=cosmetic, 2=minor, 3=major, 4=critical (unusable)",
+            "frequency": "How many users encountered issue",
+            "prioritize": "High severity + high frequency first"
+          },
+          "5_report": {
+            "format": ["Executive summary", "Key findings (with video clips)", "Issue list (prioritized)", "Recommendations"],
+            "deliverable": "Findings presentation + detailed issue log"
+          }
+        },
+        "tasks_best_practices": [
+          "Use realistic scenarios (not 'Find the About page')",
+          "Don't use exact UI labels in task description",
+          "Start with easy tasks, progress to harder",
+          "Mix exploratory and directed tasks",
+          "Include success criteria (measurable)"
+        ]
+      },
+      "unmoderated": {
+        "description": "Users complete tasks remotely without facilitator",
+        "pros": ["Scalable (100+ users)", "Faster", "Less expensive", "Users in natural environment"],
+        "cons": ["Can't probe", "Higher dropout", "No body language", "Tech issues"],
+        "tools": ["UserTesting.com", "Lookback", "Maze", "UsabilityHub", "Loop11"],
+        "best_for": "Quantitative data, benchmarking, A/B testing validation",
+        "metrics": ["Task success rate", "Time on task", "Clicks to complete", "Misclicks"]
+      },
+      "guerrilla_testing": {
+        "description": "Quick, informal testing in public places",
+        "when": "Early concepts, rapid feedback, low budget",
+        "process": ["Bring prototype (paper or device)", "Find people (coffee shop, park)", "Brief task (< 5 min)", "Observe and note"],
+        "pros": ["Fast", "Free", "Authentic reactions"],
+        "cons": ["Not representative sample", "Limited depth", "Environmental distractions"]
+      }
+    },
+    "a_b_testing": {
+      "description": "Comparing two versions to determine which performs better",
+      "when": "After launch, to optimize conversion/engagement",
+      "process": {
+        "1_hypothesis": {
+          "format": "If we [change], then [metric] will [increase/decrease] because [reason]",
+          "example": "If we change CTA from 'Submit' to 'Get Started', then click-through rate will increase because it's more actionable"
+        },
+        "2_design": {
+          "control": "Current version (A)",
+          "variant": "New version (B)",
+          "one_change": "Change ONE variable at a time (not multiple)",
+          "sample_size": "Use A/B test calculator to determine required traffic"
+        },
+        "3_run": {
+          "split": "50/50 traffic split (random assignment)",
+          "duration": "1-2 weeks minimum (account for weekly cycles)",
+          "significance": "95% confidence level, p < 0.05"
+        },
+        "4_analyze": {
+          "primary_metric": "The metric you're optimizing (CTR, conversion, revenue)",
+          "secondary_metrics": "Other impacts (bounce rate, time on page)",
+          "segments": "Analyze by device, traffic source, user type",
+          "statistical_significance": "Use t-test or chi-square test"
+        },
+        "5_implement": {
+          "winner": "Ship winning variant to 100%",
+          "document": "Record results, learnings, next tests"
+        }
+      },
+      "what_to_test": {
+        "high_impact": [
+          "Call-to-action (text, color, size, placement)",
+          "Headlines and value propositions",
+          "Form fields (number, labels, layout)",
+          "Images (lifestyle vs product shot)",
+          "Pricing display (monthly vs annual, visual hierarchy)",
+          "Navigation structure",
+          "Page layout and visual hierarchy"
+        ],
+        "low_impact": ["Button border radius", "Exact shade of color", "Minor copy tweaks"]
+      },
+      "tools": ["Google Optimize (free)", "Optimizely", "VWO", "AB Tasty"],
+      "best_practices": [
+        "Test one variable at a time",
+        "Run tests long enough (at least 1 week, ideally 2-4)",
+        "Ensure sufficient sample size (use calculator)",
+        "Consider external factors (holidays, seasonality, marketing campaigns)",
+        "Don't stop test early (even if winning)",
+        "Validate with multiple tests (replication)",
+        "Document all tests and results"
+      ],
+      "common_mistakes": [
+        "Testing too many variables at once",
+        "Stopping test too early",
+        "Insufficient sample size",
+        "Not accounting for statistical significance",
+        "Ignoring external factors (Black Friday, email campaign)",
+        "Testing low-traffic pages (takes too long to reach significance)",
+        "Not having a clear hypothesis"
+      ]
+    },
+    "multivariate_testing": {
+      "description": "Testing multiple variables simultaneously to find best combination",
+      "vs_ab": "A/B tests one change; MVT tests multiple changes and interactions",
+      "when": "High-traffic sites, testing page redesigns",
+      "example": "Test 3 headlines × 3 CTAs × 2 images = 18 combinations",
+      "requirement": "Requires significantly more traffic than A/B testing",
+      "caution": "Complexity grows exponentially (2^n combinations)"
+    },
+    "first_click_testing": {
+      "description": "Test where users click first for a given task",
+      "finding": "If first click is correct, 87% chance user completes task successfully",
+      "tools": ["Chalkmark", "UsabilityHub", "Optimal Workshop"],
+      "process": ["Show screenshot or prototype", "Give task", "User clicks where they'd go", "Measure accuracy"],
+      "use_when": "Validate navigation, information architecture, layout"
+    },
+    "five_second_test": {
+      "description": "Show design for 5 seconds, then ask what they remember",
+      "purpose": "Test first impressions, visual hierarchy, key message",
+      "questions": ["What was this page about?", "What stood out?", "What would you do here?"],
+      "tools": ["UsabilityHub", "Lyssna"],
+      "use_when": "Landing pages, homepages, ads, early concepts"
+    },
+    "card_sorting": {
+      "description": "Users organize content cards into groups",
+      "purpose": "Understand mental models, inform information architecture",
+      "types": {
+        "open": {
+          "description": "Users create and name their own groups",
+          "use_when": "Discover how users categorize (early IA)",
+          "output": "Natural groupings and labels"
+        },
+        "closed": {
+          "description": "Users sort into predefined groups",
+          "use_when": "Validate proposed IA",
+          "output": "Fit of content to categories"
+        },
+        "hybrid": {
+          "description": "Predefined groups + option to create new",
+          "use_when": "Refine existing IA",
+          "output": "Validation + new insights"
+        }
+      },
+      "process": {
+        "1_prepare": "Create cards (one item per card, 30-60 cards)",
+        "2_recruit": "15-30 participants per user group",
+        "3_conduct": "Remote (online tool) or in-person",
+        "4_analyze": "Dendrogram (similarity matrix), agreement rates"
+      },
+      "tools": ["OptimalSort", "UserZoom", "Miro (in-person)"],
+      "best_practices": [
+        "Use realistic content (actual page names, not lorem ipsum)",
+        "30-60 cards (not too few, not overwhelming)",
+        "Card labels should be clear and specific",
+        "Provide context about the domain",
+        "Look for patterns (> 60% agreement is strong)"
+      ]
+    },
+    "tree_testing": {
+      "description": "Test findability in a text-based hierarchy (no visual design)",
+      "purpose": "Validate IA before investing in design",
+      "process": {
+        "1_create_tree": "Text-only site structure (no design distractions)",
+        "2_define_tasks": "Find-tasks ('Where would you find your order history?')",
+        "3_recruit": "20-50 users",
+        "4_test": "Users navigate tree by clicking categories",
+        "5_analyze": "Success rate, directness, time"
+      },
+      "metrics": {
+        "success_rate": { "target": "> 80%", "description": "Users found correct destination" },
+        "directness": { "target": "> 60%", "description": "First click was correct" },
+        "time_on_task": { "description": "Lower is better" }
+      },
+      "tools": ["Treejack (Optimal Workshop)", "UserZoom"],
+      "when": "After card sorting, before wireframing"
+    },
+    "surveys_questionnaires": {
+      "description": "Collect quantitative and qualitative data at scale",
+      "when": "Post-launch, to understand attitudes and satisfaction",
+      "types": {
+        "satisfaction": {
+          "sus": {
+            "name": "System Usability Scale",
+            "description": "10-question standardized usability survey",
+            "scoring": "0-100 scale (68+ is above average)",
+            "questions": [
+              "I think I would like to use this system frequently",
+              "I found the system unnecessarily complex",
+              "I thought the system was easy to use",
+              "I think I would need support to use this system",
+              "I found the various functions well integrated"
+            ],
+            "scale": "1 (Strongly Disagree) to 5 (Strongly Agree)",
+            "when": "Post-task or end of session"
+          },
+          "nps": {
+            "name": "Net Promoter Score",
+            "question": "How likely are you to recommend this to a friend? (0-10)",
+            "calculation": "% Promoters (9-10) - % Detractors (0-6)",
+            "score_range": "-100 to 100",
+            "benchmark": "> 50 is excellent, 0-30 is good",
+            "follow_up": "Why did you give that score?"
+          },
+          "umux_lite": {
+            "name": "UMUX-Lite",
+            "description": "2-question alternative to SUS",
+            "questions": [
+              "This system's capabilities meet my requirements",
+              "This system is easy to use"
+            ],
+            "scale": "1-7",
+            "when": "Need shorter survey than SUS"
+          }
+        },
+        "task_difficulty": {
+          "single_ease_question": {
+            "question": "Overall, how difficult or easy was the task to complete?",
+            "scale": "Very Difficult (1) to Very Easy (7)",
+            "when": "After each task in usability test"
+          }
+        },
+        "custom_surveys": {
+          "best_practices": [
+            "Keep short (< 10 questions)",
+            "One question per concept",
+            "Avoid leading questions",
+            "Use scales consistently (all 1-5 or all 1-7)",
+            "Include open-ended 'Why?' questions",
+            "Randomize order (avoid order bias)",
+            "Progress indicator for longer surveys"
+          ],
+          "question_types": {
+            "likert_scale": "Strongly Disagree to Strongly Agree (1-5 or 1-7)",
+            "rating_scale": "Poor to Excellent (1-5)",
+            "yes_no": "Binary choice",
+            "multiple_choice": "Select one or multiple",
+            "open_ended": "Free text (for qualitative insights)"
+          }
+        }
+      },
+      "tools": ["Typeform", "SurveyMonkey", "Google Forms", "Qualtrics", "Hotjar"],
+      "timing": {
+        "intercept": "Pop-up during session (annoying, use sparingly)",
+        "exit": "On page exit or session end",
+        "email": "Follow-up email (lower response rate)",
+        "in_app": "Triggered after specific action"
+      }
+    },
+    "analytics_analysis": {
+      "description": "Analyze user behavior data to identify UX issues",
+      "quantitative_analytics": {
+        "key_metrics": {
+          "engagement": {
+            "time_on_page": "How long users spend (longer = more engaged, unless stuck)",
+            "pages_per_session": "Depth of engagement",
+            "scroll_depth": "How far users scroll (Hotjar)",
+            "bounce_rate": "% who leave after one page (< 50% is good)"
+          },
+          "conversion": {
+            "conversion_rate": "% who complete goal",
+            "funnel_drop_off": "Where users abandon (cart, checkout step 2?)",
+            "click_through_rate": "% who click CTA"
+          },
+          "navigation": {
+            "top_pages": "Most visited pages",
+            "entry_pages": "Where users land",
+            "exit_pages": "Where users leave (identify dead ends)",
+            "navigation_flow": "User paths through site"
+          }
+        },
+        "tools": {
+          "google_analytics": "Page views, sessions, conversion funnels, user flow",
+          "mixpanel": "Event tracking, cohort analysis, funnels",
+          "amplitude": "Product analytics, behavioral cohorts",
+          "heap": "Auto-capture all events (retroactive analysis)"
+        }
+      },
+      "qualitative_analytics": {
+        "session_recordings": {
+          "description": "Watch real user sessions (anonymized)",
+          "tools": ["Hotjar", "FullStory", "Crazy Egg", "Microsoft Clarity (free)"],
+          "use_cases": ["Understand confusion", "See rage clicks", "Identify form errors", "Observe workarounds"],
+          "privacy": "Mask sensitive data (passwords, credit cards)"
+        },
+        "heatmaps": {
+          "types": {
+            "click_heatmap": "Where users click (find dead clicks, popular elements)",
+            "move_heatmap": "Mouse movement (attention, though not perfect on mobile)",
+            "scroll_heatmap": "How far users scroll (where to place CTAs)"
+          },
+          "tools": ["Hotjar", "Crazy Egg", "Microsoft Clarity"],
+          "insights": ["Dead clicks on non-clickable elements", "Ignored CTAs", "Content below fold not seen"]
+        },
+        "rage_clicks": {
+          "description": "Repeated rapid clicks (indicates frustration)",
+          "cause": "Element not loading, broken link, confusing interaction",
+          "tools": ["Hotjar", "FullStory"]
+        }
+      }
+    },
+    "accessibility_testing": {
+      "description": "Ensure product is usable by people with disabilities",
+      "automated_testing": {
+        "tools": {
+          "axe_devtools": "Browser extension, tests WCAG violations",
+          "lighthouse": "Built into Chrome DevTools, accessibility audit",
+          "wave": "WebAIM's browser extension",
+          "pa11y": "Command-line tool for CI/CD"
+        },
+        "limitations": "Catches ~30-40% of accessibility issues (needs manual testing)"
+      },
+      "manual_testing": {
+        "keyboard_navigation": {
+          "test": ["Tab through all interactive elements", "Shift+Tab to go backward", "Enter/Space to activate", "Escape to close modals"],
+          "check": ["Visible focus indicator", "Logical tab order", "No keyboard traps", "Skip links present"]
+        },
+        "screen_reader": {
+          "tools": {
+            "nvda": "Free, Windows",
+            "jaws": "Popular, Windows (paid)",
+            "voiceover": "Built-in, macOS/iOS",
+            "talkback": "Built-in, Android"
+          },
+          "test": ["Navigate by headings", "Navigate by landmarks", "Fill out forms", "Understand image alt text", "Hear button labels"]
+        },
+        "color_contrast": {
+          "requirement": "WCAG AA: 4.5:1 for normal text, 3:1 for large text (18pt/24px+, or 14pt bold)",
+          "tools": ["Contrast Checker", "Color Oracle (color blindness simulator)"]
+        },
+        "zoom_resize": {
+          "test": "Zoom to 200% (Ctrl/Cmd +)",
+          "check": ["No horizontal scroll", "Content reflows", "Nothing cut off", "Still usable"]
+        }
+      },
+      "assistive_testing": {
+        "description": "Test with real users with disabilities",
+        "recruit": "Recruit users who use assistive tech daily",
+        "platforms": ["Fable", "AccessibilityOz"],
+        "when": "Critical products, high-risk areas (checkout, forms)"
+      }
+    },
+    "performance_testing": {
+      "description": "Test speed and technical performance (UX impact)",
+      "core_web_vitals": {
+        "lcp": { "metric": "Largest Contentful Paint", "target": "< 2.5s", "ux_impact": "Perceived load speed" },
+        "fid": { "metric": "First Input Delay", "target": "< 100ms", "ux_impact": "Interactivity responsiveness" },
+        "cls": { "metric": "Cumulative Layout Shift", "target": "< 0.1", "ux_impact": "Visual stability (no jumps)" }
+      },
+      "tools": {
+        "lighthouse": "Chrome DevTools, scores 0-100",
+        "webpagetest": "Detailed waterfall, multiple locations",
+        "pagespeed_insights": "Google's tool (Lighthouse + field data)",
+        "chrome_devtools": "Network tab, Performance tab"
+      },
+      "mobile_performance": {
+        "test_on": "Real devices (not just emulator)",
+        "throttle": "Simulate 3G connection (Chrome DevTools)",
+        "check": ["Time to interactive", "Smooth scrolling", "Responsive touch"]
+      }
+    }
+  },
+  "research_methods": {
+    "user_interviews": {
+      "description": "One-on-one conversations to understand needs, behaviors, pain points",
+      "when": "Discovery phase, understanding problems",
+      "duration": "30-60 minutes",
+      "participants": "5-10 per user segment",
+      "structure": {
+        "intro": "5 min (explain purpose, get consent)",
+        "warmup": "5 min (easy questions, build rapport)",
+        "main_questions": "30-40 min (open-ended, probing)",
+        "wrap_up": "5 min (anything to add, thank you)"
+      },
+      "question_techniques": {
+        "open_ended": "How do you currently...? (not yes/no)",
+        "five_whys": "Ask 'why' to get to root cause",
+        "tell_me_about": "'Tell me about the last time you...' (concrete stories)",
+        "avoid_leading": "Don't suggest answers ('Do you like X?' → 'How do you feel about X?')"
+      },
+      "best_practices": [
+        "Record (with permission) - audio or video",
+        "Take notes (even if recording)",
+        "Listen more than talk (80/20 rule)",
+        "Allow silence (don't fill pauses)",
+        "Probe on interesting points ('Tell me more')",
+        "Stay neutral (don't defend design)",
+        "Focus on behavior, not opinions"
+      ]
+    },
+    "contextual_inquiry": {
+      "description": "Observe users in their natural environment while they work",
+      "when": "Understanding workflows, tools, environment",
+      "structure": {
+        "master_apprentice": "User is expert, you are learning",
+        "observe": "Watch user work on real tasks",
+        "interrupt": "Ask questions when you see something interesting",
+        "interpret": "Confirm your understanding"
+      },
+      "duration": "2-4 hours",
+      "output": "Workflow diagrams, pain points, workarounds, environment factors"
+    },
+    "diary_studies": {
+      "description": "Users log activities over time (days to weeks)",
+      "when": "Understand behavior over time, in context",
+      "duration": "1 week to 1 month",
+      "method": ["Daily survey", "Photo diary", "Audio log", "App (dscout, Indeemo)"],
+      "use_cases": ["Long purchase cycles", "Habit tracking", "Day-in-the-life"],
+      "challenge": "High dropout (incentivize well)"
+    },
+    "personas": {
+      "description": "Fictional representations of user segments",
+      "when": "After user research, to communicate findings",
+      "elements": {
+        "name_photo": "Humanize (but not real person)",
+        "demographics": "Age, location, job title",
+        "goals": "What they want to achieve",
+        "frustrations": "Pain points and challenges",
+        "behaviors": "How they currently solve problems",
+        "quote": "Memorable statement capturing mindset"
+      },
+      "how_many": "3-5 personas (not too many)",
+      "use": "Decision-making ('Would Sarah use this?'), communication",
+      "anti_pattern": "Making up personas without research"
+    },
+    "journey_mapping": {
+      "description": "Visualize user's experience over time across touchpoints",
+      "when": "After research, to identify opportunities",
+      "structure": {
+        "stages": "Steps in the journey (Awareness → Consideration → Purchase → Use)",
+        "touchpoints": "Where user interacts (website, app, email, support)",
+        "actions": "What user does at each stage",
+        "thoughts": "What user is thinking",
+        "emotions": "Emotional state (up/down line)",
+        "pain_points": "Problems and frustrations",
+        "opportunities": "Where to improve"
+      },
+      "output": "Visual diagram (often horizontal timeline)",
+      "tools": ["Miro", "Figma", "Smaply", "UXPressia"]
+    }
+  },
+  "metrics_kpis": {
+    "heart_framework": {
+      "description": "Google's UX metrics framework",
+      "happiness": {
+        "description": "User attitudes",
+        "metrics": ["NPS", "CSAT (Customer Satisfaction)", "SUS (System Usability Scale)"],
+        "method": "Surveys"
+      },
+      "engagement": {
+        "description": "Level of user involvement",
+        "metrics": ["DAU/MAU ratio", "Session length", "Feature usage", "Frequency of use"],
+        "method": "Analytics"
+      },
+      "adoption": {
+        "description": "New users",
+        "metrics": ["New user sign-ups", "First-time feature use", "Onboarding completion"],
+        "method": "Analytics"
+      },
+      "retention": {
+        "description": "Continued use over time",
+        "metrics": ["Churn rate", "Day 1/7/30 retention", "Repeat purchase rate"],
+        "method": "Analytics"
+      },
+      "task_success": {
+        "description": "Efficiency and effectiveness",
+        "metrics": ["Task completion rate", "Time on task", "Error rate"],
+        "method": "Usability testing or analytics"
+      }
+    }
+  },
+  "best_practices": [
+    "Test early and often (not just at the end)",
+    "Test with real users (not coworkers or friends)",
+    "Recruit representative users (screen for target audience)",
+    "5 users find 85% of usability issues (Nielsen)",
+    "Use think-aloud protocol in usability tests",
+    "Don't lead or guide users (let them struggle)",
+    "Record sessions (with consent)",
+    "Combine quantitative (analytics, A/B tests) and qualitative (interviews, usability tests)",
+    "Prioritize findings (severity × frequency)",
+    "Share findings with team (video clips are powerful)",
+    "Test across devices (desktop, mobile, tablet)",
+    "Test accessibility (keyboard, screen reader, color contrast)",
+    "Run A/B tests long enough for statistical significance",
+    "Test one variable at a time in A/B tests",
+    "Use standardized surveys (SUS, NPS) for benchmarking",
+    "Combine multiple research methods (triangulation)",
+    "Create personas based on research (not assumptions)",
+    "Test information architecture before visual design (tree testing)",
+    "Validate assumptions with data",
+    "Iterate based on findings"
+  ],
+  "anti_patterns": [
+    "Testing with coworkers or friends (not representative)",
+    "Leading users ('Click here', 'Did you see...')",
+    "Stopping A/B test too early (before significance)",
+    "Testing too many variables at once",
+    "Skipping research phase (designing based on assumptions)",
+    "Not testing on mobile devices",
+    "Ignoring accessibility testing",
+    "Only doing quantitative OR qualitative (need both)",
+    "Creating personas without research",
+    "Not prioritizing findings (trying to fix everything)",
+    "Testing too late (after development complete)",
+    "Not recruiting representative users",
+    "Insufficient sample size (< 5 users per test)",
+    "Not testing across browsers",
+    "Confusing correlation with causation in analytics",
+    "Testing in unrealistic scenarios",
+    "Not validating findings with stakeholders",
+    "Focusing only on what users say (ignore what they do)",
+    "Not documenting research (insights lost)",
+    "Testing without clear goals or hypotheses"
+  ],
+  "tools_summary": {
+    "usability_testing": ["UserTesting.com", "Lookback", "Maze", "UsabilityHub", "Loop11"],
+    "tree_testing_card_sorting": ["Optimal Workshop (Treejack, OptimalSort)", "UserZoom"],
+    "surveys": ["Typeform", "SurveyMonkey", "Google Forms", "Qualtrics"],
+    "analytics": ["Google Analytics", "Mixpanel", "Amplitude", "Heap"],
+    "session_recording_heatmaps": ["Hotjar", "FullStory", "Crazy Egg", "Microsoft Clarity"],
+    "a_b_testing": ["Google Optimize", "Optimizely", "VWO", "AB Tasty"],
+    "accessibility": ["axe DevTools", "WAVE", "Lighthouse", "NVDA", "JAWS"],
+    "performance": ["Lighthouse", "WebPageTest", "PageSpeed Insights"],
+    "prototyping": ["Figma", "Adobe XD", "Sketch", "Framer"],
+    "research_ops": ["Dovetail", "EnjoyHQ", "Condens", "Airtable"]
+  },
+  "references": [
+    "Don't Make Me Think (Steve Krug) - Usability fundamentals",
+    "Rocket Surgery Made Easy (Steve Krug) - DIY usability testing",
+    "The User Experience Team of One (Leah Buley) - Practical methods",
+    "Nielsen Norman Group - Research articles and courses",
+    "Just Enough Research (Erika Hall) - Practical research guide",
+    "Quantifying the User Experience (Sauro & Lewis) - Metrics and statistics",
+    "Measuring the User Experience (Tullis & Albert) - UX metrics"
+  ]
+}
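
A few of the formulas in this knowledge file are easy to get wrong in code, so some sketches follow. The `sus` entry gives only the answer scale (1-5) and the 0-100 result (68+ is above average); the conversion between them is the standard Brooke scoring rule, not something stated in the file: odd-numbered items contribute `score - 1`, even-numbered items contribute `5 - score`, and the raw sum is multiplied by 2.5. A minimal TypeScript sketch (function name and sample responses are illustrative, not part of this package):

```ts
// Standard SUS scoring (Brooke): ten 1-5 Likert responses -> 0-100 score.
function susScore(responses: number[]): number {
  if (responses.length !== 10) throw new Error("SUS requires exactly 10 responses");
  const raw = responses.reduce(
    // index 0, 2, ... are the odd-numbered items: r - 1; the rest: 5 - r
    (sum, r, i) => sum + (i % 2 === 0 ? r - 1 : 5 - r),
    0
  );
  return raw * 2.5; // raw sum 0-40 scaled to 0-100; 68+ is above average
}

console.log(susScore([5, 1, 5, 2, 4, 1, 5, 1, 4, 2])); // 90
```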
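
The `nps` entry does spell out its formula ("% Promoters (9-10) - % Detractors (0-6)", range -100 to 100), so this sketch just makes it concrete; the scores are made up:

```ts
// NPS from raw 0-10 "likelihood to recommend" answers; result is in -100..100.
function netPromoterScore(scores: number[]): number {
  const promoters = scores.filter((s) => s >= 9).length;
  const detractors = scores.filter((s) => s <= 6).length; // 7-8 are passives, ignored
  return Math.round(((promoters - detractors) / scores.length) * 100);
}

console.log(netPromoterScore([10, 9, 9, 8, 7, 6, 3, 10])); // (4 - 2) / 8 -> 25
```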
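
For the A/B-testing guidance ("95% confidence level, p < 0.05"; "use t-test or chi-square test"), a common concrete form for conversion rates is the two-proportion z-test, which is equivalent to the 2×2 chi-square test the file names. A sketch with hypothetical counts, not a prescription from the package:

```ts
// Two-proportion z-test on conversion counts (equivalent to a 2x2 chi-square).
function zStatistic(convA: number, nA: number, convB: number, nB: number): number {
  const pooled = (convA + convB) / (nA + nB); // pooled rate under the null hypothesis
  const se = Math.sqrt(pooled * (1 - pooled) * (1 / nA + 1 / nB));
  return (convB / nB - convA / nA) / se;
}

const z = zStatistic(480, 10_000, 552, 10_000); // control 4.80%, variant 5.52%
// |z| > 1.96 corresponds to p < 0.05 (two-tailed) at a 95% confidence level
console.log(z.toFixed(2), Math.abs(z) > 1.96 ? "significant" : "not significant"); // 2.30 significant
```

Note that this check is the stopping rule, not the sample-size plan: as the file's `common_mistakes` list says, the required traffic should be computed up front with an A/B test calculator, and the test should not be stopped early even if it is winning.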