@elsahafy/ux-mcp-server 2.0.0 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,192 @@
1
+ {
2
+ "name": "AI/ML UX Patterns",
3
+ "description": "User experience patterns for AI and machine learning powered features",
4
+ "ai_ux_principles": {
5
+ "transparency": {
6
+ "description": "Users should understand AI is involved and how it works",
7
+ "practices": ["Explain AI decisions", "Show confidence levels", "Provide 'Why did I see this?' option", "Indicate when AI-generated"]
8
+ },
9
+ "control": {
10
+ "description": "Users should control AI, not be controlled by it",
11
+ "practices": ["Allow opt-out", "Provide override options", "Let users correct AI", "Give feedback mechanisms"]
12
+ },
13
+ "trust": {
14
+ "description": "Build confidence through reliability and honesty",
15
+ "practices": ["Show uncertainty when unsure", "Admit limitations", "Explain failures", "Consistent performance"]
16
+ }
17
+ },
18
+ "common_ai_patterns": {
19
+ "recommendations": {
20
+ "use_cases": ["Netflix (shows)", "Spotify (music)", "Amazon (products)", "LinkedIn (connections)"],
21
+ "ux_patterns": {
22
+ "explain_why": "Show reason ('Because you watched X' or 'Based on your history')",
23
+ "diversity": "Don't just show similar items - introduce discovery",
24
+ "feedback": "Thumbs up/down or 'Not interested' option",
25
+ "transparency": "Allow viewing/editing preferences"
26
+ }
27
+ },
28
+ "search_autocomplete": {
29
+ "use_cases": ["Google Search", "E-commerce search"],
30
+ "best_practices": ["Show top 5-8 suggestions", "Highlight matching text", "Allow keyboard navigation", "Update as user types (debounced)"]
31
+ },
32
+ "content_moderation": {
33
+ "use_cases": ["Spam detection", "Inappropriate content filtering"],
34
+ "ux_considerations": {
35
+ "false_positives": "Allow appeal ('This was flagged incorrectly')",
36
+ "transparency": "Explain why content was removed",
37
+ "human_review": "Escalate to human for appeals"
38
+ }
39
+ },
40
+ "smart_compose": {
41
+ "examples": ["Gmail Smart Compose", "Grammarly"],
42
+ "ux_patterns": {
43
+ "ghost_text": "Show suggestion in gray, Tab to accept",
44
+ "non_intrusive": "Don't force acceptance, easy to ignore",
45
+ "learn_from_rejections": "Adapt when user doesn't accept"
46
+ }
47
+ },
48
+ "image_recognition": {
49
+ "use_cases": ["Photo tagging", "Visual search", "Accessibility (alt text)"],
50
+ "ux_patterns": {
51
+ "confidence_levels": "Show confidence (90% sure it's a cat)",
52
+ "allow_corrections": "Let users fix mistakes",
53
+ "fallback": "Provide manual option if AI fails"
54
+ }
55
+ },
56
+ "chatbots": {
57
+ "types": ["Customer service", "Virtual assistants", "FAQ bots"],
58
+ "ux_patterns": {
59
+ "set_expectations": "Clarify it's a bot, explain capabilities",
60
+ "escalation": "Offer human handoff when stuck",
61
+ "context": "Remember conversation history",
62
+ "typing_indicator": "Show bot is 'thinking'",
63
+ "error_handling": "Acknowledge misunderstanding, offer alternatives"
64
+ }
65
+ },
66
+ "personalization": {
67
+ "examples": ["Personalized dashboards", "Adaptive UI", "Dynamic pricing"],
68
+ "ux_considerations": {
69
+ "transparency": "Explain why content is personalized",
70
+ "control": "Allow disabling personalization",
71
+ "privacy": "Clear about data usage",
72
+ "filter_bubble": "Expose users to diverse content, not just personalized"
73
+ }
74
+ }
75
+ },
76
+ "ai_specific_patterns": {
77
+ "loading_states": {
78
+ "ai_processing": "AI is analyzing... (show progress if possible)",
79
+ "longer_waits": "AI tasks may take seconds to minutes; show time estimates",
80
+ "skeleton_screens": "Show placeholder while AI generates content"
81
+ },
82
+ "confidence_indicators": {
83
+ "description": "Show how confident AI is in its prediction",
84
+ "examples": ["90% confidence", "High/Medium/Low confidence", "Progress bar"],
85
+ "use_when": "Predictions that users will act on (medical, financial)"
86
+ },
87
+ "explanation_interfaces": {
88
+ "description": "Help users understand AI decisions",
89
+ "techniques": {
90
+ "feature_importance": "This decision was based on: age (40%), location (30%), history (30%)",
91
+ "similar_examples": "This is like cases A, B, C",
92
+ "counterfactuals": "If X changed to Y, decision would be different"
93
+ }
94
+ },
95
+ "feedback_loops": {
96
+ "description": "Let users train the AI",
97
+ "examples": ["Thumbs up/down", "'Not interested'", "Report incorrect", "Edit suggestions"],
98
+ "benefit": "AI improves over time, users feel in control"
99
+ },
100
+ "fallback_options": {
101
+ "description": "What happens when AI fails",
102
+ "patterns": [
103
+ "Escalate to human (chatbots)",
104
+ "Provide manual alternative",
105
+ "Show uncertainty and ask for help",
106
+ "Graceful degradation (simpler AI or rule-based)"
107
+ ]
108
+ }
109
+ },
110
+ "ethical_considerations": {
111
+ "bias": {
112
+ "issue": "AI can perpetuate or amplify biases in training data",
113
+ "examples": ["Hiring tools biased against women", "Facial recognition less accurate for darker skin tones"],
114
+ "mitigation": ["Diverse training data", "Bias audits", "Human oversight", "Allow manual override"]
115
+ },
116
+ "privacy": {
117
+ "issue": "AI often requires large amounts of user data",
118
+ "practices": ["Minimize data collection", "Explain data usage", "Allow data deletion", "Anonymize when possible"]
119
+ },
120
+ "automation_bias": {
121
+ "issue": "Users over-trust AI recommendations",
122
+ "mitigation": ["Show confidence levels", "Encourage critical thinking", "Explain limitations"]
123
+ },
124
+ "filter_bubbles": {
125
+ "issue": "Personalization can create echo chambers",
126
+ "mitigation": "Expose users to diverse content, show 'Outside your usual' sections"
127
+ }
128
+ },
129
+ "testing_ai_features": {
130
+ "challenges": ["Non-deterministic (output varies)", "Hard to predict edge cases", "Requires large datasets"],
131
+ "approaches": {
132
+ "unit_tests": "Test AI model accuracy with test datasets",
133
+ "ux_testing": "Observe users interacting with AI features",
134
+ "a_b_testing": "Compare AI vs non-AI versions",
135
+ "monitoring": "Track AI accuracy, user corrections, escalations in production",
136
+ "red_teaming": "Deliberately try to break AI (adversarial testing)"
137
+ }
138
+ },
139
+ "best_practices": [
140
+ "Be transparent about AI involvement",
141
+ "Show confidence levels for predictions",
142
+ "Allow users to correct AI mistakes",
143
+ "Provide manual fallbacks",
144
+ "Explain AI decisions in plain language",
145
+ "Let users opt-out of AI features",
146
+ "Monitor for bias and fairness",
147
+ "Design for AI failures (they will happen)",
148
+ "Provide feedback mechanisms (thumbs up/down)",
149
+ "Don't hide AI errors (acknowledge and explain)",
150
+ "Set realistic expectations (don't oversell AI)",
151
+ "Escalate to humans when AI is stuck",
152
+ "Test with diverse users and data",
153
+ "Show progress for long AI tasks",
154
+ "Remember context in AI conversations"
155
+ ],
156
+ "anti_patterns": [
157
+ "Black box AI (no explanation)",
158
+ "No way to correct AI mistakes",
159
+ "Forcing AI features on users",
160
+ "Hiding AI involvement",
161
+ "Overpromising AI capabilities",
162
+ "Ignoring bias in AI",
163
+ "No human escalation path",
164
+ "Trusting AI blindly (no validation)",
165
+ "Poor error messages ('AI failed')",
166
+ "No confidence indicators",
167
+ "Collecting unnecessary data for AI",
168
+ "No feedback mechanisms",
169
+ "Annoying AI suggestions (smart replies after every message)",
170
+ "Filter bubbles without escape",
171
+ "Automation without transparency"
172
+ ],
173
+ "examples": {
174
+ "good": {
175
+ "spotify_discover_weekly": "Personalized playlist with 'Why this song?' explanations",
176
+ "gmail_smart_compose": "Non-intrusive suggestions, easy to ignore, learns from rejections",
177
+ "netflix_recommendations": "Shows reason ('Because you watched X'), allows feedback"
178
+ },
179
+ "bad": {
180
+ "amazon_alexa_random_laughs": "AI bug caused random laughing (creepy, no explanation)",
181
+ "microsoft_tay": "Chatbot learned offensive language from users (no moderation)",
182
+ "apple_card_credit_limits": "Alleged gender bias in credit limits (black box AI, no explanation)"
183
+ }
184
+ },
185
+ "resources": [
186
+ "People + AI Guidebook (Google)",
187
+ "Human-Centered AI (Stanford HAI)",
188
+ "AI Ethics Guidelines (EU)",
189
+ "Designing Human-Centric AI Experiences (Microsoft)",
190
+ "Machine Learning Fairness (Google)"
191
+ ]
192
+ }