aimodelshare-0.3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
aimodelshare/moral_compass/apps/bias_detective.py (new file)

@@ -0,0 +1,714 @@
"""
Activity 7: Bias Detective - Gradio application for the Justice & Equity Challenge.

This app teaches:
1. How to diagnose where and how bias appears in AI models
2. Expert fairness principles (OEIAC framework)
3. Identifying demographic data in datasets
4. Analyzing group-level bias with fairness metrics

Structure:
- Factory function `create_bias_detective_app()` returns a Gradio Blocks object
- Convenience wrapper `launch_bias_detective_app()` launches it inline (for notebooks)

Moral Compass Integration:
- Uses ChallengeManager for progress tracking (tasks A-C)
- Task A: Framework understanding
- Task B: Demographics identification
- Task C: Bias analysis
- Debounced sync with Force Sync option
"""
import contextlib
import os
import random
import logging

# Import moral compass integration helpers
from .mc_integration_helpers import (
    get_challenge_manager,
    sync_user_moral_state,
    sync_team_state,
    build_moral_leaderboard_html,
    get_moral_compass_widget_html,
)

# Import session-based authentication
from .session_auth import (
    create_session_state,
    authenticate_session,
    get_session_username,
    get_session_token,
    is_session_authenticated,
    get_session_team,
    set_session_team,
)
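
# Note: mc_integration_helpers and session_auth are sibling modules in
# aimodelshare/moral_compass/apps/ (see the wheel's file manifest), so these
# relative imports resolve within the installed package.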

logger = logging.getLogger("aimodelshare.moral_compass.apps.bias_detective")

def _get_compas_demographic_data():
    """Generate demographic distribution from COMPAS-like dataset."""
    # Simulated demographic distributions based on real COMPAS data patterns
    demographics = {
        "race": {
            "African-American": 3175,
            "Caucasian": 2103,
            "Hispanic": 509,
            "Other": 343,
            "Asian": 31,
            "Native American": 11
        },
        "gender": {
            "Male": 4997,
            "Female": 1175
        },
        "age": {
            "18-25": 1637,
            "26-35": 2184,
            "36-45": 1453,
            "46+": 898
        }
    }
    return demographics
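
# Sanity check on the simulated data: each attribute sums to the same cohort
# of 6,172 records (race: 3175 + 2103 + 509 + 343 + 31 + 11 = 6172; gender:
# 4997 + 1175 = 6172; age: 1637 + 2184 + 1453 + 898 = 6172), so group counts
# can be read directly as shares of one population.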


def _get_fairness_metrics():
    """Generate fairness metrics showing group-level bias."""
    # Simulated fairness metrics showing disparate impact
    metrics = {
        "African-American": {
            "false_positive_rate": 44.9,
            "false_negative_rate": 28.0,
            "sample_size": 3175
        },
        "Caucasian": {
            "false_positive_rate": 23.5,
            "false_negative_rate": 47.7,
            "sample_size": 2103
        },
        "Hispanic": {
            "false_positive_rate": 33.8,
            "false_negative_rate": 35.2,
            "sample_size": 509
        },
        "Other": {
            "false_positive_rate": 29.1,
            "false_negative_rate": 38.5,
            "sample_size": 343
        }
    }
    return metrics
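
# These rates follow the standard confusion-matrix definitions used in the
# "Bias Radar" tab below:
#   false positive rate = FP / (FP + TN)  # actually-safe people wrongly flagged
#   false negative rate = FN / (FN + TP)  # actually-risky people wrongly cleared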


def _get_user_stats():
    """Get user statistics."""
    try:
        username = os.environ.get("username")
        team_name = os.environ.get("TEAM_NAME", "Unknown Team")

        return {
            "username": username or "Guest",
            "team_name": team_name,
            "is_signed_in": bool(username)
        }
    except Exception:
        return {
            "username": "Guest",
            "team_name": "Unknown Team",
            "is_signed_in": False
        }
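
# _get_user_stats reads the signed-in user from the "username" environment
# variable (set elsewhere in aimodelshare when credentials are configured);
# when it is unset, the app degrades gracefully to "Guest" values.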


def create_bias_detective_app(theme_primary_hue: str = "indigo") -> "gr.Blocks":
    """Create the Bias Detective Gradio Blocks app (not launched yet)."""
    try:
        import gradio as gr
        gr.close_all(verbose=False)
    except ImportError as e:
        raise ImportError(
            "Gradio is required for the bias detective app. Install with `pip install gradio`."
        ) from e

    demographics = _get_compas_demographic_data()
    fairness_metrics = _get_fairness_metrics()

    # Track state - using closures for moral compass points.
    # One shared session container per app instance; create_session_state
    # (imported from session_auth above) is assumed to take no arguments and
    # to return the object the get_*/is_* session helpers accept.
    session_state = create_session_state()
    framework_score = {"value": 0}
    identified_issues = {"demographics": [], "biases": []}
    moral_compass_points = {"value": 0}
    server_moral_score = {"value": None}
    is_synced = {"value": False}

    def sync_moral_state(session_state, override=False):
        """Sync moral state to server (debounced unless override)."""
        username = get_session_username(session_state)

        if not is_session_authenticated(session_state):
            return {
                'widget_html': get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=None,
                    is_synced=False
                ),
                'status': 'Guest mode - sign in to sync'
            }

        # Get or create challenge manager for this user
        challenge_manager = get_challenge_manager(username)
        if not challenge_manager:
            return {
                'widget_html': get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=None,
                    is_synced=False
                ),
                'status': 'Could not create challenge manager'
            }

        # Sync to server
        sync_result = sync_user_moral_state(
            cm=challenge_manager,
            moral_points=moral_compass_points["value"],
            override=override
        )

        # Update state
        if sync_result['synced']:
            server_moral_score["value"] = sync_result.get('server_score')
            is_synced["value"] = True

            # Trigger team sync if user has team
            team_name = get_session_team(session_state)
            if team_name:
                sync_team_state(team_name)

        # Generate widget HTML
        widget_html = get_moral_compass_widget_html(
            local_points=moral_compass_points["value"],
            server_score=server_moral_score["value"],
            is_synced=is_synced["value"]
        )

        return {
            'widget_html': widget_html,
            'status': sync_result['message']
        }
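
    # sync_user_moral_state is assumed to return a dict with at least the keys
    # consumed above: 'synced' (bool), 'server_score' (optional number), and
    # 'message' (a status string surfaced in the UI).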

    def check_framework_answer(session_state, principle, indicator, observable):
        """Check if framework components are correctly categorized."""
        correct_mapping = {
            "Equal Treatment": "Principle",
            "Bias Mitigation": "Indicator",
            "False Positive Rate Disparity": "Observable"
        }

        score = 0
        feedback = []

        if principle == "Principle":
            score += 1
            feedback.append("✓ Correct! 'Equal Treatment' is a core ethical principle.")
        else:
            feedback.append("✗ 'Equal Treatment' should be categorized as a Principle.")

        if indicator == "Indicator":
            score += 1
            feedback.append("✓ Correct! 'Bias Mitigation' is an indicator of justice.")
        else:
            feedback.append("✗ 'Bias Mitigation' should be categorized as an Indicator.")

        if observable == "Observable":
            score += 1
            feedback.append("✓ Correct! 'False Positive Rate Disparity' is a measurable observable.")
        else:
            feedback.append("✗ 'False Positive Rate Disparity' should be categorized as an Observable.")

        framework_score["value"] = score

        if score == 3:
            moral_compass_points["value"] += 100
            feedback.append("\n🎉 Perfect! You've earned 100 Moral Compass points!")

            # Update ChallengeManager (Task A: Framework understanding) if authenticated
            username = get_session_username(session_state)
            if username:
                challenge_manager = get_challenge_manager(username)
                if challenge_manager:
                    challenge_manager.complete_task('A')
                    challenge_manager.answer_question('A', 'A1', 1)

            # Trigger sync
            sync_result = sync_moral_state(session_state)
            feedback.append(f"\n{sync_result['status']}")

        return "\n".join(feedback)

    def scan_demographics(race_toggle, gender_toggle, age_toggle):
        """Scan dataset for demographic variables."""
        found = []
        charts = []

        if race_toggle:
            found.append("Race")
            identified_issues["demographics"].append("race")
            race_data = demographics["race"]
            chart_text = "**Race Distribution:**\n"
            for race, count in race_data.items():
                chart_text += f"- {race}: {count} ({count/sum(race_data.values())*100:.1f}%)\n"
            charts.append(chart_text)

        if gender_toggle:
            found.append("Gender")
            identified_issues["demographics"].append("gender")
            gender_data = demographics["gender"]
            chart_text = "**Gender Distribution:**\n"
            for gender, count in gender_data.items():
                chart_text += f"- {gender}: {count} ({count/sum(gender_data.values())*100:.1f}%)\n"
            charts.append(chart_text)

        if age_toggle:
            found.append("Age")
            identified_issues["demographics"].append("age")
            age_data = demographics["age"]
            chart_text = "**Age Distribution:**\n"
            for age_range, count in age_data.items():
                chart_text += f"- {age_range}: {count} ({count/sum(age_data.values())*100:.1f}%)\n"
            charts.append(chart_text)

        if found:
            moral_compass_points["value"] += 50

            # Update ChallengeManager (Task B: Demographics identification)
            username = get_session_username(session_state)
            challenge_manager = get_challenge_manager(username) if username else None
            if challenge_manager:
                challenge_manager.complete_task('B')
                challenge_manager.answer_question('B', 'B1', 1)

            summary = f"✓ Found demographic variables: {', '.join(found)}\n\n"
            summary += "⚠️ **Warning:** These variables can encode bias in AI predictions.\n\n"
            summary += "\n".join(charts)
            summary += "\n\n🏆 +50 Moral Compass points for identifying potential bias sources!"

            # Trigger sync
            sync_result = sync_moral_state(session_state)
            summary += f"\n\n{sync_result['status']}"
        else:
            summary = "Select variables to scan the dataset."

        return summary

    def analyze_bias():
        """Analyze group-level bias in the model."""
        report = "## Bias Radar: Fairness Metrics by Race\n\n"
        report += "| Group | False Positive Rate | False Negative Rate | Sample Size |\n"
        report += "|-------|---------------------|---------------------|-------------|\n"

        max_fpr = 0
        max_fpr_group = ""

        for group, metrics in fairness_metrics.items():
            fpr = metrics["false_positive_rate"]
            fnr = metrics["false_negative_rate"]
            size = metrics["sample_size"]
            report += f"| {group} | {fpr}% | {fnr}% | {size} |\n"

            if fpr > max_fpr:
                max_fpr = fpr
                max_fpr_group = group

        caucasian_fpr = fairness_metrics["Caucasian"]["false_positive_rate"]
        report += "\n### ⚠️ High-Risk Disparity Detected\n\n"
        report += f"**{max_fpr_group}** defendants face a **{max_fpr:.1f}%** false positive rate, "
        report += f"nearly **{max_fpr/caucasian_fpr:.1f}x higher** than Caucasian defendants ({caucasian_fpr}%).\n\n"
        report += "**Real-world consequence:** This means African-American defendants are wrongly "
        report += "labeled as 'high risk' at nearly twice the rate of other groups, potentially "
        report += "leading to longer sentences or denial of bail.\n\n"

        identified_issues["biases"].append("racial_disparity_in_fpr")
        moral_compass_points["value"] += 100

        # Update ChallengeManager (Task C: Bias analysis)
        username = get_session_username(session_state)
        challenge_manager = get_challenge_manager(username) if username else None
        if challenge_manager:
            challenge_manager.complete_task('C')
            challenge_manager.answer_question('C', 'C1', 1)

        report += "🏆 +100 Moral Compass points for identifying bias patterns!"

        # Trigger sync
        sync_result = sync_moral_state(session_state)
        report += f"\n\n{sync_result['status']}"

        return report

    def check_bias_question(answer):
        """Check bias identification question."""
        if answer == "African-American defendants - wrongly labeled high risk":
            moral_compass_points["value"] += 50
            return "✓ Correct! African-American defendants suffer disproportionate false positive rates, meaning they are incorrectly predicted to reoffend at higher rates.\n\n🏆 +50 Moral Compass points!"
        else:
            return "✗ Not quite. Look at the false positive rates - which group has the highest rate of being wrongly predicted as high risk?"

    def generate_diagnosis_report():
        """Generate final Bias Detective report."""
        report = "# 🕵️ Bias Detective: Diagnosis Report\n\n"
        report += f"**Moral Compass Score:** {moral_compass_points['value']} points\n\n"
        report += "## Demographics Found:\n"

        if identified_issues["demographics"]:
            for demo in identified_issues["demographics"]:
                report += f"- ✓ {demo.title()}\n"
        else:
            report += "- No demographics scanned yet\n"

        report += "\n## Bias Patterns Discovered:\n"

        if identified_issues["biases"]:
            report += "- ✓ Racial disparity in false positive rates\n"
            report += "- ✓ African-American defendants disproportionately affected\n"
        else:
            report += "- No bias analysis completed yet\n"

        report += "\n## Principle(s) Invoked:\n"
        report += "- Justice & Equity\n"
        report += "- Equal Treatment under the law\n"
        report += "- Bias Mitigation\n\n"

        report += "**Status:** Ready to proceed to Activity 8 - Fairness Fixer\n"

        return report

    # Create the Gradio app
    with gr.Blocks(
        title="Activity 7: Bias Detective",
        theme=gr.themes.Soft(primary_hue=theme_primary_hue)
    ) as app:
        gr.Markdown("# 🕵️ Activity 7: Bias Detective")
        gr.Markdown(
            """
            **Objective:** Diagnose where and how bias appears in the AI model using expert fairness principles.

            **Your Role:** You've joined the **AI Ethics Task Force** as a **Bias Detective**.

            **Estimated Time:** 8–12 minutes
            """
        )

        # Moral Compass widget with Force Sync
        with gr.Row():
            with gr.Column(scale=3):
                moral_compass_display = gr.HTML(
                    get_moral_compass_widget_html(
                        local_points=0,
                        server_score=None,
                        is_synced=False
                    )
                )
            with gr.Column(scale=1):
                force_sync_btn = gr.Button("Force Sync", variant="secondary", size="sm")
                sync_status = gr.Markdown("")

        # Force Sync handler
        def handle_force_sync():
            sync_result = sync_moral_state(session_state, override=True)
            return sync_result['widget_html'], sync_result['status']

        force_sync_btn.click(
            fn=handle_force_sync,
            outputs=[moral_compass_display, sync_status]
        )

        # Section 7.2: Expert Framework Overview
        with gr.Tab("7.2 Expert Framework"):
            gr.Markdown(
                """
                ## Understanding the OEIAC Framework

                The **OEIAC (Observatori d'Ètica en Intel·ligència Artificial de Catalunya)**
                framework helps us evaluate AI systems through three levels:

                ### 🎯 Principles
                Core ethical values (e.g., **Justice & Equity**, **Equal Treatment**)

                ### 📊 Indicators
                Measurable signs of ethical behavior (e.g., **Bias Mitigation**, **Fairness**)

                ### 🔬 Observables
                Specific metrics we can measure (e.g., **False Positive Rate Disparity**)

                ---

                ### Interactive Exercise: Framework Builder

                Categorize these examples correctly:
                """
            )

            principle_choice = gr.Radio(
                choices=["Principle", "Indicator", "Observable"],
                label="'Equal Treatment' is a:",
                value=None
            )
            indicator_choice = gr.Radio(
                choices=["Principle", "Indicator", "Observable"],
                label="'Bias Mitigation' is a:",
                value=None
            )
            observable_choice = gr.Radio(
                choices=["Principle", "Indicator", "Observable"],
                label="'False Positive Rate Disparity' is a:",
                value=None
            )

            check_btn = gr.Button("Check My Answers", variant="primary")
            framework_feedback = gr.Markdown("")

            def update_widget_after_framework(principle, indicator, observable):
                feedback = check_framework_answer(session_state, principle, indicator, observable)
                widget_html = get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=server_moral_score["value"],
                    is_synced=is_synced["value"]
                )
                return feedback, widget_html

            check_btn.click(
                fn=update_widget_after_framework,
                inputs=[principle_choice, indicator_choice, observable_choice],
                outputs=[framework_feedback, moral_compass_display]
            )

        # Section 7.3: Identify Demographic Data
        with gr.Tab("7.3 Demographics Scanner"):
            gr.Markdown(
                """
                ## Dataset Demographics Scanner

                ⚠️ **Warning:** Demographic variables can encode bias in AI predictions.

                Use the toggles below to scan the dataset for sensitive demographic attributes:
                """
            )

            race_toggle = gr.Checkbox(label="Scan for Race", value=False)
            gender_toggle = gr.Checkbox(label="Scan for Gender", value=False)
            age_toggle = gr.Checkbox(label="Scan for Age", value=False)

            scan_btn = gr.Button("Run Demographics Scan", variant="primary")
            demographics_output = gr.Markdown("")

            def update_widget_after_scan(race, gender, age):
                output = scan_demographics(race, gender, age)
                widget_html = get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=server_moral_score["value"],
                    is_synced=is_synced["value"]
                )
                return output, widget_html

            scan_btn.click(
                fn=update_widget_after_scan,
                inputs=[race_toggle, gender_toggle, age_toggle],
                outputs=[demographics_output, moral_compass_display]
            )

            gr.Markdown("### Check-In Question")
            demo_question = gr.Radio(
                choices=[
                    "They help the model make better predictions",
                    "They can lead to unfair treatment of certain groups",
                    "They are required by law",
                    "They have no effect on model outcomes"
                ],
                label="Why are demographic variables concerning in AI models?",
                value=None
            )
            demo_check_btn = gr.Button("Check Answer")
            demo_feedback = gr.Markdown("")

            def check_demo_question(answer):
                if answer == "They can lead to unfair treatment of certain groups":
                    moral_compass_points["value"] += 25
                    return "✓ Correct! Demographic variables can perpetuate historical biases and lead to discriminatory outcomes.\n\n🏆 +25 Moral Compass points!"
                else:
                    return "✗ Not quite. Think about how using race or gender in predictions might affect different groups."

            demo_check_btn.click(
                fn=check_demo_question,
                inputs=demo_question,
                outputs=demo_feedback
            ).then(
                fn=lambda: get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=server_moral_score["value"],
                    is_synced=is_synced["value"]
                ),
                outputs=moral_compass_display
            )
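
            # Gradio's .then() chaining runs its callback only after the click
            # handler finishes, so the score widget refreshes after the answer
            # feedback has been rendered.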

        # Section 7.4: Analyze Group-Level Bias
        with gr.Tab("7.4 Bias Radar"):
            gr.Markdown(
                """
                ## Bias Radar Visualization

                Now let's analyze **disparities in error rates** across demographic groups.

                **Key Concepts:**
                - **False Positive Rate:** How often the model wrongly predicts someone will reoffend
                - **False Negative Rate:** How often the model wrongly predicts someone won't reoffend

                These errors have serious real-world consequences in criminal justice decisions.

                ### 📊 Understanding False Positives via Confusion Matrix

                <details>
                <summary><b>Click to expand: Example Confusion Matrix by Race</b></summary>

                **African-American Defendants (n=3,175):**
                ```
                                 Predicted: Low Risk | Predicted: High Risk
                ----------------------------------------------------------------
                Actually Safe         805 (TN)       |     1,425 (FP ⚠️)
                Actually Risky        890 (FN)       |        55 (TP)
                ```

                **Caucasian Defendants (n=2,103):**
                ```
                                 Predicted: Low Risk | Predicted: High Risk
                ----------------------------------------------------------------
                Actually Safe       1,210 (TN)       |       494 (FP)
                Actually Risky        203 (FN)       |       196 (TP)
                ```

                **Key Finding:**
                - African-American FP rate: 1,425 / (805 + 1,425) = **63.9%** wrongly flagged
                - Caucasian FP rate: 494 / (1,210 + 494) = **28.9%** wrongly flagged
                - **Disparity: 2.2x higher** for African-American defendants

                **Real-world impact of False Positives:**
                - Denied bail → pretrial detention
                - Longer sentences recommended
                - Family/job disruption while innocent person detained

                </details>

                ---
                """
            )

            analyze_btn = gr.Button("Analyze Fairness Metrics", variant="primary")
            bias_analysis_output = gr.Markdown("")

            def update_widget_after_analysis():
                output = analyze_bias()
                widget_html = get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=server_moral_score["value"],
                    is_synced=is_synced["value"]
                )
                return output, widget_html

            analyze_btn.click(
                fn=update_widget_after_analysis,
                outputs=[bias_analysis_output, moral_compass_display]
            )

            gr.Markdown("### Check-In Question")
            bias_question = gr.Radio(
                choices=[
                    "Caucasian defendants - wrongly labeled low risk",
                    "African-American defendants - wrongly labeled high risk",
                    "Hispanic defendants - correctly labeled high risk",
                    "All groups are treated equally"
                ],
                label="Which group is most harmed by this model's bias?",
                value=None
            )
            bias_check_btn = gr.Button("Check Answer")
            bias_feedback = gr.Markdown("")

            bias_check_btn.click(
                fn=check_bias_question,
                inputs=bias_question,
                outputs=bias_feedback
            ).then(
                fn=lambda: get_moral_compass_widget_html(
                    local_points=moral_compass_points["value"],
                    server_score=server_moral_score["value"],
                    is_synced=is_synced["value"]
                ),
                outputs=moral_compass_display
            )

        # Ethics Leaderboard Tab
        with gr.Tab("Ethics Leaderboard"):
            gr.Markdown(
                """
                ## 🏆 Ethics Leaderboard

                This leaderboard shows **combined ethical engagement + performance scores**.

                **What's measured:**
                - Moral compass points (bias detection skills)
                - Model accuracy (technical performance)
                - Combined score = accuracy × normalized_moral_points

                **Why this matters:**
                Being good at building models isn't enough - we must also understand fairness and bias!
                """
            )

            leaderboard_display = gr.HTML("")
            refresh_leaderboard_btn = gr.Button("Refresh Leaderboard", variant="secondary")

            def load_leaderboard():
                user_stats = _get_user_stats()
                return build_moral_leaderboard_html(
                    highlight_username=user_stats.get("username"),
                    include_teams=True
                )

            refresh_leaderboard_btn.click(
                fn=load_leaderboard,
                outputs=leaderboard_display
            )

            # Load initially
            app.load(fn=load_leaderboard, outputs=leaderboard_display)
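
            # app.load fires once when the page first renders, so the
            # leaderboard is populated without requiring a manual refresh.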

        # Section 7.5: Completion
        with gr.Tab("7.5 Diagnosis Report"):
            gr.Markdown(
                """
                ## Generate Your Bias Detective Report

                Review your findings and generate a comprehensive diagnosis report.
                """
            )

            report_btn = gr.Button("Generate Diagnosis Report", variant="primary")
            report_output = gr.Markdown("")

            report_btn.click(
                fn=generate_diagnosis_report,
                outputs=report_output
            )

            gr.Markdown(
                """
                ---

                ### 🎉 Activity 7 Complete!

                **Next Step:** Proceed to **Activity 8: Fairness Fixer** to apply hands-on fairness fixes.
                """
            )

    return app


def launch_bias_detective_app(
    share: bool = False,
    server_name: str = None,
    server_port: int = None,
    theme_primary_hue: str = "indigo"
) -> None:
    """Convenience wrapper to create and launch the bias detective app inline."""
    app = create_bias_detective_app(theme_primary_hue=theme_primary_hue)

    # Use provided values or fall back to the PORT env var and 0.0.0.0
    if server_port is None:
        server_port = int(os.environ.get("PORT", 8080))
    if server_name is None:
        server_name = "0.0.0.0"
    app.launch(share=share, server_name=server_name, server_port=server_port)


if __name__ == "__main__":
    launch_bias_detective_app()