aimodelshare 0.3.7__py3-none-any.whl → 0.3.94__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/moral_compass/__init__.py +51 -2
- aimodelshare/moral_compass/api_client.py +92 -4
- aimodelshare/moral_compass/apps/__init__.py +36 -16
- aimodelshare/moral_compass/apps/ai_consequences.py +98 -88
- aimodelshare/moral_compass/apps/bias_detective_ca.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_en.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_part1.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_part2.py +2465 -0
- aimodelshare/moral_compass/apps/bias_detective_part_es.py +2722 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +237 -147
- aimodelshare/moral_compass/apps/fairness_fixer.py +1839 -859
- aimodelshare/moral_compass/apps/fairness_fixer_ca.py +1869 -0
- aimodelshare/moral_compass/apps/fairness_fixer_en.py +1869 -0
- aimodelshare/moral_compass/apps/fairness_fixer_es.py +1869 -0
- aimodelshare/moral_compass/apps/judge.py +130 -143
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +793 -831
- aimodelshare/moral_compass/apps/justice_equity_upgrade_ca.py +815 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade_en.py +815 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade_es.py +815 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +227 -745
- aimodelshare/moral_compass/apps/model_building_app_ca.py +4399 -0
- aimodelshare/moral_compass/apps/model_building_app_ca_final.py +3899 -0
- aimodelshare/moral_compass/apps/model_building_app_en.py +4167 -0
- aimodelshare/moral_compass/apps/model_building_app_en_final.py +3869 -0
- aimodelshare/moral_compass/apps/model_building_app_es.py +4351 -0
- aimodelshare/moral_compass/apps/model_building_app_es_final.py +3899 -0
- aimodelshare/moral_compass/apps/model_building_game.py +4211 -935
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +195 -95
- aimodelshare/moral_compass/apps/what_is_ai.py +126 -117
- aimodelshare/moral_compass/challenge.py +98 -17
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.3.94.dist-info}/METADATA +1 -1
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.3.94.dist-info}/RECORD +35 -19
- aimodelshare/moral_compass/apps/bias_detective.py +0 -714
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.3.94.dist-info}/WHEEL +0 -0
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.3.94.dist-info}/licenses/LICENSE +0 -0
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.3.94.dist-info}/top_level.txt +0 -0
aimodelshare/moral_compass/apps/mc_integration_helpers.py

@@ -1,820 +1,302 @@
-"""
-Moral Compass Integration Helpers for Activities 7, 8, and 9.
-
-This module provides helper functions for integrating the Moral Compass scoring system
-into Ethics/Game apps, including:
-- ChallengeManager initialization and management
-- Debounced server synchronization
-- Team aggregation logic
-- Leaderboard generation with caching
-
-Design Rationale:
-- Client-side only scoring combination logic (server stores single moralCompassScore)
-- Debounce prevents excessive API calls while providing responsive UI
-- Team synthetic users (prefix: team:) enable team leaderboards
-- Local preview fallback ensures graceful degradation when debounced or offline
-
-Server Constraints:
-- Only existing API endpoints available (no custom metadata fields)
-- All combination logic handled client-side
-- Primary metric stored as moralCompassScore in server
-"""
-
 import os
 import time
 import logging
-from typing import
-from
+from typing import Dict, Any, Optional, List, Tuple
+from urllib.parse import urlparse

-
+from aimodelshare.moral_compass import MoralcompassApiClient, NotFoundError, ApiClientError
+from aimodelshare.moral_compass.challenge import ChallengeManager

+logger = logging.getLogger("aimodelshare.moral_compass.apps.helpers")

-#
-
-
+# Local caches
+_leaderboard_cache: Dict[str, Dict[str, Any]] = {}
+_LEADERBOARD_TTL_SECONDS = int(os.environ.get("LEADERBOARD_CACHE_SECONDS", "45"))

-def get_env_config() -> Dict[str, Any]:
-    """
-    Get environment configuration for Moral Compass integration.
-
-    Returns:
-        Dictionary with configuration values
-    """
-    return {
-        # Debounce settings
-        'DEBOUNCE_SECONDS': int(os.getenv('MC_DEBOUNCE_SECONDS', '5')),
-
-        # Scoring mode: 'product' or 'sum'
-        'SCORING_MODE': os.getenv('MC_SCORING_MODE', 'product'),
-
-        # Weights for sum mode
-        'WEIGHT_ACCURACY': float(os.getenv('MC_WEIGHT_ACC', '0.6')),
-        'WEIGHT_MORAL': float(os.getenv('MC_WEIGHT_MORAL', '0.4')),
-
-        # Normalization settings
-        'ACCURACY_FLOOR': float(os.getenv('MC_ACCURACY_FLOOR', '0.0')),
-        'MAX_MORAL_POINTS': int(os.getenv('MAX_MORAL_POINTS', '1000')),
-
-        # Cache TTL for leaderboard
-        'CACHE_TTL_SECONDS': int(os.getenv('MC_CACHE_TTL', '30')),
-    }

+def _cache_get(key: str) -> Optional[List[Dict[str, Any]]]:
+    entry = _leaderboard_cache.get(key)
+    if not entry:
+        return None
+    if (time.time() - entry.get("_ts", 0)) > _LEADERBOARD_TTL_SECONDS:
+        try:
+            del _leaderboard_cache[key]
+        except Exception:
+            pass
+        return None
+    return entry.get("data")

-# ============================================================================
-# Debounce State Management
-# ============================================================================

-
-
+def _cache_set(key: str, data: List[Dict[str, Any]]) -> None:
+    _leaderboard_cache[key] = {"data": data, "_ts": time.time()}


-def
-    """
-    Check if sync should proceed based on debounce logic.
-
-    Args:
-        username: The username to check
-        override: If True, bypass debounce check (for Force Sync)
-
-    Returns:
-        True if sync should proceed, False if debounced
-    """
-    if override:
-        return True
-
-    config = get_env_config()
-    debounce_seconds = config['DEBOUNCE_SECONDS']
-
-    last_sync = _last_sync_times.get(username, 0)
-    current_time = time.time()
-
-    return (current_time - last_sync) >= debounce_seconds
-
-
-def mark_synced(username: str) -> None:
-    """
-    Mark a username as having been synced.
-
-    Args:
-        username: The username that was synced
+def _derive_table_id() -> str:
     """
-
-
+    Derive Moral Compass table ID in the same way as the comprehensive integration test:

-
-
-
+    Priority:
+    - If TEST_TABLE_ID is provided, use it as-is.
+    - Else derive from TEST_PLAYGROUND_URL or PLAYGROUND_URL:
+      Use the last non-empty path segment and append '-mc'.

-
-
-    Get or create a ChallengeManager for a user.
-
-    Args:
-        username: The username
-        table_id: Optional table ID (auto-derived if not provided)
-
-    Returns:
-        ChallengeManager instance, or None if user not signed in
-
-    Note:
-        Requires aimodelshare.moral_compass.challenge.ChallengeManager
+    This matches tests/test_moral_compass_comprehensive_integration.py behavior so the app
+    reads/writes the same shared table.
     """
-
-
-        return
-
+    explicit = os.environ.get("TEST_TABLE_ID")
+    if explicit and explicit.strip():
+        return explicit.strip()
+
+    # Prefer TEST_PLAYGROUND_URL for parity with the integration test, fallback to PLAYGROUND_URL
+    pg_url = os.environ.get("TEST_PLAYGROUND_URL") or os.environ.get(
+        "PLAYGROUND_URL",
+        "https://example.com/playground/shared-comprehensive"
+    )
     try:
-
-
-
-        # Auto-derive table_id if not provided
-        if not table_id:
-            table_id = _derive_table_id()
-
-        # Create API client and ChallengeManager
-        api_client = MoralcompassApiClient()
-        cm = ChallengeManager(
-            table_id=table_id,
-            username=username,
-            api_client=api_client
-        )
-
-        logger.info(f"Created ChallengeManager for user={username}, table={table_id}")
-        return cm
-
+        parts = [p for p in urlparse(pg_url).path.split("/") if p]
+        playground_id = parts[-1] if parts else "shared-comprehensive"
+        return f"{playground_id}-mc"
     except Exception as e:
-        logger.
-        return
+        logger.warning(f"Failed to derive table ID from playground URL '{pg_url}': {e}")
+        return "shared-comprehensive-mc"


-def
+def _ensure_table_exists(client: MoralcompassApiClient, table_id: str, playground_url: Optional[str] = None) -> None:
     """
-
-
-    Returns:
-        Table ID string
-    """
-    # Check for explicit table ID
-    table_id = os.getenv('MORAL_COMPASS_TABLE_ID')
-    if table_id:
-        return table_id
-
-    # Try to derive from playground URL
-    playground_url = os.getenv('PLAYGROUND_URL')
-    if playground_url:
-        # Extract playground ID and append -mc suffix
-        from urllib.parse import urlparse
-        parsed = urlparse(playground_url)
-        path_parts = [p for p in parsed.path.split('/') if p]
-
-        for i, part in enumerate(path_parts):
-            if part.lower() in ['playground', 'playgrounds']:
-                if i + 1 < len(path_parts):
-                    playground_id = path_parts[i + 1]
-                    return f"{playground_id}-mc"
-
-        # Fallback to last path component
-        if path_parts:
-            return f"{path_parts[-1]}-mc"
-
-    # Default fallback
-    return "justice-equity-challenge-mc"
-
-
-# ============================================================================
-# Scoring Logic
-# ============================================================================
-
-def compute_combined_score(accuracy: float, moral_points: int,
-                           config: Optional[Dict[str, Any]] = None) -> float:
+    Ensure the table exists by mirroring the integration test's behavior.
+    If not found, create it with a display name and playground_url metadata.
     """
-
-
-
-
-
-
-
-
-
-    Note:
-        All combination logic is client-side. Server receives only the
-        final combined score as the primary metric (moralCompassScore).
-    """
-    if config is None:
-        config = get_env_config()
-
-    # Apply accuracy floor
-    accuracy_floor = config['ACCURACY_FLOOR']
-    accuracy = max(accuracy, accuracy_floor)
-
-    # Normalize moral points (0 to 1)
-    max_moral = config['MAX_MORAL_POINTS']
-    moral_normalized = min(moral_points / max_moral, 1.0) if max_moral > 0 else 0.0
-
-    # Compute combined score based on mode
-    scoring_mode = config['SCORING_MODE']
-
-    if scoring_mode == 'product':
-        # Product mode: accuracy * moral_normalized
-        combined = accuracy * moral_normalized
-    elif scoring_mode == 'sum':
-        # Weighted sum mode
-        weight_acc = config['WEIGHT_ACCURACY']
-        weight_moral = config['WEIGHT_MORAL']
-        combined = (weight_acc * accuracy) + (weight_moral * moral_normalized)
-    else:
-        logger.warning(f"Unknown scoring mode '{scoring_mode}', defaulting to product")
-        combined = accuracy * moral_normalized
-
-    logger.debug(
-        f"Combined score: accuracy={accuracy:.4f}, moral_points={moral_points}, "
-        f"moral_norm={moral_normalized:.4f}, mode={scoring_mode}, result={combined:.4f}"
-    )
-
-    return combined
-
-
-# ============================================================================
-# User Sync
-# ============================================================================
+    try:
+        client.get_table(table_id)
+        return
+    except NotFoundError:
+        pass
+    except ApiClientError as e:
+        logger.info(f"get_table error (will attempt create): {e}")
+    except Exception as e:
+        logger.info(f"Unexpected get_table error (will attempt create): {e}")

-
-
-
-
-
-) -> Dict[str, Any]:
-    """
-    Sync user's moral state to server with debounce.
-
-    Args:
-        cm: ChallengeManager instance
-        moral_points: Current moral compass points for this activity
-        accuracy: Optional accuracy value (fetched from playground if None)
-        override: If True, bypass debounce (for Force Sync button)
-
-    Returns:
-        Dictionary with sync result:
-        - 'synced': bool (True if actually synced, False if debounced)
-        - 'status': str ('synced', 'debounced', 'error')
-        - 'server_score': float (if synced)
-        - 'local_preview': float (always present)
-        - 'message': str (user-facing message)
-
-    Design Note:
-        - Seeds ChallengeManager with playground accuracy if not provided
-        - Computes combined score (accuracy * moral_normalized) client-side
-        - Stores combined score as primary metric on server
-        - Respects debounce unless override=True
-    """
-    username = cm.username
-
-    # Check debounce
-    if not should_sync(username, override=override):
-        local_preview = compute_combined_score(
-            accuracy or 0.7,  # Default accuracy for preview
-            moral_points
-        )
-        return {
-            'synced': False,
-            'status': 'debounced',
-            'local_preview': local_preview,
-            'message': f'Sync pending (debounced). Local preview: {local_preview:.4f}'
-        }
-
+    payload = {
+        "table_id": table_id,
+        "display_name": "Moral Compass Integration Test - Shared Table",
+        "playground_url": playground_url or os.environ.get("TEST_PLAYGROUND_URL") or os.environ.get("PLAYGROUND_URL"),
+    }
     try:
-
-
-
-
-        # Compute combined score
-        combined_score = compute_combined_score(accuracy, moral_points)
-
-        # Update ChallengeManager metrics
-        cm.set_metric('accuracy', accuracy, primary=False)
-        cm.set_metric('moral_points', moral_points, primary=False)
-        cm.set_metric('combined_score', combined_score, primary=True)
-
-        # Sync to server
-        response = cm.sync()
-
-        # Mark as synced
-        mark_synced(username)
-
-        server_score = response.get('moralCompassScore', combined_score)
-
-        logger.info(
-            f"User sync successful: username={username}, moral_points={moral_points}, "
-            f"accuracy={accuracy:.4f}, combined={combined_score:.4f}, "
-            f"server_score={server_score:.4f}"
-        )
-
-        return {
-            'synced': True,
-            'status': 'synced',
-            'server_score': server_score,
-            'local_preview': combined_score,
-            'message': f'✓ Synced! Server score: {server_score:.4f}'
-        }
-
+        client.create_table(**payload)
+        # optional brief delay is handled in tests; here we rely on backend immediacy
+        logger.info(f"Created Moral Compass table: {table_id}")
     except Exception as e:
-        logger.
-        local_preview = compute_combined_score(accuracy or 0.7, moral_points)
-        return {
-            'synced': False,
-            'status': 'error',
-            'local_preview': local_preview,
-            'error': str(e),
-            'message': f'⚠️ Sync error. Local preview: {local_preview:.4f}'
-        }
+        logger.warning(f"Failed to create Moral Compass table '{table_id}': {e}")


-def
+def get_challenge_manager(username: str, auth_token: Optional[str] = None) -> Optional[ChallengeManager]:
     """
-
-
-
-
-
-    Returns:
-        Accuracy value (0.0 to 1.0), defaults to 0.7 if not found
-
-    Note:
-        Uses playground.get_leaderboard() to fetch accuracy data
+    Create or retrieve a ChallengeManager for the given user.
+
+    Uses derived table_id and MoralcompassApiClient. Ensures the table exists first
+    to avoid missing-rank issues.
     """
     try:
-
-
-
-
-
-
-        leaderboard = playground.get_leaderboard()
-
-        # Find user's entry
-        for entry in leaderboard:
-            if entry.get('username') == username or entry.get('user') == username:
-                # Get accuracy (might be stored as 'accuracy', 'score', or 'test_accuracy')
-                accuracy = (
-                    entry.get('accuracy') or
-                    entry.get('test_accuracy') or
-                    entry.get('score', 0.7)
-                )
-                logger.debug(f"Fetched accuracy for {username}: {accuracy}")
-                return float(accuracy)
-
-        logger.warning(f"User {username} not found in leaderboard, using default 0.7")
-        return 0.7
-
-    except Exception as e:
-        logger.error(f"Failed to fetch playground accuracy: {e}")
-        return 0.7
+        table_id = _derive_table_id()
+        api_base_url = os.environ.get("MORAL_COMPASS_API_BASE_URL")
+        client = MoralcompassApiClient(api_base_url=api_base_url, auth_token=auth_token) if api_base_url else MoralcompassApiClient(auth_token=auth_token)
+
+        # Ensure table exists (matches integration-test behavior)
+        _ensure_table_exists(client, table_id, playground_url=os.environ.get("TEST_PLAYGROUND_URL") or os.environ.get("PLAYGROUND_URL"))

+        manager = ChallengeManager(table_id=table_id, username=username, api_client=client)
+        return manager
+    except Exception as e:
+        logger.error(f"Failed to initialize ChallengeManager for {username}: {e}")
+        return None

-# ============================================================================
-# Team Sync
-# ============================================================================

-def
+def sync_user_moral_state(cm: ChallengeManager, moral_points: int, accuracy: float) -> Dict[str, Any]:
     """
-    Sync
-
-    Args:
-        team_name: The team name
-        table_id: Optional table ID (auto-derived if not provided)
-
-    Returns:
-        Dictionary with sync result (same structure as sync_user_moral_state)
-
-    Design Note:
-        - Aggregates member accuracy from playground.get_leaderboard()
-        - Aggregates member moral scores from moral_compass.list_users()
-        - Computes team combined score (avg_accuracy * avg_moral_norm)
-        - Persists as synthetic user with username = 'team:<TeamName>'
+    Sync user's moral compass metrics using ChallengeManager.
     """
-    if not team_name:
-        return {
-            'synced': False,
-            'status': 'error',
-            'message': 'No team name provided'
-        }
-
     try:
-
-
-
-
-
-
-
-        if not team_data['members']:
-            logger.warning(f"No team members found for team '{team_name}'")
-            return {
-                'synced': False,
-                'status': 'error',
-                'message': f'No members found for team {team_name}'
-            }
-
-        # Compute team combined score
-        avg_accuracy = team_data['avg_accuracy']
-        avg_moral_points = team_data['avg_moral_points']
-
-        combined_score = compute_combined_score(avg_accuracy, int(avg_moral_points))
-
-        # Create synthetic team user
-        from aimodelshare.moral_compass.api_client import MoralcompassApiClient
-
-        api_client = MoralcompassApiClient()
-        team_username = f"team:{team_name}"
-
-        # Update team entry
-        response = api_client.update_moral_compass(
-            table_id=table_id,
-            username=team_username,
-            metrics={
-                'accuracy': avg_accuracy,
-                'moral_points': avg_moral_points,
-                'combined_score': combined_score,
-                'member_count': len(team_data['members'])
-            },
-            tasks_completed=0,
-            total_tasks=0,
-            questions_correct=0,
-            total_questions=0,
-            primary_metric='combined_score'
-        )
-
-        server_score = response.get('moralCompassScore', combined_score)
-
-        logger.info(
-            f"Team sync successful: team={team_name}, members={len(team_data['members'])}, "
-            f"avg_accuracy={avg_accuracy:.4f}, avg_moral={avg_moral_points:.1f}, "
-            f"combined={combined_score:.4f}, server_score={server_score:.4f}"
-        )
-
-        return {
-            'synced': True,
-            'status': 'synced',
-            'server_score': server_score,
-            'local_preview': combined_score,
-            'message': f'✓ Team synced! Score: {server_score:.4f}'
+        cm.set_metric('accuracy', accuracy, primary=True if cm.primary_metric is None else False)
+        cm.set_progress(tasks_completed=moral_points, total_tasks=cm.total_tasks)
+        result = cm.sync()
+        merged = {
+            "synced": True,
+            "status": "ok",
+            "local_preview": cm.get_local_score(),
         }
-
+        # Merge server payload keys if present (e.g., moralCompassScore)
+        if isinstance(result, dict):
+            merged.update(result)
+        return merged
     except Exception as e:
-        logger.
+        logger.warning(f"User sync failed for {cm.username}: {e}")
         return {
-
-
-
-
+            "synced": False,
+            "status": "error",
+            "local_preview": cm.get_local_score(),
+            "error": str(e),
+            "message": "⚠️ Sync error. Local preview: {:.4f}".format(cm.get_local_score())
         }


-def
+def sync_team_state(team_name: str) -> Dict[str, Any]:
     """
-
-
-    Args:
-        team_name: The team name
-        table_id: The table ID
-
-    Returns:
-        Dictionary with:
-        - 'members': List of member usernames
-        - 'avg_accuracy': Average accuracy across members
-        - 'avg_moral_points': Average moral points across members
+    Placeholder for team sync. Implement as needed when team endpoints are available.
     """
-
-
-
-
-        if not team_members:
-            logger.warning(f"No team members configured for team '{team_name}'")
-            return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}
-
-        # Fetch accuracy data from playground
-        accuracy_data = _fetch_team_accuracy_data(team_members)
-
-        # Fetch moral compass data
-        moral_data = _fetch_team_moral_data(team_members, table_id)
-
-        # Compute averages
-        valid_members = set(accuracy_data.keys()) & set(moral_data.keys())
-
-        if not valid_members:
-            return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}
-
-        avg_accuracy = sum(accuracy_data[m] for m in valid_members) / len(valid_members)
-        avg_moral = sum(moral_data[m] for m in valid_members) / len(valid_members)
-
-        return {
-            'members': list(valid_members),
-            'avg_accuracy': avg_accuracy,
-            'avg_moral_points': avg_moral
-        }
-
-    except Exception as e:
-        logger.error(f"Failed to aggregate team data: {e}")
-        return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}
+    # In current backend, teams are inferred from user rows (teamName field).
+    # This function is kept for API parity and future expansion.
+    return {"synced": False, "status": "error", "message": f"No members found for team {team_name}"}


-def
-    """
-    Get list of team members.
-
-    Args:
-        team_name: The team name
-
-    Returns:
-        List of member usernames
-
-    Note:
-        Currently reads from TEAM_MEMBERS environment variable (comma-separated).
-        Future enhancement: read from team registry or user profiles.
-    """
-    # Check environment variable
-    members_str = os.getenv('TEAM_MEMBERS', '')
-    if members_str:
-        return [m.strip() for m in members_str.split(',') if m.strip()]
-
-    # Fallback: try to infer from current user
-    username = os.getenv('username')
-    if username:
-        return [username]
-
-    return []
-
-
-def _fetch_team_accuracy_data(members: List[str]) -> Dict[str, float]:
+def fetch_cached_users(table_id: str, ttl: int = _LEADERBOARD_TTL_SECONDS) -> List[Dict[str, Any]]:
     """
-    Fetch
-
-
-
-
-
-
+    Fetch and cache users for a table, exposing moralCompassScore for ranking computations.
+
+    Returns a list of dicts with keys:
+    - username
+    - moralCompassScore (fallback to totalCount if missing)
+    - submissionCount
+    - totalCount
+    - teamName (if present)
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            accuracy_data[username] = float(accuracy)
-
-        return accuracy_data
-
-    except Exception as e:
-        logger.error(f"Failed to fetch team accuracy data: {e}")
-        return {}
+    cached = _cache_get(table_id)
+    if cached is not None:
+        return cached
+
+    client = MoralcompassApiClient(api_base_url=os.environ.get("MORAL_COMPASS_API_BASE_URL"))
+    resp = client.list_users(table_id, limit=100)
+    users = resp.get("users", []) if isinstance(resp, dict) else []
+
+    # Normalize fields and fallback
+    normalized: List[Dict[str, Any]] = []
+    for u in users:
+        normalized.append({
+            "username": u.get("username"),
+            "moralCompassScore": u.get("moralCompassScore", u.get("totalCount", 0)),
+            "submissionCount": u.get("submissionCount", 0),
+            "totalCount": u.get("totalCount", 0),
+            "teamName": u.get("teamName")
+        })

+    _cache_set(table_id, normalized)
+    return normalized

-
+
+def get_user_ranks(username: str, table_id: Optional[str] = None, team_name: Optional[str] = None) -> Dict[str, Any]:
     """
-
-
-    Args:
-        members: List of member usernames
-        table_id: The table ID
-
+    Compute ranks for a user based on moralCompassScore from list_users.
+
     Returns:
-
+        {
+            "individual_rank": Optional[int],
+            "team_rank": Optional[int],
+            "moral_compass_score": Optional[float],
+            "team_name": Optional[str]
+        }
     """
-
-
-
-    api_client = MoralcompassApiClient()
-
-    moral_data = {}
-    for username in members:
-        try:
-            user_stats = api_client.get_user(table_id, username)
-            # Extract moral points from moralCompassScore (reverse normalization estimate)
-            # This is an approximation; ideally we'd store raw points separately
-            moral_score = user_stats.total_count if hasattr(user_stats, 'total_count') else 0
-            moral_data[username] = float(moral_score)
-        except Exception as e:
-            logger.debug(f"Could not fetch moral data for {username}: {e}")
-            continue
-
-    return moral_data
-
-    except Exception as e:
-        logger.error(f"Failed to fetch team moral data: {e}")
-        return {}
+    table_id = table_id or _derive_table_id()
+    users = fetch_cached_users(table_id)

+    # Individual ranks sorted by moralCompassScore desc, then submissionCount desc
+    sorted_users = sorted(users, key=lambda x: (float(x.get("moralCompassScore", 0) or 0.0), x.get("submissionCount", 0)), reverse=True)

-
-
-
+    individual_rank = None
+    moral_score = None
+    user_team = None

-
-
+    for idx, u in enumerate(sorted_users, start=1):
+        if u.get("username") == username:
+            individual_rank = idx
+            try:
+                moral_score = float(u.get("moralCompassScore", 0) or 0.0)
+            except Exception:
+                moral_score = None
+            user_team = u.get("teamName")
+            break
+
+    team_rank = None
+    # Compute team rank if provided
+    if team_name:
+        # Aggregate team entries where username starts with 'team:' or matches teamName
+        team_users = [u for u in sorted_users if u.get("username", "").startswith("team:") or u.get("teamName")]
+        # Create team scores grouped by teamName or 'team:<name>' entries
+        team_scores: Dict[str, float] = {}
+        for u in team_users:
+            tname = u.get("teamName")
+            uname = u.get("username", "")
+            if uname.startswith("team:"):
+                tname = uname.split("team:", 1)[-1]
+            if not tname:
+                continue
+            try:
+                score = float(u.get("moralCompassScore", 0) or 0.0)
+            except Exception:
+                score = 0.0
+            team_scores[tname] = max(team_scores.get(tname, 0.0), score)

+        sorted_teams = sorted(team_scores.items(), key=lambda kv: kv[1], reverse=True)
+        for idx, (tname, _) in enumerate(sorted_teams, start=1):
+            if tname == team_name:
+                team_rank = idx
+                break

-
-
-
-
-
-        ttl: Cache TTL in seconds (default: 30)
-
-    Returns:
-        List of user dictionaries with fields:
-        - 'username': str
-        - 'moralCompassScore': float
-        - 'submissionCount': int (if available)
-        - 'totalCount': int (if available)
-    """
-    if not table_id:
-        table_id = _derive_table_id()
-
-    # Check cache
-    cache_key = table_id
-    if cache_key in _leaderboard_cache:
-        cache_time, cached_data = _leaderboard_cache[cache_key]
-        if (time.time() - cache_time) < ttl:
-            logger.debug(f"Using cached leaderboard for table {table_id}")
-            return cached_data
-
-    # Fetch from API
-    try:
-        from aimodelshare.moral_compass.api_client import MoralcompassApiClient
-
-        api_client = MoralcompassApiClient()
-        users = list(api_client.iter_users(table_id))
-
-        # Convert to dict format
-        user_list = []
-        for user in users:
-            user_list.append({
-                'username': user.username,
-                'moralCompassScore': user.total_count,  # Assuming total_count stores combined score
-                'submissionCount': user.submission_count,
-                'totalCount': user.total_count
-            })
-
-        # Update cache
-        _leaderboard_cache[cache_key] = (time.time(), user_list)
-
-        logger.info(f"Fetched {len(user_list)} users for table {table_id}")
-        return user_list
-
-    except Exception as e:
-        logger.error(f"Failed to fetch users for leaderboard: {e}")
-        return []
+    return {
+        "individual_rank": individual_rank,
+        "team_rank": team_rank,
+        "moral_compass_score": moral_score,
+        "team_name": user_team
+    }


-def build_moral_leaderboard_html(
-    highlight_username: Optional[str] = None,
-    include_teams: bool = True,
-    table_id: Optional[str] = None,
-    max_entries: int = 20
-) -> str:
+def build_moral_leaderboard_html(table_id: Optional[str] = None, max_entries: Optional[int] = 20) -> str:
     """
-    Build HTML
-
-    Args:
-        highlight_username: Username to highlight (current user)
-        include_teams: If True, include team entries
-        table_id: Optional table ID (auto-derived if not provided)
-        max_entries: Maximum number of entries to display
-
-    Returns:
-        HTML string with leaderboard table
-
-    Note:
-        Uses same styling classes as model_building_game:
-        - leaderboard-html-table
-        - user-row-highlight
+    Build a simple leaderboard HTML from list_users data sorted by moralCompassScore.
     """
+    table_id = table_id or _derive_table_id()
     users = fetch_cached_users(table_id)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    html = """
-    <table class='leaderboard-html-table'>
-        <thead>
-            <tr>
-                <th>Rank</th>
-                <th>Name</th>
-                <th>Moral Compass Score</th>
-                <th>Type</th>
-            </tr>
-        </thead>
+    if max_entries is not None:
+        users = users[:max_entries]
+
+    rows = []
+    for idx, u in enumerate(users, start=1):
+        uname = u.get("username") or ""
+        score = u.get("moralCompassScore", 0)
+        try:
+            score_float = float(score or 0.0)
+        except Exception:
+            score_float = 0.0
+        rows.append(f"<tr><td>{idx}</td><td>{uname}</td><td>{score_float:.4f}</td></tr>")
+
+    html = f"""
+    <div class="mc-leaderboard">
+      <h3>Moral Compass Leaderboard</h3>
+      <table>
+        <thead><tr><th>#</th><th>User</th><th>Score</th></tr></thead>
         <tbody>
-
-
-    for rank, user in enumerate(users_sorted, start=1):
-        username = user['username']
-        score = user['moralCompassScore']
-
-        is_team = username.startswith('team:')
-        display_name = username[5:] if is_team else username  # Remove 'team:' prefix
-        entry_type = '👥 Team' if is_team else '👤 User'
-
-        # Highlight current user
-        highlight = username == highlight_username
-        row_class = "class='user-row-highlight'" if highlight else ""
-
-        html += f"""
-        <tr {row_class}>
-            <td>{rank}</td>
-            <td>{display_name}</td>
-            <td>{score:.4f}</td>
-            <td>{entry_type}</td>
-        </tr>
-        """
-
-    html += """
+        {''.join(rows) if rows else '<tr><td colspan="3">No users yet</td></tr>'}
         </tbody>
-
+      </table>
+    </div>
     """
-
     return html


-
-# Convenience Functions
-# ============================================================================
-
-def get_moral_compass_widget_html(
-    local_points: int,
-    server_score: Optional[float] = None,
-    is_synced: bool = False
-) -> str:
+def get_moral_compass_widget_html(username: str, table_id: Optional[str] = None) -> str:
     """
-
-
-    Args:
-        local_points: Local moral points accumulated
-        server_score: Server moral compass score (if synced)
-        is_synced: Whether currently synced
-
-    Returns:
-        HTML string for widget display
+    Build a minimal widget HTML showing the user's current moral compass score and rank.
     """
-
-
-
+    table_id = table_id or _derive_table_id()
+    ranks = get_user_ranks(username=username, table_id=table_id)
+
+    rank_text = f"#{ranks['individual_rank']}" if ranks.get("individual_rank") is not None else "N/A"
+    score = ranks.get("moral_compass_score")
+    score_text = f"{score:.4f}" if isinstance(score, (int, float)) else "N/A"
+
     html = f"""
-    <div
-
-
-
-        <div style='text-align: center; margin: 10px;'>
-            <div style='font-size: 0.9rem; color: var(--text-muted);'>Local Points</div>
-            <div style='font-size: 2rem; font-weight: bold; color: var(--accent-strong);'>
-                {local_points}
-            </div>
-        </div>
-    """
-
-    if server_score is not None:
-        html += f"""
-        <div style='text-align: center; margin: 10px;'>
-            <div style='font-size: 0.9rem; color: var(--text-muted);'>Server Score {status_icon}</div>
-            <div style='font-size: 2rem; font-weight: bold; color: var(--accent-strong);'>
-                {server_score:.4f}
-            </div>
-            <div style='font-size: 0.8rem; color: var(--text-muted);'>{status_text}</div>
-        </div>
-    """
-
-    html += """
-    </div>
+    <div class="mc-widget">
+      <p><strong>User:</strong> {username}</p>
+      <p><strong>Rank:</strong> {rank_text}</p>
+      <p><strong>Moral Compass Score:</strong> {score_text}</p>
     </div>
     """
-
    return html