aimodelshare-0.3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
@@ -0,0 +1,820 @@
"""
Moral Compass Integration Helpers for Activities 7, 8, and 9.

This module provides helper functions for integrating the Moral Compass scoring system
into Ethics/Game apps, including:
- ChallengeManager initialization and management
- Debounced server synchronization
- Team aggregation logic
- Leaderboard generation with caching

Design Rationale:
- Client-side-only scoring combination logic (the server stores a single moralCompassScore)
- Debounce prevents excessive API calls while keeping the UI responsive
- Team synthetic users (prefix: team:) enable team leaderboards
- Local preview fallback ensures graceful degradation when debounced or offline

Server Constraints:
- Only existing API endpoints are available (no custom metadata fields)
- All combination logic is handled client-side
- The primary metric is stored as moralCompassScore on the server
"""

import os
import time
import logging
from typing import Optional, Dict, Any, List, Tuple
from datetime import datetime, timedelta

logger = logging.getLogger("aimodelshare.moral_compass.apps")

# ============================================================================
# Environment Configuration
# ============================================================================

def get_env_config() -> Dict[str, Any]:
    """
    Get environment configuration for Moral Compass integration.

    Returns:
        Dictionary with configuration values
    """
    return {
        # Debounce settings
        'DEBOUNCE_SECONDS': int(os.getenv('MC_DEBOUNCE_SECONDS', '5')),

        # Scoring mode: 'product' or 'sum'
        'SCORING_MODE': os.getenv('MC_SCORING_MODE', 'product'),

        # Weights for sum mode
        'WEIGHT_ACCURACY': float(os.getenv('MC_WEIGHT_ACC', '0.6')),
        'WEIGHT_MORAL': float(os.getenv('MC_WEIGHT_MORAL', '0.4')),

        # Normalization settings
        'ACCURACY_FLOOR': float(os.getenv('MC_ACCURACY_FLOOR', '0.0')),
        'MAX_MORAL_POINTS': int(os.getenv('MAX_MORAL_POINTS', '1000')),

        # Cache TTL for leaderboard
        'CACHE_TTL_SECONDS': int(os.getenv('MC_CACHE_TTL', '30')),
    }

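All of the knobs above are plain environment variables, so a deployment can retune scoring without code changes. A minimal sketch of overriding the defaults before using the helpers (the values shown are illustrative, not recommendations):

    import os

    os.environ['MC_SCORING_MODE'] = 'sum'        # weighted sum instead of product
    os.environ['MC_WEIGHT_ACC'] = '0.5'
    os.environ['MC_WEIGHT_MORAL'] = '0.5'
    os.environ['MC_DEBOUNCE_SECONDS'] = '2'      # sync more eagerly

    from aimodelshare.moral_compass.apps.mc_integration_helpers import get_env_config
    assert get_env_config()['SCORING_MODE'] == 'sum'

Note that get_env_config() re-reads the environment on every call, so changes take effect immediately; also note that MAX_MORAL_POINTS is the one setting without the MC_ prefix.
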
# ============================================================================
# Debounce State Management
# ============================================================================

# Global state for debounce tracking
_last_sync_times: Dict[str, float] = {}


def should_sync(username: str, override: bool = False) -> bool:
    """
    Check if sync should proceed based on debounce logic.

    Args:
        username: The username to check
        override: If True, bypass debounce check (for Force Sync)

    Returns:
        True if sync should proceed, False if debounced
    """
    if override:
        return True

    config = get_env_config()
    debounce_seconds = config['DEBOUNCE_SECONDS']

    last_sync = _last_sync_times.get(username, 0)
    current_time = time.time()

    return (current_time - last_sync) >= debounce_seconds


def mark_synced(username: str) -> None:
    """
    Mark a username as having been synced.

    Args:
        username: The username that was synced
    """
    _last_sync_times[username] = time.time()

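Together, should_sync and mark_synced form a simple per-user rate limiter: a sync proceeds only if at least DEBOUNCE_SECONDS have elapsed since that user's last recorded sync. The intended call pattern, sketched with an illustrative username and hypothetical stand-in functions:

    if should_sync('student1'):
        push_to_server()         # hypothetical stand-in for the real API call
        mark_synced('student1')
    else:
        show_local_preview()     # hypothetical UI fallback while debounced

Because _last_sync_times is module-level state, the debounce window is shared by all apps running in the same Python process.
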
# ============================================================================
# ChallengeManager Initialization
# ============================================================================

def get_challenge_manager(username: str, table_id: Optional[str] = None) -> Optional['ChallengeManager']:
    """
    Get or create a ChallengeManager for a user.

    Args:
        username: The username
        table_id: Optional table ID (auto-derived if not provided)

    Returns:
        ChallengeManager instance, or None if user not signed in

    Note:
        Requires aimodelshare.moral_compass.challenge.ChallengeManager
    """
    if not username or username.lower() == 'guest':
        logger.debug("Cannot create ChallengeManager for guest user")
        return None

    try:
        from aimodelshare.moral_compass.challenge import ChallengeManager
        from aimodelshare.moral_compass.api_client import MoralcompassApiClient

        # Auto-derive table_id if not provided
        if not table_id:
            table_id = _derive_table_id()

        # Create API client and ChallengeManager
        api_client = MoralcompassApiClient()
        cm = ChallengeManager(
            table_id=table_id,
            username=username,
            api_client=api_client
        )

        logger.info(f"Created ChallengeManager for user={username}, table={table_id}")
        return cm

    except Exception as e:
        logger.error(f"Failed to create ChallengeManager: {e}")
        return None


def _derive_table_id() -> str:
    """
    Auto-derive table ID from environment or use default.

    Returns:
        Table ID string
    """
    # Check for explicit table ID
    table_id = os.getenv('MORAL_COMPASS_TABLE_ID')
    if table_id:
        return table_id

    # Try to derive from playground URL
    playground_url = os.getenv('PLAYGROUND_URL')
    if playground_url:
        # Extract playground ID and append -mc suffix
        from urllib.parse import urlparse
        parsed = urlparse(playground_url)
        path_parts = [p for p in parsed.path.split('/') if p]

        for i, part in enumerate(path_parts):
            if part.lower() in ['playground', 'playgrounds']:
                if i + 1 < len(path_parts):
                    playground_id = path_parts[i + 1]
                    return f"{playground_id}-mc"

        # Fallback to last path component
        if path_parts:
            return f"{path_parts[-1]}-mc"

    # Default fallback
    return "justice-equity-challenge-mc"

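The derivation order is: explicit MORAL_COMPASS_TABLE_ID, then the playground-URL heuristic, then a hard-coded default. A small check with illustrative values (exercising the private helper directly):

    import os

    os.environ.pop('MORAL_COMPASS_TABLE_ID', None)
    os.environ['PLAYGROUND_URL'] = 'https://api.example.com/playgrounds/abc123'
    assert _derive_table_id() == 'abc123-mc'

    os.environ['MORAL_COMPASS_TABLE_ID'] = 'my-table'
    assert _derive_table_id() == 'my-table'    # explicit ID always wins
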
# ============================================================================
# Scoring Logic
# ============================================================================

def compute_combined_score(accuracy: float, moral_points: int,
                           config: Optional[Dict[str, Any]] = None) -> float:
    """
    Compute combined ethical + accuracy score.

    Args:
        accuracy: Accuracy value (0.0 to 1.0)
        moral_points: Raw moral compass points
        config: Optional config dict (uses env defaults if None)

    Returns:
        Combined score as float

    Note:
        All combination logic is client-side. Server receives only the
        final combined score as the primary metric (moralCompassScore).
    """
    if config is None:
        config = get_env_config()

    # Apply accuracy floor
    accuracy_floor = config['ACCURACY_FLOOR']
    accuracy = max(accuracy, accuracy_floor)

    # Normalize moral points (0 to 1)
    max_moral = config['MAX_MORAL_POINTS']
    moral_normalized = min(moral_points / max_moral, 1.0) if max_moral > 0 else 0.0

    # Compute combined score based on mode
    scoring_mode = config['SCORING_MODE']

    if scoring_mode == 'product':
        # Product mode: accuracy * moral_normalized
        combined = accuracy * moral_normalized
    elif scoring_mode == 'sum':
        # Weighted sum mode
        weight_acc = config['WEIGHT_ACCURACY']
        weight_moral = config['WEIGHT_MORAL']
        combined = (weight_acc * accuracy) + (weight_moral * moral_normalized)
    else:
        logger.warning(f"Unknown scoring mode '{scoring_mode}', defaulting to product")
        combined = accuracy * moral_normalized

    logger.debug(
        f"Combined score: accuracy={accuracy:.4f}, moral_points={moral_points}, "
        f"moral_norm={moral_normalized:.4f}, mode={scoring_mode}, result={combined:.4f}"
    )

    return combined

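A worked example under the default settings (MAX_MORAL_POINTS=1000, sum-mode weights 0.6/0.4): accuracy=0.8 and moral_points=500 normalize to moral_norm=0.5, giving 0.8 * 0.5 = 0.40 in product mode and 0.6 * 0.8 + 0.4 * 0.5 = 0.68 in sum mode:

    cfg = get_env_config()
    product = compute_combined_score(0.8, 500, {**cfg, 'SCORING_MODE': 'product'})
    weighted = compute_combined_score(0.8, 500, {**cfg, 'SCORING_MODE': 'sum'})
    assert abs(product - 0.40) < 1e-9
    assert abs(weighted - 0.68) < 1e-9

Product mode zeroes the combined score whenever accuracy is zero, which is what the ACCURACY_FLOOR setting guards against.
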
# ============================================================================
# User Sync
# ============================================================================

def sync_user_moral_state(
    cm: 'ChallengeManager',
    moral_points: int,
    accuracy: Optional[float] = None,
    override: bool = False
) -> Dict[str, Any]:
    """
    Sync user's moral state to server with debounce.

    Args:
        cm: ChallengeManager instance
        moral_points: Current moral compass points for this activity
        accuracy: Optional accuracy value (fetched from playground if None)
        override: If True, bypass debounce (for Force Sync button)

    Returns:
        Dictionary with sync result:
        - 'synced': bool (True if actually synced, False if debounced)
        - 'status': str ('synced', 'debounced', 'error')
        - 'server_score': float (if synced)
        - 'local_preview': float (always present)
        - 'message': str (user-facing message)

    Design Note:
        - Seeds ChallengeManager with playground accuracy if not provided
        - Computes combined score (accuracy * moral_normalized) client-side
        - Stores combined score as primary metric on server
        - Respects debounce unless override=True
    """
    username = cm.username

    # Check debounce
    if not should_sync(username, override=override):
        # Explicit None check so a legitimate accuracy of 0.0 is not
        # silently replaced by the 0.7 preview default.
        local_preview = compute_combined_score(
            accuracy if accuracy is not None else 0.7,  # Default accuracy for preview
            moral_points
        )
        return {
            'synced': False,
            'status': 'debounced',
            'local_preview': local_preview,
            'message': f'Sync pending (debounced). Local preview: {local_preview:.4f}'
        }

    try:
        # Fetch accuracy from playground if not provided
        if accuracy is None:
            accuracy = _fetch_playground_accuracy(username)

        # Compute combined score
        combined_score = compute_combined_score(accuracy, moral_points)

        # Update ChallengeManager metrics
        cm.set_metric('accuracy', accuracy, primary=False)
        cm.set_metric('moral_points', moral_points, primary=False)
        cm.set_metric('combined_score', combined_score, primary=True)

        # Sync to server
        response = cm.sync()

        # Mark as synced
        mark_synced(username)

        server_score = response.get('moralCompassScore', combined_score)

        logger.info(
            f"User sync successful: username={username}, moral_points={moral_points}, "
            f"accuracy={accuracy:.4f}, combined={combined_score:.4f}, "
            f"server_score={server_score:.4f}"
        )

        return {
            'synced': True,
            'status': 'synced',
            'server_score': server_score,
            'local_preview': combined_score,
            'message': f'✓ Synced! Server score: {server_score:.4f}'
        }

    except Exception as e:
        logger.error(f"User sync failed for {username}: {e}")
        local_preview = compute_combined_score(
            accuracy if accuracy is not None else 0.7, moral_points
        )
        return {
            'synced': False,
            'status': 'error',
            'local_preview': local_preview,
            'error': str(e),
            'message': f'⚠️ Sync error. Local preview: {local_preview:.4f}'
        }


def _fetch_playground_accuracy(username: str) -> float:
    """
    Fetch user's accuracy from playground leaderboard.

    Args:
        username: The username

    Returns:
        Accuracy value (0.0 to 1.0), defaults to 0.7 if not found

    Note:
        Uses playground.get_leaderboard() to fetch accuracy data
    """
    try:
        from aimodelshare.playground import Competition

        playground_url = os.getenv('PLAYGROUND_URL',
                                   'https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m')

        playground = Competition(playground_url)
        leaderboard = playground.get_leaderboard()

        # Find user's entry
        for entry in leaderboard:
            if entry.get('username') == username or entry.get('user') == username:
                # Get accuracy (might be stored as 'accuracy', 'score', or 'test_accuracy')
                accuracy = (
                    entry.get('accuracy') or
                    entry.get('test_accuracy') or
                    entry.get('score', 0.7)
                )
                logger.debug(f"Fetched accuracy for {username}: {accuracy}")
                return float(accuracy)

        logger.warning(f"User {username} not found in leaderboard, using default 0.7")
        return 0.7

    except Exception as e:
        logger.error(f"Failed to fetch playground accuracy: {e}")
        return 0.7

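A minimal sketch of the user-sync path as an app would drive it (the username and point totals are illustrative, and a signed-in user plus a reachable server are assumed):

    cm = get_challenge_manager('student1')
    if cm is not None:
        result = sync_user_moral_state(cm, moral_points=350, accuracy=0.82)
        if result['status'] == 'synced':
            print(result['server_score'])    # authoritative server value
        else:
            print(result['message'])         # debounced or error: show local preview

Passing accuracy explicitly avoids the leaderboard round-trip in _fetch_playground_accuracy(); omitting it works but costs one get_leaderboard() call per sync.
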
# ============================================================================
# Team Sync
# ============================================================================

def sync_team_state(team_name: str, table_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Sync team aggregated state to server.

    Args:
        team_name: The team name
        table_id: Optional table ID (auto-derived if not provided)

    Returns:
        Dictionary with sync result (same structure as sync_user_moral_state)

    Design Note:
        - Aggregates member accuracy from playground.get_leaderboard()
        - Aggregates member moral scores from moral_compass.list_users()
        - Computes team combined score (avg_accuracy * avg_moral_norm)
        - Persists as synthetic user with username = 'team:<TeamName>'
    """
    if not team_name:
        return {
            'synced': False,
            'status': 'error',
            'message': 'No team name provided'
        }

    try:
        # Auto-derive table_id if not provided
        if not table_id:
            table_id = _derive_table_id()

        # Get team members and their data
        team_data = _aggregate_team_data(team_name, table_id)

        if not team_data['members']:
            logger.warning(f"No team members found for team '{team_name}'")
            return {
                'synced': False,
                'status': 'error',
                'message': f'No members found for team {team_name}'
            }

        # Compute team combined score
        avg_accuracy = team_data['avg_accuracy']
        avg_moral_points = team_data['avg_moral_points']

        combined_score = compute_combined_score(avg_accuracy, int(avg_moral_points))

        # Create synthetic team user
        from aimodelshare.moral_compass.api_client import MoralcompassApiClient

        api_client = MoralcompassApiClient()
        team_username = f"team:{team_name}"

        # Update team entry
        response = api_client.update_moral_compass(
            table_id=table_id,
            username=team_username,
            metrics={
                'accuracy': avg_accuracy,
                'moral_points': avg_moral_points,
                'combined_score': combined_score,
                'member_count': len(team_data['members'])
            },
            tasks_completed=0,
            total_tasks=0,
            questions_correct=0,
            total_questions=0,
            primary_metric='combined_score'
        )

        server_score = response.get('moralCompassScore', combined_score)

        logger.info(
            f"Team sync successful: team={team_name}, members={len(team_data['members'])}, "
            f"avg_accuracy={avg_accuracy:.4f}, avg_moral={avg_moral_points:.1f}, "
            f"combined={combined_score:.4f}, server_score={server_score:.4f}"
        )

        return {
            'synced': True,
            'status': 'synced',
            'server_score': server_score,
            'local_preview': combined_score,
            'message': f'✓ Team synced! Score: {server_score:.4f}'
        }

    except Exception as e:
        logger.error(f"Team sync failed for '{team_name}': {e}")
        return {
            'synced': False,
            'status': 'error',
            'error': str(e),
            'message': f'⚠️ Team sync error: {str(e)}'
        }


def _aggregate_team_data(team_name: str, table_id: str) -> Dict[str, Any]:
    """
    Aggregate data for all team members.

    Args:
        team_name: The team name
        table_id: The table ID

    Returns:
        Dictionary with:
        - 'members': List of member usernames
        - 'avg_accuracy': Average accuracy across members
        - 'avg_moral_points': Average moral points across members
    """
    try:
        # Get team members from environment or use heuristic
        team_members = _get_team_members(team_name)

        if not team_members:
            logger.warning(f"No team members configured for team '{team_name}'")
            return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}

        # Fetch accuracy data from playground
        accuracy_data = _fetch_team_accuracy_data(team_members)

        # Fetch moral compass data
        moral_data = _fetch_team_moral_data(team_members, table_id)

        # Compute averages
        valid_members = set(accuracy_data.keys()) & set(moral_data.keys())

        if not valid_members:
            return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}

        avg_accuracy = sum(accuracy_data[m] for m in valid_members) / len(valid_members)
        avg_moral = sum(moral_data[m] for m in valid_members) / len(valid_members)

        return {
            'members': list(valid_members),
            'avg_accuracy': avg_accuracy,
            'avg_moral_points': avg_moral
        }

    except Exception as e:
        logger.error(f"Failed to aggregate team data: {e}")
        return {'members': [], 'avg_accuracy': 0.0, 'avg_moral_points': 0.0}


def _get_team_members(team_name: str) -> List[str]:
    """
    Get list of team members.

    Args:
        team_name: The team name

    Returns:
        List of member usernames

    Note:
        Currently reads from TEAM_MEMBERS environment variable (comma-separated).
        Future enhancement: read from team registry or user profiles.
    """
    # Check environment variable
    members_str = os.getenv('TEAM_MEMBERS', '')
    if members_str:
        return [m.strip() for m in members_str.split(',') if m.strip()]

    # Fallback: try to infer from current user
    username = os.getenv('username')
    if username:
        return [username]

    return []


def _fetch_team_accuracy_data(members: List[str]) -> Dict[str, float]:
    """
    Fetch accuracy data for team members from playground.

    Args:
        members: List of member usernames

    Returns:
        Dictionary mapping username -> accuracy
    """
    try:
        from aimodelshare.playground import Competition

        playground_url = os.getenv('PLAYGROUND_URL',
                                   'https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m')

        playground = Competition(playground_url)
        leaderboard = playground.get_leaderboard()

        accuracy_data = {}
        for entry in leaderboard:
            username = entry.get('username') or entry.get('user')
            if username in members:
                accuracy = (
                    entry.get('accuracy') or
                    entry.get('test_accuracy') or
                    entry.get('score', 0.7)
                )
                accuracy_data[username] = float(accuracy)

        return accuracy_data

    except Exception as e:
        logger.error(f"Failed to fetch team accuracy data: {e}")
        return {}


def _fetch_team_moral_data(members: List[str], table_id: str) -> Dict[str, float]:
    """
    Fetch moral compass data for team members.

    Args:
        members: List of member usernames
        table_id: The table ID

    Returns:
        Dictionary mapping username -> moral_points
    """
    try:
        from aimodelshare.moral_compass.api_client import MoralcompassApiClient

        api_client = MoralcompassApiClient()

        moral_data = {}
        for username in members:
            try:
                user_stats = api_client.get_user(table_id, username)
                # Extract moral points from moralCompassScore (reverse normalization estimate)
                # This is an approximation; ideally we'd store raw points separately
                moral_score = user_stats.total_count if hasattr(user_stats, 'total_count') else 0
                moral_data[username] = float(moral_score)
            except Exception as e:
                logger.debug(f"Could not fetch moral data for {username}: {e}")
                continue

        return moral_data

    except Exception as e:
        logger.error(f"Failed to fetch team moral data: {e}")
        return {}

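In this version team membership comes only from the TEAM_MEMBERS environment variable (with a single-user fallback), so a sketch of the team path looks like this (names illustrative, reachable server assumed):

    import os

    os.environ['TEAM_MEMBERS'] = 'student1,student2,student3'
    result = sync_team_state('TeamRocket')
    if result['synced']:
        # The server now holds a synthetic user 'team:TeamRocket' whose
        # moralCompassScore is avg_accuracy * avg_moral_norm under the
        # default product mode.
        print(result['server_score'])

Members missing from either the playground leaderboard or the moral compass table are dropped from the averages rather than counted as zero.
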
# ============================================================================
# Leaderboard Cache & Generation
# ============================================================================

# Global cache for leaderboard data
_leaderboard_cache: Dict[str, Tuple[float, List[Dict[str, Any]]]] = {}


def fetch_cached_users(table_id: Optional[str] = None, ttl: int = 30) -> List[Dict[str, Any]]:
    """
    Fetch users from moral compass table with caching.

    Args:
        table_id: Optional table ID (auto-derived if not provided)
        ttl: Cache TTL in seconds (default: 30)

    Returns:
        List of user dictionaries with fields:
        - 'username': str
        - 'moralCompassScore': float
        - 'submissionCount': int (if available)
        - 'totalCount': int (if available)
    """
    if not table_id:
        table_id = _derive_table_id()

    # Check cache
    cache_key = table_id
    if cache_key in _leaderboard_cache:
        cache_time, cached_data = _leaderboard_cache[cache_key]
        if (time.time() - cache_time) < ttl:
            logger.debug(f"Using cached leaderboard for table {table_id}")
            return cached_data

    # Fetch from API
    try:
        from aimodelshare.moral_compass.api_client import MoralcompassApiClient

        api_client = MoralcompassApiClient()
        users = list(api_client.iter_users(table_id))

        # Convert to dict format
        user_list = []
        for user in users:
            user_list.append({
                'username': user.username,
                'moralCompassScore': user.total_count,  # Assuming total_count stores combined score
                'submissionCount': user.submission_count,
                'totalCount': user.total_count
            })

        # Update cache
        _leaderboard_cache[cache_key] = (time.time(), user_list)

        logger.info(f"Fetched {len(user_list)} users for table {table_id}")
        return user_list

    except Exception as e:
        logger.error(f"Failed to fetch users for leaderboard: {e}")
        return []


def build_moral_leaderboard_html(
    highlight_username: Optional[str] = None,
    include_teams: bool = True,
    table_id: Optional[str] = None,
    max_entries: int = 20
) -> str:
    """
    Build HTML for moral compass leaderboard.

    Args:
        highlight_username: Username to highlight (current user)
        include_teams: If True, include team entries
        table_id: Optional table ID (auto-derived if not provided)
        max_entries: Maximum number of entries to display

    Returns:
        HTML string with leaderboard table

    Note:
        Uses same styling classes as model_building_game:
        - leaderboard-html-table
        - user-row-highlight
    """
    users = fetch_cached_users(table_id)

    if not users:
        return """
        <div style='text-align: center; padding: 40px; color: var(--text-muted);'>
            <p>No leaderboard data available yet.</p>
            <p>Complete activities and sync to appear on the leaderboard!</p>
        </div>
        """

    # Filter teams if needed
    if not include_teams:
        users = [u for u in users if not u['username'].startswith('team:')]

    # Sort by moralCompassScore descending
    users_sorted = sorted(users, key=lambda u: u['moralCompassScore'], reverse=True)
    users_sorted = users_sorted[:max_entries]

    # Build HTML
    html = """
    <table class='leaderboard-html-table'>
        <thead>
            <tr>
                <th>Rank</th>
                <th>Name</th>
                <th>Moral Compass Score</th>
                <th>Type</th>
            </tr>
        </thead>
        <tbody>
    """

    for rank, user in enumerate(users_sorted, start=1):
        username = user['username']
        score = user['moralCompassScore']

        is_team = username.startswith('team:')
        display_name = username[5:] if is_team else username  # Remove 'team:' prefix
        entry_type = '👥 Team' if is_team else '👤 User'

        # Highlight current user
        highlight = username == highlight_username
        row_class = "class='user-row-highlight'" if highlight else ""

        html += f"""
            <tr {row_class}>
                <td>{rank}</td>
                <td>{display_name}</td>
                <td>{score:.4f}</td>
                <td>{entry_type}</td>
            </tr>
        """

    html += """
        </tbody>
    </table>
    """

    return html

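The CSS variables and the leaderboard-html-table / user-row-highlight classes suggest these fragments are rendered inside the package's Gradio apps (shared_activity_styles.css ships alongside this module). A sketch of wiring the board into a Gradio Blocks UI, assuming gradio is installed and with an illustrative username:

    import gradio as gr

    def render_board():
        # fetch_cached_users() inside keeps repeated refreshes within the 30 s TTL
        return build_moral_leaderboard_html(highlight_username='student1')

    with gr.Blocks() as demo:
        board = gr.HTML(render_board())
        gr.Button('Refresh').click(render_board, outputs=board)
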
# ============================================================================
# Convenience Functions
# ============================================================================

def get_moral_compass_widget_html(
    local_points: int,
    server_score: Optional[float] = None,
    is_synced: bool = False
) -> str:
    """
    Generate HTML for Moral Compass widget.

    Args:
        local_points: Local moral points accumulated
        server_score: Server moral compass score (if synced)
        is_synced: Whether currently synced

    Returns:
        HTML string for widget display
    """
    status_icon = "✓" if is_synced else "⏳"
    status_text = "(synced)" if is_synced else "(pending)"

    html = f"""
    <div style='background: var(--block-background-fill); padding: 16px; border-radius: 8px;
                border: 2px solid var(--accent-strong); margin: 16px 0;'>
        <h3 style='margin-top: 0;'>🧭 Moral Compass Score</h3>
        <div style='display: flex; justify-content: space-around; flex-wrap: wrap;'>
            <div style='text-align: center; margin: 10px;'>
                <div style='font-size: 0.9rem; color: var(--text-muted);'>Local Points</div>
                <div style='font-size: 2rem; font-weight: bold; color: var(--accent-strong);'>
                    {local_points}
                </div>
            </div>
    """

    if server_score is not None:
        html += f"""
            <div style='text-align: center; margin: 10px;'>
                <div style='font-size: 0.9rem; color: var(--text-muted);'>Server Score {status_icon}</div>
                <div style='font-size: 2rem; font-weight: bold; color: var(--accent-strong);'>
                    {server_score:.4f}
                </div>
                <div style='font-size: 0.8rem; color: var(--text-muted);'>{status_text}</div>
            </div>
        """

    html += """
        </div>
    </div>
    """

    return html