aimodelshare 0.3.7__py3-none-any.whl → 0.4.71__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/moral_compass/__init__.py +51 -2
- aimodelshare/moral_compass/api_client.py +92 -4
- aimodelshare/moral_compass/apps/__init__.py +36 -16
- aimodelshare/moral_compass/apps/ai_consequences.py +98 -88
- aimodelshare/moral_compass/apps/bias_detective_ca.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_en.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_part1.py +2722 -0
- aimodelshare/moral_compass/apps/bias_detective_part2.py +2465 -0
- aimodelshare/moral_compass/apps/bias_detective_part_es.py +2722 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +237 -147
- aimodelshare/moral_compass/apps/fairness_fixer.py +1839 -859
- aimodelshare/moral_compass/apps/fairness_fixer_ca.py +1869 -0
- aimodelshare/moral_compass/apps/fairness_fixer_en.py +1869 -0
- aimodelshare/moral_compass/apps/fairness_fixer_es.py +1869 -0
- aimodelshare/moral_compass/apps/judge.py +130 -143
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +793 -831
- aimodelshare/moral_compass/apps/justice_equity_upgrade_ca.py +815 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade_en.py +815 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade_es.py +815 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +227 -745
- aimodelshare/moral_compass/apps/model_building_app_ca.py +4544 -0
- aimodelshare/moral_compass/apps/model_building_app_ca_final.py +3899 -0
- aimodelshare/moral_compass/apps/model_building_app_en.py +4290 -0
- aimodelshare/moral_compass/apps/model_building_app_en_final.py +3869 -0
- aimodelshare/moral_compass/apps/model_building_app_es.py +4362 -0
- aimodelshare/moral_compass/apps/model_building_app_es_final.py +3899 -0
- aimodelshare/moral_compass/apps/model_building_game.py +4211 -935
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +195 -95
- aimodelshare/moral_compass/apps/what_is_ai.py +126 -117
- aimodelshare/moral_compass/challenge.py +98 -17
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/METADATA +1 -1
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/RECORD +35 -19
- aimodelshare/moral_compass/apps/bias_detective.py +0 -714
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/WHEEL +0 -0
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/licenses/LICENSE +0 -0
- {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,4544 @@
"""
Model Building Game - Gradio application for the Justice & Equity Challenge.

Session-based authentication with leaderboard caching and progressive rank unlocking.

Concurrency Notes:
- This app is designed to run in a multi-threaded environment (Cloud Run).
- Per-user state is stored in gr.State objects, NOT in os.environ.
- Caches are protected by locks to ensure thread safety.
- Linear algebra libraries are constrained to single-threaded mode to prevent
  CPU oversubscription in containerized deployments.
"""

import os

# -------------------------------------------------------------------------
# Thread Limit Configuration (MUST be set before importing numpy/sklearn)
# Prevents CPU oversubscription in containerized environments like Cloud Run.
# -------------------------------------------------------------------------
os.environ.setdefault("OMP_NUM_THREADS", "1")
os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")
os.environ.setdefault("MKL_NUM_THREADS", "1")
os.environ.setdefault("NUMEXPR_NUM_THREADS", "1")

import time
import random
import requests
import contextlib
from io import StringIO
import threading
import functools
from pathlib import Path
from datetime import datetime, timedelta
from typing import Optional, Dict, Any, Tuple, Callable, TypeVar

import numpy as np
import pandas as pd
import gradio as gr

# --- Scikit-learn Imports ---
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

# --- AI Model Share Imports ---
try:
    from aimodelshare.playground import Competition
except ImportError:
    raise ImportError(
        "The 'aimodelshare' library is required. Install with: pip install aimodelshare"
    )

# -------------------------------------------------------------------------
# Configuration & Caching Infrastructure
# -------------------------------------------------------------------------

# -------------------------------------------------------------------------
# CACHE CONFIGURATION (Optimized: Thread-Safe SQLite)
# -------------------------------------------------------------------------
import sqlite3

CACHE_DB_FILE = "prediction_cache.sqlite"

def get_cached_prediction(key):
    """
    Lightning-fast lookup from SQLite database.
    THREAD-SAFE FIX: Opens a new connection for every lookup.
    """
    # 1. Check if DB exists
    if not os.path.exists(CACHE_DB_FILE):
        return None

    try:
        # Use a context manager ('with') to ensure the connection
        # is ALWAYS closed, releasing file locks immediately.
        # timeout=10 ensures we don't wait forever if the file is busy.
        with sqlite3.connect(CACHE_DB_FILE, timeout=10.0) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT value FROM cache WHERE key=?", (key,))
            result = cursor.fetchone()

            if result:
                return result[0]
            else:
                return None

    except sqlite3.OperationalError as e:
        # Handle locking errors gracefully
        print(f"⚠️ CACHE LOCK ERROR: {e}. Falling back to training.", flush=True)
        return None

    except Exception as e:
        print(f"⚠️ DB READ ERROR: {e}", flush=True)
        return None

print("✅ App configured for Thread-Safe SQLite Cache.")

LEADERBOARD_CACHE_SECONDS = int(os.environ.get("LEADERBOARD_CACHE_SECONDS", "45"))
MAX_LEADERBOARD_ENTRIES = os.environ.get("MAX_LEADERBOARD_ENTRIES")
MAX_LEADERBOARD_ENTRIES = int(MAX_LEADERBOARD_ENTRIES) if MAX_LEADERBOARD_ENTRIES else None
DEBUG_LOG = os.environ.get("DEBUG_LOG", "false").lower() == "true"

# In-memory caches (per container instance)
# Each cache has its own lock for thread safety under concurrent requests
_cache_lock = threading.Lock()  # Protects _leaderboard_cache
_user_stats_lock = threading.Lock()  # Protects _user_stats_cache
_auth_lock = threading.Lock()  # Protects get_aws_token() credential injection

# Auth-aware leaderboard cache: separate entries for authenticated vs anonymous
# Structure: {"anon": {"data": df, "timestamp": float}, "auth": {"data": df, "timestamp": float}}
_leaderboard_cache: Dict[str, Dict[str, Any]] = {
    "anon": {"data": None, "timestamp": 0.0},
    "auth": {"data": None, "timestamp": 0.0},
}
_user_stats_cache: Dict[str, Dict[str, Any]] = {}
USER_STATS_TTL = LEADERBOARD_CACHE_SECONDS

# -------------------------------------------------------------------------
# Retry Helper for External API Calls
# -------------------------------------------------------------------------

T = TypeVar("T")

def _retry_with_backoff(
    func: Callable[[], T],
    max_attempts: int = 3,
    base_delay: float = 0.5,
    description: str = "operation"
) -> T:
    """
    Execute a function with exponential backoff retry on failure.

    Concurrency Note: This helper provides resilience against transient
    network failures when calling external APIs (Competition.get_leaderboard,
    playground.submit_model). Essential for Cloud Run deployments where
    network calls may occasionally fail under load.

    Args:
        func: Callable to execute (should take no arguments)
        max_attempts: Maximum number of attempts (default: 3)
        base_delay: Initial delay in seconds, doubled each retry (default: 0.5)
        description: Human-readable description for logging

    Returns:
        Result from successful function call

    Raises:
        Last exception if all attempts fail
    """
    last_exception: Optional[Exception] = None
    delay = base_delay

    for attempt in range(1, max_attempts + 1):
        try:
            return func()
        except Exception as e:
            last_exception = e
            if attempt < max_attempts:
                _log(f"{description} attempt {attempt} failed: {e}. Retrying in {delay}s...")
                time.sleep(delay)
                delay *= 2  # Exponential backoff
            else:
                _log(f"{description} failed after {max_attempts} attempts: {e}")

    # Loop always runs at least once (max_attempts >= 1), so last_exception is set
    raise last_exception  # type: ignore[misc]

def _log(msg: str):
    """Log message if DEBUG_LOG is enabled."""
    if DEBUG_LOG:
        print(f"[ModelBuildingGame] {msg}")

def _normalize_team_name(name: str) -> str:
    """Normalize team name for consistent comparison and storage."""
    if not name:
        return ""
    return " ".join(str(name).strip().split())

def _get_leaderboard_with_optional_token(playground_instance: Optional["Competition"], token: Optional[str] = None) -> Optional[pd.DataFrame]:
    """
    Fetch fresh leaderboard with optional token authentication and retry logic.

    This is a helper function that centralizes the pattern of fetching
    a fresh (non-cached) leaderboard with optional token authentication.
    Use this for user-facing flows that require fresh, full data.

    Concurrency Note: Uses _retry_with_backoff for resilience against
    transient network failures.

    Args:
        playground_instance: The Competition playground instance (or None)
        token: Optional authentication token for the fetch

    Returns:
        DataFrame with leaderboard data, or None if fetch fails or playground is None
    """
    if playground_instance is None:
        return None

    def _fetch():
        if token:
            return playground_instance.get_leaderboard(token=token)
        return playground_instance.get_leaderboard()

    try:
        return _retry_with_backoff(_fetch, description="leaderboard fetch")
    except Exception as e:
        _log(f"Leaderboard fetch failed after retries: {e}")
        return None

def _fetch_leaderboard(token: Optional[str]) -> Optional[pd.DataFrame]:
    """
    Fetch leaderboard with auth-aware caching (TTL: LEADERBOARD_CACHE_SECONDS).

    Concurrency Note: Cache is keyed by auth scope ("anon" vs "auth") to prevent
    cross-user data leakage. Authenticated users share a single "auth" cache entry
    to avoid unbounded cache growth. Protected by _cache_lock.
    """
    # Determine cache key based on authentication status
    cache_key = "auth" if token else "anon"
    now = time.time()

    with _cache_lock:
        cache_entry = _leaderboard_cache[cache_key]
        if (
            cache_entry["data"] is not None
            and now - cache_entry["timestamp"] < LEADERBOARD_CACHE_SECONDS
        ):
            _log(f"Leaderboard cache hit ({cache_key})")
            return cache_entry["data"]

    _log(f"Fetching fresh leaderboard ({cache_key})...")
    df = None
    try:
        playground_id = "https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m"
        playground_instance = Competition(playground_id)

        def _fetch():
            return playground_instance.get_leaderboard(token=token) if token else playground_instance.get_leaderboard()

        df = _retry_with_backoff(_fetch, description="leaderboard fetch")
        if df is not None and not df.empty and MAX_LEADERBOARD_ENTRIES:
            df = df.head(MAX_LEADERBOARD_ENTRIES)
        _log(f"Leaderboard fetched ({cache_key}): {len(df) if df is not None else 0} entries")
    except Exception as e:
        _log(f"Leaderboard fetch failed ({cache_key}): {e}")
        df = None

    with _cache_lock:
        _leaderboard_cache[cache_key]["data"] = df
        _leaderboard_cache[cache_key]["timestamp"] = time.time()
    return df

def _get_or_assign_team(username: str, leaderboard_df: Optional[pd.DataFrame]) -> Tuple[str, bool]:
    """Get existing team from leaderboard or assign random team."""
    # TEAM_NAMES is defined in configuration section below
    try:
        if leaderboard_df is not None and not leaderboard_df.empty and "Team" in leaderboard_df.columns:
            user_submissions = leaderboard_df[leaderboard_df["username"] == username]
            if not user_submissions.empty:
                if "timestamp" in user_submissions.columns:
                    try:
                        user_submissions = user_submissions.copy()
                        user_submissions["timestamp"] = pd.to_datetime(
                            user_submissions["timestamp"], errors="coerce"
                        )
                        user_submissions = user_submissions.sort_values("timestamp", ascending=False)
                        _log(f"Sorted {len(user_submissions)} submissions by timestamp for {username}")
                    except Exception as ts_err:
                        _log(f"Timestamp sort error: {ts_err}")
                existing_team = user_submissions.iloc[0]["Team"]
                if pd.notna(existing_team) and str(existing_team).strip():
                    normalized = _normalize_team_name(existing_team)
                    _log(f"Found existing team for {username}: {normalized}")
                    return normalized, False
        new_team = _normalize_team_name(random.choice(TEAM_NAMES))
        _log(f"Assigning new team to {username}: {new_team}")
        return new_team, True
    except Exception as e:
        _log(f"Team assignment error: {e}")
        new_team = _normalize_team_name(random.choice(TEAM_NAMES))
        return new_team, True

def _try_session_based_auth(request: "gr.Request") -> Tuple[bool, Optional[str], Optional[str]]:
    """Attempt to authenticate via session token. Returns (success, username, token)."""
    try:
        session_id = request.query_params.get("sessionid") if request else None
        if not session_id:
            _log("No sessionid in request")
            return False, None, None

        from aimodelshare.aws import get_token_from_session, _get_username_from_token

        token = get_token_from_session(session_id)
        if not token:
            _log("Failed to get token from session")
            return False, None, None

        username = _get_username_from_token(token)
        if not username:
            _log("Failed to extract username from token")
            return False, None, None

        _log(f"Session auth successful for {username}")
        return True, username, token

    except Exception as e:
        _log(f"Session auth failed: {e}")
        return False, None, None


# -------------------------------------------------------------------------
# UPDATED FUNCTION
# -------------------------------------------------------------------------
def _compute_user_stats(username: str, token: str) -> Dict[str, Any]:
    """
    Compute user statistics with caching.

    Concurrency Note: Protected by _user_stats_lock for thread-safe
    cache reads and writes.
    """
    now = time.time()

    # Thread-safe cache check
    with _user_stats_lock:
        cached = _user_stats_cache.get(username)
        if cached and (now - cached.get("_ts", 0) < USER_STATS_TTL):
            _log(f"User stats cache hit for {username}")
            # Return shallow copy to prevent caller mutations from affecting cache.
            # Stats dict contains only primitives (float, int, str), so shallow copy is sufficient.
            return cached.copy()

    _log(f"Computing fresh stats for {username}")
    leaderboard_df = _fetch_leaderboard(token)
    team_name, _ = _get_or_assign_team(username, leaderboard_df)

    stats = {
        "best_score": 0.0,
        "rank": 0,
        "team_name": team_name,
        "submission_count": 0,
        "last_score": 0.0,
        "_ts": time.time()
    }

    try:
        if leaderboard_df is not None and not leaderboard_df.empty:
            user_submissions = leaderboard_df[leaderboard_df["username"] == username]
            if not user_submissions.empty:
                stats["submission_count"] = len(user_submissions)
                if "accuracy" in user_submissions.columns:
                    stats["best_score"] = float(user_submissions["accuracy"].max())
                    if "timestamp" in user_submissions.columns:
                        try:
                            user_submissions = user_submissions.copy()
                            user_submissions["timestamp"] = pd.to_datetime(
                                user_submissions["timestamp"], errors="coerce"
                            )
                            recent = user_submissions.sort_values("timestamp", ascending=False).iloc[0]
                            stats["last_score"] = float(recent["accuracy"])
                        except Exception:
                            stats["last_score"] = stats["best_score"]
                    else:
                        stats["last_score"] = stats["best_score"]

            if "accuracy" in leaderboard_df.columns:
                user_bests = leaderboard_df.groupby("username")["accuracy"].max()
                ranked = user_bests.sort_values(ascending=False)
                try:
                    stats["rank"] = int(ranked.index.get_loc(username) + 1)
                except KeyError:
                    stats["rank"] = 0
    except Exception as e:
        _log(f"Error computing stats for {username}: {e}")

    # Thread-safe cache update
    with _user_stats_lock:
        _user_stats_cache[username] = stats
    _log(f"Stats for {username}: {stats}")
    return stats

def _build_attempts_tracker_html(current_count, limit=10):
    """
    Generate HTML for the attempts tracker display.
    Shows current attempt count vs limit with color coding.

    Args:
        current_count: Number of attempts used so far
        limit: Maximum allowed attempts (default: ATTEMPT_LIMIT)

    Returns:
        str: HTML string for the tracker display
    """
    if current_count >= limit:
        # Limit reached - final-attempt message (same blue styling as normal)
        bg_color = "#f0f9ff"
        border_color = "#bae6fd"
        text_color = "#0369a1"
        icon = "🛑"
        label = f"Última oportunitat (per ara) per pujar la teva puntuació!: {current_count}/{limit}"
    else:
        # Normal - blue styling
        bg_color = "#f0f9ff"
        border_color = "#bae6fd"
        text_color = "#0369a1"
        icon = "📊"
        label = f"Intents utilitzats: {current_count}/{limit}"

    return f"""<div style='text-align:center; padding:8px; margin:8px 0; background:{bg_color}; border-radius:8px; border:1px solid {border_color};'>
        <p style='margin:0; color:{text_color}; font-weight:600; font-size:1rem;'>{icon} {label}</p>
    </div>"""

def check_attempt_limit(submission_count: int, limit: int = None) -> Tuple[bool, str]:
    """Check if submission count exceeds limit."""
    # ATTEMPT_LIMIT is defined in configuration section below
    if limit is None:
        limit = ATTEMPT_LIMIT

    if submission_count >= limit:
        msg = f"⚠️ Límit d’intents assolit ({submission_count}/{limit})"
        return False, msg
    return True, f"Intents: {submission_count}/{limit}"

# -------------------------------------------------------------------------
# Future: Fairness Metrics
# -------------------------------------------------------------------------

# def compute_fairness_metrics(y_true, y_pred, sensitive_attrs):
#     """
#     Compute fairness metrics for model predictions.
#
#     Args:
#         y_true: Ground truth labels
#         y_pred: Model predictions
#         sensitive_attrs: DataFrame with sensitive attributes (race, sex, age)
#
#     Returns:
#         dict: Fairness metrics including demographic parity, equalized odds
#
#     TODO: Implement using fairlearn or aif360
#     """
#     pass


# -------------------------------------------------------------------------
# 1. Configuration
# -------------------------------------------------------------------------

MY_PLAYGROUND_ID = "https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m"

# --- Submission Limit Configuration ---
# Maximum number of successful leaderboard submissions per user per session.
# Preview runs (pre-login) and failed/invalid attempts do NOT count toward this limit.
# Only actual successful playground.submit_model() calls increment the count.
#
# TODO: Server-side persistent enforcement recommended
# The current attempt limit is stored in gr.State (per-session) and can be bypassed
# by refreshing the browser. For production use with 100+ concurrent users,
# consider implementing server-side persistence via Redis or Firestore to track
# attempt counts per user across sessions.
ATTEMPT_LIMIT = 10

# --- Leaderboard Polling Configuration ---
# After a real authenticated submission, we poll the leaderboard to detect eventual consistency.
# This prevents the "stuck on first preview KPI" issue where the leaderboard hasn't updated yet.
# Increased from 12 to 60 to better tolerate backend latency and cold starts.
# If polling times out, optimistic fallback logic will provide provisional UI updates.
LEADERBOARD_POLL_TRIES = 60  # Number of polling attempts (increased to handle backend latency/cold starts)
LEADERBOARD_POLL_SLEEP = 1.0  # Sleep duration between polls (seconds)
ENABLE_AUTO_RESUBMIT_AFTER_READY = False  # Future feature flag for auto-resubmit

# --- 1. MODEL CONFIGURATION (Keys match Database) ---
MODEL_TYPES = {
    "The Balanced Generalist": {
        "model_builder": lambda: LogisticRegression(
            max_iter=500, random_state=42, class_weight="balanced"
        ),
        # Store the Catalan description here for the UI
        "card_ca": "Aquest model és ràpid, fiable i equilibrat. Bon punt de partida; sol donar resultats més estables."
    },
    "The Rule-Maker": {
        "model_builder": lambda: DecisionTreeClassifier(
            random_state=42, class_weight="balanced"
        ),
        "card_ca": "Aquest model aprèn regles simples de tipus «si/aleshores». Fàcil d’interpretar, però li costa captar patrons complexos."
    },
    "The 'Nearest Neighbor'": {
        "model_builder": lambda: KNeighborsClassifier(),
        "card_ca": "Aquest model es basa en exemples semblants del passat. «Si t’assembles a aquests casos, prediré el mateix resultat»."
    },
    "The Deep Pattern-Finder": {
        "model_builder": lambda: RandomForestClassifier(
            random_state=42, class_weight="balanced"
        ),
        "card_ca": "Aquest model combina molts arbres de decisió per trobar patrons complexos. És potent, però cal vigilar no fer-lo massa complex."
    }
}

DEFAULT_MODEL = "The Balanced Generalist"  # Now using the English key

# --- 2. TRANSLATION MAPS (UI Display -> Database Key) ---

# Map English Keys to Catalan Display Names for the Radio Button
MODEL_DISPLAY_MAP = {
    "The Balanced Generalist": "El Generalista Equilibrat",
    "The Rule-Maker": "El Creador de Regles",
    "The 'Nearest Neighbor'": "El 'Veí més Proper'",
    "The Deep Pattern-Finder": "El Detector de Patrons Profunds"
}

# Create the Choices List as Tuples: [(Catalan Label, English Value)]
# This tells Gradio: "Show the user Catalan, but send Python the English key"
MODEL_RADIO_CHOICES = [(label, key) for key, label in MODEL_DISPLAY_MAP.items()]

# Map Catalan Data Sizes (UI) to English Keys (Database)
DATA_SIZE_DB_MAP = {
    "Petita (20%)": "Small (20%)",
    "Mitjana (60%)": "Medium (60%)",
    "Gran (80%)": "Large (80%)",
    "Completa (100%)": "Full (100%)"
}


TEAM_NAMES = [
    "The Moral Champions", "The Justice League", "The Data Detectives",
    "The Ethical Explorers", "The Fairness Finders", "The Accuracy Avengers"
]
CURRENT_TEAM_NAME = random.choice(TEAM_NAMES)

# Team name translations for UI display only (Catalan)
# Internal logic (ranking, caching, grouping) always uses canonical English names
TEAM_NAME_TRANSLATIONS = {
    "en": {
        "The Justice League": "The Justice League",
        "The Moral Champions": "The Moral Champions",
        "The Data Detectives": "The Data Detectives",
        "The Ethical Explorers": "The Ethical Explorers",
        "The Fairness Finders": "The Fairness Finders",
        "The Accuracy Avengers": "The Accuracy Avengers"
    },
    "ca": {
        "The Justice League": "La Lliga de la Justícia",
        "The Moral Champions": "Els Campions Morals",
        "The Data Detectives": "Els Detectius de Dades",
        "The Ethical Explorers": "Els Exploradors Ètics",
        "The Fairness Finders": "Els Cercadors d'Equitat",
        "The Accuracy Avengers": "Els Venjadors de Precisió"
    }
}

# UI language for team name display
UI_TEAM_LANG = "ca"


# --- Feature groups for scaffolding (Weak -> Medium -> Strong) ---
FEATURE_SET_ALL_OPTIONS = [
    ("Nombre de delictes greus juvenils", "juv_fel_count"),
    ("Nombre de delictes lleus juvenils", "juv_misd_count"),
    ("Altres delictes juvenils", "juv_other_count"),
    ("Origen ètnic", "race"),
    ("Sexe", "sex"),
    ("Gravetat del càrrec (lleu / greu)", "c_charge_degree"),
    ("Dies abans de l'arrest", "days_b_screening_arrest"),
    ("Edat", "age"),
    ("Dies a la presó", "length_of_stay"),
    ("Nombre de delictes previs", "priors_count"),
]
FEATURE_SET_GROUP_1_VALS = [
    "juv_fel_count", "juv_misd_count", "juv_other_count", "race", "sex",
    "c_charge_degree", "days_b_screening_arrest"
]
FEATURE_SET_GROUP_2_VALS = ["c_charge_desc", "age"]
FEATURE_SET_GROUP_3_VALS = ["length_of_stay", "priors_count"]
ALL_NUMERIC_COLS = [
    "juv_fel_count", "juv_misd_count", "juv_other_count",
    "days_b_screening_arrest", "age", "length_of_stay", "priors_count"
]
ALL_CATEGORICAL_COLS = [
    "race", "sex", "c_charge_degree"
]
DEFAULT_FEATURE_SET = FEATURE_SET_GROUP_1_VALS


# --- Data Size config ---
DATA_SIZE_MAP = {
    "Petita (20%)": 0.2,
    "Mitjana (60%)": 0.6,
    "Gran (80%)": 0.8,
    "Completa (100%)": 1.0
}
DEFAULT_DATA_SIZE = "Petita (20%)"


MAX_ROWS = 4000
TOP_N_CHARGE_CATEGORICAL = 50
WARM_MINI_ROWS = 300  # Small warm dataset for instant preview
CACHE_MAX_AGE_HOURS = 24  # Cache validity duration
np.random.seed(42)

# Global state containers (populated during initialization)
playground = None
X_TRAIN_RAW = None  # Keep this for 100%
X_TEST_RAW = None
Y_TRAIN = None
Y_TEST = None
# Add a container for our pre-sampled data
X_TRAIN_SAMPLES_MAP = {}
Y_TRAIN_SAMPLES_MAP = {}

# Warm mini dataset for instant preview
X_TRAIN_WARM = None
Y_TRAIN_WARM = None

# Cache for transformed test sets (for future performance improvements)
TEST_CACHE = {}

# Initialization flags to track readiness state
INIT_FLAGS = {
    "competition": False,
    "dataset_core": False,
    "pre_samples_small": False,
    "pre_samples_medium": False,
    "pre_samples_large": False,
    "pre_samples_full": False,
    "leaderboard": False,
    "default_preprocessor": False,
    "warm_mini": False,
    "errors": []
}

# Lock for thread-safe flag updates
INIT_LOCK = threading.Lock()

# -------------------------------------------------------------------------
# 2. Data & Backend Utilities
# -------------------------------------------------------------------------

def _get_cache_dir():
    """Get or create the cache directory for datasets."""
    cache_dir = Path.home() / ".aimodelshare_cache"
    cache_dir.mkdir(exist_ok=True)
    return cache_dir

def _safe_request_csv(url, cache_filename="compas.csv"):
    """
    Request CSV from URL with local caching.
    Reuses cached file if it exists and is less than CACHE_MAX_AGE_HOURS old.
    """
    cache_dir = _get_cache_dir()
    cache_path = cache_dir / cache_filename

    # Check if cache exists and is fresh
    if cache_path.exists():
        file_time = datetime.fromtimestamp(cache_path.stat().st_mtime)
        if datetime.now() - file_time < timedelta(hours=CACHE_MAX_AGE_HOURS):
            return pd.read_csv(cache_path)

    # Download fresh data
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    df = pd.read_csv(StringIO(response.text))

    # Save to cache
    df.to_csv(cache_path, index=False)

    return df

def safe_int(value, default=1):
    """
    Safely coerce a value to int, returning default if value is None or invalid.
    Protects against TypeError when Gradio sliders receive None.
    """
    if value is None:
        return default
    try:
        return int(value)
    except (ValueError, TypeError):
        return default

def load_and_prep_data(use_cache=True):
    """
    Load, sample, and prepare raw COMPAS dataset.
    NOW PRE-SAMPLES ALL DATA SIZES and creates warm mini dataset.
    """
    url = "https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv"

    # Use cached version if available
    if use_cache:
        try:
            df = _safe_request_csv(url)
        except Exception as e:
            print(f"Cache failed, fetching directly: {e}")
            response = requests.get(url)
            df = pd.read_csv(StringIO(response.text))
    else:
        response = requests.get(url)
        df = pd.read_csv(StringIO(response.text))

    # Calculate length_of_stay
    try:
        df['c_jail_in'] = pd.to_datetime(df['c_jail_in'])
        df['c_jail_out'] = pd.to_datetime(df['c_jail_out'])
        df['length_of_stay'] = (df['c_jail_out'] - df['c_jail_in']).dt.total_seconds() / (24 * 60 * 60)  # in days
    except Exception:
        df['length_of_stay'] = np.nan

    if df.shape[0] > MAX_ROWS:
        df = df.sample(n=MAX_ROWS, random_state=42)

    feature_columns = ALL_NUMERIC_COLS + ALL_CATEGORICAL_COLS
    feature_columns = sorted(list(set(feature_columns)))

    target_column = "two_year_recid"

    if "c_charge_desc" in df.columns:
        top_charges = df["c_charge_desc"].value_counts().head(TOP_N_CHARGE_CATEGORICAL).index
        df["c_charge_desc"] = df["c_charge_desc"].apply(
            lambda x: x if pd.notna(x) and x in top_charges else "OTHER"
        )

    for col in feature_columns:
        if col not in df.columns:
            if col == 'length_of_stay' and 'length_of_stay' in df.columns:
                continue
            df[col] = np.nan

    X = df[feature_columns].copy()
    y = df[target_column].copy()

    X_train_raw, X_test_raw, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=42, stratify=y
    )

    # Pre-sample all data sizes
    global X_TRAIN_SAMPLES_MAP, Y_TRAIN_SAMPLES_MAP, X_TRAIN_WARM, Y_TRAIN_WARM

    X_TRAIN_SAMPLES_MAP["Completa (100%)"] = X_train_raw
    Y_TRAIN_SAMPLES_MAP["Completa (100%)"] = y_train

    for label, frac in DATA_SIZE_MAP.items():
        if frac < 1.0:
            X_train_sampled = X_train_raw.sample(frac=frac, random_state=42)
            y_train_sampled = y_train.loc[X_train_sampled.index]
            X_TRAIN_SAMPLES_MAP[label] = X_train_sampled
            Y_TRAIN_SAMPLES_MAP[label] = y_train_sampled

    # Create warm mini dataset for instant preview
    warm_size = min(WARM_MINI_ROWS, len(X_train_raw))
    X_TRAIN_WARM = X_train_raw.sample(n=warm_size, random_state=42)
    Y_TRAIN_WARM = y_train.loc[X_TRAIN_WARM.index]

    return X_train_raw, X_test_raw, y_train, y_test

def _background_initializer():
    """
    Background thread that performs sequential initialization tasks.
    Updates INIT_FLAGS dict with readiness booleans and captures errors.

    Initialization sequence:
    1. Competition object connection
    2. Dataset cached download and core split
    3. Warm mini dataset creation
    4. Progressive sampling: small -> medium -> large -> full
    5. Leaderboard prefetch
    6. Default preprocessor fit on small sample
    """
    global playground, X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST

    try:
        # Step 1: Connect to competition
        with INIT_LOCK:
            if playground is None:
                playground = Competition(MY_PLAYGROUND_ID)
            INIT_FLAGS["competition"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La connexió amb la competició ha fallat: {str(e)}")

    try:
        # Step 2: Load dataset core (train/test split)
        X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST = load_and_prep_data(use_cache=True)
        with INIT_LOCK:
            INIT_FLAGS["dataset_core"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La càrrega del conjunt de dades ha fallat: {str(e)}")
        return  # Cannot proceed without data

    try:
        # Step 3: Warm mini dataset (already created in load_and_prep_data)
        if X_TRAIN_WARM is not None and len(X_TRAIN_WARM) > 0:
            with INIT_LOCK:
                INIT_FLAGS["warm_mini"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"El dataset mini de preescalfament ha fallat: {str(e)}")

    # Progressive sampling - samples are already created in load_and_prep_data
    # Just mark them as ready sequentially with delays to simulate progressive loading

    try:
        # Step 4a: Small sample (20%)
        time.sleep(0.5)  # Simulate processing
        with INIT_LOCK:
            INIT_FLAGS["pre_samples_small"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La mostra petita ha fallat: {str(e)}")

    try:
        # Step 4b: Medium sample (60%)
        time.sleep(0.5)
        with INIT_LOCK:
            INIT_FLAGS["pre_samples_medium"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La mostra mitjana ha fallat: {str(e)}")

    try:
        # Step 4c: Large sample (80%)
        time.sleep(0.5)
        with INIT_LOCK:
            INIT_FLAGS["pre_samples_large"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La mostra gran ha fallat: {str(e)}")
        print(f"✗ Large sample failed: {e}")

    try:
        # Step 4d: Full sample (100%)
        print("Background init: Full sample (100%)...")
        time.sleep(0.5)
        with INIT_LOCK:
            INIT_FLAGS["pre_samples_full"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La mostra completa ha fallat: {str(e)}")

    try:
        # Step 5: Leaderboard prefetch (best-effort, unauthenticated)
        # Concurrency Note: Do NOT use os.environ for ambient token - prefetch
        # anonymously to warm the cache for initial page loads.
        if playground is not None:
            _ = _get_leaderboard_with_optional_token(playground, None)
        with INIT_LOCK:
            INIT_FLAGS["leaderboard"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"La pre-obtenció de la classificació ha fallat: {str(e)}")

    try:
        # Step 6: Default preprocessor on small sample
        _fit_default_preprocessor()
        with INIT_LOCK:
            INIT_FLAGS["default_preprocessor"] = True
    except Exception as e:
        with INIT_LOCK:
            INIT_FLAGS["errors"].append(f"El preprocessador per defecte ha fallat: {str(e)}")
        print(f"✗ Default preprocessor failed: {e}")


def _fit_default_preprocessor():
    """
    Pre-fit a default preprocessor on the small sample with default features.
    Uses memoized preprocessor builder for efficiency.
    """
    if "Petita (20%)" not in X_TRAIN_SAMPLES_MAP:
        return

    X_sample = X_TRAIN_SAMPLES_MAP["Petita (20%)"]

    # Use default feature set
    numeric_cols = [f for f in DEFAULT_FEATURE_SET if f in ALL_NUMERIC_COLS]
    categorical_cols = [f for f in DEFAULT_FEATURE_SET if f in ALL_CATEGORICAL_COLS]

    if not numeric_cols and not categorical_cols:
        return

    # Use memoized builder
    preprocessor, selected_cols = build_preprocessor(numeric_cols, categorical_cols)
    preprocessor.fit(X_sample[selected_cols])

def start_background_init():
    """
    Start the background initialization thread.
    Should be called once at app creation.
    """
    thread = threading.Thread(target=_background_initializer, daemon=True)
    thread.start()

def poll_init_status():
    """
    Poll the initialization status and return readiness bool.
    Returns empty string for HTML so users don't see the checklist.

    Returns:
        tuple: (status_html, ready_bool)
    """
    with INIT_LOCK:
        flags = INIT_FLAGS.copy()

    # Determine if minimum requirements met
    ready = flags["competition"] and flags["dataset_core"] and flags["pre_samples_small"]

    return "", ready

def get_available_data_sizes():
    """
    Return list of data sizes that are currently available based on init flags.
    """
    with INIT_LOCK:
        flags = INIT_FLAGS.copy()

    available = []
    if flags["pre_samples_small"]:
        available.append("Petita (20%)")
    if flags["pre_samples_medium"]:
        available.append("Mitjana (60%)")
    if flags["pre_samples_large"]:
        available.append("Gran (80%)")
    if flags["pre_samples_full"]:
        available.append("Completa (100%)")

    return available if available else ["Petita (20%)"]  # Fallback

def _is_ready() -> bool:
    """
    Check if initialization is complete and system is ready for real submissions.

    Returns:
        bool: True if competition, dataset, and small sample are initialized
    """
    with INIT_LOCK:
        flags = INIT_FLAGS.copy()
    return flags["competition"] and flags["dataset_core"] and flags["pre_samples_small"]

def _get_user_latest_accuracy(df: Optional[pd.DataFrame], username: str) -> Optional[float]:
    """
    Extract the user's latest submission accuracy from the leaderboard.

    Uses timestamp sorting when available; otherwise assumes last row is latest.

    Args:
        df: Leaderboard DataFrame
        username: Username to extract accuracy for

    Returns:
        float: Latest submission accuracy, or None if not found/invalid
    """
    if df is None or df.empty:
        return None

    try:
        user_rows = df[df["username"] == username]
        if user_rows.empty or "accuracy" not in user_rows.columns:
            return None

        # Try timestamp-based sorting if available
        if "timestamp" in user_rows.columns:
            user_rows = user_rows.copy()
            user_rows["__parsed_ts"] = pd.to_datetime(user_rows["timestamp"], errors="coerce")
            valid_ts = user_rows[user_rows["__parsed_ts"].notna()]

            if not valid_ts.empty:
                # Sort by timestamp and get latest
                latest_row = valid_ts.sort_values("__parsed_ts", ascending=False).iloc[0]
                return float(latest_row["accuracy"])

        # Fallback: assume last row is latest (append order)
        return float(user_rows.iloc[-1]["accuracy"])

    except Exception as e:
        _log(f"Error extracting latest accuracy for {username}: {e}")
        return None

def _get_user_latest_ts(df: Optional[pd.DataFrame], username: str) -> Optional[float]:
    """
    Extract the user's latest valid timestamp from the leaderboard.

    Args:
        df: Leaderboard DataFrame
        username: Username to extract timestamp for

    Returns:
        float: Latest timestamp as unix epoch, or None if not found/invalid
    """
    if df is None or df.empty:
        return None

    try:
        user_rows = df[df["username"] == username]
        if user_rows.empty or "timestamp" not in user_rows.columns:
            return None

        # Parse timestamps and get the latest
        user_rows = user_rows.copy()
        user_rows["__parsed_ts"] = pd.to_datetime(user_rows["timestamp"], errors="coerce")
        valid_ts = user_rows[user_rows["__parsed_ts"].notna()]

        if valid_ts.empty:
            return None

        latest_ts = valid_ts["__parsed_ts"].max()
        return latest_ts.timestamp() if pd.notna(latest_ts) else None
    except Exception as e:
        _log(f"Error extracting latest timestamp for {username}: {e}")
        return None

def _user_rows_changed(
    refreshed_leaderboard: Optional[pd.DataFrame],
    username: str,
    old_row_count: int,
    old_best_score: float,
    old_latest_ts: Optional[float] = None,
    old_latest_score: Optional[float] = None
) -> bool:
    """
    Check if user's leaderboard entries have changed after submission.

    Used after polling to detect if the leaderboard has updated with the new submission.
    Checks row count (new submission added), best score (score improved), latest timestamp,
    and latest accuracy (handles backend overwrite without append).

    Args:
        refreshed_leaderboard: Fresh leaderboard data
        username: Username to check for
        old_row_count: Previous number of submissions for this user
        old_best_score: Previous best accuracy score
        old_latest_ts: Previous latest timestamp (unix epoch), optional
        old_latest_score: Previous latest submission accuracy, optional

    Returns:
        bool: True if user has more rows, better score, newer timestamp, or changed latest accuracy
    """
    if refreshed_leaderboard is None or refreshed_leaderboard.empty:
        return False

    try:
        user_rows = refreshed_leaderboard[refreshed_leaderboard["username"] == username]
        if user_rows.empty:
            return False

        new_row_count = len(user_rows)
        new_best_score = float(user_rows["accuracy"].max()) if "accuracy" in user_rows.columns else 0.0
        new_latest_ts = _get_user_latest_ts(refreshed_leaderboard, username)
        new_latest_score = _get_user_latest_accuracy(refreshed_leaderboard, username)

        # Changed if we have more submissions, better score, newer timestamp, or changed latest accuracy
        changed = (new_row_count > old_row_count) or (new_best_score > old_best_score + 0.0001)

        # Check timestamp if available
        if old_latest_ts is not None and new_latest_ts is not None:
            changed = changed or (new_latest_ts > old_latest_ts)

        # Check latest accuracy change (handles overwrite-without-append case)
        if old_latest_score is not None and new_latest_score is not None:
            accuracy_changed = abs(new_latest_score - old_latest_score) >= 0.00001
            if accuracy_changed:
                _log(f"Latest accuracy changed: {old_latest_score:.4f} -> {new_latest_score:.4f}")
            changed = changed or accuracy_changed

        if changed:
            _log(f"User rows changed for {username}:")
            _log(f"  Row count: {old_row_count} -> {new_row_count}")
            _log(f"  Best score: {old_best_score:.4f} -> {new_best_score:.4f}")
            _log(f"  Latest score: {old_latest_score if old_latest_score else 'N/A'} -> {new_latest_score if new_latest_score else 'N/A'}")
            _log(f"  Timestamp: {old_latest_ts} -> {new_latest_ts}")

        return changed
    except Exception as e:
        _log(f"Error checking user rows: {e}")
        return False

@functools.lru_cache(maxsize=32)
def _get_cached_preprocessor_config(numeric_cols_tuple, categorical_cols_tuple):
    """
    Create and return preprocessor configuration (memoized).
    Uses tuples for hashability in lru_cache.

    Concurrency Note: Uses sparse_output=True for OneHotEncoder to reduce memory
    footprint under concurrent requests. Downstream models that require dense
    arrays (DecisionTree, RandomForest) will convert via .toarray() as needed.
    LogisticRegression and KNeighborsClassifier handle sparse matrices natively.

    Returns tuple of (transformers_list, selected_columns) ready for ColumnTransformer.
    """
    numeric_cols = list(numeric_cols_tuple)
    categorical_cols = list(categorical_cols_tuple)

    transformers = []
    selected_cols = []

    if numeric_cols:
        num_tf = Pipeline(steps=[
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler())
        ])
        transformers.append(("num", num_tf, numeric_cols))
        selected_cols.extend(numeric_cols)

    if categorical_cols:
        # Use sparse_output=True to reduce memory footprint
        cat_tf = Pipeline(steps=[
            ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
            ("onehot", OneHotEncoder(handle_unknown="ignore", sparse_output=True))
        ])
        transformers.append(("cat", cat_tf, categorical_cols))
        selected_cols.extend(categorical_cols)

    return transformers, selected_cols

def build_preprocessor(numeric_cols, categorical_cols):
    """
    Build a preprocessor using cached configuration.
    The configuration (pipeline structure) is memoized; the actual fit is not.

    Note: Returns sparse matrices when categorical columns are present.
    Use _ensure_dense() helper if model requires dense input.
    """
    # Convert to tuples for caching
    numeric_tuple = tuple(sorted(numeric_cols))
    categorical_tuple = tuple(sorted(categorical_cols))

    transformers, selected_cols = _get_cached_preprocessor_config(numeric_tuple, categorical_tuple)

    # Create new ColumnTransformer with cached config
    preprocessor = ColumnTransformer(transformers=transformers, remainder="drop")

    return preprocessor, selected_cols

def _ensure_dense(X):
    """
    Convert sparse matrix to dense if necessary.

    Helper function for models that don't support sparse input
    (DecisionTree, RandomForest). LogisticRegression and KNN
    handle sparse matrices natively.
    """
    from scipy import sparse
    if sparse.issparse(X):
        return X.toarray()
    return X

def tune_model_complexity(model, level):
    """
    Map a 1–10 slider value to model hyperparameters.
    Levels 1–3: Conservative / simple
    Levels 4–7: Balanced
    Levels 8–10: Aggressive / risk of overfitting
    """
    level = int(level)
    if isinstance(model, LogisticRegression):
        c_map = {1: 0.01, 2: 0.025, 3: 0.05, 4: 0.1, 5: 0.25, 6: 0.5, 7: 1.0, 8: 2.0, 9: 5.0, 10: 10.0}
        model.C = c_map.get(level, 1.0)
        model.max_iter = max(getattr(model, "max_iter", 0), 500)
    elif isinstance(model, RandomForestClassifier):
        depth_map = {1: 3, 2: 5, 3: 7, 4: 9, 5: 11, 6: 15, 7: 20, 8: 25, 9: None, 10: None}
        est_map = {1: 20, 2: 30, 3: 40, 4: 60, 5: 80, 6: 100, 7: 120, 8: 150, 9: 180, 10: 220}
        model.max_depth = depth_map.get(level, 10)
        model.n_estimators = est_map.get(level, 100)
    elif isinstance(model, DecisionTreeClassifier):
        depth_map = {1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 8, 7: 10, 8: 12, 9: 15, 10: None}
        model.max_depth = depth_map.get(level, 6)
    elif isinstance(model, KNeighborsClassifier):
        k_map = {1: 100, 2: 75, 3: 60, 4: 50, 5: 40, 6: 30, 7: 25, 8: 15, 9: 7, 10: 3}
        model.n_neighbors = k_map.get(level, 25)
    return model
|
|
1178
|
+
|
|
1179
|
+
# --- New Helper Functions for HTML Generation ---

def _normalize_team_name(name: str) -> str:
    """
    Normalize team name for consistent comparison and storage.

    Strips leading/trailing whitespace and collapses multiple spaces into single spaces.
    This ensures consistent formatting across environment variables, state, and leaderboard rendering.

    Args:
        name: Team name to normalize (can be None or empty)

    Returns:
        str: Normalized team name, or empty string if input is None/empty

    Examples:
        >>> _normalize_team_name(" The Ethical Explorers ")
        'The Ethical Explorers'
        >>> _normalize_team_name("The Moral Champions")
        'The Moral Champions'
        >>> _normalize_team_name(None)
        ''
    """
    if not name:
        return ""
    return " ".join(str(name).strip().split())


# Team name translation helpers for UI display (Catalan)
def translate_team_name_for_display(team_en: str, lang: str = "ca") -> str:
    """
    Translate a canonical English team name to the specified language for UI display.
    Falls back to English if no translation is found.

    Internal logic always uses canonical English names. This is only for UI display.
    """
    if lang not in TEAM_NAME_TRANSLATIONS:
        lang = "en"
    return TEAM_NAME_TRANSLATIONS[lang].get(team_en, team_en)


def translate_team_name_to_english(display_name: str, lang: str = "ca") -> str:
    """
    Reverse lookup: given a localized team name, return the canonical English name.
    Returns the original display_name if not found.

    For future use if user input needs to be normalized back to English.
    """
    if lang not in TEAM_NAME_TRANSLATIONS:
        return display_name  # Already English or unknown

    translations = TEAM_NAME_TRANSLATIONS[lang]
    for english_name, localized_name in translations.items():
        if localized_name == display_name:
            return english_name
    return display_name


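# Round-trip sketch for the two helpers above. The real TEAM_NAME_TRANSLATIONS mapping is
# defined elsewhere in this module; the dict below is a hypothetical stand-in for illustration.
def _example_team_name_round_trip():
    demo = {"ca": {"The Ethical Explorers": "Els Exploradors Ètics"}}  # hypothetical Catalan value
    english = "The Ethical Explorers"
    localized = demo["ca"].get(english, english)  # display name for the UI
    back = next((en for en, ca in demo["ca"].items() if ca == localized), localized)
    return localized, back  # back == english, the canonical name used internally
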
def _format_leaderboard_for_display(df: Optional[pd.DataFrame], lang: str = "ca") -> Optional[pd.DataFrame]:
    """
    Create a copy of the leaderboard DataFrame with team names translated for display.
    Does not mutate the original DataFrame.

    For potential future use when displaying full leaderboard.
    Internal logic should always use the original DataFrame with English team names.
    """
    if df is None:
        return None

    if df.empty or "Team" not in df.columns:
        return df.copy()

    df_display = df.copy()
    df_display["Team"] = df_display["Team"].apply(lambda t: translate_team_name_for_display(t, lang))
    return df_display


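# Usage sketch (illustrative only; assumes the module's TEAM_NAME_TRANSLATIONS is defined,
# as it is used by the helpers above): translate only the display copy and keep the
# original English-named frame for all internal logic.
def _example_format_leaderboard_for_display():
    import pandas as pd
    lb = pd.DataFrame({"Team": ["The Ethical Explorers"], "accuracy": [0.71]})
    display_copy = _format_leaderboard_for_display(lb, lang="ca")
    return lb["Team"].iloc[0], display_copy["Team"].iloc[0]  # original unchanged; copy localized if a "ca" entry exists
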
def _build_skeleton_leaderboard(rows=6, is_team=True, submit_button_label="5. 🔬 Construir i enviar el model"):
|
|
1257
|
+
context_label = "Equip" if is_team else "Individual"
|
|
1258
|
+
return f"""
|
|
1259
|
+
<div class='lb-placeholder' aria-live='polite'>
|
|
1260
|
+
<div class='lb-placeholder-title'>{context_label} · Classificació pendent</div>
|
|
1261
|
+
<div class='lb-placeholder-sub'>
|
|
1262
|
+
<p style='margin:0 0 6px 0;'>Envia el teu primer model i desbloqueja la classificació!</p>
|
|
1263
|
+
<p style='margin:0;'><strong>Fes clic a «{submit_button_label}» (a baix a l’esquerra)</strong> per començar!</p>
|
|
1264
|
+
</div>
|
|
1265
|
+
</div>
|
|
1266
|
+
"""
|
|
1267
|
+
# --- FIX APPLIED HERE ---
|
|
1268
|
+
def build_login_prompt_html():
|
|
1269
|
+
"""
|
|
1270
|
+
Generate HTML for the login prompt text *only*.
|
|
1271
|
+
The styled preview card will be prepended to this.
|
|
1272
|
+
"""
|
|
1273
|
+
return f"""
|
|
1274
|
+
<h2 style='color: #111827; margin-top:20px; border-top: 2px solid #e5e7eb; padding-top: 20px;'>🔐 Inicia sessió per enviar i classificar-te</h2>
|
|
1275
|
+
<div style='margin-top:16px; text-align:left; font-size:1rem; line-height:1.6; color:#374151;'>
|
|
1276
|
+
<p style='margin:12px 0;'>
|
|
1277
|
+
This is a preview run only. Sign in to publish your score to the live leaderboard,
|
|
1278
|
+
earn promotions, and contribute team points.
|
|
1279
|
+
</p>
|
|
1280
|
+
<p style='margin:12px 0;'>
|
|
1281
|
+
<strong>New user?</strong> Create a free account at
|
|
1282
|
+
<a href='https://www.modelshare.ai/login' target='_blank'
|
|
1283
|
+
style='color:#4f46e5; text-decoration:underline;'>modelshare.ai/login</a>
|
|
1284
|
+
</p>
|
|
1285
|
+
</div>
|
|
1286
|
+
"""
|
|
1287
|
+
# --- END OF FIX ---
|
|
1288
|
+
|
|
1289
|
+
def _build_kpi_card_html(new_score, last_score, new_rank, last_rank, submission_count, is_preview=False, is_pending=False, local_test_accuracy=None):
|
|
1290
|
+
"""Generates the HTML for the KPI feedback card. Supports preview mode label and pending state."""
|
|
1291
|
+
|
|
1292
|
+
# Handle pending state - show processing message with provisional diff
|
|
1293
|
+
if is_pending:
|
|
1294
|
+
title = "⏳ Processant l'enviament"
|
|
1295
|
+
acc_color = "#3b82f6" # Blue
|
|
1296
|
+
acc_text = f"{(local_test_accuracy * 100):.2f}%" if local_test_accuracy is not None else "N/A"
|
|
1297
|
+
|
|
1298
|
+
# Compute provisional diff between local (new) and last score
|
|
1299
|
+
if local_test_accuracy is not None and last_score is not None and last_score > 0:
|
|
1300
|
+
score_diff = local_test_accuracy - last_score
|
|
1301
|
+
if abs(score_diff) < 0.0001:
|
|
1302
|
+
acc_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #6b7280; margin:0;'>Sense canvis (↔) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualització de la classificació pendent...</p>"
|
|
1303
|
+
elif score_diff > 0:
|
|
1304
|
+
acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #16a34a; margin:0;'>+{(score_diff * 100):.2f} (⬆️) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualització de la classificació pendent...</p>"
|
|
1305
|
+
else:
|
|
1306
|
+
acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #ef4444; margin:0;'>{(score_diff * 100):.2f} (⬇️) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualització de la classificació pendent...</p>"
|
|
1307
|
+
else:
|
|
1308
|
+
# No last score available - just show pending message
|
|
1309
|
+
acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Pending leaderboard update...</p>"
|
|
1310
|
+
|
|
1311
|
+
border_color = acc_color
|
|
1312
|
+
rank_color = "#6b7280" # Gray
|
|
1313
|
+
rank_text = "Pendent"
|
|
1314
|
+
rank_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0;'>Calculant la posició...</p>"
|
|
1315
|
+
|
|
1316
|
+
# Handle preview mode - Styled to match "success" card
|
|
1317
|
+
elif is_preview:
|
|
1318
|
+
title = "🔬 Prova de vista prèvia finalitzada!"
|
|
1319
|
+
acc_color = "#16a34a" # Green (like success)
|
|
1320
|
+
acc_text = f"{(new_score * 100):.2f}%" if new_score > 0 else "N/A"
|
|
1321
|
+
acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>(Només vista prèvia - no s'ha enviat)</p>" # Neutral color
|
|
1322
|
+
border_color = acc_color # Green border
|
|
1323
|
+
rank_color = "#3b82f6" # Blue (like rank)
|
|
1324
|
+
rank_text = "N/A" # Placeholder
|
|
1325
|
+
rank_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0;'>Sense posició (vista prèvia)</p>" # Neutral color
|
|
1326
|
+
|
|
1327
|
+
# 1. Handle First Submission
|
|
1328
|
+
elif submission_count == 0:
|
|
1329
|
+
title = "🎉 Primer model enviat!"
|
|
1330
|
+
acc_color = "#16a34a" # green
|
|
1331
|
+
acc_text = f"{(new_score * 100):.2f}%"
|
|
1332
|
+
acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>(La teva primera puntuació!)</p>"
|
|
1333
|
+
|
|
1334
|
+
rank_color = "#3b82f6" # blue
|
|
1335
|
+
rank_text = f"#{new_rank}"
|
|
1336
|
+
rank_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #3b82f6; margin:0;'>¡Ja ets a la taula!</p>"
|
|
1337
|
+
border_color = acc_color
|
|
1338
|
+
|
|
1339
|
+
else:
|
|
1340
|
+
# 2. Handle Score Changes
|
|
1341
|
+
score_diff = new_score - last_score
|
|
1342
|
+
if abs(score_diff) < 0.0001:
|
|
1343
|
+
title = "✅ Enviament completat!"
|
|
1344
|
+
acc_color = "#6b7280" # gray
|
|
1345
|
+
acc_text = f"{(new_score * 100):.2f}%"
|
|
1346
|
+
acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>Sense canvis (↔)</p>"
|
|
1347
|
+
border_color = acc_color
|
|
1348
|
+
elif score_diff > 0:
|
|
1349
|
+
title = "✅ Enviament completat!"
|
|
1350
|
+
acc_color = "#16a34a" # green
|
|
1351
|
+
acc_text = f"{(new_score * 100):.2f}%"
|
|
1352
|
+
acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>+{(score_diff * 100):.2f} (⬆️)</p>"
|
|
1353
|
+
border_color = acc_color
|
|
1354
|
+
else:
|
|
1355
|
+
title = "📉 La puntuació ha baixat"
|
|
1356
|
+
acc_color = "#ef4444" # red
|
|
1357
|
+
acc_text = f"{(new_score * 100):.2f}%"
|
|
1358
|
+
acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>{(score_diff * 100):.2f} (⬇️)</p>"
|
|
1359
|
+
border_color = acc_color
|
|
1360
|
+
|
|
1361
|
+
# 3. Handle Rank Changes
|
|
1362
|
+
rank_diff = last_rank - new_rank
|
|
1363
|
+
rank_color = "#3b82f6" # blue
|
|
1364
|
+
rank_text = f"#{new_rank}"
|
|
1365
|
+
if last_rank == 0: # Handle first rank
|
|
1366
|
+
rank_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #3b82f6; margin:0;'>¡Ja ets a la taula!</p>"
|
|
1367
|
+
elif rank_diff > 0:
|
|
1368
|
+
rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #16a34a; margin:0;'>🚀 ¡Has pujat {rank_diff} posició/ons!</p>"
|
|
1369
|
+
elif rank_diff < 0:
|
|
1370
|
+
rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #ef4444; margin:0;'>🔻 Has baixat {abs(rank_diff)} posició/ons!</p>"
|
|
1371
|
+
else:
|
|
1372
|
+
rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {rank_color}; margin:0;'>Mantens la teva posició (↔)</p>"
|
|
1373
|
+
|
|
1374
|
+
return f"""
|
|
1375
|
+
<div class='kpi-card' style='border-color: {border_color};'>
|
|
1376
|
+
<h2 style='color: #111827; margin-top:0;'>{title}</h2>
|
|
1377
|
+
<div class='kpi-card-body'>
|
|
1378
|
+
<div class='kpi-metric-box'>
|
|
1379
|
+
<p class='kpi-label'>Nova precisió</p>
|
|
1380
|
+
<p class='kpi-score' style='color: {acc_color};'>{acc_text}</p>
|
|
1381
|
+
{acc_diff_html}
|
|
1382
|
+
</div>
|
|
1383
|
+
<div class='kpi-metric-box'>
|
|
1384
|
+
<p class='kpi-label'>La teva posició</p>
|
|
1385
|
+
<p class='kpi-score' style='color: {rank_color};'>{rank_text}</p>
|
|
1386
|
+
{rank_diff_html}
|
|
1387
|
+
</div>
|
|
1388
|
+
</div>
|
|
1389
|
+
</div>
|
|
1390
|
+
"""
|
|
1391
|
+
|
|
1392
|
+
def _build_team_html(team_summary_df, team_name):
|
|
1393
|
+
"""
|
|
1394
|
+
Generates the HTML for the team leaderboard.
|
|
1395
|
+
|
|
1396
|
+
Uses normalized, case-insensitive comparison to highlight the user's team row,
|
|
1397
|
+
ensuring reliable highlighting even with whitespace or casing variations.
|
|
1398
|
+
|
|
1399
|
+
Team names are translated to Catalan for display only. Internal comparisons
|
|
1400
|
+
use the unmodified English team names from the DataFrame.
|
|
1401
|
+
"""
|
|
1402
|
+
if team_summary_df is None or team_summary_df.empty:
|
|
1403
|
+
return "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Encara no hi ha enviaments per equips.</p>"
|
|
1404
|
+
|
|
1405
|
+
# Normalize the current user's team name for comparison (using English names)
|
|
1406
|
+
normalized_user_team = _normalize_team_name(team_name).lower()
|
|
1407
|
+
|
|
1408
|
+
header = """
|
|
1409
|
+
<table class='leaderboard-html-table'>
|
|
1410
|
+
<thead>
|
|
1411
|
+
<tr>
|
|
1412
|
+
<th>Posició</th>
|
|
1413
|
+
<th>Equip</th>
|
|
1414
|
+
<th>Millor Puntuació</th>
|
|
1415
|
+
<th>Mitjana</th>
|
|
1416
|
+
<th>Enviaments</th>
|
|
1417
|
+
</tr>
|
|
1418
|
+
</thead>
|
|
1419
|
+
<tbody>
|
|
1420
|
+
"""
|
|
1421
|
+
|
|
1422
|
+
body = ""
|
|
1423
|
+
for index, row in team_summary_df.iterrows():
|
|
1424
|
+
# Normalize the row's team name and compare case-insensitively (using English names)
|
|
1425
|
+
normalized_row_team = _normalize_team_name(row["Team"]).lower()
|
|
1426
|
+
is_user_team = normalized_row_team == normalized_user_team
|
|
1427
|
+
row_class = "class='user-row-highlight'" if is_user_team else ""
|
|
1428
|
+
|
|
1429
|
+
# Translate team name to Catalan for display only
|
|
1430
|
+
display_team_name = translate_team_name_for_display(row["Team"], UI_TEAM_LANG)
|
|
1431
|
+
|
|
1432
|
+
body += f"""
|
|
1433
|
+
<tr {row_class}>
|
|
1434
|
+
<td>{index}</td>
|
|
1435
|
+
<td>{display_team_name}</td>
|
|
1436
|
+
<td>{(row['Best_Score'] * 100):.2f}%</td>
|
|
1437
|
+
<td>{(row['Avg_Score'] * 100):.2f}%</td>
|
|
1438
|
+
<td>{row['Submissions']}</td>
|
|
1439
|
+
</tr>
|
|
1440
|
+
"""
|
|
1441
|
+
|
|
1442
|
+
footer = "</tbody></table>"
|
|
1443
|
+
return header + body + footer
|
|
1444
|
+
|
|
1445
|
+
def _build_individual_html(individual_summary_df, username):
|
|
1446
|
+
"""Generates the HTML for the individual leaderboard."""
|
|
1447
|
+
if individual_summary_df is None or individual_summary_df.empty:
|
|
1448
|
+
return "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Encara no hi ha enviaments individuals.</p>"
|
|
1449
|
+
|
|
1450
|
+
header = """
|
|
1451
|
+
<table class='leaderboard-html-table'>
|
|
1452
|
+
<thead>
|
|
1453
|
+
<tr>
|
|
1454
|
+
<th>Posició</th>
|
|
1455
|
+
<th>Enginyer/a</th>
|
|
1456
|
+
<th>Millor Puntuació</th>
|
|
1457
|
+
<th>Enviaments</th>
|
|
1458
|
+
</tr>
|
|
1459
|
+
</thead>
|
|
1460
|
+
<tbody>
|
|
1461
|
+
"""
|
|
1462
|
+
|
|
1463
|
+
body = ""
|
|
1464
|
+
for index, row in individual_summary_df.iterrows():
|
|
1465
|
+
is_user = row["Engineer"] == username
|
|
1466
|
+
row_class = "class='user-row-highlight'" if is_user else ""
|
|
1467
|
+
body += f"""
|
|
1468
|
+
<tr {row_class}>
|
|
1469
|
+
<td>{index}</td>
|
|
1470
|
+
<td>{row['Engineer']}</td>
|
|
1471
|
+
<td>{(row['Best_Score'] * 100):.2f}%</td>
|
|
1472
|
+
<td>{row['Submissions']}</td>
|
|
1473
|
+
</tr>
|
|
1474
|
+
"""
|
|
1475
|
+
|
|
1476
|
+
footer = "</tbody></table>"
|
|
1477
|
+
return header + body + footer
|
|
1478
|
+
|
|
1479
|
+
|
|
1480
|
+
|
|
1481
|
+
|
|
1482
|
+
# --- End Helper Functions ---
|
|
1483
|
+
|
|
1484
|
+
|
|
1485
|
+
def generate_competitive_summary(leaderboard_df, team_name, username, last_submission_score, last_rank, submission_count):
|
|
1486
|
+
"""
|
|
1487
|
+
Build summaries, HTML, and KPI card.
|
|
1488
|
+
|
|
1489
|
+
Concurrency Note: Uses the team_name parameter directly for team highlighting,
|
|
1490
|
+
NOT os.environ, to prevent cross-user data leakage under concurrent requests.
|
|
1491
|
+
|
|
1492
|
+
Returns (team_html, individual_html, kpi_card_html, new_best_accuracy, new_rank, this_submission_score).
|
|
1493
|
+
"""
|
|
1494
|
+
team_summary_df = pd.DataFrame(columns=["Team", "Best_Score", "Avg_Score", "Submissions"])
|
|
1495
|
+
individual_summary_df = pd.DataFrame(columns=["Engineer", "Best_Score", "Submissions"])
|
|
1496
|
+
|
|
1497
|
+
if leaderboard_df is None or leaderboard_df.empty or "accuracy" not in leaderboard_df.columns:
|
|
1498
|
+
return (
|
|
1499
|
+
"<p style='text-align:center; color:#6b7280; padding-top:20px;'>La classificació està buida.</p>",
|
|
1500
|
+
"<p style='text-align:center; color:#6b7280; padding-top:20px;'>La classificació està buida.</p>",
|
|
1501
|
+
_build_kpi_card_html(0, 0, 0, 0, 0, is_preview=False, is_pending=False, local_test_accuracy=None),
|
|
1502
|
+
0.0, 0, 0.0
|
|
1503
|
+
)
|
|
1504
|
+
|
|
1505
|
+
# Team summary
|
|
1506
|
+
if "Team" in leaderboard_df.columns:
|
|
1507
|
+
team_summary_df = (
|
|
1508
|
+
leaderboard_df.groupby("Team")["accuracy"]
|
|
1509
|
+
.agg(Best_Score="max", Avg_Score="mean", Submissions="count")
|
|
1510
|
+
.reset_index()
|
|
1511
|
+
.sort_values("Best_Score", ascending=False)
|
|
1512
|
+
.reset_index(drop=True)
|
|
1513
|
+
)
|
|
1514
|
+
team_summary_df.index = team_summary_df.index + 1
|
|
1515
|
+
|
|
1516
|
+
# Individual summary
|
|
1517
|
+
user_bests = leaderboard_df.groupby("username")["accuracy"].max()
|
|
1518
|
+
user_counts = leaderboard_df.groupby("username")["accuracy"].count()
|
|
1519
|
+
individual_summary_df = pd.DataFrame(
|
|
1520
|
+
{"Engineer": user_bests.index, "Best_Score": user_bests.values, "Submissions": user_counts.values}
|
|
1521
|
+
).sort_values("Best_Score", ascending=False).reset_index(drop=True)
|
|
1522
|
+
individual_summary_df.index = individual_summary_df.index + 1
|
|
1523
|
+
|
|
1524
|
+
# Get stats for KPI card
|
|
1525
|
+
new_rank = 0
|
|
1526
|
+
new_best_accuracy = 0.0
|
|
1527
|
+
this_submission_score = 0.0
|
|
1528
|
+
|
|
1529
|
+
try:
|
|
1530
|
+
# All submissions for this user
|
|
1531
|
+
user_rows = leaderboard_df[leaderboard_df["username"] == username].copy()
|
|
1532
|
+
|
|
1533
|
+
if not user_rows.empty:
|
|
1534
|
+
# Attempt robust timestamp parsing
|
|
1535
|
+
if "timestamp" in user_rows.columns:
|
|
1536
|
+
parsed_ts = pd.to_datetime(user_rows["timestamp"], errors="coerce")
|
|
1537
|
+
|
|
1538
|
+
if parsed_ts.notna().any():
|
|
1539
|
+
# At least one valid timestamp → use parsed ordering
|
|
1540
|
+
user_rows["__parsed_ts"] = parsed_ts
|
|
1541
|
+
user_rows = user_rows.sort_values("__parsed_ts", ascending=False)
|
|
1542
|
+
this_submission_score = float(user_rows.iloc[0]["accuracy"])
|
|
1543
|
+
else:
|
|
1544
|
+
# All timestamps invalid → assume append order, take last as "latest"
|
|
1545
|
+
this_submission_score = float(user_rows.iloc[-1]["accuracy"])
|
|
1546
|
+
else:
|
|
1547
|
+
# No timestamp column → fallback to last row
|
|
1548
|
+
this_submission_score = float(user_rows.iloc[-1]["accuracy"])
|
|
1549
|
+
|
|
1550
|
+
# Rank & best accuracy (unchanged logic, but make sure we use the same best row)
|
|
1551
|
+
my_rank_row = None
|
|
1552
|
+
# Build individual summary before this block (already done above)
|
|
1553
|
+
my_rank_row = individual_summary_df[individual_summary_df["Engineer"] == username]
|
|
1554
|
+
if not my_rank_row.empty:
|
|
1555
|
+
new_rank = my_rank_row.index[0]
|
|
1556
|
+
new_best_accuracy = float(my_rank_row["Best_Score"].iloc[0])
|
|
1557
|
+
|
|
1558
|
+
except Exception as e:
|
|
1559
|
+
_log(f"Latest submission score extraction failed: {e}")
|
|
1560
|
+
|
|
1561
|
+
# Generate HTML outputs
|
|
1562
|
+
# Concurrency Note: Use team_name parameter directly, not os.environ
|
|
1563
|
+
team_html = _build_team_html(team_summary_df, team_name)
|
|
1564
|
+
individual_html = _build_individual_html(individual_summary_df, username)
|
|
1565
|
+
kpi_card_html = _build_kpi_card_html(
|
|
1566
|
+
this_submission_score, last_submission_score, new_rank, last_rank, submission_count,
|
|
1567
|
+
is_preview=False, is_pending=False, local_test_accuracy=None
|
|
1568
|
+
)
|
|
1569
|
+
|
|
1570
|
+
return team_html, individual_html, kpi_card_html, new_best_accuracy, new_rank, this_submission_score
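# Aggregation sketch matching the team summary above (toy data, illustration only):
# one row per team with its best score, mean score, and submission count, sorted by best score.
def _example_team_summary():
    import pandas as pd
    lb = pd.DataFrame({
        "Team": ["Team A", "Team A", "Team B"],
        "username": ["ada", "bob", "cid"],
        "accuracy": [0.70, 0.74, 0.72],
    })
    return (
        lb.groupby("Team")["accuracy"]
        .agg(Best_Score="max", Avg_Score="mean", Submissions="count")
        .reset_index()
        .sort_values("Best_Score", ascending=False)
        .reset_index(drop=True)
    )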
|
|
1571
|
+
|
|
1572
|
+
|
|
1573
|
+
def get_model_card(model_name):
|
|
1574
|
+
return MODEL_TYPES.get(model_name, {}).get("card_ca", "Descripció no disponible.")
|
|
1575
|
+
|
|
1576
|
+
def compute_rank_settings(
|
|
1577
|
+
submission_count,
|
|
1578
|
+
current_model,
|
|
1579
|
+
current_complexity,
|
|
1580
|
+
current_feature_set,
|
|
1581
|
+
current_data_size
|
|
1582
|
+
):
|
|
1583
|
+
"""
|
|
1584
|
+
Returns rank gating settings (updated for 1–10 complexity scale).
|
|
1585
|
+
Adapted for Catalan UI: Returns Tuple choices [(Display, Value)]
|
|
1586
|
+
"""
|
|
1587
|
+
|
|
1588
|
+
# Helper to generate feature choices (unchanged logic)
|
|
1589
|
+
def get_choices_for_rank(rank):
|
|
1590
|
+
if rank == 0: # Trainee
|
|
1591
|
+
return [opt for opt in FEATURE_SET_ALL_OPTIONS if opt[1] in FEATURE_SET_GROUP_1_VALS]
|
|
1592
|
+
if rank == 1: # Junior
|
|
1593
|
+
return [opt for opt in FEATURE_SET_ALL_OPTIONS if opt[1] in (FEATURE_SET_GROUP_1_VALS + FEATURE_SET_GROUP_2_VALS)]
|
|
1594
|
+
return FEATURE_SET_ALL_OPTIONS # Senior+
|
|
1595
|
+
|
|
1596
|
+
# Helper to generate Model Radio Tuples [(Catalan, English)]
|
|
1597
|
+
def get_model_tuples(available_english_keys):
|
|
1598
|
+
# FIX: Use MODEL_DISPLAY_MAP
|
|
1599
|
+
return [(MODEL_DISPLAY_MAP[k], k) for k in available_english_keys if k in MODEL_DISPLAY_MAP]
|
|
1600
|
+
|
|
1601
|
+
# Rank 0: Trainee
|
|
1602
|
+
if submission_count == 0:
|
|
1603
|
+
avail_keys = ["The Balanced Generalist"]
|
|
1604
|
+
return {
|
|
1605
|
+
"rank_message": "# 🧑🎓 Rang: Enginyer/a en Pràctiques\n<p style='font-size:24px; line-height:1.4;'>Per al teu primer enviament, només cal que facis clic al botó gran '🔬 Construir i enviar el model' de sota!</p>",
|
|
1606
|
+
"model_choices": get_model_tuples(avail_keys),
|
|
1607
|
+
"model_value": "The Balanced Generalist",
|
|
1608
|
+
"model_interactive": False,
|
|
1609
|
+
"complexity_max": 3,
|
|
1610
|
+
"complexity_value": min(current_complexity, 3),
|
|
1611
|
+
"feature_set_choices": get_choices_for_rank(0),
|
|
1612
|
+
"feature_set_value": FEATURE_SET_GROUP_1_VALS,
|
|
1613
|
+
"feature_set_interactive": False,
|
|
1614
|
+
"data_size_choices": ["Petita (20%)"],
|
|
1615
|
+
"data_size_value": "Petita (20%)",
|
|
1616
|
+
"data_size_interactive": False,
|
|
1617
|
+
}
|
|
1618
|
+
|
|
1619
|
+
# Rank 1: Junior
|
|
1620
|
+
elif submission_count == 1:
|
|
1621
|
+
# Define available models for Rank 1 using ENGLISH keys
|
|
1622
|
+
avail_keys = ["The Balanced Generalist", "The Rule-Maker", "The 'Nearest Neighbor'"]
|
|
1623
|
+
|
|
1624
|
+
return {
|
|
1625
|
+
"rank_message": "# 🎉 Has pujat de nivell! Enginyer/a Júnior\n<p style='font-size:24px; line-height:1.4;'>Nous models, mides de dades i variables desbloquejats!</p>",
|
|
1626
|
+
"model_choices": get_model_tuples(avail_keys),
|
|
1627
|
+
# Ensure current selection is valid for this rank, else reset to default
|
|
1628
|
+
"model_value": current_model if current_model in avail_keys else "The Balanced Generalist",
|
|
1629
|
+
"model_interactive": True,
|
|
1630
|
+
"complexity_max": 6,
|
|
1631
|
+
"complexity_value": min(current_complexity, 6),
|
|
1632
|
+
"feature_set_choices": get_choices_for_rank(1),
|
|
1633
|
+
"feature_set_value": current_feature_set,
|
|
1634
|
+
"feature_set_interactive": True,
|
|
1635
|
+
"data_size_choices": ["Petita (20%)", "Mitjana (60%)"],
|
|
1636
|
+
"data_size_value": current_data_size if current_data_size in ["Petita (20%)", "Mitjana (60%)"] else "Petita (20%)",
|
|
1637
|
+
"data_size_interactive": True,
|
|
1638
|
+
}
|
|
1639
|
+
|
|
1640
|
+
# Rank 2: Senior
|
|
1641
|
+
elif submission_count == 2:
|
|
1642
|
+
avail_keys = list(MODEL_TYPES.keys()) # All models
|
|
1643
|
+
|
|
1644
|
+
return {
|
|
1645
|
+
"rank_message": "# 🌟 Has pujat de nivell! Enginyer/a Sènior\n<p style='font-size:24px; line-height:1.4;'>Variables més potents desbloquejades! Els predictors més forts (com 'Edat' i 'Nombre de delictes previs') ja estan disponibles a la teva llista. Probablement milloraran la teva precisió, però recorda que sovint comporten més biaixos socials.</p>",
|
|
1646
|
+
"model_choices": get_model_tuples(avail_keys),
|
|
1647
|
+
"model_value": current_model if current_model in avail_keys else "The Deep Pattern-Finder",
|
|
1648
|
+
"model_interactive": True,
|
|
1649
|
+
"complexity_max": 8,
|
|
1650
|
+
"complexity_value": min(current_complexity, 8),
|
|
1651
|
+
"feature_set_choices": get_choices_for_rank(2),
|
|
1652
|
+
"feature_set_value": current_feature_set,
|
|
1653
|
+
"feature_set_interactive": True,
|
|
1654
|
+
"data_size_choices": ["Petita (20%)", "Mitjana (60%)", "Gran (80%)", "Completa (100%)"],
|
|
1655
|
+
"data_size_value": current_data_size if current_data_size in DATA_SIZE_DB_MAP else "Petita (20%)",
|
|
1656
|
+
"data_size_interactive": True,
|
|
1657
|
+
}
|
|
1658
|
+
|
|
1659
|
+
# Rank 3+: Lead
|
|
1660
|
+
else:
|
|
1661
|
+
avail_keys = list(MODEL_TYPES.keys()) # All models
|
|
1662
|
+
|
|
1663
|
+
return {
|
|
1664
|
+
"rank_message": "# 👑 Rang: Enginyer/a Principal\n<p style='font-size:24px; line-height:1.4;'>Totes les eines desbloquejades — optimitza amb llibertat!</p>",
|
|
1665
|
+
"model_choices": get_model_tuples(avail_keys),
|
|
1666
|
+
"model_value": current_model if current_model in avail_keys else "The Balanced Generalist",
|
|
1667
|
+
"model_interactive": True,
|
|
1668
|
+
"complexity_max": 10,
|
|
1669
|
+
"complexity_value": current_complexity,
|
|
1670
|
+
"feature_set_choices": get_choices_for_rank(3),
|
|
1671
|
+
"feature_set_value": current_feature_set,
|
|
1672
|
+
"feature_set_interactive": True,
|
|
1673
|
+
"data_size_choices": ["Petita (20%)", "Mitjana (60%)", "Gran (80%)", "Completa (100%)"],
|
|
1674
|
+
"data_size_value": current_data_size if current_data_size in DATA_SIZE_DB_MAP else "Petita (20%)",
|
|
1675
|
+
"data_size_interactive": True,
|
|
1676
|
+
}
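# Gating recap (sketch, not used by the app): how the unlocks returned above progress
# with submission_count, per the Trainee/Junior/Senior/Lead branches of compute_rank_settings.
def _rank_unlock_summary(submission_count: int) -> dict:
    if submission_count == 0:
        return {"rank": "Trainee", "complexity_max": 3, "data_sizes": 1, "models": 1}
    if submission_count == 1:
        return {"rank": "Junior", "complexity_max": 6, "data_sizes": 2, "models": 3}
    if submission_count == 2:
        return {"rank": "Senior", "complexity_max": 8, "data_sizes": 4, "models": "all"}
    return {"rank": "Lead", "complexity_max": 10, "data_sizes": 4, "models": "all"}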
|
|
1677
|
+
# Find components by name to yield updates
|
|
1678
|
+
# --- Existing global component placeholders ---
|
|
1679
|
+
submit_button = None
|
|
1680
|
+
submission_feedback_display = None
|
|
1681
|
+
team_leaderboard_display = None
|
|
1682
|
+
individual_leaderboard_display = None
|
|
1683
|
+
last_submission_score_state = None
|
|
1684
|
+
last_rank_state = None
|
|
1685
|
+
best_score_state = None
|
|
1686
|
+
submission_count_state = None
|
|
1687
|
+
rank_message_display = None
|
|
1688
|
+
model_type_radio = None
|
|
1689
|
+
complexity_slider = None
|
|
1690
|
+
feature_set_checkbox = None
|
|
1691
|
+
data_size_radio = None
|
|
1692
|
+
attempts_tracker_display = None
|
|
1693
|
+
team_name_state = None
|
|
1694
|
+
# Login components
|
|
1695
|
+
login_username = None
|
|
1696
|
+
login_password = None
|
|
1697
|
+
login_submit = None
|
|
1698
|
+
login_error = None
|
|
1699
|
+
# Add missing placeholders for auth states (FIX)
|
|
1700
|
+
username_state = None
|
|
1701
|
+
token_state = None
|
|
1702
|
+
first_submission_score_state = None # (already commented as "will be assigned globally")
|
|
1703
|
+
# Add state placeholders for readiness gating and preview tracking
|
|
1704
|
+
readiness_state = None
|
|
1705
|
+
was_preview_state = None
|
|
1706
|
+
kpi_meta_state = None
|
|
1707
|
+
last_seen_ts_state = None # Track last seen user timestamp from leaderboard
|
|
1708
|
+
|
|
1709
|
+
|
|
1710
|
+
def get_or_assign_team(username, token=None):
|
|
1711
|
+
"""
|
|
1712
|
+
Get the existing team for a user from the leaderboard, or assign a new random team.
|
|
1713
|
+
|
|
1714
|
+
Queries the playground leaderboard to check if the user has prior submissions with
|
|
1715
|
+
a team assignment. If found, returns that team (most recent if multiple submissions).
|
|
1716
|
+
Otherwise assigns a random team. All team names are normalized for consistency.
|
|
1717
|
+
|
|
1718
|
+
Args:
|
|
1719
|
+
username: str, the username to check for existing team
|
|
1720
|
+
token: str, optional authentication token for leaderboard fetch
|
|
1721
|
+
|
|
1722
|
+
Returns:
|
|
1723
|
+
tuple: (team_name: str, is_new: bool)
|
|
1724
|
+
- team_name: The normalized team name (existing or newly assigned)
|
|
1725
|
+
- is_new: True if newly assigned, False if existing team recovered
|
|
1726
|
+
"""
|
|
1727
|
+
try:
|
|
1728
|
+
# Query the leaderboard
|
|
1729
|
+
if playground is None:
|
|
1730
|
+
# Fallback to random assignment if playground not available
|
|
1731
|
+
print("Playground not available, assigning random team")
|
|
1732
|
+
new_team = _normalize_team_name(random.choice(TEAM_NAMES))
|
|
1733
|
+
return new_team, True
|
|
1734
|
+
|
|
1735
|
+
# Use centralized helper for authenticated leaderboard fetch
|
|
1736
|
+
leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
|
|
1737
|
+
|
|
1738
|
+
# Check if leaderboard has data and Team column
|
|
1739
|
+
if leaderboard_df is not None and not leaderboard_df.empty and "Team" in leaderboard_df.columns:
|
|
1740
|
+
# Filter for this user's submissions
|
|
1741
|
+
user_submissions = leaderboard_df[leaderboard_df["username"] == username]
|
|
1742
|
+
|
|
1743
|
+
if not user_submissions.empty:
|
|
1744
|
+
# Sort by timestamp (most recent first) if timestamp column exists
|
|
1745
|
+
# Use contextlib.suppress for resilient timestamp parsing
|
|
1746
|
+
if "timestamp" in user_submissions.columns:
|
|
1747
|
+
try:
|
|
1748
|
+
# Attempt to coerce timestamp column to datetime and sort descending
|
|
1749
|
+
user_submissions = user_submissions.copy()
|
|
1750
|
+
user_submissions["timestamp"] = pd.to_datetime(user_submissions["timestamp"], errors='coerce')
|
|
1751
|
+
user_submissions = user_submissions.sort_values("timestamp", ascending=False)
|
|
1752
|
+
print(f"Sorted {len(user_submissions)} submissions by timestamp for {username}")
|
|
1753
|
+
except Exception as ts_error:
|
|
1754
|
+
# If timestamp parsing fails, continue with unsorted DataFrame
|
|
1755
|
+
print(f"Warning: Could not sort by timestamp for {username}: {ts_error}")
|
|
1756
|
+
|
|
1757
|
+
# Get the most recent team assignment (first row after sorting)
|
|
1758
|
+
existing_team = user_submissions.iloc[0]["Team"]
|
|
1759
|
+
|
|
1760
|
+
# Check if team value is valid (not null/empty)
|
|
1761
|
+
if pd.notna(existing_team) and existing_team and str(existing_team).strip():
|
|
1762
|
+
normalized_team = _normalize_team_name(existing_team)
|
|
1763
|
+
print(f"Found existing team for {username}: {normalized_team}")
|
|
1764
|
+
return normalized_team, False
|
|
1765
|
+
|
|
1766
|
+
# No existing team found - assign random
|
|
1767
|
+
new_team = _normalize_team_name(random.choice(TEAM_NAMES))
|
|
1768
|
+
print(f"Assigning new team to {username}: {new_team}")
|
|
1769
|
+
return new_team, True
|
|
1770
|
+
|
|
1771
|
+
except Exception as e:
|
|
1772
|
+
# On any error, fall back to random assignment
|
|
1773
|
+
print(f"Error checking leaderboard for team: {e}")
|
|
1774
|
+
new_team = _normalize_team_name(random.choice(TEAM_NAMES))
|
|
1775
|
+
print(f"Fallback: assigning random team to {username}: {new_team}")
|
|
1776
|
+
return new_team, True
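# Minimal sketch of the "most recent team" lookup performed above, on a toy leaderboard
# (illustration only; column names match the ones this module expects).
def _example_most_recent_team():
    import pandas as pd
    lb = pd.DataFrame({
        "username": ["ada", "ada", "bob"],
        "Team": ["The Moral Champions", "The Ethical Explorers", "The Moral Champions"],
        "timestamp": ["2024-01-01", "2024-03-01", "2024-02-01"],
    })
    mine = lb[lb["username"] == "ada"].copy()
    mine["timestamp"] = pd.to_datetime(mine["timestamp"], errors="coerce")
    return mine.sort_values("timestamp", ascending=False).iloc[0]["Team"]  # "The Ethical Explorers"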
|
|
1777
|
+
|
|
1778
|
+
def perform_inline_login(username_input, password_input):
|
|
1779
|
+
"""
|
|
1780
|
+
Perform inline authentication and return credentials via gr.State updates.
|
|
1781
|
+
|
|
1782
|
+
Concurrency Note: This function NO LONGER stores per-user credentials in
|
|
1783
|
+
os.environ to prevent cross-user data leakage. Authentication state is
|
|
1784
|
+
returned exclusively via gr.State updates (username_state, token_state,
|
|
1785
|
+
team_name_state). Password is never stored server-side.
|
|
1786
|
+
|
|
1787
|
+
Args:
|
|
1788
|
+
username_input: str, the username entered by user
|
|
1789
|
+
password_input: str, the password entered by user
|
|
1790
|
+
|
|
1791
|
+
Returns:
|
|
1792
|
+
dict: Gradio component updates for login UI elements and submit button
|
|
1793
|
+
- On success: hides login form, shows success message, enables submit
|
|
1794
|
+
- On failure: keeps login form visible, shows error with signup link
|
|
1795
|
+
"""
|
|
1796
|
+
from aimodelshare.aws import get_aws_token
|
|
1797
|
+
|
|
1798
|
+
# Validate inputs
|
|
1799
|
+
if not username_input or not username_input.strip():
|
|
1800
|
+
error_html = """
|
|
1801
|
+
<div style='background:#fef2f2; padding:12px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
|
|
1802
|
+
<p style='margin:0; color:#991b1b; font-weight:500;'>⚠️ Username is required</p>
|
|
1803
|
+
</div>
|
|
1804
|
+
"""
|
|
1805
|
+
return {
|
|
1806
|
+
login_username: gr.update(),
|
|
1807
|
+
login_password: gr.update(),
|
|
1808
|
+
login_submit: gr.update(),
|
|
1809
|
+
login_error: gr.update(value=error_html, visible=True),
|
|
1810
|
+
submit_button: gr.update(),
|
|
1811
|
+
submission_feedback_display: gr.update(),
|
|
1812
|
+
team_name_state: gr.update(),
|
|
1813
|
+
username_state: gr.update(),
|
|
1814
|
+
token_state: gr.update()
|
|
1815
|
+
}
|
|
1816
|
+
|
|
1817
|
+
if not password_input or not password_input.strip():
|
|
1818
|
+
error_html = """
|
|
1819
|
+
<div style='background:#fef2f2; padding:12px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
|
|
1820
|
+
<p style='margin:0; color:#991b1b; font-weight:500;'>⚠️ Password is required</p>
|
|
1821
|
+
</div>
|
|
1822
|
+
"""
|
|
1823
|
+
return {
|
|
1824
|
+
login_username: gr.update(),
|
|
1825
|
+
login_password: gr.update(),
|
|
1826
|
+
login_submit: gr.update(),
|
|
1827
|
+
login_error: gr.update(value=error_html, visible=True),
|
|
1828
|
+
submit_button: gr.update(),
|
|
1829
|
+
submission_feedback_display: gr.update(),
|
|
1830
|
+
team_name_state: gr.update(),
|
|
1831
|
+
username_state: gr.update(),
|
|
1832
|
+
token_state: gr.update()
|
|
1833
|
+
}
|
|
1834
|
+
|
|
1835
|
+
# Concurrency Note: get_aws_token() reads credentials from os.environ, which creates
|
|
1836
|
+
# a race condition in multi-threaded environments. We use _auth_lock to serialize
|
|
1837
|
+
# credential injection, preventing concurrent requests from seeing each other's
|
|
1838
|
+
# credentials. The password is immediately cleared after the auth attempt.
|
|
1839
|
+
#
|
|
1840
|
+
# FUTURE: Ideally get_aws_token() would be refactored to accept credentials as
|
|
1841
|
+
# parameters instead of reading from os.environ. This lock is a workaround.
|
|
1842
|
+
username_clean = username_input.strip()
|
|
1843
|
+
|
|
1844
|
+
# Attempt to get AWS token with serialized credential injection
|
|
1845
|
+
try:
|
|
1846
|
+
with _auth_lock:
|
|
1847
|
+
os.environ["username"] = username_clean
|
|
1848
|
+
os.environ["password"] = password_input.strip() # Only for get_aws_token() call
|
|
1849
|
+
try:
|
|
1850
|
+
token = get_aws_token()
|
|
1851
|
+
finally:
|
|
1852
|
+
# SECURITY: Always clear credentials from environment, even on exception
|
|
1853
|
+
# Also clear stale env vars from previous implementations within the lock
|
|
1854
|
+
# to prevent any race conditions during cleanup
|
|
1855
|
+
os.environ.pop("password", None)
|
|
1856
|
+
os.environ.pop("username", None)
|
|
1857
|
+
os.environ.pop("AWS_TOKEN", None)
|
|
1858
|
+
os.environ.pop("TEAM_NAME", None)
|
|
1859
|
+
|
|
1860
|
+
# Get or assign team for this user with explicit token (already normalized by get_or_assign_team)
|
|
1861
|
+
team_name, is_new_team = get_or_assign_team(username_clean, token=token)
|
|
1862
|
+
# Normalize team name before storing (defensive - already normalized by get_or_assign_team)
|
|
1863
|
+
team_name = _normalize_team_name(team_name)
|
|
1864
|
+
|
|
1865
|
+
# Translate team name for display only (keep team_name_state in English)
|
|
1866
|
+
display_team_name = translate_team_name_for_display(team_name, UI_TEAM_LANG)
|
|
1867
|
+
|
|
1868
|
+
# Build success message based on whether team is new or existing
|
|
1869
|
+
if is_new_team:
|
|
1870
|
+
team_message = f"T'hem assignat a un nou equip: <b>{display_team_name}</b> 🎉"
|
|
1871
|
+
else:
|
|
1872
|
+
team_message = f"Hola de nou! Continues a l'equip: <b>{display_team_name}</b> ✅"
|
|
1873
|
+
|
|
1874
|
+
# Success: hide login form, show success message with team info, enable submit button
|
|
1875
|
+
success_html = f"""
|
|
1876
|
+
<div style='background:#f0fdf4; padding:16px; border-radius:8px; border-left:4px solid #16a34a; margin-top:12px;'>
|
|
1877
|
+
<p style='margin:0; color:#15803d; font-weight:600; font-size:1.1rem;'>✓ Signed in successfully!</p>
|
|
1878
|
+
<p style='margin:8px 0 0 0; color:#166534; font-size:0.95rem;'>
|
|
1879
|
+
{team_message}
|
|
1880
|
+
</p>
|
|
1881
|
+
<p style='margin:8px 0 0 0; color:#166534; font-size:0.95rem;'>
|
|
1882
|
+
Click "Build & Submit Model" again to publish your score.
|
|
1883
|
+
</p>
|
|
1884
|
+
</div>
|
|
1885
|
+
"""
|
|
1886
|
+
return {
|
|
1887
|
+
login_username: gr.update(visible=False),
|
|
1888
|
+
login_password: gr.update(visible=False),
|
|
1889
|
+
login_submit: gr.update(visible=False),
|
|
1890
|
+
login_error: gr.update(value=success_html, visible=True),
|
|
1891
|
+
submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
|
|
1892
|
+
submission_feedback_display: gr.update(visible=False),
|
|
1893
|
+
team_name_state: gr.update(value=team_name),
|
|
1894
|
+
username_state: gr.update(value=username_clean),
|
|
1895
|
+
token_state: gr.update(value=token)
|
|
1896
|
+
}
|
|
1897
|
+
|
|
1898
|
+
except Exception as e:
|
|
1899
|
+
# Note: Credentials are already cleaned up by the finally block in the try above.
|
|
1900
|
+
# The lock ensures no race condition during cleanup.
|
|
1901
|
+
|
|
1902
|
+
# Authentication failed: show error with signup link
|
|
1903
|
+
error_html = f"""
|
|
1904
|
+
<div style='background:#fef2f2; padding:16px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
|
|
1905
|
+
<p style='margin:0; color:#991b1b; font-weight:600; font-size:1.1rem;'>⚠️ Authentication failed</p>
|
|
1906
|
+
<p style='margin:8px 0; color:#7f1d1d; font-size:0.95rem;'>
|
|
1907
|
+
Could not verify your credentials. Please check your username and password.
|
|
1908
|
+
</p>
|
|
1909
|
+
<p style='margin:8px 0 0 0; color:#7f1d1d; font-size:0.95rem;'>
|
|
1910
|
+
<strong>New user?</strong> Create a free account at
|
|
1911
|
+
<a href='https://www.modelshare.ai/login' target='_blank'
|
|
1912
|
+
style='color:#dc2626; text-decoration:underline;'>modelshare.ai/login</a>
|
|
1913
|
+
</p>
|
|
1914
|
+
<details style='margin-top:12px; font-size:0.85rem; color:#7f1d1d;'>
|
|
1915
|
+
<summary style='cursor:pointer;'>Technical details</summary>
|
|
1916
|
+
<pre style='margin-top:8px; padding:8px; background:#fee; border-radius:4px; overflow-x:auto;'>{str(e)}</pre>
|
|
1917
|
+
</details>
|
|
1918
|
+
</div>
|
|
1919
|
+
"""
|
|
1920
|
+
return {
|
|
1921
|
+
login_username: gr.update(visible=True),
|
|
1922
|
+
login_password: gr.update(visible=True),
|
|
1923
|
+
login_submit: gr.update(visible=True),
|
|
1924
|
+
login_error: gr.update(value=error_html, visible=True),
|
|
1925
|
+
submit_button: gr.update(),
|
|
1926
|
+
submission_feedback_display: gr.update(),
|
|
1927
|
+
team_name_state: gr.update(),
|
|
1928
|
+
username_state: gr.update(),
|
|
1929
|
+
token_state: gr.update()
|
|
1930
|
+
}
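# Sketch of the lock-guarded credential-injection pattern used in perform_inline_login
# (hypothetical helper, illustration only; assumes the module-level _auth_lock and the
# os import at the top of this file).
from contextlib import contextmanager

@contextmanager
def _temporary_env_credentials(username, password):
    with _auth_lock:
        os.environ["username"] = username
        os.environ["password"] = password
        try:
            yield  # e.g. call get_aws_token() here
        finally:
            os.environ.pop("password", None)
            os.environ.pop("username", None)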
|
|
1931
|
+
|
|
1932
|
+
def run_experiment(
|
|
1933
|
+
model_name_key, # Receives ENGLISH KEY (e.g., "The Balanced Generalist") from the updated Radio Tuples
|
|
1934
|
+
complexity_level,
|
|
1935
|
+
feature_set,
|
|
1936
|
+
data_size_str, # Receives CATALAN LABEL (e.g., "Petita (20%)")
|
|
1937
|
+
team_name,
|
|
1938
|
+
last_submission_score,
|
|
1939
|
+
last_rank,
|
|
1940
|
+
submission_count,
|
|
1941
|
+
first_submission_score,
|
|
1942
|
+
best_score,
|
|
1943
|
+
username=None,
|
|
1944
|
+
token=None,
|
|
1945
|
+
readiness_flag=None,
|
|
1946
|
+
was_preview_prev=None,
|
|
1947
|
+
progress=gr.Progress()
|
|
1948
|
+
):
|
|
1949
|
+
"""
|
|
1950
|
+
Core experiment: Uses 'yield' for visual updates and progress bar.
|
|
1951
|
+
Updated to translate Catalan inputs to English keys for Cache/DB lookup.
|
|
1952
|
+
"""
|
|
1953
|
+
# --- COLLISION GUARDS ---
|
|
1954
|
+
# Log types of potentially shadowed names to ensure they refer to component objects, not dicts
|
|
1955
|
+
_log(f"DEBUG guard: types — submit_button={type(submit_button)} submission_feedback_display={type(submission_feedback_display)} kpi_meta_state={type(kpi_meta_state)} was_preview_state={type(was_preview_state)} readiness_flag_param={type(readiness_flag)}")
|
|
1956
|
+
|
|
1957
|
+
# If any of the component names are found as dicts (indicating parameter shadowing), short-circuit
|
|
1958
|
+
if isinstance(submit_button, dict) or isinstance(submission_feedback_display, dict) or isinstance(kpi_meta_state, dict) or isinstance(was_preview_state, dict):
|
|
1959
|
+
error_html = """
|
|
1960
|
+
<div class='kpi-card' style='border-color: #ef4444;'>
|
|
1961
|
+
<h2 style='color: #111827; margin-top:0;'>⚠️ Configuration Error</h2>
|
|
1962
|
+
<div class='kpi-card-body'>
|
|
1963
|
+
<p style='color: #991b1b;'>Parameter shadowing detected. Global component variables were shadowed by local parameters.</p>
|
|
1964
|
+
<p style='color: #7f1d1d; margin-top: 8px;'>Please refresh the page and try again. If the issue persists, contact support.</p>
|
|
1965
|
+
</div>
|
|
1966
|
+
</div>
|
|
1967
|
+
"""
|
|
1968
|
+
yield {
|
|
1969
|
+
submission_feedback_display: gr.update(value=error_html, visible=True),
|
|
1970
|
+
submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True)
|
|
1971
|
+
}
|
|
1972
|
+
return
|
|
1973
|
+
|
|
1974
|
+
# --- TRANSLATION LOGIC (THE FIX) ---
|
|
1975
|
+
# 1. Translate Data Size to English for DB/Cache Lookup
|
|
1976
|
+
# Example: "Petita (20%)" -> "Small (20%)"
|
|
1977
|
+
# Fallback to the input string if not found in map
|
|
1978
|
+
db_data_size = DATA_SIZE_DB_MAP.get(data_size_str, "Small (20%)")
|
|
1979
|
+
|
|
1980
|
+
# Sanitize feature_set: convert dicts/tuples to their string values
|
|
1981
|
+
sanitized_feature_set = []
|
|
1982
|
+
for feat in (feature_set or []):
|
|
1983
|
+
if isinstance(feat, dict):
|
|
1984
|
+
sanitized_feature_set.append(feat.get("value", str(feat)))
|
|
1985
|
+
elif isinstance(feat, tuple):
|
|
1986
|
+
sanitized_feature_set.append(feat[1] if len(feat) > 1 else str(feat))
|
|
1987
|
+
else:
|
|
1988
|
+
sanitized_feature_set.append(str(feat))
|
|
1989
|
+
feature_set = sanitized_feature_set
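# e.g. the sanitizer above maps {"value": "age"} -> "age" and ("Edat", "age") -> "age",
# and leaves plain strings such as "age" unchanged (illustrative values only).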
|
|
1990
|
+
|
|
1991
|
+
# Use readiness_flag parameter if provided, otherwise check readiness
|
|
1992
|
+
if readiness_flag is not None:
|
|
1993
|
+
ready = readiness_flag
|
|
1994
|
+
else:
|
|
1995
|
+
ready = _is_ready()
|
|
1996
|
+
_log(f"run_experiment: ready={ready}, username={username}, token_present={token is not None}")
|
|
1997
|
+
|
|
1998
|
+
# Default to "Unknown_User" only if no username provided via state.
|
|
1999
|
+
if not username:
|
|
2000
|
+
username = "Unknown_User"
|
|
2001
|
+
|
|
2002
|
+
# Helper to generate the animated HTML
|
|
2003
|
+
def get_status_html(step_num, title, subtitle):
|
|
2004
|
+
return f"""
|
|
2005
|
+
<div class='processing-status'>
|
|
2006
|
+
<span class='processing-icon'>⚙️</span>
|
|
2007
|
+
<div class='processing-text'>Step {step_num}/5: {title}</div>
|
|
2008
|
+
<div class='processing-subtext'>{subtitle}</div>
|
|
2009
|
+
</div>
|
|
2010
|
+
"""
|
|
2011
|
+
|
|
2012
|
+
# --- Stage 1: Lock UI and give initial feedback ---
|
|
2013
|
+
progress(0.1, desc="Iniciant l'experiment...")
|
|
2014
|
+
initial_updates = {
|
|
2015
|
+
submit_button: gr.update(value="⏳ Experiment en curs...", interactive=False),
|
|
2016
|
+
submission_feedback_display: gr.update(value=get_status_html(1, "Iniciant", "Preparant les variables de dades..."), visible=True), # Make sure it's visible
|
|
2017
|
+
login_error: gr.update(visible=False), # Hide login success/error message
|
|
2018
|
+
attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count))
|
|
2019
|
+
}
|
|
2020
|
+
yield initial_updates
|
|
2021
|
+
|
|
2022
|
+
# Ensure model key is valid (It should already be English from the Radio Tuple)
|
|
2023
|
+
if not model_name_key or model_name_key not in MODEL_TYPES:
|
|
2024
|
+
model_name_key = DEFAULT_MODEL
|
|
2025
|
+
|
|
2026
|
+
complexity_level = safe_int(complexity_level, 2)
|
|
2027
|
+
|
|
2028
|
+
log_output = f"▶ New Experiment\nModel: {model_name_key}\n..."
|
|
2029
|
+
|
|
2030
|
+
# Check readiness
|
|
2031
|
+
with INIT_LOCK:
|
|
2032
|
+
flags = INIT_FLAGS.copy()
|
|
2033
|
+
|
|
2034
|
+
# Normalize variable name for consistency
|
|
2035
|
+
ready_for_submission = ready
|
|
2036
|
+
|
|
2037
|
+
# If not ready but warm mini available, run preview
|
|
2038
|
+
if not ready_for_submission and flags["warm_mini"] and X_TRAIN_WARM is not None:
|
|
2039
|
+
_log("Running warm mini preview (not ready yet)")
|
|
2040
|
+
progress(0.5, desc="Executant la vista prèvia...")
|
|
2041
|
+
yield {
|
|
2042
|
+
submission_feedback_display: gr.update(value=get_status_html("Vista prèvia", "Prova d'escalfament", "Provant amb un conjunt de dades reduït..."), visible=True),
|
|
2043
|
+
login_error: gr.update(visible=False)
|
|
2044
|
+
}
|
|
2045
|
+
|
|
2046
|
+
try:
|
|
2047
|
+
# Run preview on warm mini dataset
|
|
2048
|
+
numeric_cols = [f for f in feature_set if f in ALL_NUMERIC_COLS]
|
|
2049
|
+
categorical_cols = [f for f in feature_set if f in ALL_CATEGORICAL_COLS]
|
|
2050
|
+
|
|
2051
|
+
if not numeric_cols and not categorical_cols:
|
|
2052
|
+
raise ValueError("No features selected for modeling.")
|
|
2053
|
+
|
|
2054
|
+
# Quick preprocessing and training on warm mini (uses memoized preprocessor)
|
|
2055
|
+
preprocessor, selected_cols = build_preprocessor(numeric_cols, categorical_cols)
|
|
2056
|
+
|
|
2057
|
+
X_warm_processed = preprocessor.fit_transform(X_TRAIN_WARM[selected_cols])
|
|
2058
|
+
X_test_processed = preprocessor.transform(X_TEST_RAW[selected_cols])
|
|
2059
|
+
|
|
2060
|
+
base_model = MODEL_TYPES[model_name_key]["model_builder"]()
|
|
2061
|
+
tuned_model = tune_model_complexity(base_model, complexity_level)
|
|
2062
|
+
|
|
2063
|
+
# Handle sparse arrays for models that require dense input
|
|
2064
|
+
if isinstance(tuned_model, (DecisionTreeClassifier, RandomForestClassifier)):
|
|
2065
|
+
X_warm_for_fit = _ensure_dense(X_warm_processed)
|
|
2066
|
+
X_test_for_predict = _ensure_dense(X_test_processed)
|
|
2067
|
+
else:
|
|
2068
|
+
X_warm_for_fit = X_warm_processed
|
|
2069
|
+
X_test_for_predict = X_test_processed
|
|
2070
|
+
|
|
2071
|
+
tuned_model.fit(X_warm_for_fit, Y_TRAIN_WARM)
|
|
2072
|
+
|
|
2073
|
+
# Get preview score
|
|
2074
|
+
from sklearn.metrics import accuracy_score
|
|
2075
|
+
predictions = tuned_model.predict(X_test_for_predict)
|
|
2076
|
+
preview_score = accuracy_score(Y_TEST, predictions)
|
|
2077
|
+
|
|
2078
|
+
# Update metadata state
|
|
2079
|
+
new_kpi_meta = {
|
|
2080
|
+
"was_preview": True,
|
|
2081
|
+
"preview_score": preview_score,
|
|
2082
|
+
"ready_at_run_start": False,
|
|
2083
|
+
"poll_iterations": 0,
|
|
2084
|
+
"local_test_accuracy": preview_score,
|
|
2085
|
+
"this_submission_score": None,
|
|
2086
|
+
"new_best_accuracy": None,
|
|
2087
|
+
"rank": None
|
|
2088
|
+
}
|
|
2089
|
+
|
|
2090
|
+
# Show preview card
|
|
2091
|
+
preview_html = _build_kpi_card_html(
|
|
2092
|
+
preview_score, 0, 0, 0, -1,
|
|
2093
|
+
is_preview=True, is_pending=False, local_test_accuracy=None
|
|
2094
|
+
)
|
|
2095
|
+
|
|
2096
|
+
settings = compute_rank_settings(
|
|
2097
|
+
submission_count, model_name_key, complexity_level, feature_set, data_size_str
|
|
2098
|
+
)
|
|
2099
|
+
|
|
2100
|
+
final_updates = {
|
|
2101
|
+
submission_feedback_display: gr.update(value=preview_html, visible=True),
|
|
2102
|
+
team_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=True),
|
|
2103
|
+
individual_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=False),
|
|
2104
|
+
last_submission_score_state: last_submission_score,
|
|
2105
|
+
last_rank_state: last_rank,
|
|
2106
|
+
best_score_state: best_score,
|
|
2107
|
+
submission_count_state: submission_count,
|
|
2108
|
+
first_submission_score_state: first_submission_score,
|
|
2109
|
+
rank_message_display: settings["rank_message"],
|
|
2110
|
+
model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
|
|
2111
|
+
complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
|
|
2112
|
+
feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
|
|
2113
|
+
data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
|
|
2114
|
+
submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
|
|
2115
|
+
login_username: gr.update(visible=False),
|
|
2116
|
+
login_password: gr.update(visible=False),
|
|
2117
|
+
login_submit: gr.update(visible=False),
|
|
2118
|
+
login_error: gr.update(visible=False),
|
|
2119
|
+
attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
|
|
2120
|
+
was_preview_state: True,
|
|
2121
|
+
kpi_meta_state: new_kpi_meta,
|
|
2122
|
+
last_seen_ts_state: None # No timestamp for preview
|
|
2123
|
+
}
|
|
2124
|
+
yield final_updates
|
|
2125
|
+
return
|
|
2126
|
+
|
|
2127
|
+
except Exception as e:
|
|
2128
|
+
_log(f"Preview failed: {e}")
|
|
2129
|
+
# Fall through to error handling
|
|
2130
|
+
|
|
2131
|
+
if playground is None or not ready_for_submission:
|
|
2132
|
+
settings = compute_rank_settings(
|
|
2133
|
+
submission_count, model_name_key, complexity_level, feature_set, data_size_str
|
|
2134
|
+
)
|
|
2135
|
+
|
|
2136
|
+
error_msg = "<p style='text-align:center; color:red; padding:20px 0;'>"
|
|
2137
|
+
if playground is None:
|
|
2138
|
+
error_msg += "Playground not connected. Please try again later."
|
|
2139
|
+
else:
|
|
2140
|
+
error_msg += "Data still initializing. Please wait a moment and try again."
|
|
2141
|
+
error_msg += "</p>"
|
|
2142
|
+
|
|
2143
|
+
error_kpi_meta = {
|
|
2144
|
+
"was_preview": False,
|
|
2145
|
+
"preview_score": None,
|
|
2146
|
+
"ready_at_run_start": False,
|
|
2147
|
+
"poll_iterations": 0,
|
|
2148
|
+
"local_test_accuracy": None,
|
|
2149
|
+
"this_submission_score": None,
|
|
2150
|
+
"new_best_accuracy": None,
|
|
2151
|
+
"rank": None
|
|
2152
|
+
}
|
|
2153
|
+
|
|
2154
|
+
error_updates = {
|
|
2155
|
+
submission_feedback_display: gr.update(value=error_msg, visible=True),
|
|
2156
|
+
submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
|
|
2157
|
+
team_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=True),
|
|
2158
|
+
individual_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=False),
|
|
2159
|
+
last_submission_score_state: last_submission_score,
|
|
2160
|
+
last_rank_state: last_rank,
|
|
2161
|
+
best_score_state: best_score,
|
|
2162
|
+
submission_count_state: submission_count,
|
|
2163
|
+
first_submission_score_state: first_submission_score,
|
|
2164
|
+
rank_message_display: settings["rank_message"],
|
|
2165
|
+
model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
|
|
2166
|
+
complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
|
|
2167
|
+
feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
|
|
2168
|
+
data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
|
|
2169
|
+
login_username: gr.update(visible=False),
|
|
2170
|
+
login_password: gr.update(visible=False),
|
|
2171
|
+
login_submit: gr.update(visible=False),
|
|
2172
|
+
login_error: gr.update(visible=False),
|
|
2173
|
+
attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
|
|
2174
|
+
was_preview_state: False,
|
|
2175
|
+
kpi_meta_state: error_kpi_meta,
|
|
2176
|
+
last_seen_ts_state: None
|
|
2177
|
+
}
|
|
2178
|
+
yield error_updates
|
|
2179
|
+
return
|
|
2180
|
+
|
|
2181
|
+
try:
|
|
2182
|
+
# --- Stage 2: Smart Build (Cache vs Train) ---
|
|
2183
|
+
progress(0.3, desc="Building Model...")
|
|
2184
|
+
|
|
2185
|
+
# 1. Generate Cache Key (Matches format in precompute_cache.py)
|
|
2186
|
+
# Uses ENGLISH keys: "The Balanced Generalist|2|Small (20%)|age,race"
|
|
2187
|
+
sanitized_features = sorted([str(f) for f in feature_set])
|
|
2188
|
+
feature_key = ",".join(sanitized_features)
|
|
2189
|
+
|
|
2190
|
+
cache_key = f"{model_name_key}|{complexity_level}|{db_data_size}|{feature_key}"
|
|
2191
|
+
|
|
2192
|
+
_log(f"Generated Key: {cache_key}") # Debug Log
|
|
2193
|
+
|
|
2194
|
+
# 2. Check Cache
|
|
2195
|
+
cached_predictions = get_cached_prediction(cache_key)
|
|
2196
|
+
|
|
2197
|
+
# Initialize submission variables
|
|
2198
|
+
predictions = None
|
|
2199
|
+
tuned_model = None
|
|
2200
|
+
preprocessor = None
|
|
2201
|
+
|
|
2202
|
+
if cached_predictions:
|
|
2203
|
+
# === FAST PATH (Zero CPU) ===
|
|
2204
|
+
_log(f"⚡ CACHE HIT: {cache_key}")
|
|
2205
|
+
yield {
|
|
2206
|
+
submission_feedback_display: gr.update(value=get_status_html(2, "Training Model", "⚡ The machine is learning from history..."), visible=True),
|
|
2207
|
+
login_error: gr.update(visible=False)
|
|
2208
|
+
}
|
|
2209
|
+
|
|
2210
|
+
# --- DECOMPRESSION STEP ---
|
|
2211
|
+
if isinstance(cached_predictions, str):
|
|
2212
|
+
predictions = [int(c) for c in cached_predictions]
|
|
2213
|
+
else:
|
|
2214
|
+
predictions = cached_predictions
|
|
2215
|
+
|
|
2216
|
+
tuned_model = None
|
|
2217
|
+
preprocessor = None
|
|
2218
|
+
|
|
2219
|
+
else:
|
|
2220
|
+
# === CACHE MISS (Training Disabled) ===
|
|
2221
|
+
# This ensures we NEVER run heavy training code in production.
|
|
2222
|
+
msg = f"❌ CACHE MISS: {cache_key}"
|
|
2223
|
+
_log(msg)
|
|
2224
|
+
|
|
2225
|
+
# User-friendly error message (Catalan)
|
|
2226
|
+
error_html = f"""
|
|
2227
|
+
<div style='background:#fee2e2; padding:16px; border-radius:8px; border:2px solid #ef4444; color:#991b1b; text-align:center;'>
|
|
2228
|
+
<h3 style='margin:0;'>⚠️ Configuració no trobada</h3>
|
|
2229
|
+
<p style='margin:8px 0;'>Aquesta combinació específica de paràmetres no s'ha trobat a la nostra base de dades.</p>
|
|
2230
|
+
<p style='font-size:0.9em;'>Per garantir l'estabilitat del sistema, l'entrenament en temps real està desactivat. Si us plau, ajusta la configuració (per exemple, canvia la "Mida de les dades" o l'"Estratègia del model") i torna-ho a provar.</p>
|
|
2231
|
+
</div>
|
|
2232
|
+
"""
|
|
2233
|
+
|
|
2234
|
+
yield {
|
|
2235
|
+
submission_feedback_display: gr.update(value=error_html, visible=True),
|
|
2236
|
+
submit_button: gr.update(value="🔬 Construir i enviar el model", interactive=True),
|
|
2237
|
+
login_error: gr.update(visible=False)
|
|
2238
|
+
}
|
|
2239
|
+
return # <--- CRITICAL: Stop execution here.
|
|
2240
|
+
|
|
2241
|
+
|
|
2242
|
+
# --- Stage 3: Submit (API Call 1) ---
|
|
2243
|
+
# AUTHENTICATION GATE: Check for token before submission
|
|
2244
|
+
if token is None:
|
|
2245
|
+
# User not authenticated - compute preview score
|
|
2246
|
+
progress(0.6, desc="Computing Preview Score...")
|
|
2247
|
+
|
|
2248
|
+
# NOTE: Logic updated to handle cached predictions
|
|
2249
|
+
from sklearn.metrics import accuracy_score
|
|
2250
|
+
|
|
2251
|
+
# Ensure format is correct (list vs array)
|
|
2252
|
+
if isinstance(predictions, list):
|
|
2253
|
+
preds_for_metric = np.array(predictions)
|
|
2254
|
+
else:
|
|
2255
|
+
preds_for_metric = predictions
|
|
2256
|
+
|
|
2257
|
+
preview_score = accuracy_score(Y_TEST, preds_for_metric)
|
|
2258
|
+
|
|
2259
|
+
# ... (Rest of preview logic remains the same) ...
|
|
2260
|
+
|
|
2261
|
+
preview_kpi_meta = {
|
|
2262
|
+
"was_preview": True, "preview_score": preview_score, "ready_at_run_start": ready,
|
|
2263
|
+
"poll_iterations": 0, "local_test_accuracy": preview_score,
|
|
2264
|
+
"this_submission_score": None, "new_best_accuracy": None, "rank": None
|
|
2265
|
+
}
|
|
2266
|
+
|
|
2267
|
+
# 1. Generate the styled preview card
|
|
2268
|
+
preview_card_html = _build_kpi_card_html(
|
|
2269
|
+
new_score=preview_score, last_score=0, new_rank=0, last_rank=0,
|
|
2270
|
+
submission_count=-1, is_preview=True, is_pending=False, local_test_accuracy=None
|
|
2271
|
+
)
|
|
2272
|
+
|
|
2273
|
+
# 2. Inject login text
|
|
2274
|
+
login_prompt_text_html = build_login_prompt_html()
|
|
2275
|
+
closing_div_index = preview_card_html.rfind("</div>")
|
|
2276
|
+
if closing_div_index != -1:
|
|
2277
|
+
combined_html = preview_card_html[:closing_div_index] + login_prompt_text_html + "</div>"
|
|
2278
|
+
else:
|
|
2279
|
+
combined_html = preview_card_html + login_prompt_text_html
|
|
2280
|
+
|
|
2281
|
+
settings = compute_rank_settings(submission_count, model_name_key, complexity_level, feature_set, data_size_str)
|
|
2282
|
+
|
|
2283
|
+
gate_updates = {
|
|
2284
|
+
submission_feedback_display: gr.update(value=combined_html, visible=True),
|
|
2285
|
+
submit_button: gr.update(value="Sign In Required", interactive=False),
|
|
2286
|
+
login_username: gr.update(visible=True), login_password: gr.update(visible=True),
|
|
2287
|
+
login_submit: gr.update(visible=True), login_error: gr.update(value="", visible=False),
|
|
2288
|
+
team_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=True),
|
|
2289
|
+
individual_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=False),
|
|
2290
|
+
last_submission_score_state: last_submission_score, last_rank_state: last_rank,
|
|
2291
|
+
best_score_state: best_score, submission_count_state: submission_count,
|
|
2292
|
+
first_submission_score_state: first_submission_score,
|
|
2293
|
+
rank_message_display: settings["rank_message"],
|
|
2294
|
+
model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
|
|
2295
|
+
complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
|
|
2296
|
+
feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
|
|
2297
|
+
data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
|
|
2298
|
+
attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
|
|
2299
|
+
was_preview_state: True, kpi_meta_state: preview_kpi_meta, last_seen_ts_state: None
|
|
2300
|
+
}
|
|
2301
|
+
yield gate_updates
|
|
2302
|
+
return # Stop here
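# Editor's note (illustrative sketch, not part of the packaged file): the
# preview score computed above is plain classification accuracy, i.e. the
# fraction of cached predictions that match Y_TEST. For reference:
import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0])
# accuracy_score is equivalent to the mean of element-wise agreement.
assert accuracy_score(y_true, y_pred) == np.mean(y_true == y_pred) == 0.8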
|
|
2303
|
+
|
|
2304
|
+
# --- ATTEMPT LIMIT CHECK ---
|
|
2305
|
+
if submission_count >= ATTEMPT_LIMIT:
|
|
2306
|
+
limit_warning_html = f"""
|
|
2307
|
+
<div class='kpi-card' style='border-color: #ef4444;'>
|
|
2308
|
+
<h2 style='color: #111827; margin-top:0;'>🛑 Límit d'enviaments assolit</h2>
|
|
2309
|
+
<div class='kpi-card-body'>
|
|
2310
|
+
<div class='kpi-metric-box'>
|
|
2311
|
+
<p class='kpi-label'>Intents utilitzats</p>
|
|
2312
|
+
<p class='kpi-score' style='color: #ef4444;'>{ATTEMPT_LIMIT} / {ATTEMPT_LIMIT}</p>
|
|
2313
|
+
</div>
|
|
2314
|
+
</div>
|
|
2315
|
+
<div style='margin-top: 16px; background:#fef2f2; padding:16px; border-radius:12px; text-align:left; font-size:0.98rem; line-height:1.4;'>
|
|
2316
|
+
<p style='margin:0; color:#991b1b;'><b>Bona feina!</b> Desplaça't cap avall fins a "Finalitzar i reflexionar".</p>
|
|
2317
|
+
</div>
|
|
2318
|
+
</div>"""
|
|
2319
|
+
settings = compute_rank_settings(submission_count, model_name_key, complexity_level, feature_set, data_size_str)
|
|
2320
|
+
limit_reached_updates = {
|
|
2321
|
+
submission_feedback_display: gr.update(value=limit_warning_html, visible=True),
|
|
2322
|
+
submit_button: gr.update(value="🛑 Límit d'enviaments assolit", interactive=False),
|
|
2323
|
+
model_type_radio: gr.update(interactive=False), complexity_slider: gr.update(interactive=False),
|
|
2324
|
+
feature_set_checkbox: gr.update(interactive=False), data_size_radio: gr.update(interactive=False),
|
|
2325
|
+
attempts_tracker_display: gr.update(value=f"<div style='text-align:center; padding:8px; margin:8px 0; background:#fef2f2; border-radius:8px; border:1px solid #ef4444;'><p style='margin:0; color:#991b1b; font-weight:600;'>🛑 Intents utilitzats: {ATTEMPT_LIMIT}/{ATTEMPT_LIMIT}</p></div>"),
|
|
2326
|
+
team_leaderboard_display: team_leaderboard_display, individual_leaderboard_display: individual_leaderboard_display,
|
|
2327
|
+
last_submission_score_state: last_submission_score, last_rank_state: last_rank,
|
|
2328
|
+
best_score_state: best_score, submission_count_state: submission_count,
|
|
2329
|
+
first_submission_score_state: first_submission_score, rank_message_display: settings["rank_message"],
|
|
2330
|
+
login_username: gr.update(visible=False), login_password: gr.update(visible=False),
|
|
2331
|
+
login_submit: gr.update(visible=False), login_error: gr.update(visible=False),
|
|
2332
|
+
was_preview_state: False, kpi_meta_state: {}, last_seen_ts_state: None
|
|
2333
|
+
}
|
|
2334
|
+
yield limit_reached_updates
|
|
2335
|
+
return
|
|
2336
|
+
|
|
2337
|
+
progress(0.5, desc="S'està enviant al núvol...")
|
|
2338
|
+
yield {
|
|
2339
|
+
submission_feedback_display: gr.update(value=get_status_html(3, "Enviament en curs", "S'està enviant el model al servidor de la competició..."), visible=True),
|
|
2340
|
+
login_error: gr.update(visible=False)
|
|
2341
|
+
}
|
|
2342
|
+
|
|
2343
|
+
|
|
2344
|
+
description = f"{model_name_key} (Cplx:{complexity_level} Size:{data_size_str})"
|
|
2345
|
+
tags = f"team:{team_name},model:{model_name_key}"
|
|
2346
|
+
|
|
2347
|
+
# 1. FETCH BASELINE
|
|
2348
|
+
baseline_leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
|
|
2349
|
+
|
|
2350
|
+
from sklearn.metrics import accuracy_score
|
|
2351
|
+
local_test_accuracy = accuracy_score(Y_TEST, predictions)
|
|
2352
|
+
|
|
2353
|
+
# 2. SUBMIT & CAPTURE ACCURACY
|
|
2354
|
+
def _submit():
|
|
2355
|
+
return playground.submit_model(
|
|
2356
|
+
model=tuned_model, # None when predictions come from the cache
|
|
2357
|
+
preprocessor=preprocessor, # None when predictions come from the cache
|
|
2358
|
+
prediction_submission=predictions, # We explicitly send predictions
|
|
2359
|
+
input_dict={'description': description, 'tags': tags},
|
|
2360
|
+
custom_metadata={'Team': team_name, 'Moral_Compass': 0},
|
|
2361
|
+
token=token,
|
|
2362
|
+
return_metrics=["accuracy"]
|
|
2363
|
+
)
|
|
2364
|
+
|
|
2365
|
+
try:
|
|
2366
|
+
submit_result = _retry_with_backoff(_submit, description="model submission")
|
|
2367
|
+
if isinstance(submit_result, tuple) and len(submit_result) == 3:
|
|
2368
|
+
_, _, metrics = submit_result
|
|
2369
|
+
if metrics and "accuracy" in metrics and metrics["accuracy"] is not None:
|
|
2370
|
+
this_submission_score = float(metrics["accuracy"])
|
|
2371
|
+
else:
|
|
2372
|
+
this_submission_score = local_test_accuracy
|
|
2373
|
+
else:
|
|
2374
|
+
this_submission_score = local_test_accuracy
|
|
2375
|
+
except Exception as e:
|
|
2376
|
+
_log(f"Submission return parsing failed: {e}. Using local accuracy.")
|
|
2377
|
+
this_submission_score = local_test_accuracy
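# Editor's note (illustrative sketch, not part of the packaged file): the
# `_retry_with_backoff` helper used above is defined elsewhere in this module
# and is not shown in this hunk. A minimal helper with that contract might
# look roughly like the following (the name, defaults and logging are assumptions):
import time

def _retry_with_backoff_sketch(fn, description="operation", retries=3, base_delay=1.0):
    """Call fn(); on failure retry with exponential backoff, re-raising on the last attempt."""
    for attempt in range(retries):
        try:
            return fn()
        except Exception as exc:
            if attempt == retries - 1:
                raise
            delay = base_delay * (2 ** attempt)
            print(f"{description} failed ({exc}); retrying in {delay:.1f}s")
            time.sleep(delay)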
|
|
2378
|
+
|
|
2379
|
+
_log(f"Submission successful. Server Score: {this_submission_score}")
|
|
2380
|
+
|
|
2381
|
+
try:
|
|
2382
|
+
# Lightweight leaderboard fetch whose only purpose is to trigger the backend merge lambda
|
|
2383
|
+
_log("Triggering backend merge...")
|
|
2384
|
+
playground.get_leaderboard(token=token)
|
|
2385
|
+
except Exception:
|
|
2386
|
+
# We ignore errors here because the 'submit_model' post
|
|
2387
|
+
# already succeeded. This is just a cleanup task.
|
|
2388
|
+
pass
|
|
2389
|
+
# -------------------------------------------------------------------------
|
|
2390
|
+
|
|
2391
|
+
# Immediately increment submission count...
|
|
2392
|
+
new_submission_count = submission_count + 1
|
|
2393
|
+
new_first_submission_score = first_submission_score
|
|
2394
|
+
if submission_count == 0 and first_submission_score is None:
|
|
2395
|
+
new_first_submission_score = this_submission_score
|
|
2396
|
+
|
|
2397
|
+
# --- Stage 4: Local Rank Calculation (Optimistic) ---
|
|
2398
|
+
progress(0.9, desc="Calculating Rank...")
|
|
2399
|
+
|
|
2400
|
+
# 3. SIMULATE UPDATED LEADERBOARD
|
|
2401
|
+
simulated_df = baseline_leaderboard_df.copy() if baseline_leaderboard_df is not None else pd.DataFrame()
|
|
2402
|
+
|
|
2403
|
+
# We use pd.Timestamp.now() to ensure pandas sorting logic sees this as the absolute latest
|
|
2404
|
+
new_row = pd.DataFrame([{
|
|
2405
|
+
"username": username,
|
|
2406
|
+
"accuracy": this_submission_score,
|
|
2407
|
+
"Team": team_name,
|
|
2408
|
+
"timestamp": pd.Timestamp.now(),
|
|
2409
|
+
"version": "latest"
|
|
2410
|
+
}])
|
|
2411
|
+
|
|
2412
|
+
if not simulated_df.empty:
|
|
2413
|
+
simulated_df = pd.concat([simulated_df, new_row], ignore_index=True)
|
|
2414
|
+
else:
|
|
2415
|
+
simulated_df = new_row
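# Editor's note (illustrative sketch, not part of the packaged file):
# `generate_competitive_summary` (its definition is not shown in this hunk)
# derives the rank from a frame like `simulated_df`. The general idea is:
# best accuracy per user, sorted descending, position of the current user.
import pandas as pd

def best_score_rank(df: pd.DataFrame, username: str) -> int:
    """1-based rank of `username` by their best accuracy (illustrative only)."""
    best = df.groupby("username")["accuracy"].max().sort_values(ascending=False)
    return int(best.index.get_loc(username)) + 1

_demo = pd.DataFrame({
    "username": ["ana", "ana", "berta", "carla"],
    "accuracy": [0.61, 0.68, 0.72, 0.65],
})
assert best_score_rank(_demo, "ana") == 2  # berta leads with 0.72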
|
|
2416
|
+
|
|
2417
|
+
# 4. GENERATE TABLES (Use helper for tables only)
|
|
2418
|
+
# We ignore the kpi_card return from this function because it might use internal sorting
|
|
2419
|
+
# that doesn't respect our new row perfectly.
|
|
2420
|
+
team_html, individual_html, _, new_best_accuracy, new_rank, _ = generate_competitive_summary(
|
|
2421
|
+
simulated_df, team_name, username, last_submission_score, last_rank, submission_count
|
|
2422
|
+
)
|
|
2423
|
+
|
|
2424
|
+
# 5. GENERATE KPI CARD EXPLICITLY (The Authority Fix)
|
|
2425
|
+
# We manually build the card using the score we KNOW we just got.
|
|
2426
|
+
kpi_card_html = _build_kpi_card_html(
|
|
2427
|
+
new_score=this_submission_score,
|
|
2428
|
+
last_score=last_submission_score,
|
|
2429
|
+
new_rank=new_rank,
|
|
2430
|
+
last_rank=last_rank,
|
|
2431
|
+
submission_count=submission_count,
|
|
2432
|
+
is_preview=False,
|
|
2433
|
+
is_pending=False
|
|
2434
|
+
)
|
|
2435
|
+
|
|
2436
|
+
|
|
2437
|
+
|
|
2438
|
+
# --- Stage 5: Final UI Update ---
|
|
2439
|
+
progress(1.0, desc="Complete!")
|
|
2440
|
+
|
|
2441
|
+
success_kpi_meta = {
|
|
2442
|
+
"was_preview": False, "preview_score": None, "ready_at_run_start": ready,
|
|
2443
|
+
"poll_iterations": 0, "local_test_accuracy": local_test_accuracy,
|
|
2444
|
+
"this_submission_score": this_submission_score, "new_best_accuracy": new_best_accuracy,
|
|
2445
|
+
"rank": new_rank, "pending": False, "optimistic_fallback": True
|
|
2446
|
+
}
|
|
2447
|
+
|
|
2448
|
+
settings = compute_rank_settings(new_submission_count, model_name_key, complexity_level, feature_set, data_size_str)
|
|
2449
|
+
|
|
2450
|
+
# -------------------------------------------------------------------------
|
|
2451
|
+
# NEW LOGIC: Check for Limit Reached immediately AFTER this submission
|
|
2452
|
+
# -------------------------------------------------------------------------
|
|
2453
|
+
limit_reached = new_submission_count >= ATTEMPT_LIMIT
|
|
2454
|
+
|
|
2455
|
+
# Prepare the UI state based on whether limit is reached
|
|
2456
|
+
if limit_reached:
|
|
2457
|
+
# 1. Append the Limit Warning HTML *below* the Result Card
|
|
2458
|
+
limit_html = f"""
|
|
2459
|
+
<div style='margin-top: 16px; border: 2px solid #ef4444; background:#fef2f2; padding:16px; border-radius:12px; text-align:left;'>
|
|
2460
|
+
<h3 style='margin:0 0 8px 0; color:#991b1b;'>🛑 Límit d'enviaments assolit ({ATTEMPT_LIMIT}/{ATTEMPT_LIMIT})</h3>
|
|
2461
|
+
<p style='margin:0; color:#7f1d1d; line-height:1.4;'>
|
|
2462
|
+
<b>Has utilitzat tots els intents d'aquesta sessió.</b><br>
|
|
2463
|
+
Revisa els teus resultats finals a dalt i després baixa fins a "Finalitzar i reflexionar" per continuar.
|
|
2464
|
+
</p>
|
|
2465
|
+
</div>
|
|
2466
|
+
"""
|
|
2467
|
+
final_html_display = kpi_card_html + limit_html
|
|
2468
|
+
|
|
2469
|
+
# 2. Disable all controls
|
|
2470
|
+
button_update = gr.update(value="🛑 Límit assolit", interactive=False)
|
|
2471
|
+
interactive_state = False
|
|
2472
|
+
tracker_html = f"<div style='text-align:center; padding:8px; margin:8px 0; background:#fef2f2; border-radius:8px; border:1px solid #ef4444;'><p style='margin:0; color:#991b1b; font-weight:600;'>🛑 Intents utilitzats: {ATTEMPT_LIMIT}/{ATTEMPT_LIMIT} (Max)</p></div>"
|
|
2473
|
+
|
|
2474
|
+
else:
|
|
2475
|
+
# Normal State: Show just the result card and keep controls active
|
|
2476
|
+
final_html_display = kpi_card_html
|
|
2477
|
+
button_update = gr.update(value="🔬 Construir i enviar model", interactive=True)
|
|
2478
|
+
interactive_state = True
|
|
2479
|
+
tracker_html = _build_attempts_tracker_html(new_submission_count)
|
|
2480
|
+
|
|
2481
|
+
# -------------------------------------------------------------------------
|
|
2482
|
+
|
|
2483
|
+
final_updates = {
|
|
2484
|
+
submission_feedback_display: gr.update(value=final_html_display, visible=True),
|
|
2485
|
+
team_leaderboard_display: team_html,
|
|
2486
|
+
individual_leaderboard_display: individual_html,
|
|
2487
|
+
last_submission_score_state: this_submission_score,
|
|
2488
|
+
last_rank_state: new_rank,
|
|
2489
|
+
best_score_state: new_best_accuracy,
|
|
2490
|
+
submission_count_state: new_submission_count,
|
|
2491
|
+
first_submission_score_state: new_first_submission_score,
|
|
2492
|
+
rank_message_display: settings["rank_message"],
|
|
2493
|
+
|
|
2494
|
+
# Apply the interactive state calculated above
|
|
2495
|
+
model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=(settings["model_interactive"] and interactive_state)),
|
|
2496
|
+
complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"], interactive=interactive_state),
|
|
2497
|
+
feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=(settings["feature_set_interactive"] and interactive_state)),
|
|
2498
|
+
data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=(settings["data_size_interactive"] and interactive_state)),
|
|
2499
|
+
|
|
2500
|
+
submit_button: button_update,
|
|
2501
|
+
|
|
2502
|
+
login_username: gr.update(visible=False), login_password: gr.update(visible=False),
|
|
2503
|
+
login_submit: gr.update(visible=False), login_error: gr.update(visible=False),
|
|
2504
|
+
attempts_tracker_display: gr.update(value=tracker_html),
|
|
2505
|
+
was_preview_state: False,
|
|
2506
|
+
kpi_meta_state: success_kpi_meta,
|
|
2507
|
+
last_seen_ts_state: time.time()
|
|
2508
|
+
}
|
|
2509
|
+
yield final_updates
|
|
2510
|
+
|
|
2511
|
+
except Exception as e:
|
|
2512
|
+
error_msg = f"ERROR: {e}"
|
|
2513
|
+
_log(f"Exception in run_experiment: {error_msg}")
|
|
2514
|
+
settings = compute_rank_settings(
|
|
2515
|
+
submission_count, model_name_key, complexity_level, feature_set, data_size_str
|
|
2516
|
+
)
|
|
2517
|
+
|
|
2518
|
+
exception_kpi_meta = {
|
|
2519
|
+
"was_preview": False,
|
|
2520
|
+
"preview_score": None,
|
|
2521
|
+
"ready_at_run_start": ready if 'ready' in locals() else False,
|
|
2522
|
+
"poll_iterations": 0,
|
|
2523
|
+
"local_test_accuracy": None,
|
|
2524
|
+
"this_submission_score": None,
|
|
2525
|
+
"new_best_accuracy": None,
|
|
2526
|
+
"rank": None,
|
|
2527
|
+
"error": str(e)
|
|
2528
|
+
}
|
|
2529
|
+
|
|
2530
|
+
error_updates = {
|
|
2531
|
+
submission_feedback_display: gr.update(
|
|
2532
|
+
f"<p style='text-align:center; color:red; padding:20px 0;'>An error occurred: {error_msg}</p>", visible=True
|
|
2533
|
+
),
|
|
2534
|
+
team_leaderboard_display: f"<p style='text-align:center; color:red; padding-top:20px;'>An error occurred: {error_msg}</p>",
|
|
2535
|
+
individual_leaderboard_display: f"<p style='text-align:center; color:red; padding-top:20px;'>An error occurred: {error_msg}</p>",
|
|
2536
|
+
last_submission_score_state: last_submission_score,
|
|
2537
|
+
last_rank_state: last_rank,
|
|
2538
|
+
best_score_state: best_score,
|
|
2539
|
+
submission_count_state: submission_count,
|
|
2540
|
+
first_submission_score_state: first_submission_score,
|
|
2541
|
+
rank_message_display: settings["rank_message"],
|
|
2542
|
+
model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
|
|
2543
|
+
complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
|
|
2544
|
+
feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
|
|
2545
|
+
data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
|
|
2546
|
+
submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
|
|
2547
|
+
login_username: gr.update(visible=False),
|
|
2548
|
+
login_password: gr.update(visible=False),
|
|
2549
|
+
login_submit: gr.update(visible=False),
|
|
2550
|
+
login_error: gr.update(visible=False),
|
|
2551
|
+
attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
|
|
2552
|
+
was_preview_state: False,
|
|
2553
|
+
kpi_meta_state: exception_kpi_meta,
|
|
2554
|
+
last_seen_ts_state: None
|
|
2555
|
+
}
|
|
2556
|
+
yield error_updates
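# Editor's note (illustrative sketch, not part of the packaged file):
# run_experiment above is a generator-style Gradio handler that streams partial
# UI updates by yielding dicts keyed by component. A minimal standalone example
# of the same pattern (the component names here are made up):
import time
import gradio as gr

with gr.Blocks() as _sketch:
    status = gr.Markdown(visible=False)
    run_btn = gr.Button("Run")

    def slow_task():
        # First partial update: show progress text and disable the button.
        yield {status: gr.update(value="Working...", visible=True),
               run_btn: gr.update(interactive=False)}
        time.sleep(1)
        # Final update: show the result and re-enable the button.
        yield {status: gr.update(value="Done"),
               run_btn: gr.update(interactive=True)}

    run_btn.click(slow_task, outputs=[status, run_btn])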
|
|
2557
|
+
|
|
2558
|
+
|
|
2559
|
+
def on_initial_load(username, token=None, team_name=""):
|
|
2560
|
+
"""
|
|
2561
|
+
Show the welcome call-to-action if this specific user has 0 submissions,
|
|
2562
|
+
even if the leaderboard/team already has data from others.
|
|
2563
|
+
"""
|
|
2564
|
+
initial_ui = compute_rank_settings(
|
|
2565
|
+
0, DEFAULT_MODEL, 2, DEFAULT_FEATURE_SET, DEFAULT_DATA_SIZE
|
|
2566
|
+
)
|
|
2567
|
+
|
|
2568
|
+
# 1. Prepare the Welcome HTML
|
|
2569
|
+
# Translate team name to Catalan for display only (keep team_name in English for logic)
|
|
2570
|
+
display_team = translate_team_name_for_display(team_name, UI_TEAM_LANG) if team_name else "El teu equip"
|
|
2571
|
+
|
|
2572
|
+
welcome_html = f"""
|
|
2573
|
+
<div style='text-align:center; padding: 30px 20px;'>
|
|
2574
|
+
<div style='font-size: 3rem; margin-bottom: 10px;'>👋</div>
|
|
2575
|
+
<h3 style='margin: 0 0 8px 0; color: #111827; font-size: 1.5rem;'>Ja formes part de l'equip: <b>{display_team}</b>!</h3>
|
|
2576
|
+
<p style='font-size: 1.1rem; color: #4b5563; margin: 0 0 20px 0;'>
|
|
2577
|
+
El teu equip necessita la teva ajuda per millorar la IA.
|
|
2578
|
+
</p>
|
|
2579
|
+
|
|
2580
|
+
<div style='background:#eff6ff; padding:16px; border-radius:12px; border:2px solid #bfdbfe; display:inline-block;'>
|
|
2581
|
+
<p style='margin:0; color:#1e40af; font-weight:bold; font-size:1.1rem;'>
|
|
2582
|
+
👈 Fes clic a 'Construir i enviar model' per començar a jugar!
|
|
2583
|
+
</p>
|
|
2584
|
+
</div>
|
|
2585
|
+
</div>
|
|
2586
|
+
"""
|
|
2587
|
+
|
|
2588
|
+
# Check background init
|
|
2589
|
+
with INIT_LOCK:
|
|
2590
|
+
background_ready = INIT_FLAGS["leaderboard"]
|
|
2591
|
+
|
|
2592
|
+
should_attempt_fetch = background_ready or (token is not None)
|
|
2593
|
+
full_leaderboard_df = None
|
|
2594
|
+
|
|
2595
|
+
if should_attempt_fetch:
|
|
2596
|
+
try:
|
|
2597
|
+
if playground:
|
|
2598
|
+
full_leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
|
|
2599
|
+
except Exception as e:
|
|
2600
|
+
print(f"Error on initial load fetch: {e}")
|
|
2601
|
+
full_leaderboard_df = None
|
|
2602
|
+
|
|
2603
|
+
# -------------------------------------------------------------------------
|
|
2604
|
+
# LOGIC UPDATE: Check if THIS user has submitted anything
|
|
2605
|
+
# -------------------------------------------------------------------------
|
|
2606
|
+
user_has_submitted = False
|
|
2607
|
+
if full_leaderboard_df is not None and not full_leaderboard_df.empty:
|
|
2608
|
+
if "username" in full_leaderboard_df.columns and username:
|
|
2609
|
+
# Check if the username exists in the dataframe
|
|
2610
|
+
user_has_submitted = username in full_leaderboard_df["username"].values
|
|
2611
|
+
|
|
2612
|
+
# Decision Logic
|
|
2613
|
+
if not user_has_submitted:
|
|
2614
|
+
# CASE 1: New User (or first time loading session) -> FORCE WELCOME
|
|
2615
|
+
# regardless of whether the leaderboard has other people's data.
|
|
2616
|
+
team_html = welcome_html
|
|
2617
|
+
individual_html = "<p style='text-align:center; color:#6b7280; padding-top:40px;'>Envia el teu model per veure la teva posició a la classificació!</p>"
|
|
2618
|
+
|
|
2619
|
+
elif full_leaderboard_df is None or full_leaderboard_df.empty:
|
|
2620
|
+
# CASE 2: Returning user, but data fetch failed -> Show Skeleton
|
|
2621
|
+
team_html = _build_skeleton_leaderboard(rows=6, is_team=True)
|
|
2622
|
+
individual_html = _build_skeleton_leaderboard(rows=6, is_team=False)
|
|
2623
|
+
|
|
2624
|
+
else:
|
|
2625
|
+
# CASE 3: Returning user WITH data -> Show Real Tables
|
|
2626
|
+
try:
|
|
2627
|
+
team_html, individual_html, _, _, _, _ = generate_competitive_summary(
|
|
2628
|
+
full_leaderboard_df,
|
|
2629
|
+
team_name,
|
|
2630
|
+
username,
|
|
2631
|
+
0, 0, -1
|
|
2632
|
+
)
|
|
2633
|
+
except Exception as e:
|
|
2634
|
+
print(f"Error generating summary HTML: {e}")
|
|
2635
|
+
team_html = "<p style='text-align:center; color:red; padding-top:20px;'>S'ha produït un error en carregar la classificació.</p>"
|
|
2636
|
+
individual_html = "<p style='text-align:center; color:red; padding-top:20px;'>S'ha produït un error en mostrar la classificació.</p>"
|
|
2637
|
+
|
|
2638
|
+
return (
|
|
2639
|
+
get_model_card(DEFAULT_MODEL),
|
|
2640
|
+
team_html,
|
|
2641
|
+
individual_html,
|
|
2642
|
+
initial_ui["rank_message"],
|
|
2643
|
+
gr.update(choices=initial_ui["model_choices"], value=initial_ui["model_value"], interactive=initial_ui["model_interactive"]),
|
|
2644
|
+
gr.update(minimum=1, maximum=initial_ui["complexity_max"], value=initial_ui["complexity_value"]),
|
|
2645
|
+
gr.update(choices=initial_ui["feature_set_choices"], value=initial_ui["feature_set_value"], interactive=initial_ui["feature_set_interactive"]),
|
|
2646
|
+
gr.update(choices=initial_ui["data_size_choices"], value=initial_ui["data_size_value"], interactive=initial_ui["data_size_interactive"]),
|
|
2647
|
+
)
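# Editor's note (illustrative sketch, not part of the packaged file): the
# CASE 1/2/3 branching above reduces to a small decision table, sketched here
# as a pure helper for clarity:
def initial_view_mode(user_has_submitted: bool, leaderboard_loaded: bool) -> str:
    if not user_has_submitted:
        return "welcome"    # CASE 1: force the welcome call-to-action
    if not leaderboard_loaded:
        return "skeleton"   # CASE 2: returning user, but the fetch failed
    return "tables"         # CASE 3: returning user with data

assert initial_view_mode(False, True) == "welcome"
assert initial_view_mode(True, False) == "skeleton"
assert initial_view_mode(True, True) == "tables"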
|
|
2648
|
+
|
|
2649
|
+
|
|
2650
|
+
# -------------------------------------------------------------------------
|
|
2651
|
+
# Conclusion helpers (dark/light mode aware)
|
|
2652
|
+
# -------------------------------------------------------------------------
|
|
2653
|
+
def build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set):
|
|
2654
|
+
"""
|
|
2655
|
+
Build the final conclusion HTML with performance summary.
|
|
2656
|
+
Colors are handled via CSS classes so that light/dark mode work correctly.
|
|
2657
|
+
"""
|
|
2658
|
+
unlocked_tiers = min(3, max(0, submissions - 1)) # 0..3
|
|
2659
|
+
tier_names = ["En pràctiques", "Júnior", "Sènior", "Principal"]
|
|
2660
|
+
reached = tier_names[: unlocked_tiers + 1]
|
|
2661
|
+
tier_line = " → ".join([f"{t}{' ✅' if t in reached else ''}" for t in tier_names])
|
|
2662
|
+
|
|
2663
|
+
improvement = (best_score - first_score) if (first_score is not None and submissions > 1) else 0.0
|
|
2664
|
+
strong_predictors = {"age", "length_of_stay", "priors_count", "age_cat"}
|
|
2665
|
+
strong_used = [f for f in feature_set if f in strong_predictors]
|
|
2666
|
+
|
|
2667
|
+
ethical_note = (
|
|
2668
|
+
"Has desbloquejat predictors molt potents. Reflexiona: eliminant els camps demogràfics canviaria la justícia del model?"
|
|
2669
|
+
"En la següent secció començarem a investigar aquesta qüestió a fons."
|
|
2670
|
+
)
|
|
2671
|
+
|
|
2672
|
+
# Tailor message for very few submissions
|
|
2673
|
+
tip_html = ""
|
|
2674
|
+
if submissions < 2:
|
|
2675
|
+
tip_html = """
|
|
2676
|
+
<div class="final-conclusion-tip">
|
|
2677
|
+
<b>Tip:</b> Prova de fer almenys 2 o 3 enviaments canviant NOMÉS un paràmetre cada vegada per veure clarament la relació causa-efecte.
|
|
2678
|
+
</div>
|
|
2679
|
+
"""
|
|
2680
|
+
|
|
2681
|
+
# Add note if user reached the attempt cap
|
|
2682
|
+
attempt_cap_html = ""
|
|
2683
|
+
if submissions >= ATTEMPT_LIMIT:
|
|
2684
|
+
attempt_cap_html = f"""
|
|
2685
|
+
<div class="final-conclusion-attempt-cap">
|
|
2686
|
+
<p style="margin:0;">
|
|
2687
|
+
<b>📊 Límit d’intents assolit:</b> Has utilitzat tots els {ATTEMPT_LIMIT} intents d’enviament permesos per a aquesta sessió.
|
|
2688
|
+
Podràs enviar més models un cop hagis completat algunes activitats noves.
|
|
2689
|
+
</p>
|
|
2690
|
+
</div>
|
|
2691
|
+
"""
|
|
2692
|
+
|
|
2693
|
+
return f"""
|
|
2694
|
+
<div class="final-conclusion-root">
|
|
2695
|
+
<h1 class="final-conclusion-title">🎉 Fase d’enginyeria completada</h1>
|
|
2696
|
+
<div class="final-conclusion-card">
|
|
2697
|
+
<h2 class="final-conclusion-subtitle">Resum del teu rendiment</h2>
|
|
2698
|
+
<ul class="final-conclusion-list">
|
|
2699
|
+
<li>🏁 <b>Millor precisió:</b> {(best_score * 100):.2f}%</li>
|
|
2700
|
+
<li>📊 <b>Posició aconseguida:</b> {('#' + str(rank)) if rank > 0 else '—'}</li>
|
|
2701
|
+
<li>🔁 <b>Enviaments en aquesta sessió:</b> {submissions}{' / ' + str(ATTEMPT_LIMIT) if submissions >= ATTEMPT_LIMIT else ''}</li>
|
|
2702
|
+
<li>🧗 <b>Millora respecte a la primera puntuació d’aquesta sessió:</b> {(improvement * 100):+.2f}</li>
|
|
2703
|
+
<li>🎖️ <b>Progrés de nivell:</b> {tier_line}</li>
|
|
2704
|
+
<li>🧪 <b>Variables clau utilitzades:</b> {len(strong_used)} ({', '.join(strong_used) if strong_used else 'Encara cap'})</li>
|
|
2705
|
+
</ul>
|
|
2706
|
+
|
|
2707
|
+
{tip_html}
|
|
2708
|
+
|
|
2709
|
+
<div class="final-conclusion-ethics">
|
|
2710
|
+
<p style="margin:0;"><b>Reflexió ètica:</b> {ethical_note}</p>
|
|
2711
|
+
</div>
|
|
2712
|
+
|
|
2713
|
+
{attempt_cap_html}
|
|
2714
|
+
|
|
2715
|
+
<hr class="final-conclusion-divider" />
|
|
2716
|
+
|
|
2717
|
+
<div class="final-conclusion-next">
|
|
2718
|
+
<h2>➡️ Següent: Conseqüències al món real</h2>
|
|
2719
|
+
<p>Desplaça’t cap avall sota aquesta aplicació per continuar. Analitzaràs com models com el teu influeixen en els resultats judicials.</p>
|
|
2720
|
+
<h1 class="final-conclusion-scroll">👇 DESPLAÇA’T CAP AVALL 👇</h1>
|
|
2721
|
+
</div>
|
|
2722
|
+
</div>
|
|
2723
|
+
</div>
|
|
2724
|
+
"""
|
|
2725
|
+
|
|
2726
|
+
|
|
2727
|
+
|
|
2728
|
+
def build_conclusion_from_state(best_score, submissions, rank, first_score, feature_set):
|
|
2729
|
+
return build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set)
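# Editor's note (illustrative usage, not part of the packaged file): the values
# below are made up. `build_final_conclusion_html` returns a self-contained
# <div> string that the app renders inside a Gradio HTML component.
_example_conclusion_html = build_final_conclusion_html(
    best_score=0.71,
    submissions=4,
    rank=3,
    first_score=0.64,
    feature_set=["age", "priors_count", "sex"],
)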
|
|
2730
|
+
def create_model_building_game_ca_app(theme_primary_hue: str = "indigo") -> "gr.Blocks":
|
|
2731
|
+
"""
|
|
2732
|
+
Create (but do not launch) the model building game app.
|
|
2733
|
+
"""
|
|
2734
|
+
start_background_init()
|
|
2735
|
+
|
|
2736
|
+
# Declare the UI components and state holders as module-level globals so the handlers above can reference them
|
|
2737
|
+
global submit_button, submission_feedback_display, team_leaderboard_display
|
|
2738
|
+
global individual_leaderboard_display, last_submission_score_state, last_rank_state
|
|
2739
|
+
global best_score_state, submission_count_state, first_submission_score_state
|
|
2740
|
+
global rank_message_display, model_type_radio, complexity_slider
|
|
2741
|
+
global feature_set_checkbox, data_size_radio
|
|
2742
|
+
global login_username, login_password, login_submit, login_error
|
|
2743
|
+
global attempts_tracker_display, team_name_state
|
|
2744
|
+
global username_state, token_state # <-- Added
|
|
2745
|
+
global readiness_state, was_preview_state, kpi_meta_state # <-- Added for parameter shadowing guards
|
|
2746
|
+
global last_seen_ts_state # <-- Added for timestamp tracking
|
|
2747
|
+
|
|
2748
|
+
css = """
|
|
2749
|
+
/* ------------------------------
|
|
2750
|
+
Shared Design Tokens (local)
|
|
2751
|
+
------------------------------ */
|
|
2752
|
+
|
|
2753
|
+
/* We keep everything driven by Gradio theme vars:
|
|
2754
|
+
--body-background-fill, --body-text-color, --secondary-text-color,
|
|
2755
|
+
--border-color-primary, --block-background-fill, --color-accent,
|
|
2756
|
+
--shadow-drop, --prose-background-fill
|
|
2757
|
+
*/
|
|
2758
|
+
|
|
2759
|
+
:root {
|
|
2760
|
+
--slide-radius-md: 12px;
|
|
2761
|
+
--slide-radius-lg: 16px;
|
|
2762
|
+
--slide-radius-xl: 18px;
|
|
2763
|
+
--slide-spacing-lg: 24px;
|
|
2764
|
+
|
|
2765
|
+
/* Local, non-brand tokens built *on top of* theme vars */
|
|
2766
|
+
--card-bg-soft: var(--block-background-fill);
|
|
2767
|
+
--card-bg-strong: var(--prose-background-fill, var(--block-background-fill));
|
|
2768
|
+
--card-border-subtle: var(--border-color-primary);
|
|
2769
|
+
--accent-strong: var(--color-accent);
|
|
2770
|
+
--text-main: var(--body-text-color);
|
|
2771
|
+
--text-muted: var(--secondary-text-color);
|
|
2772
|
+
}
|
|
2773
|
+
|
|
2774
|
+
/* ------------------------------------------------------------------
|
|
2775
|
+
Base Layout Helpers
|
|
2776
|
+
------------------------------------------------------------------ */
|
|
2777
|
+
|
|
2778
|
+
.slide-content {
|
|
2779
|
+
max-width: 900px;
|
|
2780
|
+
margin-left: auto;
|
|
2781
|
+
margin-right: auto;
|
|
2782
|
+
}
|
|
2783
|
+
|
|
2784
|
+
/* Shared card-like panels used throughout slides */
|
|
2785
|
+
.panel-box {
|
|
2786
|
+
background: var(--card-bg-soft);
|
|
2787
|
+
padding: 20px;
|
|
2788
|
+
border-radius: var(--slide-radius-lg);
|
|
2789
|
+
border: 2px solid var(--card-border-subtle);
|
|
2790
|
+
margin-bottom: 18px;
|
|
2791
|
+
color: var(--text-main);
|
|
2792
|
+
box-shadow: var(--shadow-drop, 0 2px 4px rgba(0,0,0,0.04));
|
|
2793
|
+
}
|
|
2794
|
+
|
|
2795
|
+
.leaderboard-box {
|
|
2796
|
+
background: var(--card-bg-soft);
|
|
2797
|
+
padding: 20px;
|
|
2798
|
+
border-radius: var(--slide-radius-lg);
|
|
2799
|
+
border: 1px solid var(--card-border-subtle);
|
|
2800
|
+
margin-top: 12px;
|
|
2801
|
+
color: var(--text-main);
|
|
2802
|
+
}
|
|
2803
|
+
|
|
2804
|
+
/* For “explanatory UI” scaffolding */
|
|
2805
|
+
.mock-ui-box {
|
|
2806
|
+
background: var(--card-bg-strong);
|
|
2807
|
+
border: 2px solid var(--card-border-subtle);
|
|
2808
|
+
padding: 24px;
|
|
2809
|
+
border-radius: var(--slide-radius-lg);
|
|
2810
|
+
color: var(--text-main);
|
|
2811
|
+
}
|
|
2812
|
+
|
|
2813
|
+
.mock-ui-inner {
|
|
2814
|
+
background: var(--block-background-fill);
|
|
2815
|
+
border: 1px solid var(--card-border-subtle);
|
|
2816
|
+
padding: 24px;
|
|
2817
|
+
border-radius: var(--slide-radius-md);
|
|
2818
|
+
}
|
|
2819
|
+
|
|
2820
|
+
/* “Control box” inside the mock UI */
|
|
2821
|
+
.mock-ui-control-box {
|
|
2822
|
+
padding: 12px;
|
|
2823
|
+
background: var(--block-background-fill);
|
|
2824
|
+
border-radius: 8px;
|
|
2825
|
+
border: 1px solid var(--card-border-subtle);
|
|
2826
|
+
}
|
|
2827
|
+
|
|
2828
|
+
/* Little radio / check icons */
|
|
2829
|
+
.mock-ui-radio-on {
|
|
2830
|
+
font-size: 1.5rem;
|
|
2831
|
+
vertical-align: middle;
|
|
2832
|
+
color: var(--accent-strong);
|
|
2833
|
+
}
|
|
2834
|
+
|
|
2835
|
+
.mock-ui-radio-off {
|
|
2836
|
+
font-size: 1.5rem;
|
|
2837
|
+
vertical-align: middle;
|
|
2838
|
+
color: var(--text-muted);
|
|
2839
|
+
}
|
|
2840
|
+
|
|
2841
|
+
.mock-ui-slider-text {
|
|
2842
|
+
font-size: 1.5rem;
|
|
2843
|
+
margin: 0;
|
|
2844
|
+
color: var(--accent-strong);
|
|
2845
|
+
letter-spacing: 4px;
|
|
2846
|
+
}
|
|
2847
|
+
|
|
2848
|
+
.mock-ui-slider-bar {
|
|
2849
|
+
color: var(--text-muted);
|
|
2850
|
+
}
|
|
2851
|
+
|
|
2852
|
+
/* Simple mock button representation */
|
|
2853
|
+
.mock-button {
|
|
2854
|
+
width: 100%;
|
|
2855
|
+
font-size: 1.25rem;
|
|
2856
|
+
font-weight: 600;
|
|
2857
|
+
padding: 16px 24px;
|
|
2858
|
+
background-color: var(--accent-strong);
|
|
2859
|
+
color: var(--body-background-fill);
|
|
2860
|
+
border: none;
|
|
2861
|
+
border-radius: 8px;
|
|
2862
|
+
cursor: not-allowed;
|
|
2863
|
+
}
|
|
2864
|
+
|
|
2865
|
+
/* Step visuals on slides */
|
|
2866
|
+
.step-visual {
|
|
2867
|
+
display: flex;
|
|
2868
|
+
flex-wrap: wrap;
|
|
2869
|
+
justify-content: space-around;
|
|
2870
|
+
align-items: center;
|
|
2871
|
+
margin: 24px 0;
|
|
2872
|
+
text-align: center;
|
|
2873
|
+
font-size: 1rem;
|
|
2874
|
+
}
|
|
2875
|
+
|
|
2876
|
+
.step-visual-box {
|
|
2877
|
+
padding: 16px;
|
|
2878
|
+
background: var(--block-background-fill); /* ✅ theme-aware */
|
|
2879
|
+
border-radius: 8px;
|
|
2880
|
+
border: 2px solid var(--border-color-primary);
|
|
2881
|
+
margin: 5px;
|
|
2882
|
+
color: var(--body-text-color); /* optional, safe */
|
|
2883
|
+
}
|
|
2884
|
+
|
|
2885
|
+
.step-visual-arrow {
|
|
2886
|
+
font-size: 2rem;
|
|
2887
|
+
margin: 5px;
|
|
2888
|
+
/* no explicit color – inherit from theme or override in dark mode */
|
|
2889
|
+
}
|
|
2890
|
+
|
|
2891
|
+
/* ------------------------------------------------------------------
|
|
2892
|
+
KPI Card (score feedback)
|
|
2893
|
+
------------------------------------------------------------------ */
|
|
2894
|
+
|
|
2895
|
+
.kpi-card {
|
|
2896
|
+
background: var(--card-bg-strong);
|
|
2897
|
+
border: 2px solid var(--accent-strong);
|
|
2898
|
+
padding: 24px;
|
|
2899
|
+
border-radius: var(--slide-radius-lg);
|
|
2900
|
+
text-align: center;
|
|
2901
|
+
max-width: 600px;
|
|
2902
|
+
margin: auto;
|
|
2903
|
+
color: var(--text-main);
|
|
2904
|
+
box-shadow: var(--shadow-drop, 0 4px 6px -1px rgba(0,0,0,0.08));
|
|
2905
|
+
min-height: 200px; /* prevent layout shift */
|
|
2906
|
+
}
|
|
2907
|
+
|
|
2908
|
+
.kpi-card-body {
|
|
2909
|
+
display: flex;
|
|
2910
|
+
flex-wrap: wrap;
|
|
2911
|
+
justify-content: space-around;
|
|
2912
|
+
align-items: flex-end;
|
|
2913
|
+
margin-top: 24px;
|
|
2914
|
+
}
|
|
2915
|
+
|
|
2916
|
+
.kpi-metric-box {
|
|
2917
|
+
min-width: 150px;
|
|
2918
|
+
margin: 10px;
|
|
2919
|
+
}
|
|
2920
|
+
|
|
2921
|
+
.kpi-label {
|
|
2922
|
+
font-size: 1rem;
|
|
2923
|
+
color: var(--text-muted);
|
|
2924
|
+
margin: 0;
|
|
2925
|
+
}
|
|
2926
|
+
|
|
2927
|
+
.kpi-score {
|
|
2928
|
+
font-size: 3rem;
|
|
2929
|
+
font-weight: 700;
|
|
2930
|
+
margin: 0;
|
|
2931
|
+
line-height: 1.1;
|
|
2932
|
+
color: var(--accent-strong);
|
|
2933
|
+
}
|
|
2934
|
+
|
|
2935
|
+
.kpi-subtext-muted {
|
|
2936
|
+
font-size: 1.2rem;
|
|
2937
|
+
font-weight: 500;
|
|
2938
|
+
color: var(--text-muted);
|
|
2939
|
+
margin: 0;
|
|
2940
|
+
padding-top: 8px;
|
|
2941
|
+
}
|
|
2942
|
+
|
|
2943
|
+
/* Small variants to hint semantic state without hard-coded colors */
|
|
2944
|
+
.kpi-card--neutral {
|
|
2945
|
+
border-color: var(--card-border-subtle);
|
|
2946
|
+
}
|
|
2947
|
+
|
|
2948
|
+
.kpi-card--subtle-accent {
|
|
2949
|
+
border-color: var(--accent-strong);
|
|
2950
|
+
}
|
|
2951
|
+
|
|
2952
|
+
.kpi-score--muted {
|
|
2953
|
+
color: var(--text-muted);
|
|
2954
|
+
}
|
|
2955
|
+
|
|
2956
|
+
/* ------------------------------------------------------------------
|
|
2957
|
+
Leaderboard Table + Placeholder
|
|
2958
|
+
------------------------------------------------------------------ */
|
|
2959
|
+
|
|
2960
|
+
.leaderboard-html-table {
|
|
2961
|
+
width: 100%;
|
|
2962
|
+
border-collapse: collapse;
|
|
2963
|
+
text-align: left;
|
|
2964
|
+
font-size: 1rem;
|
|
2965
|
+
color: var(--text-main);
|
|
2966
|
+
min-height: 300px; /* Stable height */
|
|
2967
|
+
}
|
|
2968
|
+
|
|
2969
|
+
.leaderboard-html-table thead {
|
|
2970
|
+
background: var(--block-background-fill);
|
|
2971
|
+
}
|
|
2972
|
+
|
|
2973
|
+
.leaderboard-html-table th {
|
|
2974
|
+
padding: 12px 16px;
|
|
2975
|
+
font-size: 0.9rem;
|
|
2976
|
+
color: var(--text-muted);
|
|
2977
|
+
font-weight: 500;
|
|
2978
|
+
}
|
|
2979
|
+
|
|
2980
|
+
.leaderboard-html-table tbody tr {
|
|
2981
|
+
border-bottom: 1px solid var(--card-border-subtle);
|
|
2982
|
+
}
|
|
2983
|
+
|
|
2984
|
+
.leaderboard-html-table td {
|
|
2985
|
+
padding: 12px 16px;
|
|
2986
|
+
}
|
|
2987
|
+
|
|
2988
|
+
.leaderboard-html-table .user-row-highlight {
|
|
2989
|
+
background: rgba(var(--color-accent-rgb, 59, 130, 246), 0.1);
|
|
2990
|
+
font-weight: 600;
|
|
2991
|
+
color: var(--accent-strong);
|
|
2992
|
+
}
|
|
2993
|
+
|
|
2994
|
+
/* Static placeholder (no shimmer, no animation) */
|
|
2995
|
+
.lb-placeholder {
|
|
2996
|
+
min-height: 300px;
|
|
2997
|
+
display: flex;
|
|
2998
|
+
flex-direction: column;
|
|
2999
|
+
align-items: center;
|
|
3000
|
+
justify-content: center;
|
|
3001
|
+
background: var(--block-background-fill);
|
|
3002
|
+
border: 1px solid var(--card-border-subtle);
|
|
3003
|
+
border-radius: 12px;
|
|
3004
|
+
padding: 40px 20px;
|
|
3005
|
+
text-align: center;
|
|
3006
|
+
}
|
|
3007
|
+
|
|
3008
|
+
.lb-placeholder-title {
|
|
3009
|
+
font-size: 1.25rem;
|
|
3010
|
+
font-weight: 500;
|
|
3011
|
+
color: var(--text-muted);
|
|
3012
|
+
margin-bottom: 8px;
|
|
3013
|
+
}
|
|
3014
|
+
|
|
3015
|
+
.lb-placeholder-sub {
|
|
3016
|
+
font-size: 1rem;
|
|
3017
|
+
color: var(--text-muted);
|
|
3018
|
+
}
|
|
3019
|
+
|
|
3020
|
+
/* ------------------------------------------------------------------
|
|
3021
|
+
Processing / “Experiment running” status
|
|
3022
|
+
------------------------------------------------------------------ */
|
|
3023
|
+
|
|
3024
|
+
.processing-status {
|
|
3025
|
+
background: var(--block-background-fill);
|
|
3026
|
+
border: 2px solid var(--accent-strong);
|
|
3027
|
+
border-radius: 16px;
|
|
3028
|
+
padding: 30px;
|
|
3029
|
+
text-align: center;
|
|
3030
|
+
box-shadow: var(--shadow-drop, 0 4px 6px rgba(0,0,0,0.12));
|
|
3031
|
+
animation: pulse-indigo 2s infinite;
|
|
3032
|
+
color: var(--text-main);
|
|
3033
|
+
}
|
|
3034
|
+
|
|
3035
|
+
.processing-icon {
|
|
3036
|
+
font-size: 4rem;
|
|
3037
|
+
margin-bottom: 10px;
|
|
3038
|
+
display: block;
|
|
3039
|
+
animation: spin-slow 3s linear infinite;
|
|
3040
|
+
}
|
|
3041
|
+
|
|
3042
|
+
.processing-text {
|
|
3043
|
+
font-size: 1.5rem;
|
|
3044
|
+
font-weight: 700;
|
|
3045
|
+
color: var(--accent-strong);
|
|
3046
|
+
}
|
|
3047
|
+
|
|
3048
|
+
.processing-subtext {
|
|
3049
|
+
font-size: 1.1rem;
|
|
3050
|
+
color: var(--text-muted);
|
|
3051
|
+
margin-top: 8px;
|
|
3052
|
+
}
|
|
3053
|
+
|
|
3054
|
+
/* Pulse & spin animations */
|
|
3055
|
+
@keyframes pulse-indigo {
|
|
3056
|
+
0% { box-shadow: 0 0 0 0 rgba(99, 102, 241, 0.4); }
|
|
3057
|
+
70% { box-shadow: 0 0 0 15px rgba(99, 102, 241, 0); }
|
|
3058
|
+
100% { box-shadow: 0 0 0 0 rgba(99, 102, 241, 0); }
|
|
3059
|
+
}
|
|
3060
|
+
|
|
3061
|
+
@keyframes spin-slow {
|
|
3062
|
+
from { transform: rotate(0deg); }
|
|
3063
|
+
to { transform: rotate(360deg); }
|
|
3064
|
+
}
|
|
3065
|
+
|
|
3066
|
+
/* Conclusion arrow pulse */
|
|
3067
|
+
@keyframes pulseArrow {
|
|
3068
|
+
0% { transform: scale(1); opacity: 1; }
|
|
3069
|
+
50% { transform: scale(1.08); opacity: 0.85; }
|
|
3070
|
+
100% { transform: scale(1); opacity: 1; }
|
|
3071
|
+
}
|
|
3072
|
+
|
|
3073
|
+
@media (prefers-reduced-motion: reduce) {
|
|
3074
|
+
[style*='pulseArrow'] {
|
|
3075
|
+
animation: none !important;
|
|
3076
|
+
}
|
|
3077
|
+
.processing-status,
|
|
3078
|
+
.processing-icon {
|
|
3079
|
+
animation: none !important;
|
|
3080
|
+
}
|
|
3081
|
+
}
|
|
3082
|
+
|
|
3083
|
+
/* ------------------------------------------------------------------
|
|
3084
|
+
Attempts Tracker + Init Banner + Alerts
|
|
3085
|
+
------------------------------------------------------------------ */
|
|
3086
|
+
|
|
3087
|
+
.init-banner {
|
|
3088
|
+
background: var(--card-bg-strong);
|
|
3089
|
+
padding: 12px;
|
|
3090
|
+
border-radius: 8px;
|
|
3091
|
+
text-align: center;
|
|
3092
|
+
margin-bottom: 16px;
|
|
3093
|
+
border: 1px solid var(--card-border-subtle);
|
|
3094
|
+
color: var(--text-main);
|
|
3095
|
+
}
|
|
3096
|
+
|
|
3097
|
+
.init-banner__text {
|
|
3098
|
+
margin: 0;
|
|
3099
|
+
font-weight: 500;
|
|
3100
|
+
color: var(--text-muted);
|
|
3101
|
+
}
|
|
3102
|
+
|
|
3103
|
+
/* Attempts tracker shell */
|
|
3104
|
+
.attempts-tracker {
|
|
3105
|
+
text-align: center;
|
|
3106
|
+
padding: 8px;
|
|
3107
|
+
margin: 8px 0;
|
|
3108
|
+
background: var(--block-background-fill);
|
|
3109
|
+
border-radius: 8px;
|
|
3110
|
+
border: 1px solid var(--card-border-subtle);
|
|
3111
|
+
}
|
|
3112
|
+
|
|
3113
|
+
.attempts-tracker__text {
|
|
3114
|
+
margin: 0;
|
|
3115
|
+
font-weight: 600;
|
|
3116
|
+
font-size: 1rem;
|
|
3117
|
+
color: var(--accent-strong);
|
|
3118
|
+
}
|
|
3119
|
+
|
|
3120
|
+
/* Limit reached variant – we *still* stick to theme colors */
|
|
3121
|
+
.attempts-tracker--limit .attempts-tracker__text {
|
|
3122
|
+
color: var(--text-main);
|
|
3123
|
+
}
|
|
3124
|
+
|
|
3125
|
+
/* Generic alert helpers used in inline login messages */
|
|
3126
|
+
.alert {
|
|
3127
|
+
padding: 12px 16px;
|
|
3128
|
+
border-radius: 8px;
|
|
3129
|
+
margin-top: 12px;
|
|
3130
|
+
text-align: left;
|
|
3131
|
+
font-size: 0.95rem;
|
|
3132
|
+
}
|
|
3133
|
+
|
|
3134
|
+
.alert--error {
|
|
3135
|
+
border-left: 4px solid var(--accent-strong);
|
|
3136
|
+
background: var(--block-background-fill);
|
|
3137
|
+
color: var(--text-main);
|
|
3138
|
+
}
|
|
3139
|
+
|
|
3140
|
+
.alert--success {
|
|
3141
|
+
border-left: 4px solid var(--accent-strong);
|
|
3142
|
+
background: var(--block-background-fill);
|
|
3143
|
+
color: var(--text-main);
|
|
3144
|
+
}
|
|
3145
|
+
|
|
3146
|
+
.alert__title {
|
|
3147
|
+
margin: 0;
|
|
3148
|
+
font-weight: 600;
|
|
3149
|
+
color: var(--text-main);
|
|
3150
|
+
}
|
|
3151
|
+
|
|
3152
|
+
.alert__body {
|
|
3153
|
+
margin: 8px 0 0 0;
|
|
3154
|
+
color: var(--text-muted);
|
|
3155
|
+
}
|
|
3156
|
+
|
|
3157
|
+
/* ------------------------------------------------------------------
|
|
3158
|
+
Navigation Loading Overlay
|
|
3159
|
+
------------------------------------------------------------------ */
|
|
3160
|
+
|
|
3161
|
+
#nav-loading-overlay {
|
|
3162
|
+
position: fixed;
|
|
3163
|
+
top: 0;
|
|
3164
|
+
left: 0;
|
|
3165
|
+
width: 100%;
|
|
3166
|
+
height: 100%;
|
|
3167
|
+
background: color-mix(in srgb, var(--body-background-fill) 90%, transparent);
|
|
3168
|
+
z-index: 9999;
|
|
3169
|
+
display: none;
|
|
3170
|
+
flex-direction: column;
|
|
3171
|
+
align-items: center;
|
|
3172
|
+
justify-content: center;
|
|
3173
|
+
opacity: 0;
|
|
3174
|
+
transition: opacity 0.3s ease;
|
|
3175
|
+
}
|
|
3176
|
+
|
|
3177
|
+
.nav-spinner {
|
|
3178
|
+
width: 50px;
|
|
3179
|
+
height: 50px;
|
|
3180
|
+
border: 5px solid var(--card-border-subtle);
|
|
3181
|
+
border-top: 5px solid var(--accent-strong);
|
|
3182
|
+
border-radius: 50%;
|
|
3183
|
+
animation: nav-spin 1s linear infinite;
|
|
3184
|
+
margin-bottom: 20px;
|
|
3185
|
+
}
|
|
3186
|
+
|
|
3187
|
+
@keyframes nav-spin {
|
|
3188
|
+
0% { transform: rotate(0deg); }
|
|
3189
|
+
100% { transform: rotate(360deg); }
|
|
3190
|
+
}
|
|
3191
|
+
|
|
3192
|
+
#nav-loading-text {
|
|
3193
|
+
font-size: 1.3rem;
|
|
3194
|
+
font-weight: 600;
|
|
3195
|
+
color: var(--accent-strong);
|
|
3196
|
+
}
|
|
3197
|
+
|
|
3198
|
+
/* ------------------------------------------------------------------
|
|
3199
|
+
Utility: Image inversion for dark mode (if needed)
|
|
3200
|
+
------------------------------------------------------------------ */
|
|
3201
|
+
|
|
3202
|
+
.dark-invert-image {
|
|
3203
|
+
filter: invert(0);
|
|
3204
|
+
}
|
|
3205
|
+
|
|
3206
|
+
@media (prefers-color-scheme: dark) {
|
|
3207
|
+
.dark-invert-image {
|
|
3208
|
+
filter: invert(1) hue-rotate(180deg);
|
|
3209
|
+
}
|
|
3210
|
+
}
|
|
3211
|
+
|
|
3212
|
+
/* ------------------------------------------------------------------
|
|
3213
|
+
Dark Mode Specific Fine Tuning
|
|
3214
|
+
------------------------------------------------------------------ */
|
|
3215
|
+
|
|
3216
|
+
@media (prefers-color-scheme: dark) {
|
|
3217
|
+
.panel-box,
|
|
3218
|
+
.leaderboard-box,
|
|
3219
|
+
.mock-ui-box,
|
|
3220
|
+
.mock-ui-inner,
|
|
3221
|
+
.processing-status,
|
|
3222
|
+
.kpi-card {
|
|
3223
|
+
background: color-mix(in srgb, var(--block-background-fill) 85%, #000 15%);
|
|
3224
|
+
border-color: color-mix(in srgb, var(--card-border-subtle) 70%, var(--accent-strong) 30%);
|
|
3225
|
+
}
|
|
3226
|
+
|
|
3227
|
+
.leaderboard-html-table thead {
|
|
3228
|
+
background: color-mix(in srgb, var(--block-background-fill) 75%, #000 25%);
|
|
3229
|
+
}
|
|
3230
|
+
|
|
3231
|
+
.lb-placeholder {
|
|
3232
|
+
background: color-mix(in srgb, var(--block-background-fill) 75%, #000 25%);
|
|
3233
|
+
}
|
|
3234
|
+
|
|
3235
|
+
#nav-loading-overlay {
|
|
3236
|
+
background: color-mix(in srgb, #000 70%, var(--body-background-fill) 30%);
|
|
3237
|
+
}
|
|
3238
|
+
}
|
|
3239
|
+
|
|
3240
|
+
/* ---------- Conclusion Card Theme Tokens ---------- */
|
|
3241
|
+
|
|
3242
|
+
/* Light theme defaults */
|
|
3243
|
+
:root,
|
|
3244
|
+
:root[data-theme="light"] {
|
|
3245
|
+
--conclusion-card-bg: #e0f2fe; /* light sky */
|
|
3246
|
+
--conclusion-card-border: #0369a1; /* sky-700 */
|
|
3247
|
+
--conclusion-card-fg: #0f172a; /* slate-900 */
|
|
3248
|
+
|
|
3249
|
+
--conclusion-tip-bg: #fef9c3; /* amber-100 */
|
|
3250
|
+
--conclusion-tip-border: #f59e0b; /* amber-500 */
|
|
3251
|
+
--conclusion-tip-fg: #713f12; /* amber-900 */
|
|
3252
|
+
|
|
3253
|
+
--conclusion-ethics-bg: #fef2f2; /* red-50 */
|
|
3254
|
+
--conclusion-ethics-border: #ef4444; /* red-500 */
|
|
3255
|
+
--conclusion-ethics-fg: #7f1d1d; /* red-900 */
|
|
3256
|
+
|
|
3257
|
+
--conclusion-attempt-bg: #fee2e2; /* red-100 */
|
|
3258
|
+
--conclusion-attempt-border: #ef4444; /* red-500 */
|
|
3259
|
+
--conclusion-attempt-fg: #7f1d1d; /* red-900 */
|
|
3260
|
+
|
|
3261
|
+
--conclusion-next-fg: #0f172a; /* main text color */
|
|
3262
|
+
}
|
|
3263
|
+
|
|
3264
|
+
/* Dark theme overrides – keep contrast high on dark background */
|
|
3265
|
+
[data-theme="dark"] {
|
|
3266
|
+
--conclusion-card-bg: #020617; /* slate-950 */
|
|
3267
|
+
--conclusion-card-border: #38bdf8; /* sky-400 */
|
|
3268
|
+
--conclusion-card-fg: #e5e7eb; /* slate-200 */
|
|
3269
|
+
|
|
3270
|
+
--conclusion-tip-bg: rgba(250, 204, 21, 0.08); /* soft amber tint */
|
|
3271
|
+
--conclusion-tip-border: #facc15; /* amber-400 */
|
|
3272
|
+
--conclusion-tip-fg: #facc15;
|
|
3273
|
+
|
|
3274
|
+
--conclusion-ethics-bg: rgba(248, 113, 113, 0.10); /* soft red tint */
|
|
3275
|
+
--conclusion-ethics-border: #f97373; /* red-ish */
|
|
3276
|
+
--conclusion-ethics-fg: #fecaca;
|
|
3277
|
+
|
|
3278
|
+
--conclusion-attempt-bg: rgba(248, 113, 113, 0.16);
|
|
3279
|
+
--conclusion-attempt-border: #f97373;
|
|
3280
|
+
--conclusion-attempt-fg: #fee2e2;
|
|
3281
|
+
|
|
3282
|
+
--conclusion-next-fg: #e5e7eb;
|
|
3283
|
+
}
|
|
3284
|
+
|
|
3285
|
+
/* ---------- Conclusion Layout ---------- */
|
|
3286
|
+
|
|
3287
|
+
.app-conclusion-wrapper {
|
|
3288
|
+
text-align: center;
|
|
3289
|
+
}
|
|
3290
|
+
|
|
3291
|
+
.app-conclusion-title {
|
|
3292
|
+
font-size: 2.4rem;
|
|
3293
|
+
margin: 0;
|
|
3294
|
+
}
|
|
3295
|
+
|
|
3296
|
+
.app-conclusion-card {
|
|
3297
|
+
margin-top: 24px;
|
|
3298
|
+
max-width: 950px;
|
|
3299
|
+
margin-left: auto;
|
|
3300
|
+
margin-right: auto;
|
|
3301
|
+
padding: 28px;
|
|
3302
|
+
border-radius: 18px;
|
|
3303
|
+
border-width: 3px;
|
|
3304
|
+
border-style: solid;
|
|
3305
|
+
background: var(--conclusion-card-bg);
|
|
3306
|
+
border-color: var(--conclusion-card-border);
|
|
3307
|
+
color: var(--conclusion-card-fg);
|
|
3308
|
+
box-shadow: 0 20px 40px rgba(15, 23, 42, 0.25);
|
|
3309
|
+
}
|
|
3310
|
+
|
|
3311
|
+
.app-conclusion-subtitle {
|
|
3312
|
+
margin-top: 0;
|
|
3313
|
+
font-size: 1.5rem;
|
|
3314
|
+
}
|
|
3315
|
+
|
|
3316
|
+
.app-conclusion-metrics {
|
|
3317
|
+
list-style: none;
|
|
3318
|
+
padding: 0;
|
|
3319
|
+
font-size: 1.05rem;
|
|
3320
|
+
text-align: left;
|
|
3321
|
+
max-width: 640px;
|
|
3322
|
+
margin: 20px auto;
|
|
3323
|
+
}
|
|
3324
|
+
|
|
3325
|
+
/* ---------- Generic panel helpers reused here ---------- */
|
|
3326
|
+
|
|
3327
|
+
.app-panel-tip,
|
|
3328
|
+
.app-panel-critical,
|
|
3329
|
+
.app-panel-warning {
|
|
3330
|
+
padding: 16px;
|
|
3331
|
+
border-radius: 12px;
|
|
3332
|
+
border-left-width: 6px;
|
|
3333
|
+
border-left-style: solid;
|
|
3334
|
+
text-align: left;
|
|
3335
|
+
font-size: 0.98rem;
|
|
3336
|
+
line-height: 1.4;
|
|
3337
|
+
margin-top: 16px;
|
|
3338
|
+
}
|
|
3339
|
+
|
|
3340
|
+
.app-panel-title {
|
|
3341
|
+
margin: 0 0 4px 0;
|
|
3342
|
+
font-weight: 700;
|
|
3343
|
+
}
|
|
3344
|
+
|
|
3345
|
+
.app-panel-body {
|
|
3346
|
+
margin: 0;
|
|
3347
|
+
}
|
|
3348
|
+
|
|
3349
|
+
/* Specific variants */
|
|
3350
|
+
|
|
3351
|
+
.app-conclusion-tip.app-panel-tip {
|
|
3352
|
+
background: var(--conclusion-tip-bg);
|
|
3353
|
+
border-left-color: var(--conclusion-tip-border);
|
|
3354
|
+
color: var(--conclusion-tip-fg);
|
|
3355
|
+
}
|
|
3356
|
+
|
|
3357
|
+
.app-conclusion-ethics.app-panel-critical {
|
|
3358
|
+
background: var(--conclusion-ethics-bg);
|
|
3359
|
+
border-left-color: var(--conclusion-ethics-border);
|
|
3360
|
+
color: var(--conclusion-ethics-fg);
|
|
3361
|
+
}
|
|
3362
|
+
|
|
3363
|
+
.app-conclusion-attempt-cap.app-panel-warning {
|
|
3364
|
+
background: var(--conclusion-attempt-bg);
|
|
3365
|
+
border-left-color: var(--conclusion-attempt-border);
|
|
3366
|
+
color: var(--conclusion-attempt-fg);
|
|
3367
|
+
}
|
|
3368
|
+
|
|
3369
|
+
/* Divider + next section */
|
|
3370
|
+
|
|
3371
|
+
.app-conclusion-divider {
|
|
3372
|
+
margin: 28px 0;
|
|
3373
|
+
border: 0;
|
|
3374
|
+
border-top: 2px solid rgba(148, 163, 184, 0.8); /* slate-400-ish */
|
|
3375
|
+
}
|
|
3376
|
+
|
|
3377
|
+
.app-conclusion-next-title {
|
|
3378
|
+
margin: 0;
|
|
3379
|
+
color: var(--conclusion-next-fg);
|
|
3380
|
+
}
|
|
3381
|
+
|
|
3382
|
+
.app-conclusion-next-body {
|
|
3383
|
+
font-size: 1rem;
|
|
3384
|
+
color: var(--conclusion-next-fg);
|
|
3385
|
+
}
|
|
3386
|
+
|
|
3387
|
+
/* Arrow inherits the same color, keeps pulse animation defined earlier */
|
|
3388
|
+
.app-conclusion-arrow {
|
|
3389
|
+
margin: 12px 0;
|
|
3390
|
+
font-size: 3rem;
|
|
3391
|
+
animation: pulseArrow 2.5s infinite;
|
|
3392
|
+
color: var(--conclusion-next-fg);
|
|
3393
|
+
}
|
|
3394
|
+
|
|
3395
|
+
/* ---------------------------------------------------- */
|
|
3396
|
+
/* Final Conclusion Slide (Light Mode Defaults) */
|
|
3397
|
+
/* ---------------------------------------------------- */
|
|
3398
|
+
|
|
3399
|
+
.final-conclusion-root {
|
|
3400
|
+
text-align: center;
|
|
3401
|
+
color: var(--body-text-color);
|
|
3402
|
+
}
|
|
3403
|
+
|
|
3404
|
+
.final-conclusion-title {
|
|
3405
|
+
font-size: 2.4rem;
|
|
3406
|
+
margin: 0;
|
|
3407
|
+
}
|
|
3408
|
+
|
|
3409
|
+
.final-conclusion-card {
|
|
3410
|
+
background-color: var(--block-background-fill);
|
|
3411
|
+
color: var(--body-text-color);
|
|
3412
|
+
padding: 28px;
|
|
3413
|
+
border-radius: 18px;
|
|
3414
|
+
border: 2px solid var(--border-color-primary);
|
|
3415
|
+
margin-top: 24px;
|
|
3416
|
+
max-width: 950px;
|
|
3417
|
+
margin-left: auto;
|
|
3418
|
+
margin-right: auto;
|
|
3419
|
+
box-shadow: var(--shadow-drop, 0 4px 10px rgba(15, 23, 42, 0.08));
|
|
3420
|
+
}
|
|
3421
|
+
|
|
3422
|
+
.final-conclusion-subtitle {
|
|
3423
|
+
margin-top: 0;
|
|
3424
|
+
margin-bottom: 8px;
|
|
3425
|
+
}
|
|
3426
|
+
|
|
3427
|
+
.final-conclusion-list {
|
|
3428
|
+
list-style: none;
|
|
3429
|
+
padding: 0;
|
|
3430
|
+
font-size: 1.05rem;
|
|
3431
|
+
text-align: left;
|
|
3432
|
+
max-width: 640px;
|
|
3433
|
+
margin: 20px auto;
|
|
3434
|
+
}
|
|
3435
|
+
|
|
3436
|
+
.final-conclusion-list li {
|
|
3437
|
+
margin: 4px 0;
|
|
3438
|
+
}
|
|
3439
|
+
|
|
3440
|
+
.final-conclusion-tip {
|
|
3441
|
+
margin-top: 16px;
|
|
3442
|
+
padding: 16px;
|
|
3443
|
+
border-radius: 12px;
|
|
3444
|
+
border-left: 6px solid var(--color-accent);
|
|
3445
|
+
background-color: color-mix(in srgb, var(--color-accent) 12%, transparent);
|
|
3446
|
+
text-align: left;
|
|
3447
|
+
font-size: 0.98rem;
|
|
3448
|
+
line-height: 1.4;
|
|
3449
|
+
}
|
|
3450
|
+
|
|
3451
|
+
.final-conclusion-ethics {
|
|
3452
|
+
margin-top: 16px;
|
|
3453
|
+
padding: 18px;
|
|
3454
|
+
border-radius: 12px;
|
|
3455
|
+
border-left: 6px solid #ef4444;
|
|
3456
|
+
background-color: color-mix(in srgb, #ef4444 10%, transparent);
|
|
3457
|
+
text-align: left;
|
|
3458
|
+
font-size: 0.98rem;
|
|
3459
|
+
line-height: 1.4;
|
|
3460
|
+
}
|
|
3461
|
+
|
|
3462
|
+
.final-conclusion-attempt-cap {
|
|
3463
|
+
margin-top: 16px;
|
|
3464
|
+
padding: 16px;
|
|
3465
|
+
border-radius: 12px;
|
|
3466
|
+
border-left: 6px solid #ef4444;
|
|
3467
|
+
background-color: color-mix(in srgb, #ef4444 16%, transparent);
|
|
3468
|
+
text-align: left;
|
|
3469
|
+
font-size: 0.98rem;
|
|
3470
|
+
line-height: 1.4;
|
|
3471
|
+
}
|
|
3472
|
+
|
|
3473
|
+
.final-conclusion-divider {
|
|
3474
|
+
margin: 28px 0;
|
|
3475
|
+
border: 0;
|
|
3476
|
+
border-top: 2px solid var(--border-color-primary);
|
|
3477
|
+
}
|
|
3478
|
+
|
|
3479
|
+
.final-conclusion-next h2 {
|
|
3480
|
+
margin: 0;
|
|
3481
|
+
}
|
|
3482
|
+
|
|
3483
|
+
.final-conclusion-next p {
|
|
3484
|
+
font-size: 1rem;
|
|
3485
|
+
margin-top: 4px;
|
|
3486
|
+
margin-bottom: 0;
|
|
3487
|
+
}
|
|
3488
|
+
|
|
3489
|
+
.final-conclusion-scroll {
|
|
3490
|
+
margin: 12px 0 0 0;
|
|
3491
|
+
font-size: 3rem;
|
|
3492
|
+
animation: pulseArrow 2.5s infinite;
|
|
3493
|
+
}
|
|
3494
|
+
|
|
3495
|
+
/* ---------------------------------------------------- */
|
|
3496
|
+
/* Dark Mode Overrides for Final Slide */
|
|
3497
|
+
/* ---------------------------------------------------- */
|
|
3498
|
+
|
|
3499
|
+
@media (prefers-color-scheme: dark) {
|
|
3500
|
+
.final-conclusion-card {
|
|
3501
|
+
background-color: #0b1120; /* deep slate */
|
|
3502
|
+
color: white; /* maximum contrast on the dark card */
|
|
3503
|
+
border-color: #38bdf8;
|
|
3504
|
+
box-shadow: none;
|
|
3505
|
+
}
|
|
3506
|
+
|
|
3507
|
+
.final-conclusion-tip {
|
|
3508
|
+
background-color: rgba(56, 189, 248, 0.18);
|
|
3509
|
+
}
|
|
3510
|
+
|
|
3511
|
+
.final-conclusion-ethics {
|
|
3512
|
+
background-color: rgba(248, 113, 113, 0.18);
|
|
3513
|
+
}
|
|
3514
|
+
|
|
3515
|
+
.final-conclusion-attempt-cap {
|
|
3516
|
+
background-color: rgba(248, 113, 113, 0.26);
|
|
3517
|
+
}
|
|
3518
|
+
}
|
|
3519
|
+
/* ---------------------------------------------------- */
|
|
3520
|
+
/* Slide 3: INPUT → MODEL → OUTPUT flow (theme-aware) */
|
|
3521
|
+
/* ---------------------------------------------------- */
|
|
3522
|
+
|
|
3523
|
+
|
|
3524
|
+
.model-flow {
|
|
3525
|
+
text-align: center;
|
|
3526
|
+
font-weight: 600;
|
|
3527
|
+
font-size: 1.2rem;
|
|
3528
|
+
margin: 20px 0;
|
|
3529
|
+
/* No explicit color – inherit from the card */
|
|
3530
|
+
}
|
|
3531
|
+
|
|
3532
|
+
.model-flow-label {
|
|
3533
|
+
padding: 0 0.1rem;
|
|
3534
|
+
/* No explicit color – inherit */
|
|
3535
|
+
}
|
|
3536
|
+
|
|
3537
|
+
.model-flow-arrow {
|
|
3538
|
+
margin: 0 0.35rem;
|
|
3539
|
+
font-size: 1.4rem;
|
|
3540
|
+
/* No explicit color – inherit */
|
|
3541
|
+
}
|
|
3542
|
+
|
|
3543
|
+
@media (prefers-color-scheme: dark) {
|
|
3544
|
+
.model-flow {
|
|
3545
|
+
color: var(--body-text-color);
|
|
3546
|
+
}
|
|
3547
|
+
.model-flow-arrow {
|
|
3548
|
+
/* In dark mode, nudge arrows toward accent for contrast/confidence */
|
|
3549
|
+
color: color-mix(in srgb, var(--color-accent) 75%, var(--body-text-color) 25%);
|
|
3550
|
+
}
|
|
3551
|
+
}
|
|
3552
|
+
"""
|
|
3553
|
+
|
|
3554
|
+
|
|
3555
|
+
# Component globals referenced by the yield dictionaries in the handlers above
|
|
3556
|
+
global submit_button, submission_feedback_display, team_leaderboard_display
|
|
3557
|
+
|
|
3558
|
+
global individual_leaderboard_display, last_submission_score_state, last_rank_state, best_score_state, submission_count_state, first_submission_score_state
|
|
3559
|
+
|
|
3560
|
+
global rank_message_display, model_type_radio, complexity_slider
|
|
3561
|
+
global feature_set_checkbox, data_size_radio
|
|
3562
|
+
global login_username, login_password, login_submit, login_error
|
|
3563
|
+
global attempts_tracker_display, team_name_state
|
|
3564
|
+
|
|
3565
|
+
with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo"), css=css) as demo:
|
|
3566
|
+
# Persistent top anchor for scroll-to-top navigation
|
|
3567
|
+
gr.HTML("<div id='app_top_anchor' style='height:0;'></div>")
|
|
3568
|
+
|
|
3569
|
+
# Navigation loading overlay with spinner and dynamic message
|
|
3570
|
+
gr.HTML("""
|
|
3571
|
+
<div id='nav-loading-overlay'>
|
|
3572
|
+
<div class='nav-spinner'></div>
|
|
3573
|
+
<span id='nav-loading-text'>Carregant...</span>
|
|
3574
|
+
</div>
|
|
3575
|
+
""")
|
|
3576
|
+
|
|
3577
|
+
# Concurrency Note: Do NOT read per-user state from os.environ here.
|
|
3578
|
+
# Username and other per-user data are managed via gr.State objects
|
|
3579
|
+
# and populated during handle_load_with_session_auth.
|
|
3580
|
+
|
|
3581
|
+
# Loading screen
|
|
3582
|
+
with gr.Column(visible=False) as loading_screen:
|
|
3583
|
+
gr.Markdown(
|
|
3584
|
+
"""
|
|
3585
|
+
<div style='text-align:center; padding:100px 0;'>
|
|
3586
|
+
<h2 style='font-size:2rem; color:#6b7280;'>⏳ Carregant...</h2>
|
|
3587
|
+
</div>
|
|
3588
|
+
"""
|
|
3589
|
+
)
|
|
3590
|
+
|
|
3591
|
+
# --- Briefing Slideshow (Updated with New Cards) ---
|
|
3592
|
+
|
|
3593
|
+
# Slide 1: From Understanding to Building (Retained as transition)
|
|
3594
|
+
with gr.Column(visible=True, elem_id="slide-1") as briefing_slide_1:
|
|
3595
|
+
gr.Markdown("<h1 style='text-align:center;'>🔄 De la teoria a la pràctica</h1>")
|
|
3596
|
+
gr.HTML(
|
|
3597
|
+
"""
|
|
3598
|
+
<div class='slide-content'>
|
|
3599
|
+
<div class='panel-box'>
|
|
3600
|
+
<h3 style='font-size: 1.5rem; text-align:center; margin-top:0;'>Bona feina! Ara ja has aconseguit:</h3>
|
|
3601
|
+
|
|
3602
|
+
<ul style='list-style: none; padding-left: 0; margin-top: 24px; margin-bottom: 24px;'>
|
|
3603
|
+
<li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
|
|
3604
|
+
<span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
|
|
3605
|
+
Prendre decisions difícils com a jutge o jutgessa utilitzant prediccions d’IA
|
|
3606
|
+
</li>
|
|
3607
|
+
<li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
|
|
3608
|
+
<span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
|
|
3609
|
+
Aprendre què són els falsos positius (falses alarmes) i els falsos negatius (advertències ignorades)
|
|
3610
|
+
</li>
|
|
3611
|
+
<li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
|
|
3612
|
+
<span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
|
|
3613
|
+
Entendre els conceptes bàsics de com funciona la IA:
|
|
3614
|
+
</li>
|
|
3615
|
+
</ul>
|
|
3616
|
+
|
|
3617
|
+
<div style='background:white; padding:16px; border-radius:12px; margin:12px 0; text-align:center;'>
|
|
3618
|
+
<div style='display:inline-block; background:#dbeafe; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3619
|
+
<h3 style='margin:0; color:#0369a1;'>ENTRADA</h3>
|
|
3620
|
+
</div>
|
|
3621
|
+
<div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
|
|
3622
|
+
<div style='display:inline-block; background:#fef3c7; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3623
|
+
<h3 style='margin:0; color:#92400e;'>MODEL</h3>
|
|
3624
|
+
</div>
|
|
3625
|
+
<div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
|
|
3626
|
+
<div style='display:inline-block; background:#f0fdf4; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3627
|
+
<h3 style='margin:0; color:#15803d;'>SORTIDA</h3>
|
|
3628
|
+
</div>
|
|
3629
|
+
</div>
|
|
3630
|
+
|
|
3631
|
+
<hr style='margin: 24px 0; border-top: 2px solid #c7d2fe;'>
|
|
3632
|
+
|
|
3633
|
+
<h3 style='font-size: 1.5rem; text-align:center;'>Ara és el moment de posar-te a la pell d’una persona enginyera d’IA.</h3>
|
|
3634
|
+
<p style='font-size: 1.1rem; text-align:center; margin-top: 12px;'>
|
|
3635
|
+
<strong>El teu nou repte:</strong> Crear models d’IA que siguin més precisos que el que has utilitzat en el rol de jutjar casos.
|
|
3636
|
+
</p>
|
|
3637
|
+
<p style='font-size: 1.1rem; text-align:center; margin-top: 12px;'>
|
|
3638
|
+
Recorda: has viscut en primera persona com les prediccions de la IA afecten la vida real de les persones. Fes servir aquest coneixement per construir un model millor.
|
|
3639
|
+
</p>
|
|
3640
|
+
</div>
|
|
3641
|
+
</div>
|
|
3642
|
+
"""
|
|
3643
|
+
)
|
|
3644
|
+
briefing_1_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3645
|
+
|
|
3646
|
+
# Slide 2: Card 1 (Your Engineering Mission)
|
|
3647
|
+
with gr.Column(visible=False, elem_id="slide-2") as briefing_slide_2:
|
|
3648
|
+
gr.Markdown("<h1 style='text-align:center;'>📋 La teva missió - Construir una IA millor</h1>")
|
|
3649
|
+
|
|
3650
|
+
gr.HTML(
|
|
3651
|
+
"""
|
|
3652
|
+
<div class='slide-content'>
|
|
3653
|
+
<div class='panel-box'>
|
|
3654
|
+
<h3>La missió</h3>
|
|
3655
|
+
<p>Crea un sistema d’IA que ajudi als tribunals a prendre decisions més encertades. El model que has utilitzat abans et donava recomanacions imperfectes. Ara la teva feina és construir un model nou que predigui el risc amb més precisió i ofereixi a qui jutja informació fiable per poder ser justos i justes.</p>
|
|
3656
|
+
|
|
3657
|
+
<h3>La competició</h3>
|
|
3658
|
+
<p>Per fer-ho, competiràs amb altres persones enginyeres! Per ajudar-te en la missió, formaràs part d’un equip d’enginyeria. Els teus resultats es registraran tant de manera individual com col·lectiva a les classificacions en directe.</p>
|
|
3659
|
+
</div>
|
|
3660
|
+
|
|
3661
|
+
<div class='leaderboard-box' style='max-width: 600px; margin: 16px auto; text-align: center; padding: 16px;'>
|
|
3662
|
+
<p style='font-size: 1.1rem; margin:0;'>T’uniràs a un equip com ara...</p>
|
|
3663
|
+
<h3 style='font-size: 1.75rem; color: #6b7280; margin: 8px 0;'>
|
|
3664
|
+
🛡️ Els Exploradors Ètics
|
|
3665
|
+
</h3>
|
|
3666
|
+
</div>
|
|
3667
|
+
|
|
3668
|
+
<div class='mock-ui-box'>
|
|
3669
|
+
<h3>El repte de les dades</h3>
|
|
3670
|
+
<p>Per competir, tindràs accés a milers d’expedients de casos antics. Disposes de dos tipus d’informació:</p>
|
|
3671
|
+
<ol style='list-style-position: inside; padding-left: 20px;'>
|
|
3672
|
+
<li><strong>Perfils de les persones preses:</strong> És la informació que tenia el tribunal en el moment de la detenció.
|
|
3673
|
+
<ul style='margin-left: 20px; list-style-type: disc;'>
|
|
3674
|
+
<li><em>Edat, nombre d'antecedents penals, tipus de càrrec penal.</em></li>
|
|
3675
|
+
</ul>
|
|
3676
|
+
</li>
|
|
3677
|
+
<li><strong>Resultats històrics:</strong> Això és el que va passar amb aquestes persones al cap d’un temps.
|
|
3678
|
+
<ul style='margin-left: 20px; list-style-type: disc;'>
|
|
3679
|
+
<li><em>Van tornar a cometre un delicte en dos anys? (Sí/No)</em></li>
|
|
3680
|
+
</ul>
|
|
3681
|
+
</li>
|
|
3682
|
+
</ol>
|
|
3683
|
+
|
|
3684
|
+
<h3>La tasca principal</h3>
|
|
3685
|
+
<p>Has d’ensenyar el teu model d'IA a analitzar els "perfils" i predir amb precisió el "resultat".</p>
|
|
3686
|
+
<p><strong>A punt per construir alguna cosa que podria canviar la manera com funciona la justícia?</strong></p>
|
|
3687
|
+
</div>
|
|
3688
|
+
</div>
|
|
3689
|
+
"""
|
|
3690
|
+
)
|
|
3691
|
+
|
|
3692
|
+
with gr.Row():
|
|
3693
|
+
briefing_2_back = gr.Button("◀️ Enrere", size="lg")
|
|
3694
|
+
briefing_2_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3695
|
+
|
|
3696
|
+
# Slide 3: Card 2 (What is a "Model"?)
|
|
3697
|
+
with gr.Column(visible=False, elem_id="slide-3") as briefing_slide_3:
|
|
3698
|
+
gr.Markdown("<h1 style='text-align:center;'>🧠 Què és un sistema d'IA?</h1>")
|
|
3699
|
+
|
|
3700
|
+
# --- FIX FOR SLIDE 3 ---
|
|
3701
|
+
# Combined all content into single gr.HTML()
|
|
3702
|
+
gr.HTML(
|
|
3703
|
+
"""
|
|
3704
|
+
<div class='slide-content'>
|
|
3705
|
+
<div class='panel-box'>
|
|
3706
|
+
<p>Abans de començar a competir, analitzem exactament què estàs construint.</p>
|
|
3707
|
+
<h3>Pensa en un sistema d'IA com una "Màquina de Predicció."</h3>
|
|
3708
|
+
<p>Ja coneixes el flux:</p>
|
|
3709
|
+
|
|
3710
|
+
<div style='background:white; padding:16px; border-radius:12px; margin:12px 0; text-align:center;'>
|
|
3711
|
+
<div style='display:inline-block; background:#dbeafe; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3712
|
+
<h3 style='margin:0; color:#0369a1;'>ENTRADA</h3>
|
|
3713
|
+
</div>
|
|
3714
|
+
<div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
|
|
3715
|
+
<div style='display:inline-block; background:#fef3c7; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3716
|
+
<h3 style='margin:0; color:#92400e;'>MODEL</h3>
|
|
3717
|
+
</div>
|
|
3718
|
+
<div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
|
|
3719
|
+
<div style='display:inline-block; background:#f0fdf4; padding:12px 16px; border-radius:8px; margin:4px;'>
|
|
3720
|
+
<h3 style='margin:0; color:#15803d;'>SORTIDA</h3>
|
|
3721
|
+
</div>
|
|
3722
|
+
</div>
|
|
3723
|
+
|
|
3724
|
+
<p>Com a persona enginyera, no cal que escriguis codi complex des de zero. En lloc d'això, muntes aquesta màquina utilitzant tres components principals:</p>
|
|
3725
|
+
</div>
|
|
3726
|
+
|
|
3727
|
+
<div class='mock-ui-box'>
|
|
3728
|
+
<h3>Els 3 components:</h3>
|
|
3729
|
+
<p><strong>1. L'entrada (Dades)</strong><br>
|
|
3730
|
+
La informació que li dones a la màquina.<br>
|
|
3731
|
+
<em>* Exemples: Edat, antecedents, detalls de l'acusació.</em></p>
|
|
3732
|
+
|
|
3733
|
+
<p><strong>2. El Model (El "cervell")</strong><br>
|
|
3734
|
+
Aquest és el "cervell" de la teva màquina. Estudia les dades d'entrada i intenta descobrir com es connecten les coses per fer una predicció. Pots triar diferents estratègies de model (cervells) per a la teva màquina.<br>
|
|
3735
|
+
<em>* Exemples: Alguns "cervells" només troben regles senzilles (com marcar un correu si diu 'diners gratis'). Altres tenen la capacitat de trobar patrons complexos (com reconèixer una cara concreta enmig d'una multitud).</em></p>
|
|
3736
|
+
|
|
3737
|
+
<p><strong>3. La sortida (La predicció)</strong><br>
|
|
3738
|
+
El que el model intenta endevinar com a millor opció.<br>
|
|
3739
|
+
<em>* Exemple: Nivell de risc: Alt o Baix.</em></p>
|
|
3740
|
+
|
|
3741
|
+
<hr>
|
|
3742
|
+
|
|
3743
|
+
<p><strong>Com aprèn:</strong> Mostres al model milers de casos antics (Entrades) + el que va passar realment (Sortides). El model els estudia per trobar les regles i així poder fer prediccions sobre casos nous que no ha vist mai.</p>
|
|
3744
|
+
</div>
|
|
3745
|
+
</div>
|
|
3746
|
+
"""
|
|
3747
|
+
)
|
|
3748
|
+
# --- END FIX ---
|
|
3749
|
+
|
|
3750
|
+
with gr.Row():
|
|
3751
|
+
briefing_3_back = gr.Button("◀️ Enrere", size="lg")
|
|
3752
|
+
briefing_3_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3753
|
+
|
|
3754
|
+
# Slide 4: Card 3 (How Engineers Work — The Loop)
|
|
3755
|
+
with gr.Column(visible=False, elem_id="slide-4") as briefing_slide_4:
|
|
3756
|
+
gr.Markdown("<h1 style='text-align:center;'>🔁 Com treballen les persones enginyeres — El bucle</h1>")
|
|
3757
|
+
|
|
3758
|
+
# --- FIX FOR SLIDE 4 ---
|
|
3759
|
+
# Combined all content into single gr.HTML()
|
|
3760
|
+
gr.HTML(
|
|
3761
|
+
"""
|
|
3762
|
+
<div class='slide-content'>
|
|
3763
|
+
<div class='panel-box'>
|
|
3764
|
+
<p>Ara que ja coneixes els components d'un sistema d'IA, com en pots construir un de millor?</p>
|
|
3765
|
+
<h3>Aquí tens el secret:</h3>
|
|
3766
|
+
<p>Els equips d'IA reals gairebé mai l'encerten a la primera. En lloc d'això, segueixen un bucle continu d'experimentació: <strong>provar, comprovar, aprendre, repetir.</strong></p>
|
|
3767
|
+
|
|
3768
|
+
<h3>El bucle d'experimentació:</h3>
|
|
3769
|
+
<ol style='list-style-position: inside;'>
|
|
3770
|
+
<li><strong>Construeix un sistema d'IA:</strong> Escull els seus components i obtindràs una puntuació inicial de precisió.</li>
|
|
3771
|
+
<li><strong>Fes-te una pregunta:</strong> (p. ex., "Què passa si canvio el 'Cervell' —el tipus de model—?")</li>
|
|
3772
|
+
<li><strong>Comprova i compara:</strong> La puntuació ha millorat... o ha empitjorat?</li>
|
|
3773
|
+
</ol>
|
|
3774
|
+
</div>
|
|
3775
|
+
|
|
3776
|
+
<h3>Faràs exactament el mateix en la competició!</h3>
|
|
3777
|
+
|
|
3778
|
+
<div class='step-visual'>
|
|
3779
|
+
<div class='step-visual-box'><b>1. Configura</b><br/>Fes servir els controls per seleccionar el tipus de model i les dades.</div>
|
|
3780
|
+
<div class='step-visual-arrow'>→</div>
|
|
3781
|
+
<div class='step-visual-box'><b>2. Envia</b><br/>Clica a "construeix i envia" per entrenar el teu model.</div>
|
|
3782
|
+
<div class='step-visual-arrow'>→</div>
|
|
3783
|
+
<div class='step-visual-box'><b>3. Analitza</b><br/>Mira la teva posició a la classificació en temps real.</div>
|
|
3784
|
+
<div class='step-visual-arrow'>→</div>
|
|
3785
|
+
<div class='step-visual-box'><b>4. Millora</b><br/>Canvia una opció i torna a enviar-lo.</div>
|
|
3786
|
+
</div>
|
|
3787
|
+
|
|
3788
|
+
<div class='leaderboard-box' style='text-align:center;'>
|
|
3789
|
+
<p><strong>Consell:</strong> Intenta canviar només una cosa a la vegada. Si canvies massa coses de cop, no sabràs què és el que ha fet que el teu model millori o empitjori!</p>
|
|
3790
|
+
</div>
|
|
3791
|
+
</div>
|
|
3792
|
+
"""
|
|
3793
|
+
)
|
|
3794
|
+
# --- END FIX ---
|
|
3795
|
+
|
|
3796
|
+
with gr.Row():
|
|
3797
|
+
briefing_4_back = gr.Button("◀️ Enrere", size="lg")
|
|
3798
|
+
briefing_4_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3799
|
+
|
|
3800
|
+
# Slide 5: Card 4 (Control Knobs — The "Brain" Settings)
|
|
3801
|
+
with gr.Column(visible=False, elem_id="slide-5") as briefing_slide_5:
|
|
3802
|
+
gr.Markdown("<h1 style='text-align:center;'>🎛️ Controls — La configuració del \"cervell\"</h1>")
|
|
3803
|
+
|
|
3804
|
+
# --- FIX FOR SLIDE 5 ---
|
|
3805
|
+
# Combined all content into single gr.HTML()
|
|
3806
|
+
gr.HTML(
|
|
3807
|
+
"""
|
|
3808
|
+
<div class='slide-content'>
|
|
3809
|
+
<div class='mock-ui-inner'>
|
|
3810
|
+
<p>Per construir el teu sistema d’IA, utilitzaràs controls per configurar la teva Màquina de Predicció. Els dos primers controls et permeten triar l’estratègia del model (el cervell) i ajustar com aprèn patrons a partir de les dades.</p>
|
|
3811
|
+
<hr style='margin: 16px 0;'>
|
|
3812
|
+
|
|
3813
|
+
<h3 style='margin-top:0;'>1. Estratègia del Model (Tipus de model)</h3>
|
|
3814
|
+
<div style='font-size: 1rem; margin-bottom:12px;'>
|
|
3815
|
+
<b>Què és:</b> El cervell de la teva Màquina de Predicció. Utilitza un mètode matemàtic concret —anomenat algorisme— per trobar patrons a les dades. Quan aprèn d’aquests patrons, es converteix en un model preparat per fer la millor predicció possible.
|
|
3816
|
+
</div>
|
|
3817
|
+
<div class='mock-ui-control-box'>
|
|
3818
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3819
|
+
<span class='mock-ui-radio-on'>◉</span>
|
|
3820
|
+
<b>El Generalista Equilibrat:</b> Aprèn a partir de tot el conjunt de dades i combina diversos factors en cada decisió, cosa que ajuda a obtenir resultats coherents en diferents situacions.
|
|
3821
|
+
</p>
|
|
3822
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3823
|
+
<span class='mock-ui-radio-off'>○</span>
|
|
3824
|
+
<b>El Creador de Regles:</b> Utilitza regles clares del tipus “Si… aleshores…”, fàcils d’entendre però menys flexibles. (Per exemple: si hi ha delictes previs > 2, aleshores hi ha risc alt).
|
|
3825
|
+
</p>
|
|
3826
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3827
|
+
<span class='mock-ui-radio-off'>○</span>
|
|
3828
|
+
<b>El Buscador de Patrons Profunds:</b> Un model complex que detecta patrons amagats a les dades, però amb decisions més difícils d’explicar.
|
|
3829
|
+
</p>
|
|
3830
|
+
</div>
|
|
3831
|
+
|
|
3832
|
+
<hr style='margin: 24px 0;'>
|
|
3833
|
+
|
|
3834
|
+
<h3>2. Complexitat del Model (Nivell d’ajust)</h3>
|
|
3835
|
+
<div class='mock-ui-control-box' style='text-align: center;'>
|
|
3836
|
+
<p style='font-size: 1.1rem; margin:0;'>Rang: Nivell 1 ─── ● ─── 10</p>
|
|
3837
|
+
</div>
|
|
3838
|
+
|
|
3839
|
+
<div style='margin-top: 16px; font-size: 1rem;'>
|
|
3840
|
+
<ul style='list-style-position: inside;'>
|
|
3841
|
+
<li><b>Què és:</b> És el nivell de detall amb què el model aprèn a partir de les dades: si es fixa sobretot en patrons generals o també en casos molt específics.</li>
|
|
3842
|
+
<li><b>L'equilibri:</b>
|
|
3843
|
+
<ul style='list-style-position: inside; margin-left: 20px;'>
|
|
3844
|
+
<li><b>Baix (Nivell 1):</b> Aprèn principalment patrons generals de les dades.</li>
|
|
3845
|
+
<li><b>Alt (Nivell 5):</b> Aprèn tant patrons generals com detalls molt fins.</li>
|
|
3846
|
+
</ul>
|
|
3847
|
+
</li>
|
|
3848
|
+
</ul>
|
|
3849
|
+
<p style='color:#b91c1c; font-weight:bold; margin-top:10px;'>Avís: Si aquest valor és massa alt, el model pot “memoritzar” detalls aleatoris o coincidències sense importància (soroll) de les dades passades, en lloc d’aprendre la regla general.</p>
|
|
3850
|
+
</div>
|
|
3851
|
+
</div>
|
|
3852
|
+
</div>
|
|
3853
|
+
"""
|
|
3854
|
+
)
|
|
3855
|
+
# --- END FIX ---
|
|
3856
|
+
|
|
3857
|
+
with gr.Row():
|
|
3858
|
+
briefing_5_back = gr.Button("◀️ Enrere", size="lg")
|
|
3859
|
+
briefing_5_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3860
|
+
|
|
3861
|
+
# Slide 6: Card 5 (Control Knobs — The "Data" Settings)
|
|
3862
|
+
with gr.Column(visible=False, elem_id="slide-6") as briefing_slide_6:
|
|
3863
|
+
gr.Markdown("<h1 style='text-align:center;'>🎛️ Controls — La configuració de \"dades\"</h1>")
|
|
3864
|
+
|
|
3865
|
+
# --- FIX FOR SLIDE 6 ---
|
|
3866
|
+
# Combined all content into single gr.HTML()
|
|
3867
|
+
gr.HTML(
|
|
3868
|
+
"""
|
|
3869
|
+
<div class='slide-content'>
|
|
3870
|
+
<div class='mock-ui-inner'>
|
|
3871
|
+
<p>Ara que ja has configurat la teva màquina de predicció, has de decidir quina informació processarà. Aquests selectors controlen les dades d'entrada del sistema d'IA.</p>
|
|
3872
|
+
<hr style='margin: 16px 0;'>
|
|
3873
|
+
|
|
3874
|
+
<h3 style='margin-top:0;'>3. Variables de les dades</h3>
|
|
3875
|
+
<div style='font-size: 1rem; margin-bottom:12px;'>
|
|
3876
|
+
<b>Què és:</b> Els punts de dades concrets als quals el sistema de la IA (la màquina) té permís per accedir.
|
|
3877
|
+
<br><b>Per què és important:</b> El resultat del sistema depèn totalment de la informació que rep.
|
|
3878
|
+
</div>
|
|
3879
|
+
|
|
3880
|
+
<div class='mock-ui-control-box'>
|
|
3881
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3882
|
+
<span class='mock-ui-radio-on'>☑</span>
|
|
3883
|
+
<b>Dades de comportament:</b> Informació com el <i>nombre de delictes juvenils</i> ajuda el sistema a identificar patrons de risc basats en fets.
|
|
3884
|
+
</p>
|
|
3885
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3886
|
+
<span class='mock-ui-radio-off'>☐</span>
|
|
3887
|
+
<b>Dades demogràfiques:</b> Dades com la <i>raça</i> poden ajudar el model a aprendre, però també poden replicar biaixos humans.
|
|
3888
|
+
</p>
|
|
3889
|
+
</div>
|
|
3890
|
+
<p style='margin-top:10px;'><b>La teva feina:</b> Marca ☑ o Desmarca ☐ les caselles per triar quina informació "alimentarà" el teu model.</p>
|
|
3891
|
+
|
|
3892
|
+
<hr style='margin: 24px 0;'>
|
|
3893
|
+
|
|
3894
|
+
<h3>4. Volum de dades (Volum d'entrenament)</h3>
|
|
3895
|
+
<div style='font-size: 1rem; margin-bottom:12px;'>
|
|
3896
|
+
<b>Què és:</b> La quantitat de casos històrics que el sistema de la IA utilitza per aprendre patrons.
|
|
3897
|
+
</div>
|
|
3898
|
+
|
|
3899
|
+
<div class='mock-ui-control-box'>
|
|
3900
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3901
|
+
<span class='mock-ui-radio-on'>◉</span>
|
|
3902
|
+
<b>Petit (20%):</b> Processament ràpid. Ideal per fer proves ràpides i revisar la teva configuració.
|
|
3903
|
+
</p>
|
|
3904
|
+
<p style='font-size: 1.1rem; margin: 8px 0;'>
|
|
3905
|
+
<span class='mock-ui-radio-off'>○</span>
|
|
3906
|
+
<b>Complet (100%):</b> Processament màxim de dades. Triga més a construir-se, però dóna al sistema d'IA la millor oportunitat per calibrar la precisió.
|
|
3907
|
+
</p>
|
|
3908
|
+
</div>
|
|
3909
|
+
|
|
3910
|
+
</div>
|
|
3911
|
+
</div>
|
|
3912
|
+
"""
|
|
3913
|
+
)
|
|
3914
|
+
# --- END FIX ---
|
|
3915
|
+
|
|
3916
|
+
with gr.Row():
|
|
3917
|
+
briefing_6_back = gr.Button("◀️ Enrere", size="lg")
|
|
3918
|
+
briefing_6_next = gr.Button("Següent ▶️", variant="primary", size="lg")
|
|
3919
|
+
|
|
3920
|
+
# Slide 7: Card 6 (Your Score as an Engineer)
|
|
3921
|
+
with gr.Column(visible=False, elem_id="slide-7") as briefing_slide_7:
|
|
3922
|
+
gr.Markdown("<h1 style='text-align:center;'>🏆 La teva puntuació com a enginyer/a</h1>")
|
|
3923
|
+
|
|
3924
|
+
# --- FIX FOR SLIDE 7 ---
|
|
3925
|
+
# Combined all content into single gr.HTML()
|
|
3926
|
+
gr.HTML(
|
|
3927
|
+
"""
|
|
3928
|
+
<div class='slide-content'>
|
|
3929
|
+
<div class='panel-box'>
|
|
3930
|
+
<p>Ara que ja saps com construir un model, és hora de posar a prova les teves habilitats. Aquí tens com mesurarem el teu èxit i com podràs pujar en la classificació:</p>
|
|
3931
|
+
|
|
3932
|
+
<h3>Com es calcula la teva puntuació</h3>
|
|
3933
|
+
<ul style='list-style-position: inside;'>
|
|
3934
|
+
<li><strong>Precisió de la predicció:</strong> El teu model es posa a prova amb Dades Ocultes (casos guardats en una "caixa forta secreta" que el teu model mai ha vist). Això simula la predicció del futur per garantir que obtinguis una puntuació de precisió realista.</li>
|
|
3935
|
+
<li><strong>La classificació:</strong> Els marcadors en directe fan un seguiment del teu progrés individualment i en equip.</li>
|
|
3936
|
+
</ul>
|
|
3937
|
+
|
|
3938
|
+
<h3>Com pots millorar: El Joc</h3>
|
|
3939
|
+
<ul style='list-style-position: inside;'>
|
|
3940
|
+
<li><strong>Competeix per millorar:</strong> Refina el teu model per superar la teva millor marca personal.</li>
|
|
3941
|
+
<li><strong>Progressa com a persona enginyera i desbloqueja eines:</strong> A mesura que enviïs més models, guanyaràs posicions i desbloquejaràs millors eines d'anàlisi.</li>
</ul>
|
|
3942
|
+
|
|
3943
|
+
</div>
|
|
3944
|
+
</div>
|
|
3945
|
+
"""
|
|
3946
|
+
)
|
|
3947
|
+
# --- END FIX ---
|
|
3948
|
+
|
|
3949
|
+
with gr.Row():
|
|
3950
|
+
briefing_7_back = gr.Button("◀️ Enrere", size="lg")
|
|
3951
|
+
briefing_7_next = gr.Button("Comença a construir el model ▶️", variant="primary", size="lg")
|
|
3952
|
+
|
|
3953
|
+
# --- End Briefing Slideshow ---
|
|
3954
|
+
|
|
3955
|
+
|
|
3956
|
+
# Model Building App (Main Interface)
|
|
3957
|
+
with gr.Column(visible=False, elem_id="model-step") as model_building_step:
|
|
3958
|
+
gr.Markdown("<h1 style='text-align:center;'>🛠️ Àrea de construcció de models</h1>")
|
|
3959
|
+
|
|
3960
|
+
# Status panel for initialization progress - HIDDEN
|
|
3961
|
+
init_status_display = gr.HTML(value="", visible=False)
|
|
3962
|
+
|
|
3963
|
+
# Banner for UI state
|
|
3964
|
+
|
|
3965
|
+
init_banner = gr.HTML(
|
|
3966
|
+
value=(
|
|
3967
|
+
"<div class='init-banner'>"
|
|
3968
|
+
"<p class='init-banner__text'>"
|
|
3969
|
+
"⏳ Carregant les dades i la classificació… pots explorar, però has d’esperar que estigui llest per enviar."
|
|
3970
|
+
"</p>"
|
|
3971
|
+
"</div>"
|
|
3972
|
+
),
|
|
3973
|
+
visible=True)
|
|
3974
|
+
|
|
3975
|
+
# Session-based authentication state objects
|
|
3976
|
+
# Concurrency Note: These are initialized to None/empty and populated
|
|
3977
|
+
# during handle_load_with_session_auth. Do NOT use os.environ here.
|
|
3978
|
+
username_state = gr.State(None)
|
|
3979
|
+
token_state = gr.State(None)
|
|
3980
|
+
|
|
3981
|
+
team_name_state = gr.State(None) # Populated via handle_load_with_session_auth
|
|
3982
|
+
last_submission_score_state = gr.State(0.0)
|
|
3983
|
+
last_rank_state = gr.State(0)
|
|
3984
|
+
best_score_state = gr.State(0.0)
|
|
3985
|
+
submission_count_state = gr.State(0)
|
|
3986
|
+
first_submission_score_state = gr.State(None)
|
|
3987
|
+
|
|
3988
|
+
# New states for readiness gating and preview tracking
|
|
3989
|
+
readiness_state = gr.State(False)
|
|
3990
|
+
was_preview_state = gr.State(False)
|
|
3991
|
+
kpi_meta_state = gr.State({})
|
|
3992
|
+
last_seen_ts_state = gr.State(None) # Track last seen user timestamp
|
|
3993
|
+
|
|
3994
|
+
# Buffered states for all dynamic inputs
|
|
3995
|
+
model_type_state = gr.State(DEFAULT_MODEL)
|
|
3996
|
+
complexity_state = gr.State(2)
|
|
3997
|
+
feature_set_state = gr.State(DEFAULT_FEATURE_SET)
|
|
3998
|
+
data_size_state = gr.State(DEFAULT_DATA_SIZE)
|
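# Descriptive note (inferred from the wiring further down): these four buffers
# mirror the visible controls via .change() handlers so that run_experiment
# reads a stable, per-session snapshot of the user's choices instead of the
# live component values.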
|
3999
|
+
|
|
4000
|
+
rank_message_display = gr.Markdown("### Carregant la classificació...")
|
|
4001
|
+
with gr.Row():
|
|
4002
|
+
with gr.Column(scale=1):
|
|
4003
|
+
|
|
4004
|
+
model_type_radio = gr.Radio(
|
|
4005
|
+
label="1. Estratègia del model",
|
|
4006
|
+
choices=MODEL_RADIO_CHOICES, # Uses the list of tuples [(Cat, En), ...]
|
|
4007
|
+
value=DEFAULT_MODEL, # "The Balanced Generalist"
|
|
4008
|
+
interactive=False
|
|
4009
|
+
)
|
|
4010
|
+
model_card_display = gr.Markdown(get_model_card(DEFAULT_MODEL))
|
|
4011
|
+
|
|
4012
|
+
gr.Markdown("---") # Separator
|
|
4013
|
+
|
|
4014
|
+
complexity_slider = gr.Slider(
|
|
4015
|
+
label="2. Complexitat del model (1–10)",
|
|
4016
|
+
minimum=1, maximum=3, step=1, value=2,
|
|
4017
|
+
info="Valors més alts aprenen més, però un excés pot empitjorar els resultats."
|
|
4018
|
+
)
|
|
4019
|
+
|
|
4020
|
+
gr.Markdown("---") # Separator
|
|
4021
|
+
|
|
4022
|
+
feature_set_checkbox = gr.CheckboxGroup(
|
|
4023
|
+
label="3. Selecciona les variables de dades",
|
|
4024
|
+
choices=FEATURE_SET_ALL_OPTIONS,
|
|
4025
|
+
value=DEFAULT_FEATURE_SET,
|
|
4026
|
+
interactive=False,
|
|
4027
|
+
info="Desbloqueja més variables a mesura que puges de posició!"
|
|
4028
|
+
)
|
|
4029
|
+
|
|
4030
|
+
gr.Markdown("---") # Separator
|
|
4031
|
+
|
|
4032
|
+
data_size_radio = gr.Radio(
|
|
4033
|
+
label="4. Mida de les dades",
|
|
4034
|
+
choices=[DEFAULT_DATA_SIZE],
|
|
4035
|
+
value=DEFAULT_DATA_SIZE,
|
|
4036
|
+
interactive=False
|
|
4037
|
+
)
|
|
4038
|
+
|
|
4039
|
+
gr.Markdown("---") # Separator
|
|
4040
|
+
|
|
4041
|
+
# Attempt tracker display
|
|
4042
|
+
attempts_tracker_display = gr.HTML(
|
|
4043
|
+
value="<div style='text-align:center; padding:8px; margin:8px 0; background:#f0f9ff; border-radius:8px; border:1px solid #bae6fd;'>"
|
|
4044
|
+
"<p style='margin:0; color:#0369a1; font-weight:600; font-size:1rem;'>📊 Intents utilitzats: 0/10</p>"
|
|
4045
|
+
"</div>",
|
|
4046
|
+
visible=True
|
|
4047
|
+
)
|
|
4048
|
+
|
|
4049
|
+
submit_button = gr.Button(
|
|
4050
|
+
value="5. 🔬 Construir i enviar el model",
|
|
4051
|
+
variant="primary",
|
|
4052
|
+
size="lg"
|
|
4053
|
+
)
|
|
4054
|
+
|
|
4055
|
+
with gr.Column(scale=1):
|
|
4056
|
+
gr.HTML(
|
|
4057
|
+
"""
|
|
4058
|
+
<div class='leaderboard-box'>
|
|
4059
|
+
<h3 style='margin-top:0;'>🏆 Classificació en directe</h3>
|
|
4060
|
+
<p style='margin:0;'>Envia un model per veure la teva posició.</p>
|
|
4061
|
+
</div>
|
|
4062
|
+
"""
|
|
4063
|
+
)
|
|
4064
|
+
|
|
4065
|
+
# KPI Card
|
|
4066
|
+
submission_feedback_display = gr.HTML(
|
|
4067
|
+
"<p style='text-align:center; color:#6b7280; padding:20px 0;'>Envia el teu primer model per obtenir una valoració!</p>"
|
|
4068
|
+
)
|
|
4069
|
+
|
|
4070
|
+
# Inline Login Components (initially hidden)
|
|
4071
|
+
login_username = gr.Textbox(
|
|
4072
|
+
label="Username",
|
|
4073
|
+
placeholder="Enter your modelshare.ai username",
|
|
4074
|
+
visible=False
|
|
4075
|
+
)
|
|
4076
|
+
login_password = gr.Textbox(
|
|
4077
|
+
label="Password",
|
|
4078
|
+
type="password",
|
|
4079
|
+
placeholder="Enter your password",
|
|
4080
|
+
visible=False
|
|
4081
|
+
)
|
|
4082
|
+
login_submit = gr.Button(
|
|
4083
|
+
"Sign In & Submit",
|
|
4084
|
+
variant="primary",
|
|
4085
|
+
visible=False
|
|
4086
|
+
)
|
|
4087
|
+
login_error = gr.HTML(
|
|
4088
|
+
value="",
|
|
4089
|
+
visible=False
|
|
4090
|
+
)
|
|
4091
|
+
|
|
4092
|
+
with gr.Tabs():
|
|
4093
|
+
with gr.TabItem("Classificació per equips"):
|
|
4094
|
+
team_leaderboard_display = gr.HTML(
|
|
4095
|
+
"<p style='text-align:center; color:#6b7280; padding-top:20px;'>Envia un model per veure la classificació dels equips.</p>"
|
|
4096
|
+
)
|
|
4097
|
+
with gr.TabItem("Classificació individual"):
|
|
4098
|
+
individual_leaderboard_display = gr.HTML(
|
|
4099
|
+
"<p style='text-align:center; color:#6b7280; padding-top:20px;'>Envia un model per veure la classificació individual.</p>"
|
|
4100
|
+
)
|
|
4101
|
+
|
|
4102
|
+
# REMOVED: Ethical Reminder HTML Block
|
|
4103
|
+
step_2_next = gr.Button("Finalitza i reflexiona ▶️", variant="secondary")
|
|
4104
|
+
|
|
4105
|
+
# Conclusion Step
|
|
4106
|
+
with gr.Column(visible=False, elem_id="conclusion-step") as conclusion_step:
|
|
4107
|
+
gr.Markdown("<h1 style='text-align:center;'>✅ Secció completada</h1>")
|
|
4108
|
+
final_score_display = gr.HTML(value="<p>Preparant el resum final...</p>")
|
|
4109
|
+
step_3_back = gr.Button("◀️ Tornar a l'experiment")
|
|
4110
|
+
|
|
4111
|
+
# --- Navigation Logic ---
|
|
4112
|
+
all_steps_nav = [
|
|
4113
|
+
briefing_slide_1, briefing_slide_2, briefing_slide_3,
|
|
4114
|
+
briefing_slide_4, briefing_slide_5, briefing_slide_6, briefing_slide_7,
|
|
4115
|
+
model_building_step, conclusion_step, loading_screen
|
|
4116
|
+
]
|
|
4117
|
+
|
|
4118
|
+
def create_nav(current_step, next_step):
|
|
4119
|
+
"""
|
|
4120
|
+
Simplified navigation: directly switches visibility without artificial loading screen.
|
|
4121
|
+
Loading screen only shown when entering arena if not yet ready.
|
|
4122
|
+
"""
|
|
4123
|
+
def _nav():
|
|
4124
|
+
# Direct single-step navigation
|
|
4125
|
+
updates = {next_step: gr.update(visible=True)}
|
|
4126
|
+
for s in all_steps_nav:
|
|
4127
|
+
if s != next_step:
|
|
4128
|
+
updates[s] = gr.update(visible=False)
|
|
4129
|
+
return updates
|
|
4130
|
+
return _nav
|
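# Illustrative sketch (not part of the packaged file): create_nav(...) returns a
# closure that yields a {component: gr.update(visible=...)} mapping, e.g.
#
#     nav = create_nav(briefing_slide_1, briefing_slide_2)
#     updates = nav()
#     # updates[briefing_slide_2] is gr.update(visible=True);
#     # every other step in all_steps_nav maps to gr.update(visible=False).
#
# Gradio accepts a dict return like this when the same components are listed
# in outputs=, as done in the click wiring below.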
|
4131
|
+
|
|
4132
|
+
def finalize_and_show_conclusion(best_score, submissions, rank, first_score, feature_set):
|
|
4133
|
+
"""Build dynamic conclusion HTML and navigate to conclusion step."""
|
|
4134
|
+
html = build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set)
|
|
4135
|
+
updates = {
|
|
4136
|
+
conclusion_step: gr.update(visible=True),
|
|
4137
|
+
final_score_display: gr.update(value=html)
|
|
4138
|
+
}
|
|
4139
|
+
for s in all_steps_nav:
|
|
4140
|
+
if s != conclusion_step:
|
|
4141
|
+
updates[s] = gr.update(visible=False)
|
|
4142
|
+
return [updates[s] if s in updates else gr.update() for s in all_steps_nav] + [html]
|
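# Descriptive note: the list above is ordered to match
# outputs=all_steps_nav + [final_score_display] in the step_2_next.click()
# wiring further down, so each gr.update lands on the intended component.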
|
4143
|
+
|
|
4144
|
+
# Helper function to generate navigation JS with loading overlay
|
|
4145
|
+
def nav_js(target_id: str, message: str, min_show_ms: int = 1200) -> str:
|
|
4146
|
+
"""
|
|
4147
|
+
Generate JavaScript for enhanced slide navigation with loading overlay.
|
|
4148
|
+
|
|
4149
|
+
Args:
|
|
4150
|
+
target_id: Element ID of the target slide (e.g., 'slide-2', 'model-step')
|
|
4151
|
+
message: Loading message to display during transition
|
|
4152
|
+
min_show_ms: Minimum time to show overlay (prevents flicker)
|
|
4153
|
+
|
|
4154
|
+
Returns:
|
|
4155
|
+
JavaScript arrow function string for Gradio's js parameter
|
|
4156
|
+
"""
|
|
4157
|
+
return f"""
|
|
4158
|
+
()=>{{
|
|
4159
|
+
try {{
|
|
4160
|
+
// Show overlay immediately
|
|
4161
|
+
const overlay = document.getElementById('nav-loading-overlay');
|
|
4162
|
+
const messageEl = document.getElementById('nav-loading-text');
|
|
4163
|
+
if(overlay && messageEl) {{
|
|
4164
|
+
messageEl.textContent = '{message}';
|
|
4165
|
+
overlay.style.display = 'flex';
|
|
4166
|
+
setTimeout(() => {{ overlay.style.opacity = '1'; }}, 10);
|
|
4167
|
+
}}
|
|
4168
|
+
|
|
4169
|
+
const startTime = Date.now();
|
|
4170
|
+
|
|
4171
|
+
// Scroll to top after brief delay
|
|
4172
|
+
setTimeout(() => {{
|
|
4173
|
+
const anchor = document.getElementById('app_top_anchor');
|
|
4174
|
+
const container = document.querySelector('.gradio-container') || document.scrollingElement || document.documentElement;
|
|
4175
|
+
|
|
4176
|
+
function doScroll() {{
|
|
4177
|
+
if(anchor) {{ anchor.scrollIntoView({{behavior:'smooth', block:'start'}}); }}
|
|
4178
|
+
else {{ container.scrollTo({{top:0, behavior:'smooth'}}); }}
|
|
4179
|
+
|
|
4180
|
+
// Best-effort Colab iframe scroll
|
|
4181
|
+
try {{
|
|
4182
|
+
if(window.parent && window.parent !== window && window.frameElement) {{
|
|
4183
|
+
const top = window.frameElement.getBoundingClientRect().top + window.parent.scrollY;
|
|
4184
|
+
window.parent.scrollTo({{top: Math.max(top - 10, 0), behavior:'smooth'}});
|
|
4185
|
+
}}
|
|
4186
|
+
}} catch(e2) {{}}
|
|
4187
|
+
}}
|
|
4188
|
+
|
|
4189
|
+
doScroll();
|
|
4190
|
+
// Retry scroll to combat layout shifts
|
|
4191
|
+
let scrollAttempts = 0;
|
|
4192
|
+
const scrollInterval = setInterval(() => {{
|
|
4193
|
+
scrollAttempts++;
|
|
4194
|
+
doScroll();
|
|
4195
|
+
if(scrollAttempts >= 3) clearInterval(scrollInterval);
|
|
4196
|
+
}}, 130);
|
|
4197
|
+
}}, 40);
|
|
4198
|
+
|
|
4199
|
+
// Poll for target visibility and minimum display time
|
|
4200
|
+
const targetId = '{target_id}';
|
|
4201
|
+
const minShowMs = {min_show_ms};
|
|
4202
|
+
let pollCount = 0;
|
|
4203
|
+
const maxPolls = 77; // ~7 seconds max
|
|
4204
|
+
|
|
4205
|
+
const pollInterval = setInterval(() => {{
|
|
4206
|
+
pollCount++;
|
|
4207
|
+
const elapsed = Date.now() - startTime;
|
|
4208
|
+
const target = document.getElementById(targetId);
|
|
4209
|
+
const isVisible = target && target.offsetParent !== null &&
|
|
4210
|
+
window.getComputedStyle(target).display !== 'none';
|
|
4211
|
+
|
|
4212
|
+
// Hide overlay when target is visible AND minimum time elapsed
|
|
4213
|
+
if((isVisible && elapsed >= minShowMs) || pollCount >= maxPolls) {{
|
|
4214
|
+
clearInterval(pollInterval);
|
|
4215
|
+
if(overlay) {{
|
|
4216
|
+
overlay.style.opacity = '0';
|
|
4217
|
+
setTimeout(() => {{ overlay.style.display = 'none'; }}, 300);
|
|
4218
|
+
}}
|
|
4219
|
+
}}
|
|
4220
|
+
}}, 90);
|
|
4221
|
+
|
|
4222
|
+
}} catch(e) {{ console.warn('nav-js error', e); }}
|
|
4223
|
+
}}
|
|
4224
|
+
"""
|
|
4225
|
+
|
|
4226
|
+
|
|
4227
|
+
# Wire up slide buttons with enhanced navigation
|
|
4228
|
+
briefing_1_next.click(
|
|
4229
|
+
fn=create_nav(briefing_slide_1, briefing_slide_2),
|
|
4230
|
+
inputs=None, outputs=all_steps_nav,
|
|
4231
|
+
js=nav_js("slide-2", "Carregant la visió general de la missió...")
|
|
4232
|
+
)
|
|
4233
|
+
briefing_2_back.click(
|
|
4234
|
+
fn=create_nav(briefing_slide_2, briefing_slide_1),
|
|
4235
|
+
inputs=None, outputs=all_steps_nav,
|
|
4236
|
+
js=nav_js("slide-1", "Tornant a la introducció...")
|
|
4237
|
+
)
|
|
4238
|
+
briefing_2_next.click(
|
|
4239
|
+
fn=create_nav(briefing_slide_2, briefing_slide_3),
|
|
4240
|
+
inputs=None, outputs=all_steps_nav,
|
|
4241
|
+
js=nav_js("slide-3", "Explorant el concepte del model...")
|
|
4242
|
+
)
|
|
4243
|
+
briefing_3_back.click(
|
|
4244
|
+
fn=create_nav(briefing_slide_3, briefing_slide_2),
|
|
4245
|
+
inputs=None, outputs=all_steps_nav,
|
|
4246
|
+
js=nav_js("slide-2", "Tornant un pas enrere...")
|
|
4247
|
+
)
|
|
4248
|
+
briefing_3_next.click(
|
|
4249
|
+
fn=create_nav(briefing_slide_3, briefing_slide_4),
|
|
4250
|
+
inputs=None, outputs=all_steps_nav,
|
|
4251
|
+
js=nav_js("slide-4", "Entenent el bucle de l'experiment...")
|
|
4252
|
+
)
|
|
4253
|
+
briefing_4_back.click(
|
|
4254
|
+
fn=create_nav(briefing_slide_4, briefing_slide_3),
|
|
4255
|
+
inputs=None, outputs=all_steps_nav,
|
|
4256
|
+
js=nav_js("slide-3", "Revisant els conceptes previs...")
|
|
4257
|
+
)
|
|
4258
|
+
briefing_4_next.click(
|
|
4259
|
+
fn=create_nav(briefing_slide_4, briefing_slide_5),
|
|
4260
|
+
inputs=None, outputs=all_steps_nav,
|
|
4261
|
+
js=nav_js("slide-5", "Configurant els paràmetres del model...")
|
|
4262
|
+
)
|
|
4263
|
+
briefing_5_back.click(
|
|
4264
|
+
fn=create_nav(briefing_slide_5, briefing_slide_4),
|
|
4265
|
+
inputs=None, outputs=all_steps_nav,
|
|
4266
|
+
js=nav_js("slide-4", "Revisitant el bucle...")
|
|
4267
|
+
)
|
|
4268
|
+
briefing_5_next.click(
|
|
4269
|
+
fn=create_nav(briefing_slide_5, briefing_slide_6),
|
|
4270
|
+
inputs=None, outputs=all_steps_nav,
|
|
4271
|
+
js=nav_js("slide-6", "Configurant les dades d’entrada...")
|
|
4272
|
+
)
|
|
4273
|
+
briefing_6_back.click(
|
|
4274
|
+
fn=create_nav(briefing_slide_6, briefing_slide_5),
|
|
4275
|
+
inputs=None, outputs=all_steps_nav,
|
|
4276
|
+
js=nav_js("slide-5", "Ajustant l’estratègia del model...")
|
|
4277
|
+
)
|
|
4278
|
+
briefing_6_next.click(
|
|
4279
|
+
fn=create_nav(briefing_slide_6, briefing_slide_7),
|
|
4280
|
+
inputs=None, outputs=all_steps_nav,
|
|
4281
|
+
js=nav_js("slide-7", "Preparant el resum de puntuació...")
|
|
4282
|
+
)
|
|
4283
|
+
briefing_7_back.click(
|
|
4284
|
+
fn=create_nav(briefing_slide_7, briefing_slide_6),
|
|
4285
|
+
inputs=None, outputs=all_steps_nav,
|
|
4286
|
+
js=nav_js("slide-6", "Revisant els paràmetres de les dades...")
|
|
4287
|
+
)
|
|
4288
|
+
# Slide 7 -> App
|
|
4289
|
+
briefing_7_next.click(
|
|
4290
|
+
fn=create_nav(briefing_slide_7, model_building_step),
|
|
4291
|
+
inputs=None, outputs=all_steps_nav,
|
|
4292
|
+
js=nav_js("model-step", "Entrant a l'àrea de construcció del model...")
|
|
4293
|
+
)
|
|
4294
|
+
|
|
4295
|
+
# App -> Conclusion
|
|
4296
|
+
step_2_next.click(
|
|
4297
|
+
fn=finalize_and_show_conclusion,
|
|
4298
|
+
inputs=[
|
|
4299
|
+
best_score_state,
|
|
4300
|
+
submission_count_state,
|
|
4301
|
+
last_rank_state,
|
|
4302
|
+
first_submission_score_state,
|
|
4303
|
+
feature_set_state
|
|
4304
|
+
],
|
|
4305
|
+
outputs=all_steps_nav + [final_score_display],
|
|
4306
|
+
js=nav_js("conclusion-step", "Generant el resum de rendiment...")
|
|
4307
|
+
)
|
|
4308
|
+
|
|
4309
|
+
# Conclusion -> App
|
|
4310
|
+
step_3_back.click(
|
|
4311
|
+
fn=create_nav(conclusion_step, model_building_step),
|
|
4312
|
+
inputs=None, outputs=all_steps_nav,
|
|
4313
|
+
js=nav_js("model-step", "Tornant a l'àrea de construcció del model...")
|
|
4314
|
+
)
|
|
4315
|
+
|
|
4316
|
+
# Events
|
|
4317
|
+
model_type_radio.change(
|
|
4318
|
+
fn=get_model_card,
|
|
4319
|
+
inputs=model_type_radio,
|
|
4320
|
+
outputs=model_card_display
|
|
4321
|
+
)
|
|
4322
|
+
model_type_radio.change(
|
|
4323
|
+
fn=lambda v: v or DEFAULT_MODEL,
|
|
4324
|
+
inputs=model_type_radio,
|
|
4325
|
+
outputs=model_type_state
|
|
4326
|
+
)
|
|
4327
|
+
complexity_slider.change(fn=lambda v: v, inputs=complexity_slider, outputs=complexity_state)
|
|
4328
|
+
|
|
4329
|
+
feature_set_checkbox.change(
|
|
4330
|
+
fn=lambda v: v or [],
|
|
4331
|
+
inputs=feature_set_checkbox,
|
|
4332
|
+
outputs=feature_set_state
|
|
4333
|
+
)
|
|
4334
|
+
data_size_radio.change(
|
|
4335
|
+
fn=lambda v: v or DEFAULT_DATA_SIZE,
|
|
4336
|
+
inputs=data_size_radio,
|
|
4337
|
+
outputs=data_size_state
|
|
4338
|
+
)
|
|
4339
|
+
|
|
4340
|
+
all_outputs = [
|
|
4341
|
+
submission_feedback_display,
|
|
4342
|
+
team_leaderboard_display,
|
|
4343
|
+
individual_leaderboard_display,
|
|
4344
|
+
last_submission_score_state,
|
|
4345
|
+
last_rank_state,
|
|
4346
|
+
best_score_state,
|
|
4347
|
+
submission_count_state,
|
|
4348
|
+
first_submission_score_state,
|
|
4349
|
+
rank_message_display,
|
|
4350
|
+
model_type_radio,
|
|
4351
|
+
complexity_slider,
|
|
4352
|
+
feature_set_checkbox,
|
|
4353
|
+
data_size_radio,
|
|
4354
|
+
submit_button,
|
|
4355
|
+
login_username,
|
|
4356
|
+
login_password,
|
|
4357
|
+
login_submit,
|
|
4358
|
+
login_error,
|
|
4359
|
+
attempts_tracker_display,
|
|
4360
|
+
was_preview_state,
|
|
4361
|
+
kpi_meta_state,
|
|
4362
|
+
last_seen_ts_state
|
|
4363
|
+
]
|
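# Descriptive note: run_experiment (presumably defined earlier in this module)
# must return its values in exactly this order, since all_outputs is passed as
# outputs= to submit_button.click() below.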
|
4364
|
+
|
|
4365
|
+
# Wire up login button
|
|
4366
|
+
login_submit.click(
|
|
4367
|
+
fn=perform_inline_login,
|
|
4368
|
+
inputs=[login_username, login_password],
|
|
4369
|
+
outputs=[
|
|
4370
|
+
login_username,
|
|
4371
|
+
login_password,
|
|
4372
|
+
login_submit,
|
|
4373
|
+
login_error,
|
|
4374
|
+
submit_button,
|
|
4375
|
+
submission_feedback_display,
|
|
4376
|
+
team_name_state,
|
|
4377
|
+
username_state, # NEW
|
|
4378
|
+
token_state # NEW
|
|
4379
|
+
]
|
|
4380
|
+
)
|
|
4381
|
+
|
|
4382
|
+
# Removed gr.State(username) from the inputs list
|
|
4383
|
+
submit_button.click(
|
|
4384
|
+
fn=run_experiment,
|
|
4385
|
+
inputs=[
|
|
4386
|
+
model_type_state,
|
|
4387
|
+
complexity_state,
|
|
4388
|
+
feature_set_state,
|
|
4389
|
+
data_size_state,
|
|
4390
|
+
team_name_state,
|
|
4391
|
+
last_submission_score_state,
|
|
4392
|
+
last_rank_state,
|
|
4393
|
+
submission_count_state,
|
|
4394
|
+
first_submission_score_state,
|
|
4395
|
+
best_score_state,
|
|
4396
|
+
username_state, # NEW: Session-based auth
|
|
4397
|
+
token_state, # NEW: Session-based auth
|
|
4398
|
+
readiness_state, # Renamed to readiness_flag in function signature
|
|
4399
|
+
was_preview_state, # Renamed to was_preview_prev in function signature
|
|
4400
|
+
# kpi_meta_state removed from inputs - used only as output
|
|
4401
|
+
],
|
|
4402
|
+
outputs=all_outputs,
|
|
4403
|
+
show_progress="full",
|
|
4404
|
+
js=nav_js("model-step", "Executant l'experiment...", 500)
|
|
4405
|
+
)
|
|
4406
|
+
|
|
4407
|
+
# Timer for polling initialization status
|
|
4408
|
+
status_timer = gr.Timer(value=0.5, active=True) # Poll every 0.5 seconds
|
|
4409
|
+
|
|
4410
|
+
def update_init_status():
|
|
4411
|
+
"""
|
|
4412
|
+
Poll initialization status and update UI elements.
|
|
4413
|
+
Returns status HTML, banner visibility, submit button state, data size choices, and readiness_state.
|
|
4414
|
+
"""
|
|
4415
|
+
status_html, ready = poll_init_status()
|
|
4416
|
+
|
|
4417
|
+
# Update banner visibility - hide when ready
|
|
4418
|
+
banner_visible = not ready
|
|
4419
|
+
|
|
4420
|
+
# Update submit button
|
|
4421
|
+
if ready:
|
|
4422
|
+
submit_label = "5. 🔬 Build & Submit Model"
|
|
4423
|
+
submit_interactive = True
|
|
4424
|
+
else:
|
|
4425
|
+
submit_label = "⏳ Esperant les dades..."
|
|
4426
|
+
submit_interactive = False
|
|
4427
|
+
|
|
4428
|
+
# Get available data sizes based on init progress
|
|
4429
|
+
available_sizes = get_available_data_sizes()
|
|
4430
|
+
|
|
4431
|
+
# Stop timer once fully initialized
|
|
4432
|
+
timer_active = not (ready and INIT_FLAGS.get("pre_samples_full", False))
|
|
4433
|
+
|
|
4434
|
+
return (
|
|
4435
|
+
status_html,
|
|
4436
|
+
gr.update(visible=banner_visible),
|
|
4437
|
+
gr.update(value=submit_label, interactive=submit_interactive),
|
|
4438
|
+
gr.update(choices=available_sizes),
|
|
4439
|
+
timer_active,
|
|
4440
|
+
ready # readiness_state
|
|
4441
|
+
)
|
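# Descriptive note: the six return values above line up positionally with
# outputs=[init_status_display, init_banner, submit_button, data_size_radio,
# status_timer, readiness_state] in the status_timer.tick() wiring below.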
|
4442
|
+
|
|
4443
|
+
status_timer.tick(
|
|
4444
|
+
fn=update_init_status,
|
|
4445
|
+
inputs=None,
|
|
4446
|
+
outputs=[init_status_display, init_banner, submit_button, data_size_radio, status_timer, readiness_state]
|
|
4447
|
+
)
|
|
4448
|
+
|
|
4449
|
+
# Handle session-based authentication on page load
|
|
4450
|
+
def handle_load_with_session_auth(request: "gr.Request"):
|
|
4451
|
+
"""
|
|
4452
|
+
Check for session token, auto-login if present, then load initial UI with stats.
|
|
4453
|
+
|
|
4454
|
+
Concurrency Note: This function does NOT set per-user values in os.environ.
|
|
4455
|
+
All authentication state is returned via gr.State objects (username_state,
|
|
4456
|
+
token_state, team_name_state) to prevent cross-user data leakage.
|
|
4457
|
+
"""
|
|
4458
|
+
success, username, token = _try_session_based_auth(request)
|
|
4459
|
+
|
|
4460
|
+
if success and username and token:
|
|
4461
|
+
_log(f"Session auth successful on load for {username}")
|
|
4462
|
+
|
|
4463
|
+
# Get user stats and team from cache/leaderboard
|
|
4464
|
+
stats = _compute_user_stats(username, token)
|
|
4465
|
+
team_name = stats.get("team_name", "")
|
|
4466
|
+
|
|
4467
|
+
# Concurrency Note: Do NOT set os.environ for per-user values.
|
|
4468
|
+
# Return state via gr.State objects exclusively.
|
|
4469
|
+
|
|
4470
|
+
# Hide login form since user is authenticated via session
|
|
4471
|
+
# Return initial load results plus login form hidden
|
|
4472
|
+
# Pass token explicitly for authenticated leaderboard fetch
|
|
4473
|
+
initial_results = on_initial_load(username, token=token, team_name=team_name)
|
|
4474
|
+
return initial_results + (
|
|
4475
|
+
gr.update(visible=False), # login_username
|
|
4476
|
+
gr.update(visible=False), # login_password
|
|
4477
|
+
gr.update(visible=False), # login_submit
|
|
4478
|
+
gr.update(visible=False), # login_error (hide any messages)
|
|
4479
|
+
username, # username_state
|
|
4480
|
+
token, # token_state
|
|
4481
|
+
team_name, # team_name_state
|
|
4482
|
+
)
|
|
4483
|
+
else:
|
|
4484
|
+
_log("No valid session on load, showing login form")
|
|
4485
|
+
# No valid session, proceed with normal load (show login form)
|
|
4486
|
+
# No token available, call without token
|
|
4487
|
+
initial_results = on_initial_load(None, token=None, team_name="")
|
|
4488
|
+
return initial_results + (
|
|
4489
|
+
gr.update(visible=True), # login_username
|
|
4490
|
+
gr.update(visible=True), # login_password
|
|
4491
|
+
gr.update(visible=True), # login_submit
|
|
4492
|
+
gr.update(visible=False), # login_error
|
|
4493
|
+
None, # username_state
|
|
4494
|
+
None, # token_state
|
|
4495
|
+
"", # team_name_state
|
|
4496
|
+
)
|
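# Descriptive note: both branches return initial_results (presumably eight
# values from on_initial_load) followed by seven trailing entries, matching
# the fifteen components listed in outputs= of demo.load() below.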
|
4497
|
+
|
|
4498
|
+
demo.load(
|
|
4499
|
+
fn=handle_load_with_session_auth,
|
|
4500
|
+
inputs=None, # Request is auto-injected
|
|
4501
|
+
outputs=[
|
|
4502
|
+
model_card_display,
|
|
4503
|
+
team_leaderboard_display,
|
|
4504
|
+
individual_leaderboard_display,
|
|
4505
|
+
rank_message_display,
|
|
4506
|
+
model_type_radio,
|
|
4507
|
+
complexity_slider,
|
|
4508
|
+
feature_set_checkbox,
|
|
4509
|
+
data_size_radio,
|
|
4510
|
+
login_username,
|
|
4511
|
+
login_password,
|
|
4512
|
+
login_submit,
|
|
4513
|
+
login_error,
|
|
4514
|
+
username_state, # NEW
|
|
4515
|
+
token_state, # NEW
|
|
4516
|
+
team_name_state, # NEW
|
|
4517
|
+
]
|
|
4518
|
+
)
|
|
4519
|
+
|
|
4520
|
+
return demo
|
|
4521
|
+
|
|
4522
|
+
# -------------------------------------------------------------------------
|
|
4523
|
+
# 4. Convenience Launcher
|
|
4524
|
+
# -------------------------------------------------------------------------
|
|
4525
|
+
|
|
4526
|
+
def launch_model_building_game_ca_app(height: int = 1200, share: bool = False, debug: bool = False) -> None:
|
|
4527
|
+
"""
|
|
4528
|
+
Create and directly launch the Model Building Game app inline (e.g., in notebooks).
|
|
4529
|
+
"""
|
|
4530
|
+
global playground, X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST
|
|
4531
|
+
if playground is None:
|
|
4532
|
+
try:
|
|
4533
|
+
playground = Competition(MY_PLAYGROUND_ID)
|
|
4534
|
+
except Exception as e:
|
|
4535
|
+
print(f"WARNING: Could not connect to playground: {e}")
|
|
4536
|
+
playground = None
|
|
4537
|
+
|
|
4538
|
+
if X_TRAIN_RAW is None:
|
|
4539
|
+
X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST = load_and_prep_data()
|
|
4540
|
+
|
|
4541
|
+
demo = create_model_building_game_ca_app()
|
|
4542
|
+
port = int(os.environ.get("PORT", 8080))
|
|
4543
|
+
demo.launch(share=share, inline=True, debug=debug, height=height, server_port=port)
|
|
4544
|
+
|
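# Illustrative usage (not part of the released file): launching the app
# directly, e.g. from a notebook or a local script.
#
#     from aimodelshare.moral_compass.apps.model_building_app_ca import (
#         launch_model_building_game_ca_app,
#     )
#
#     launch_model_building_game_ca_app(height=1000, share=False, debug=False)
#
# This prepares the playground connection and data if needed, then serves the
# Gradio app inline on the port given by the PORT environment variable
# (8080 by default).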