aimodelshare-0.3.7-py3-none-any.whl → aimodelshare-0.4.71-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. aimodelshare/moral_compass/__init__.py +51 -2
  2. aimodelshare/moral_compass/api_client.py +92 -4
  3. aimodelshare/moral_compass/apps/__init__.py +36 -16
  4. aimodelshare/moral_compass/apps/ai_consequences.py +98 -88
  5. aimodelshare/moral_compass/apps/bias_detective_ca.py +2722 -0
  6. aimodelshare/moral_compass/apps/bias_detective_en.py +2722 -0
  7. aimodelshare/moral_compass/apps/bias_detective_part1.py +2722 -0
  8. aimodelshare/moral_compass/apps/bias_detective_part2.py +2465 -0
  9. aimodelshare/moral_compass/apps/bias_detective_part_es.py +2722 -0
  10. aimodelshare/moral_compass/apps/ethical_revelation.py +237 -147
  11. aimodelshare/moral_compass/apps/fairness_fixer.py +1839 -859
  12. aimodelshare/moral_compass/apps/fairness_fixer_ca.py +1869 -0
  13. aimodelshare/moral_compass/apps/fairness_fixer_en.py +1869 -0
  14. aimodelshare/moral_compass/apps/fairness_fixer_es.py +1869 -0
  15. aimodelshare/moral_compass/apps/judge.py +130 -143
  16. aimodelshare/moral_compass/apps/justice_equity_upgrade.py +793 -831
  17. aimodelshare/moral_compass/apps/justice_equity_upgrade_ca.py +815 -0
  18. aimodelshare/moral_compass/apps/justice_equity_upgrade_en.py +815 -0
  19. aimodelshare/moral_compass/apps/justice_equity_upgrade_es.py +815 -0
  20. aimodelshare/moral_compass/apps/mc_integration_helpers.py +227 -745
  21. aimodelshare/moral_compass/apps/model_building_app_ca.py +4544 -0
  22. aimodelshare/moral_compass/apps/model_building_app_ca_final.py +3899 -0
  23. aimodelshare/moral_compass/apps/model_building_app_en.py +4290 -0
  24. aimodelshare/moral_compass/apps/model_building_app_en_final.py +3869 -0
  25. aimodelshare/moral_compass/apps/model_building_app_es.py +4362 -0
  26. aimodelshare/moral_compass/apps/model_building_app_es_final.py +3899 -0
  27. aimodelshare/moral_compass/apps/model_building_game.py +4211 -935
  28. aimodelshare/moral_compass/apps/moral_compass_challenge.py +195 -95
  29. aimodelshare/moral_compass/apps/what_is_ai.py +126 -117
  30. aimodelshare/moral_compass/challenge.py +98 -17
  31. {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/METADATA +1 -1
  32. {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/RECORD +35 -19
  33. aimodelshare/moral_compass/apps/bias_detective.py +0 -714
  34. {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/WHEEL +0 -0
  35. {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/licenses/LICENSE +0 -0
  36. {aimodelshare-0.3.7.dist-info → aimodelshare-0.4.71.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,4362 @@
1
+ """
2
+ Model Building Game - Gradio application for the Justice & Equity Challenge.
3
+
4
+ Session-based authentication with leaderboard caching and progressive rank unlocking.
5
+
6
+ Concurrency Notes:
7
+ - This app is designed to run in a multi-threaded environment (Cloud Run).
8
+ - Per-user state is stored in gr.State objects, NOT in os.environ.
9
+ - Caches are protected by locks to ensure thread safety.
10
+ - Linear algebra libraries are constrained to single-threaded mode to prevent
11
+ CPU oversubscription in containerized deployments.
12
+ """
13
+
14
+ import os
15
+
16
+ # -------------------------------------------------------------------------
17
+ # Thread Limit Configuration (MUST be set before importing numpy/sklearn)
18
+ # Prevents CPU oversubscription in containerized environments like Cloud Run.
19
+ # -------------------------------------------------------------------------
20
+ os.environ.setdefault("OMP_NUM_THREADS", "1")
21
+ os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")
22
+ os.environ.setdefault("MKL_NUM_THREADS", "1")
23
+ os.environ.setdefault("NUMEXPR_NUM_THREADS", "1")
24
+
25
+ import time
26
+ import random
27
+ import requests
28
+ import contextlib
29
+ from io import StringIO
30
+ import threading
31
+ import functools
32
+ from pathlib import Path
33
+ from datetime import datetime, timedelta
34
+ from typing import Optional, Dict, Any, Tuple, Callable, TypeVar
35
+
36
+ import numpy as np
37
+ import pandas as pd
38
+ import gradio as gr
39
+
40
+ # --- Scikit-learn Imports ---
41
+ from sklearn.model_selection import train_test_split
42
+ from sklearn.preprocessing import StandardScaler
43
+ from sklearn.impute import SimpleImputer
44
+ from sklearn.compose import ColumnTransformer
45
+ from sklearn.pipeline import Pipeline
46
+ from sklearn.preprocessing import OneHotEncoder
47
+ from sklearn.linear_model import LogisticRegression
48
+ from sklearn.tree import DecisionTreeClassifier
49
+ from sklearn.ensemble import RandomForestClassifier
50
+ from sklearn.neighbors import KNeighborsClassifier
51
+
52
+ # --- AI Model Share Imports ---
53
+ try:
54
+ from aimodelshare.playground import Competition
55
+ except ImportError:
56
+ raise ImportError(
57
+ "The 'aimodelshare' library is required. Install with: pip install aimodelshare"
58
+ )
59
+
60
+ # -------------------------------------------------------------------------
61
+ # Configuration & Caching Infrastructure
62
+ # -------------------------------------------------------------------------
63
+
64
+ # -------------------------------------------------------------------------
65
+ # CACHE CONFIGURATION (Optimized: Thread-Safe SQLite)
66
+ # -------------------------------------------------------------------------
67
+ import sqlite3
68
+
69
+ CACHE_DB_FILE = "prediction_cache.sqlite"
70
+
71
+ def get_cached_prediction(key):
72
+ """
73
+ Lightning-fast lookup from SQLite database.
74
+ THREAD-SAFE FIX: Opens a new connection for every lookup.
75
+ """
76
+ # 1. Check if DB exists
77
+ if not os.path.exists(CACHE_DB_FILE):
78
+ return None
79
+
80
+ try:
81
+ # Use a context manager ('with') so the transaction is committed or rolled
82
+ # back automatically; the connection itself is released when it goes out of scope.
83
+ # timeout=10 ensures we don't wait forever if the file is busy.
84
+ with sqlite3.connect(CACHE_DB_FILE, timeout=10.0) as conn:
85
+ cursor = conn.cursor()
86
+ cursor.execute("SELECT value FROM cache WHERE key=?", (key,))
87
+ result = cursor.fetchone()
88
+
89
+ if result:
90
+ return result[0]
91
+ else:
92
+ return None
93
+
94
+ except sqlite3.OperationalError as e:
95
+ # Handle locking errors gracefully
96
+ print(f"⚠️ CACHE LOCK ERROR: {e}. Falling back to training.", flush=True)
97
+ return None
98
+
99
+ except Exception as e:
100
+ print(f"⚠️ DB READ ERROR: {e}", flush=True)
101
+ return None
102
+
103
+ print("✅ App configured for Thread-Safe SQLite Cache.")
104
+
105
+ LEADERBOARD_CACHE_SECONDS = int(os.environ.get("LEADERBOARD_CACHE_SECONDS", "45"))
106
+ MAX_LEADERBOARD_ENTRIES = os.environ.get("MAX_LEADERBOARD_ENTRIES")
107
+ MAX_LEADERBOARD_ENTRIES = int(MAX_LEADERBOARD_ENTRIES) if MAX_LEADERBOARD_ENTRIES else None
108
+ DEBUG_LOG = os.environ.get("DEBUG_LOG", "false").lower() == "true"
109
+
110
+ # In-memory caches (per container instance)
111
+ # Each cache has its own lock for thread safety under concurrent requests
112
+ _cache_lock = threading.Lock() # Protects _leaderboard_cache
113
+ _user_stats_lock = threading.Lock() # Protects _user_stats_cache
114
+ _auth_lock = threading.Lock() # Protects get_aws_token() credential injection
115
+
116
+ # Auth-aware leaderboard cache: separate entries for authenticated vs anonymous
117
+ # Structure: {"anon": {"data": df, "timestamp": float}, "auth": {"data": df, "timestamp": float}}
118
+ _leaderboard_cache: Dict[str, Dict[str, Any]] = {
119
+ "anon": {"data": None, "timestamp": 0.0},
120
+ "auth": {"data": None, "timestamp": 0.0},
121
+ }
122
+ _user_stats_cache: Dict[str, Dict[str, Any]] = {}
123
+ USER_STATS_TTL = LEADERBOARD_CACHE_SECONDS
124
+
125
+ # -------------------------------------------------------------------------
126
+ # Retry Helper for External API Calls
127
+ # -------------------------------------------------------------------------
128
+
129
+ T = TypeVar("T")
130
+
131
+ def _retry_with_backoff(
132
+ func: Callable[[], T],
133
+ max_attempts: int = 3,
134
+ base_delay: float = 0.5,
135
+ description: str = "operation"
136
+ ) -> T:
137
+ """
138
+ Execute a function with exponential backoff retry on failure.
139
+
140
+ Concurrency Note: This helper provides resilience against transient
141
+ network failures when calling external APIs (Competition.get_leaderboard,
142
+ playground.submit_model). Essential for Cloud Run deployments where
143
+ network calls may occasionally fail under load.
144
+
145
+ Args:
146
+ func: Callable to execute (should take no arguments)
147
+ max_attempts: Maximum number of attempts (default: 3)
148
+ base_delay: Initial delay in seconds, doubled each retry (default: 0.5)
149
+ description: Human-readable description for logging
150
+
151
+ Returns:
152
+ Result from successful function call
153
+
154
+ Raises:
155
+ Last exception if all attempts fail
156
+ """
157
+ last_exception: Optional[Exception] = None
158
+ delay = base_delay
159
+
160
+ for attempt in range(1, max_attempts + 1):
161
+ try:
162
+ return func()
163
+ except Exception as e:
164
+ last_exception = e
165
+ if attempt < max_attempts:
166
+ _log(f"{description} attempt {attempt} failed: {e}. Retrying in {delay}s...")
167
+ time.sleep(delay)
168
+ delay *= 2 # Exponential backoff
169
+ else:
170
+ _log(f"{description} failed after {max_attempts} attempts: {e}")
171
+
172
+ # Loop always runs at least once (max_attempts >= 1), so last_exception is set
173
+ raise last_exception # type: ignore[misc]
174
+
175
+ def _log(msg: str):
176
+ """Log message if DEBUG_LOG is enabled."""
177
+ if DEBUG_LOG:
178
+ print(f"[ModelBuildingGame] {msg}")
179
+
180
+ def _normalize_team_name(name: str) -> str:
181
+ """Normalize team name for consistent comparison and storage."""
182
+ if not name:
183
+ return ""
184
+ return " ".join(str(name).strip().split())
185
+
186
+ def _get_leaderboard_with_optional_token(playground_instance: Optional["Competition"], token: Optional[str] = None) -> Optional[pd.DataFrame]:
187
+ """
188
+ Fetch fresh leaderboard with optional token authentication and retry logic.
189
+
190
+ This is a helper function that centralizes the pattern of fetching
191
+ a fresh (non-cached) leaderboard with optional token authentication.
192
+ Use this for user-facing flows that require fresh, full data.
193
+
194
+ Concurrency Note: Uses _retry_with_backoff for resilience against
195
+ transient network failures.
196
+
197
+ Args:
198
+ playground_instance: The Competition playground instance (or None)
199
+ token: Optional authentication token for the fetch
200
+
201
+ Returns:
202
+ DataFrame with leaderboard data, or None if fetch fails or playground is None
203
+ """
204
+ if playground_instance is None:
205
+ return None
206
+
207
+ def _fetch():
208
+ if token:
209
+ return playground_instance.get_leaderboard(token=token)
210
+ return playground_instance.get_leaderboard()
211
+
212
+ try:
213
+ return _retry_with_backoff(_fetch, description="leaderboard fetch")
214
+ except Exception as e:
215
+ _log(f"Leaderboard fetch failed after retries: {e}")
216
+ return None
217
+
218
+ def _fetch_leaderboard(token: Optional[str]) -> Optional[pd.DataFrame]:
219
+ """
220
+ Fetch leaderboard with auth-aware caching (TTL: LEADERBOARD_CACHE_SECONDS).
221
+
222
+ Concurrency Note: Cache is keyed by auth scope ("anon" vs "auth") to prevent
223
+ cross-user data leakage. Authenticated users share a single "auth" cache entry
224
+ to avoid unbounded cache growth. Protected by _cache_lock.
225
+ """
226
+ # Determine cache key based on authentication status
227
+ cache_key = "auth" if token else "anon"
228
+ now = time.time()
229
+
230
+ with _cache_lock:
231
+ cache_entry = _leaderboard_cache[cache_key]
232
+ if (
233
+ cache_entry["data"] is not None
234
+ and now - cache_entry["timestamp"] < LEADERBOARD_CACHE_SECONDS
235
+ ):
236
+ _log(f"Leaderboard cache hit ({cache_key})")
237
+ return cache_entry["data"]
238
+
239
+ _log(f"Fetching fresh leaderboard ({cache_key})...")
240
+ df = None
241
+ try:
242
+ playground_id = "https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m"
243
+ playground_instance = Competition(playground_id)
244
+
245
+ def _fetch():
246
+ return playground_instance.get_leaderboard(token=token) if token else playground_instance.get_leaderboard()
247
+
248
+ df = _retry_with_backoff(_fetch, description="leaderboard fetch")
249
+ if df is not None and not df.empty and MAX_LEADERBOARD_ENTRIES:
250
+ df = df.head(MAX_LEADERBOARD_ENTRIES)
251
+ _log(f"Leaderboard fetched ({cache_key}): {len(df) if df is not None else 0} entries")
252
+ except Exception as e:
253
+ _log(f"Leaderboard fetch failed ({cache_key}): {e}")
254
+ df = None
255
+
256
+ with _cache_lock:
257
+ _leaderboard_cache[cache_key]["data"] = df
258
+ _leaderboard_cache[cache_key]["timestamp"] = time.time()
259
+ return df
260
+
261
+ def _get_or_assign_team(username: str, leaderboard_df: Optional[pd.DataFrame]) -> Tuple[str, bool]:
262
+ """Get existing team from leaderboard or assign random team."""
263
+ # TEAM_NAMES is defined in configuration section below
264
+ try:
265
+ if leaderboard_df is not None and not leaderboard_df.empty and "Team" in leaderboard_df.columns:
266
+ user_submissions = leaderboard_df[leaderboard_df["username"] == username]
267
+ if not user_submissions.empty:
268
+ if "timestamp" in user_submissions.columns:
269
+ try:
270
+ user_submissions = user_submissions.copy()
271
+ user_submissions["timestamp"] = pd.to_datetime(
272
+ user_submissions["timestamp"], errors="coerce"
273
+ )
274
+ user_submissions = user_submissions.sort_values("timestamp", ascending=False)
275
+ _log(f"Sorted {len(user_submissions)} submissions by timestamp for {username}")
276
+ except Exception as ts_err:
277
+ _log(f"Timestamp sort error: {ts_err}")
278
+ existing_team = user_submissions.iloc[0]["Team"]
279
+ if pd.notna(existing_team) and str(existing_team).strip():
280
+ normalized = _normalize_team_name(existing_team)
281
+ _log(f"Found existing team for {username}: {normalized}")
282
+ return normalized, False
283
+ new_team = _normalize_team_name(random.choice(TEAM_NAMES))
284
+ _log(f"Assigning new team to {username}: {new_team}")
285
+ return new_team, True
286
+ except Exception as e:
287
+ _log(f"Team assignment error: {e}")
288
+ new_team = _normalize_team_name(random.choice(TEAM_NAMES))
289
+ return new_team, True
290
+
291
+ def _try_session_based_auth(request: "gr.Request") -> Tuple[bool, Optional[str], Optional[str]]:
292
+ """Attempt to authenticate via session token. Returns (success, username, token)."""
293
+ try:
294
+ session_id = request.query_params.get("sessionid") if request else None
295
+ if not session_id:
296
+ _log("No sessionid in request")
297
+ return False, None, None
298
+
299
+ from aimodelshare.aws import get_token_from_session, _get_username_from_token
300
+
301
+ token = get_token_from_session(session_id)
302
+ if not token:
303
+ _log("Failed to get token from session")
304
+ return False, None, None
305
+
306
+ username = _get_username_from_token(token)
307
+ if not username:
308
+ _log("Failed to extract username from token")
309
+ return False, None, None
310
+
311
+ _log(f"Session auth successful for {username}")
312
+ return True, username, token
313
+
314
+ except Exception as e:
315
+ _log(f"Session auth failed: {e}")
316
+ return False, None, None
317
+
318
+
319
+
320
+ # -------------------------------------------------------------------------
321
+ # UPDATED FUNCTION
322
+ # -------------------------------------------------------------------------
323
+ def _compute_user_stats(username: str, token: str) -> Dict[str, Any]:
324
+ """
325
+ Compute user statistics with caching.
326
+
327
+ Concurrency Note: Protected by _user_stats_lock for thread-safe
328
+ cache reads and writes.
329
+ """
330
+ now = time.time()
331
+
332
+ # Thread-safe cache check
333
+ with _user_stats_lock:
334
+ cached = _user_stats_cache.get(username)
335
+ if cached and (now - cached.get("_ts", 0) < USER_STATS_TTL):
336
+ _log(f"User stats cache hit for {username}")
337
+ # Return shallow copy to prevent caller mutations from affecting cache.
338
+ # Stats dict contains only primitives (float, int, str), so shallow copy is sufficient.
339
+ return cached.copy()
340
+
341
+ _log(f"Computing fresh stats for {username}")
342
+ leaderboard_df = _fetch_leaderboard(token)
343
+ team_name, _ = _get_or_assign_team(username, leaderboard_df)
344
+
345
+ stats = {
346
+ "best_score": 0.0,
347
+ "rank": 0,
348
+ "team_name": team_name,
349
+ "submission_count": 0,
350
+ "last_score": 0.0,
351
+ "_ts": time.time()
352
+ }
353
+
354
+ try:
355
+ if leaderboard_df is not None and not leaderboard_df.empty:
356
+ user_submissions = leaderboard_df[leaderboard_df["username"] == username]
357
+ if not user_submissions.empty:
358
+ stats["submission_count"] = len(user_submissions)
359
+ if "accuracy" in user_submissions.columns:
360
+ stats["best_score"] = float(user_submissions["accuracy"].max())
361
+ if "timestamp" in user_submissions.columns:
362
+ try:
363
+ user_submissions = user_submissions.copy()
364
+ user_submissions["timestamp"] = pd.to_datetime(
365
+ user_submissions["timestamp"], errors="coerce"
366
+ )
367
+ recent = user_submissions.sort_values("timestamp", ascending=False).iloc[0]
368
+ stats["last_score"] = float(recent["accuracy"])
369
+ except Exception:
370
+ stats["last_score"] = stats["best_score"]
371
+ else:
372
+ stats["last_score"] = stats["best_score"]
373
+
374
+ if "accuracy" in leaderboard_df.columns:
375
+ user_bests = leaderboard_df.groupby("username")["accuracy"].max()
376
+ ranked = user_bests.sort_values(ascending=False)
377
+ try:
378
+ stats["rank"] = int(ranked.index.get_loc(username) + 1)
379
+ except KeyError:
380
+ stats["rank"] = 0
381
+ except Exception as e:
382
+ _log(f"Error computing stats for {username}: {e}")
383
+
384
+ # Thread-safe cache update
385
+ with _user_stats_lock:
386
+ _user_stats_cache[username] = stats
387
+ _log(f"Stats for {username}: {stats}")
388
+ return stats
389
+
390
+
391
+ def _build_attempts_tracker_html(current_count, limit=10):
392
+ """
393
+ Generate HTML for the attempts tracker display.
394
+ Shows current attempt count vs limit with color coding.
395
+
396
+ Args:
397
+ current_count: Number of attempts used so far
398
+ limit: Maximum allowed attempts (default: ATTEMPT_LIMIT)
399
+
400
+ Returns:
401
+ str: HTML string for the tracker display
402
+ """
403
+ if current_count >= limit:
404
+ # Limit reached - show the final-attempt message (same blue styling as the normal state)
405
+ bg_color = "#f0f9ff"
406
+ border_color = "#bae6fd"
407
+ text_color = "#0369a1"
408
+ icon = "🛑"
409
+ label = f"Última oportunidad (por ahora) para mejorar tu puntuación: {current_count}/{limit}"
410
+ else:
411
+ # Normal - blue styling
412
+ bg_color = "#f0f9ff"
413
+ border_color = "#bae6fd"
414
+ text_color = "#0369a1"
415
+ icon = "📊"
416
+ label = f"Intentos usados: {current_count}/{limit}"
417
+
418
+ return f"""<div style='text-align:center; padding:8px; margin:8px 0; background:{bg_color}; border-radius:8px; border:1px solid {border_color};'>
419
+ <p style='margin:0; color:{text_color}; font-weight:600; font-size:1rem;'>{icon} {label}</p>
420
+ </div>"""
421
+
422
+ def check_attempt_limit(submission_count: int, limit: int = None) -> Tuple[bool, str]:
423
+ """Check if submission count exceeds limit."""
424
+ # ATTEMPT_LIMIT is defined in configuration section below
425
+ if limit is None:
426
+ limit = ATTEMPT_LIMIT
427
+
428
+ if submission_count >= limit:
429
+ msg = f"⚠️ Límite de intentos alcanzado ({submission_count}/{limit})"
430
+ return False, msg
431
+ return True, f"Intentos: {submission_count}/{limit}"
432
+
433
+ # -------------------------------------------------------------------------
434
+ # Future: Fairness Metrics
435
+ # -------------------------------------------------------------------------
436
+
437
+ # def compute_fairness_metrics(y_true, y_pred, sensitive_attrs):
438
+ # """
439
+ # Compute fairness metrics for model predictions.
440
+ #
441
+ # Args:
442
+ # y_true: Ground truth labels
443
+ # y_pred: Model predictions
444
+ # sensitive_attrs: DataFrame with sensitive attributes (race, sex, age)
445
+ #
446
+ # Returns:
447
+ # dict: Fairness metrics including demographic parity, equalized odds
448
+ #
449
+ # TODO: Implement using fairlearn or aif360
450
+ # """
451
+ # pass
452
+
453
+
454
+
455
+ # -------------------------------------------------------------------------
456
+ # 1. Configuration
457
+ # -------------------------------------------------------------------------
458
+
459
+ MY_PLAYGROUND_ID = "https://cf3wdpkg0d.execute-api.us-east-1.amazonaws.com/prod/m"
460
+
461
+ # --- Submission Limit Configuration ---
462
+ # Maximum number of successful leaderboard submissions per user per session.
463
+ # Preview runs (pre-login) and failed/invalid attempts do NOT count toward this limit.
464
+ # Only actual successful playground.submit_model() calls increment the count.
465
+ #
466
+ # TODO: Server-side persistent enforcement recommended
467
+ # The current attempt limit is stored in gr.State (per-session) and can be bypassed
468
+ # by refreshing the browser. For production use with 100+ concurrent users,
469
+ # consider implementing server-side persistence via Redis or Firestore to track
470
+ # attempt counts per user across sessions.
471
+ ATTEMPT_LIMIT = 10
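One possible shape of the server-side counter suggested in the TODO above, sketched with the redis-py client; the Redis deployment, key naming, and 24-hour window are assumptions rather than anything this package ships:

    import redis

    _redis = redis.Redis(host="localhost", port=6379, decode_responses=True)

    def consume_attempt(username: str, limit: int = ATTEMPT_LIMIT,
                        window_s: int = 24 * 3600) -> bool:
        """Count one attempt for this user; return False once the limit is reached."""
        key = f"attempts:{username}"
        count = _redis.incr(key)           # atomic across all container instances
        if count == 1:
            _redis.expire(key, window_s)   # start the rolling window on the first attempt
        return count <= limit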
472
+
473
+ # --- Leaderboard Polling Configuration ---
474
+ # After a real authenticated submission, we poll the leaderboard to detect eventual consistency.
475
+ # This prevents the "stuck on first preview KPI" issue where the leaderboard hasn't updated yet.
476
+ # Increased from 12 to 60 to better tolerate backend latency and cold starts.
477
+ # If polling times out, optimistic fallback logic will provide provisional UI updates.
478
+ LEADERBOARD_POLL_TRIES = 60 # Number of polling attempts (increased to handle backend latency/cold starts)
479
+ LEADERBOARD_POLL_SLEEP = 1.0 # Sleep duration between polls (seconds)
480
+ ENABLE_AUTO_RESUBMIT_AFTER_READY = False # Future feature flag for auto-resubmit
481
+
482
+ # --- 1. MODEL CONFIGURATION (Keys match Database - English) ---
484
+ MODEL_TYPES = {
485
+ "The Balanced Generalist": {
486
+ "model_builder": lambda: LogisticRegression(
487
+ max_iter=500, random_state=42, class_weight="balanced"
488
+ ),
489
+ "card_es": "Este modelo es rápido, fiable y equilibrat. Buen punto de partida; suele dar resultados estables en muchos casos."
490
+ },
491
+ "El Creador de Reglas": {
492
+ "model_builder": lambda: DecisionTreeClassifier(
493
+ random_state=42, class_weight="balanced"
494
+ ),
495
+ "card_es": "Este modelo aprende reglas simples del tipo «si/entonces». Fácil de entender, pero le cuesta captar patrones complejos."
496
+ },
497
+ "El 'Vecino más Cercano'": {
498
+ "model_builder": lambda: KNeighborsClassifier(),
499
+ "card_es": "Este modelo se basa en los ejemplos más parecidos del pasado. «Si te pareces a estos casos, predeciré el mismo resultado»."
500
+ },
501
+ "El Buscador de Patrones Profundos": {
502
+ "model_builder": lambda: RandomForestClassifier(
503
+ random_state=42, class_weight="balanced"
504
+ ),
505
+ "card_es": "Este modelo combina muchos árboles de decisión para encontrar patrones complejos. Es potente, pero conviene no pasarse con la complejidad."
506
+ }
507
+ }
508
+
509
+ DEFAULT_MODEL = "The Balanced Generalist"
510
+
511
+ # --- 2. TRANSLATION MAPS (UI Display -> Database Key) ---
512
+
513
+ # Map English Keys to Spanish Display Names
514
+ MODEL_DISPLAY_MAP = {
515
+ "The Balanced Generalist": "El Generalista Equilibrado",
516
+ "The Rule-Maker": "El Creador de Reglas",
517
+ "The 'Nearest Neighbor'": "El 'Vecino Más Cercano'",
518
+ "The Deep Pattern-Finder": "El Buscador de Patrones Profundo"
519
+ }
520
+
521
+ # --- THIS WAS MISSING ---
522
+ # Create the Choices List as Tuples: [(Spanish Label, English Value)]
523
+ MODEL_RADIO_CHOICES = [(label, key) for key, label in MODEL_DISPLAY_MAP.items()]
524
+ # ------------------------
525
+
526
+ # Map Spanish Data Sizes (UI) to English Keys (Database)
527
+ DATA_SIZE_DB_MAP = {
528
+ "Pequeño (20%)": "Small (20%)",
529
+ "Medio (60%)": "Medium (60%)",
530
+ "Grande (80%)": "Large (80%)",
531
+ "Completo (100%)": "Full (100%)"
532
+ }
533
+
534
+ TEAM_NAMES = [
535
+ "The Moral Champions", "The Justice League", "The Data Detectives",
536
+ "The Ethical Explorers", "The Fairness Finders", "The Accuracy Avengers"
537
+ ]
538
+ CURRENT_TEAM_NAME = random.choice(TEAM_NAMES)
539
+
540
+ # Team name translations for UI display only (Spanish)
541
+ # Internal logic (ranking, caching, grouping) always uses canonical English names
542
+ TEAM_NAME_TRANSLATIONS = {
543
+ "en": {
544
+ "The Justice League": "The Justice League",
545
+ "The Moral Champions": "The Moral Champions",
546
+ "The Data Detectives": "The Data Detectives",
547
+ "The Ethical Explorers": "The Ethical Explorers",
548
+ "The Fairness Finders": "The Fairness Finders",
549
+ "The Accuracy Avengers": "The Accuracy Avengers"
550
+ },
551
+ "es": {
552
+ "The Justice League": "La Liga de la Justicia",
553
+ "The Moral Champions": "Los Campeones Morales",
554
+ "The Data Detectives": "Los Detectives de Datos",
555
+ "The Ethical Explorers": "Los Exploradores Éticos",
556
+ "The Fairness Finders": "Los Buscadores de Equidad",
557
+ "The Accuracy Avengers": "Los Vengadores de Precisión"
558
+ }
559
+ }
560
+
561
+ # UI language for team name display
562
+ UI_TEAM_LANG = "es"
563
+
564
+
565
+ # --- Feature groups for scaffolding (Weak -> Medium -> Strong) ---
566
+ FEATURE_SET_ALL_OPTIONS = [
567
+ ("Número de delitos graves juveniles", "juv_fel_count"),
568
+ ("Número de delitos leves juveniles", "juv_misd_count"),
569
+ ("Otros delitos juveniles", "juv_other_count"),
570
+ ("Origen étnico", "race"),
571
+ ("Sexo", "sex"),
572
+ ("Gravedad del cargo (leve / grave)", "c_charge_degree"),
573
+ ("Días antes del arresto", "days_b_screening_arrest"),
574
+ ("Edad", "age"),
575
+ ("Días en prisión", "length_of_stay"),
576
+ ("Número de delitos previos", "priors_count"),
577
+ ]
578
+ FEATURE_SET_GROUP_1_VALS = [
579
+ "juv_fel_count", "juv_misd_count", "juv_other_count", "race", "sex",
580
+ "c_charge_degree", "days_b_screening_arrest"
581
+ ]
582
+ FEATURE_SET_GROUP_2_VALS = ["c_charge_desc", "age"]
583
+ FEATURE_SET_GROUP_3_VALS = ["length_of_stay", "priors_count"]
584
+ ALL_NUMERIC_COLS = [
585
+ "juv_fel_count", "juv_misd_count", "juv_other_count",
586
+ "days_b_screening_arrest", "age", "length_of_stay", "priors_count"
587
+ ]
588
+ ALL_CATEGORICAL_COLS = [
589
+ "race", "sex", "c_charge_degree"
590
+ ]
591
+ DEFAULT_FEATURE_SET = FEATURE_SET_GROUP_1_VALS
592
+
593
+
594
+ # --- Data Size config ---
595
+ DATA_SIZE_MAP = {
596
+ "Pequeño (20%)": 0.2,
597
+ "Medio (60%)": 0.6,
598
+ "Grande (80%)": 0.8,
599
+ "Completo (100%)": 1.0
600
+ }
601
+ DEFAULT_DATA_SIZE = "Pequeño (20%)"
602
+
603
+
604
+ MAX_ROWS = 4000
605
+ TOP_N_CHARGE_CATEGORICAL = 50
606
+ WARM_MINI_ROWS = 300 # Small warm dataset for instant preview
607
+ CACHE_MAX_AGE_HOURS = 24 # Cache validity duration
608
+ np.random.seed(42)
609
+
610
+ # Global state containers (populated during initialization)
611
+ playground = None
612
+ X_TRAIN_RAW = None # Keep this for 100%
613
+ X_TEST_RAW = None
614
+ Y_TRAIN = None
615
+ Y_TEST = None
616
+ # Add a container for our pre-sampled data
617
+ X_TRAIN_SAMPLES_MAP = {}
618
+ Y_TRAIN_SAMPLES_MAP = {}
619
+
620
+ # Warm mini dataset for instant preview
621
+ X_TRAIN_WARM = None
622
+ Y_TRAIN_WARM = None
623
+
624
+ # Cache for transformed test sets (for future performance improvements)
625
+ TEST_CACHE = {}
626
+
627
+ # Initialization flags to track readiness state
628
+ INIT_FLAGS = {
629
+ "competition": False,
630
+ "dataset_core": False,
631
+ "pre_samples_small": False,
632
+ "pre_samples_medium": False,
633
+ "pre_samples_large": False,
634
+ "pre_samples_full": False,
635
+ "leaderboard": False,
636
+ "default_preprocessor": False,
637
+ "warm_mini": False,
638
+ "errors": []
639
+ }
640
+
641
+ # Lock for thread-safe flag updates
642
+ INIT_LOCK = threading.Lock()
643
+
644
+ # -------------------------------------------------------------------------
645
+ # 2. Data & Backend Utilities
646
+ # -------------------------------------------------------------------------
647
+
648
+ def _get_cache_dir():
649
+ """Get or create the cache directory for datasets."""
650
+ cache_dir = Path.home() / ".aimodelshare_cache"
651
+ cache_dir.mkdir(exist_ok=True)
652
+ return cache_dir
653
+
654
+ def _safe_request_csv(url, cache_filename="compas.csv"):
655
+ """
656
+ Request CSV from URL with local caching.
657
+ Reuses cached file if it exists and is less than CACHE_MAX_AGE_HOURS old.
658
+ """
659
+ cache_dir = _get_cache_dir()
660
+ cache_path = cache_dir / cache_filename
661
+
662
+ # Check if cache exists and is fresh
663
+ if cache_path.exists():
664
+ file_time = datetime.fromtimestamp(cache_path.stat().st_mtime)
665
+ if datetime.now() - file_time < timedelta(hours=CACHE_MAX_AGE_HOURS):
666
+ return pd.read_csv(cache_path)
667
+
668
+ # Download fresh data
669
+ response = requests.get(url, timeout=30)
670
+ response.raise_for_status()
671
+ df = pd.read_csv(StringIO(response.text))
672
+
673
+ # Save to cache
674
+ df.to_csv(cache_path, index=False)
675
+
676
+ return df
677
+
678
+ def safe_int(value, default=1):
679
+ """
680
+ Safely coerce a value to int, returning default if value is None or invalid.
681
+ Protects against TypeError when Gradio sliders receive None.
682
+ """
683
+ if value is None:
684
+ return default
685
+ try:
686
+ return int(value)
687
+ except (ValueError, TypeError):
688
+ return default
689
+
690
+ def load_and_prep_data(use_cache=True):
691
+ """
692
+ Load, sample, and prepare raw COMPAS dataset.
693
+ Pre-samples all configured data sizes and creates the warm mini dataset for instant preview.
694
+ """
695
+ url = "https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv"
696
+
697
+ # Use cached version if available
698
+ if use_cache:
699
+ try:
700
+ df = _safe_request_csv(url)
701
+ except Exception as e:
702
+ print(f"Cache failed, fetching directly: {e}")
703
+ response = requests.get(url)
704
+ df = pd.read_csv(StringIO(response.text))
705
+ else:
706
+ response = requests.get(url)
707
+ df = pd.read_csv(StringIO(response.text))
708
+
709
+ # Calculate length_of_stay
710
+ try:
711
+ df['c_jail_in'] = pd.to_datetime(df['c_jail_in'])
712
+ df['c_jail_out'] = pd.to_datetime(df['c_jail_out'])
713
+ df['length_of_stay'] = (df['c_jail_out'] - df['c_jail_in']).dt.total_seconds() / (24 * 60 * 60) # in days
714
+ except Exception:
715
+ df['length_of_stay'] = np.nan
716
+
717
+ if df.shape[0] > MAX_ROWS:
718
+ df = df.sample(n=MAX_ROWS, random_state=42)
719
+
720
+ feature_columns = ALL_NUMERIC_COLS + ALL_CATEGORICAL_COLS
721
+ feature_columns = sorted(list(set(feature_columns)))
722
+
723
+ target_column = "two_year_recid"
724
+
725
+ if "c_charge_desc" in df.columns:
726
+ top_charges = df["c_charge_desc"].value_counts().head(TOP_N_CHARGE_CATEGORICAL).index
727
+ df["c_charge_desc"] = df["c_charge_desc"].apply(
728
+ lambda x: x if pd.notna(x) and x in top_charges else "OTHER"
729
+ )
730
+
731
+ for col in feature_columns:
732
+ if col not in df.columns:
733
+ if col == 'length_of_stay' and 'length_of_stay' in df.columns:
734
+ continue
735
+ df[col] = np.nan
736
+
737
+ X = df[feature_columns].copy()
738
+ y = df[target_column].copy()
739
+
740
+ X_train_raw, X_test_raw, y_train, y_test = train_test_split(
741
+ X, y, test_size=0.25, random_state=42, stratify=y
742
+ )
743
+
744
+ # Pre-sample all data sizes
745
+ global X_TRAIN_SAMPLES_MAP, Y_TRAIN_SAMPLES_MAP, X_TRAIN_WARM, Y_TRAIN_WARM
746
+
747
+ X_TRAIN_SAMPLES_MAP["Completo (100%)"] = X_train_raw
748
+ Y_TRAIN_SAMPLES_MAP["Completo (100%)"] = y_train
749
+
750
+ for label, frac in DATA_SIZE_MAP.items():
751
+ if frac < 1.0:
752
+ X_train_sampled = X_train_raw.sample(frac=frac, random_state=42)
753
+ y_train_sampled = y_train.loc[X_train_sampled.index]
754
+ X_TRAIN_SAMPLES_MAP[label] = X_train_sampled
755
+ Y_TRAIN_SAMPLES_MAP[label] = y_train_sampled
756
+
757
+ # Create warm mini dataset for instant preview
758
+ warm_size = min(WARM_MINI_ROWS, len(X_train_raw))
759
+ X_TRAIN_WARM = X_train_raw.sample(n=warm_size, random_state=42)
760
+ Y_TRAIN_WARM = y_train.loc[X_TRAIN_WARM.index]
761
+
762
+
763
+
764
+ return X_train_raw, X_test_raw, y_train, y_test
765
+
766
+ def _background_initializer():
767
+ """
768
+ Background thread that performs sequential initialization tasks.
769
+ Updates INIT_FLAGS dict with readiness booleans and captures errors.
770
+
771
+ Initialization sequence:
772
+ 1. Competition object connection
773
+ 2. Dataset cached download and core split
774
+ 3. Warm mini dataset creation
775
+ 4. Progressive sampling: small -> medium -> large -> full
776
+ 5. Leaderboard prefetch
777
+ 6. Default preprocessor fit on small sample
778
+ """
779
+ global playground, X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST
780
+
781
+ try:
782
+ # Step 1: Connect to competition
783
+ with INIT_LOCK:
784
+ if playground is None:
785
+ playground = Competition(MY_PLAYGROUND_ID)
786
+ INIT_FLAGS["competition"] = True
787
+ except Exception as e:
788
+ with INIT_LOCK:
789
+ INIT_FLAGS["errors"].append(f"Fallo en la conexión con la competición: {str(e)}")
790
+
791
+ try:
792
+ # Step 2: Load dataset core (train/test split)
793
+ X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST = load_and_prep_data(use_cache=True)
794
+ with INIT_LOCK:
795
+ INIT_FLAGS["dataset_core"] = True
796
+ except Exception as e:
797
+ with INIT_LOCK:
798
+ INIT_FLAGS["errors"].append(f"Error al cargar el conjunto de datos principal: {str(e)}")
799
+ return # Cannot proceed without data
800
+
801
+ try:
802
+ # Step 3: Warm mini dataset (already created in load_and_prep_data)
803
+ if X_TRAIN_WARM is not None and len(X_TRAIN_WARM) > 0:
804
+ with INIT_LOCK:
805
+ INIT_FLAGS["warm_mini"] = True
806
+ except Exception as e:
807
+ with INIT_LOCK:
808
+ INIT_FLAGS["errors"].append(f"Error al generar la vista previa instantánea: {str(e)}")
809
+
810
+ # Progressive sampling - samples are already created in load_and_prep_data
811
+ # Just mark them as ready sequentially with delays to simulate progressive loading
812
+
813
+ try:
814
+ # Step 4a: Small sample (20%)
815
+ time.sleep(0.5) # Simulate processing
816
+ with INIT_LOCK:
817
+ INIT_FLAGS["pre_samples_small"] = True
818
+ except Exception as e:
819
+ with INIT_LOCK:
820
+ INIT_FLAGS["errors"].append(f"Error en la muestra pequeña: {str(e)}")
821
+
822
+ try:
823
+ # Step 4b: Medium sample (60%)
824
+ time.sleep(0.5)
825
+ with INIT_LOCK:
826
+ INIT_FLAGS["pre_samples_medium"] = True
827
+ except Exception as e:
828
+ with INIT_LOCK:
829
+ INIT_FLAGS["errors"].append(f"Error en la muestra mediana: {str(e)}")
830
+
831
+ try:
832
+ # Step 4c: Large sample (80%)
833
+ time.sleep(0.5)
834
+ with INIT_LOCK:
835
+ INIT_FLAGS["pre_samples_large"] = True
836
+ except Exception as e:
837
+ with INIT_LOCK:
838
+ INIT_FLAGS["errors"].append(f"Error en la muestra grande: {str(e)}")
839
+ print(f"✗ Large sample failed: {e}")
840
+
841
+ try:
842
+ # Step 4d: Full sample (100%)
843
+ print("Background init: Full sample (100%)...")
844
+ time.sleep(0.5)
845
+ with INIT_LOCK:
846
+ INIT_FLAGS["pre_samples_full"] = True
847
+ except Exception as e:
848
+ with INIT_LOCK:
849
+ INIT_FLAGS["errors"].append(f"Error en la muestra completa: {str(e)}")
850
+
851
+ try:
852
+ # Step 5: Leaderboard prefetch (best-effort, unauthenticated)
853
+ # Concurrency Note: Do NOT use os.environ for ambient token - prefetch
854
+ # anonymously to warm the cache for initial page loads.
855
+ if playground is not None:
856
+ _ = _get_leaderboard_with_optional_token(playground, None)
857
+ with INIT_LOCK:
858
+ INIT_FLAGS["leaderboard"] = True
859
+ except Exception as e:
860
+ with INIT_LOCK:
861
+ INIT_FLAGS["errors"].append(f"Error al obtener la tabla de clasificación: {str(e)}")
862
+
863
+ try:
864
+ # Step 6: Default preprocessor on small sample
865
+ _fit_default_preprocessor()
866
+ with INIT_LOCK:
867
+ INIT_FLAGS["default_preprocessor"] = True
868
+ except Exception as e:
869
+ with INIT_LOCK:
870
+ INIT_FLAGS["errors"].append(f"Error en el sistema de preprocesamiento: {str(e)}")
871
+ print(f"✗ Default preprocessor failed: {e}")
872
+
873
+
874
+ def _fit_default_preprocessor():
875
+ """
876
+ Pre-fit a default preprocessor on the small sample with default features.
877
+ Uses memoized preprocessor builder for efficiency.
878
+ """
879
+ if "Pequeño (20%)" not in X_TRAIN_SAMPLES_MAP:
880
+ return
881
+
882
+ X_sample = X_TRAIN_SAMPLES_MAP["Pequeño (20%)"]
883
+
884
+ # Use default feature set
885
+ numeric_cols = [f for f in DEFAULT_FEATURE_SET if f in ALL_NUMERIC_COLS]
886
+ categorical_cols = [f for f in DEFAULT_FEATURE_SET if f in ALL_CATEGORICAL_COLS]
887
+
888
+ if not numeric_cols and not categorical_cols:
889
+ return
890
+
891
+ # Use memoized builder
892
+ preprocessor, selected_cols = build_preprocessor(numeric_cols, categorical_cols)
893
+ preprocessor.fit(X_sample[selected_cols])
894
+
895
+ def start_background_init():
896
+ """
897
+ Start the background initialization thread.
898
+ Should be called once at app creation.
899
+ """
900
+ thread = threading.Thread(target=_background_initializer, daemon=True)
901
+ thread.start()
902
+
903
+ def poll_init_status():
904
+ """
905
+ Poll the initialization status and return readiness bool.
906
+ Returns empty string for HTML so users don't see the checklist.
907
+
908
+ Returns:
909
+ tuple: (status_html, ready_bool)
910
+ """
911
+ with INIT_LOCK:
912
+ flags = INIT_FLAGS.copy()
913
+
914
+ # Determine if minimum requirements met
915
+ ready = flags["competition"] and flags["dataset_core"] and flags["pre_samples_small"]
916
+
917
+ return "", ready
918
+
919
+ def get_available_data_sizes():
920
+ """
921
+ Return list of data sizes that are currently available based on init flags.
922
+ """
923
+ with INIT_LOCK:
924
+ flags = INIT_FLAGS.copy()
925
+
926
+ available = []
927
+ if flags["pre_samples_small"]:
928
+ available.append("Pequeño (20%)")
929
+ if flags["pre_samples_medium"]:
930
+ available.append("Medio (60%)")
931
+ if flags["pre_samples_large"]:
932
+ available.append("Grande (80%)")
933
+ if flags["pre_samples_full"]:
934
+ available.append("Completo (100%)")
935
+
936
+ return available if available else ["Pequeño (20%)"] # Fallback
937
+
938
+ def _is_ready() -> bool:
939
+ """
940
+ Check if initialization is complete and system is ready for real submissions.
941
+
942
+ Returns:
943
+ bool: True if competition, dataset, and small sample are initialized
944
+ """
945
+ with INIT_LOCK:
946
+ flags = INIT_FLAGS.copy()
947
+ return flags["competition"] and flags["dataset_core"] and flags["pre_samples_small"]
948
+
949
+ def _get_user_latest_accuracy(df: Optional[pd.DataFrame], username: str) -> Optional[float]:
950
+ """
951
+ Extract the user's latest submission accuracy from the leaderboard.
952
+
953
+ Uses timestamp sorting when available; otherwise assumes last row is latest.
954
+
955
+ Args:
956
+ df: Leaderboard DataFrame
957
+ username: Username to extract accuracy for
958
+
959
+ Returns:
960
+ float: Latest submission accuracy, or None if not found/invalid
961
+ """
962
+ if df is None or df.empty:
963
+ return None
964
+
965
+ try:
966
+ user_rows = df[df["username"] == username]
967
+ if user_rows.empty or "accuracy" not in user_rows.columns:
968
+ return None
969
+
970
+ # Try timestamp-based sorting if available
971
+ if "timestamp" in user_rows.columns:
972
+ user_rows = user_rows.copy()
973
+ user_rows["__parsed_ts"] = pd.to_datetime(user_rows["timestamp"], errors="coerce")
974
+ valid_ts = user_rows[user_rows["__parsed_ts"].notna()]
975
+
976
+ if not valid_ts.empty:
977
+ # Sort by timestamp and get latest
978
+ latest_row = valid_ts.sort_values("__parsed_ts", ascending=False).iloc[0]
979
+ return float(latest_row["accuracy"])
980
+
981
+ # Fallback: assume last row is latest (append order)
982
+ return float(user_rows.iloc[-1]["accuracy"])
983
+
984
+ except Exception as e:
985
+ _log(f"Error extracting latest accuracy for {username}: {e}")
986
+ return None
987
+
988
+ def _get_user_latest_ts(df: Optional[pd.DataFrame], username: str) -> Optional[float]:
989
+ """
990
+ Extract the user's latest valid timestamp from the leaderboard.
991
+
992
+ Args:
993
+ df: Leaderboard DataFrame
994
+ username: Username to extract timestamp for
995
+
996
+ Returns:
997
+ float: Latest timestamp as unix epoch, or None if not found/invalid
998
+ """
999
+ if df is None or df.empty:
1000
+ return None
1001
+
1002
+ try:
1003
+ user_rows = df[df["username"] == username]
1004
+ if user_rows.empty or "timestamp" not in user_rows.columns:
1005
+ return None
1006
+
1007
+ # Parse timestamps and get the latest
1008
+ user_rows = user_rows.copy()
1009
+ user_rows["__parsed_ts"] = pd.to_datetime(user_rows["timestamp"], errors="coerce")
1010
+ valid_ts = user_rows[user_rows["__parsed_ts"].notna()]
1011
+
1012
+ if valid_ts.empty:
1013
+ return None
1014
+
1015
+ latest_ts = valid_ts["__parsed_ts"].max()
1016
+ return latest_ts.timestamp() if pd.notna(latest_ts) else None
1017
+ except Exception as e:
1018
+ _log(f"Error extracting latest timestamp for {username}: {e}")
1019
+ return None
1020
+
1021
+ def _user_rows_changed(
1022
+ refreshed_leaderboard: Optional[pd.DataFrame],
1023
+ username: str,
1024
+ old_row_count: int,
1025
+ old_best_score: float,
1026
+ old_latest_ts: Optional[float] = None,
1027
+ old_latest_score: Optional[float] = None
1028
+ ) -> bool:
1029
+ """
1030
+ Check if user's leaderboard entries have changed after submission.
1031
+
1032
+ Used after polling to detect if the leaderboard has updated with the new submission.
1033
+ Checks row count (new submission added), best score (score improved), latest timestamp,
1034
+ and latest accuracy (handles backend overwrite without append).
1035
+
1036
+ Args:
1037
+ refreshed_leaderboard: Fresh leaderboard data
1038
+ username: Username to check for
1039
+ old_row_count: Previous number of submissions for this user
1040
+ old_best_score: Previous best accuracy score
1041
+ old_latest_ts: Previous latest timestamp (unix epoch), optional
1042
+ old_latest_score: Previous latest submission accuracy, optional
1043
+
1044
+ Returns:
1045
+ bool: True if user has more rows, better score, newer timestamp, or changed latest accuracy
1046
+ """
1047
+ if refreshed_leaderboard is None or refreshed_leaderboard.empty:
1048
+ return False
1049
+
1050
+ try:
1051
+ user_rows = refreshed_leaderboard[refreshed_leaderboard["username"] == username]
1052
+ if user_rows.empty:
1053
+ return False
1054
+
1055
+ new_row_count = len(user_rows)
1056
+ new_best_score = float(user_rows["accuracy"].max()) if "accuracy" in user_rows.columns else 0.0
1057
+ new_latest_ts = _get_user_latest_ts(refreshed_leaderboard, username)
1058
+ new_latest_score = _get_user_latest_accuracy(refreshed_leaderboard, username)
1059
+
1060
+ # Changed if we have more submissions, better score, newer timestamp, or changed latest accuracy
1061
+ changed = (new_row_count > old_row_count) or (new_best_score > old_best_score + 0.0001)
1062
+
1063
+ # Check timestamp if available
1064
+ if old_latest_ts is not None and new_latest_ts is not None:
1065
+ changed = changed or (new_latest_ts > old_latest_ts)
1066
+
1067
+ # Check latest accuracy change (handles overwrite-without-append case)
1068
+ if old_latest_score is not None and new_latest_score is not None:
1069
+ accuracy_changed = abs(new_latest_score - old_latest_score) >= 0.00001
1070
+ if accuracy_changed:
1071
+ _log(f"Latest accuracy changed: {old_latest_score:.4f} -> {new_latest_score:.4f}")
1072
+ changed = changed or accuracy_changed
1073
+
1074
+ if changed:
1075
+ _log(f"User rows changed for {username}:")
1076
+ _log(f" Row count: {old_row_count} -> {new_row_count}")
1077
+ _log(f" Best score: {old_best_score:.4f} -> {new_best_score:.4f}")
1078
+ _log(f" Latest score: {old_latest_score if old_latest_score else 'N/A'} -> {new_latest_score if new_latest_score else 'N/A'}")
1079
+ _log(f" Timestamp: {old_latest_ts} -> {new_latest_ts}")
1080
+
1081
+ return changed
1082
+ except Exception as e:
1083
+ _log(f"Error checking user rows: {e}")
1084
+ return False
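Combined with the LEADERBOARD_POLL_TRIES / LEADERBOARD_POLL_SLEEP constants defined earlier, this change detector is presumably driven by a loop along the following lines (a sketch of the pattern only; the real submission handler lives later in this file and is not shown in this excerpt):

    def _wait_for_leaderboard_update(username, token, old_row_count, old_best_score,
                                     old_latest_ts=None, old_latest_score=None):
        """Poll the live leaderboard until the user's rows change, or give up."""
        for _ in range(LEADERBOARD_POLL_TRIES):
            refreshed = _get_leaderboard_with_optional_token(playground, token)
            if _user_rows_changed(refreshed, username, old_row_count, old_best_score,
                                  old_latest_ts, old_latest_score):
                return refreshed           # backend has caught up with the submission
            time.sleep(LEADERBOARD_POLL_SLEEP)
        return None                        # caller falls back to the provisional/pending UI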
1085
+
1086
+ @functools.lru_cache(maxsize=32)
1087
+ def _get_cached_preprocessor_config(numeric_cols_tuple, categorical_cols_tuple):
1088
+ """
1089
+ Create and return preprocessor configuration (memoized).
1090
+ Uses tuples for hashability in lru_cache.
1091
+
1092
+ Concurrency Note: Uses sparse_output=True for OneHotEncoder to reduce memory
1093
+ footprint under concurrent requests. Downstream models that require dense
1094
+ arrays (DecisionTree, RandomForest) will convert via .toarray() as needed.
1095
+ LogisticRegression and KNeighborsClassifier handle sparse matrices natively.
1096
+
1097
+ Returns tuple of (transformers_list, selected_columns) ready for ColumnTransformer.
1098
+ """
1099
+ numeric_cols = list(numeric_cols_tuple)
1100
+ categorical_cols = list(categorical_cols_tuple)
1101
+
1102
+ transformers = []
1103
+ selected_cols = []
1104
+
1105
+ if numeric_cols:
1106
+ num_tf = Pipeline(steps=[
1107
+ ("imputer", SimpleImputer(strategy="median")),
1108
+ ("scaler", StandardScaler())
1109
+ ])
1110
+ transformers.append(("num", num_tf, numeric_cols))
1111
+ selected_cols.extend(numeric_cols)
1112
+
1113
+ if categorical_cols:
1114
+ # Use sparse_output=True to reduce memory footprint
1115
+ cat_tf = Pipeline(steps=[
1116
+ ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
1117
+ ("onehot", OneHotEncoder(handle_unknown="ignore", sparse_output=True))
1118
+ ])
1119
+ transformers.append(("cat", cat_tf, categorical_cols))
1120
+ selected_cols.extend(categorical_cols)
1121
+
1122
+ return transformers, selected_cols
1123
+
1124
+ def build_preprocessor(numeric_cols, categorical_cols):
1125
+ """
1126
+ Build a preprocessor using cached configuration.
1127
+ The configuration (pipeline structure) is memoized; the actual fit is not.
1128
+
1129
+ Note: Returns sparse matrices when categorical columns are present.
1130
+ Use _ensure_dense() helper if model requires dense input.
1131
+ """
1132
+ # Convert to tuples for caching
1133
+ numeric_tuple = tuple(sorted(numeric_cols))
1134
+ categorical_tuple = tuple(sorted(categorical_cols))
1135
+
1136
+ transformers, selected_cols = _get_cached_preprocessor_config(numeric_tuple, categorical_tuple)
1137
+
1138
+ # Create new ColumnTransformer with cached config
1139
+ preprocessor = ColumnTransformer(transformers=transformers, remainder="drop")
1140
+
1141
+ return preprocessor, selected_cols
1142
+
1143
+ def _ensure_dense(X):
1144
+ """
1145
+ Convert sparse matrix to dense if necessary.
1146
+
1147
+ Helper function for models that don't support sparse input
1148
+ (DecisionTree, RandomForest). LogisticRegression and KNN
1149
+ handle sparse matrices natively.
1150
+ """
1151
+ from scipy import sparse
1152
+ if sparse.issparse(X):
1153
+ return X.toarray()
1154
+ return X
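A short usage sketch tying the memoized builder and the dense-conversion helper together, assuming the sample globals have been populated by the background initializer (illustrative only):

    preprocessor, cols = build_preprocessor(["age", "priors_count"], ["race", "sex"])
    X_small = X_TRAIN_SAMPLES_MAP["Pequeño (20%)"]
    y_small = Y_TRAIN_SAMPLES_MAP["Pequeño (20%)"]
    X_enc = preprocessor.fit_transform(X_small[cols])   # may be sparse when categoricals are present
    clf = RandomForestClassifier(random_state=42, class_weight="balanced")
    clf.fit(_ensure_dense(X_enc), y_small)              # densified per this module's convention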
1155
+
1156
+ def tune_model_complexity(model, level):
1157
+ """
1158
+ Map a 1–10 slider value to model hyperparameters.
1159
+ Levels 1–3: Conservative / simple
1160
+ Levels 4–7: Balanced
1161
+ Levels 8–10: Aggressive / risk of overfitting
1162
+ """
1163
+ level = int(level)
1164
+ if isinstance(model, LogisticRegression):
1165
+ c_map = {1: 0.01, 2: 0.025, 3: 0.05, 4: 0.1, 5: 0.25, 6: 0.5, 7: 1.0, 8: 2.0, 9: 5.0, 10: 10.0}
1166
+ model.C = c_map.get(level, 1.0)
1167
+ model.max_iter = max(getattr(model, "max_iter", 0), 500)
1168
+ elif isinstance(model, RandomForestClassifier):
1169
+ depth_map = {1: 3, 2: 5, 3: 7, 4: 9, 5: 11, 6: 15, 7: 20, 8: 25, 9: None, 10: None}
1170
+ est_map = {1: 20, 2: 30, 3: 40, 4: 60, 5: 80, 6: 100, 7: 120, 8: 150, 9: 180, 10: 220}
1171
+ model.max_depth = depth_map.get(level, 10)
1172
+ model.n_estimators = est_map.get(level, 100)
1173
+ elif isinstance(model, DecisionTreeClassifier):
1174
+ depth_map = {1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 8, 7: 10, 8: 12, 9: 15, 10: None}
1175
+ model.max_depth = depth_map.get(level, 6)
1176
+ elif isinstance(model, KNeighborsClassifier):
1177
+ k_map = {1: 100, 2: 75, 3: 60, 4: 50, 5: 40, 6: 30, 7: 25, 8: 15, 9: 7, 10: 3}
1178
+ model.n_neighbors = k_map.get(level, 25)
1179
+ return model
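For example, pairing a model card with the complexity mapping (level 7 shown; other levels follow the tables above):

    base = MODEL_TYPES["The Balanced Generalist"]["model_builder"]()   # LogisticRegression
    model = tune_model_complexity(base, level=7)                       # sets C=1.0, keeps max_iter=500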
1180
+
1181
+ # --- New Helper Functions for HTML Generation ---
1182
+
1183
+ def _normalize_team_name(name: str) -> str:
1184
+ """
1185
+ Normalize team name for consistent comparison and storage.
1186
+
1187
+ Strips leading/trailing whitespace and collapses multiple spaces into single spaces.
1188
+ This ensures consistent formatting across environment variables, state, and leaderboard rendering.
1189
+
1190
+ Args:
1191
+ name: Team name to normalize (can be None or empty)
1192
+
1193
+ Returns:
1194
+ str: Normalized team name, or empty string if input is None/empty
1195
+
1196
+ Examples:
1197
+ >>> _normalize_team_name(" The Ethical Explorers ")
1198
+ 'The Ethical Explorers'
1199
+ >>> _normalize_team_name("The Moral Champions")
1200
+ 'The Moral Champions'
1201
+ >>> _normalize_team_name(None)
1202
+ ''
1203
+ """
1204
+ if not name:
1205
+ return ""
1206
+ return " ".join(str(name).strip().split())
1207
+
1208
+
1209
+ # Team name translation helpers for UI display (Spanish)
1210
+ def translate_team_name_for_display(team_en: str, lang: str = "es") -> str:
1211
+ """
1212
+ Translate a canonical English team name to the specified language for UI display.
1213
+ Fallback to English if translation not found.
1214
+
1215
+ Internal logic always uses canonical English names. This is only for UI display.
1216
+ """
1217
+ if lang not in TEAM_NAME_TRANSLATIONS:
1218
+ lang = "en"
1219
+ return TEAM_NAME_TRANSLATIONS[lang].get(team_en, team_en)
1220
+
1221
+
1222
+ def translate_team_name_to_english(display_name: str, lang: str = "es") -> str:
1223
+ """
1224
+ Reverse lookup: given a localized team name, return the canonical English name.
1225
+ Returns the original display_name if not found.
1226
+
1227
+ For future use if user input needs to be normalized back to English.
1228
+ """
1229
+ if lang not in TEAM_NAME_TRANSLATIONS:
1230
+ return display_name # Already English or unknown
1231
+
1232
+ translations = TEAM_NAME_TRANSLATIONS[lang]
1233
+ for english_name, localized_name in translations.items():
1234
+ if localized_name == display_name:
1235
+ return english_name
1236
+ return display_name
1237
+
1238
+
1239
+ def _format_leaderboard_for_display(df: Optional[pd.DataFrame], lang: str = "es") -> Optional[pd.DataFrame]:
1240
+ """
1241
+ Create a copy of the leaderboard DataFrame with team names translated for display.
1242
+ Does not mutate the original DataFrame.
1243
+
1244
+ For potential future use when displaying full leaderboard.
1245
+ Internal logic should always use the original DataFrame with English team names.
1246
+ """
1247
+ if df is None:
1248
+ return None
1249
+
1250
+ if df.empty or "Team" not in df.columns:
1251
+ return df.copy()
1252
+
1253
+ df_display = df.copy()
1254
+ df_display["Team"] = df_display["Team"].apply(lambda t: translate_team_name_for_display(t, lang))
1255
+ return df_display
1256
+
1257
+
1258
+ def _build_skeleton_leaderboard(rows=6, is_team=True, submit_button_label="5. 🔬 Construir y enviar el modelo"):
1259
+ context_label = "Equipo" if is_team else "Individual"
1260
+ return f"""
1261
+ <div class='lb-placeholder' aria-live='polite'>
1262
+ <div class='lb-placeholder-title'>{context_label} · Clasificación pendiente</div>
1263
+ <div class='lb-placeholder-sub'>
1264
+ <p style='margin:0 0 6px 0;'>Envía tu primer modelo para desbloquear la clasificación.</p>
1265
+ <p style='margin:0;'><strong>Haz clic en «{submit_button_label}» (abajo a la izquierda)</strong> para comenzar!</p>
1266
+ </div>
1267
+ </div>
1268
+ """
1269
+ # --- FIX APPLIED HERE ---
1270
+ def build_login_prompt_html():
1271
+ """
1272
+ Generate HTML for the login prompt text *only*.
1273
+ The styled preview card will be prepended to this.
1274
+ """
1275
+ return f"""
1276
+ <h2 style='color: #111827; margin-top:20px; border-top: 2px solid #e5e7eb; padding-top: 20px;'>🔐 Sign in to submit & rank</h2>
1277
+ <div style='margin-top:16px; text-align:left; font-size:1rem; line-height:1.6; color:#374151;'>
1278
+ <p style='margin:12px 0;'>
1279
+ This is a preview run only. Sign in to publish your score to the live leaderboard,
1280
+ earn promotions, and contribute team points.
1281
+ </p>
1282
+ <p style='margin:12px 0;'>
1283
+ <strong>New user?</strong> Create a free account at
1284
+ <a href='https://www.modelshare.ai/login' target='_blank'
1285
+ style='color:#4f46e5; text-decoration:underline;'>modelshare.ai/login</a>
1286
+ </p>
1287
+ </div>
1288
+ """
1289
+ # --- END OF FIX ---
1290
+
1291
+ def _build_kpi_card_html(new_score, last_score, new_rank, last_rank, submission_count, is_preview=False, is_pending=False, local_test_accuracy=None):
1292
+ """Generates the HTML for the KPI feedback card. Supports preview mode label and pending state."""
1293
+
1294
+ # Handle pending state - show processing message with provisional diff
1295
+ if is_pending:
1296
+ title = "⏳ Procesando envío"
1297
+ acc_color = "#3b82f6" # Blue
1298
+ acc_text = f"{(local_test_accuracy * 100):.2f}%" if local_test_accuracy is not None else "N/A"
1299
+
1300
+ # Compute provisional diff between local (new) and last score
1301
+ if local_test_accuracy is not None and last_score is not None and last_score > 0:
1302
+ score_diff = local_test_accuracy - last_score
1303
+ if abs(score_diff) < 0.0001:
1304
+ acc_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #6b7280; margin:0;'>Sin cambios (↔) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualizando la clasificación...</p>"
1305
+ elif score_diff > 0:
1306
+ acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #16a34a; margin:0;'>+{(score_diff * 100):.2f} (⬆️) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualizando la clasificación...</p>"
1307
+ else:
1308
+ acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #ef4444; margin:0;'>{(score_diff * 100):.2f} (⬇️) <span style='font-size: 0.9rem; color: #9ca3af;'>(Provisional)</span></p><p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualizando la clasificación...</p>"
1309
+ else:
1310
+ # No last score available - just show pending message
1311
+ acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>Actualizando la clasificación...</p>"
1312
+
1313
+ border_color = acc_color
1314
+ rank_color = "#6b7280" # Gray
1315
+ rank_text = "Pendiente"
1316
+ rank_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0;'>Calculando posición...</p>"
1317
+
1318
+ # Handle preview mode - Styled to match "success" card
1319
+ elif is_preview:
1320
+ title = "🔬 ¡Prueba completada con éxito!"
1321
+ acc_color = "#16a34a" # Green (like success)
1322
+ acc_text = f"{(new_score * 100):.2f}%" if new_score > 0 else "N/A"
1323
+ acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>(Solo prueba - no enviado)</p>" # Neutral color
1324
+ border_color = acc_color # Green border
1325
+ rank_color = "#3b82f6" # Blue (like rank)
1326
+ rank_text = "N/A" # Placeholder
1327
+ rank_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0;'>Sin clasificar (prueba)</p>" # Neutral color
1328
+
1329
+ # 1. Handle First Submission
1330
+ elif submission_count == 0:
1331
+ title = "🎉 ¡Primer modelo enviado!"
1332
+ acc_color = "#16a34a" # green
1333
+ acc_text = f"{(new_score * 100):.2f}%"
1334
+ acc_diff_html = "<p style='font-size: 1.2rem; font-weight: 500; color: #6b7280; margin:0; padding-top: 8px;'>(¡Tu primera puntuación!)</p>"
1335
+
1336
+ rank_color = "#3b82f6" # blue
1337
+ rank_text = f"#{new_rank}"
1338
+ rank_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #3b82f6; margin:0;'>¡Ya estás en la tabla!</p>"
1339
+ border_color = acc_color
1340
+
1341
+ else:
1342
+ # 2. Handle Score Changes
1343
+ score_diff = new_score - last_score
1344
+ if abs(score_diff) < 0.0001:
1345
+ title = "✅ Envío realizado con éxito"
1346
+ acc_color = "#6b7280" # gray
1347
+ acc_text = f"{(new_score * 100):.2f}%"
1348
+ acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>Sin variaciones (↔)</p>"
1349
+ border_color = acc_color
1350
+ elif score_diff > 0:
1351
+ title = "✅ Envío éxitoso!"
1352
+ acc_color = "#16a34a" # green
1353
+ acc_text = f"{(new_score * 100):.2f}%"
1354
+ acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>+{(score_diff * 100):.2f} (⬆️)</p>"
1355
+ border_color = acc_color
1356
+ else:
1357
+ title = "📉 Tu puntuación ha bajado"
1358
+ acc_color = "#ef4444" # red
1359
+ acc_text = f"{(new_score * 100):.2f}%"
1360
+ acc_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {acc_color}; margin:0;'>{(score_diff * 100):.2f} (⬇️)</p>"
1361
+ border_color = acc_color
1362
+
1363
+ # 3. Handle Rank Changes
1364
+ rank_diff = last_rank - new_rank
1365
+ rank_color = "#3b82f6" # blue
1366
+ rank_text = f"#{new_rank}"
1367
+ if last_rank == 0: # Handle first rank
1368
+ rank_diff_html = "<p style='font-size: 1.5rem; font-weight: 600; color: #3b82f6; margin:0;'>¡Ya estás en la tabla!</p>"
1369
+ elif rank_diff > 0:
1370
+ rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #16a34a; margin:0;'>🚀 ¡Has subido {rank_diff} posición(es)!</p>"
1371
+ elif rank_diff < 0:
1372
+ rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: #ef4444; margin:0;'>🔻 Has bajado {abs(rank_diff)} posición(es)</p>"
1373
+ else:
1374
+ rank_diff_html = f"<p style='font-size: 1.5rem; font-weight: 600; color: {rank_color}; margin:0;'>Mantienes tu posición (↔)</p>"
1375
+
1376
+ return f"""
1377
+ <div class='kpi-card' style='border-color: {border_color};'>
1378
+ <h2 style='color: #111827; margin-top:0;'>{title}</h2>
1379
+ <div class='kpi-card-body'>
1380
+ <div class='kpi-metric-box'>
1381
+ <p class='kpi-label'>Nueva precisión</p>
1382
+ <p class='kpi-score' style='color: {acc_color};'>{acc_text}</p>
1383
+ {acc_diff_html}
1384
+ </div>
1385
+ <div class='kpi-metric-box'>
1386
+ <p class='kpi-label'>Tu posición</p>
1387
+ <p class='kpi-score' style='color: {rank_color};'>{rank_text}</p>
1388
+ {rank_diff_html}
1389
+ </div>
1390
+ </div>
1391
+ </div>
1392
+ """
1393
+
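A minimal usage sketch of the card helper above, using made-up scores to exercise the first-submission, improved-score, and pending branches (all values hypothetical):

first_card = _build_kpi_card_html(
    new_score=0.78, last_score=0.0, new_rank=5, last_rank=0, submission_count=0
)
improved_card = _build_kpi_card_html(
    new_score=0.81, last_score=0.78, new_rank=3, last_rank=5, submission_count=1
)
pending_card = _build_kpi_card_html(
    new_score=0.0, last_score=0.78, new_rank=0, last_rank=3, submission_count=2,
    is_pending=True, local_test_accuracy=0.80
)
# Each call returns an HTML string; the border color and diff arrows change with
# the sign of (new_score - last_score) or, when pending, with the provisional
# (local_test_accuracy - last_score) difference.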
1394
+ def _build_team_html(team_summary_df, team_name):
1395
+ """
1396
+ Generates the HTML for the team leaderboard.
1397
+
1398
+ Uses normalized, case-insensitive comparison to highlight the user's team row,
1399
+ ensuring reliable highlighting even with whitespace or casing variations.
1400
+
1401
+ Team names are translated to Spanish for display only. Internal comparisons
1402
+ use the unmodified English team names from the DataFrame.
1403
+ """
1404
+ if team_summary_df is None or team_summary_df.empty:
1405
+ return "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Aún no hay envíos de equipos.</p>"
1406
+
1407
+ # Normalize the current user's team name for comparison (using English names)
1408
+ normalized_user_team = _normalize_team_name(team_name).lower()
1409
+
1410
+ header = """
1411
+ <table class='leaderboard-html-table'>
1412
+ <thead>
1413
+ <tr>
1414
+ <th>Posición</th>
1415
+ <th>Equipo</th>
1416
+ <th>Mejor Puntuación</th>
1417
+ <th>Media</th>
1418
+ <th>Envíos</th>
1419
+ </tr>
1420
+ </thead>
1421
+ <tbody>
1422
+ """
1423
+
1424
+ body = ""
1425
+ for index, row in team_summary_df.iterrows():
1426
+ # Normalize the row's team name and compare case-insensitively (using English names)
1427
+ normalized_row_team = _normalize_team_name(row["Team"]).lower()
1428
+ is_user_team = normalized_row_team == normalized_user_team
1429
+ row_class = "class='user-row-highlight'" if is_user_team else ""
1430
+
1431
+ # Translate team name to Spanish for display only
1432
+ display_team_name = translate_team_name_for_display(row["Team"], UI_TEAM_LANG)
1433
+
1434
+ body += f"""
1435
+ <tr {row_class}>
1436
+ <td>{index}</td>
1437
+ <td>{display_team_name}</td>
1438
+ <td>{(row['Best_Score'] * 100):.2f}%</td>
1439
+ <td>{(row['Avg_Score'] * 100):.2f}%</td>
1440
+ <td>{row['Submissions']}</td>
1441
+ </tr>
1442
+ """
1443
+
1444
+ footer = "</tbody></table>"
1445
+ return header + body + footer
1446
+
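A small self-contained sketch of the normalized, case-insensitive comparison described in the docstring above; `_normalize_team_name` is defined earlier in this module and is assumed here to simply collapse stray whitespace:

def _normalize_team_name_stub(name):
    # Hypothetical stand-in for the module's _normalize_team_name helper.
    return " ".join(str(name).split())

stored_row_team = "  justice   league "   # value as it might appear in the leaderboard
current_user_team = "Justice League"

is_user_team = (
    _normalize_team_name_stub(stored_row_team).lower()
    == _normalize_team_name_stub(current_user_team).lower()
)
# True: whitespace and casing differences no longer block the row highlight.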
1447
+ def _build_individual_html(individual_summary_df, username):
1448
+ """Generates the HTML for the individual leaderboard."""
1449
+ if individual_summary_df is None or individual_summary_df.empty:
1450
+ return "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Aún no hay envíos individuales.</p>"
1451
+
1452
+ header = """
1453
+ <table class='leaderboard-html-table'>
1454
+ <thead>
1455
+ <tr>
1456
+ <th>Posición</th>
1457
+ <th>Ingeniero/a</th>
1458
+ <th>Mejor Puntuación</th>
1459
+ <th>Envíos</th>
1460
+ </tr>
1461
+ </thead>
1462
+ <tbody>
1463
+ """
1464
+
1465
+ body = ""
1466
+ for index, row in individual_summary_df.iterrows():
1467
+ is_user = row["Engineer"] == username
1468
+ row_class = "class='user-row-highlight'" if is_user else ""
1469
+ body += f"""
1470
+ <tr {row_class}>
1471
+ <td>{index}</td>
1472
+ <td>{row['Engineer']}</td>
1473
+ <td>{(row['Best_Score'] * 100):.2f}%</td>
1474
+ <td>{row['Submissions']}</td>
1475
+ </tr>
1476
+ """
1477
+
1478
+ footer = "</tbody></table>"
1479
+ return header + body + footer
1480
+
1481
+
1482
+
1483
+
1484
+ # --- End Helper Functions ---
1485
+
1486
+
1487
+ def generate_competitive_summary(leaderboard_df, team_name, username, last_submission_score, last_rank, submission_count):
1488
+ """
1489
+ Build summaries, HTML, and KPI card.
1490
+
1491
+ Concurrency Note: Uses the team_name parameter directly for team highlighting,
1492
+ NOT os.environ, to prevent cross-user data leakage under concurrent requests.
1493
+
1494
+ Returns (team_html, individual_html, kpi_card_html, new_best_accuracy, new_rank, this_submission_score).
1495
+ """
1496
+ team_summary_df = pd.DataFrame(columns=["Team", "Best_Score", "Avg_Score", "Submissions"])
1497
+ individual_summary_df = pd.DataFrame(columns=["Engineer", "Best_Score", "Submissions"])
1498
+
1499
+ if leaderboard_df is None or leaderboard_df.empty or "accuracy" not in leaderboard_df.columns:
1500
+ return (
1501
+ "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Leaderboard empty.</p>",
1502
+ "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Leaderboard empty.</p>",
1503
+ _build_kpi_card_html(0, 0, 0, 0, 0, is_preview=False, is_pending=False, local_test_accuracy=None),
1504
+ 0.0, 0, 0.0
1505
+ )
1506
+
1507
+ # Team summary
1508
+ if "Team" in leaderboard_df.columns:
1509
+ team_summary_df = (
1510
+ leaderboard_df.groupby("Team")["accuracy"]
1511
+ .agg(Best_Score="max", Avg_Score="mean", Submissions="count")
1512
+ .reset_index()
1513
+ .sort_values("Best_Score", ascending=False)
1514
+ .reset_index(drop=True)
1515
+ )
1516
+ team_summary_df.index = team_summary_df.index + 1
1517
+
1518
+ # Individual summary
1519
+ user_bests = leaderboard_df.groupby("username")["accuracy"].max()
1520
+ user_counts = leaderboard_df.groupby("username")["accuracy"].count()
1521
+ individual_summary_df = pd.DataFrame(
1522
+ {"Engineer": user_bests.index, "Best_Score": user_bests.values, "Submissions": user_counts.values}
1523
+ ).sort_values("Best_Score", ascending=False).reset_index(drop=True)
1524
+ individual_summary_df.index = individual_summary_df.index + 1
1525
+
1526
+ # Get stats for KPI card
1527
+ new_rank = 0
1528
+ new_best_accuracy = 0.0
1529
+ this_submission_score = 0.0
1530
+
1531
+ try:
1532
+ # All submissions for this user
1533
+ user_rows = leaderboard_df[leaderboard_df["username"] == username].copy()
1534
+
1535
+ if not user_rows.empty:
1536
+ # Attempt robust timestamp parsing
1537
+ if "timestamp" in user_rows.columns:
1538
+ parsed_ts = pd.to_datetime(user_rows["timestamp"], errors="coerce")
1539
+
1540
+ if parsed_ts.notna().any():
1541
+ # At least one valid timestamp → use parsed ordering
1542
+ user_rows["__parsed_ts"] = parsed_ts
1543
+ user_rows = user_rows.sort_values("__parsed_ts", ascending=False)
1544
+ this_submission_score = float(user_rows.iloc[0]["accuracy"])
1545
+ else:
1546
+ # All timestamps invalid → assume append order, take last as "latest"
1547
+ this_submission_score = float(user_rows.iloc[-1]["accuracy"])
1548
+ else:
1549
+ # No timestamp column → fallback to last row
1550
+ this_submission_score = float(user_rows.iloc[-1]["accuracy"])
1551
+
1552
+ # Rank & best accuracy (unchanged logic, but make sure we use the same best row)
1553
+ my_rank_row = None
1554
+ # Look up this user's row in the individual summary built above
1555
+ my_rank_row = individual_summary_df[individual_summary_df["Engineer"] == username]
1556
+ if not my_rank_row.empty:
1557
+ new_rank = my_rank_row.index[0]
1558
+ new_best_accuracy = float(my_rank_row["Best_Score"].iloc[0])
1559
+
1560
+ except Exception as e:
1561
+ _log(f"Latest submission score extraction failed: {e}")
1562
+
1563
+ # Generate HTML outputs
1564
+ # Concurrency Note: Use team_name parameter directly, not os.environ
1565
+ team_html = _build_team_html(team_summary_df, team_name)
1566
+ individual_html = _build_individual_html(individual_summary_df, username)
1567
+ kpi_card_html = _build_kpi_card_html(
1568
+ this_submission_score, last_submission_score, new_rank, last_rank, submission_count,
1569
+ is_preview=False, is_pending=False, local_test_accuracy=None
1570
+ )
1571
+
1572
+ return team_html, individual_html, kpi_card_html, new_best_accuracy, new_rank, this_submission_score
1573
+
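The team and individual tables produced above are plain pandas aggregations; a self-contained sketch with a toy leaderboard (column names match the code above, values invented):

import pandas as pd

toy = pd.DataFrame({
    "username": ["ana", "ana", "ben", "cara"],
    "Team":     ["Lions", "Lions", "Hawks", "Hawks"],
    "accuracy": [0.71, 0.76, 0.74, 0.69],
})

team_summary = (
    toy.groupby("Team")["accuracy"]
    .agg(Best_Score="max", Avg_Score="mean", Submissions="count")
    .reset_index()
    .sort_values("Best_Score", ascending=False)
    .reset_index(drop=True)
)
team_summary.index = team_summary.index + 1   # 1-based index becomes the "Posición" column

# Individual best scores and submission counts, mirroring the code above.
user_bests = toy.groupby("username")["accuracy"].max()
user_counts = toy.groupby("username")["accuracy"].count()
# Lions rank 1 (best 0.76), Hawks rank 2 (best 0.74); ana's best is 0.76 over 2 submissions.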
1574
+
1575
+ def get_model_card(model_name):
1576
+ # Retrieve the Spanish description stored in "card_es"
1577
+ return MODEL_TYPES.get(model_name, {}).get("card_es", "Descripción no disponible.")
1578
+
1579
+ def compute_rank_settings(
1580
+ submission_count,
1581
+ current_model,
1582
+ current_complexity,
1583
+ current_feature_set,
1584
+ current_data_size
1585
+ ):
1586
+ """
1587
+ Returns rank gating settings.
1588
+ Adapted for Spanish UI: Returns Tuple choices [(Display, Value)]
1589
+ """
1590
+
1591
+ def get_choices_for_rank(rank):
1592
+ if rank == 0:
1593
+ return [opt for opt in FEATURE_SET_ALL_OPTIONS if opt[1] in FEATURE_SET_GROUP_1_VALS]
1594
+ if rank == 1:
1595
+ return [opt for opt in FEATURE_SET_ALL_OPTIONS if opt[1] in (FEATURE_SET_GROUP_1_VALS + FEATURE_SET_GROUP_2_VALS)]
1596
+ return FEATURE_SET_ALL_OPTIONS
1597
+
1598
+ # Helper to generate Model Radio Tuples [(Spanish, English)]
1599
+ def get_model_tuples(available_english_keys):
1600
+ return [(MODEL_DISPLAY_MAP[k], k) for k in available_english_keys if k in MODEL_DISPLAY_MAP]
1601
+
1602
+ # Rank 0: Trainee
1603
+ if submission_count == 0:
1604
+ avail_keys = ["The Balanced Generalist"]
1605
+ return {
1606
+ "rank_message": "# 🧑‍🎓 Rango: Ingeniero en Pràcticas\n<p style='font-size:24px; line-height:1.4;'>Para tu primer envío, simplemente haz clic en el botón grande '🔬 Construir y Enviar Modelo' abajo.</p>",
1607
+ "model_choices": get_model_tuples(avail_keys),
1608
+ "model_value": "The Balanced Generalist",
1609
+ "model_interactive": False,
1610
+ "complexity_max": 3,
1611
+ "complexity_value": min(current_complexity, 3),
1612
+ "feature_set_choices": get_choices_for_rank(0),
1613
+ "feature_set_value": FEATURE_SET_GROUP_1_VALS,
1614
+ "feature_set_interactive": False,
1615
+ "data_size_choices": ["Pequeño (20%)"],
1616
+ "data_size_value": "Pequeño (20%)",
1617
+ "data_size_interactive": False,
1618
+ }
1619
+
1620
+ # Rank 1: Junior
1621
+ elif submission_count == 1:
1622
+ avail_keys = ["The Balanced Generalist", "The Rule-Maker", "The 'Nearest Neighbor'"]
1623
+ return {
1624
+ "rank_message": "# 🎉 ¡Subida de Rango! Ingeniero Junior\n<p style='font-size:24px; line-height:1.4;'>¡Nuevos modelos, tamaños de datos e ingredientes de datos desbloqueados!</p>",
1625
+ "model_choices": get_model_tuples(avail_keys),
1626
+ "model_value": current_model if current_model in avail_keys else "The Balanced Generalist",
1627
+ "model_interactive": True,
1628
+ "complexity_max": 6,
1629
+ "complexity_value": min(current_complexity, 6),
1630
+ "feature_set_choices": get_choices_for_rank(1),
1631
+ "feature_set_value": current_feature_set,
1632
+ "feature_set_interactive": True,
1633
+ "data_size_choices": ["Pequeño (20%)", "Medio (60%)"],
1634
+ "data_size_value": current_data_size if current_data_size in ["Pequeño (20%)", "Medio (60%)"] else "Pequeño (20%)",
1635
+ "data_size_interactive": True,
1636
+ }
1637
+
1638
+ # Rank 2: Senior
1639
+ elif submission_count == 2:
1640
+ avail_keys = list(MODEL_TYPES.keys())
1641
+ return {
1642
+ "rank_message": "# 🌟 ¡Subida de Rango! Ingeniero Senior\n<p style='font-size:24px; line-height:1.4;'>¡Ingredientes de datos más potentes desbloqueados! Los predictores más fuertes (como 'Edad' y 'Historial Delictivo') ya están disponibles. Probablemente mejorarán tu precisión, pero recuerda que a menudo conllevan mayor sesgo social.</p>",
1643
+ "model_choices": get_model_tuples(avail_keys),
1644
+ "model_value": current_model if current_model in avail_keys else "The Deep Pattern-Finder",
1645
+ "model_interactive": True,
1646
+ "complexity_max": 8,
1647
+ "complexity_value": min(current_complexity, 8),
1648
+ "feature_set_choices": get_choices_for_rank(2),
1649
+ "feature_set_value": current_feature_set,
1650
+ "feature_set_interactive": True,
1651
+ "data_size_choices": ["Pequeño (20%)", "Medio (60%)", "Grande (80%)", "Completo (100%)"],
1652
+ "data_size_value": current_data_size if current_data_size in DATA_SIZE_DB_MAP else "Pequeño (20%)",
1653
+ "data_size_interactive": True,
1654
+ }
1655
+
1656
+ # Rank 3+: Lead
1657
+ else:
1658
+ avail_keys = list(MODEL_TYPES.keys())
1659
+ return {
1660
+ "rank_message": "# 👑 Rango: Ingeniero Líder\n<p style='font-size:24px; line-height:1.4;'>Todas las herramientas desbloqueadas — ¡optimiza libremente!</p>",
1661
+ "model_choices": get_model_tuples(avail_keys),
1662
+ "model_value": current_model if current_model in avail_keys else "The Balanced Generalist",
1663
+ "model_interactive": True,
1664
+ "complexity_max": 10,
1665
+ "complexity_value": current_complexity,
1666
+ "feature_set_choices": get_choices_for_rank(3),
1667
+ "feature_set_value": current_feature_set,
1668
+ "feature_set_interactive": True,
1669
+ "data_size_choices": ["Pequeño (20%)", "Medio (60%)", "Grande (80%)", "Completo (100%)"],
1670
+ "data_size_value": current_data_size if current_data_size in DATA_SIZE_DB_MAP else "Pequeño (20%)",
1671
+ "data_size_interactive": True,
1672
+ }
1673
+
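For reference, the gating branches above boil down to the following thresholds (a compact restatement of the values in the function, not a replacement for it):

RANK_UNLOCKS = {
    0: ("Trainee", 3,  ["Pequeño (20%)"]),
    1: ("Junior",  6,  ["Pequeño (20%)", "Medio (60%)"]),
    2: ("Senior",  8,  ["Pequeño (20%)", "Medio (60%)", "Grande (80%)", "Completo (100%)"]),
    3: ("Lead",    10, ["Pequeño (20%)", "Medio (60%)", "Grande (80%)", "Completo (100%)"]),
}

def unlocks_for(submission_count):
    # Three or more prior submissions all fall through to the Lead tier,
    # mirroring the final else branch of compute_rank_settings.
    rank, complexity_max, data_sizes = RANK_UNLOCKS[min(submission_count, 3)]
    return rank, complexity_max, data_sizes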
1674
+ # Find components by name to yield updates
1675
+ # --- Existing global component placeholders ---
1676
+ submit_button = None
1677
+ submission_feedback_display = None
1678
+ team_leaderboard_display = None
1679
+ individual_leaderboard_display = None
1680
+ last_submission_score_state = None
1681
+ last_rank_state = None
1682
+ best_score_state = None
1683
+ submission_count_state = None
1684
+ rank_message_display = None
1685
+ model_type_radio = None
1686
+ complexity_slider = None
1687
+ feature_set_checkbox = None
1688
+ data_size_radio = None
1689
+ attempts_tracker_display = None
1690
+ team_name_state = None
1691
+ # Login components
1692
+ login_username = None
1693
+ login_password = None
1694
+ login_submit = None
1695
+ login_error = None
1696
+ # Add missing placeholders for auth states (FIX)
1697
+ username_state = None
1698
+ token_state = None
1699
+ first_submission_score_state = None # assigned globally when the app UI is created
1700
+ # Add state placeholders for readiness gating and preview tracking
1701
+ readiness_state = None
1702
+ was_preview_state = None
1703
+ kpi_meta_state = None
1704
+ last_seen_ts_state = None # Track last seen user timestamp from leaderboard
1705
+
1706
+
1707
+ def get_or_assign_team(username, token=None):
1708
+ """
1709
+ Get the existing team for a user from the leaderboard, or assign a new random team.
1710
+
1711
+ Queries the playground leaderboard to check if the user has prior submissions with
1712
+ a team assignment. If found, returns that team (most recent if multiple submissions).
1713
+ Otherwise assigns a random team. All team names are normalized for consistency.
1714
+
1715
+ Args:
1716
+ username: str, the username to check for existing team
1717
+ token: str, optional authentication token for leaderboard fetch
1718
+
1719
+ Returns:
1720
+ tuple: (team_name: str, is_new: bool)
1721
+ - team_name: The normalized team name (existing or newly assigned)
1722
+ - is_new: True if newly assigned, False if existing team recovered
1723
+ """
1724
+ try:
1725
+ # Query the leaderboard
1726
+ if playground is None:
1727
+ # Fallback to random assignment if playground not available
1728
+ print("Playground not available, assigning random team")
1729
+ new_team = _normalize_team_name(random.choice(TEAM_NAMES))
1730
+ return new_team, True
1731
+
1732
+ # Use centralized helper for authenticated leaderboard fetch
1733
+ leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
1734
+
1735
+ # Check if leaderboard has data and Team column
1736
+ if leaderboard_df is not None and not leaderboard_df.empty and "Team" in leaderboard_df.columns:
1737
+ # Filter for this user's submissions
1738
+ user_submissions = leaderboard_df[leaderboard_df["username"] == username]
1739
+
1740
+ if not user_submissions.empty:
1741
+ # Sort by timestamp (most recent first) if timestamp column exists
1742
+ # Use contextlib.suppress for resilient timestamp parsing
1743
+ if "timestamp" in user_submissions.columns:
1744
+ try:
1745
+ # Attempt to coerce timestamp column to datetime and sort descending
1746
+ user_submissions = user_submissions.copy()
1747
+ user_submissions["timestamp"] = pd.to_datetime(user_submissions["timestamp"], errors='coerce')
1748
+ user_submissions = user_submissions.sort_values("timestamp", ascending=False)
1749
+ print(f"Sorted {len(user_submissions)} submissions by timestamp for {username}")
1750
+ except Exception as ts_error:
1751
+ # If timestamp parsing fails, continue with unsorted DataFrame
1752
+ print(f"Warning: Could not sort by timestamp for {username}: {ts_error}")
1753
+
1754
+ # Get the most recent team assignment (first row after sorting)
1755
+ existing_team = user_submissions.iloc[0]["Team"]
1756
+
1757
+ # Check if team value is valid (not null/empty)
1758
+ if pd.notna(existing_team) and existing_team and str(existing_team).strip():
1759
+ normalized_team = _normalize_team_name(existing_team)
1760
+ print(f"Found existing team for {username}: {normalized_team}")
1761
+ return normalized_team, False
1762
+
1763
+ # No existing team found - assign random
1764
+ new_team = _normalize_team_name(random.choice(TEAM_NAMES))
1765
+ print(f"Assigning new team to {username}: {new_team}")
1766
+ return new_team, True
1767
+
1768
+ except Exception as e:
1769
+ # On any error, fall back to random assignment
1770
+ print(f"Error checking leaderboard for team: {e}")
1771
+ new_team = _normalize_team_name(random.choice(TEAM_NAMES))
1772
+ print(f"Fallback: assigning random team to {username}: {new_team}")
1773
+ return new_team, True
1774
+
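A self-contained sketch of the "most recent team wins" recovery performed above, including one deliberately unparseable timestamp (all rows invented):

import pandas as pd

subs = pd.DataFrame({
    "username":  ["ana", "ana", "ana"],
    "Team":      ["Hawks", "Lions", "Lions"],
    "timestamp": ["2024-05-01 10:00", "not-a-date", "2024-06-02 09:30"],
})

subs = subs.copy()
subs["timestamp"] = pd.to_datetime(subs["timestamp"], errors="coerce")  # bad value becomes NaT
subs = subs.sort_values("timestamp", ascending=False)                   # NaT rows sort last

most_recent_team = subs.iloc[0]["Team"]   # "Lions", from the 2024-06-02 submission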
1775
+ def perform_inline_login(username_input, password_input):
1776
+ """
1777
+ Perform inline authentication and return credentials via gr.State updates.
1778
+
1779
+ Concurrency Note: This function NO LONGER stores per-user credentials in
1780
+ os.environ to prevent cross-user data leakage. Authentication state is
1781
+ returned exclusively via gr.State updates (username_state, token_state,
1782
+ team_name_state). Password is never stored server-side.
1783
+
1784
+ Args:
1785
+ username_input: str, the username entered by user
1786
+ password_input: str, the password entered by user
1787
+
1788
+ Returns:
1789
+ dict: Gradio component updates for login UI elements and submit button
1790
+ - On success: hides login form, shows success message, enables submit
1791
+ - On failure: keeps login form visible, shows error with signup link
1792
+ """
1793
+ from aimodelshare.aws import get_aws_token
1794
+
1795
+ # Validate inputs
1796
+ if not username_input or not username_input.strip():
1797
+ error_html = """
1798
+ <div style='background:#fef2f2; padding:12px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
1799
+ <p style='margin:0; color:#991b1b; font-weight:500;'>⚠️ Username is required</p>
1800
+ </div>
1801
+ """
1802
+ return {
1803
+ login_username: gr.update(),
1804
+ login_password: gr.update(),
1805
+ login_submit: gr.update(),
1806
+ login_error: gr.update(value=error_html, visible=True),
1807
+ submit_button: gr.update(),
1808
+ submission_feedback_display: gr.update(),
1809
+ team_name_state: gr.update(),
1810
+ username_state: gr.update(),
1811
+ token_state: gr.update()
1812
+ }
1813
+
1814
+ if not password_input or not password_input.strip():
1815
+ error_html = """
1816
+ <div style='background:#fef2f2; padding:12px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
1817
+ <p style='margin:0; color:#991b1b; font-weight:500;'>⚠️ Password is required</p>
1818
+ </div>
1819
+ """
1820
+ return {
1821
+ login_username: gr.update(),
1822
+ login_password: gr.update(),
1823
+ login_submit: gr.update(),
1824
+ login_error: gr.update(value=error_html, visible=True),
1825
+ submit_button: gr.update(),
1826
+ submission_feedback_display: gr.update(),
1827
+ team_name_state: gr.update(),
1828
+ username_state: gr.update(),
1829
+ token_state: gr.update()
1830
+ }
1831
+
1832
+ # Concurrency Note: get_aws_token() reads credentials from os.environ, which creates
1833
+ # a race condition in multi-threaded environments. We use _auth_lock to serialize
1834
+ # credential injection, preventing concurrent requests from seeing each other's
1835
+ # credentials. The password is immediately cleared after the auth attempt.
1836
+ #
1837
+ # FUTURE: Ideally get_aws_token() would be refactored to accept credentials as
1838
+ # parameters instead of reading from os.environ. This lock is a workaround.
1839
+ username_clean = username_input.strip()
1840
+
1841
+ # Attempt to get AWS token with serialized credential injection
1842
+ try:
1843
+ with _auth_lock:
1844
+ os.environ["username"] = username_clean
1845
+ os.environ["password"] = password_input.strip() # Only for get_aws_token() call
1846
+ try:
1847
+ token = get_aws_token()
1848
+ finally:
1849
+ # SECURITY: Always clear credentials from environment, even on exception
1850
+ # Also clear stale env vars from previous implementations within the lock
1851
+ # to prevent any race conditions during cleanup
1852
+ os.environ.pop("password", None)
1853
+ os.environ.pop("username", None)
1854
+ os.environ.pop("AWS_TOKEN", None)
1855
+ os.environ.pop("TEAM_NAME", None)
1856
+
1857
+ # Get or assign team for this user with explicit token (already normalized by get_or_assign_team)
1858
+ team_name, is_new_team = get_or_assign_team(username_clean, token=token)
1859
+ # Normalize team name before storing (defensive - already normalized by get_or_assign_team)
1860
+ team_name = _normalize_team_name(team_name)
1861
+
1862
+ # Translate team name for display only (keep team_name_state in English)
1863
+ display_team_name = translate_team_name_for_display(team_name, UI_TEAM_LANG)
1864
+
1865
+ # Build success message based on whether team is new or existing
1866
+ if is_new_team:
1867
+ team_message = f"¡Todo listo! Tu equipo es: <b>{display_team_name}</b> 🎉"
1868
+ else:
1869
+ team_message = f"Hola de nuevo! Sigues en el equipo: <b>{display_team_name}</b> ✅"
1870
+
1871
+ # Success: hide login form, show success message with team info, enable submit button
1872
+ success_html = f"""
1873
+ <div style='background:#f0fdf4; padding:16px; border-radius:8px; border-left:4px solid #16a34a; margin-top:12px;'>
1874
+ <p style='margin:0; color:#15803d; font-weight:600; font-size:1.1rem;'>✓ ¡Sesión iniciada con éxito!</p>
1875
+ <p style='margin:8px 0 0 0; color:#166534; font-size:0.95rem;'>
1876
+ {team_message}
1877
+ </p>
1878
+ <p style='margin:8px 0 0 0; color:#166534; font-size:0.95rem;'>
1879
+ Haz clic de nuevo en "Construir y enviar el modelo" para publicar tu puntuación.
1880
+ </p>
1881
+ </div>
1882
+ """
1883
+ return {
1884
+ login_username: gr.update(visible=False),
1885
+ login_password: gr.update(visible=False),
1886
+ login_submit: gr.update(visible=False),
1887
+ login_error: gr.update(value=success_html, visible=True),
1888
+ submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
1889
+ submission_feedback_display: gr.update(visible=False),
1890
+ team_name_state: gr.update(value=team_name),
1891
+ username_state: gr.update(value=username_clean),
1892
+ token_state: gr.update(value=token)
1893
+ }
1894
+
1895
+ except Exception as e:
1896
+ # Note: Credentials are already cleaned up by the finally block in the try above.
1897
+ # The lock ensures no race condition during cleanup.
1898
+
1899
+ # Authentication failed: show error with signup link
1900
+ error_html = f"""
1901
+ <div style='background:#fef2f2; padding:16px; border-radius:8px; border-left:4px solid #ef4444; margin-top:12px;'>
1902
+ <p style='margin:0; color:#991b1b; font-weight:600; font-size:1.1rem;'>⚠️ Authentication failed</p>
1903
+ <p style='margin:8px 0; color:#7f1d1d; font-size:0.95rem;'>
1904
+ Could not verify your credentials. Please check your username and password.
1905
+ </p>
1906
+ <p style='margin:8px 0 0 0; color:#7f1d1d; font-size:0.95rem;'>
1907
+ <strong>New user?</strong> Create a free account at
1908
+ <a href='https://www.modelshare.ai/login' target='_blank'
1909
+ style='color:#dc2626; text-decoration:underline;'>modelshare.ai/login</a>
1910
+ </p>
1911
+ <details style='margin-top:12px; font-size:0.85rem; color:#7f1d1d;'>
1912
+ <summary style='cursor:pointer;'>Technical details</summary>
1913
+ <pre style='margin-top:8px; padding:8px; background:#fee; border-radius:4px; overflow-x:auto;'>{str(e)}</pre>
1914
+ </details>
1915
+ </div>
1916
+ """
1917
+ return {
1918
+ login_username: gr.update(visible=True),
1919
+ login_password: gr.update(visible=True),
1920
+ login_submit: gr.update(visible=True),
1921
+ login_error: gr.update(value=error_html, visible=True),
1922
+ submit_button: gr.update(),
1923
+ submission_feedback_display: gr.update(),
1924
+ team_name_state: gr.update(),
1925
+ username_state: gr.update(),
1926
+ token_state: gr.update()
1927
+ }
1928
+
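Read in isolation, the lock-guarded credential injection above follows this pattern; `auth_lock` stands in for the module's `_auth_lock`, and `fetch_token` for `get_aws_token()`, which reads credentials from `os.environ`:

import os
import threading

auth_lock = threading.Lock()   # stand-in for the module-level _auth_lock

def login_with_env_credentials(username, password, fetch_token):
    """Serialize env-var credential injection so concurrent logins never overlap."""
    with auth_lock:
        os.environ["username"] = username
        os.environ["password"] = password
        try:
            return fetch_token()          # e.g. get_aws_token(), which reads os.environ
        finally:
            # Always scrub credentials, even if fetch_token() raises.
            os.environ.pop("password", None)
            os.environ.pop("username", None)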
1929
+ def run_experiment(
1930
+ model_name_key, # Receives ENGLISH KEY (e.g., "The Balanced Generalist")
1931
+ complexity_level,
1932
+ feature_set,
1933
+ data_size_str, # Receives SPANISH LABEL (e.g., "Pequeño (20%)")
1934
+ team_name,
1935
+ last_submission_score,
1936
+ last_rank,
1937
+ submission_count,
1938
+ first_submission_score,
1939
+ best_score,
1940
+ username=None,
1941
+ token=None,
1942
+ readiness_flag=None,
1943
+ was_preview_prev=None,
1944
+ progress=gr.Progress()
1945
+ ):
1946
+ """
1947
+ Core experiment: Uses 'yield' for visual updates and progress bar.
1948
+ Updated to translate Spanish inputs to English keys for Cache/DB lookup.
1949
+ """
1950
+ # --- COLLISION GUARDS ---
1951
+ if isinstance(submit_button, dict) or isinstance(submission_feedback_display, dict) or isinstance(kpi_meta_state, dict) or isinstance(was_preview_state, dict):
1952
+ error_html = """
1953
+ <div class='kpi-card' style='border-color: #ef4444;'>
1954
+ <h2 style='color: #111827; margin-top:0;'>⚠️ Error de configuración</h2>
1955
+ <div class='kpi-card-body'>
1956
+ <p style='color: #991b1b;'>Se ha detectado un conflicto de parámetros (shadowing). Las variables globales del componente han sido sobrescritas por parámetros locales.</p>
1957
+ <p style='color: #7f1d1d; margin-top: 8px;'>Por favor, actualiza la página e inténtalo de nuevo. Si el problema persiste, contacta con soporte técnico.</p>
1958
+ </div>
1959
+ </div>
1960
+ """
1961
+ yield {
1962
+ submission_feedback_display: gr.update(value=error_html, visible=True),
1963
+ submit_button: gr.update(value="🔬 Construir y enviar modelo", interactive=True)
1964
+ }
1965
+ return
1966
+
1967
+ # --- TRANSLATION LOGIC ---
1968
+ # 1. Translate Data Size to English for DB/Cache Lookup
1969
+ db_data_size = DATA_SIZE_DB_MAP.get(data_size_str, "Small (20%)")
1970
+
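# For example, the Spanish UI label "Pequeño (20%)" is expected to map to the
# English label the caches were built with (falling back to "Small (20%)" if
# the label is unknown), so all downstream cache/DB lookups stay in English.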
1971
+ # Sanitize feature_set
1972
+ sanitized_feature_set = []
1973
+ for feat in (feature_set or []):
1974
+ sanitized_feature_set.append(feat.get("value", str(feat)) if isinstance(feat, dict) else (feat[1] if isinstance(feat, tuple) else str(feat)))
1975
+ feature_set = sanitized_feature_set
1976
+
1977
+ if readiness_flag is not None: ready = readiness_flag
1978
+ else: ready = _is_ready()
1979
+
1980
+ if not username: username = "Unknown_User"
1981
+
1982
+ def get_status_html(step_num, title, subtitle):
1983
+ return f"<div class='processing-status'><span class='processing-icon'>⚙️</span><div class='processing-text'>Step {step_num}/5: {title}</div><div class='processing-subtext'>{subtitle}</div></div>"
1984
+
1985
+ # --- Stage 1: Lock UI and give initial feedback ---
1986
+ progress(0.1, desc="Iniciando experimento...")
1987
+ initial_updates = {
1988
+ submit_button: gr.update(value="⏳ Experimento en marcha...", interactive=False),
1989
+ submission_feedback_display: gr.update(value=get_status_html(1, "Iniciando", "Preparando las variables de tus datos..."), visible=True), # Make sure it's visible
1990
+ login_error: gr.update(visible=False), # Hide login success/error message
1991
+ attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count))
1992
+ }
1993
+
1994
+ if not model_name_key or model_name_key not in MODEL_TYPES:
1995
+ model_name_key = DEFAULT_MODEL
1996
+ complexity_level = safe_int(complexity_level, 2)
1997
+
1998
+ # Check readiness
1999
+ if playground is None or not ready:
2000
+ settings = compute_rank_settings(submission_count, model_name_key, complexity_level, feature_set, data_size_str)
2001
+ error_msg = "<p style='text-align:center; color:red; padding:20px 0;'>Los datos aún se están inicializando. Por favor, espera un momento y vuelve a intentarlo.</p>"
2002
+ yield { submission_feedback_display: gr.update(value=error_msg, visible=True), submit_button: gr.update(value="🔬 Construir y Enviar Modelo", interactive=True) }
2003
+ return
2004
+
2005
+ try:
2006
+ # --- Stage 2: Smart Build (Cache vs Train) ---
2007
+ progress(0.3, desc="Building Model...")
2008
+
2009
+ # 1. Generate Cache Key (ENGLISH KEYS)
2010
+ sanitized_features = sorted([str(f) for f in feature_set])
2011
+ feature_key = ",".join(sanitized_features)
2012
+ cache_key = f"{model_name_key}|{complexity_level}|{db_data_size}|{feature_key}"
2013
+
2014
+ _log(f"Generated Key: {cache_key}")
2015
+
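# Illustrative key shape (values hypothetical): a run with model
# "The Rule-Maker", complexity 4, the medium data size and two selected
# features would produce something like
#   "The Rule-Maker|4|Medium (60%)|feat_a,feat_b"
# i.e. English model key | complexity | English data-size label | sorted,
# comma-joined feature names.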
2016
+ # 2. Check Cache
2017
+ cached_predictions = get_cached_prediction(cache_key)
2018
+
2019
+ predictions = None
2020
+ tuned_model = None
2021
+ preprocessor = None
2022
+
2023
+ if cached_predictions:
2024
+ # === FAST PATH (Zero CPU) ===
2025
+ _log(f"⚡ CACHE HIT: {cache_key}")
2026
+ yield {
2027
+ submission_feedback_display: gr.update(value=get_status_html(2, "Entrenando Modelo", "⚡ La máquina está aprendiendo de la historia..."), visible=True),
2028
+ login_error: gr.update(visible=False)
2029
+ }
2030
+ if isinstance(cached_predictions, str):
2031
+ predictions = [int(c) for c in cached_predictions]
2032
+ else:
2033
+ predictions = cached_predictions
2034
+ tuned_model = None
2035
+ preprocessor = None
2036
+
2037
+ else:
2038
+ # === CACHE MISS (Training Disabled for Safety) ===
2039
+ msg = f"❌ CACHE MISS: {cache_key}"
2040
+ _log(msg)
2041
+
2042
+ error_html = f"""
2043
+ <div style='background:#fee2e2; padding:16px; border-radius:8px; border:2px solid #ef4444; color:#991b1b; text-align:center;'>
2044
+ <h3 style='margin:0;'>⚠️ Configuración no encontrada</h3>
2045
+ <p style='margin:8px 0;'>Esta combinación específica de parámetros no se ha encontrado en nuestra base de datos.</p>
2046
+ <p style='font-size:0.9em;'>Para garantizar la estabilidad del sistema, el entrenamiento en tiempo real está desactivado. Por favor, ajusta la configuración (por ejemplo, cambia el "Tamaño de datos" o la "Estrategia del modelo") y vuelve a intentarlo.</p>
2047
+ </div>
2048
+ """
2049
+
2050
+ yield {
2051
+ submission_feedback_display: gr.update(value=error_html, visible=True),
2052
+ submit_button: gr.update(value="🔬 Construir y Enviar Modelo", interactive=True),
2053
+ login_error: gr.update(visible=False)
2054
+ }
2055
+ return
2056
+
2057
+ # --- Stage 3: Submit (API Call 1) ---
2058
+ # AUTHENTICATION GATE: Check for token before submission
2059
+
2060
+ if token is None:
2061
+ # User not authenticated - compute preview score
2062
+ progress(0.6, desc="Computing Preview Score...")
2063
+
2064
+ # NOTE: Logic updated to handle cached predictions
2065
+ from sklearn.metrics import accuracy_score
2066
+
2067
+ # Ensure format is correct (list vs array)
2068
+ if isinstance(predictions, list):
2069
+ preds_for_metric = np.array(predictions)
2070
+ else:
2071
+ preds_for_metric = predictions
2072
+
2073
+ preview_score = accuracy_score(Y_TEST, preds_for_metric)
2074
+
2075
+ # ... (Rest of preview logic remains the same) ...
2076
+ preview_kpi_meta = {
2077
+ "was_preview": True, "preview_score": preview_score, "ready_at_run_start": ready,
2078
+ "poll_iterations": 0, "local_test_accuracy": preview_score,
2079
+ "this_submission_score": None, "new_best_accuracy": None, "rank": None
2080
+ }
2081
+
2082
+ # 1. Generate the styled preview card
2083
+ preview_card_html = _build_kpi_card_html(
2084
+ new_score=preview_score, last_score=0, new_rank=0, last_rank=0,
2085
+ submission_count=-1, is_preview=True, is_pending=False, local_test_accuracy=None
2086
+ )
2087
+
2088
+ # 2. Inject login text
2089
+ login_prompt_text_html = build_login_prompt_html()
2090
+ closing_div_index = preview_card_html.rfind("</div>")
2091
+ if closing_div_index != -1:
2092
+ combined_html = preview_card_html[:closing_div_index] + login_prompt_text_html + "</div>"
2093
+ else:
2094
+ combined_html = preview_card_html + login_prompt_text_html
2095
+
2096
+ settings = compute_rank_settings(submission_count, model_name_key, complexity_level, feature_set, data_size_str)
2097
+
2098
+ gate_updates = {
2099
+ submission_feedback_display: gr.update(value=combined_html, visible=True),
2100
+ submit_button: gr.update(value="Sign In Required", interactive=False),
2101
+ login_username: gr.update(visible=True), login_password: gr.update(visible=True),
2102
+ login_submit: gr.update(visible=True), login_error: gr.update(value="", visible=False),
2103
+ team_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=True),
2104
+ individual_leaderboard_display: _build_skeleton_leaderboard(rows=6, is_team=False),
2105
+ last_submission_score_state: last_submission_score, last_rank_state: last_rank,
2106
+ best_score_state: best_score, submission_count_state: submission_count,
2107
+ first_submission_score_state: first_submission_score,
2108
+ rank_message_display: settings["rank_message"],
2109
+ model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
2110
+ complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
2111
+ feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
2112
+ data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
2113
+ attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
2114
+ was_preview_state: True, kpi_meta_state: preview_kpi_meta, last_seen_ts_state: None
2115
+ }
2116
+ yield gate_updates
2117
+ return # Stop here
2118
+
2119
+ # --- ATTEMPT LIMIT CHECK ---
2120
+ if submission_count >= ATTEMPT_LIMIT:
2121
+ limit_warning_html = f"""
2122
+ <div class='kpi-card' style='border-color: #ef4444;'>
2123
+ <h2 style='color: #111827; margin-top:0;'>🛑 Límite de envíos alcanzado</h2>
2124
+ <div class='kpi-card-body'>
2125
+ <div class='kpi-metric-box'>
2126
+ <p class='kpi-label'>Intentos utilizados</p>
2127
+ <p class='kpi-score' style='color: #ef4444;'>{ATTEMPT_LIMIT} / {ATTEMPT_LIMIT}</p>
2128
+ </div>
2129
+ </div>
2130
+ <div style='margin-top: 16px; background:#fef2f2; padding:16px; border-radius:12px; text-align:left; font-size:0.98rem; line-height:1.4;'>
2131
+ <p style='margin:0; color:#991b1b;'><b>¡Buen trabajo!</b> Desplázate hacia abajo hasta «Finalizar y reflexionar».</p>
2132
+ </div>
2133
+ </div>"""
2134
+ settings = compute_rank_settings(submission_count, model_name_key, complexity_level, feature_set, data_size_str)
2135
+ limit_reached_updates = {
2136
+ submission_feedback_display: gr.update(value=limit_warning_html, visible=True),
2137
+ submit_button: gr.update(value="🛑 Límite de envíos alcanzado", interactive=False),
2138
+ model_type_radio: gr.update(interactive=False), complexity_slider: gr.update(interactive=False),
2139
+ feature_set_checkbox: gr.update(interactive=False), data_size_radio: gr.update(interactive=False),
2140
+ attempts_tracker_display: gr.update(value=f"<div style='text-align:center; padding:8px; margin:8px 0; background:#fef2f2; border-radius:8px; border:1px solid #ef4444;'><p style='margin:0; color:#991b1b; font-weight:600;'>🛑 Intentos utilizados: {ATTEMPT_LIMIT}/{ATTEMPT_LIMIT}</p></div>"),
2141
+ team_leaderboard_display: team_leaderboard_display, individual_leaderboard_display: individual_leaderboard_display,
2142
+ last_submission_score_state: last_submission_score, last_rank_state: last_rank,
2143
+ best_score_state: best_score, submission_count_state: submission_count,
2144
+ first_submission_score_state: first_submission_score, rank_message_display: settings["rank_message"],
2145
+ login_username: gr.update(visible=False), login_password: gr.update(visible=False),
2146
+ login_submit: gr.update(visible=False), login_error: gr.update(visible=False),
2147
+ was_preview_state: False, kpi_meta_state: {}, last_seen_ts_state: None
2148
+ }
2149
+ yield limit_reached_updates
2150
+ return
2151
+
2152
+ progress(0.5, desc="Enviando a la nube...")
2153
+ yield {
2154
+ submission_feedback_display: gr.update(value=get_status_html(3, "Enviando", "Enviando el modelo al servidor de la competición..."), visible=True),
2155
+ login_error: gr.update(visible=False)
2156
+ }
2157
+
2158
+
2159
+ description = f"{model_name_key} (Cplx:{complexity_level} Size:{data_size_str})"
2160
+ tags = f"team:{team_name},model:{model_name_key}"
2161
+
2162
+ # 1. FETCH BASELINE
2163
+ baseline_leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
2164
+
2165
+ from sklearn.metrics import accuracy_score
2166
+ local_test_accuracy = accuracy_score(Y_TEST, predictions)
2167
+
2168
+ # 2. SUBMIT & CAPTURE ACCURACY
2169
+ def _submit():
2170
+ return playground.submit_model(
2171
+ model=tuned_model, # This can now be None!
2172
+ preprocessor=preprocessor, # This can now be None!
2173
+ prediction_submission=predictions, # We explicitly send predictions
2174
+ input_dict={'description': description, 'tags': tags},
2175
+ custom_metadata={'Team': team_name, 'Moral_Compass': 0},
2176
+ token=token,
2177
+ return_metrics=["accuracy"]
2178
+ )
2179
+
2180
+ try:
2181
+ submit_result = _retry_with_backoff(_submit, description="model submission")
2182
+ if isinstance(submit_result, tuple) and len(submit_result) == 3:
2183
+ _, _, metrics = submit_result
2184
+ if metrics and "accuracy" in metrics and metrics["accuracy"] is not None:
2185
+ this_submission_score = float(metrics["accuracy"])
2186
+ else:
2187
+ this_submission_score = local_test_accuracy
2188
+ else:
2189
+ this_submission_score = local_test_accuracy
2190
+ except Exception as e:
2191
+ _log(f"Falló el análisis de la respuesta del envío: {e}. Uso de precisión local.")
2192
+ this_submission_score = local_test_accuracy
2193
+
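# _retry_with_backoff is assumed to retry transient failures with exponential
# backoff before giving up; conceptually something like the following
# (hypothetical sketch, not the module's actual implementation):
#
#     def _retry_with_backoff(fn, description="", attempts=3, base_delay=1.0):
#         for attempt in range(attempts):
#             try:
#                 return fn()
#             except Exception:
#                 if attempt == attempts - 1:
#                     raise
#                 time.sleep(base_delay * (2 ** attempt))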
2194
+ _log(f"Envío exitoso. Puntuación del servidor: {this_submission_score}")
2195
+
2196
+ try:
2197
+ # Short timeout to trigger the lambda without hanging the UI
2198
+ _log("Iniciando fusión en el backend...")
2199
+ playground.get_leaderboard(token=token)
2200
+ except Exception:
2201
+ # We ignore errors here because the 'submit_model' post
2202
+ # already succeeded. This is just a cleanup task.
2203
+ pass
2204
+ # -------------------------------------------------------------------------
2205
+
2206
+ # Immediately increment submission count...
2207
+ new_submission_count = submission_count + 1
2208
+ new_first_submission_score = first_submission_score
2209
+ if submission_count == 0 and first_submission_score is None:
2210
+ new_first_submission_score = this_submission_score
2211
+
2212
+ # --- Stage 4: Local Rank Calculation (Optimistic) ---
2213
+ progress(0.9, desc="Calculando rango...")
2214
+
2215
+ # 3. SIMULATE UPDATED LEADERBOARD
2216
+ simulated_df = baseline_leaderboard_df.copy() if baseline_leaderboard_df is not None else pd.DataFrame()
2217
+
2218
+ # We use pd.Timestamp.now() to ensure pandas sorting logic sees this as the absolute latest
2219
+ new_row = pd.DataFrame([{
2220
+ "username": username,
2221
+ "accuracy": this_submission_score,
2222
+ "Team": team_name,
2223
+ "timestamp": pd.Timestamp.now(),
2224
+ "version": "latest"
2225
+ }])
2226
+
2227
+ if not simulated_df.empty:
2228
+ simulated_df = pd.concat([simulated_df, new_row], ignore_index=True)
2229
+ else:
2230
+ simulated_df = new_row
2231
+
2232
+ # 4. GENERATE TABLES (Use helper for tables only)
2233
+ # We ignore the kpi_card return from this function because it might use internal sorting
2234
+ # that doesn't respect our new row perfectly.
2235
+ team_html, individual_html, _, new_best_accuracy, new_rank, _ = generate_competitive_summary(
2236
+ simulated_df, team_name, username, last_submission_score, last_rank, submission_count
2237
+ )
2238
+
2239
+ # 5. GENERATE KPI CARD EXPLICITLY (The Authority Fix)
2240
+ # We manually build the card using the score we KNOW we just got.
2241
+ kpi_card_html = _build_kpi_card_html(
2242
+ new_score=this_submission_score,
2243
+ last_score=last_submission_score,
2244
+ new_rank=new_rank,
2245
+ last_rank=last_rank,
2246
+ submission_count=submission_count,
2247
+ is_preview=False,
2248
+ is_pending=False
2249
+ )
2250
+
2251
+ # ... (Previous Stage 1-4 logic remains unchanged) ...
2252
+
2253
+ # --- Stage 5: Final UI Update ---
2254
+ progress(1.0, desc="¡Completado!")
2255
+
2256
+ success_kpi_meta = {
2257
+ "was_preview": False, "preview_score": None, "ready_at_run_start": ready,
2258
+ "poll_iterations": 0, "local_test_accuracy": local_test_accuracy,
2259
+ "this_submission_score": this_submission_score, "new_best_accuracy": new_best_accuracy,
2260
+ "rank": new_rank, "pending": False, "optimistic_fallback": True
2261
+ }
2262
+
2263
+ settings = compute_rank_settings(new_submission_count, model_name_key, complexity_level, feature_set, data_size_str)
2264
+
2265
+ # -------------------------------------------------------------------------
2266
+ # NEW LOGIC: Check for Limit Reached immediately AFTER this submission
2267
+ # -------------------------------------------------------------------------
2268
+ limit_reached = new_submission_count >= ATTEMPT_LIMIT
2269
+
2270
+ # Prepare the UI state based on whether limit is reached
2271
+ if limit_reached:
2272
+ # 1. Append the Limit Warning HTML *below* the Result Card
2273
+ limit_html = f"""
2274
+ <div style='margin-top: 16px; border: 2px solid #ef4444; background:#fef2f2; padding:16px; border-radius:12px; text-align:left;'>
2275
+ <h3 style='margin:0 0 8px 0; color:#991b1b;'>🛑 Límite de envíos alcanzado ({ATTEMPT_LIMIT}/{ATTEMPT_LIMIT})</h3>
2276
+ <p style='margin:0; color:#7f1d1d; line-height:1.4;'>
2277
+ <b>Has utilizado todos tus intentos para esta sesión.</b><br>
2278
+ Revisa tus resultados finales arriba, luego desplázate hacia abajo hasta 'Finalizar y Reflexionar' para continuar.
2279
+ </p>
2280
+ </div>
2281
+ """
2282
+ final_html_display = kpi_card_html + limit_html
2283
+
2284
+ # 2. Disable all controls
2285
+ button_update = gr.update(value="🛑 Límite alcanzado", interactive=False)
2286
+ interactive_state = False
2287
+ tracker_html = f"<div style='text-align:center; padding:8px; margin:8px 0; background:#fef2f2; border-radius:8px; border:1px solid #ef4444;'><p style='margin:0; color:#991b1b; font-weight:600;'>🛑 Intentos utilizados: {ATTEMPT_LIMIT}/{ATTEMPT_LIMIT} (Max)</p></div>"
2288
+
2289
+ else:
2290
+ # Normal State: Show just the result card and keep controls active
2291
+ final_html_display = kpi_card_html
2292
+ button_update = gr.update(value="🔬 Construir y enviar modelo", interactive=True)
2293
+ interactive_state = True
2294
+ tracker_html = _build_attempts_tracker_html(new_submission_count)
2295
+
2296
+ # -------------------------------------------------------------------------
2297
+
2298
+ final_updates = {
2299
+ submission_feedback_display: gr.update(value=final_html_display, visible=True),
2300
+ team_leaderboard_display: team_html,
2301
+ individual_leaderboard_display: individual_html,
2302
+ last_submission_score_state: this_submission_score,
2303
+ last_rank_state: new_rank,
2304
+ best_score_state: new_best_accuracy,
2305
+ submission_count_state: new_submission_count,
2306
+ first_submission_score_state: new_first_submission_score,
2307
+ rank_message_display: settings["rank_message"],
2308
+
2309
+ # Apply the interactive state calculated above
2310
+ model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=(settings["model_interactive"] and interactive_state)),
2311
+ complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"], interactive=interactive_state),
2312
+ feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=(settings["feature_set_interactive"] and interactive_state)),
2313
+ data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=(settings["data_size_interactive"] and interactive_state)),
2314
+
2315
+ submit_button: button_update,
2316
+
2317
+ login_username: gr.update(visible=False), login_password: gr.update(visible=False),
2318
+ login_submit: gr.update(visible=False), login_error: gr.update(visible=False),
2319
+ attempts_tracker_display: gr.update(value=tracker_html),
2320
+ was_preview_state: False,
2321
+ kpi_meta_state: success_kpi_meta,
2322
+ last_seen_ts_state: time.time()
2323
+ }
2324
+ yield final_updates
2325
+
2326
+ except Exception as e:
2327
+ error_msg = f"ERROR: {e}"
2328
+ _log(f"Exception in run_experiment: {error_msg}")
2329
+ settings = compute_rank_settings(
2330
+ submission_count, model_name_key, complexity_level, feature_set, data_size_str
2331
+ )
2332
+
2333
+ exception_kpi_meta = {
2334
+ "was_preview": False,
2335
+ "preview_score": None,
2336
+ "ready_at_run_start": ready if 'ready' in locals() else False,
2337
+ "poll_iterations": 0,
2338
+ "local_test_accuracy": None,
2339
+ "this_submission_score": None,
2340
+ "new_best_accuracy": None,
2341
+ "rank": None,
2342
+ "error": str(e)
2343
+ }
2344
+
2345
+ error_updates = {
2346
+ submission_feedback_display: gr.update(
2347
+ f"<p style='text-align:center; color:red; padding:20px 0;'>Ocurrió un error: {error_msg}</p>", visible=True
2348
+ ),
2349
+ team_leaderboard_display: f"<p style='text-align:center; color:red; padding-top:20px;'>An error occurred: {error_msg}</p>",
2350
+ individual_leaderboard_display: f"<p style='text-align:center; color:red; padding-top:20px;'>An error occurred: {error_msg}</p>",
2351
+ last_submission_score_state: last_submission_score,
2352
+ last_rank_state: last_rank,
2353
+ best_score_state: best_score,
2354
+ submission_count_state: submission_count,
2355
+ first_submission_score_state: first_submission_score,
2356
+ rank_message_display: settings["rank_message"],
2357
+ model_type_radio: gr.update(choices=settings["model_choices"], value=settings["model_value"], interactive=settings["model_interactive"]),
2358
+ complexity_slider: gr.update(minimum=1, maximum=settings["complexity_max"], value=settings["complexity_value"]),
2359
+ feature_set_checkbox: gr.update(choices=settings["feature_set_choices"], value=settings["feature_set_value"], interactive=settings["feature_set_interactive"]),
2360
+ data_size_radio: gr.update(choices=settings["data_size_choices"], value=settings["data_size_value"], interactive=settings["data_size_interactive"]),
2361
+ submit_button: gr.update(value="🔬 Build & Submit Model", interactive=True),
2362
+ login_username: gr.update(visible=False),
2363
+ login_password: gr.update(visible=False),
2364
+ login_submit: gr.update(visible=False),
2365
+ login_error: gr.update(visible=False),
2366
+ attempts_tracker_display: gr.update(value=_build_attempts_tracker_html(submission_count)),
2367
+ was_preview_state: False,
2368
+ kpi_meta_state: exception_kpi_meta,
2369
+ last_seen_ts_state: None
2370
+ }
2371
+ yield error_updates
2372
+
2373
+
2374
+ def on_initial_load(username, token=None, team_name=""):
2375
+ """
2376
+ Updated to show "Welcome & CTA" if the SPECIFIC USER has 0 submissions,
2377
+ even if the leaderboard/team already has data from others.
2378
+ """
2379
+ initial_ui = compute_rank_settings(
2380
+ 0, DEFAULT_MODEL, 2, DEFAULT_FEATURE_SET, DEFAULT_DATA_SIZE
2381
+ )
2382
+
2383
+ # 1. Prepare the Welcome HTML
2384
+ # Translate team name to Spanish for display only (keep team_name in English for logic)
2385
+ display_team = translate_team_name_for_display(team_name, UI_TEAM_LANG) if team_name else "Tu Equipo"
2386
+
2387
+ welcome_html = f"""
2388
+ <div style='text-align:center; padding: 30px 20px;'>
2389
+ <div style='font-size: 3rem; margin-bottom: 10px;'>👋</div>
2390
+ <h3 style='margin: 0 0 8px 0; color: #111827; font-size: 1.5rem;'>¡Bienvenido a <b>{display_team}</b>!</h3>
2391
+ <p style='font-size: 1.1rem; color: #4b5563; margin: 0 0 20px 0;'>
2392
+ Tu equipo está esperando tu ayuda para mejorar la IA.
2393
+ </p>
2394
+
2395
+ <div style='background:#eff6ff; padding:16px; border-radius:12px; border:2px solid #bfdbfe; display:inline-block;'>
2396
+ <p style='margin:0; color:#1e40af; font-weight:bold; font-size:1.1rem;'>
2397
+ 👈 ¡Haz clic en el botón "Construir y enviar modelo" para comenzar a jugar!
2398
+ </p>
2399
+ </div>
2400
+ </div>
2401
+ """
2402
+
2403
+ # Check background init
2404
+ with INIT_LOCK:
2405
+ background_ready = INIT_FLAGS["leaderboard"]
2406
+
2407
+ should_attempt_fetch = background_ready or (token is not None)
2408
+ full_leaderboard_df = None
2409
+
2410
+ if should_attempt_fetch:
2411
+ try:
2412
+ if playground:
2413
+ full_leaderboard_df = _get_leaderboard_with_optional_token(playground, token)
2414
+ except Exception as e:
2415
+ print(f"Error on initial load fetch: {e}")
2416
+ full_leaderboard_df = None
2417
+
2418
+ # -------------------------------------------------------------------------
2419
+ # LOGIC UPDATE: Check if THIS user has submitted anything
2420
+ # -------------------------------------------------------------------------
2421
+ user_has_submitted = False
2422
+ if full_leaderboard_df is not None and not full_leaderboard_df.empty:
2423
+ if "username" in full_leaderboard_df.columns and username:
2424
+ # Check if the username exists in the dataframe
2425
+ user_has_submitted = username in full_leaderboard_df["username"].values
2426
+
2427
+ # Decision Logic
2428
+ if not user_has_submitted:
2429
+ # CASE 1: New User (or first time loading session) -> FORCE WELCOME
2430
+ # regardless of whether the leaderboard has other people's data.
2431
+ team_html = welcome_html
2432
+ individual_html = "<p style='text-align:center; color:#6b7280; padding-top:40px;'>Submit your model to see where you rank!</p>"
2433
+
2434
+ elif full_leaderboard_df is None or full_leaderboard_df.empty:
2435
+ # CASE 2: Returning user, but data fetch failed -> Show Skeleton
2436
+ team_html = _build_skeleton_leaderboard(rows=6, is_team=True)
2437
+ individual_html = _build_skeleton_leaderboard(rows=6, is_team=False)
2438
+
2439
+ else:
2440
+ # CASE 3: Returning user WITH data -> Show Real Tables
2441
+ try:
2442
+ team_html, individual_html, _, _, _, _ = generate_competitive_summary(
2443
+ full_leaderboard_df,
2444
+ team_name,
2445
+ username,
2446
+ 0, 0, -1
2447
+ )
2448
+ except Exception as e:
2449
+ print(f"Error generating summary HTML: {e}")
2450
+ team_html = "<p style='text-align:center; color:red; padding-top:20px;'>Error rendering leaderboard.</p>"
2451
+ individual_html = "<p style='text-align:center; color:red; padding-top:20px;'>Error rendering leaderboard.</p>"
2452
+
2453
+ return (
2454
+ get_model_card(DEFAULT_MODEL),
2455
+ team_html,
2456
+ individual_html,
2457
+ initial_ui["rank_message"],
2458
+ gr.update(choices=initial_ui["model_choices"], value=initial_ui["model_value"], interactive=initial_ui["model_interactive"]),
2459
+ gr.update(minimum=1, maximum=initial_ui["complexity_max"], value=initial_ui["complexity_value"]),
2460
+ gr.update(choices=initial_ui["feature_set_choices"], value=initial_ui["feature_set_value"], interactive=initial_ui["feature_set_interactive"]),
2461
+ gr.update(choices=initial_ui["data_size_choices"], value=initial_ui["data_size_value"], interactive=initial_ui["data_size_interactive"]),
2462
+ )
2463
+
2464
+
2465
+ # -------------------------------------------------------------------------
2466
+ # Conclusion helpers (dark/light mode aware)
2467
+ # -------------------------------------------------------------------------
2468
+ def build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set):
2469
+ """
2470
+ Build the final conclusion HTML with performance summary.
2471
+ Colors are handled via CSS classes so that light/dark mode work correctly.
2472
+ """
2473
+ unlocked_tiers = min(3, max(0, submissions - 1)) # 0..3
2474
+ tier_names = ["Trainee", "Junior", "Senior", "Lead"]
2475
+ reached = tier_names[: unlocked_tiers + 1]
2476
+ tier_line = " → ".join([f"{t}{' ✅' if t in reached else ''}" for t in tier_names])
2477
+
2478
+ improvement = (best_score - first_score) if (first_score is not None and submissions > 1) else 0.0
2479
+ strong_predictors = {"age", "length_of_stay", "priors_count", "age_cat"}
2480
+ strong_used = [f for f in feature_set if f in strong_predictors]
2481
+
2482
+ ethical_note = (
2483
+ "You unlocked powerful predictors. Consider: Would removing demographic fields change fairness? "
2484
+ "In the next section we will begin to investigate this question further."
2485
+ )
2486
+
2487
+ # Tailor message for very few submissions
2488
+ tip_html = ""
2489
+ if submissions < 2:
2490
+ tip_html = """
2491
+ <div class="final-conclusion-tip">
2492
+ <b>Tip:</b> Try at least 2–3 submissions changing ONE setting at a time to see clear cause/effect.
2493
+ </div>
2494
+ """
2495
+
2496
+ # Add note if user reached the attempt cap
2497
+ attempt_cap_html = ""
2498
+ if submissions >= ATTEMPT_LIMIT:
2499
+ attempt_cap_html = f"""
2500
+ <div class="final-conclusion-attempt-cap">
2501
+ <p style="margin:0;">
2502
+ <b>📊 Attempt Limit Reached:</b> You used all {ATTEMPT_LIMIT} allowed submission attempts for this session.
2503
+ We will open up submissions again after you complete some new activities next.
2504
+ </p>
2505
+ </div>
2506
+ """
2507
+
2508
+ return f"""
2509
+ <div class="final-conclusion-root">
2510
+ <h1 class="final-conclusion-title">🎉 Engineering Phase Complete</h1>
2511
+ <div class="final-conclusion-card">
2512
+ <h2 class="final-conclusion-subtitle">Your Performance Snapshot</h2>
2513
+ <ul class="final-conclusion-list">
2514
+ <li>🏁 <b>Best Accuracy:</b> {(best_score * 100):.2f}%</li>
2515
+ <li>📊 <b>Rank Achieved:</b> {('#' + str(rank)) if rank > 0 else '—'}</li>
2516
+ <li>🔁 <b>Submissions Made This Session:</b> {submissions}{' / ' + str(ATTEMPT_LIMIT) if submissions >= ATTEMPT_LIMIT else ''}</li>
2517
+ <li>🧗 <b>Improvement Over First Score This Session:</b> {(improvement * 100):+.2f}</li>
2518
+ <li>🎖️ <b>Tier Progress:</b> {tier_line}</li>
2519
+ <li>🧪 <b>Strong Predictors Used:</b> {len(strong_used)} ({', '.join(strong_used) if strong_used else 'None yet'})</li>
2520
+ </ul>
2521
+
2522
+ {tip_html}
2523
+
2524
+ <div class="final-conclusion-ethics">
2525
+ <p style="margin:0;"><b>Ethical Reflection:</b> {ethical_note}</p>
2526
+ </div>
2527
+
2528
+ {attempt_cap_html}
2529
+
2530
+ <hr class="final-conclusion-divider" />
2531
+
2532
+ <div class="final-conclusion-next">
2533
+ <h2>➡️ Next: Real-World Consequences</h2>
2534
+ <p>Scroll below this app to continue. You'll examine how models like yours shape judicial outcomes.</p>
2535
+ <h1 class="final-conclusion-scroll">👇 SCROLL DOWN 👇</h1>
2536
+ </div>
2537
+ </div>
2538
+ </div>
2539
+ """
2540
+
2541
+
2542
+
2543
+ def build_conclusion_from_state(best_score, submissions, rank, first_score, feature_set):
2544
+ return build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set)
2545
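+
+ # Illustrative sketch (not part of the app logic): the tier ladder shown in the
+ # conclusion card above is driven purely by the submission count via
+ # min(3, max(0, submissions - 1)), for example:
+ #
+ #     >>> [min(3, max(0, s - 1)) for s in (0, 1, 2, 3, 4, 10)]
+ #     [0, 0, 1, 2, 3, 3]
+ #
+ # i.e. the first submission keeps you at "Trainee", each later submission
+ # unlocks one more tier, and progress caps at "Lead".
+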
+ def create_model_building_game_es_app(theme_primary_hue: str = "indigo") -> "gr.Blocks":
2546
+ """
2547
+ Create (but do not launch) the model building game app.
2548
+ """
2549
+ start_background_init()
2550
+
2551
+ # Add missing globals (FIX)
2552
+ global submit_button, submission_feedback_display, team_leaderboard_display
2553
+ global individual_leaderboard_display, last_submission_score_state, last_rank_state
2554
+ global best_score_state, submission_count_state, first_submission_score_state
2555
+ global rank_message_display, model_type_radio, complexity_slider
2556
+ global feature_set_checkbox, data_size_radio
2557
+ global login_username, login_password, login_submit, login_error
2558
+ global attempts_tracker_display, team_name_state
2559
+ global username_state, token_state # <-- Added
2560
+ global readiness_state, was_preview_state, kpi_meta_state # <-- Added for parameter shadowing guards
2561
+ global last_seen_ts_state # <-- Added for timestamp tracking
2562
+
2563
+ css = """
2564
+ /* ------------------------------
2565
+ Shared Design Tokens (local)
2566
+ ------------------------------ */
2567
+
2568
+ /* We keep everything driven by Gradio theme vars:
2569
+ --body-background-fill, --body-text-color, --secondary-text-color,
2570
+ --border-color-primary, --block-background-fill, --color-accent,
2571
+ --shadow-drop, --prose-background-fill
2572
+ */
2573
+
2574
+ :root {
2575
+ --slide-radius-md: 12px;
2576
+ --slide-radius-lg: 16px;
2577
+ --slide-radius-xl: 18px;
2578
+ --slide-spacing-lg: 24px;
2579
+
2580
+ /* Local, non-brand tokens built *on top of* theme vars */
2581
+ --card-bg-soft: var(--block-background-fill);
2582
+ --card-bg-strong: var(--prose-background-fill, var(--block-background-fill));
2583
+ --card-border-subtle: var(--border-color-primary);
2584
+ --accent-strong: var(--color-accent);
2585
+ --text-main: var(--body-text-color);
2586
+ --text-muted: var(--secondary-text-color);
2587
+ }
2588
+
2589
+ /* ------------------------------------------------------------------
2590
+ Base Layout Helpers
2591
+ ------------------------------------------------------------------ */
2592
+
2593
+ .slide-content {
2594
+ max-width: 900px;
2595
+ margin-left: auto;
2596
+ margin-right: auto;
2597
+ }
2598
+
2599
+ /* Shared card-like panels used throughout slides */
2600
+ .panel-box {
2601
+ background: var(--card-bg-soft);
2602
+ padding: 20px;
2603
+ border-radius: var(--slide-radius-lg);
2604
+ border: 2px solid var(--card-border-subtle);
2605
+ margin-bottom: 18px;
2606
+ color: var(--text-main);
2607
+ box-shadow: var(--shadow-drop, 0 2px 4px rgba(0,0,0,0.04));
2608
+ }
2609
+
2610
+ .leaderboard-box {
2611
+ background: var(--card-bg-soft);
2612
+ padding: 20px;
2613
+ border-radius: var(--slide-radius-lg);
2614
+ border: 1px solid var(--card-border-subtle);
2615
+ margin-top: 12px;
2616
+ color: var(--text-main);
2617
+ }
2618
+
2619
+ /* For “explanatory UI” scaffolding */
2620
+ .mock-ui-box {
2621
+ background: var(--card-bg-strong);
2622
+ border: 2px solid var(--card-border-subtle);
2623
+ padding: 24px;
2624
+ border-radius: var(--slide-radius-lg);
2625
+ color: var(--text-main);
2626
+ }
2627
+
2628
+ .mock-ui-inner {
2629
+ background: var(--block-background-fill);
2630
+ border: 1px solid var(--card-border-subtle);
2631
+ padding: 24px;
2632
+ border-radius: var(--slide-radius-md);
2633
+ }
2634
+
2635
+ /* “Control box” inside the mock UI */
2636
+ .mock-ui-control-box {
2637
+ padding: 12px;
2638
+ background: var(--block-background-fill);
2639
+ border-radius: 8px;
2640
+ border: 1px solid var(--card-border-subtle);
2641
+ }
2642
+
2643
+ /* Little radio / check icons */
2644
+ .mock-ui-radio-on {
2645
+ font-size: 1.5rem;
2646
+ vertical-align: middle;
2647
+ color: var(--accent-strong);
2648
+ }
2649
+
2650
+ .mock-ui-radio-off {
2651
+ font-size: 1.5rem;
2652
+ vertical-align: middle;
2653
+ color: var(--text-muted);
2654
+ }
2655
+
2656
+ .mock-ui-slider-text {
2657
+ font-size: 1.5rem;
2658
+ margin: 0;
2659
+ color: var(--accent-strong);
2660
+ letter-spacing: 4px;
2661
+ }
2662
+
2663
+ .mock-ui-slider-bar {
2664
+ color: var(--text-muted);
2665
+ }
2666
+
2667
+ /* Simple mock button representation */
2668
+ .mock-button {
2669
+ width: 100%;
2670
+ font-size: 1.25rem;
2671
+ font-weight: 600;
2672
+ padding: 16px 24px;
2673
+ background-color: var(--accent-strong);
2674
+ color: var(--body-background-fill);
2675
+ border: none;
2676
+ border-radius: 8px;
2677
+ cursor: not-allowed;
2678
+ }
2679
+
2680
+ /* Step visuals on slides */
2681
+ .step-visual {
2682
+ display: flex;
2683
+ flex-wrap: wrap;
2684
+ justify-content: space-around;
2685
+ align-items: center;
2686
+ margin: 24px 0;
2687
+ text-align: center;
2688
+ font-size: 1rem;
2689
+ }
2690
+
2691
+ .step-visual-box {
2692
+ padding: 16px;
2693
+ background: var(--block-background-fill); /* ✅ theme-aware */
2694
+ border-radius: 8px;
2695
+ border: 2px solid var(--border-color-primary);
2696
+ margin: 5px;
2697
+ color: var(--body-text-color); /* optional, safe */
2698
+ }
2699
+
2700
+ .step-visual-arrow {
2701
+ font-size: 2rem;
2702
+ margin: 5px;
2703
+ /* no explicit color – inherit from theme or override in dark mode */
2704
+ }
2705
+
2706
+ /* ------------------------------------------------------------------
2707
+ KPI Card (score feedback)
2708
+ ------------------------------------------------------------------ */
2709
+
2710
+ .kpi-card {
2711
+ background: var(--card-bg-strong);
2712
+ border: 2px solid var(--accent-strong);
2713
+ padding: 24px;
2714
+ border-radius: var(--slide-radius-lg);
2715
+ text-align: center;
2716
+ max-width: 600px;
2717
+ margin: auto;
2718
+ color: var(--text-main);
2719
+ box-shadow: var(--shadow-drop, 0 4px 6px -1px rgba(0,0,0,0.08));
2720
+ min-height: 200px; /* prevent layout shift */
2721
+ }
2722
+
2723
+ .kpi-card-body {
2724
+ display: flex;
2725
+ flex-wrap: wrap;
2726
+ justify-content: space-around;
2727
+ align-items: flex-end;
2728
+ margin-top: 24px;
2729
+ }
2730
+
2731
+ .kpi-metric-box {
2732
+ min-width: 150px;
2733
+ margin: 10px;
2734
+ }
2735
+
2736
+ .kpi-label {
2737
+ font-size: 1rem;
2738
+ color: var(--text-muted);
2739
+ margin: 0;
2740
+ }
2741
+
2742
+ .kpi-score {
2743
+ font-size: 3rem;
2744
+ font-weight: 700;
2745
+ margin: 0;
2746
+ line-height: 1.1;
2747
+ color: var(--accent-strong);
2748
+ }
2749
+
2750
+ .kpi-subtext-muted {
2751
+ font-size: 1.2rem;
2752
+ font-weight: 500;
2753
+ color: var(--text-muted);
2754
+ margin: 0;
2755
+ padding-top: 8px;
2756
+ }
2757
+
2758
+ /* Small variants to hint semantic state without hard-coded colors */
2759
+ .kpi-card--neutral {
2760
+ border-color: var(--card-border-subtle);
2761
+ }
2762
+
2763
+ .kpi-card--subtle-accent {
2764
+ border-color: var(--accent-strong);
2765
+ }
2766
+
2767
+ .kpi-score--muted {
2768
+ color: var(--text-muted);
2769
+ }
2770
+
2771
+ /* ------------------------------------------------------------------
2772
+ Leaderboard Table + Placeholder
2773
+ ------------------------------------------------------------------ */
2774
+
2775
+ .leaderboard-html-table {
2776
+ width: 100%;
2777
+ border-collapse: collapse;
2778
+ text-align: left;
2779
+ font-size: 1rem;
2780
+ color: var(--text-main);
2781
+ min-height: 300px; /* Stable height */
2782
+ }
2783
+
2784
+ .leaderboard-html-table thead {
2785
+ background: var(--block-background-fill);
2786
+ }
2787
+
2788
+ .leaderboard-html-table th {
2789
+ padding: 12px 16px;
2790
+ font-size: 0.9rem;
2791
+ color: var(--text-muted);
2792
+ font-weight: 500;
2793
+ }
2794
+
2795
+ .leaderboard-html-table tbody tr {
2796
+ border-bottom: 1px solid var(--card-border-subtle);
2797
+ }
2798
+
2799
+ .leaderboard-html-table td {
2800
+ padding: 12px 16px;
2801
+ }
2802
+
2803
+ .leaderboard-html-table .user-row-highlight {
2804
+ background: rgba( var(--color-accent-rgb, 59,130,246), 0.1 );
2805
+ font-weight: 600;
2806
+ color: var(--accent-strong);
2807
+ }
2808
+
2809
+ /* Static placeholder (no shimmer, no animation) */
2810
+ .lb-placeholder {
2811
+ min-height: 300px;
2812
+ display: flex;
2813
+ flex-direction: column;
2814
+ align-items: center;
2815
+ justify-content: center;
2816
+ background: var(--block-background-fill);
2817
+ border: 1px solid var(--card-border-subtle);
2818
+ border-radius: 12px;
2819
+ padding: 40px 20px;
2820
+ text-align: center;
2821
+ }
2822
+
2823
+ .lb-placeholder-title {
2824
+ font-size: 1.25rem;
2825
+ font-weight: 500;
2826
+ color: var(--text-muted);
2827
+ margin-bottom: 8px;
2828
+ }
2829
+
2830
+ .lb-placeholder-sub {
2831
+ font-size: 1rem;
2832
+ color: var(--text-muted);
2833
+ }
2834
+
2835
+ /* ------------------------------------------------------------------
2836
+ Processing / “Experiment running” status
2837
+ ------------------------------------------------------------------ */
2838
+
2839
+ .processing-status {
2840
+ background: var(--block-background-fill);
2841
+ border: 2px solid var(--accent-strong);
2842
+ border-radius: 16px;
2843
+ padding: 30px;
2844
+ text-align: center;
2845
+ box-shadow: var(--shadow-drop, 0 4px 6px rgba(0,0,0,0.12));
2846
+ animation: pulse-indigo 2s infinite;
2847
+ color: var(--text-main);
2848
+ }
2849
+
2850
+ .processing-icon {
2851
+ font-size: 4rem;
2852
+ margin-bottom: 10px;
2853
+ display: block;
2854
+ animation: spin-slow 3s linear infinite;
2855
+ }
2856
+
2857
+ .processing-text {
2858
+ font-size: 1.5rem;
2859
+ font-weight: 700;
2860
+ color: var(--accent-strong);
2861
+ }
2862
+
2863
+ .processing-subtext {
2864
+ font-size: 1.1rem;
2865
+ color: var(--text-muted);
2866
+ margin-top: 8px;
2867
+ }
2868
+
2869
+ /* Pulse & spin animations */
2870
+ @keyframes pulse-indigo {
2871
+ 0% { box-shadow: 0 0 0 0 rgba(99, 102, 241, 0.4); }
2872
+ 70% { box-shadow: 0 0 0 15px rgba(99, 102, 241, 0); }
2873
+ 100% { box-shadow: 0 0 0 0 rgba(99, 102, 241, 0); }
2874
+ }
2875
+
2876
+ @keyframes spin-slow {
2877
+ from { transform: rotate(0deg); }
2878
+ to { transform: rotate(360deg); }
2879
+ }
2880
+
2881
+ /* Conclusion arrow pulse */
2882
+ @keyframes pulseArrow {
2883
+ 0% { transform: scale(1); opacity: 1; }
2884
+ 50% { transform: scale(1.08); opacity: 0.85; }
2885
+ 100% { transform: scale(1); opacity: 1; }
2886
+ }
2887
+
2888
+ @media (prefers-reduced-motion: reduce) {
2889
+ [style*='pulseArrow'],
+ .app-conclusion-arrow,
+ .final-conclusion-scroll {
2890
+ animation: none !important;
2891
+ }
2892
+ .processing-status,
2893
+ .processing-icon {
2894
+ animation: none !important;
2895
+ }
2896
+ }
2897
+
2898
+ /* ------------------------------------------------------------------
2899
+ Attempts Tracker + Init Banner + Alerts
2900
+ ------------------------------------------------------------------ */
2901
+
2902
+ .init-banner {
2903
+ background: var(--card-bg-strong);
2904
+ padding: 12px;
2905
+ border-radius: 8px;
2906
+ text-align: center;
2907
+ margin-bottom: 16px;
2908
+ border: 1px solid var(--card-border-subtle);
2909
+ color: var(--text-main);
2910
+ }
2911
+
2912
+ .init-banner__text {
2913
+ margin: 0;
2914
+ font-weight: 500;
2915
+ color: var(--text-muted);
2916
+ }
2917
+
2918
+ /* Attempts tracker shell */
2919
+ .attempts-tracker {
2920
+ text-align: center;
2921
+ padding: 8px;
2922
+ margin: 8px 0;
2923
+ background: var(--block-background-fill);
2924
+ border-radius: 8px;
2925
+ border: 1px solid var(--card-border-subtle);
2926
+ }
2927
+
2928
+ .attempts-tracker__text {
2929
+ margin: 0;
2930
+ font-weight: 600;
2931
+ font-size: 1rem;
2932
+ color: var(--accent-strong);
2933
+ }
2934
+
2935
+ /* Limit reached variant – we *still* stick to theme colors */
2936
+ .attempts-tracker--limit .attempts-tracker__text {
2937
+ color: var(--text-main);
2938
+ }
2939
+
2940
+ /* Generic alert helpers used in inline login messages */
2941
+ .alert {
2942
+ padding: 12px 16px;
2943
+ border-radius: 8px;
2944
+ margin-top: 12px;
2945
+ text-align: left;
2946
+ font-size: 0.95rem;
2947
+ }
2948
+
2949
+ .alert--error {
2950
+ border-left: 4px solid var(--accent-strong);
2951
+ background: var(--block-background-fill);
2952
+ color: var(--text-main);
2953
+ }
2954
+
2955
+ .alert--success {
2956
+ border-left: 4px solid var(--accent-strong);
2957
+ background: var(--block-background-fill);
2958
+ color: var(--text-main);
2959
+ }
2960
+
2961
+ .alert__title {
2962
+ margin: 0;
2963
+ font-weight: 600;
2964
+ color: var(--text-main);
2965
+ }
2966
+
2967
+ .alert__body {
2968
+ margin: 8px 0 0 0;
2969
+ color: var(--text-muted);
2970
+ }
2971
+
2972
+ /* ------------------------------------------------------------------
2973
+ Navigation Loading Overlay
2974
+ ------------------------------------------------------------------ */
2975
+
2976
+ #nav-loading-overlay {
2977
+ position: fixed;
2978
+ top: 0;
2979
+ left: 0;
2980
+ width: 100%;
2981
+ height: 100%;
2982
+ background: color-mix(in srgb, var(--body-background-fill) 90%, transparent);
2983
+ z-index: 9999;
2984
+ display: none;
2985
+ flex-direction: column;
2986
+ align-items: center;
2987
+ justify-content: center;
2988
+ opacity: 0;
2989
+ transition: opacity 0.3s ease;
2990
+ }
2991
+
2992
+ .nav-spinner {
2993
+ width: 50px;
2994
+ height: 50px;
2995
+ border: 5px solid var(--card-border-subtle);
2996
+ border-top: 5px solid var(--accent-strong);
2997
+ border-radius: 50%;
2998
+ animation: nav-spin 1s linear infinite;
2999
+ margin-bottom: 20px;
3000
+ }
3001
+
3002
+ @keyframes nav-spin {
3003
+ 0% { transform: rotate(0deg); }
3004
+ 100% { transform: rotate(360deg); }
3005
+ }
3006
+
3007
+ #nav-loading-text {
3008
+ font-size: 1.3rem;
3009
+ font-weight: 600;
3010
+ color: var(--accent-strong);
3011
+ }
3012
+
3013
+ /* ------------------------------------------------------------------
3014
+ Utility: Image inversion for dark mode (if needed)
3015
+ ------------------------------------------------------------------ */
3016
+
3017
+ .dark-invert-image {
3018
+ filter: invert(0);
3019
+ }
3020
+
3021
+ @media (prefers-color-scheme: dark) {
3022
+ .dark-invert-image {
3023
+ filter: invert(1) hue-rotate(180deg);
3024
+ }
3025
+ }
3026
+
3027
+ /* ------------------------------------------------------------------
3028
+ Dark Mode Specific Fine Tuning
3029
+ ------------------------------------------------------------------ */
3030
+
3031
+ @media (prefers-color-scheme: dark) {
3032
+ .panel-box,
3033
+ .leaderboard-box,
3034
+ .mock-ui-box,
3035
+ .mock-ui-inner,
3036
+ .processing-status,
3037
+ .kpi-card {
3038
+ background: color-mix(in srgb, var(--block-background-fill) 85%, #000 15%);
3039
+ border-color: color-mix(in srgb, var(--card-border-subtle) 70%, var(--accent-strong) 30%);
3040
+ }
3041
+
3042
+ .leaderboard-html-table thead {
3043
+ background: color-mix(in srgb, var(--block-background-fill) 75%, #000 25%);
3044
+ }
3045
+
3046
+ .lb-placeholder {
3047
+ background: color-mix(in srgb, var(--block-background-fill) 75%, #000 25%);
3048
+ }
3049
+
3050
+ #nav-loading-overlay {
3051
+ background: color-mix(in srgb, #000 70%, var(--body-background-fill) 30%);
3052
+ }
3053
+ }
3054
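+
+ /* Note on the fine-tuning above: these overrides rely on color-mix(), which
+    assumes a reasonably recent browser; where it is unsupported the declarations
+    are simply dropped and the base theme-variable rules earlier in this sheet
+    still apply. */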
+
3055
+ /* ---------- Conclusion Card Theme Tokens ---------- */
3056
+
3057
+ /* Light theme defaults */
3058
+ :root,
3059
+ :root[data-theme="light"] {
3060
+ --conclusion-card-bg: #e0f2fe; /* light sky */
3061
+ --conclusion-card-border: #0369a1; /* sky-700 */
3062
+ --conclusion-card-fg: #0f172a; /* slate-900 */
3063
+
3064
+ --conclusion-tip-bg: #fef9c3; /* amber-100 */
3065
+ --conclusion-tip-border: #f59e0b; /* amber-500 */
3066
+ --conclusion-tip-fg: #713f12; /* amber-900 */
3067
+
3068
+ --conclusion-ethics-bg: #fef2f2; /* red-50 */
3069
+ --conclusion-ethics-border: #ef4444; /* red-500 */
3070
+ --conclusion-ethics-fg: #7f1d1d; /* red-900 */
3071
+
3072
+ --conclusion-attempt-bg: #fee2e2; /* red-100 */
3073
+ --conclusion-attempt-border: #ef4444; /* red-500 */
3074
+ --conclusion-attempt-fg: #7f1d1d; /* red-900 */
3075
+
3076
+ --conclusion-next-fg: #0f172a; /* main text color */
3077
+ }
3078
+
3079
+ /* Dark theme overrides – keep contrast high on dark background */
3080
+ [data-theme="dark"] {
3081
+ --conclusion-card-bg: #020617; /* slate-950 */
3082
+ --conclusion-card-border: #38bdf8; /* sky-400 */
3083
+ --conclusion-card-fg: #e5e7eb; /* slate-200 */
3084
+
3085
+ --conclusion-tip-bg: rgba(250, 204, 21, 0.08); /* soft amber tint */
3086
+ --conclusion-tip-border: #facc15; /* amber-400 */
3087
+ --conclusion-tip-fg: #facc15;
3088
+
3089
+ --conclusion-ethics-bg: rgba(248, 113, 113, 0.10); /* soft red tint */
3090
+ --conclusion-ethics-border: #f97373; /* red-ish */
3091
+ --conclusion-ethics-fg: #fecaca;
3092
+
3093
+ --conclusion-attempt-bg: rgba(248, 113, 113, 0.16);
3094
+ --conclusion-attempt-border: #f97373;
3095
+ --conclusion-attempt-fg: #fee2e2;
3096
+
3097
+ --conclusion-next-fg: #e5e7eb;
3098
+ }
3099
+
3100
+ /* ---------- Conclusion Layout ---------- */
3101
+
3102
+ .app-conclusion-wrapper {
3103
+ text-align: center;
3104
+ }
3105
+
3106
+ .app-conclusion-title {
3107
+ font-size: 2.4rem;
3108
+ margin: 0;
3109
+ }
3110
+
3111
+ .app-conclusion-card {
3112
+ margin-top: 24px;
3113
+ max-width: 950px;
3114
+ margin-left: auto;
3115
+ margin-right: auto;
3116
+ padding: 28px;
3117
+ border-radius: 18px;
3118
+ border-width: 3px;
3119
+ border-style: solid;
3120
+ background: var(--conclusion-card-bg);
3121
+ border-color: var(--conclusion-card-border);
3122
+ color: var(--conclusion-card-fg);
3123
+ box-shadow: 0 20px 40px rgba(15, 23, 42, 0.25);
3124
+ }
3125
+
3126
+ .app-conclusion-subtitle {
3127
+ margin-top: 0;
3128
+ font-size: 1.5rem;
3129
+ }
3130
+
3131
+ .app-conclusion-metrics {
3132
+ list-style: none;
3133
+ padding: 0;
3134
+ font-size: 1.05rem;
3135
+ text-align: left;
3136
+ max-width: 640px;
3137
+ margin: 20px auto;
3138
+ }
3139
+
3140
+ /* ---------- Generic panel helpers reused here ---------- */
3141
+
3142
+ .app-panel-tip,
3143
+ .app-panel-critical,
3144
+ .app-panel-warning {
3145
+ padding: 16px;
3146
+ border-radius: 12px;
3147
+ border-left-width: 6px;
3148
+ border-left-style: solid;
3149
+ text-align: left;
3150
+ font-size: 0.98rem;
3151
+ line-height: 1.4;
3152
+ margin-top: 16px;
3153
+ }
3154
+
3155
+ .app-panel-title {
3156
+ margin: 0 0 4px 0;
3157
+ font-weight: 700;
3158
+ }
3159
+
3160
+ .app-panel-body {
3161
+ margin: 0;
3162
+ }
3163
+
3164
+ /* Specific variants */
3165
+
3166
+ .app-conclusion-tip.app-panel-tip {
3167
+ background: var(--conclusion-tip-bg);
3168
+ border-left-color: var(--conclusion-tip-border);
3169
+ color: var(--conclusion-tip-fg);
3170
+ }
3171
+
3172
+ .app-conclusion-ethics.app-panel-critical {
3173
+ background: var(--conclusion-ethics-bg);
3174
+ border-left-color: var(--conclusion-ethics-border);
3175
+ color: var(--conclusion-ethics-fg);
3176
+ }
3177
+
3178
+ .app-conclusion-attempt-cap.app-panel-warning {
3179
+ background: var(--conclusion-attempt-bg);
3180
+ border-left-color: var(--conclusion-attempt-border);
3181
+ color: var(--conclusion-attempt-fg);
3182
+ }
3183
+
3184
+ /* Divider + next section */
3185
+
3186
+ .app-conclusion-divider {
3187
+ margin: 28px 0;
3188
+ border: 0;
3189
+ border-top: 2px solid rgba(148, 163, 184, 0.8); /* slate-400-ish */
3190
+ }
3191
+
3192
+ .app-conclusion-next-title {
3193
+ margin: 0;
3194
+ color: var(--conclusion-next-fg);
3195
+ }
3196
+
3197
+ .app-conclusion-next-body {
3198
+ font-size: 1rem;
3199
+ color: var(--conclusion-next-fg);
3200
+ }
3201
+
3202
+ /* Arrow inherits the same color, keeps pulse animation defined earlier */
3203
+ .app-conclusion-arrow {
3204
+ margin: 12px 0;
3205
+ font-size: 3rem;
3206
+ animation: pulseArrow 2.5s infinite;
3207
+ color: var(--conclusion-next-fg);
3208
+ }
3209
+
3210
+ /* ---------------------------------------------------- */
3211
+ /* Final Conclusion Slide (Light Mode Defaults) */
3212
+ /* ---------------------------------------------------- */
3213
+
3214
+ .final-conclusion-root {
3215
+ text-align: center;
3216
+ color: var(--body-text-color);
3217
+ }
3218
+
3219
+ .final-conclusion-title {
3220
+ font-size: 2.4rem;
3221
+ margin: 0;
3222
+ }
3223
+
3224
+ .final-conclusion-card {
3225
+ background-color: var(--block-background-fill);
3226
+ color: var(--body-text-color);
3227
+ padding: 28px;
3228
+ border-radius: 18px;
3229
+ border: 2px solid var(--border-color-primary);
3230
+ margin-top: 24px;
3231
+ max-width: 950px;
3232
+ margin-left: auto;
3233
+ margin-right: auto;
3234
+ box-shadow: var(--shadow-drop, 0 4px 10px rgba(15, 23, 42, 0.08));
3235
+ }
3236
+
3237
+ .final-conclusion-subtitle {
3238
+ margin-top: 0;
3239
+ margin-bottom: 8px;
3240
+ }
3241
+
3242
+ .final-conclusion-list {
3243
+ list-style: none;
3244
+ padding: 0;
3245
+ font-size: 1.05rem;
3246
+ text-align: left;
3247
+ max-width: 640px;
3248
+ margin: 20px auto;
3249
+ }
3250
+
3251
+ .final-conclusion-list li {
3252
+ margin: 4px 0;
3253
+ }
3254
+
3255
+ .final-conclusion-tip {
3256
+ margin-top: 16px;
3257
+ padding: 16px;
3258
+ border-radius: 12px;
3259
+ border-left: 6px solid var(--color-accent);
3260
+ background-color: color-mix(in srgb, var(--color-accent) 12%, transparent);
3261
+ text-align: left;
3262
+ font-size: 0.98rem;
3263
+ line-height: 1.4;
3264
+ }
3265
+
3266
+ .final-conclusion-ethics {
3267
+ margin-top: 16px;
3268
+ padding: 18px;
3269
+ border-radius: 12px;
3270
+ border-left: 6px solid #ef4444;
3271
+ background-color: color-mix(in srgb, #ef4444 10%, transparent);
3272
+ text-align: left;
3273
+ font-size: 0.98rem;
3274
+ line-height: 1.4;
3275
+ }
3276
+
3277
+ .final-conclusion-attempt-cap {
3278
+ margin-top: 16px;
3279
+ padding: 16px;
3280
+ border-radius: 12px;
3281
+ border-left: 6px solid #ef4444;
3282
+ background-color: color-mix(in srgb, #ef4444 16%, transparent);
3283
+ text-align: left;
3284
+ font-size: 0.98rem;
3285
+ line-height: 1.4;
3286
+ }
3287
+
3288
+ .final-conclusion-divider {
3289
+ margin: 28px 0;
3290
+ border: 0;
3291
+ border-top: 2px solid var(--border-color-primary);
3292
+ }
3293
+
3294
+ .final-conclusion-next h2 {
3295
+ margin: 0;
3296
+ }
3297
+
3298
+ .final-conclusion-next p {
3299
+ font-size: 1rem;
3300
+ margin-top: 4px;
3301
+ margin-bottom: 0;
3302
+ }
3303
+
3304
+ .final-conclusion-scroll {
3305
+ margin: 12px 0 0 0;
3306
+ font-size: 3rem;
3307
+ animation: pulseArrow 2.5s infinite;
3308
+ }
3309
+
3310
+ /* ---------------------------------------------------- */
3311
+ /* Dark Mode Overrides for Final Slide */
3312
+ /* ---------------------------------------------------- */
3313
+
3314
+ @media (prefers-color-scheme: dark) {
3315
+ .final-conclusion-card {
3316
+ background-color: #0b1120; /* deep slate */
3317
+ color: white; /* 100% contrast confidence */
3318
+ border-color: #38bdf8;
3319
+ box-shadow: none;
3320
+ }
3321
+
3322
+ .final-conclusion-tip {
3323
+ background-color: rgba(56, 189, 248, 0.18);
3324
+ }
3325
+
3326
+ .final-conclusion-ethics {
3327
+ background-color: rgba(248, 113, 113, 0.18);
3328
+ }
3329
+
3330
+ .final-conclusion-attempt-cap {
3331
+ background-color: rgba(248, 113, 113, 0.26);
3332
+ }
3333
+ }
3334
+ /* ---------------------------------------------------- */
3335
+ /* Slide 3: INPUT → MODEL → OUTPUT flow (theme-aware) */
3336
+ /* ---------------------------------------------------- */
3337
+
3338
+
3339
+ .model-flow {
3340
+ text-align: center;
3341
+ font-weight: 600;
3342
+ font-size: 1.2rem;
3343
+ margin: 20px 0;
3344
+ /* No explicit color – inherit from the card */
3345
+ }
3346
+
3347
+ .model-flow-label {
3348
+ padding: 0 0.1rem;
3349
+ /* No explicit color – inherit */
3350
+ }
3351
+
3352
+ .model-flow-arrow {
3353
+ margin: 0 0.35rem;
3354
+ font-size: 1.4rem;
3355
+ /* No explicit color – inherit */
3356
+ }
3357
+
3358
+ @media (prefers-color-scheme: dark) {
3359
+ .model-flow {
3360
+ color: var(--body-text-color);
3361
+ }
3362
+ .model-flow-arrow {
3363
+ /* In dark mode, nudge arrows toward accent for contrast/confidence */
3364
+ color: color-mix(in srgb, var(--color-accent) 75%, var(--body-text-color) 25%);
3365
+ }
3366
+ }
3367
+ """
3368
+
3369
+
3370
+ # Define globals for yield
3371
+ global submit_button, submission_feedback_display, team_leaderboard_display
3372
+ # --- THIS IS THE FIXED LINE ---
3373
+ global individual_leaderboard_display, last_submission_score_state, last_rank_state, best_score_state, submission_count_state, first_submission_score_state
3374
+ # --- END OF FIX ---
3375
+ global rank_message_display, model_type_radio, complexity_slider
3376
+ global feature_set_checkbox, data_size_radio
3377
+ global login_username, login_password, login_submit, login_error
3378
+ global attempts_tracker_display, team_name_state
3379
+
3380
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="indigo"), css=css) as demo:
3381
+ # Persistent top anchor for scroll-to-top navigation
3382
+ gr.HTML("<div id='app_top_anchor' style='height:0;'></div>")
3383
+
3384
+ # Navigation loading overlay with spinner and dynamic message
3385
+ gr.HTML("""
3386
+ <div id='nav-loading-overlay'>
3387
+ <div class='nav-spinner'></div>
3388
+ <span id='nav-loading-text'>Loading...</span>
3389
+ </div>
3390
+ """)
3391
+
3392
+ # Concurrency Note: Do NOT read per-user state from os.environ here.
3393
+ # Username and other per-user data are managed via gr.State objects
3394
+ # and populated during handle_load_with_session_auth.
3395
+
3396
+ # Loading screen
3397
+ with gr.Column(visible=False) as loading_screen:
3398
+ gr.Markdown(
3399
+ """
3400
+ <div style='text-align:center; padding:100px 0;'>
3401
+ <h2 style='font-size:2rem; color:#6b7280;'>⏳ Loading...</h2>
3402
+ </div>
3403
+ """
3404
+ )
3405
+
3406
+ # --- Briefing Slideshow (Updated with New Cards) ---
3407
+
3408
+ # Slide 1: From Understanding to Building (Retained as transition)
3409
+ with gr.Column(visible=True, elem_id="slide-1") as briefing_slide_1:
3410
+ gr.Markdown("<h1 style='text-align:center;'>🔄 De la teoria a la práctica</h1>")
3411
+ gr.HTML(
3412
+ """
3413
+ <div class='slide-content'>
3414
+ <div class='panel-box'>
3415
+ <h3 style='font-size: 1.5rem; text-align:center; margin-top:0;'>¡Buen trabajo! Ahora ya has conseguido:</h3>
3416
+
3417
+ <ul style='list-style: none; padding-left: 0; margin-top: 24px; margin-bottom: 24px;'>
3418
+ <li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
3419
+ <span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
3420
+ Tomar decisiones difíciles como juez o jueza utilizando predicciones de IA
3421
+ </li>
3422
+ <li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
3423
+ <span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
3424
+ Aprender qué son los falsos positivos (falsas alarmas) y los falsos negativos (alertas ignoradas)
3425
+ </li>
3426
+ <li style='font-size: 1.1rem; font-weight: 500; margin-bottom: 12px;'>
3427
+ <span style='font-size: 1.5rem; vertical-align: middle;'>✅</span>
3428
+ Entender los principios básicos de cómo funciona la IA:
3429
+ </li>
3430
+ </ul>
3431
+
3432
+ <div style='background:white; padding:16px; border-radius:12px; margin:12px 0; text-align:center;'>
3433
+ <div style='display:inline-block; background:#dbeafe; padding:12px 16px; border-radius:8px; margin:4px;'>
3434
+ <h3 style='margin:0; color:#0369a1;'>ENTRADA</h3>
3435
+ </div>
3436
+ <div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
3437
+ <div style='display:inline-block; background:#fef3c7; padding:12px 16px; border-radius:8px; margin:4px;'>
3438
+ <h3 style='margin:0; color:#92400e;'>MODELO</h3>
3439
+ </div>
3440
+ <div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
3441
+ <div style='display:inline-block; background:#f0fdf4; padding:12px 16px; border-radius:8px; margin:4px;'>
3442
+ <h3 style='margin:0; color:#15803d;'>RESULTADO</h3>
3443
+ </div>
3444
+ </div>
3445
+
3446
+ <hr style='margin: 24px 0; border-top: 2px solid #c7d2fe;'>
3447
+
3448
+ <h3 style='font-size: 1.5rem; text-align:center;'>Ahora ha llegado el momento de ponerte en la piel de una persona ingeniera de IA.</h3>
3449
+ <p style='font-size: 1.1rem; text-align:center; margin-top: 12px;'>
3450
+ <strong>Tu nuevo reto:</strong> Crear modelos de IA que sean más precisos que el que utilizaste en el rol de juzgar casos.
3451
+ </p>
3452
+ <p style='font-size: 1.1rem; text-align:center; margin-top: 12px;'>
3453
+ Recuerda: has vivido en primera persona cómo las predicciones de la IA afectan la vida real de las personas. Usa ese conocimiento para construir algo mejor.
3454
+ </p>
3455
+ </div>
3456
+ </div>
3457
+ """
3458
+ )
3459
+ briefing_1_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3460
+
3461
+ # Slide 2: Card 1 (Your Engineering Mission)
3462
+ with gr.Column(visible=False, elem_id="slide-2") as briefing_slide_2:
3463
+ gr.Markdown("<h1 style='text-align:center;'>📋 Tu misión - Construir una IA mejor</h1>")
3464
+
3465
+ gr.HTML(
3466
+ """
3467
+ <div class='slide-content'>
3468
+ <div class='panel-box'>
3469
+ <h3>La misión</h3>
3470
+ <p>Crea un modelo de IA que ayude a los tribunales a tomar decisiones más acertadas. El modelo que utilizaste antes te daba recomendaciones imperfectas. Ahora tu tarea es construir un modelo nuevo que prediga el riesgo con mayor precisión y ofrezca a quienes juzgan información fiable para poder ser justos y justas.</p>
3471
+
3472
+ <h3>La competición</h3>
3473
+ <p>Para lograrlo, competirás con otras personas ingenieras. Formarás parte de un equipo de ingeniería que te ayudará en tu misión. Tus resultados se registrarán tanto de forma individual como colectiva en las clasificaciones en tiempo real.</p>
3474
+ </div>
3475
+
3476
+ <div class='leaderboard-box' style='max-width: 600px; margin: 16px auto; text-align: center; padding: 16px;'>
3477
+ <p style='font-size: 1.1rem; margin:0;'>Te unirás a un equipo como...</p>
3478
+ <h3 style='font-size: 1.75rem; color: #6b7280; margin: 8px 0;'>
3479
+ 🛡️ Los Exploradores Éticos
3480
+ </h3>
3481
+ </div>
3482
+
3483
+ <div class='mock-ui-box'>
3484
+ <h3>El reto de los datos</h3>
3485
+ <p>Para competir, tendrás acceso a miles de expedientes de casos antiguos. Dispones de dos tipos de información:</p>
3486
+ <ol style='list-style-position: inside; padding-left: 20px;'>
3487
+ <li><strong>Perfiles de personas presas:</strong> Es la información que tenía el tribunal en el momento de la detención.
3488
+ <ul style='margin-left: 20px; list-style-type: disc;'>
3489
+ <li><em>Edad, número de antecedentes penales, tipo de cargo penal.</em></li>
3490
+ </ul>
3491
+ </li>
3492
+ <li><strong>Resultados históricos:</strong> Esto es lo que ocurrió con esas personas pasado un tiempo.
3493
+ <ul style='margin-left: 20px; list-style-type: disc;'>
3494
+ <li><em>¿Volvieron a cometer un delito en dos años? (Sí/No)</em></li>
3495
+ </ul>
3496
+ </li>
3497
+ </ol>
3498
+
3499
+ <h3>La tarea principal</h3>
3500
+ <p>Debes enseñar a tu modelo de IA a analizar los "perfiles" y predecir con precisión el "resultado".</p>
3501
+ <p><strong>¿Te animas a construir algo que podría cambiar la manera en que funciona la justicia?</strong></p>
3502
+ </div>
3503
+ </div>
3504
+ """
3505
+ )
3506
+
3507
+ with gr.Row():
3508
+ briefing_2_back = gr.Button("◀️ Atrás", size="lg")
3509
+ briefing_2_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3510
+
3511
+ # Slide 3: Card 2 (What is a "Model"?)
3512
+ with gr.Column(visible=False, elem_id="slide-3") as briefing_slide_3:
3513
+ gr.Markdown("<h1 style='text-align:center;'>🧠 ¿Qué es un sistema de IA?</h1>")
3514
+
3515
+ # --- FIX FOR SLIDE 3 ---
3516
+ # Combined all content into single gr.HTML()
3517
+ gr.HTML(
3518
+ """
3519
+ <div class='slide-content'>
3520
+ <div class='panel-box'>
3521
+ <p>Antes de empezar a competir, veamos con claridad qué es exactamente lo que vas a construir.</p>
3522
+ <h3>Piensa en un sistema de IA como una "Máquina de Predicción".</h3>
3523
+ <p>Ya conoces el flujo:</p>
3524
+
3525
+ <div style='background:white; padding:16px; border-radius:12px; margin:12px 0; text-align:center;'>
3526
+ <div style='display:inline-block; background:#dbeafe; padding:12px 16px; border-radius:8px; margin:4px;'>
3527
+ <h3 style='margin:0; color:#0369a1;'>ENTRADA</h3>
3528
+ </div>
3529
+ <div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
3530
+ <div style='display:inline-block; background:#fef3c7; padding:12px 16px; border-radius:8px; margin:4px;'>
3531
+ <h3 style='margin:0; color:#92400e;'>MODELO</h3>
3532
+ </div>
3533
+ <div style='display:inline-block; font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
3534
+ <div style='display:inline-block; background:#f0fdf4; padding:12px 16px; border-radius:8px; margin:4px;'>
3535
+ <h3 style='margin:0; color:#15803d;'>SALIDA</h3>
3536
+ </div>
3537
+ </div>
3538
+
3539
+ <p>Como persona ingeniera, no necesitas escribir código complejo desde cero. En lugar de eso, vas a construir esta máquina combinando tres componentes principales.</p>
3540
+ </div>
3541
+
3542
+ <div class='mock-ui-box'>
3543
+ <h3>Los tres componentes:</h3>
3544
+ <p><strong>1. La entrada (Datos)</strong><br>
3545
+ La información que le das a la máquina.<br>
3546
+ <em>* Ejemplos: edad, número de antecedentes penales, detalles del cargo penal.</em></p>
3547
+
3548
+ <p><strong>2. El modelo (El cerebro)</strong><br>
3549
+ Este es el "cerebro" de tu máquina. Estudia los datos de entrada e intenta averiguar cómo se conectan las cosas para hacer una predicción. Puedes elegir diferentes estrategias de modelo (cerebros) para tu máquina.<br>
3550
+ <em>* Ejemplos: Algunos "cerebros" solo encuentran reglas sencillas (como marcar un correo si dice 'dinero gratis'). Otros tienen la capacidad de encontrar patrones complejos (como reconocer una cara específica en una multitud).</em></p>
3551
+
3552
+ <p><strong>3. La salida (Predicción)</strong><br>
3553
+ Lo que el modelo intenta adivinar como mejor opción.<br>
3554
+ <em>* Ejemplo: Nivel de riesgo: Alto o Bajo.</em></p>
3555
+
3556
+ <hr>
3557
+
3558
+ <p><strong>Cómo aprende:</strong> Muestras al modelo miles de casos antiguos (Entradas) + lo que pasó realmente (Salidas). El modelo los estudia para encontrar las reglas y así poder hacer predicciones sobre casos nuevos que no ha visto antes.</p>
3559
+ </div>
3560
+ </div>
3561
+ """
3562
+ )
3563
+ # --- END FIX ---
3564
+
3565
+ with gr.Row():
3566
+ briefing_3_back = gr.Button("◀️ Atrás", size="lg")
3567
+ briefing_3_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3568
+
3569
+ # Slide 4: Card 3 (How Engineers Work — The Loop)
3570
+ with gr.Column(visible=False, elem_id="slide-4") as briefing_slide_4:
3571
+ gr.Markdown("<h1 style='text-align:center;'>🔁 Cómo trabajan las personas ingenieras — El Bucle</h1>")
3572
+
3573
+ # --- FIX FOR SLIDE 4 ---
3574
+ # Combined all content into single gr.HTML()
3575
+ gr.HTML(
3576
+ """
3577
+ <div class='slide-content'>
3578
+ <div class='panel-box'>
3579
+ <p>Ahora que ya conoces los componentes de un modelo, ¿cómo puedes construir uno mejor?</p>
3580
+ <h3>Aquí tienes el secreto:</h3>
3581
+ <p>Los equipos de IA reales casi nunca aciertan a la primera. En su lugar, siguen un bucle continuo de experimentación: <strong>Probar, comprobar, aprender y repetir.</strong></p>
3582
+
3583
+ <h3>El bucle de experimentación:</h3>
3584
+ <ol style='list-style-position: inside;'>
3585
+ <li><strong>Construye un modelo:</strong> Elige tus componentes y obtendrás una puntuación inicial de precisión.</li>
3586
+ <li><strong>Hazte una pregunta:</strong> (p. ej., "¿Qué pasa si cambio el 'Cerebro' —el tipo de modelo—?")</li>
3587
+ <li><strong>Comprueba y compara:</strong> ¿La puntuación ha mejorado... o ha empeorado?</li>
3588
+ </ol>
3589
+ </div>
3590
+
3591
+ <h3>¡Harás exactamente lo mismo en la competición!</h3>
3592
+
3593
+ <div class='step-visual'>
3594
+ <div class='step-visual-box'><b>1. Configura</b><br/>Usa los controles para seleccionar el tipo de modelo y los datos.</div>
3595
+ <div class='step-visual-arrow'>→</div>
3596
+ <div class='step-visual-box'><b>2. Envía</b><br/>Haz clic en "Construir y enviar modelo" para entrenar tu modelo.</div>
3597
+ <div class='step-visual-arrow'>→</div>
3598
+ <div class='step-visual-box'><b>3. Analiza</b><br/>Mira tu posición en la clasificación en tiempo real.</div>
3599
+ <div class='step-visual-arrow'>→</div>
3600
+ <div class='step-visual-box'><b>4. Mejora</b><br/>Cambia una opción y vuelve a enviarlo.</div>
3601
+ </div>
3602
+
3603
+ <div class='leaderboard-box' style='text-align:center;'>
3604
+ <p><strong>Consejo:</strong> Intenta cambiar solo una cosa a la vez. Si cambias demasiadas cosas de golpe, ¡no sabrás qué es lo que ha hecho que tu modelo mejore o empeore!</p>
3605
+ </div>
3606
+ </div>
3607
+ """
3608
+ )
3609
+ # --- END FIX ---
3610
+
3611
+ with gr.Row():
3612
+ briefing_4_back = gr.Button("◀️ Atrás", size="lg")
3613
+ briefing_4_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3614
+
3615
+ # Slide 5: Card 4 (Control Knobs — The "Brain" Settings)
3616
+ with gr.Column(visible=False, elem_id="slide-5") as briefing_slide_5:
3617
+ gr.Markdown("<h1 style='text-align:center;'>🎛️ Controles — La configuración del \"cerebro\"</h1>")
3618
+
3619
+ # --- FIX FOR SLIDE 5 ---
3620
+ # Combined all content into single gr.HTML()
3621
+ gr.HTML(
3622
+ """
3623
+ <div class='slide-content'>
3624
+ <div class='mock-ui-inner'>
3625
+ <p>Para construir tu sistema de IA, usarás controles para configurar tu Máquina de Predicción. Los dos primeros controles te permiten elegir la estrategia del modelo (el cerebro) y ajustar cómo aprende patrones a partir de los datos.</p>
3626
+ <hr style='margin: 16px 0;'>
3627
+
3628
+ <h3 style='margin-top:0;'>1. Estrategia del modelo (Tipo de modelo)</h3>
3629
+ <div style='font-size: 1rem; margin-bottom:12px;'>
3630
+ <b>Qué es:</b> El cerebro de tu Máquina de Predicción. Utiliza un método matemático específico —llamado algoritmo— para encontrar patrones en los datos. Una vez aprende de esos patrones, se convierte en un modelo listo para hacer su mejor predicción.
3631
+ </div>
3632
+ <div class='mock-ui-control-box'>
3633
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3634
+ <span class='mock-ui-radio-on'>◉</span>
3635
+ <b>El Generalista Equilibrado:</b> Aprende a partir de todos los datos y tiene en cuenta varios factores en cada decisión, lo que ayuda a obtener resultados coherentes en diferentes situaciones.
3636
+ </p>
3637
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3638
+ <span class='mock-ui-radio-off'>○</span>
3639
+ <b>El Creador de Reglas:</b> Utiliza reglas claras del tipo “Si… entonces…”, fáciles de entender pero menos flexibles (Por ejemplo: si hay delitos previos > 2, entonces hay riesgo alto).
3640
+ </p>
3641
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3642
+ <span class='mock-ui-radio-off'>○</span>
3643
+ <b>El Buscador de Patrones Profundos:</b> Un modelo complejo que encuentra patrones ocultos en los datos, pero cuyas decisiones son más difíciles de explicar.
3644
+ </p>
3645
+ </div>
3646
+
3647
+ <hr style='margin: 24px 0;'>
3648
+
3649
+ <h3>2. Complejidad del modelo (Nivel de ajuste)</h3>
3650
+ <div class='mock-ui-control-box' style='text-align: center;'>
3651
+ <p style='font-size: 1.1rem; margin:0;'>Rango: Nivel 1 ─── ● ─── 10</p>
3652
+ </div>
3653
+
3654
+ <div style='margin-top: 16px; font-size: 1rem;'>
3655
+ <ul style='list-style-position: inside;'>
3656
+ <li><b>Qué es:</b> Es el nivel de detalle con el que el modelo aprende a partir de los datos: si se centra en patrones generales o también en casos muy específicos.</li>
3657
+ <li><b>El equilibrio:</b>
3658
+ <ul style='list-style-position: inside; margin-left: 20px;'>
3659
+ <li><b>Bajo (Nivel 1):</b> Aprende principalmente a partir de patrones generales de los datos.</li>
3660
+ <li><b>Alto (Nivel 5):</b> Aprende tanto patrones generales como detalles muy finos.</li>
3661
+ </ul>
3662
+ </li>
3663
+ </ul>
3664
+ <p style='color:#b91c1c; font-weight:bold; margin-top:10px;'>Aviso: Si este valor es demasiado alto, el modelo puede “memorizar” detalles aleatorios o coincidencias irrelevantes (ruido) de los datos pasados, en lugar de aprender la regla general.</p>
3665
+ </div>
3666
+ </div>
3667
+ </div>
3668
+ """
3669
+ )
3670
+ # --- END FIX ---
3671
+
3672
+ with gr.Row():
3673
+ briefing_5_back = gr.Button("◀️ Atrás", size="lg")
3674
+ briefing_5_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3675
+
3676
+ # Slide 6: Card 5 (Control Knobs — The "Data" Settings)
3677
+ with gr.Column(visible=False, elem_id="slide-6") as briefing_slide_6:
3678
+ gr.Markdown("<h1 style='text-align:center;'>🎛️ Controles — La configuración de los \"datos\"</h1>")
3679
+
3680
+ # --- FIX FOR SLIDE 6 ---
3681
+ # Combined all content into single gr.HTML()
3682
+ gr.HTML(
3683
+ """
3684
+ <div class='slide-content'>
3685
+ <div class='mock-ui-inner'>
3686
+ <p>Ahora que has configurado tu máquina de predicción, debes decidir qué información procesará. Estos selectores controlan los datos de entrada del sistema de IA.</p>
3687
+ <hr style='margin: 16px 0;'>
3688
+
3689
+ <h3 style='margin-top:0;'>3. Variables de datos</h3>
3690
+ <div style='font-size: 1rem; margin-bottom:12px;'>
3691
+ <b>Qué es:</b> Los puntos de datos específicos a los que el sistema de IA (la máquina) tiene permiso para acceder.
3692
+ <br><b>Por qué es importante:</b> El resultado del sistema depende totalmente de la información que recibe.
3693
+ </div>
3694
+
3695
+ <div class='mock-ui-control-box'>
3696
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3697
+ <span class='mock-ui-radio-on'>☑</span>
3698
+ <b>Datos de comportamiento:</b> Información como el <i>número de delitos juveniles</i> ayuda al sistema a identificar patrones de riesgo basados en hechos.
3699
+ </p>
3700
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3701
+ <span class='mock-ui-radio-off'>☐</span>
3702
+ <b>Datos demográficos:</b> Datos como la <i>raza</i> pueden ayudar al modelo a aprender, pero también pueden replicar sesgos humanos.
3703
+ </p>
3704
3705
+ </div>
3706
+ <p style='margin-top:10px;'><b>Tu tarea:</b> Marcar ☑ o Desmarcar ☐ las casillas para elegir qué información "alimentará" a tu modelo.</p>
3707
+
3708
+ <hr style='margin: 24px 0;'>
3709
+
3710
+ <h3>4. Volumen de datos (Volumen de entrenamiento)</h3>
3711
+ <div style='font-size: 1rem; margin-bottom:12px;'>
3712
+ <b>Qué es:</b> La cantidad de casos históricos que el sistema de IA utiliza para aprender patrones.
3713
+ </div>
3714
+
3715
+ <div class='mock-ui-control-box'>
3716
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3717
+ <span class='mock-ui-radio-on'>◉</span>
3718
+ <b>Pequeño (20%):</b> Procesamiento rápido. Ideal para hacer pruebas rápidas y revisar tu configuración.
3719
+ </p>
3720
+ <p style='font-size: 1.1rem; margin: 8px 0;'>
3721
+ <span class='mock-ui-radio-off'>○</span>
3722
+ <b>Completo (100%):</b> Procesamiento máximo de datos. Tarda más en construirse, pero da al sistema de IA la mejor oportunidad para calibrar su precisión.
3723
+ </p>
3724
+
3725
+ </div>
3726
+
3727
+ </div>
3728
+ </div>
3729
+ """
3730
+ )
3731
+ # --- END FIX ---
3732
+
3733
+ with gr.Row():
3734
+ briefing_6_back = gr.Button("◀️ Atrás", size="lg")
3735
+ briefing_6_next = gr.Button("Siguiente ▶️", variant="primary", size="lg")
3736
+
3737
+ # Slide 7: Card 6 (Your Score as an Engineer)
3738
+ with gr.Column(visible=False, elem_id="slide-7") as briefing_slide_7:
3739
+ gr.Markdown("<h1 style='text-align:center;'>🏆 Tu puntuación como ingeniero/a</h1>")
3740
+
3741
+ # --- FIX FOR SLIDE 7 ---
3742
+ # Combined all content into single gr.HTML()
3743
+ gr.HTML(
3744
+ """
3745
+ <div class='slide-content'>
3746
+ <div class='panel-box'>
3747
+ <p>Ahora que ya sabes cómo construir un modelo, es hora de poner a prueba tus habilidades. Aquí tienes cómo mediremos tu éxito y cómo podrás subir en la clasificación:</p>
3748
+
3749
+ <h3>Cómo se calcula tu puntuación</h3>
3750
+ <ul style='list-style-position: inside;'>
3751
+ <li><strong>Precisión de la predicción:</strong> Tu modelo se pone a prueba con Datos Ocultos (casos guardados en una "caja fuerte secreta" que tu modelo nunca ha visto). Esto simula la predicción del futuro para garantizar que obtengas una puntuación de precisión realista.</li>
3752
+ <li><strong>La clasificación:</strong> Los marcadores en directo siguen tu progreso individualmente y en equipo.</li>
3753
+ </ul>
3754
+
3755
+ <h3>Cómo puedes mejorar: El Juego</h3>
3756
+ <ul style='list-style-position: inside;'>
3757
+ <li><strong>Compite para mejorar:</strong> Refina tu modelo para superar tu mejor marca personal.</li>
3758
+ <li><strong>Progresa como persona ingeniera y desbloquea herramientas:</strong> A medida que envíes más modelos, ganarás posiciones y desbloquearás mejores herramientas de análisis.</li>
3759
+ </ul>
3760
+
3761
+ </div>
3762
+ </div>
3763
+ """
3764
+ )
3765
+ # --- END FIX ---
3766
+
3767
+ with gr.Row():
3768
+ briefing_7_back = gr.Button("◀️ Atrás", size="lg")
3769
+ briefing_7_next = gr.Button("Comienza a construir un modelo ▶️", variant="primary", size="lg")
3770
+
3771
+ # --- End Briefing Slideshow ---
3772
+
3773
+
3774
+ # Model Building App (Main Interface)
3775
+ with gr.Column(visible=False, elem_id="model-step") as model_building_step:
3776
+ gr.Markdown("<h1 style='text-align:center;'>🛠️ Área de construcción de modelos</h1>")
3777
+
3778
+ # Status panel for initialization progress - HIDDEN
3779
+ init_status_display = gr.HTML(value="", visible=False)
3780
+
3781
+ # Banner for UI state
3782
+
3783
+ init_banner = gr.HTML(
3784
+ value=(
3785
+ "<div class='init-banner'>"
3786
+ "<p class='init-banner__text'>"
3787
+ "⏳ Inicializando datos y clasificación… puedes explorar, pero debes esperar a que esté listo para enviar."
3788
+ "</p>"
3789
+ "</div>"
3790
+ ),
3791
+ visible=True)
3792
+
3793
+ # Session-based authentication state objects
3794
+ # Concurrency Note: These are initialized to None/empty and populated
3795
+ # during handle_load_with_session_auth. Do NOT use os.environ here.
3796
+ username_state = gr.State(None)
3797
+ token_state = gr.State(None)
3798
+
3799
+ team_name_state = gr.State(None) # Populated via handle_load_with_session_auth
3800
+ last_submission_score_state = gr.State(0.0)
3801
+ last_rank_state = gr.State(0)
3802
+ best_score_state = gr.State(0.0)
3803
+ submission_count_state = gr.State(0)
3804
+ first_submission_score_state = gr.State(None)
3805
+
3806
+ # New states for readiness gating and preview tracking
3807
+ readiness_state = gr.State(False)
3808
+ was_preview_state = gr.State(False)
3809
+ kpi_meta_state = gr.State({})
3810
+ last_seen_ts_state = gr.State(None) # Track last seen user timestamp
3811
+
3812
+ # Buffered states for all dynamic inputs
3813
+ model_type_state = gr.State(DEFAULT_MODEL)
3814
+ complexity_state = gr.State(2)
3815
+ feature_set_state = gr.State(DEFAULT_FEATURE_SET)
3816
+ data_size_state = gr.State(DEFAULT_DATA_SIZE)
3817
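+ # Each *_state above mirrors its visible control: the .change() handlers wired
+ # further below (e.g. complexity_slider.change -> complexity_state) copy widget
+ # values into these per-session gr.State objects, so submission handlers read
+ # session-scoped state rather than process-wide globals or os.environ.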
+
3818
+ rank_message_display = gr.Markdown("### Cargando rango...")
3819
+ with gr.Row():
3820
+ with gr.Column(scale=1):
3821
+
3822
+ model_type_radio = gr.Radio(
3823
+ label="1. Estrategia del modelo",
3824
+ # UPDATED: Use the list of tuples [(Spanish, English)]
3825
+ choices=MODEL_RADIO_CHOICES,
3826
+ value=DEFAULT_MODEL, # "The Balanced Generalist" (English Key)
3827
+ interactive=False
3828
+ )
3829
+ model_card_display = gr.Markdown(get_model_card(DEFAULT_MODEL))
3830
+
3831
+ gr.Markdown("---") # Separator
3832
+
3833
+ complexity_slider = gr.Slider(
3834
+ label="2. Complejidad del modelo (1–10)",
3835
+ minimum=1, maximum=3, step=1, value=2,
3836
+ info="Valores más altos permiten aprender patrones más complejos, pero si son demasiado altos pueden empeorar los resultados."
3837
+ )
3838
+
3839
+ gr.Markdown("---") # Separator
3840
+
3841
+ feature_set_checkbox = gr.CheckboxGroup(
3842
+ label="3. Selecciona las variables de data",
3843
+ choices=FEATURE_SET_ALL_OPTIONS,
3844
+ value=DEFAULT_FEATURE_SET,
3845
+ interactive=False,
3846
+ info="¡Se desbloquean más ingredientes según tu posición en la clasificación!"
3847
+ )
3848
+
3849
+ gr.Markdown("---") # Separator
3850
+
3851
+ data_size_radio = gr.Radio(
3852
+ label="4. Tamaño de los datos",
3853
+ choices=[DEFAULT_DATA_SIZE],
3854
+ value=DEFAULT_DATA_SIZE,
3855
+ interactive=False
3856
+ )
3857
+
3858
+ gr.Markdown("---") # Separator
3859
+
3860
+ # Attempt tracker display
3861
+ attempts_tracker_display = gr.HTML(
3862
+ value="<div style='text-align:center; padding:8px; margin:8px 0; background:#f0f9ff; border-radius:8px; border:1px solid #bae6fd;'>"
3863
+ "<p style='margin:0; color:#0369a1; font-weight:600; font-size:1rem;'>📊 Attempts used: 0/10</p>"
3864
+ "</div>",
3865
+ visible=True
3866
+ )
3867
+
3868
+ submit_button = gr.Button(
3869
+ value="5. 🔬 Construir y enviar el modelo",
3870
+ variant="primary",
3871
+ size="lg"
3872
+ )
3873
+
3874
+ with gr.Column(scale=1):
3875
+ gr.HTML(
3876
+ """
3877
+ <div class='leaderboard-box'>
3878
+ <h3 style='margin-top:0;'>🏆 Clasificación en directo</h3>
3879
+ <p style='margin:0;'>Envía un modelo para ver tu posición.</p>
3880
+ </div>
3881
+ """
3882
+ )
3883
+
3884
+ # KPI Card
3885
+ submission_feedback_display = gr.HTML(
3886
+ "<p style='text-align:center; color:#6b7280; padding:20px 0;'>¡Envía tu primer modelo para recibir una valoración!</p>"
3887
+ )
3888
+
3889
+ # Inline Login Components (initially hidden)
3890
+ login_username = gr.Textbox(
3891
+ label="Username",
3892
+ placeholder="Introduce tu nombre de usuario de modelshare.ai",
3893
+ visible=False
3894
+ )
3895
+ login_password = gr.Textbox(
3896
+ label="Contraseña",
3897
+ type="password",
3898
+ placeholder="Introduce tu contraseña",
3899
+ visible=False
3900
+ )
3901
+ login_submit = gr.Button(
3902
+ "Sign In & Submit",
3903
+ variant="primary",
3904
+ visible=False
3905
+ )
3906
+ login_error = gr.HTML(
3907
+ value="",
3908
+ visible=False
3909
+ )
3910
+
3911
+ with gr.Tabs():
3912
+ with gr.TabItem("Clasificación por equipos"):
3913
+ team_leaderboard_display = gr.HTML(
3914
+ "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Submit a model to see team rankings.</p>"
3915
+ )
3916
+ with gr.TabItem("Clasificación individual"):
3917
+ individual_leaderboard_display = gr.HTML(
3918
+ "<p style='text-align:center; color:#6b7280; padding-top:20px;'>Envía un modelo para ver la clasificación individual.</p>"
3919
+ )
3920
+
3921
+ # REMOVED: Ethical Reminder HTML Block
3922
+ step_2_next = gr.Button("Finalizar y reflexionar ▶️", variant="secondary")
3923
+
3924
+ # Conclusion Step
3925
+ with gr.Column(visible=False, elem_id="conclusion-step") as conclusion_step:
3926
+ gr.Markdown("<h1 style='text-align:center;'>✅ Sección completada</h1>")
3927
+ final_score_display = gr.HTML(value="<p>Preparando el resumen final...</p>")
3928
+ step_3_back = gr.Button("◀️ Volver al experimento")
3929
+
3930
+ # --- Navigation Logic ---
3931
+ all_steps_nav = [
3932
+ briefing_slide_1, briefing_slide_2, briefing_slide_3,
3933
+ briefing_slide_4, briefing_slide_5, briefing_slide_6, briefing_slide_7,
3934
+ model_building_step, conclusion_step, loading_screen
3935
+ ]
3936
+
3937
+ def create_nav(current_step, next_step):
3938
+ """
3939
+ Simplified navigation: switches slide visibility directly, without an artificial loading screen.
3940
+ The loading screen is only shown when entering the arena while background initialization is not yet complete.
3941
+ """
3942
+ def _nav():
3943
+ # Direct single-step navigation
3944
+ updates = {next_step: gr.update(visible=True)}
3945
+ for s in all_steps_nav:
3946
+ if s != next_step:
3947
+ updates[s] = gr.update(visible=False)
3948
+ return updates
3949
+ return _nav
3950
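+
+ # Note: the closure returned by create_nav yields a dict keyed by components;
+ # Gradio matches those keys against the outputs list (all_steps_nav) passed to
+ # each .click() call below, so a navigation click only toggles visibility.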
+
3951
+ def finalize_and_show_conclusion(best_score, submissions, rank, first_score, feature_set):
3952
+ """Build dynamic conclusion HTML and navigate to conclusion step."""
3953
+ html = build_final_conclusion_html(best_score, submissions, rank, first_score, feature_set)
3954
+ updates = {
3955
+ conclusion_step: gr.update(visible=True),
3956
+ final_score_display: gr.update(value=html)
3957
+ }
3958
+ for s in all_steps_nav:
3959
+ if s != conclusion_step:
3960
+ updates[s] = gr.update(visible=False)
3961
+ return [updates[s] if s in updates else gr.update() for s in all_steps_nav] + [html]
3962
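+ # The list above is ordered to match outputs=all_steps_nav + [final_score_display]
+ # in the step_2_next.click() wiring below; the trailing html value feeds the
+ # final_score_display component directly.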
+
3963
+ # Helper function to generate navigation JS with loading overlay
3964
+ def nav_js(target_id: str, message: str, min_show_ms: int = 1200) -> str:
3965
+ """
3966
+ Generate JavaScript for enhanced slide navigation with loading overlay.
3967
+
3968
+ Args:
3969
+ target_id: Element ID of the target slide (e.g., 'slide-2', 'model-step')
3970
+ message: Loading message to display during transition
3971
+ min_show_ms: Minimum time to show overlay (prevents flicker)
3972
+
3973
+ Returns:
3974
+ JavaScript arrow function string for Gradio's js parameter
3975
+ """
3976
+ return f"""
3977
+ ()=>{{
3978
+ try {{
3979
+ // Show overlay immediately
3980
+ const overlay = document.getElementById('nav-loading-overlay');
3981
+ const messageEl = document.getElementById('nav-loading-text');
3982
+ if(overlay && messageEl) {{
3983
+ messageEl.textContent = '{message}';
3984
+ overlay.style.display = 'flex';
3985
+ setTimeout(() => {{ overlay.style.opacity = '1'; }}, 10);
3986
+ }}
3987
+
3988
+ const startTime = Date.now();
3989
+
3990
+ // Scroll to top after brief delay
3991
+ setTimeout(() => {{
3992
+ const anchor = document.getElementById('app_top_anchor');
3993
+ const container = document.querySelector('.gradio-container') || document.scrollingElement || document.documentElement;
3994
+
3995
+ function doScroll() {{
3996
+ if(anchor) {{ anchor.scrollIntoView({{behavior:'smooth', block:'start'}}); }}
3997
+ else {{ container.scrollTo({{top:0, behavior:'smooth'}}); }}
3998
+
3999
+ // Best-effort Colab iframe scroll
4000
+ try {{
4001
+ if(window.parent && window.parent !== window && window.frameElement) {{
4002
+ const top = window.frameElement.getBoundingClientRect().top + window.parent.scrollY;
4003
+ window.parent.scrollTo({{top: Math.max(top - 10, 0), behavior:'smooth'}});
4004
+ }}
4005
+ }} catch(e2) {{}}
4006
+ }}
4007
+
4008
+ doScroll();
4009
+ // Retry scroll to combat layout shifts
4010
+ let scrollAttempts = 0;
4011
+ const scrollInterval = setInterval(() => {{
4012
+ scrollAttempts++;
4013
+ doScroll();
4014
+ if(scrollAttempts >= 3) clearInterval(scrollInterval);
4015
+ }}, 130);
4016
+ }}, 40);
4017
+
4018
+ // Poll for target visibility and minimum display time
4019
+ const targetId = '{target_id}';
4020
+ const minShowMs = {min_show_ms};
4021
+ let pollCount = 0;
4022
+ const maxPolls = 77; // ~7 seconds max
4023
+
4024
+ const pollInterval = setInterval(() => {{
4025
+ pollCount++;
4026
+ const elapsed = Date.now() - startTime;
4027
+ const target = document.getElementById(targetId);
4028
+ const isVisible = target && target.offsetParent !== null &&
4029
+ window.getComputedStyle(target).display !== 'none';
4030
+
4031
+ // Hide overlay when target is visible AND minimum time elapsed
4032
+ if((isVisible && elapsed >= minShowMs) || pollCount >= maxPolls) {{
4033
+ clearInterval(pollInterval);
4034
+ if(overlay) {{
4035
+ overlay.style.opacity = '0';
4036
+ setTimeout(() => {{ overlay.style.display = 'none'; }}, 300);
4037
+ }}
4038
+ }}
4039
+ }}, 90);
4040
+
4041
+ }} catch(e) {{ console.warn('nav-js error', e); }}
4042
+ }}
4043
+ """
4044
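+
+ # Note: inside the f-string above, literal JavaScript braces are doubled ({{ }});
+ # only {target_id}, {message} and {min_show_ms} are interpolated by Python.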
+
4045
+
4046
+ # Wire up slide buttons with enhanced navigation
4047
+ briefing_1_next.click(
4048
+ fn=create_nav(briefing_slide_1, briefing_slide_2),
4049
+ inputs=None, outputs=all_steps_nav,
4050
+ js=nav_js("slide-2", "Cargando la misión...")
4051
+ )
4052
+ briefing_2_back.click(
4053
+ fn=create_nav(briefing_slide_2, briefing_slide_1),
4054
+ inputs=None, outputs=all_steps_nav,
4055
+ js=nav_js("slide-1", "Volviendo a la introducción...")
4056
+ )
4057
+ briefing_2_next.click(
4058
+ fn=create_nav(briefing_slide_2, briefing_slide_3),
4059
+ inputs=None, outputs=all_steps_nav,
4060
+ js=nav_js("slide-3", "Explorando el concepto de modelo...")
4061
+ )
4062
+ briefing_3_back.click(
4063
+ fn=create_nav(briefing_slide_3, briefing_slide_2),
4064
+ inputs=None, outputs=all_steps_nav,
4065
+ js=nav_js("slide-2", "Retrocediendo...")
4066
+ )
4067
+ briefing_3_next.click(
4068
+ fn=create_nav(briefing_slide_3, briefing_slide_4),
4069
+ inputs=None, outputs=all_steps_nav,
4070
+ js=nav_js("slide-4", "Entendiendo el ciclo del experimento...")
4071
+ )
4072
+ briefing_4_back.click(
4073
+ fn=create_nav(briefing_slide_4, briefing_slide_3),
4074
+ inputs=None, outputs=all_steps_nav,
4075
+ js=nav_js("slide-3", "Repasando conceptos anteriores...")
4076
+ )
4077
+ briefing_4_next.click(
4078
+ fn=create_nav(briefing_slide_4, briefing_slide_5),
4079
+ inputs=None, outputs=all_steps_nav,
4080
+ js=nav_js("slide-5", "Configurando los controles del modelo...")
4081
+ )
4082
+ briefing_5_back.click(
4083
+ fn=create_nav(briefing_slide_5, briefing_slide_4),
4084
+ inputs=None, outputs=all_steps_nav,
4085
+ js=nav_js("slide-4", "Volviendo al ciclo...")
4086
+ )
4087
+ briefing_5_next.click(
4088
+ fn=create_nav(briefing_slide_5, briefing_slide_6),
4089
+ inputs=None, outputs=all_steps_nav,
4090
+ js=nav_js("slide-6", "Configurando los datos...")
4091
+ )
4092
+ briefing_6_back.click(
4093
+ fn=create_nav(briefing_slide_6, briefing_slide_5),
4094
+ inputs=None, outputs=all_steps_nav,
4095
+ js=nav_js("slide-5", "Ajustando la estrategia del modelo...")
4096
+ )
4097
+ briefing_6_next.click(
4098
+ fn=create_nav(briefing_slide_6, briefing_slide_7),
4099
+ inputs=None, outputs=all_steps_nav,
4100
+ js=nav_js("slide-7", "Preparando el resumen de la puntuación...")
4101
+ )
4102
+ briefing_7_back.click(
4103
+ fn=create_nav(briefing_slide_7, briefing_slide_6),
4104
+ inputs=None, outputs=all_steps_nav,
4105
+ js=nav_js("slide-6", "Repasando los controles de datos...")
4106
+ )
4107
+ # Slide 7 -> App
4108
+ briefing_7_next.click(
4109
+ fn=create_nav(briefing_slide_7, model_building_step),
4110
+ inputs=None, outputs=all_steps_nav,
4111
+ js=nav_js("model-step", "Entrando en el área de construcción de modelos...")
4112
+ )
4113
+
4114
+ # App -> Conclusion
4115
+ step_2_next.click(
4116
+ fn=finalize_and_show_conclusion,
4117
+ inputs=[
4118
+ best_score_state,
4119
+ submission_count_state,
4120
+ last_rank_state,
4121
+ first_submission_score_state,
4122
+ feature_set_state
4123
+ ],
4124
+ outputs=all_steps_nav + [final_score_display],
4125
+ js=nav_js("conclusion-step", "Generando el resumen de rendimiento...")
4126
+ )
4127
+
4128
+ # Conclusion -> App
4129
+ step_3_back.click(
4130
+ fn=create_nav(conclusion_step, model_building_step),
4131
+ inputs=None, outputs=all_steps_nav,
4132
+ js=nav_js("model-step", "Volviendo al área de trabajo del experimento...")
4133
+ )
4134
+
4135
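+ # Illustrative sketch (assumption — create_nav is defined earlier in this module and is not
+ # shown in this hunk): it is taken to be a small factory returning a zero-argument callback
+ # that shows the target step and hides the rest, which is why every slide button above uses
+ # outputs=all_steps_nav, e.g.:
+ #
+ #     def create_nav(current_step, target_step):
+ #         def _nav():
+ #             return [gr.update(visible=(step is target_step)) for step in all_steps_nav]
+ #         return _nav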
+ # Events
4136
+ model_type_radio.change(
4137
+ fn=get_model_card,
4138
+ inputs=model_type_radio,
4139
+ outputs=model_card_display
4140
+ )
4141
+ model_type_radio.change(
4142
+ fn=lambda v: v or DEFAULT_MODEL,
4143
+ inputs=model_type_radio,
4144
+ outputs=model_type_state
4145
+ )
4146
+ complexity_slider.change(fn=lambda v: v, inputs=complexity_slider, outputs=complexity_state)
4147
+
4148
+ feature_set_checkbox.change(
4149
+ fn=lambda v: v or [],
4150
+ inputs=feature_set_checkbox,
4151
+ outputs=feature_set_state
4152
+ )
4153
+ data_size_radio.change(
4154
+ fn=lambda v: v or DEFAULT_DATA_SIZE,
4155
+ inputs=data_size_radio,
4156
+ outputs=data_size_state
4157
+ )
4158
+
4159
+ all_outputs = [
4160
+ submission_feedback_display,
4161
+ team_leaderboard_display,
4162
+ individual_leaderboard_display,
4163
+ last_submission_score_state,
4164
+ last_rank_state,
4165
+ best_score_state,
4166
+ submission_count_state,
4167
+ first_submission_score_state,
4168
+ rank_message_display,
4169
+ model_type_radio,
4170
+ complexity_slider,
4171
+ feature_set_checkbox,
4172
+ data_size_radio,
4173
+ submit_button,
4174
+ login_username,
4175
+ login_password,
4176
+ login_submit,
4177
+ login_error,
4178
+ attempts_tracker_display,
4179
+ was_preview_state,
4180
+ kpi_meta_state,
4181
+ last_seen_ts_state
4182
+ ]
4183
+
4184
+ # Wire up login button
4185
+ login_submit.click(
4186
+ fn=perform_inline_login,
4187
+ inputs=[login_username, login_password],
4188
+ outputs=[
4189
+ login_username,
4190
+ login_password,
4191
+ login_submit,
4192
+ login_error,
4193
+ submit_button,
4194
+ submission_feedback_display,
4195
+ team_name_state,
4196
+ username_state, # NEW
4197
+ token_state # NEW
4198
+ ]
4199
+ )
4200
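+ # Gradio requires perform_inline_login to return one value per output listed above, i.e. a
+ # 9-tuple in this order: updates for the four login widgets (username, password, submit,
+ # error), the experiment submit button and the feedback display, followed by the team name,
+ # username and token for the session-scoped gr.State objects.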
+
4201
+ # Username/token now come from the session-scoped gr.State objects (username_state,
+ # token_state) rather than from a gr.State(username) input
4202
+ submit_button.click(
4203
+ fn=run_experiment,
4204
+ inputs=[
4205
+ model_type_state,
4206
+ complexity_state,
4207
+ feature_set_state,
4208
+ data_size_state,
4209
+ team_name_state,
4210
+ last_submission_score_state,
4211
+ last_rank_state,
4212
+ submission_count_state,
4213
+ first_submission_score_state,
4214
+ best_score_state,
4215
+ username_state, # NEW: Session-based auth
4216
+ token_state, # NEW: Session-based auth
4217
+ readiness_state, # Renamed to readiness_flag in function signature
4218
+ was_preview_state, # Renamed to was_preview_prev in function signature
4219
+ # kpi_meta_state removed from inputs - used only as output
4220
+ ],
4221
+ outputs=all_outputs,
4222
+ show_progress="full",
4223
+ js=nav_js("model-step", "Running experiment...", 500)
4224
+ )
4225
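+ # run_experiment must return exactly one value per entry in all_outputs (22 here), in the
+ # same order; gr.update() may be returned for any component that should be left unchanged
+ # on a given submission.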
+
4226
+ # Timer for polling initialization status
4227
+ status_timer = gr.Timer(value=0.5, active=True) # Poll every 0.5 seconds
4228
+
4229
+ def update_init_status():
4230
+ """
4231
+ Poll initialization status and update UI elements.
4232
+ Returns status HTML, banner visibility, submit button state, data size choices, timer activity, and readiness_state.
4233
+ """
4234
+ status_html, ready = poll_init_status()
4235
+
4236
+ # Update banner visibility - hide when ready
4237
+ banner_visible = not ready
4238
+
4239
+ # Update submit button
4240
+ if ready:
4241
+ submit_label = "5. 🔬 Construir y enviar el modelo"
4242
+ submit_interactive = True
4243
+ else:
4244
+ submit_label = "⏳ Esperando datos..."
4245
+ submit_interactive = False
4246
+
4247
+ # Get available data sizes based on init progress
4248
+ available_sizes = get_available_data_sizes()
4249
+
4250
+ # Stop timer once fully initialized
4251
+ timer_active = not (ready and INIT_FLAGS.get("pre_samples_full", False))
4252
+
4253
+ return (
4254
+ status_html,
4255
+ gr.update(visible=banner_visible),
4256
+ gr.update(value=submit_label, interactive=submit_interactive),
4257
+ gr.update(choices=available_sizes),
4258
+ gr.Timer(active=timer_active),  # returning gr.Timer(active=...) actually (de)activates the poller; a bare bool would only change its value
4259
+ ready # readiness_state
4260
+ )
4261
+
4262
+ status_timer.tick(
4263
+ fn=update_init_status,
4264
+ inputs=None,
4265
+ outputs=[init_status_display, init_banner, submit_button, data_size_radio, status_timer, readiness_state]
4266
+ )
4267
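+ # Illustrative sketch (assumption — poll_init_status, get_available_data_sizes and INIT_FLAGS
+ # are defined earlier in this module and are not shown in this hunk): the poller is expected
+ # to follow a contract roughly like
+ #
+ #     def poll_init_status() -> tuple[str, bool]:
+ #         """Return (status_html, ready); ready means the training data can be used."""
+ #         ready = INIT_FLAGS.get("data_loaded", False)   # hypothetical flag name
+ #         return ("<em>Datos listos</em>" if ready else "<em>Cargando datos…</em>", ready)
+ #
+ # which update_init_status translates into UI updates on each 0.5 s tick.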
+
4268
+ # Handle session-based authentication on page load
4269
+ def handle_load_with_session_auth(request: "gr.Request"):
4270
+ """
4271
+ Check for session token, auto-login if present, then load initial UI with stats.
4272
+
4273
+ Concurrency Note: This function does NOT set per-user values in os.environ.
4274
+ All authentication state is returned via gr.State objects (username_state,
4275
+ token_state, team_name_state) to prevent cross-user data leakage.
4276
+ """
4277
+ success, username, token = _try_session_based_auth(request)
4278
+
4279
+ if success and username and token:
4280
+ _log(f"Session auth successful on load for {username}")
4281
+
4282
+ # Get user stats and team from cache/leaderboard
4283
+ stats = _compute_user_stats(username, token)
4284
+ team_name = stats.get("team_name", "")
4285
+
4286
+ # Concurrency Note: Do NOT set os.environ for per-user values.
4287
+ # Return state via gr.State objects exclusively.
4288
+
4289
+ # Hide login form since user is authenticated via session
4290
+ # Return initial load results plus login form hidden
4291
+ # Pass token explicitly for authenticated leaderboard fetch
4292
+ initial_results = on_initial_load(username, token=token, team_name=team_name)
4293
+ return initial_results + (
4294
+ gr.update(visible=False), # login_username
4295
+ gr.update(visible=False), # login_password
4296
+ gr.update(visible=False), # login_submit
4297
+ gr.update(visible=False), # login_error (hide any messages)
4298
+ username, # username_state
4299
+ token, # token_state
4300
+ team_name, # team_name_state
4301
+ )
4302
+ else:
4303
+ _log("No valid session on load, showing login form")
4304
+ # No valid session, proceed with normal load (show login form)
4305
+ # No token available, call without token
4306
+ initial_results = on_initial_load(None, token=None, team_name="")
4307
+ return initial_results + (
4308
+ gr.update(visible=True), # login_username
4309
+ gr.update(visible=True), # login_password
4310
+ gr.update(visible=True), # login_submit
4311
+ gr.update(visible=False), # login_error
4312
+ None, # username_state
4313
+ None, # token_state
4314
+ "", # team_name_state
4315
+ )
4316
+
4317
+ demo.load(
4318
+ fn=handle_load_with_session_auth,
4319
+ inputs=None, # Request is auto-injected
4320
+ outputs=[
4321
+ model_card_display,
4322
+ team_leaderboard_display,
4323
+ individual_leaderboard_display,
4324
+ rank_message_display,
4325
+ model_type_radio,
4326
+ complexity_slider,
4327
+ feature_set_checkbox,
4328
+ data_size_radio,
4329
+ login_username,
4330
+ login_password,
4331
+ login_submit,
4332
+ login_error,
4333
+ username_state, # NEW
4334
+ token_state, # NEW
4335
+ team_name_state, # NEW
4336
+ ]
4337
+ )
4338
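+ # Illustrative sketch (assumption — _try_session_based_auth is defined elsewhere in this
+ # module): it is expected to read a session token from the incoming request and return a
+ # (success, username, token) triple, roughly along these lines:
+ #
+ #     def _try_session_based_auth(request: gr.Request):
+ #         token = dict(request.query_params or {}).get("session_token") \
+ #                 or (request.cookies or {}).get("session_token")       # hypothetical token key
+ #         if not token:
+ #             return False, None, None
+ #         username = _validate_session_token(token)                     # hypothetical helper
+ #         return (username is not None), username, token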
+
4339
+ return demo
4340
+
4341
+ # -------------------------------------------------------------------------
4342
+ # 4. Convenience Launcher
4343
+ # -------------------------------------------------------------------------
4344
+
4345
+ def launch_model_building_game_es_app(height: int = 1200, share: bool = False, debug: bool = False) -> None:
4346
+ """
4347
+ Create and directly launch the Model Building Game app inline (e.g., in notebooks).
4348
+ """
4349
+ global playground, X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST
4350
+ if playground is None:
4351
+ try:
4352
+ playground = Competition(MY_PLAYGROUND_ID)
4353
+ except Exception as e:
4354
+ print(f"WARNING: Could not connect to playground: {e}")
4355
+ playground = None
4356
+
4357
+ if X_TRAIN_RAW is None:
4358
+ X_TRAIN_RAW, X_TEST_RAW, Y_TRAIN, Y_TEST = load_and_prep_data()
4359
+
4360
+ demo = create_model_building_game_es_app()
4361
+ port = int(os.environ.get("PORT", 8080))
4362
+ demo.launch(share=share, inline=True, debug=debug, height=height, server_port=port)
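+ # Example usage (sketch, assuming this hunk belongs to
+ # aimodelshare/moral_compass/apps/model_building_app_es.py):
+ #
+ #     from aimodelshare.moral_compass.apps.model_building_app_es import (
+ #         launch_model_building_game_es_app,
+ #     )
+ #     launch_model_building_game_es_app(height=1000, share=False, debug=False)
+ #
+ # The server port is taken from the PORT environment variable when set, falling back to 8080.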