aimodelshare 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,853 @@
|
|
|
1
|
+
"""
|
|
2
|
+
What is AI - Gradio application for the Justice & Equity Challenge.
|
|
3
|
+
Updated with i18n support for English (en), Spanish (es), and Catalan (ca).
|
|
4
|
+
"""
|
|
5
|
+
import contextlib
|
|
6
|
+
import os
|
|
7
|
+
import gradio as gr
|
|
8
|
+
|
|
9
|
+
# -------------------------------------------------------------------------
|
|
10
|
+
# TRANSLATION CONFIGURATION
|
|
11
|
+
# -------------------------------------------------------------------------
|
|
12
|
+
|
|
13
|
+
# Static i18n string table for this Gradio app, keyed by language code
# ("en", "es", "ca").  Each inner dict maps a UI string key to its localized
# text; the key set is identical across the three languages, and lookups fall
# back to English (see the `t` helper in `_create_simple_predictor`).
# NOTE: several values embed HTML markup (<b>, <br>, inline styles) because
# they are rendered inside Gradio HTML components — treat them as markup, not
# plain text, when editing.
TRANSLATIONS = {
    "en": {
        "title": "🤖 What is AI, Anyway?",
        "intro_box": "Before you can build better AI systems, you need to understand what AI actually is.<br>Don't worry - we'll explain it in simple, everyday terms!",
        "loading": "⏳ Loading...",
        # Step 1: simple definition of AI
        "s1_title": "🎯 A Simple Definition",
        "s1_head": "Artificial Intelligence (AI) is just a fancy name for:",
        "s1_big": "A system that makes predictions based on patterns",
        "s1_sub": "That's it! Let's break down what that means...",
        "s1_list_title": "Think About How YOU Make Predictions:",
        "s1_li1": "<b>Weather:</b> Dark clouds → You predict rain → You bring an umbrella",
        "s1_li2": "<b>Traffic:</b> Rush hour time → You predict congestion → You leave early",
        "s1_li3": "<b>Movies:</b> Actor you like → You predict you'll enjoy it → You watch it",
        "s1_highlight": "AI does the same thing, but using data and math instead of human experience and intuition.",
        "btn_next_formula": "Next: The AI Formula ▶️",
        # Step 2: input → model → output formula
        "s2_title": "📐 The Three-Part Formula",
        "s2_intro": "Every AI system works the same way, following this simple formula:",
        "lbl_input": "INPUT",
        "lbl_model": "MODEL",
        "lbl_output": "OUTPUT",
        "desc_input": "Data goes in",
        "desc_model": "AI processes it",
        "desc_output": "Prediction comes out",
        "s2_ex_title": "Real-World Examples:",
        "s2_ex1_in": "Photo of a dog",
        "s2_ex1_mod": "Image recognition AI",
        "s2_ex1_out": "\"This is a Golden Retriever\"",
        "s2_ex2_in": "\"How's the weather?\"",
        "s2_ex2_mod": "Language AI (like ChatGPT)",
        "s2_ex2_out": "A helpful response",
        "s2_ex3_in": "Person's criminal history",
        "s2_ex3_mod": "Risk assessment AI",
        "s2_ex3_out": "\"High Risk\" or \"Low Risk\"",
        "btn_back": "◀️ Back",
        "btn_next_learn": "Next: How Models Learn ▶️",
        # Step 3: how models learn / training loop
        "s3_title": "🧠 How Does an AI Model Learn?",
        "s3_h1": "1. It Learns from Examples",
        "s3_p1": "An AI model isn't programmed with answers. Instead, it's trained on a huge number of examples, and it learns how to find the answers on its own.",
        "s3_p2": "In our justice scenario, this means feeding the model thousands of past cases (<b>examples</b>) to teach it how to find the <b>patterns</b> that connect a person's details to their criminal risk.",
        "s3_h2": "2. The Training Process",
        "s3_p3": "The AI \"trains\" by looping through historical data (past cases) millions of times:",
        "flow_1": "1. INPUT<br>EXAMPLES",
        "flow_2": "2. MODEL<br>GUESSES",
        "flow_3": "3. CHECK<br>ANSWER",
        "flow_4": "4. ADJUST<br>WEIGHTS",
        "flow_5": "LEARNED<br>MODEL",
        "s3_p4": "During the <b>\"Adjust\"</b> step, the model changes its internal rules (called <b>\"weights\"</b>) to get closer to the right answer. For example, it learns <b>how much</b> \"prior offenses\" should matter more than \"age\".",
        "s3_eth_title": "⚠️ The Ethical Challenge",
        "s3_eth_p": "<b>Here's the critical problem:</b> The model *only* learns from the data. If the historical data is biased (e.g., certain groups were arrested more often), the model will learn those biased patterns.<br><br><b>The model doesn't know \"fairness\" or \"justice,\" it only knows patterns.</b>",
        "btn_next_try": "Next: Try It Yourself ▶️",
        # Step 4 (Interactive): hands-on risk predictor demo
        "s4_title": "🎮 Try It Yourself!",
        "s4_intro": "<b>Let's use a simple AI model to predict criminal risk.</b><br>Adjust the inputs below and see how the model's prediction changes!",
        "s4_sect1": "1️⃣ INPUT: Adjust the Data",
        "lbl_age": "Age",
        "info_age": "Defendant's age",
        "lbl_priors": "Prior Offenses",
        "info_priors": "Number of previous crimes",
        "lbl_severity": "Current Charge Severity",
        "info_severity": "How serious is the current charge?",
        # NOTE: these option labels double as logic values — the localized
        # strings are mapped to scores in `_create_simple_predictor`.
        "opt_minor": "Minor",
        "opt_moderate": "Moderate",
        "opt_serious": "Serious",
        "s4_sect2": "2️⃣ MODEL: Process the Data",
        "btn_run": "🔮 Run AI Prediction",
        "s4_sect3": "3️⃣ OUTPUT: See the Prediction",
        "res_placeholder": "Click \"Run AI Prediction\" above to see the result",
        "s4_highlight": "<b>What You Just Did:</b><br><br>You used a very simple AI model! You provided <b style='color:#0369a1;'>input data</b> (age, priors, severity), the <b style='color:#92400e;'>model processed it</b> using rules and patterns, and it produced an <b style='color:#15803d;'>output prediction</b>.<br><br>Real AI models are more complex, but they work on the same principle!",
        "btn_next_conn": "Next: Connection to Justice ▶️",
        # Step 5: tie-back to the criminal-justice scenario
        "s5_title": "🔗 Connecting to Criminal Justice",
        "s5_p1": "<b>Remember the risk prediction you used earlier as a judge?</b>",
        "s5_p2": "That was a real-world example of AI in action:",
        "s5_in_desc": "• Age, race, gender, prior offenses, charge details",
        "s5_mod_desc1": "• Trained on historical criminal justice data",
        "s5_mod_desc2": "• Looks for patterns in who re-offended in the past",
        "s5_out_desc": "• \"High Risk\", \"Medium Risk\", or \"Low Risk\"",
        "s5_h2": "Why This Matters for Ethics:",
        "s5_li1": "The <b>input data</b> might contain historical biases",
        "s5_li2": "The <b>model</b> learns patterns from potentially unfair past decisions",
        "s5_li3": "The <b>output predictions</b> can perpetuate discrimination",
        "s5_final": "<b>Understanding how AI works is the first step to building fairer systems.</b><br><br>Now that you know what AI is, you're ready to help design better models that are more ethical and less biased!",
        "btn_complete": "Complete This Section ▶️",
        # Step 6: completion / recap screen
        "s6_title": "🎓 You Now Understand the Basics of AI!",
        "s6_congrats": "<b>Congratulations!</b> You now know:",
        "s6_li1": "What AI is (a prediction system)",
        "s6_li2": "How it works (Input → Model → Output)",
        "s6_li3": "How AI models learn from data",
        "s6_li4": "Why it matters for criminal justice",
        "s6_li5": "The ethical implications of AI decisions",
        "s6_next": "<b>Next Steps:</b>",
        "s6_next_desc": "In the following sections, you'll learn how to build and improve AI models to make them more fair and ethical.",
        "s6_scroll": "👇 SCROLL DOWN 👇",
        "s6_find": "Continue to the next section below.",
        "btn_review": "◀️ Back to Review",
        # Logic / Dynamic: strings consumed by the predictor at runtime
        "risk_high": "High Risk",
        "risk_med": "Medium Risk",
        "risk_low": "Low Risk",
        "risk_score": "Risk Score:"
    },
    "es": {
        "title": "🤖 ¿Qué es la IA, en realidad?",
        "intro_box": "Antes de poder construir mejores sistemas de IA, necesitas entender qué es realmente la IA.<br>No te preocupes, ¡lo explicaremos en términos simples y cotidianos!",
        "loading": "⏳ Cargando...",
        # Step 1
        "s1_title": "🎯 Una Definición Simple",
        "s1_head": "Inteligencia Artificial (IA) es solo un nombre elegante para:",
        "s1_big": "Un sistema que hace predicciones basadas en patrones",
        "s1_sub": "¡Eso es todo! Desglosemos qué significa eso...",
        "s1_list_title": "Piensa en cómo TÚ haces predicciones:",
        "s1_li1": "<b>Clima:</b> Nubes oscuras → Predices lluvia → Llevas paraguas",
        "s1_li2": "<b>Tráfico:</b> Hora pico → Predices congestión → Sales temprano",
        "s1_li3": "<b>Cine:</b> Actor que te gusta → Predices que te gustará → La ves",
        "s1_highlight": "La IA hace lo mismo, pero usando datos y matemáticas en lugar de experiencia humana e intuición.",
        "btn_next_formula": "Siguiente: La Fórmula de la IA ▶️",
        # Step 2
        "s2_title": "📐 La Fórmula de Tres Partes",
        "s2_intro": "Todo sistema de IA funciona de la misma manera, siguiendo esta fórmula simple:",
        "lbl_input": "ENTRADA",
        "lbl_model": "MODELO",
        "lbl_output": "SALIDA",
        "desc_input": "Entran datos",
        "desc_model": "IA procesa",
        "desc_output": "Sale predicción",
        "s2_ex_title": "Ejemplos del Mundo Real:",
        "s2_ex1_in": "Foto de un perro",
        "s2_ex1_mod": "IA de reconocimiento de imagen",
        "s2_ex1_out": "\"Esto es un Golden Retriever\"",
        "s2_ex2_in": "\"¿Qué tal el clima?\"",
        "s2_ex2_mod": "IA de lenguaje (como ChatGPT)",
        "s2_ex2_out": "Una respuesta útil",
        "s2_ex3_in": "Historial criminal de una persona",
        "s2_ex3_mod": "IA de evaluación de riesgos",
        "s2_ex3_out": "\"Alto Riesgo\" o \"Bajo Riesgo\"",
        "btn_back": "◀️ Atrás",
        "btn_next_learn": "Siguiente: Cómo Aprenden los Modelos ▶️",
        # Step 3
        "s3_title": "🧠 ¿Cómo Aprende un Modelo de IA?",
        "s3_h1": "1. Aprende de Ejemplos",
        "s3_p1": "Un modelo de IA no está programado con respuestas. En cambio, se entrena con una gran cantidad de ejemplos y aprende a encontrar las respuestas por sí mismo.",
        "s3_p2": "En nuestro escenario de justicia, esto significa alimentar al modelo con miles de casos pasados (<b>ejemplos</b>) para enseñarle a encontrar los <b>patrones</b> que conectan los detalles de una persona con su riesgo criminal.",
        "s3_h2": "2. El Proceso de Entrenamiento",
        "s3_p3": "La IA \"entrena\" recorriendo datos históricos (casos pasados) millones de veces:",
        "flow_1": "1. EJEMPLOS<br>ENTRADA",
        "flow_2": "2. MODELO<br>ADIVINA",
        "flow_3": "3. REVISAR<br>RESPUESTA",
        "flow_4": "4. AJUSTAR<br>PESOS",
        "flow_5": "MODELO<br>APRENDIDO",
        "s3_p4": "Durante el paso de <b>\"Ajustar\"</b>, el modelo cambia sus reglas internas (llamadas <b>\"pesos\"</b>) para acercarse a la respuesta correcta. Por ejemplo, aprende <b>cuánto</b> deben importar más los \"delitos previos\" que la \"edad\".",
        "s3_eth_title": "⚠️ El Desafío Ético",
        "s3_eth_p": "<b>Aquí está el problema crítico:</b> El modelo *solo* aprende de los datos. Si los datos históricos están sesgados (por ejemplo, ciertos grupos fueron arrestados con más frecuencia), el modelo aprenderá esos patrones sesgados.<br><br><b>El modelo no conoce la \"equidad\" o la \"justicia\", solo conoce patrones.</b>",
        "btn_next_try": "Siguiente: Pruébalo Tú Mismo ▶️",
        # Step 4
        "s4_title": "🎮 ¡Pruébalo Tú Mismo!",
        "s4_intro": "<b>Usemos un modelo de IA simple para predecir el riesgo criminal.</b><br>¡Ajusta las entradas a continuación y ve cómo cambia la predicción del modelo!",
        "s4_sect1": "1️⃣ ENTRADA: Ajusta los Datos",
        "lbl_age": "Edad",
        "info_age": "Edad del acusado",
        "lbl_priors": "Delitos Previos",
        "info_priors": "Número de crímenes anteriores",
        "lbl_severity": "Gravedad del Cargo Actual",
        "info_severity": "¿Qué tan grave es el cargo actual?",
        "opt_minor": "Menor",
        "opt_moderate": "Moderado",
        "opt_serious": "Grave",
        "s4_sect2": "2️⃣ MODELO: Procesa los Datos",
        "btn_run": "🔮 Ejecutar Predicción IA",
        "s4_sect3": "3️⃣ SALIDA: Ver la Predicción",
        "res_placeholder": "Haz clic en \"Ejecutar Predicción IA\" arriba para ver el resultado",
        "s4_highlight": "<b>Lo Que Acabas de Hacer:</b><br><br>¡Usaste un modelo de IA muy simple! Proporcionaste <b style='color:#0369a1;'>datos de entrada</b> (edad, delitos, gravedad), el <b style='color:#92400e;'>modelo los procesó</b> usando reglas y patrones, y produjo una <b style='color:#15803d;'>predicción de salida</b>.<br><br>¡Los modelos de IA reales son más complejos, pero funcionan bajo el mismo principio!",
        "btn_next_conn": "Siguiente: Conexión con la Justicia ▶️",
        # Step 5
        "s5_title": "🔗 Conectando con la Justicia Penal",
        "s5_p1": "<b>¿Recuerdas la predicción de riesgo que usaste antes como juez?</b>",
        "s5_p2": "Ese fue un ejemplo del mundo real de IA en acción:",
        "s5_in_desc": "• Edad, raza, género, delitos previos, detalles del cargo",
        "s5_mod_desc1": "• Entrenado con datos históricos de justicia penal",
        "s5_mod_desc2": "• Busca patrones en quién reincidió en el pasado",
        "s5_out_desc": "• \"Alto Riesgo\", \"Riesgo Medio\" o \"Bajo Riesgo\"",
        "s5_h2": "Por Qué Esto Importa para la Ética:",
        "s5_li1": "Los <b>datos de entrada</b> pueden contener sesgos históricos",
        "s5_li2": "El <b>modelo</b> aprende patrones de decisiones pasadas potencialmente injustas",
        "s5_li3": "Las <b>predicciones de salida</b> pueden perpetuar la discriminación",
        "s5_final": "<b>Entender cómo funciona la IA es el primer paso para construir sistemas más justos.</b><br><br>¡Ahora que sabes qué es la IA, estás listo para ayudar a diseñar mejores modelos que sean más éticos y menos sesgados!",
        "btn_complete": "Completar esta Sección ▶️",
        # Step 6
        "s6_title": "🎓 ¡Ahora Entiendes Los Conceptos Básicos de La IA!",
        "s6_congrats": "<b>¡Felicidades!</b> Ahora sabes:",
        "s6_li1": "Qué es la IA (un sistema de predicción)",
        "s6_li2": "Cómo funciona (Entrada → Modelo → Salida)",
        "s6_li3": "Cómo aprenden los modelos de IA de los datos",
        "s6_li4": "Por qué importa para la justicia penal",
        "s6_li5": "Las implicaciones éticas de las decisiones de IA",
        "s6_next": "<b>Próximos Pasos:</b>",
        "s6_next_desc": "En las siguientes secciones, aprenderás cómo construir y mejorar modelos de IA para hacerlos más justos y éticos.",
        "s6_scroll": "👇 DESPLÁZATE HACIA ABAJO 👇",
        "s6_find": "Continúa en la siguiente sección abajo.",
        "btn_review": "◀️ Volver a Revisar",
        "risk_high": "Alto Riesgo",
        "risk_med": "Riesgo Medio",
        "risk_low": "Bajo Riesgo",
        "risk_score": "Puntaje de Riesgo:"
    },
    "ca": {
        "title": "🤖 Què és la IA, realment?",
        "intro_box": "Abans de poder construir millors sistemes d'IA, necessites entendre què és realment la IA.<br>No et preocupis, ho explicarem en termes simples i quotidians!",
        "loading": "⏳ Carregant...",
        # Step 1
        "s1_title": "🎯 Una Definició Simple",
        "s1_head": "Intel·ligència Artificial (IA) és només un nom elegant per a:",
        "s1_big": "Un sistema que fa prediccions basades en patrons",
        "s1_sub": "Això és tot! Desglossem què significa això...",
        "s1_list_title": "Pensa en com TU fas prediccions:",
        "s1_li1": "<b>Temps:</b> Núvols foscos → Predius pluja → Portes paraigua",
        "s1_li2": "<b>Trànsit:</b> Hora punta → Predius congestió → Surts d'hora",
        "s1_li3": "<b>Cinema:</b> Actor que t'agrada → Predius que t'agradarà → La veus",
        "s1_highlight": "La IA fa el mateix, però utilitzant dades i matemàtiques en lloc d'experiència humana i intuïció.",
        "btn_next_formula": "Següent: La Fórmula de la IA ▶️",
        # Step 2
        "s2_title": "📐 La Fórmula de Tres Parts",
        "s2_intro": "Tot sistema d'IA funciona de la mateixa manera, seguint aquesta fórmula simple:",
        "lbl_input": "ENTRADA",
        "lbl_model": "MODEL",
        "lbl_output": "SORTIDA",
        "desc_input": "Entren dades",
        "desc_model": "IA processa",
        "desc_output": "Surt predicció",
        "s2_ex_title": "Exemples del Món Real:",
        "s2_ex1_in": "Foto d'un gos",
        "s2_ex1_mod": "IA de reconeixement d'imatge",
        "s2_ex1_out": "\"Això és un Golden Retriever\"",
        "s2_ex2_in": "\"Quin temps fa?\"",
        "s2_ex2_mod": "IA de llenguatge (com ChatGPT)",
        "s2_ex2_out": "Una resposta útil",
        "s2_ex3_in": "Historial criminal d'una persona",
        "s2_ex3_mod": "IA d'avaluació de riscos",
        "s2_ex3_out": "\"Alt Risc\" o \"Baix Risc\"",
        "btn_back": "◀️ Enrere",
        "btn_next_learn": "Següent: Com Aprenen els Models ▶️",
        # Step 3
        "s3_title": "🧠 Com Aprèn un Model d'IA?",
        "s3_h1": "1. Aprèn d'Exemples",
        "s3_p1": "Un model d'IA no està programat amb respostes. En canvi, s'entrena amb una gran quantitat d'exemples i aprèn a trobar les respostes per si mateix.",
        "s3_p2": "En el nostre escenari de justícia, això significa alimentar el model amb milers de casos passats (<b>exemples</b>) per ensenyar-li a trobar els <b>patrons</b> que connecten els detalls d'una persona amb el seu risc criminal.",
        "s3_h2": "2. El Procés d'Entrenament",
        "s3_p3": "La IA \"entrena\" recorrent dades històriques (casos passats) milions de vegades:",
        "flow_1": "1. EXEMPLES<br>ENTRADA",
        "flow_2": "2. MODEL<br>ENDEVINA",
        "flow_3": "3. REVISAR<br>RESPOSTA",
        "flow_4": "4. AJUSTAR<br>PESOS",
        "flow_5": "MODEL<br>APRÈS",
        "s3_p4": "Durant el pas d'<b>\"Ajustar\"</b>, el model canvia les seves regles internes (anomenades <b>\"pesos\"</b>) per apropar-se a la resposta correcta. Per exemple, aprèn <b>quant</b> han d'importar més els \"delictes previs\" que l'\"edat\".",
        "s3_eth_title": "⚠️ El Desafiament Ètic",
        "s3_eth_p": "<b>Aquí hi ha el problema crític:</b> El model *només* aprèn de les dades. Si les dades històriques estan esbiaixades (per exemple, certs grups van ser arrestats amb més freqüència), el model aprendrà aquests patrons esbiaixats.<br><br><b>El model no coneix l'\"equitat\" o la \"justícia\", només coneix patrons.</b>",
        "btn_next_try": "Següent: Prova-ho Tu Mateix ▶️",
        # Step 4
        "s4_title": "🎮 Prova-ho Tu Mateix!",
        "s4_intro": "<b>Utilitzem un model d'IA simple per predir el risc criminal.</b><br>Ajusta les entrades a continuació i veus com canvia la predicció del model!",
        "s4_sect1": "1️⃣ ENTRADA: Ajusta les Dades",
        "lbl_age": "Edat",
        "info_age": "Edat de l'acusat",
        "lbl_priors": "Delictes Previs",
        "info_priors": "Nombre de crims anteriors",
        "lbl_severity": "Gravetat del Càrrec Actual",
        "info_severity": "Què tan greu és el càrrec actual?",
        "opt_minor": "Menor",
        "opt_moderate": "Moderat",
        "opt_serious": "Greu",
        "s4_sect2": "2️⃣ MODEL: Processa les Dades",
        "btn_run": "🔮 Executar Predicció IA",
        "s4_sect3": "3️⃣ SORTIDA: Veure la Predicció",
        "res_placeholder": "Fes clic a \"Executar Predicció IA\" a dalt per veure el resultat",
        "s4_highlight": "<b>El Que Acabes de Fer:</b><br><br>Has utilitzat un model d'IA molt simple! Has proporcionat <b style='color:#0369a1;'>dades d'entrada</b> (edat, delictes, gravetat), el <b style='color:#92400e;'>model les ha processat</b> utilitzant regles i patrons, i ha produït una <b style='color:#15803d;'>predicció de sortida</b>.<br><br>Els models d'IA reals són més complexos, però funcionen sota el mateix principi!",
        "btn_next_conn": "Següent: Connexió amb la Justícia ▶️",
        # Step 5
        "s5_title": "🔗 Connectant amb la Justícia Penal",
        "s5_p1": "<b>Recordes la predicció de risc que vas utilitzar abans com a jutge?</b>",
        "s5_p2": "Aquest va ser un exemple del món real d'IA en acció:",
        "s5_in_desc": "• Edat, raça, gènere, delictes previs, detalls del càrrec",
        "s5_mod_desc1": "• Entrenat amb dades històriques de justícia penal",
        "s5_mod_desc2": "• Busca patrons en qui va reincidir en el passat",
        "s5_out_desc": "• \"Alt Risc\", \"Risc Mitjà\" o \"Baix Risc\"",
        "s5_h2": "Per Què Això Importa per a l'Ètica:",
        "s5_li1": "Les <b>dades d'entrada</b> poden contenir biaixos històrics",
        "s5_li2": "El <b>model</b> aprèn patrons de decisions passades potencialment injustes",
        "s5_li3": "Les <b>prediccions de sortida</b> poden perpetuar la discriminació",
        "s5_final": "<b>Entendre com funciona la IA és el primer pas per construir sistemes més justos.</b><br><br>Ara que saps què és la IA, estàs llest per ajudar a dissenyar millors models que siguin més ètics i menys esbiaixats!",
        "btn_complete": "Completar aquesta Secció ▶️",
        # Step 6
        "s6_title": "🎓 Ara Ja Entens els Conceptes Bàsics de La IA!",
        "s6_congrats": "<b>Felicitats!</b> Ara saps:",
        "s6_li1": "Què és la IA (un sistema de predicció)",
        "s6_li2": "Com funciona (Entrada → Model → Sortida)",
        "s6_li3": "Com aprenen els models d'IA de les dades",
        "s6_li4": "Per què importa per a la justícia penal",
        "s6_li5": "Les implicacions ètiques de les decisions d'IA",
        "s6_next": "<b>Propers Passos:</b>",
        "s6_next_desc": "En les següents seccions, aprendràs com construir i millorar models d'IA per fer-los més justos i ètics.",
        "s6_scroll": "👇 DESPLAÇA'T CAP AVALL 👇",
        "s6_find": "Continua a la següent secció a sota.",
        "btn_review": "◀️ Tornar a Revisar",
        "risk_high": "Alt Risc",
        "risk_med": "Risc Mitjà",
        "risk_low": "Baix Risc",
        "risk_score": "Puntuació de Risc:"
    }
}
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
def _create_simple_predictor():
    """Create a simple demonstration predictor for teaching purposes.

    Returns a closure ``predict_outcome(age, priors, severity, lang="en")``
    that scores a hypothetical case with fully transparent, rule-based logic
    and renders the result as a small HTML card.  The rules are deliberately
    simplistic: the point is to illustrate the Input -> Model -> Output
    pipeline for the lesson, not to be a realistic risk model.
    """

    # Helper for translation: fall back to English, then to the raw key,
    # when a translation is missing.
    def t(lang, key):
        return TRANSLATIONS.get(lang, TRANSLATIONS["en"]).get(key, key)

    def predict_outcome(age, priors, severity, lang="en"):
        """Simple rule-based predictor for demonstration.

        Parameters
        ----------
        age : int
            Age in years (the UI slider supplies 18-65).
        priors : int
            Number of prior offenses (the UI slider supplies 0-10).
        severity : str
            Localized charge-severity label; English, Spanish, and Catalan
            labels are recognized.
        lang : str
            Language code used to translate the rendered result.

        Returns
        -------
        str
            An HTML snippet showing the risk tier and the raw score out of 9.
        """
        # The dropdown delivers a localized string, so map every supported
        # localization of each severity level onto one numeric weight (1-3).
        severity_map = {
            "Minor": 1, "Menor": 1,
            "Moderate": 2, "Moderado": 2, "Moderat": 2,
            "Serious": 3, "Grave": 3, "Greu": 3,
        }

        # Age contributes 1-3 points (younger scores higher).
        if age < 25:
            score = 3
        elif age < 35:
            score = 2
        else:
            score = 1

        # Priors contribute 0-3 points; zero priors adds nothing
        # (the original spelled this as a no-op `score += 0` branch).
        if priors >= 3:
            score += 3
        elif priors >= 1:
            score += 2

        # Unrecognized severity labels default to the middle weight (2).
        score += severity_map.get(severity, 2)

        # Bucket the 1-9 total into three translated risk tiers.
        if score >= 7:
            risk = t(lang, "risk_high")
            color = "#dc2626"
            emoji = "🔴"
        elif score >= 4:
            risk = t(lang, "risk_med")
            color = "#f59e0b"
            emoji = "🟡"
        else:
            risk = t(lang, "risk_low")
            color = "#16a34a"
            emoji = "🟢"

        score_label = t(lang, "risk_score")

        return f"""
    <div class="prediction-card" style="border-color:{color};">
        <h2 class="prediction-title" style="color:{color};">{emoji} {risk}</h2>
        <p class="prediction-score">{score_label} {score}/9</p>
    </div>
    """

    return predict_outcome
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
def create_what_is_ai_app(theme_primary_hue: str = "indigo") -> "gr.Blocks":
    """Create the What is AI Gradio Blocks app.

    Builds a six-step interactive lesson (culminating in a simple rule-based
    risk predictor in step 4).  All UI text is looked up from the
    module-level ``TRANSLATIONS`` table; the active language is chosen from
    the ``lang`` query parameter when the page loads (see
    ``update_language`` below).

    Parameters
    ----------
    theme_primary_hue : str
        Primary hue passed to ``gr.themes.Soft``.

    Returns
    -------
    gr.Blocks
        The assembled (not yet launched) Gradio app.

    Raises
    ------
    ImportError
        If gradio is not installed.
    """
    try:
        import gradio as gr
        # Close any demos left running from earlier calls in this process.
        gr.close_all(verbose=False)
    except ImportError as e:
        raise ImportError("Gradio is required.") from e

    predict_outcome = _create_simple_predictor()

    # --- Translation Helper ---
    # Fall back to English, then to the raw key, when a translation is missing.
    def t(lang, key):
        return TRANSLATIONS.get(lang, TRANSLATIONS["en"]).get(key, key)

    # --- HTML Generator Helpers ---
    # Each helper renders one step's static HTML for the given language.

    def _get_step1_html(lang):
        """Step 1: "AI is a prediction system" intro card."""
        return f"""
        <div class='step-card step-card-soft-blue'>
            <p><b style='font-size:24px;'>{t(lang, 's1_head')}</b></p>
            <div class='inner-card inner-card-emphasis-blue'>
                <h2 style='text-align:center; margin:0; font-size:2rem;'>
                    {t(lang, 's1_big')}
                </h2>
            </div>
            <p>{t(lang, 's1_sub')}</p>
            <h3 style='color:#0369a1; margin-top:24px;'>{t(lang, 's1_list_title')}</h3>
            <ul style='font-size:19px; margin-top:12px;'>
                <li>{t(lang, 's1_li1')}</li>
                <li>{t(lang, 's1_li2')}</li>
                <li>{t(lang, 's1_li3')}</li>
            </ul>
            <div class='highlight-soft' style='border-left:6px solid #f59e0b;'>
                <p style='font-size:18px; margin:0;'>{t(lang, 's1_highlight')}</p>
            </div>
        </div>
        """

    def _get_step2_html(lang):
        """Step 2: Input -> Model -> Output chips plus three worked examples."""
        return f"""
        <div class='step-card step-card-green'>
            <p>{t(lang, 's2_intro')}</p>
            <div class='inner-card'>
                <div class='io-chip-row'>
                    <div class='io-chip io-chip-input'>
                        <h3 class='io-step-label-input' style='margin:0;'>1️⃣ {t(lang, 'lbl_input')}</h3>
                        <p style='margin:8px 0 0 0; font-size:16px;'>{t(lang, 'desc_input')}</p>
                    </div>
                    <span class='io-arrow'>→</span>
                    <div class='io-chip io-chip-model'>
                        <h3 class='io-step-label-model' style='margin:0;'>2️⃣ {t(lang, 'lbl_model')}</h3>
                        <p style='margin:8px 0 0 0; font-size:16px;'>{t(lang, 'desc_model')}</p>
                    </div>
                    <span class='io-arrow'>→</span>
                    <div class='io-chip io-chip-output'>
                        <h3 class='io-step-label-output' style='margin:0;'>3️⃣ {t(lang, 'lbl_output')}</h3>
                        <p style='margin:8px 0 0 0; font-size:16px;'>{t(lang, 'desc_output')}</p>
                    </div>
                </div>
            </div>
            <h3 style='color:#15803d; margin-top:32px;'>{t(lang, 's2_ex_title')}</h3>
            <div class='inner-card-wide'>
                <p style='margin:0; font-size:18px;'>
                    <b class='io-label-input'>{t(lang, 'lbl_input')}:</b> {t(lang, 's2_ex1_in')}<br>
                    <b class='io-label-model'>{t(lang, 'lbl_model')}:</b> {t(lang, 's2_ex1_mod')}<br>
                    <b class='io-label-output'>{t(lang, 'lbl_output')}:</b> {t(lang, 's2_ex1_out')}
                </p>
            </div>
            <div class='inner-card-wide'>
                <p style='margin:0; font-size:18px;'>
                    <b class='io-label-input'>{t(lang, 'lbl_input')}:</b> {t(lang, 's2_ex2_in')}<br>
                    <b class='io-label-model'>{t(lang, 'lbl_model')}:</b> {t(lang, 's2_ex2_mod')}<br>
                    <b class='io-label-output'>{t(lang, 'lbl_output')}:</b> {t(lang, 's2_ex2_out')}
                </p>
            </div>
            <div class='inner-card-wide'>
                <p style='margin:0; font-size:18px;'>
                    <b class='io-label-input'>{t(lang, 'lbl_input')}:</b> {t(lang, 's2_ex3_in')}<br>
                    <b class='io-label-model'>{t(lang, 'lbl_model')}:</b> {t(lang, 's2_ex3_mod')}<br>
                    <b class='io-label-output'>{t(lang, 'lbl_output')}:</b> {t(lang, 's2_ex3_out')}
                </p>
            </div>
        </div>
        """

    def _get_step3_html(lang):
        """Step 3: how models learn (5-box training pipeline) + ethics note."""
        return f"""
        <div class='step-card step-card-amber'>
            <h3 style='color:#92400e; margin-top:0;'>{t(lang, 's3_h1')}</h3>
            <p>{t(lang, 's3_p1')}</p>
            <p>{t(lang, 's3_p2')}</p>
            <hr style='margin:24px 0;'>
            <h3 style='color:#92400e;'>{t(lang, 's3_h2')}</h3>
            <p>{t(lang, 's3_p3')}</p>
            <div class='inner-card'>
                <div style='display:flex; align-items:center; justify-content:space-between; flex-wrap:wrap;'>
                    <div style='background:#dbeafe; padding:12px 16px; border-radius:8px; margin:8px; flex:1; min-width:140px; text-align:center;'>
                        <b style='color:#0369a1;'>{t(lang, 'flow_1')}</b>
                    </div>
                    <div style='font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
                    <div style='background:#fef3c7; padding:12px 16px; border-radius:8px; margin:8px; flex:1; min-width:140px; text-align:center;'>
                        <b style='color:#92400e;'>{t(lang, 'flow_2')}</b>
                    </div>
                    <div style='font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
                    <div style='background:#fef3c7; padding:12px 16px; border-radius:8px; margin:8px; flex:1; min-width:140px; text-align:center;'>
                        <b style='color:#92400e;'>{t(lang, 'flow_3')}</b>
                    </div>
                    <div style='font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
                    <div style='background:#fef3c7; padding:12px 16px; border-radius:8px; margin:8px; flex:1; min-width:140px; text-align:center;'>
                        <b style='color:#92400e;'>{t(lang, 'flow_4')}</b>
                    </div>
                    <div style='font-size:1.5rem; margin:0 8px; color:#6b7280;'>→</div>
                    <div style='background:#f0fdf4; padding:12px 16px; border-radius:8px; margin:8px; flex:1; min-width:140px; text-align:center;'>
                        <b style='color:#15803d;'>{t(lang, 'flow_5')}</b>
                    </div>
                </div>
            </div>
            <p style='margin-top:20px;'>{t(lang, 's3_p4')}</p>
            <hr style='margin:24px 0;'>
            <h3 style='color:#dc2626;'>{t(lang, 's3_eth_title')}</h3>
            <div class='keypoint-box'>
                <p style='margin:0;'>{t(lang, 's3_eth_p')}</p>
            </div>
        </div>
        """

    def _get_step4_intro_html(lang):
        """Step 4: one-line intro banner above the interactive predictor."""
        return f"""
        <div class='step-card step-card-amber' style='text-align:center; font-size:18px;'>
            <p style='margin:0;'>{t(lang, 's4_intro')}</p>
        </div>
        """

    def _get_step4_highlight_html(lang):
        """Step 4: takeaway highlight shown below the predictor output."""
        return f"""
        <div class='highlight-soft'>
            {t(lang, 's4_highlight')}
        </div>
        """

    def _get_step5_html(lang):
        """Step 5: maps the criminal-justice example onto Input/Model/Output
        and lists the ethical implications of each stage."""
        return f"""
        <div class='step-card step-card-purple'>
            <p><b>{t(lang, 's5_p1')}</b></p>
            <p style='margin-top:20px;'>{t(lang, 's5_p2')}</p>
            <div class='inner-card inner-card-emphasis-blue' style='border-color:#9333ea;'>
                <p style='font-size:18px; margin-bottom:16px;'>
                    <b class='io-label-input'>{t(lang, 'lbl_input')}:</b> {t(lang, 'info_age')}, ...<br>
                    <span style='margin-left:24px; color:#6b7280;'>{t(lang, 's5_in_desc')}</span>
                </p>
                <p style='font-size:18px; margin:16px 0;'>
                    <b class='io-label-model'>{t(lang, 'lbl_model')}:</b> {t(lang, 's2_ex3_mod')}<br>
                    <span style='margin-left:24px; color:#6b7280;'>{t(lang, 's5_mod_desc1')}</span><br>
                    <span style='margin-left:24px; color:#6b7280;'>{t(lang, 's5_mod_desc2')}</span>
                </p>
                <p style='font-size:18px; margin-top:16px; margin-bottom:0;'>
                    <b class='io-label-output'>{t(lang, 'lbl_output')}:</b> {t(lang, 's2_ex3_out')}<br>
                    <span style='margin-left:24px; color:#6b7280;'>{t(lang, 's5_out_desc')}</span>
                </p>
            </div>
            <h3 style='color:#7e22ce; margin-top:32px;'>{t(lang, 's5_h2')}</h3>
            <div class='keypoint-box'>
                <ul style='font-size:18px; margin:8px 0;'>
                    <li>{t(lang, 's5_li1')}</li>
                    <li>{t(lang, 's5_li2')}</li>
                    <li>{t(lang, 's5_li3')}</li>
                </ul>
            </div>
            <div class='highlight-soft' style='margin-top:24px;'>
                <p style='font-size:18px; margin:0;'>{t(lang, 's5_final')}</p>
            </div>
        </div>
        """

    def _get_step6_html(lang):
        """Step 6: completion / congratulations screen."""
        return f"""
        <div style='text-align:center;'>
            <h2 style='font-size: 2.5rem;'>{t(lang, 's6_title')}</h2>
            <div class='completion-box'>
                <p>{t(lang, 's6_congrats')}</p>
                <ul style='font-size:1.1rem; text-align:left; max-width:600px; margin:20px auto;'>
                    <li>{t(lang, 's6_li1')}</li>
                    <li>{t(lang, 's6_li2')}</li>
                    <li>{t(lang, 's6_li3')}</li>
                    <li>{t(lang, 's6_li4')}</li>
                    <li>{t(lang, 's6_li5')}</li>
                </ul>
                <p style='margin-top:32px;'><b>{t(lang, 's6_next')}</b></p>
                <p>{t(lang, 's6_next_desc')}</p>
                <h1 style='margin:20px 0; font-size: 3rem;'>{t(lang, 's6_scroll')}</h1>
                <p style='font-size:1.1rem;'>{t(lang, 's6_find')}</p>
            </div>
        </div>
        """

    # --- CSS (Standard) ---
    # Light/dark aware styling; the @media block at the bottom overrides the
    # card backgrounds for dark mode.
    css = """
    /* (All original CSS classes kept intact) */
    .large-text { font-size: 20px !important; }
    .loading-title { font-size: 2rem; color: var(--secondary-text-color); }
    .io-step-label-input, .io-label-input { color: #0369a1; font-weight: 700; }
    .io-step-label-model, .io-label-model { color: #92400e; font-weight: 700; }
    .io-step-label-output, .io-label-output { color: #15803d; font-weight: 700; }
    .io-chip-row { text-align: center; }
    .io-chip { display: inline-block; padding: 16px 24px; border-radius: 8px; margin: 8px; background-color: color-mix(in srgb, var(--block-background-fill) 60%, #ffffff 40%); }
    .io-chip-input { background-color: color-mix(in srgb, #dbeafe 75%, var(--block-background-fill) 25%); }
    .io-chip-model { background-color: color-mix(in srgb, #fef3c7 75%, var(--block-background-fill) 25%); }
    .io-chip-output { background-color: color-mix(in srgb, #dcfce7 75%, var(--block-background-fill) 25%); }
    .io-arrow { display: inline-block; font-size: 2rem; margin: 0 16px; color: var(--secondary-text-color); vertical-align: middle; }
    .ai-intro-box { text-align: center; font-size: 18px; max-width: 900px; margin: auto; padding: 20px; border-radius: 12px; background-color: var(--block-background-fill); color: var(--body-text-color); border: 2px solid #6366f1; box-shadow: 0 5px 15px rgba(0, 0, 0, 0.08); }
    .step-card { font-size: 20px; padding: 28px; border-radius: 16px; background-color: var(--block-background-fill); color: var(--body-text-color); border: 1px solid var(--border-color-primary); box-shadow: 0 4px 12px rgba(0, 0, 0, 0.06); }
    .step-card-soft-blue { border-width: 2px; border-color: #6366f1; }
    .step-card-green { border-width: 2px; border-color: #16a34a; }
    .step-card-amber { border-width: 2px; border-color: #f59e0b; }
    .step-card-purple { border-width: 2px; border-color: #9333ea; }
    .inner-card { background-color: var(--body-background-fill); color: var(--body-text-color); padding: 24px; border-radius: 12px; margin: 24px 0; border: 1px solid var(--border-color-primary); }
    .inner-card-emphasis-blue { border-width: 3px; border-color: #0284c7; }
    .inner-card-wide { background-color: var(--body-background-fill); color: var(--body-text-color); padding: 20px; border-radius: 8px; margin: 16px 0; border: 1px solid var(--border-color-primary); }
    .keypoint-box { background-color: var(--block-background-fill); color: var(--body-text-color); padding: 24px; border-radius: 12px; margin-top: 20px; border-left: 6px solid #dc2626; }
    .highlight-soft { background-color: var(--block-background-fill); color: var(--body-text-color); padding: 20px; border-radius: 12px; font-size: 18px; border: 1px solid var(--border-color-primary); }
    .completion-box { font-size: 1.3rem; padding: 28px; border-radius: 16px; background-color: var(--block-background-fill); color: var(--body-text-color); border: 2px solid #0284c7; box-shadow: 0 5px 15px rgba(0, 0, 0, 0.08); }
    .prediction-card { background-color: var(--block-background-fill); color: var(--body-text-color); padding: 24px; border-radius: 12px; border: 3px solid var(--border-color-primary); text-align: center; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.08); }
    .prediction-title { margin: 0; font-size: 2.5rem; }
    .prediction-score { font-size: 18px; margin-top: 12px; color: var(--secondary-text-color); }
    .prediction-placeholder { background-color: var(--block-background-fill); color: var(--secondary-text-color); padding: 40px; border-radius: 12px; text-align: center; border: 1px solid var(--border-color-primary); }
    #nav-loading-overlay { position: fixed; top: 0; left: 0; width: 100%; height: 100%; background: color-mix(in srgb, var(--body-background-fill) 95%, transparent); z-index: 9999; display: none; flex-direction: column; align-items: center; justify-content: center; opacity: 0; transition: opacity 0.3s ease; }
    .nav-spinner { width: 50px; height: 50px; border: 5px solid var(--border-color-primary); border-top: 5px solid var(--color-accent); border-radius: 50%; animation: nav-spin 1s linear infinite; margin-bottom: 20px; }
    @keyframes nav-spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }
    #nav-loading-text { font-size: 1.3rem; font-weight: 600; color: var(--color-accent); }
    @media (prefers-color-scheme: dark) { .ai-intro-box, .step-card, .inner-card, .inner-card-wide, .keypoint-box, .highlight-soft, .completion-box, .prediction-card, .prediction-placeholder { background-color: #2D323E; color: white; border-color: #555555; box-shadow: none; } .inner-card, .inner-card-wide { background-color: #181B22; } #nav-loading-overlay { background: rgba(15, 23, 42, 0.9); } .nav-spinner { border-color: rgba(148, 163, 184, 0.4); border-top-color: var(--color-accent); } .io-chip-input { background-color: color-mix(in srgb, #1d4ed8 35%, #020617 65%); } .io-chip-model { background-color: color-mix(in srgb, #b45309 40%, #020617 60%); } .io-chip-output { background-color: color-mix(in srgb, #15803d 40%, #020617 60%); } .io-arrow { color: #e5e7eb; } }
    """

    with gr.Blocks(theme=gr.themes.Soft(primary_hue=theme_primary_hue), css=css) as demo:
        # Current UI language; read by the predictor so results are translated.
        lang_state = gr.State("en")

        # Invisible anchor the nav JS scrolls to, plus the loading overlay.
        gr.HTML("<div id='app_top_anchor' style='height:0;'></div>")
        gr.HTML("<div id='nav-loading-overlay'><div class='nav-spinner'></div><span id='nav-loading-text'>Loading...</span></div>")

        # --- Variables for dynamic updating ---
        # Components named c_* (and the buttons/sliders below) are re-rendered
        # by update_language() on page load; everything is built in English
        # first and then swapped to the requested language.
        c_title = gr.Markdown("<h1 style='text-align:center;'>🤖 What is AI, Anyway?</h1>")
        c_intro = gr.HTML(f"<div class='ai-intro-box'>{t('en', 'intro_box')}</div>")
        gr.HTML("<hr style='margin:24px 0;'>")

        # Interstitial screen shown while navigating between steps.
        with gr.Column(visible=False) as loading_screen:
            c_load = gr.Markdown(f"<div style='text-align:center; padding: 100px 0;'><h2 class='loading-title'>{t('en', 'loading')}</h2></div>")

        # Step 1
        with gr.Column(visible=True, elem_id="step-1") as step_1:
            c_s1_title = gr.Markdown(f"<h2 style='text-align:center;'>{t('en', 's1_title')}</h2>")
            c_s1_html = gr.HTML(_get_step1_html("en"))
            step_1_next = gr.Button(t('en', 'btn_next_formula'), variant="primary", size="lg")

        # Step 2
        with gr.Column(visible=False, elem_id="step-2") as step_2:
            c_s2_title = gr.Markdown(f"<h2 style='text-align:center;'>{t('en', 's2_title')}</h2>")
            c_s2_html = gr.HTML(_get_step2_html("en"))
            with gr.Row():
                step_2_back = gr.Button(t('en', 'btn_back'), size="lg")
                step_2_next = gr.Button(t('en', 'btn_next_learn'), variant="primary", size="lg")

        # Step 3
        with gr.Column(visible=False, elem_id="step-3") as step_3:
            c_s3_title = gr.Markdown(f"<h2 style='text-align:center;'>{t('en', 's3_title')}</h2>")
            c_s3_html = gr.HTML(_get_step3_html("en"))
            with gr.Row():
                step_3_back = gr.Button(t('en', 'btn_back'), size="lg")
                step_3_next = gr.Button(t('en', 'btn_next_try'), variant="primary", size="lg")

        # Step 4 (Interactive)
        with gr.Column(visible=False, elem_id="step-4") as step_4:
            c_s4_title = gr.Markdown(f"<h2 style='text-align:center;'>{t('en', 's4_title')}</h2>")
            c_s4_intro = gr.HTML(_get_step4_intro_html("en"))
            gr.HTML("<br>")

            c_s4_sect1 = gr.Markdown(f"<h3 style='text-align:center; color:#0369a1;'>{t('en', 's4_sect1')}</h3>")
            with gr.Row():
                age_slider = gr.Slider(minimum=18, maximum=65, value=25, step=1, label=t('en', 'lbl_age'), info=t('en', 'info_age'))
                priors_slider = gr.Slider(minimum=0, maximum=10, value=2, step=1, label=t('en', 'lbl_priors'), info=t('en', 'info_priors'))
                severity_dropdown = gr.Dropdown(choices=["Minor", "Moderate", "Serious"], value="Moderate", label=t('en', 'lbl_severity'), info=t('en', 'info_severity'))

            gr.HTML("<hr style='margin:24px 0;'>")
            c_s4_sect2 = gr.Markdown(f"<h3 style='text-align:center; color:#92400e;'>{t('en', 's4_sect2')}</h3>")
            predict_btn = gr.Button(t('en', 'btn_run'), variant="primary", size="lg")

            gr.HTML("<hr style='margin:24px 0;'>")
            c_s4_sect3 = gr.Markdown(f"<h3 style='text-align:center; color:#15803d;'>{t('en', 's4_sect3')}</h3>")

            prediction_output = gr.HTML(f"<div class='prediction-placeholder'><p style='font-size:18px; margin:0;'>{t('en', 'res_placeholder')}</p></div>")

            gr.HTML("<hr style='margin:24px 0;'>")
            c_s4_highlight = gr.HTML(_get_step4_highlight_html("en"))

            with gr.Row():
                step_4_back = gr.Button(t('en', 'btn_back'), size="lg")
                step_4_next = gr.Button(t('en', 'btn_next_conn'), variant="primary", size="lg")

        # Step 5
        with gr.Column(visible=False, elem_id="step-5") as step_5:
            c_s5_title = gr.Markdown(f"<h2 style='text-align:center;'>{t('en', 's5_title')}</h2>")
            c_s5_html = gr.HTML(_get_step5_html("en"))
            with gr.Row():
                step_5_back = gr.Button(t('en', 'btn_back'), size="lg")
                step_5_next = gr.Button(t('en', 'btn_complete'), variant="primary", size="lg")

        # Step 6
        with gr.Column(visible=False, elem_id="step-6") as step_6:
            c_s6_html = gr.HTML(_get_step6_html("en"))
            back_to_connection_btn = gr.Button(t('en', 'btn_review'))

        # --- Update Logic ---

        def update_language(request: gr.Request):
            """Re-localize every dynamic component from the `?lang=` query
            parameter.  The returned list is positional and MUST stay in the
            exact order of `update_targets` below."""
            params = request.query_params
            lang = params.get("lang", "en")
            # Unknown language codes fall back to English.
            if lang not in TRANSLATIONS: lang = "en"

            # Helper to access options for Dropdown updates
            def get_opt(k): return t(lang, k)

            return [
                lang, # state
                f"<h1 style='text-align:center;'>{t(lang, 'title')}</h1>",
                f"<div class='ai-intro-box'>{t(lang, 'intro_box')}</div>",
                f"<div style='text-align:center; padding: 100px 0;'><h2 class='loading-title'>{t(lang, 'loading')}</h2></div>",

                # Step 1
                f"<h2 style='text-align:center;'>{t(lang, 's1_title')}</h2>",
                _get_step1_html(lang),
                gr.Button(value=t(lang, 'btn_next_formula')),

                # Step 2
                f"<h2 style='text-align:center;'>{t(lang, 's2_title')}</h2>",
                _get_step2_html(lang),
                gr.Button(value=t(lang, 'btn_back')),
                gr.Button(value=t(lang, 'btn_next_learn')),

                # Step 3
                f"<h2 style='text-align:center;'>{t(lang, 's3_title')}</h2>",
                _get_step3_html(lang),
                gr.Button(value=t(lang, 'btn_back')),
                gr.Button(value=t(lang, 'btn_next_try')),

                # Step 4
                f"<h2 style='text-align:center;'>{t(lang, 's4_title')}</h2>",
                _get_step4_intro_html(lang),
                f"<h3 style='text-align:center; color:#0369a1;'>{t(lang, 's4_sect1')}</h3>",
                gr.Slider(label=t(lang, 'lbl_age'), info=t(lang, 'info_age')),
                gr.Slider(label=t(lang, 'lbl_priors'), info=t(lang, 'info_priors')),
                gr.Dropdown(
                    label=t(lang, 'lbl_severity'),
                    info=t(lang, 'info_severity'),
                    choices=[get_opt('opt_minor'), get_opt('opt_moderate'), get_opt('opt_serious')],
                    value=get_opt('opt_moderate')
                ),
                f"<h3 style='text-align:center; color:#92400e;'>{t(lang, 's4_sect2')}</h3>",
                gr.Button(value=t(lang, 'btn_run')),
                f"<h3 style='text-align:center; color:#15803d;'>{t(lang, 's4_sect3')}</h3>",
                f"<div class='prediction-placeholder'><p style='font-size:18px; margin:0;'>{t(lang, 'res_placeholder')}</p></div>", # Reset output on lang change
                _get_step4_highlight_html(lang),
                gr.Button(value=t(lang, 'btn_back')),
                gr.Button(value=t(lang, 'btn_next_conn')),

                # Step 5
                f"<h2 style='text-align:center;'>{t(lang, 's5_title')}</h2>",
                _get_step5_html(lang),
                gr.Button(value=t(lang, 'btn_back')),
                gr.Button(value=t(lang, 'btn_complete')),

                # Step 6
                _get_step6_html(lang),
                gr.Button(value=t(lang, 'btn_review'))
            ]

        # List of outputs must match the return order exactly
        update_targets = [
            lang_state,
            c_title, c_intro, c_load,
            # S1
            c_s1_title, c_s1_html, step_1_next,
            # S2
            c_s2_title, c_s2_html, step_2_back, step_2_next,
            # S3
            c_s3_title, c_s3_html, step_3_back, step_3_next,
            # S4
            c_s4_title, c_s4_intro, c_s4_sect1, age_slider, priors_slider, severity_dropdown,
            c_s4_sect2, predict_btn, c_s4_sect3, prediction_output, c_s4_highlight, step_4_back, step_4_next,
            # S5
            c_s5_title, c_s5_html, step_5_back, step_5_next,
            # S6
            c_s6_html, back_to_connection_btn
        ]

        demo.load(update_language, inputs=None, outputs=update_targets)

        # --- PREDICTION BUTTON LOGIC ---
        # Note: We pass lang_state to the predictor to ensure result is translated
        predict_btn.click(
            predict_outcome,
            inputs=[age_slider, priors_slider, severity_dropdown, lang_state],
            outputs=prediction_output,
            show_progress="full",
            scroll_to_output=True,
        )

        # --- NAVIGATION LOGIC ---
        all_steps = [step_1, step_2, step_3, step_4, step_5, step_6, loading_screen]

        def create_nav_generator(current_step, next_step):
            """Return a generator callback that first shows the loading screen
            (hiding every step), then reveals `next_step` on the second yield."""
            def navigate():
                updates = {loading_screen: gr.update(visible=True)}
                for step in all_steps:
                    if step != loading_screen: updates[step] = gr.update(visible=False)
                yield updates
                updates = {next_step: gr.update(visible=True)}
                for step in all_steps:
                    if step != next_step: updates[step] = gr.update(visible=False)
                yield updates
            return navigate

        # JS Helper for loading overlay
        def nav_js(target_id: str, message: str) -> str:
            """Build the client-side JS run on each nav click: shows the
            overlay, scrolls to the top anchor, then polls until the target
            step is visible (min 1200 ms, hard timeout 7000 ms) before fading
            the overlay out."""
            return f"""
            ()=>{{
                try {{
                    const overlay = document.getElementById('nav-loading-overlay');
                    const messageEl = document.getElementById('nav-loading-text');
                    if(overlay && messageEl) {{
                        messageEl.textContent = '{message}';
                        overlay.style.display = 'flex';
                        setTimeout(() => {{ overlay.style.opacity = '1'; }}, 10);
                    }}
                    const startTime = Date.now();
                    setTimeout(() => {{
                        const anchor = document.getElementById('app_top_anchor');
                        if(anchor) anchor.scrollIntoView({{behavior:'smooth', block:'start'}});
                    }}, 40);
                    const targetId = '{target_id}';
                    const pollInterval = setInterval(() => {{
                        const elapsed = Date.now() - startTime;
                        const target = document.getElementById(targetId);
                        const isVisible = target && target.offsetParent !== null &&
                            window.getComputedStyle(target).display !== 'none';
                        if((isVisible && elapsed >= 1200) || elapsed > 7000) {{
                            clearInterval(pollInterval);
                            if(overlay) {{
                                overlay.style.opacity = '0';
                                setTimeout(() => {{ overlay.style.display = 'none'; }}, 300);
                            }}
                        }}
                    }}, 90);
                }} catch(e) {{ console.warn('nav-js error', e); }}
            }}
            """

        step_1_next.click(fn=create_nav_generator(step_1, step_2), outputs=all_steps, js=nav_js("step-2", "Loading..."))
        step_2_back.click(fn=create_nav_generator(step_2, step_1), outputs=all_steps, js=nav_js("step-1", "Loading..."))
        step_2_next.click(fn=create_nav_generator(step_2, step_3), outputs=all_steps, js=nav_js("step-3", "Loading..."))
        step_3_back.click(fn=create_nav_generator(step_3, step_2), outputs=all_steps, js=nav_js("step-2", "Loading..."))
        step_3_next.click(fn=create_nav_generator(step_3, step_4), outputs=all_steps, js=nav_js("step-4", "Loading..."))
        step_4_back.click(fn=create_nav_generator(step_4, step_3), outputs=all_steps, js=nav_js("step-3", "Loading..."))
        step_4_next.click(fn=create_nav_generator(step_4, step_5), outputs=all_steps, js=nav_js("step-5", "Loading..."))
        step_5_back.click(fn=create_nav_generator(step_5, step_4), outputs=all_steps, js=nav_js("step-4", "Loading..."))
        step_5_next.click(fn=create_nav_generator(step_5, step_6), outputs=all_steps, js=nav_js("step-6", "Loading..."))
        back_to_connection_btn.click(fn=create_nav_generator(step_6, step_5), outputs=all_steps, js=nav_js("step-5", "Loading..."))

    return demo
|
|
846
|
+
|
|
847
|
+
def launch_what_is_ai_app(height: int = 1100, share: bool = False, debug: bool = False) -> None:
    """Build and launch the "What is AI?" app inline.

    The server port comes from the ``PORT`` environment variable when set,
    defaulting to 8080 otherwise.

    Parameters
    ----------
    height : int
        Inline iframe height in pixels.
    share : bool
        Forwarded to ``Blocks.launch`` to create a public share link.
    debug : bool
        Forwarded to ``Blocks.launch`` to block and surface errors.
    """
    app = create_what_is_ai_app()
    server_port = int(os.environ.get("PORT", 8080))
    app.launch(
        share=share,
        inline=True,
        debug=debug,
        height=height,
        server_port=server_port,
    )
|
|
851
|
+
|
|
852
|
+
# Allow running this module directly as a script to launch the lesson app.
if __name__ == "__main__":
    launch_what_is_ai_app()
|