aimodelshare 0.1.21__py3-none-any.whl → 0.1.62__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of aimodelshare might be problematic.
- aimodelshare/__init__.py +94 -14
- aimodelshare/aimsonnx.py +417 -262
- aimodelshare/api.py +8 -7
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +4 -4
- aimodelshare/base_image.py +1 -1
- aimodelshare/containerisation.py +1 -1
- aimodelshare/data_sharing/download_data.py +145 -88
- aimodelshare/generatemodelapi.py +7 -6
- aimodelshare/main/eval_lambda.txt +81 -13
- aimodelshare/model.py +493 -197
- aimodelshare/modeluser.py +89 -1
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +37 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +17 -0
- aimodelshare/moral_compass/apps/tutorial.py +198 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/playground.py +26 -14
- aimodelshare/preprocessormodules.py +60 -6
- aimodelshare/reproducibility.py +20 -5
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare-0.1.62.dist-info/METADATA +298 -0
- {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/RECORD +30 -22
- {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/WHEEL +1 -1
- aimodelshare-0.1.62.dist-info/licenses/LICENSE +5 -0
- {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/top_level.txt +0 -1
- aimodelshare-0.1.21.dist-info/LICENSE +0 -22
- aimodelshare-0.1.21.dist-info/METADATA +0 -68
- tests/__init__.py +0 -0
- tests/test_aimsonnx.py +0 -135
- tests/test_playground.py +0 -721
aimodelshare/moral_compass/apps/tutorial.py
@@ -0,0 +1,198 @@
"""
Tutorial Gradio application for onboarding users to the Justice & Equity Challenge.

This app teaches:
1. How to advance slideshow-style steps
2. How to interact with sliders/buttons
3. How model prediction output appears

Structure:
- Factory function `create_tutorial_app()` returns a Gradio Blocks object
- Convenience wrapper `launch_tutorial_app()` launches it inline (for notebooks)
"""
import contextlib
import os


def _build_synthetic_model():
    """Build a tiny linear regression model on synthetic study habit data."""
    import numpy as np
    from sklearn.linear_model import LinearRegression

    rng = np.random.default_rng(7)
    n = 200
    hours_study = rng.uniform(0, 12, n)
    hours_sleep = rng.uniform(4, 10, n)
    attendance = rng.uniform(50, 100, n)
    exam_score = 5 * hours_study + 3 * hours_sleep + 0.5 * attendance + rng.normal(0, 10, n)

    X = np.column_stack([hours_study, hours_sleep, attendance])
    y = exam_score
    lin_reg = LinearRegression().fit(X, y)

    def predict_exam(sl, slp, att):
        pred = float(lin_reg.predict([[sl, slp, att]])[0])
        import numpy as np
        pred = float(np.clip(pred, 0, 100))
        return f"{round(pred, 1)}%"

    return predict_exam


def create_tutorial_app(theme_primary_hue: str = "indigo") -> "gr.Blocks":
    """Create the tutorial Gradio Blocks app (not launched yet)."""
    try:
        import gradio as gr
    except ImportError as e:
        raise ImportError(
            "Gradio is required for the tutorial app. Install with `pip install gradio`."
        ) from e

    predict_exam = _build_synthetic_model()

    css = """
    #prediction_output_textbox textarea {
        font-size: 2.5rem !important;
        font-weight: bold !important;
        color: #1E40AF !important;
        text-align: center !important;
    }
    """

    with gr.Blocks(theme=gr.themes.Soft(primary_hue=theme_primary_hue), css=css) as demo:
        gr.Markdown("<h1 style='text-align:center;'>👋 How to Use an App (A Quick Tutorial)</h1>")
        gr.Markdown(
            """
            <div style='text-align:left; font-size:20px; max-width: 800px; margin: auto;
                        padding: 15px; background-color: #f7f7f7; border-radius: 8px;'>
            This is a simple, 3-step tutorial.<br><br>
            <b>Your Task:</b> Just read the instructions for each step and click the "Next" button to continue.
            </div>
            """
        )
        gr.HTML("<hr style='margin:24px 0;'>")

        # Step 1
        with gr.Column(visible=True) as step_1_container:
            gr.Markdown("<h2 style='text-align:center;'>Step 1: How to Use \"Slideshows\"</h2>")
            gr.Markdown(
                """
                <div style='font-size: 28px; text-align: center; background:#E3F2FD;
                            padding:28px; border-radius:16px; min-height: 150px;'>
                <b>This is a "Slideshow" step.</b><br><br>
                Some apps are just for reading. Your only task is to click the "Next" button to move to the next step.
                </div>
                """
            )
            step_1_next = gr.Button("Next Step ▶️", variant="primary")

        # Step 2
        with gr.Column(visible=False) as step_2_container:
            gr.Markdown("<h2 style='text-align:center;'>Step 2: How to Use \"Interactive Demos\"</h2>")
            gr.Markdown(
                """
                <div style='font-size: 20px; text-align: left; background:#FFF3E0;
                            padding:20px; border-radius:16px;'>
                <b>This is an "Interactive Demo."</b><br><br>
                Just follow the numbered steps below (from top to bottom) to see how it works!
                </div>
                """
            )
            gr.HTML("<br>")
            gr.Markdown(
                """
                <div style="font-size: 24px; text-align:left; padding-left: 10px;">
                <b>[ 1 ] Use these sliders to change the inputs.</b>
                </div>
                """
            )
            s_hours = gr.Slider(0, 12, step=0.5, value=6, label="Hours Studied per Week")
            s_sleep = gr.Slider(4, 10, step=0.5, value=7, label="Hours of Sleep per Night")
            s_att = gr.Slider(50, 100, step=1, value=90, label="Class Attendance %")

            gr.HTML("<hr style='margin: 20px 0;'>")

            gr.Markdown(
                """
                <div style="font-size: 24px; text-align:left; padding-left: 10px;">
                <b>[ 2 ] Click this button to run.</b>
                </div>
                """
            )
            with gr.Row():
                gr.HTML(visible=False)
                go = gr.Button("🔮 Predict", variant="primary", scale=2)
                gr.HTML(visible=False)

            gr.HTML("<hr style='margin: 20px 0;'>")

            gr.Markdown(
                """
                <div style="font-size: 24px; text-align:left; padding-left: 10px;">
                <b>[ 3 ] See the result here!</b>
                </div>
                """
            )
            out = gr.Textbox(
                label="🔮 Predicted Exam Score", elem_id="prediction_output_textbox", interactive=False
            )
            go.click(predict_exam, [s_hours, s_sleep, s_att], out)

            gr.HTML("<hr style='margin: 15px 0;'>")
            with gr.Row():
                step_2_back = gr.Button("◀️ Back")
                step_2_next = gr.Button("Finish Tutorial ▶️", variant="primary")

        # Step 3
        with gr.Column(visible=False) as step_3_container:
            gr.Markdown(
                """
                <div style='text-align:center;'>
                <h2 style='text-align:center; font-size: 2.5rem;'>✅ Tutorial Complete!</h2>
                <div style='font-size: 1.5rem; background:#E8F5E9; padding:28px; border-radius:16px;
                            border: 2px solid #4CAF50;'>
                You've mastered the basics!<br><br>
                Your next step is <b>outside</b> this app window.<br><br>
                <h1 style='margin:0; font-size: 3rem;'>👇 SCROLL DOWN 👇</h1><br>
                Look below this app to find <b>Section 3</b> and begin the challenge!
                </div>
                </div>
                """
            )
            with gr.Row():
                step_3_back = gr.Button("◀️ Back")

        # Visibility logic (correct ordering & syntax)
        step_1_next.click(
            lambda: (gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)),
            inputs=None,
            outputs=[step_1_container, step_2_container, step_3_container],
        )
        step_2_back.click(
            lambda: (gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)),
            inputs=None,
            outputs=[step_1_container, step_2_container, step_3_container],
        )
        step_2_next.click(
            lambda: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)),
            inputs=None,
            outputs=[step_1_container, step_2_container, step_3_container],
        )
        step_3_back.click(
            lambda: (gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)),
            inputs=None,
            outputs=[step_1_container, step_2_container, step_3_container],
        )

    return demo


def launch_tutorial_app(height: int = 950, share: bool = False, debug: bool = False) -> None:
    """Convenience wrapper to create and launch the tutorial app inline."""
    demo = create_tutorial_app()
    try:
        import gradio as gr  # noqa: F401
    except ImportError as e:
        raise ImportError("Gradio must be installed to launch the tutorial app.") from e
    with contextlib.redirect_stdout(open(os.devnull, 'w')), contextlib.redirect_stderr(open(os.devnull, 'w')):
        demo.launch(share=share, inline=True, debug=debug, height=height)
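For orientation (not part of the diff), here is a minimal usage sketch of the two entry points this file adds, assuming gradio and scikit-learn are installed alongside aimodelshare:

# Hypothetical usage sketch (not part of the package diff); assumes
# `gradio` and `scikit-learn` are available in the environment.
from aimodelshare.moral_compass.apps.tutorial import create_tutorial_app

# Build the Blocks app without launching it, then launch it explicitly.
demo = create_tutorial_app(theme_primary_hue="indigo")
demo.launch(share=False)

# In a notebook, the convenience wrapper instead renders the app inline
# and silences Gradio's startup output:
#   from aimodelshare.moral_compass.apps.tutorial import launch_tutorial_app
#   launch_tutorial_app(height=950)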
aimodelshare/moral_compass/challenge.py
@@ -0,0 +1,365 @@
"""
Challenge Manager for Moral Compass system.

Provides a local state manager for tracking multi-metric progress
and syncing with the Moral Compass API.
"""

from typing import Dict, Optional, List
from dataclasses import dataclass
from .api_client import MoralcompassApiClient


@dataclass
class Question:
    """Represents a challenge question"""
    id: str
    text: str
    options: List[str]
    correct_index: int


@dataclass
class Task:
    """Represents a challenge task"""
    id: str
    title: str
    description: str
    questions: List[Question]


class JusticeAndEquityChallenge:
    """
    Justice & Equity Challenge with predefined tasks and questions.

    Contains 6 tasks (A-F) with associated questions for teaching
    ethical AI principles related to fairness and bias.
    """

    def __init__(self):
        """Initialize the Justice & Equity Challenge with tasks A-F"""
        self.tasks = [
            Task(
                id="A",
                title="Understanding Algorithmic Bias",
                description="Learn about different types of bias in AI systems",
                questions=[
                    Question(
                        id="A1",
                        text="What is algorithmic bias?",
                        options=[
                            "Bias in the training data",
                            "Systematic and repeatable errors in computer systems",
                            "User preference bias",
                            "Network latency bias"
                        ],
                        correct_index=1
                    )
                ]
            ),
            Task(
                id="B",
                title="Identifying Protected Attributes",
                description="Understanding which attributes require fairness considerations",
                questions=[
                    Question(
                        id="B1",
                        text="Which is a protected attribute in fairness?",
                        options=[
                            "Email address",
                            "Race or ethnicity",
                            "Browser type",
                            "Screen resolution"
                        ],
                        correct_index=1
                    )
                ]
            ),
            Task(
                id="C",
                title="Measuring Disparate Impact",
                description="Learn to measure fairness using statistical metrics",
                questions=[
                    Question(
                        id="C1",
                        text="What is disparate impact?",
                        options=[
                            "Equal outcome rates across groups",
                            "Different outcome rates for different groups",
                            "Same prediction accuracy",
                            "Uniform data distribution"
                        ],
                        correct_index=1
                    )
                ]
            ),
            Task(
                id="D",
                title="Evaluating Model Fairness",
                description="Apply fairness metrics to assess model performance",
                questions=[
                    Question(
                        id="D1",
                        text="What does equal opportunity mean?",
                        options=[
                            "Same accuracy for all groups",
                            "Equal true positive rates across groups",
                            "Equal false positive rates",
                            "Same number of predictions"
                        ],
                        correct_index=1
                    )
                ]
            ),
            Task(
                id="E",
                title="Mitigation Strategies",
                description="Explore techniques to reduce algorithmic bias",
                questions=[
                    Question(
                        id="E1",
                        text="Which is a bias mitigation technique?",
                        options=[
                            "Ignore protected attributes",
                            "Reweighting training samples",
                            "Use more servers",
                            "Faster algorithms"
                        ],
                        correct_index=1
                    )
                ]
            ),
            Task(
                id="F",
                title="Ethical Deployment",
                description="Best practices for deploying fair AI systems",
                questions=[
                    Question(
                        id="F1",
                        text="What is essential for ethical AI deployment?",
                        options=[
                            "Fastest inference time",
                            "Continuous monitoring and auditing",
                            "Most complex model",
                            "Largest dataset"
                        ],
                        correct_index=1
                    )
                ]
            )
        ]

    @property
    def total_tasks(self) -> int:
        """Total number of tasks in the challenge"""
        return len(self.tasks)

    @property
    def total_questions(self) -> int:
        """Total number of questions across all tasks"""
        return sum(len(task.questions) for task in self.tasks)


class ChallengeManager:
    """
    Manages local state for a user's challenge progress with multiple metrics.

    Features:
    - Track arbitrary metrics (accuracy, fairness, robustness, etc.)
    - Specify primary metric for scoring
    - Track task and question progress
    - Local preview of moral compass score
    - Sync to server via API
    """

    def __init__(self, table_id: str, username: str, api_client: Optional[MoralcompassApiClient] = None,
                 challenge: Optional[JusticeAndEquityChallenge] = None):
        """
        Initialize a challenge manager.

        Args:
            table_id: The table identifier
            username: The username
            api_client: Optional API client instance (creates new one if None)
            challenge: Optional challenge instance (creates JusticeAndEquityChallenge if None)
        """
        self.table_id = table_id
        self.username = username
        self.api_client = api_client or MoralcompassApiClient()
        self.challenge = challenge or JusticeAndEquityChallenge()

        # Metrics state
        self.metrics: Dict[str, float] = {}
        self.primary_metric: Optional[str] = None

        # Progress state - initialize with challenge totals
        self.tasks_completed: int = 0
        self.total_tasks: int = self.challenge.total_tasks
        self.questions_correct: int = 0
        self.total_questions: int = self.challenge.total_questions

        # Track completed tasks and answers
        self._completed_task_ids: set = set()
        self._answered_questions: Dict[str, int] = {}  # question_id -> selected_index

    def set_metric(self, name: str, value: float, primary: bool = False) -> None:
        """
        Set a metric value.

        Args:
            name: Metric name (e.g., 'accuracy', 'fairness', 'robustness')
            value: Metric value (should typically be between 0 and 1)
            primary: If True, sets this as the primary metric for scoring
        """
        self.metrics[name] = value

        if primary:
            self.primary_metric = name

    def set_progress(self, tasks_completed: int = 0, total_tasks: int = 0,
                     questions_correct: int = 0, total_questions: int = 0) -> None:
        """
        Set progress counters.

        Args:
            tasks_completed: Number of tasks completed
            total_tasks: Total number of tasks
            questions_correct: Number of questions answered correctly
            total_questions: Total number of questions
        """
        self.tasks_completed = tasks_completed
        self.total_tasks = total_tasks
        self.questions_correct = questions_correct
        self.total_questions = total_questions

    def complete_task(self, task_id: str) -> None:
        """
        Mark a task as completed.

        Args:
            task_id: The task identifier (e.g., 'A', 'B', 'C')
        """
        if task_id not in self._completed_task_ids:
            self._completed_task_ids.add(task_id)
            self.tasks_completed = len(self._completed_task_ids)

    def answer_question(self, task_id: str, question_id: str, selected_index: int) -> bool:
        """
        Record an answer to a question.

        Args:
            task_id: The task identifier
            question_id: The question identifier
            selected_index: The index of the selected answer

        Returns:
            True if the answer is correct, False otherwise
        """
        # Find the question
        question = None
        for task in self.challenge.tasks:
            if task.id == task_id:
                for q in task.questions:
                    if q.id == question_id:
                        question = q
                        break
                break

        if question is None:
            raise ValueError(f"Question {question_id} not found in task {task_id}")

        # Record the answer
        self._answered_questions[question_id] = selected_index

        # Check if correct and update counter
        is_correct = (selected_index == question.correct_index)

        # Recalculate questions_correct
        self.questions_correct = sum(
            1 for qid, idx in self._answered_questions.items()
            if self._is_answer_correct(qid, idx)
        )

        return is_correct

    def _is_answer_correct(self, question_id: str, selected_index: int) -> bool:
        """Check if an answer is correct"""
        for task in self.challenge.tasks:
            for q in task.questions:
                if q.id == question_id:
                    return selected_index == q.correct_index
        return False

    def get_progress_summary(self) -> Dict:
        """
        Get a summary of current progress.

        Returns:
            Dictionary with progress information including local score preview
        """
        return {
            'tasksCompleted': self.tasks_completed,
            'totalTasks': self.total_tasks,
            'questionsCorrect': self.questions_correct,
            'totalQuestions': self.total_questions,
            'metrics': self.metrics.copy(),
            'primaryMetric': self.primary_metric,
            'localScorePreview': self.get_local_score()
        }

    def get_local_score(self) -> float:
        """
        Calculate moral compass score locally without syncing to server.

        Returns:
            Moral compass score based on current state
        """
        if not self.metrics:
            return 0.0

        # Determine primary metric
        primary_metric = self.primary_metric
        if primary_metric is None:
            if 'accuracy' in self.metrics:
                primary_metric = 'accuracy'
            else:
                primary_metric = sorted(self.metrics.keys())[0]

        primary_value = self.metrics.get(primary_metric, 0.0)

        # Calculate progress ratio
        progress_denominator = self.total_tasks + self.total_questions
        if progress_denominator == 0:
            return 0.0

        progress_ratio = (self.tasks_completed + self.questions_correct) / progress_denominator

        return primary_value * progress_ratio

    def sync(self) -> Dict:
        """
        Sync current state to the Moral Compass API.

        Returns:
            API response dict with moralCompassScore and other fields
        """
        if not self.metrics:
            raise ValueError("No metrics set. Use set_metric() before syncing.")

        return self.api_client.update_moral_compass(
            table_id=self.table_id,
            username=self.username,
            metrics=self.metrics,
            tasks_completed=self.tasks_completed,
            total_tasks=self.total_tasks,
            questions_correct=self.questions_correct,
            total_questions=self.total_questions,
            primary_metric=self.primary_metric
        )

    def __repr__(self) -> str:
        return (
            f"ChallengeManager(table_id={self.table_id!r}, username={self.username!r}, "
            f"metrics={self.metrics}, primary_metric={self.primary_metric!r}, "
            f"local_score={self.get_local_score():.4f})"
        )
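For orientation (not part of the diff), a rough sketch of the ChallengeManager workflow this file adds; the table_id and username values below are placeholders, and sync() requires a reachable Moral Compass API:

# Hypothetical usage sketch (not part of the package diff); "example-table"
# and "student_1" are placeholder values.
from aimodelshare.moral_compass.challenge import ChallengeManager

manager = ChallengeManager(table_id="example-table", username="student_1")

# Record model metrics; mark accuracy as the primary metric for scoring.
manager.set_metric("accuracy", 0.87, primary=True)
manager.set_metric("fairness", 0.91)

# Work through task A: answer its question, then mark the task complete.
is_correct = manager.answer_question(task_id="A", question_id="A1", selected_index=1)
manager.complete_task("A")

# Preview the moral compass score locally before pushing anything to the server.
print(manager.get_progress_summary())
print(f"Local score: {manager.get_local_score():.4f}")

# Push the current state to the Moral Compass API (needs network access/credentials).
# result = manager.sync()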