QuizGenerator 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- QuizGenerator/contentast.py +43 -10
- QuizGenerator/generate.py +1 -1
- QuizGenerator/mixins.py +6 -2
- QuizGenerator/premade_questions/basic.py +49 -7
- QuizGenerator/premade_questions/cst334/process.py +1 -7
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +92 -82
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +68 -45
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +235 -162
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +0 -1
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +51 -45
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +212 -215
- QuizGenerator/question.py +176 -18
- {quizgenerator-0.8.0.dist-info → quizgenerator-0.9.0.dist-info}/METADATA +9 -6
- {quizgenerator-0.8.0.dist-info → quizgenerator-0.9.0.dist-info}/RECORD +17 -17
- {quizgenerator-0.8.0.dist-info → quizgenerator-0.9.0.dist-info}/WHEEL +0 -0
- {quizgenerator-0.8.0.dist-info → quizgenerator-0.9.0.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.8.0.dist-info → quizgenerator-0.9.0.dist-info}/licenses/LICENSE +0 -0
The expanded hunks below correspond to QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py (+68 -45).

@@ -4,6 +4,8 @@ import abc
 import logging
 import math
 from typing import List, Tuple, Callable, Union, Any
+
+import sympy
 import sympy as sp
 
 import QuizGenerator.contentast as ca
@@ -28,37 +30,52 @@ class GradientDescentQuestion(Question, abc.ABC):
 
 @QuestionRegistry.register("GradientDescentWalkthrough")
 class GradientDescentWalkthrough(GradientDescentQuestion, TableQuestionMixin, BodyTemplatesMixin):
-
+  DEFAULT_NUM_STEPS = 4
+  DEFAULT_NUM_VARIABLES = 2
+  DEFAULT_MAX_DEGREE = 2
+  DEFAULT_SINGLE_VARIABLE = False
+  DEFAULT_MINIMIZE = True
+
   def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
-    self.num_steps = kwargs.get("num_steps",
-    self.num_variables = kwargs.get("num_variables",
-    self.max_degree = kwargs.get("max_degree",
-    self.single_variable = kwargs.get("single_variable",
-    self.minimize = kwargs.get("minimize",
+    self.num_steps = kwargs.get("num_steps", self.DEFAULT_NUM_STEPS)
+    self.num_variables = kwargs.get("num_variables", self.DEFAULT_NUM_VARIABLES)
+    self.max_degree = kwargs.get("max_degree", self.DEFAULT_MAX_DEGREE)
+    self.single_variable = kwargs.get("single_variable", self.DEFAULT_SINGLE_VARIABLE)
+    self.minimize = kwargs.get("minimize", self.DEFAULT_MINIMIZE)  # Default to minimization
 
     if self.single_variable:
       self.num_variables = 1
-
-
+
+  @classmethod
+  def _perform_gradient_descent(
+      cls,
+      function: sympy.Function,
+      gradient_function,
+      starting_point,
+      num_steps,
+      variables,
+      learning_rate,
+      minimize=True,
+  ) -> List[dict]:
    """
    Perform gradient descent and return step-by-step results.
    """
    results = []
 
-    x = list(map(float,
+    x = list(map(float, starting_point))  # current location as floats
 
-    for step in range(
-      subs_map = dict(zip(
+    for step in range(num_steps):
+      subs_map = dict(zip(variables, x))
 
       # gradient as floats
-      g_syms =
+      g_syms = gradient_function.subs(subs_map)
       g = [float(val) for val in g_syms]
 
       # function value
-      f_val = float(
+      f_val = float(function.subs(subs_map))
 
-      update = [
+      update = [learning_rate * gi for gi in g]
 
      results.append(
        {
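In plain terms, the new `_perform_gradient_descent` classmethod evaluates the gradient and the objective numerically via sympy substitution at the current point, scales the gradient by the learning rate, and steps downhill (or uphill when `minimize` is false). A minimal standalone sketch of that per-step arithmetic, with a made-up objective, point, and learning rate (nothing here is the package's API):

```python
# Standalone sketch (not QuizGenerator code): the same per-step arithmetic
# that _perform_gradient_descent carries out with a sympy expression and its
# symbolic gradient. Objective, point, and learning rate are invented.
import sympy

x, y = sympy.symbols("x y")
f = x**2 + 3 * y**2                                  # example objective
grad = sympy.Matrix([sympy.diff(f, v) for v in (x, y)])

point = [2.0, -1.0]                                  # current location
learning_rate = 0.1
subs_map = dict(zip((x, y), point))

g = [float(val) for val in grad.subs(subs_map)]      # gradient as floats -> [4.0, -6.0]
f_val = float(f.subs(subs_map))                      # function value     -> 7.0
update = [learning_rate * gi for gi in g]            # scaled step        -> [0.4, -0.6]
new_point = [xi - ui for xi, ui in zip(point, update)]   # minimize: move against the gradient
print(f_val, g, update, new_point)                   # new location -> [1.6, -0.4]
```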
@@ -70,61 +87,65 @@ class GradientDescentWalkthrough(GradientDescentQuestion, TableQuestionMixin, BodyTemplatesMixin):
        }
      )
 
-      x = [xi - ui for xi, ui in zip(x, update)] if
+      x = [xi - ui for xi, ui in zip(x, update)] if minimize else \
          [xi + ui for xi, ui in zip(x, update)]
 
    return results
 
-
-
-
-
-
-
-
-    if
-
-
-      self.num_variables = 1
-    if "minimize" in kwargs:
-      self.minimize = kwargs.get("minimize", self.minimize)
-
-    self.rng.seed(rng_seed)
+  @classmethod
+  def _build_context(cls, *, rng_seed=None, **kwargs):
+    context = super()._build_context(rng_seed=rng_seed, **kwargs)
+    context.num_steps = kwargs.get("num_steps", cls.DEFAULT_NUM_STEPS)
+    context.num_variables = kwargs.get("num_variables", cls.DEFAULT_NUM_VARIABLES)
+    context.max_degree = kwargs.get("max_degree", cls.DEFAULT_MAX_DEGREE)
+    context.single_variable = kwargs.get("single_variable", cls.DEFAULT_SINGLE_VARIABLE)
+    if context.single_variable:
+      context.num_variables = 1
+    context.minimize = kwargs.get("minimize", cls.DEFAULT_MINIMIZE)
 
    # Generate function and its properties
-
+    context.variables, context.function, context.gradient_function, context.equation = generate_function(
+      context.rng, context.num_variables, context.max_degree
+    )
 
    # Generate learning rate (expanded range)
-
+    context.learning_rate = context.rng.choice([0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5])
 
-
+    context.starting_point = [context.rng.randint(-3, 3) for _ in range(context.num_variables)]
 
    # Perform gradient descent
-
+    context.gradient_descent_results = cls._perform_gradient_descent(
+      context.function,
+      context.gradient_function,
+      context.starting_point,
+      context.num_steps,
+      context.variables,
+      context.learning_rate,
+      minimize=context.minimize,
+    )
 
    # Build answers for each step
-
-    for i, result in enumerate(
+    context.step_answers = {}
+    for i, result in enumerate(context.gradient_descent_results):
      step = result['step']
 
      # Location answer
      location_key = f"answer__location_{step}"
-
+      context.step_answers[location_key] = ca.AnswerTypes.Vector(list(result['location']), label=f"Location at step {step}")
 
      # Gradient answer
      gradient_key = f"answer__gradient_{step}"
-
+      context.step_answers[gradient_key] = ca.AnswerTypes.Vector(list(result['gradient']), label=f"Gradient at step {step}")
 
      # Update answer
      update_key = f"answer__update_{step}"
-
-
-    context = dict(kwargs)
-    context["rng_seed"] = rng_seed
+      context.step_answers[update_key] = ca.AnswerTypes.Vector(list(result['update']), label=f"Update at step {step}")
    return context
 
-
+  @classmethod
+  def _build_body(cls, context) -> Tuple[ca.Section, List[ca.Answer]]:
    """Build question body and collect answers."""
+    self = context
    body = ca.Section()
    answers = []
 
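The bigger shift in this hunk is structural: setup that 0.8.0 performed on the instance (including `self.rng.seed(rng_seed)`) now happens in a `_build_context` classmethod that hangs everything needed for rendering — RNG, generated function, learning rate, starting point, per-step answers — off a context object. A rough sketch of that pattern, using `SimpleNamespace` as a stand-in for whatever context type the base `Question._build_context` in question.py actually returns (not visible in this diff):

```python
# Rough sketch of the context-driven flow; SimpleNamespace and these defaults
# are stand-ins, not QuizGenerator's real context object or values.
import random
from types import SimpleNamespace

def build_context(rng_seed=None, **kwargs):
    context = SimpleNamespace()
    context.rng = random.Random(rng_seed)              # seeded RNG lives on the context
    context.num_steps = kwargs.get("num_steps", 4)     # mirrors DEFAULT_NUM_STEPS-style defaults
    context.learning_rate = context.rng.choice([0.1, 0.2, 0.3])
    context.starting_point = [context.rng.randint(-3, 3) for _ in range(2)]
    return context

ctx = build_context(rng_seed=42, num_steps=3)
print(ctx.num_steps, ctx.learning_rate, ctx.starting_point)
```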
@@ -179,7 +200,7 @@ class GradientDescentWalkthrough(GradientDescentQuestion, TableQuestionMixin, BodyTemplatesMixin):
      table_rows.append(row)
 
    # Create the table using mixin
-    gradient_table =
+    gradient_table = cls.create_answer_table(
      headers=headers,
      data_rows=table_rows,
      answer_columns=["location", headers[2], headers[3]]  # Use actual header objects
@@ -189,8 +210,10 @@ class GradientDescentWalkthrough(GradientDescentQuestion, TableQuestionMixin, BodyTemplatesMixin):
 
    return body, answers
 
-
+  @classmethod
+  def _build_explanation(cls, context) -> Tuple[ca.Section, List[ca.Answer]]:
    """Build question explanation."""
+    self = context
    explanation = ca.Section()
 
    explanation.add_element(
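One detail worth noting in `_build_body` and `_build_explanation`: the added `self = context` line lets body-building code that was written against `self.<attr>` keep running unchanged now that these methods are classmethods receiving a context. Illustration only; the class and attribute names below are made up:

```python
# Why "self = context" works: the method body keeps reading self.<attr>,
# but those attributes are now backed by the context object passed in.
from types import SimpleNamespace

class Walkthrough:
    @classmethod
    def build_body(cls, context):
        self = context   # alias so legacy self.<attr> access still resolves
        return f"{self.num_steps} steps at lr={self.learning_rate}"

ctx = SimpleNamespace(num_steps=4, learning_rate=0.1)
print(Walkthrough.build_body(ctx))   # -> "4 steps at lr=0.1"
```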