QuizGenerator 0.6.3-py3-none-any.whl → 0.7.1-py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- QuizGenerator/contentast.py +2191 -2193
- QuizGenerator/misc.py +1 -1
- QuizGenerator/mixins.py +64 -64
- QuizGenerator/premade_questions/basic.py +16 -16
- QuizGenerator/premade_questions/cst334/languages.py +26 -26
- QuizGenerator/premade_questions/cst334/math_questions.py +42 -42
- QuizGenerator/premade_questions/cst334/memory_questions.py +124 -124
- QuizGenerator/premade_questions/cst334/persistence_questions.py +48 -48
- QuizGenerator/premade_questions/cst334/process.py +38 -38
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +45 -45
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +34 -34
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +53 -53
- QuizGenerator/premade_questions/cst463/gradient_descent/misc.py +2 -2
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +65 -65
- QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +39 -39
- QuizGenerator/premade_questions/cst463/models/attention.py +36 -36
- QuizGenerator/premade_questions/cst463/models/cnns.py +26 -26
- QuizGenerator/premade_questions/cst463/models/rnns.py +36 -36
- QuizGenerator/premade_questions/cst463/models/text.py +32 -32
- QuizGenerator/premade_questions/cst463/models/weight_counting.py +15 -15
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +124 -124
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +161 -161
- QuizGenerator/question.py +41 -41
- QuizGenerator/quiz.py +7 -7
- QuizGenerator/regenerate.py +114 -13
- QuizGenerator/typst_utils.py +2 -2
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/METADATA +1 -1
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/RECORD +31 -31
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/WHEEL +0 -0
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/licenses/LICENSE +0 -0
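Most of the touched lines follow a single pattern, visible in the hunks reproduced below: question modules now import the content AST once as a namespace (`import QuizGenerator.contentast as ca`) and refer to its classes as `ca.Section`, `ca.Paragraph`, `ca.AnswerTypes`, and so on, where 0.6.3 used the bare names. A minimal sketch of that usage, assembled from calls that appear in this diff (the standalone function and its `answer` argument are illustrative, not code from the package):

```python
import QuizGenerator.contentast as ca

def build_body(answer):
    # Illustrative only: mirrors the ca.* calls shown in the hunks below.
    body = ca.Section()
    body.add_element(
        ca.Paragraph([ca.Text("Given the below model, how many parameters does it use?")])
    )
    body.add_element(ca.LineBreak())
    body.add_element(answer)  # answer objects double as renderable elements
    return body
```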
@@ -7,7 +7,7 @@ from typing import List, Tuple
 
 from QuizGenerator.premade_questions.cst463.models.matrices import MatrixQuestion
 from QuizGenerator.question import Question, QuestionRegistry
-
+import QuizGenerator.contentast as ca
 from QuizGenerator.constants import MathRanges
 from QuizGenerator.mixins import TableQuestionMixin
 
@@ -59,34 +59,34 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
 
         ## Answers:
         # center_word, center_emb, context_words, context_embs, logits, probs
-        self.answers["logits"] = AnswerTypes.Vector(self.logits, label="Logits")
+        self.answers["logits"] = ca.AnswerTypes.Vector(self.logits, label="Logits")
         most_likely_idx = np.argmax(self.probs)
         most_likely_word = self.context_words[most_likely_idx]
-        self.answers["center_word"] = AnswerTypes.String(most_likely_word, label="Most likely context word")
+        self.answers["center_word"] = ca.AnswerTypes.String(most_likely_word, label="Most likely context word")
 
 
         return True
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         body.add_element(
-
+            ca.Paragraph([
                 f"Given center word: `{self.center_word}` with embedding {self.center_emb}, compute the skip-gram probabilities for each context word and identify the most likely one."
             ])
         )
         body.add_elements([
-
+            ca.Paragraph([ca.Text(f"`{w}` : "), str(e)]) for w, e in zip(self.context_words, self.context_embs)
         ])
 
         answers.append(self.answers["logits"])
         answers.append(self.answers["center_word"])
         body.add_elements([
-
+            ca.LineBreak(),
             self.answers["logits"],
-
+            ca.LineBreak(),
             self.answers["center_word"]
         ])
 
@@ -95,39 +95,39 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
-        digits =
+        explanation = ca.Section()
+        digits = ca.Answer.DEFAULT_ROUNDING_DIGITS
 
         explanation.add_element(
-
+            ca.Paragraph([
                 "In the skip-gram model, we predict context words given a center word by computing dot products between embeddings and applying softmax."
             ])
         )
 
         # Step 1: Show embeddings
         explanation.add_element(
-
-
+            ca.Paragraph([
+                ca.Text("Step 1: Given embeddings", emphasis=True)
             ])
         )
 
         # Format center embedding
         center_emb_str = "[" + ", ".join([f"{x:.{digits}f}" for x in self.center_emb]) + "]"
         explanation.add_element(
-
+            ca.Paragraph([
                 f"Center word `{self.center_word}`: {center_emb_str}"
             ])
         )
 
         explanation.add_element(
-
+            ca.Paragraph([
                 "Context words:"
            ])
         )
@@ -135,21 +135,21 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
         for i, (word, emb) in enumerate(zip(self.context_words, self.context_embs)):
             emb_str = "[" + ", ".join([f"{x:.2f}" for x in emb]) + "]"
             explanation.add_element(
-
+                ca.Paragraph([
                     f"`{word}`: {emb_str}"
                 ])
             )
 
         # Step 2: Compute logits (dot products)
         explanation.add_element(
-
-
+            ca.Paragraph([
+                ca.Text("Step 2: Compute logits (dot products)", emphasis=True)
             ])
         )
 
         # Show ONE example
         explanation.add_element(
-
+            ca.Paragraph([
                 f"Example: Logit for `{self.context_words[0]}`"
             ])
         )
@@ -160,20 +160,20 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
         logit_val = self.logits[0]
 
         explanation.add_element(
-
+            ca.Equation(f"{dot_product_terms} = {logit_val:.2f}")
         )
 
         logits_str = "[" + ", ".join([f"{x:.2f}" for x in self.logits]) + "]"
         explanation.add_element(
-
+            ca.Paragraph([
                 f"All logits: {logits_str}"
             ])
         )
 
         # Step 3: Apply softmax
         explanation.add_element(
-
-
+            ca.Paragraph([
+                ca.Text("Step 3: Apply softmax to get probabilities", emphasis=True)
             ])
         )
 
@@ -183,18 +183,18 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
         exp_terms = " + ".join([f"e^{{{l:.{digits}f}}}" for l in self.logits])
 
         explanation.add_element(
-
+            ca.Equation(f"\\text{{denominator}} = {exp_terms} = {sum_exp:.{digits}f}")
         )
 
         explanation.add_element(
-
+            ca.Paragraph([
                 "Probabilities:"
             ])
         )
 
         for i, (word, prob) in enumerate(zip(self.context_words, self.probs)):
             explanation.add_element(
-
+                ca.Equation(f"P(\\text{{{word}}}) = \\frac{{e^{{{self.logits[i]:.{digits}f}}}}}{{{sum_exp:.{digits}f}}} = {prob:.{digits}f}")
             )
 
         # Step 4: Identify most likely
@@ -202,15 +202,15 @@ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
         most_likely_word = self.context_words[most_likely_idx]
 
         explanation.add_element(
-
-
+            ca.Paragraph([
+                ca.Text("Conclusion:", emphasis=True),
                 f" The most likely context word is `{most_likely_word}` with probability {self.probs[most_likely_idx]:.{digits}f}"
             ])
         )
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
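The regenerated explanation above walks through the standard skip-gram arithmetic: each logit is the dot product of the center embedding with one context embedding, and softmax over the logits gives the per-context-word probabilities. A small NumPy sketch of that computation with made-up numbers (the variable names mirror the question's attributes; nothing below comes from the package itself):

```python
import numpy as np

# Illustrative values only; the question generates its own embeddings.
center_emb = np.array([0.2, -0.1, 0.4])
context_embs = np.array([[0.3, 0.1, 0.2],
                         [-0.2, 0.4, 0.1],
                         [0.5, -0.3, 0.2]])

logits = context_embs @ center_emb             # one dot product per context word
probs = np.exp(logits) / np.exp(logits).sum()  # softmax over the logits
most_likely_idx = int(np.argmax(probs))        # index of the most likely context word
```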
@@ -6,7 +6,7 @@ import numpy as np
 from typing import List, Tuple
 
 from QuizGenerator.question import Question, QuestionRegistry
-
+import QuizGenerator.contentast as ca
 from QuizGenerator.constants import MathRanges
 
 log = logging.getLogger(__name__)
@@ -85,25 +85,25 @@ class WeightCounting(Question, abc.ABC):
             continue
 
         self.num_parameters = self.model.count_params()
-        self.answers["num_parameters"] = AnswerTypes.Int(self.num_parameters, label="Number of Parameters")
+        self.answers["num_parameters"] = ca.AnswerTypes.Int(self.num_parameters, label="Number of Parameters")
 
         return True
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         body.add_element(
-
+            ca.Paragraph(
                 [
-
+                    ca.Text("Given the below model, how many parameters does it use?")
                 ]
             )
         )
 
         body.add_element(
-
+            ca.Code(
                 self.model_to_python(
                     self.model,
                     fields=self.fields
@@ -111,23 +111,23 @@ class WeightCounting(Question, abc.ABC):
             )
         )
 
-        body.add_element(
+        body.add_element(ca.LineBreak())
 
         answers.append(self.answers["num_parameters"])
         body.add_element(self.answers["num_parameters"])
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
+        explanation = ca.Section()
 
-        def markdown_summary(model) ->
+        def markdown_summary(model) -> ca.Table:
             # Ensure the model is built by running build() or calling it once
             if not model.built:
                 try:
@@ -155,19 +155,19 @@ class WeightCounting(Question, abc.ABC):
                 data.append([name, ltype, outshape, params])
 
             data.append(["**Total**", "", "", f"**{total_params}**"])
-            return
+            return ca.Table(data=data, headers=["Layer", "Type", "Output Shape", "Params"])
 
 
         summary_lines = []
         self.model.summary(print_fn=lambda x: summary_lines.append(x))
         explanation.add_element(
-            #
+            # ca.Text('\n'.join(summary_lines))
             markdown_summary(self.model)
         )
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
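The `WeightCounting` question above leans on the stock Keras `Model.count_params()` call and a per-layer summary table. As a sanity check of the arithmetic such a question asks for, here is a standalone Keras sketch (an illustrative two-layer model, not one the package generates; it assumes the `keras` package is importable):

```python
import keras

# Illustrative model only; the generated questions build their own architectures.
model = keras.Sequential([
    keras.Input(shape=(10,)),
    keras.layers.Dense(4, activation="relu"),  # 10 * 4 weights + 4 biases = 44
    keras.layers.Dense(1),                     #  4 * 1 weights + 1 bias   =  5
])

assert model.count_params() == 44 + 5  # matches the layer-by-layer count
```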