quizgenerator-0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. QuizGenerator/README.md +5 -0
  2. QuizGenerator/__init__.py +27 -0
  3. QuizGenerator/__main__.py +7 -0
  4. QuizGenerator/canvas/__init__.py +13 -0
  5. QuizGenerator/canvas/canvas_interface.py +627 -0
  6. QuizGenerator/canvas/classes.py +235 -0
  7. QuizGenerator/constants.py +149 -0
  8. QuizGenerator/contentast.py +1955 -0
  9. QuizGenerator/generate.py +253 -0
  10. QuizGenerator/logging.yaml +55 -0
  11. QuizGenerator/misc.py +579 -0
  12. QuizGenerator/mixins.py +548 -0
  13. QuizGenerator/performance.py +202 -0
  14. QuizGenerator/premade_questions/__init__.py +0 -0
  15. QuizGenerator/premade_questions/basic.py +103 -0
  16. QuizGenerator/premade_questions/cst334/__init__.py +1 -0
  17. QuizGenerator/premade_questions/cst334/languages.py +391 -0
  18. QuizGenerator/premade_questions/cst334/math_questions.py +297 -0
  19. QuizGenerator/premade_questions/cst334/memory_questions.py +1400 -0
  20. QuizGenerator/premade_questions/cst334/ostep13_vsfs.py +572 -0
  21. QuizGenerator/premade_questions/cst334/persistence_questions.py +451 -0
  22. QuizGenerator/premade_questions/cst334/process.py +648 -0
  23. QuizGenerator/premade_questions/cst463/__init__.py +0 -0
  24. QuizGenerator/premade_questions/cst463/gradient_descent/__init__.py +3 -0
  25. QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +369 -0
  26. QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +305 -0
  27. QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +650 -0
  28. QuizGenerator/premade_questions/cst463/gradient_descent/misc.py +73 -0
  29. QuizGenerator/premade_questions/cst463/math_and_data/__init__.py +2 -0
  30. QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +631 -0
  31. QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +534 -0
  32. QuizGenerator/premade_questions/cst463/models/__init__.py +0 -0
  33. QuizGenerator/premade_questions/cst463/models/attention.py +192 -0
  34. QuizGenerator/premade_questions/cst463/models/cnns.py +186 -0
  35. QuizGenerator/premade_questions/cst463/models/matrices.py +24 -0
  36. QuizGenerator/premade_questions/cst463/models/rnns.py +202 -0
  37. QuizGenerator/premade_questions/cst463/models/text.py +203 -0
  38. QuizGenerator/premade_questions/cst463/models/weight_counting.py +227 -0
  39. QuizGenerator/premade_questions/cst463/neural-network-basics/__init__.py +6 -0
  40. QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +1314 -0
  41. QuizGenerator/premade_questions/cst463/tensorflow-intro/__init__.py +6 -0
  42. QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +936 -0
  43. QuizGenerator/qrcode_generator.py +293 -0
  44. QuizGenerator/question.py +715 -0
  45. QuizGenerator/quiz.py +467 -0
  46. QuizGenerator/regenerate.py +472 -0
  47. QuizGenerator/typst_utils.py +113 -0
  48. quizgenerator-0.4.2.dist-info/METADATA +265 -0
  49. quizgenerator-0.4.2.dist-info/RECORD +52 -0
  50. quizgenerator-0.4.2.dist-info/WHEEL +4 -0
  51. quizgenerator-0.4.2.dist-info/entry_points.txt +3 -0
  52. quizgenerator-0.4.2.dist-info/licenses/LICENSE +674 -0
@@ -0,0 +1,203 @@
1
+ import abc
2
+ import logging
3
+ import math
4
+ import keras
5
+ import numpy as np
6
+
7
+ from QuizGenerator.misc import MatrixAnswer
8
+ from QuizGenerator.premade_questions.cst463.models.matrices import MatrixQuestion
9
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
10
+ from QuizGenerator.contentast import ContentAST
11
+ from QuizGenerator.constants import MathRanges
12
+ from QuizGenerator.mixins import TableQuestionMixin
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
@QuestionRegistry.register("cst463.word2vec.skipgram")
class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
  """
  Skip-gram word2vec question.

  Samples a center word and several candidate context words with small
  integer embeddings, then asks for the skip-gram logits and the most
  likely context word under a softmax over the dot products.
  """

  @staticmethod
  def skipgram_predict(center_emb, context_embs):
    """
    Compute skip-gram logits and softmax probabilities.

    Args:
      center_emb: (embed_dim,) center word embedding.
      context_embs: (num_contexts, embed_dim) context candidate embeddings.

    Returns:
      (logits, probs): both shaped (num_contexts,); `probs` is the softmax
      of `logits`.
    """
    # Dot product of every candidate context embedding with the center embedding.
    logits = context_embs @ center_emb

    # Numerically stable softmax (shifting by the max leaves the result unchanged).
    exp_logits = np.exp(logits - np.max(logits))
    probs = exp_logits / exp_logits.sum()

    return logits, probs

  def refresh(self, *args, **kwargs):
    """Resample words and embeddings, then record the expected answers."""
    super().refresh(*args, **kwargs)
    self.rng = np.random.RandomState(kwargs.get("rng_seed", None))

    embed_dim = kwargs.get("embed_dim", 3)
    num_contexts = kwargs.get("num_contexts", 3)

    # Vocabulary pool to draw the center and context words from.
    vocab = ['cat', 'dog', 'run', 'jump', 'happy', 'sad', 'tree', 'house',
             'walk', 'sleep', 'fast', 'slow', 'big', 'small']

    # Sample the center word plus the context candidates without replacement.
    self.selected_words = self.rng.choice(vocab, size=num_contexts + 1, replace=False)
    self.center_word = self.selected_words[0]
    self.context_words = self.selected_words[1:]

    # Small integer-ish embeddings keep the arithmetic tractable by hand.
    self.center_emb = self.get_rounded_matrix((embed_dim,), -2, 3)
    self.context_embs = self.get_rounded_matrix((num_contexts, embed_dim), -2, 3)

    self.logits, self.probs = self.skipgram_predict(self.center_emb, self.context_embs)

    ## Answers:
    # Students report the logits and the most likely context word.
    self.answers["logits"] = Answer.vector_value(key="logits", value=self.logits)
    most_likely_idx = np.argmax(self.probs)
    most_likely_word = self.context_words[most_likely_idx]
    # NOTE: the key stays "center_word" for backward compatibility with
    # previously generated quizzes, but the value is the most likely
    # *context* word.
    self.answers["center_word"] = Answer.string(key="center_word", value=most_likely_word)

    return True

  def get_body(self, **kwargs) -> ContentAST.Section:
    """Render the prompt: embeddings plus the two answer fields."""
    body = ContentAST.Section()

    body.add_element(
      ContentAST.Paragraph([
        f"Given center word: `{self.center_word}` with embedding {self.center_emb}, compute the skip-gram probabilities for each context word and identify the most likely one."
      ])
    )
    # One line per context word with its embedding.
    body.add_elements([
      ContentAST.Paragraph([ContentAST.Text(f"`{w}` : "), str(e)]) for w, e in zip(self.context_words, self.context_embs)
    ])

    body.add_elements([
      ContentAST.LineBreak(),
      self.answers["logits"].get_ast_element("Logits"),
      ContentAST.LineBreak(),
      self.answers["center_word"].get_ast_element("Most likely context word")
    ])

    log.debug(f"logits: {self.logits}")
    log.debug(f"probs: {self.probs}")

    return body

  def get_explanation(self, **kwargs) -> ContentAST.Section:
    """Walk through the dot products, softmax, and the final argmax."""
    explanation = ContentAST.Section()
    # Use one rounding width everywhere so the worked example matches the
    # expected answer's precision.
    digits = Answer.DEFAULT_ROUNDING_DIGITS

    explanation.add_element(
      ContentAST.Paragraph([
        "In the skip-gram model, we predict context words given a center word by computing dot products between embeddings and applying softmax."
      ])
    )

    # Step 1: Show embeddings
    explanation.add_element(
      ContentAST.Paragraph([
        ContentAST.Text("Step 1: Given embeddings", emphasis=True)
      ])
    )

    center_emb_str = "[" + ", ".join([f"{x:.{digits}f}" for x in self.center_emb]) + "]"
    explanation.add_element(
      ContentAST.Paragraph([
        f"Center word `{self.center_word}`: {center_emb_str}"
      ])
    )

    explanation.add_element(
      ContentAST.Paragraph([
        "Context words:"
      ])
    )

    for word, emb in zip(self.context_words, self.context_embs):
      emb_str = "[" + ", ".join([f"{x:.{digits}f}" for x in emb]) + "]"
      explanation.add_element(
        ContentAST.Paragraph([
          f"`{word}`: {emb_str}"
        ])
      )

    # Step 2: Compute logits (dot products)
    explanation.add_element(
      ContentAST.Paragraph([
        ContentAST.Text("Step 2: Compute logits (dot products)", emphasis=True)
      ])
    )

    # Show ONE worked example; the remaining logits are listed afterwards.
    explanation.add_element(
      ContentAST.Paragraph([
        f"Example: Logit for `{self.context_words[0]}`"
      ])
    )

    context_emb = self.context_embs[0]
    dot_product_terms = " + ".join([f"({self.center_emb[j]:.{digits}f} \\times {context_emb[j]:.{digits}f})"
                                    for j in range(len(self.center_emb))])
    logit_val = self.logits[0]

    explanation.add_element(
      ContentAST.Equation(f"{dot_product_terms} = {logit_val:.{digits}f}")
    )

    logits_str = "[" + ", ".join([f"{x:.{digits}f}" for x in self.logits]) + "]"
    explanation.add_element(
      ContentAST.Paragraph([
        f"All logits: {logits_str}"
      ])
    )

    # Step 3: Apply softmax
    explanation.add_element(
      ContentAST.Paragraph([
        ContentAST.Text("Step 3: Apply softmax to get probabilities", emphasis=True)
      ])
    )

    exp_logits = np.exp(self.logits)
    sum_exp = exp_logits.sum()

    exp_terms = " + ".join([f"e^{{{l:.{digits}f}}}" for l in self.logits])

    explanation.add_element(
      ContentAST.Equation(f"\\text{{denominator}} = {exp_terms} = {sum_exp:.{digits}f}")
    )

    explanation.add_element(
      ContentAST.Paragraph([
        "Probabilities:"
      ])
    )

    for i, (word, prob) in enumerate(zip(self.context_words, self.probs)):
      explanation.add_element(
        ContentAST.Equation(f"P(\\text{{{word}}}) = \\frac{{e^{{{self.logits[i]:.{digits}f}}}}}{{{sum_exp:.{digits}f}}} = {prob:.{digits}f}")
      )

    # Step 4: Identify most likely
    most_likely_idx = np.argmax(self.probs)
    most_likely_word = self.context_words[most_likely_idx]

    explanation.add_element(
      ContentAST.Paragraph([
        ContentAST.Text("Conclusion:", emphasis=True),
        f" The most likely context word is `{most_likely_word}` with probability {self.probs[most_likely_idx]:.{digits}f}"
      ])
    )

    return explanation
@@ -0,0 +1,227 @@
1
+ import abc
2
+ import logging
3
+ import math
4
+ import keras
5
+ import numpy as np
6
+
7
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
8
+ from QuizGenerator.contentast import ContentAST
9
+ from QuizGenerator.constants import MathRanges
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+
14
class WeightCounting(Question, abc.ABC):
  """
  Abstract base for questions that display a keras model's source and ask
  for its total trainable-parameter count.
  """

  @abc.abstractmethod
  def get_model(self) -> tuple[keras.Model, list[str]]:
    """
    Build the model to display.

    Returns:
      (model, fields): the keras model, and the layer-config keys worth
      showing in the rendered source listing.
    """
    pass

  @staticmethod
  def model_to_python(model: keras.Model, fields=None, include_input=True):
    """
    Render `model` as `keras.models.Sequential([...])` source text.

    Args:
      model: the model to render.
      fields: layer-config keys to include; falsy means include everything.
      include_input: synthesize a `keras.layers.Input(...)` line from
        `model.input_shape` when available.

    Returns:
      A multi-line string of Python source.
    """
    if fields is None:
      fields = []

    def sanitize(v):
      """Convert numpy types to pure Python so repr() prints cleanly."""
      if isinstance(v, np.generic):  # np.int64, np.float32, etc.
        return v.item()
      if isinstance(v, (list, tuple)):
        return type(v)(sanitize(x) for x in v)
      if isinstance(v, dict):
        return {k: sanitize(x) for k, x in v.items()}
      return v

    lines = []
    lines.append("keras.models.Sequential([")

    # ---- Emit an Input line if we can ----
    # model.input_shape is like (None, H, W, C) or (None, D)
    if include_input and getattr(model, "input_shape", None) is not None:
      input_shape = sanitize(model.input_shape[1:])  # drop batch dimension
      # If it's a 1D shape like (784,), keep as tuple; if scalar, still fine.
      lines.append(f"  keras.layers.Input(shape={input_shape!r}),")

    # ---- Emit all other layers ----
    for layer in model.layers:
      # If user explicitly had an Input layer, skip it so we don't duplicate
      # the synthesized one above.
      if isinstance(layer, keras.layers.InputLayer):
        continue

      cfg = layer.get_config()

      # If fields is empty, include everything; otherwise filter by fields.
      if fields:
        items = [(k, v) for k, v in cfg.items() if k in fields]
      else:
        items = cfg.items()

      arg_lines = [
        f"{k}={sanitize(v)!r}"  # !r so strings get quotes, etc.
        for k, v in items
      ]
      args = ",\n    ".join(arg_lines)

      # Bind the separator to a name: backslashes inside f-string expressions
      # are only legal on Python 3.12+ (PEP 701).
      pad = "\n    " if args else ""
      lines.append(
        f"  keras.layers.{layer.__class__.__name__}({pad}{args}{pad}),"
      )

    lines.append("])")
    return "\n".join(lines)

  def refresh(self, *args, **kwargs):
    """Generate a model and record its parameter count as the answer."""
    super().refresh(*args, **kwargs)

    # Randomly drawn configurations can be invalid (e.g. a pooling window
    # larger than the conv output); keras raises ValueError, so resample
    # until construction succeeds.
    refresh_success = False
    while not refresh_success:
      try:
        self.model, self.fields = self.get_model()
        refresh_success = True
      except ValueError as e:
        log.error(e)
        log.info(f"Regenerating {self.__class__.__name__} due to improper configuration")
        continue

    self.num_parameters = self.model.count_params()
    self.answers["num_parameters"] = Answer.integer(
      "num_parameters",
      self.num_parameters
    )

    return True

  def get_body(self, **kwargs) -> ContentAST.Section:
    """Render the prompt: the model source plus the answer field."""
    body = ContentAST.Section()

    body.add_element(
      ContentAST.Paragraph(
        [
          ContentAST.Text("Given the below model, how many parameters does it use?")
        ]
      )
    )

    body.add_element(
      ContentAST.Code(
        self.model_to_python(
          self.model,
          fields=self.fields
        )
      )
    )

    body.add_element(ContentAST.LineBreak())

    body.add_element(
      ContentAST.Answer(self.answers["num_parameters"], "Number of Parameters")
    )

    return body

  def get_explanation(self, **kwargs) -> ContentAST.Section:
    """Explain via a per-layer parameter-count table."""
    explanation = ContentAST.Section()

    def markdown_summary(model) -> ContentAST.Table:
      # Ensure the model is built by running build() or calling it once
      if not model.built:
        try:
          model.build(model.input_shape)
        except Exception:
          pass  # Some subclassed models need real data to build

      data = []
      total_params = 0

      for layer in model.layers:
        name = layer.name
        ltype = layer.__class__.__name__

        # Output shape may be unavailable until the layer has been called.
        try:
          outshape = tuple(layer.output.shape)
        except Exception:
          outshape = "?"

        params = layer.count_params()
        total_params += params

        data.append([name, ltype, outshape, params])

      data.append(["**Total**", "", "", f"**{total_params}**"])
      return ContentAST.Table(data=data, headers=["Layer", "Type", "Output Shape", "Params"])

    explanation.add_element(
      markdown_summary(self.model)
    )

    return explanation
164
+
165
+
166
@QuestionRegistry.register("cst463.WeightCounting-CNN")
class WeightCounting_CNN(WeightCounting):
  """Weight counting for a small Conv2D -> MaxPool2D -> Dense model."""

  def get_model(self) -> tuple[keras.Model, list[str]]:
    """
    Build a randomly configured convolutional model.

    Returns:
      (model, fields) per the WeightCounting contract.
    """
    # Cast draws to plain Python types so numpy scalars don't leak into the
    # layer configs (matches WeightCounting_RNN).
    input_size = int(self.rng.choice(np.arange(28, 32)))
    cnn_num_filters = int(self.rng.choice(2 ** np.arange(8)))
    cnn_kernel_size = int(self.rng.choice(1 + np.arange(10)))
    cnn_strides = int(self.rng.choice(1 + np.arange(10)))
    pool_size = int(self.rng.choice(1 + np.arange(10)))
    pool_strides = int(self.rng.choice(1 + np.arange(10)))
    num_output_size = int(self.rng.choice([1, 10, 32, 100]))

    # Let's just make a small model.  Invalid combinations (e.g. pooling
    # larger than the conv output) raise ValueError and are resampled by
    # WeightCounting.refresh.
    model = keras.models.Sequential(
      [
        keras.layers.Input((input_size, input_size, 1)),
        keras.layers.Conv2D(
          filters=cnn_num_filters,
          kernel_size=(cnn_kernel_size, cnn_kernel_size),
          strides=(cnn_strides, cnn_strides),
          padding="valid"
        ),
        keras.layers.MaxPool2D(
          pool_size=(pool_size, pool_size),
          strides=(pool_strides, pool_strides)
        ),
        keras.layers.Dense(
          num_output_size
        )
      ]
    )
    return model, ["units", "filters", "kernel_size", "strides", "padding", "pool_size"]
198
+
199
+
200
@QuestionRegistry.register("cst463.WeightCounting-RNN")
class WeightCounting_RNN(WeightCounting):
  """Weight counting for a recurrent model (SimpleRNN -> Dense)."""

  def get_model(self) -> tuple[keras.Model, list[str]]:
    """
    Build a randomly configured recurrent model.

    Returns:
      (model, fields) per the WeightCounting contract.
    """
    # Draw the configuration.  The RNG call order must stay fixed so a
    # seeded question regenerates the identical model.
    seq_len = int(self.rng.choice(np.arange(20, 41)))
    num_features = int(self.rng.choice(np.arange(8, 65)))

    hidden_units = int(self.rng.choice(2 ** np.arange(4, 9)))
    layer_name = self.rng.choice(["SimpleRNN"])
    keep_sequences = bool(self.rng.choice([True, False]))

    output_units = int(self.rng.choice([1, 10, 32, 100]))

    # Resolve the recurrent layer class by name.
    recurrent_cls = getattr(keras.layers, layer_name)

    stack = [
      keras.layers.Input((seq_len, num_features)),
      recurrent_cls(
        units=hidden_units,
        return_sequences=keep_sequences,
      ),
      keras.layers.Dense(output_units),
    ]
    return keras.models.Sequential(stack), ["units", "return_sequences"]
223
+
224
+
225
@QuestionRegistry.register()
class ConvolutionCalculation(Question):
  """Placeholder: registered in the question registry but not yet implemented."""
  # NOTE(review): this registers an empty Question subclass — presumably a
  # stub for a future output-shape/convolution-arithmetic question; confirm
  # whether registering it without a body is intentional.
  pass
@@ -0,0 +1,6 @@
1
+ from .neural_network_questions import (
2
+ ForwardPassQuestion,
3
+ BackpropGradientQuestion,
4
+ EnsembleAveragingQuestion,
5
+ EndToEndTrainingQuestion
6
+ )