QuizGenerator 0.1.4__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,202 @@
1
+ import abc
2
+ import logging
3
+ import math
4
+ import keras
5
+ import numpy as np
6
+
7
+ from .matrices import MatrixQuestion
8
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
9
+ from QuizGenerator.contentast import ContentAST
10
+ from QuizGenerator.constants import MathRanges
11
+ from QuizGenerator.mixins import TableQuestionMixin
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+
16
+ @QuestionRegistry.register("cst463.rnn.forward-pass")
17
+ class RNNForwardPass(MatrixQuestion, TableQuestionMixin):
18
+
19
+ @staticmethod
20
+ def rnn_forward(x_seq, W_xh, W_hh, b_h, h_0, activation='tanh'):
21
+ """
22
+ x_seq: (seq_len, input_dim) - input sequence
23
+ W_xh: (input_dim, hidden_dim) - input to hidden weights
24
+ W_hh: (hidden_dim, hidden_dim) - hidden to hidden weights
25
+ b_h: (hidden_dim,) - hidden bias
26
+ h_0: (hidden_dim,) - initial hidden state
27
+
28
+ Returns: all hidden states (seq_len, hidden_dim)
29
+ """
30
+ seq_len = len(x_seq)
31
+ hidden_dim = W_hh.shape[0]
32
+
33
+ h_states = np.zeros((seq_len, hidden_dim))
34
+ h_t = h_0
35
+
36
+ for t in range(seq_len):
37
+ h_t = x_seq[t] @ W_xh + h_t @ W_hh + b_h
38
+ if activation == 'tanh':
39
+ h_t = np.tanh(h_t)
40
+ elif activation == 'relu':
41
+ h_t = np.maximum(0, h_t)
42
+ h_states[t] = h_t
43
+
44
+ return h_states
45
+
46
+ def refresh(self, *args, **kwargs):
47
+ super().refresh(*args, **kwargs)
48
+ self.rng = np.random.RandomState(kwargs.get("rng_seed", None))
49
+
50
+ seq_len = kwargs.get("seq_len", 3)
51
+ input_dim = kwargs.get("input_dim", 1)
52
+ hidden_dim = kwargs.get("hidden_dim", 1)
53
+
54
+ # Small integer weights for hand calculation
55
+ self.x_seq = self.get_rounded_matrix((seq_len, input_dim))
56
+ self.W_xh = self.get_rounded_matrix((input_dim, hidden_dim), -1, 2)
57
+ self.W_hh = self.get_rounded_matrix((hidden_dim, hidden_dim), -1, 2)
58
+ self.b_h = self.get_rounded_matrix((hidden_dim,), -1, 2)
59
+ self.h_0 = np.zeros(hidden_dim)
60
+
61
+ self.h_states = self.rnn_forward(self.x_seq, self.W_xh, self.W_hh, self.b_h, self.h_0)
62
+
63
+ ## Answers:
64
+ # x_seq, W_xh, W_hh, b_h, h_0, h_states
65
+
66
+ self.answers["output_sequence"] = Answer.matrix(key="output_sequence", value=self.h_states)
67
+
68
+ return True
69
+
70
+ def get_body(self, **kwargs) -> ContentAST.Section:
71
+ body = ContentAST.Section()
72
+
73
+ body.add_element(
74
+ ContentAST.Paragraph([
75
+ ContentAST.Text("Given the below information about an RNN, please calculate the output sequence."),
76
+ "Assume that you are using a tanh activation function."
77
+ ])
78
+ )
79
+ body.add_element(
80
+ self.create_info_table(
81
+ {
82
+ ContentAST.Container(["Input sequence, ", ContentAST.Equation("x_{seq}", inline=True)]) : ContentAST.Matrix(self.x_seq),
83
+ ContentAST.Container(["Input weights, ", ContentAST.Equation("W_{xh}", inline=True)]) : ContentAST.Matrix(self.W_xh),
84
+ ContentAST.Container(["Hidden weights, ", ContentAST.Equation("W_{hh}", inline=True)]) : ContentAST.Matrix(self.W_hh),
85
+ ContentAST.Container(["Bias, ", ContentAST.Equation("b_{h}", inline=True)]) : ContentAST.Matrix(self.b_h),
86
+ ContentAST.Container(["Hidden states, ", ContentAST.Equation("h_{0}", inline=True)]) : ContentAST.Matrix(self.h_0),
87
+ }
88
+ )
89
+ )
90
+
91
+ body.add_element(ContentAST.LineBreak())
92
+
93
+ body.add_element(
94
+ self.answers["output_sequence"].get_ast_element(label=f"Hidden states")
95
+ )
96
+
97
+ return body
98
+
99
+ def get_explanation(self, **kwargs) -> ContentAST.Section:
100
+ explanation = ContentAST.Section()
101
+ digits = Answer.DEFAULT_ROUNDING_DIGITS
102
+
103
+ explanation.add_element(
104
+ ContentAST.Paragraph([
105
+ "For an RNN forward pass, we compute the hidden state at each time step using:"
106
+ ])
107
+ )
108
+
109
+ explanation.add_element(
110
+ ContentAST.Equation(r"h_t = \tanh(x_t W_{xh} + h_{t-1} W_{hh} + b_h)")
111
+ )
112
+
113
+ explanation.add_element(
114
+ ContentAST.Paragraph([
115
+ "Where the input contributes via ", ContentAST.Equation("W_{xh}", inline=True),
116
+ ", the previous hidden state contributes via ", ContentAST.Equation("W_{hh}", inline=True),
117
+ ", and ", ContentAST.Equation("b_h", inline=True), " is the bias."
118
+ ])
119
+ )
120
+
121
+ # Format arrays with proper rounding
122
+ def format_array(arr):
123
+ from QuizGenerator.misc import fix_negative_zero
124
+ if arr.ndim == 0:
125
+ return f"{fix_negative_zero(arr):.{digits}f}"
126
+ return "[" + ", ".join([f"{fix_negative_zero(x):.{digits}f}" for x in arr.flatten()]) + "]"
127
+
128
+ # Show detailed examples for first 2 timesteps (or just 1 if seq_len == 1)
129
+ seq_len = len(self.x_seq)
130
+ num_examples = min(2, seq_len)
131
+
132
+ explanation.add_element(ContentAST.Paragraph([""]))
133
+
134
+ for t in range(num_examples):
135
+ explanation.add_element(
136
+ ContentAST.Paragraph([
137
+ ContentAST.Text(f"Example: Timestep {t}", emphasis=True)
138
+ ])
139
+ )
140
+
141
+ # Compute step t
142
+ x_contribution = self.x_seq[t] @ self.W_xh
143
+ if t == 0:
144
+ h_prev = self.h_0
145
+ h_prev_label = 'h_{-1}'
146
+ h_prev_desc = " (initial state)"
147
+ else:
148
+ h_prev = self.h_states[t-1]
149
+ h_prev_label = f'h_{{{t-1}}}'
150
+ h_prev_desc = ""
151
+
152
+ h_contribution = h_prev @ self.W_hh
153
+ pre_activation = x_contribution + h_contribution + self.b_h
154
+ h_result = np.tanh(pre_activation)
155
+
156
+ explanation.add_element(
157
+ ContentAST.Paragraph([
158
+ "Input contribution: ",
159
+ ContentAST.Equation(f'x_{t} W_{{xh}}', inline=True),
160
+ f" = {format_array(x_contribution)}"
161
+ ])
162
+ )
163
+
164
+ explanation.add_element(
165
+ ContentAST.Paragraph([
166
+ "Hidden contribution: ",
167
+ ContentAST.Equation(f'{h_prev_label} W_{{hh}}', inline=True),
168
+ f"{h_prev_desc} = {format_array(h_contribution)}"
169
+ ])
170
+ )
171
+
172
+ explanation.add_element(
173
+ ContentAST.Paragraph([
174
+ f"Pre-activation: {format_array(pre_activation)}"
175
+ ])
176
+ )
177
+
178
+ explanation.add_element(
179
+ ContentAST.Paragraph([
180
+ "After tanh: ",
181
+ ContentAST.Equation(f'h_{t}', inline=True),
182
+ f" = {format_array(h_result)}"
183
+ ])
184
+ )
185
+
186
+ # Add visual separator between timesteps (except after the last one)
187
+ if t < num_examples - 1:
188
+ explanation.add_element(ContentAST.Paragraph([""]))
189
+
190
+ # Show complete output sequence (rounded)
191
+ explanation.add_element(
192
+ ContentAST.Paragraph([
193
+ "Complete hidden state sequence (each row is one timestep):"
194
+ ])
195
+ )
196
+
197
+ explanation.add_element(
198
+ ContentAST.Matrix(np.round(self.h_states, digits))
199
+ )
200
+
201
+ return explanation
202
+
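For reference, the recurrence implemented by rnn_forward above can be checked by hand with a few lines of plain numpy. A minimal, self-contained sketch with hypothetical values (not part of the package):

    import numpy as np

    x_seq = np.array([[1.0], [2.0], [0.5]])  # (seq_len, input_dim)
    W_xh = np.array([[1.0]])                 # (input_dim, hidden_dim)
    W_hh = np.array([[0.5]])                 # (hidden_dim, hidden_dim)
    b_h = np.array([0.0])                    # (hidden_dim,)
    h_t = np.array([0.0])                    # initial hidden state h_0

    for t in range(len(x_seq)):
        h_t = np.tanh(x_seq[t] @ W_xh + h_t @ W_hh + b_h)
        print(t, h_t)  # each h_t matches row t of RNNForwardPass.rnn_forward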
@@ -0,0 +1,201 @@
1
+ import abc
2
+ import logging
3
+ import math
4
+ import keras
5
+ import numpy as np
6
+
7
+ from QuizGenerator.misc import MatrixAnswer
8
+ from QuizGenerator.premade_questions.cst463.models.matrices import MatrixQuestion
9
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
10
+ from QuizGenerator.contentast import ContentAST
11
+ from QuizGenerator.constants import MathRanges
12
+ from QuizGenerator.mixins import TableQuestionMixin
13
+
14
+ log = logging.getLogger(__name__)
15
+
16
+
17
+ @QuestionRegistry.register("cst463.word2vec.skipgram")
18
+ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
19
+
20
+ @staticmethod
21
+ def skipgram_predict(center_emb, context_embs):
22
+ """
23
+ center_emb: (embed_dim,) - center word embedding
24
+ context_embs: (num_contexts, embed_dim) - context candidate embeddings
25
+
26
+ Returns: (logits, probs), both of shape (num_contexts,)
27
+ """
28
+ # Compute dot products (logits)
29
+ logits = context_embs @ center_emb
30
+
31
+ # Softmax
32
+ exp_logits = np.exp(logits)
33
+ probs = exp_logits / exp_logits.sum()
34
+
35
+ return logits, probs
36
+
37
+ def refresh(self, *args, **kwargs):
38
+ super().refresh(*args, **kwargs)
39
+ self.rng = np.random.RandomState(kwargs.get("rng_seed", None))
40
+
41
+ embed_dim = kwargs.get("embed_dim", 3)
42
+ num_contexts = kwargs.get("num_contexts", 3)
43
+
44
+ # Vocabulary pool
45
+ vocab = ['cat', 'dog', 'run', 'jump', 'happy', 'sad', 'tree', 'house',
46
+ 'walk', 'sleep', 'fast', 'slow', 'big', 'small']
47
+
48
+ # Sample words
49
+ self.selected_words = self.rng.choice(vocab, size=num_contexts + 1, replace=False)
50
+ self.center_word = self.selected_words[0]
51
+ self.context_words = self.selected_words[1:]
52
+
53
+ # Small integer embeddings
54
+
55
+ self.center_emb = self.get_rounded_matrix((embed_dim,), -2, 3)
56
+ self.context_embs = self.get_rounded_matrix((num_contexts, embed_dim), -2, 3)
57
+
58
+ self.logits, self.probs = self.skipgram_predict(self.center_emb, self.context_embs)
59
+
60
+ ## Answers:
61
+ # center_word, center_emb, context_words, context_embs, logits, probs
62
+ self.answers["logits"] = Answer.vector_value(key="logits", value=self.logits)
63
+ self.answers["center_word"] = Answer.string(key="center_word", value=self.center_word)
64
+
65
+
66
+ return True
67
+
68
+ def get_body(self, **kwargs) -> ContentAST.Section:
69
+ body = ContentAST.Section()
70
+
71
+ body.add_element(
72
+ ContentAST.Paragraph([
73
+ f"Given center word: `{self.center_word}` with embedding {self.center_emb}, compute the skip-gram probabilities for each context word and identify the most likely one."
74
+ ])
75
+ )
76
+ body.add_elements([
77
+ ContentAST.Paragraph([ContentAST.Text(f"`{w}` : "), str(e)]) for w, e in zip(self.context_words, self.context_embs)
78
+ ])
79
+
80
+ body.add_elements([
81
+ ContentAST.LineBreak(),
82
+ self.answers["logits"].get_ast_element("Logits"),
83
+ ContentAST.LineBreak(),
84
+ self.answers["center_word"].get_ast_element("Center word")
85
+ ])
86
+
87
+
88
+ log.debug(f"output: {self.logits}")
89
+ log.debug(f"weights: {self.probs}")
90
+
91
+ return body
92
+
93
+ def get_explanation(self, **kwargs) -> ContentAST.Section:
94
+ explanation = ContentAST.Section()
95
+ digits = Answer.DEFAULT_ROUNDING_DIGITS
96
+
97
+ explanation.add_element(
98
+ ContentAST.Paragraph([
99
+ "In the skip-gram model, we predict context words given a center word by computing dot products between embeddings and applying softmax."
100
+ ])
101
+ )
102
+
103
+ # Step 1: Show embeddings
104
+ explanation.add_element(
105
+ ContentAST.Paragraph([
106
+ ContentAST.Text("Step 1: Given embeddings", emphasis=True)
107
+ ])
108
+ )
109
+
110
+ # Format center embedding
111
+ center_emb_str = "[" + ", ".join([f"{x:.{digits}f}" for x in self.center_emb]) + "]"
112
+ explanation.add_element(
113
+ ContentAST.Paragraph([
114
+ f"Center word `{self.center_word}`: {center_emb_str}"
115
+ ])
116
+ )
117
+
118
+ explanation.add_element(
119
+ ContentAST.Paragraph([
120
+ "Context words:"
121
+ ])
122
+ )
123
+
124
+ for i, (word, emb) in enumerate(zip(self.context_words, self.context_embs)):
125
+ emb_str = "[" + ", ".join([f"{x:.2f}" for x in emb]) + "]"
126
+ explanation.add_element(
127
+ ContentAST.Paragraph([
128
+ f"`{word}`: {emb_str}"
129
+ ])
130
+ )
131
+
132
+ # Step 2: Compute logits (dot products)
133
+ explanation.add_element(
134
+ ContentAST.Paragraph([
135
+ ContentAST.Text("Step 2: Compute logits (dot products)", emphasis=True)
136
+ ])
137
+ )
138
+
139
+ # Show ONE example
140
+ explanation.add_element(
141
+ ContentAST.Paragraph([
142
+ f"Example: Logit for `{self.context_words[0]}`"
143
+ ])
144
+ )
145
+
146
+ context_emb = self.context_embs[0]
147
+ dot_product_terms = " + ".join([f"({self.center_emb[j]:.2f} \\times {context_emb[j]:.2f})"
148
+ for j in range(len(self.center_emb))])
149
+ logit_val = self.logits[0]
150
+
151
+ explanation.add_element(
152
+ ContentAST.Equation(f"{dot_product_terms} = {logit_val:.2f}")
153
+ )
154
+
155
+ logits_str = "[" + ", ".join([f"{x:.2f}" for x in self.logits]) + "]"
156
+ explanation.add_element(
157
+ ContentAST.Paragraph([
158
+ f"All logits: {logits_str}"
159
+ ])
160
+ )
161
+
162
+ # Step 3: Apply softmax
163
+ explanation.add_element(
164
+ ContentAST.Paragraph([
165
+ ContentAST.Text("Step 3: Apply softmax to get probabilities", emphasis=True)
166
+ ])
167
+ )
168
+
169
+ exp_logits = np.exp(self.logits)
170
+ sum_exp = exp_logits.sum()
171
+
172
+ exp_terms = " + ".join([f"e^{{{l:.{digits}f}}}" for l in self.logits])
173
+
174
+ explanation.add_element(
175
+ ContentAST.Equation(f"\\text{{denominator}} = {exp_terms} = {sum_exp:.{digits}f}")
176
+ )
177
+
178
+ explanation.add_element(
179
+ ContentAST.Paragraph([
180
+ "Probabilities:"
181
+ ])
182
+ )
183
+
184
+ for i, (word, prob) in enumerate(zip(self.context_words, self.probs)):
185
+ explanation.add_element(
186
+ ContentAST.Equation(f"P(\\text{{{word}}}) = \\frac{{e^{{{self.logits[i]:.{digits}f}}}}}{{{sum_exp:.{digits}f}}} = {prob:.{digits}f}")
187
+ )
188
+
189
+ # Step 4: Identify most likely
190
+ most_likely_idx = np.argmax(self.probs)
191
+ most_likely_word = self.context_words[most_likely_idx]
192
+
193
+ explanation.add_element(
194
+ ContentAST.Paragraph([
195
+ ContentAST.Text("Conclusion:", emphasis=True),
196
+ f" The most likely context word is `{most_likely_word}` with probability {self.probs[most_likely_idx]:.{digits}f}"
197
+ ])
198
+ )
199
+
200
+ return explanation
201
+
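The dot-product and softmax steps above can likewise be reproduced outside the question class with plain numpy. A minimal sketch with hypothetical embeddings (not part of the package):

    import numpy as np

    center_emb = np.array([1.0, -1.0])                 # (embed_dim,)
    context_embs = np.array([[2.0, 0.0], [0.0, 1.0]])  # (num_contexts, embed_dim)

    logits = context_embs @ center_emb                 # one dot product per context word
    probs = np.exp(logits) / np.exp(logits).sum()      # softmax over the candidates
    print(logits)  # [ 2. -1.]
    print(probs)   # approximately [0.953 0.047]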
@@ -0,0 +1,227 @@
1
+ import abc
2
+ import logging
3
+ import math
4
+ import keras
5
+ import numpy as np
6
+
7
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
8
+ from QuizGenerator.contentast import ContentAST
9
+ from QuizGenerator.constants import MathRanges
10
+
11
+ log = logging.getLogger(__name__)
12
+
13
+
14
+ class WeightCounting(Question, abc.ABC):
15
+ @abc.abstractmethod
16
+ def get_model(self) -> tuple[keras.Model, list[str]]:
17
+ pass
18
+
19
+ @staticmethod
20
+ def model_to_python(model: keras.Model, fields=None, include_input=True):
21
+ if fields is None:
22
+ fields = []
23
+
24
+ def sanitize(v):
25
+ """Convert numpy types to pure Python."""
26
+ if isinstance(v, np.generic): # np.int64, np.float32, etc.
27
+ return v.item()
28
+ if isinstance(v, (list, tuple)):
29
+ return type(v)(sanitize(x) for x in v)
30
+ if isinstance(v, dict):
31
+ return {k: sanitize(x) for k, x in v.items()}
32
+ return v
33
+
34
+ lines = []
35
+ lines.append("keras.models.Sequential([")
36
+
37
+ # ---- Emit an Input line if we can ----
38
+ # model.input_shape is like (None, H, W, C) or (None, D)
39
+ if include_input and getattr(model, "input_shape", None) is not None:
40
+ input_shape = sanitize(model.input_shape[1:]) # drop batch dimension
41
+ # If it's a 1D shape like (784,), keep as tuple; if scalar, still fine.
42
+ lines.append(f" keras.layers.Input(shape={input_shape!r}),")
43
+
44
+ # ---- Emit all other layers ----
45
+ for layer in model.layers:
46
+ # If user explicitly had an Input layer, we don't want to duplicate it
47
+ if isinstance(layer, keras.layers.InputLayer):
48
+ # You *could* handle it specially here, but usually we just skip
49
+ continue
50
+
51
+ cfg = layer.get_config()
52
+
53
+ # If fields is empty, include everything; otherwise filter by fields.
54
+ if fields:
55
+ items = [(k, v) for k, v in cfg.items() if k in fields]
56
+ else:
57
+ items = cfg.items()
58
+
59
+ arg_lines = [
60
+ f"{k}={sanitize(v)!r}" # !r so strings get quotes, etc.
61
+ for k, v in items
62
+ ]
63
+ args = ",\n ".join(arg_lines)
64
+
65
+ lines.append(
66
+ f" keras.layers.{layer.__class__.__name__}("
67
+ f"{'\n ' if args else ''}{args}{'\n ' if args else ''}),"
68
+ )
69
+
70
+ lines.append("])")
71
+ return "\n".join(lines)
72
+
73
+ def refresh(self, *args, **kwargs):
74
+ super().refresh(*args, **kwargs)
75
+
76
+ refresh_success = False
77
+ while not refresh_success:
78
+ try:
79
+ self.model, self.fields = self.get_model()
80
+ refresh_success = True
81
+ except ValueError as e:
82
+ log.error(e)
83
+ log.info(f"Regenerating {self.__class__.__name__} due to improper configuration")
84
+ continue
85
+
86
+ self.num_parameters = self.model.count_params()
87
+ self.answers["num_parameters"] = Answer.integer(
88
+ "num_parameters",
89
+ self.num_parameters
90
+ )
91
+
92
+ return True
93
+
94
+ def get_body(self, **kwargs) -> ContentAST.Section:
95
+ body = ContentAST.Section()
96
+
97
+ body.add_element(
98
+ ContentAST.Paragraph(
99
+ [
100
+ ContentAST.Text("Given the below model, how many parameters does it use?")
101
+ ]
102
+ )
103
+ )
104
+
105
+ body.add_element(
106
+ ContentAST.Code(
107
+ self.model_to_python(
108
+ self.model,
109
+ fields=self.fields
110
+ )
111
+ )
112
+ )
113
+
114
+ body.add_element(ContentAST.LineBreak())
115
+
116
+ body.add_element(
117
+ ContentAST.Answer(self.answers["num_parameters"], "Number of Parameters")
118
+ )
119
+
120
+ return body
121
+
122
+ def get_explanation(self, **kwargs) -> ContentAST.Section:
123
+ explanation = ContentAST.Section()
124
+
125
+ def markdown_summary(model) -> ContentAST.Table:
126
+ # Ensure the model is built by running build() or calling it once
127
+ if not model.built:
128
+ try:
129
+ model.build(model.input_shape)
130
+ except Exception:
131
+ pass # Some subclassed models need real data to build
132
+
133
+ data = []
134
+
135
+ total_params = 0
136
+
137
+ for layer in model.layers:
138
+ name = layer.name
139
+ ltype = layer.__class__.__name__
140
+
141
+ # Try to extract output shape
142
+ try:
143
+ outshape = tuple(layer.output.shape)
144
+ except Exception:
145
+ outshape = "?"
146
+
147
+ params = layer.count_params()
148
+ total_params += params
149
+
150
+ data.append([name, ltype, outshape, params])
151
+
152
+ data.append(["**Total**", "", "", f"**{total_params}**"])
153
+ return ContentAST.Table(data=data, headers=["Layer", "Type", "Output Shape", "Params"])
154
+
155
+
156
+ explanation.add_element(
+ markdown_summary(self.model)
161
+ )
162
+
163
+ return explanation
164
+
165
+
166
+ @QuestionRegistry.register("cst463.WeightCounting-CNN")
167
+ class WeightCounting_CNN(WeightCounting):
168
+
169
+ def get_model(self) -> tuple[keras.Model, list[str]]:
170
+ input_size = self.rng.choice(np.arange(28, 32))
171
+ cnn_num_filters = self.rng.choice(2 ** np.arange(8))
172
+ cnn_kernel_size = self.rng.choice(1 + np.arange(10))
173
+ cnn_strides = self.rng.choice(1 + np.arange(10))
174
+ pool_size = self.rng.choice(1 + np.arange(10))
175
+ pool_strides = self.rng.choice(1 + np.arange(10))
176
+ num_output_size = self.rng.choice([1, 10, 32, 100])
177
+
178
+ # Let's just make a small model
179
+ model = keras.models.Sequential(
180
+ [
181
+ keras.layers.Input((input_size, input_size, 1)),
182
+ keras.layers.Conv2D(
183
+ filters=cnn_num_filters,
184
+ kernel_size=(cnn_kernel_size, cnn_kernel_size),
185
+ strides=(cnn_strides, cnn_strides),
186
+ padding="valid"
187
+ ),
188
+ keras.layers.MaxPool2D(
189
+ pool_size=(pool_size, pool_size),
190
+ strides=(pool_strides, pool_strides)
191
+ ),
192
+ keras.layers.Dense(
193
+ num_output_size
194
+ )
195
+ ]
196
+ )
197
+ return model, ["filters", "kernel_size", "strides", "padding", "pool_size"]
198
+
199
+
200
+ @QuestionRegistry.register("cst463.WeightCounting-RNN")
201
+ class WeightCounting_RNN(WeightCounting):
202
+ def get_model(self) -> tuple[keras.Model, list[str]]:
203
+ timesteps = int(self.rng.choice(np.arange(20, 41)))
204
+ feature_size = int(self.rng.choice(np.arange(8, 65)))
205
+
206
+ rnn_units = int(self.rng.choice(2 ** np.arange(4, 9)))
207
+ rnn_type = self.rng.choice(["SimpleRNN"])
208
+ return_sequences = bool(self.rng.choice([True, False]))
209
+
210
+ num_output_size = int(self.rng.choice([1, 10, 32, 100]))
211
+
212
+ RNNLayer = getattr(keras.layers, rnn_type)
213
+
214
+ model = keras.models.Sequential([
215
+ keras.layers.Input((timesteps, feature_size)),
216
+ RNNLayer(
217
+ units=rnn_units,
218
+ return_sequences=return_sequences,
219
+ ),
220
+ keras.layers.Dense(num_output_size),
221
+ ])
222
+ return model, ["units", "return_sequences"]
223
+
224
+
225
+ @QuestionRegistry.register()
226
+ class ConvolutionCalculation(Question):
227
+ pass
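As a sanity check on the formulas noted in the two get_model methods above, the following minimal sketch (hypothetical configuration, assuming keras is installed) compares a hand count against Keras' own count_params():

    import keras

    model = keras.models.Sequential([
        keras.layers.Input((28, 28, 1)),
        keras.layers.Conv2D(filters=8, kernel_size=(3, 3), strides=(1, 1), padding="valid"),
        keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        keras.layers.Dense(10),
    ])

    conv_params = (3 * 3 * 1 + 1) * 8  # kernel weights plus one bias per filter = 80
    dense_params = (8 + 1) * 10        # Dense acts on the 8-channel last axis = 90
    assert model.count_params() == conv_params + dense_params  # MaxPool2D adds 0; total 170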