QuizGenerator 0.1.3__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -0,0 +1,186 @@
+ import abc
+ import logging
+ import math
+ import keras
+ import numpy as np
+
+ from QuizGenerator.question import Question, QuestionRegistry
+ from QuizGenerator.misc import Answer, MatrixAnswer
+ from QuizGenerator.contentast import ContentAST
+ from QuizGenerator.constants import MathRanges
+ from .matrices import MatrixQuestion
+
+ log = logging.getLogger(__name__)
+
+
+ @QuestionRegistry.register("cst463.cnn.convolution")
+ class ConvolutionCalculation(MatrixQuestion):
+
+   @staticmethod
+   def conv2d_multi_channel(image, kernel, stride=1, padding=0):
+     """
+     image: (H, W) - single-channel image (height, width)
+     kernel: (K_h, K_w, C_out) - kernel height, kernel width, number of output filters
+     Returns: (H_out, W_out, C_out)
+
+     Note: despite the name, the input is single-channel; the "channels" here are the output filters.
+     """
+     H, W = image.shape
+     K_h, K_w, C_out = kernel.shape
+
+     # Add zero padding around the image border
+     if padding > 0:
+       image = np.pad(image, ((padding, padding), (padding, padding)), mode='constant')
+       H, W = H + 2 * padding, W + 2 * padding
+
+     # Output dimensions
+     H_out = (H - K_h) // stride + 1
+     W_out = (W - K_w) // stride + 1
+
+     output = np.zeros((H_out, W_out, C_out))
+
+     # Convolve each filter
+     for f in range(C_out):
+       for i in range(H_out):
+         for j in range(W_out):
+           h_start = i * stride
+           w_start = j * stride
+           # Extract the receptive field and sum the element-wise products
+           receptive_field = image[h_start:h_start + K_h, w_start:w_start + K_w]
+           output[i, j, f] = np.sum(receptive_field * kernel[:, :, f])
+
+     return output
+
+   def refresh(self, *args, **kwargs):
+     super().refresh(*args, **kwargs)
+
+     # num_input_channels = 1
+     input_size = kwargs.get("input_size", 4)
+     num_filters = kwargs.get("num_filters", 1)
+     self.stride = kwargs.get("stride", 1)
+     self.padding = kwargs.get("padding", 0)
+
+     # Small sizes for hand calculation
+     self.image = self.get_rounded_matrix((input_size, input_size))
+     self.kernel = self.get_rounded_matrix((3, 3, num_filters), -1, 1)
+
+     self.result = self.conv2d_multi_channel(self.image, self.kernel, stride=self.stride, padding=self.padding)
+
+     self.answers = {
+       f"result_{i}" : MatrixAnswer(f"result_{i}", self.result[:,:,i])
+       for i in range(self.result.shape[-1])
+     }
+
+     return True
+
+   def get_body(self, **kwargs) -> ContentAST.Section:
+     body = ContentAST.Section()
+
+     body.add_elements(
+       [
+         ContentAST.Text("Given an image represented as a matrix: "),
+         ContentAST.Matrix(self.image, name="image")
+       ]
+     )
+
+     body.add_elements(
+       [
+         ContentAST.Text("And the convolution filters: "),
+       ] + [
+         ContentAST.Matrix(self.kernel[:, :, i], name=f"Filter {i}")
+         for i in range(self.kernel.shape[-1])
+       ]
+     )
+
+     body.add_element(
+       ContentAST.Paragraph(
+         [
+           f"Calculate the output of the convolution operation. Assume stride = {self.stride} and padding = {self.padding}."
+         ]
+       )
+     )
+
+     body.add_element(ContentAST.LineBreak())
+
+     body.add_elements([
+       ContentAST.Container([
+         self.answers[f"result_{i}"].get_ast_element(label=f"Result of filter {i}"),
+         ContentAST.LineBreak()
+       ])
+       for i in range(self.result.shape[-1])
+     ])
+
+     return body
+
+   def get_explanation(self, **kwargs) -> ContentAST.Section:
+     explanation = ContentAST.Section()
+     digits = Answer.DEFAULT_ROUNDING_DIGITS
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "To compute a 2D convolution, we slide the filter across the input image and compute the element-wise product at each position, then sum the results."
+       ])
+     )
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         f"With stride={self.stride} and padding={self.padding}: ",
+         "stride controls how many pixels the filter moves each step, ",
+         f"and padding adds zeros around the border {'(none in this case)' if self.padding == 0 else f'({self.padding} pixels)'}."
+       ])
+     )
+
+     # For each filter, show one detailed example computation
+     for f_idx in range(self.kernel.shape[-1]):
+       explanation.add_element(
+         ContentAST.Paragraph([
+           ContentAST.Text(f"Filter {f_idx}:", emphasis=True)
+         ])
+       )
+
+       # Show the filter (rounded)
+       explanation.add_element(
+         ContentAST.Matrix(np.round(self.kernel[:, :, f_idx], digits), name=f"Filter {f_idx}")
+       )
+
+       # Show ONE example computation (position 0,0)
+       explanation.add_element(
+         ContentAST.Paragraph([
+           "Example computation at position (0, 0):"
+         ])
+       )
+
+       # Account for padding when extracting the receptive field
+       if self.padding > 0:
+         padded_image = np.pad(self.image, ((self.padding, self.padding), (self.padding, self.padding)), mode='constant')
+         receptive_field = padded_image[0:3, 0:3]
+       else:
+         receptive_field = self.image[0:3, 0:3]
+
+       computation_steps = []
+       for r in range(3):
+         row_terms = []
+         for c in range(3):
+           img_val = receptive_field[r, c]
+           kernel_val = self.kernel[r, c, f_idx]
+           row_terms.append(f"({img_val:.2f} \\times {kernel_val:.2f})")
+         computation_steps.append(" + ".join(row_terms))
+
+       equation_str = " + ".join(computation_steps)
+       result_val = self.result[0, 0, f_idx]
+
+       explanation.add_element(
+         ContentAST.Equation(f"{equation_str} = {result_val:.2f}")
+       )
+
+       # Show the complete output matrix (rounded)
+       explanation.add_element(
+         ContentAST.Paragraph([
+           "Complete output:"
+         ])
+       )
+       explanation.add_element(
+         ContentAST.Matrix(np.round(self.result[:, :, f_idx], digits))
+       )
+
+     return explanation
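
For anyone checking the convolution question by hand, the following is a minimal, self-contained sketch (not part of the package) that reproduces the arithmetic conv2d_multi_channel performs; the image size, random seed, and variable names below are illustrative assumptions.

    # Illustrative sketch: the same sliding-window sum that ConvolutionCalculation
    # performs, on a tiny single-channel image with one 3x3 filter (stride=1, padding=0).
    import numpy as np

    rng = np.random.RandomState(0)
    image = np.round(rng.rand(4, 4), 2)               # (H, W), like get_rounded_matrix((4, 4))
    kernel = np.round(2 * rng.rand(3, 3, 1) - 1, 2)   # (K_h, K_w, C_out) with values in [-1, 1)

    H, W = image.shape
    K_h, K_w, C_out = kernel.shape
    H_out, W_out = H - K_h + 1, W - K_w + 1           # stride=1, padding=0

    output = np.zeros((H_out, W_out, C_out))
    for f in range(C_out):
        for i in range(H_out):
            for j in range(W_out):
                window = image[i:i + K_h, j:j + K_w]
                output[i, j, f] = np.sum(window * kernel[:, :, f])

    # The quiz's "example computation at position (0, 0)" is exactly this sum:
    print(np.sum(image[0:3, 0:3] * kernel[:, :, 0]), output[0, 0, 0])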
@@ -0,0 +1,24 @@
+ #!/usr/bin/env python
+ import abc
+
+ import numpy as np
+
+ from QuizGenerator.question import Question
+
+
+ class MatrixQuestion(Question, abc.ABC):
+   def __init__(self, *args, **kwargs):
+     super().__init__(*args, **kwargs)
+     self.default_digits_to_round = kwargs.get("digits_to_round", 2)
+
+   def refresh(self, rng_seed=None, *args, **kwargs):
+     super().refresh(*args, rng_seed=rng_seed, **kwargs)
+     self.rng = np.random.RandomState(rng_seed)
+
+   def get_rounded_matrix(self, shape, low=0, high=1, digits_to_round=None):
+     if digits_to_round is None:
+       digits_to_round = self.default_digits_to_round
+     return np.round(
+       (high - low) * self.rng.rand(*shape) + low,
+       digits_to_round
+     )
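
A small sketch (not package code) of what get_rounded_matrix does: it scales uniform draws into [low, high) and rounds them, so a fixed rng_seed reproduces the same question values. The helper name and sizes below are illustrative.

    import numpy as np

    def rounded_matrix(rng, shape, low=0, high=1, digits=2):
        # Same formula as get_rounded_matrix: uniform in [low, high), then rounded.
        return np.round((high - low) * rng.rand(*shape) + low, digits)

    a = rounded_matrix(np.random.RandomState(42), (2, 2), low=-1, high=1)
    b = rounded_matrix(np.random.RandomState(42), (2, 2), low=-1, high=1)
    assert (a == b).all()   # same seed, same matrix, so a regenerated quiz matches its key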
@@ -0,0 +1,202 @@
+ import abc
+ import logging
+ import math
+ import keras
+ import numpy as np
+
+ from .matrices import MatrixQuestion
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
+ from QuizGenerator.contentast import ContentAST
+ from QuizGenerator.constants import MathRanges
+ from QuizGenerator.mixins import TableQuestionMixin
+
+ log = logging.getLogger(__name__)
+
+
+ @QuestionRegistry.register("cst463.rnn.forward-pass")
+ class RNNForwardPass(MatrixQuestion, TableQuestionMixin):
+
+   @staticmethod
+   def rnn_forward(x_seq, W_xh, W_hh, b_h, h_0, activation='tanh'):
+     """
+     x_seq: (seq_len, input_dim) - input sequence
+     W_xh: (input_dim, hidden_dim) - input to hidden weights
+     W_hh: (hidden_dim, hidden_dim) - hidden to hidden weights
+     b_h: (hidden_dim,) - hidden bias
+     h_0: (hidden_dim,) - initial hidden state
+
+     Returns: all hidden states (seq_len, hidden_dim)
+     """
+     seq_len = len(x_seq)
+     hidden_dim = W_hh.shape[0]
+
+     h_states = np.zeros((seq_len, hidden_dim))
+     h_t = h_0
+
+     for t in range(seq_len):
+       h_t = x_seq[t] @ W_xh + h_t @ W_hh + b_h
+       if activation == 'tanh':
+         h_t = np.tanh(h_t)
+       elif activation == 'relu':
+         h_t = np.maximum(0, h_t)
+       h_states[t] = h_t
+
+     return h_states
+
+   def refresh(self, *args, **kwargs):
+     super().refresh(*args, **kwargs)
+     self.rng = np.random.RandomState(kwargs.get("rng_seed", None))
+
+     seq_len = kwargs.get("seq_len", 3)
+     input_dim = kwargs.get("input_dim", 1)
+     hidden_dim = kwargs.get("hidden_dim", 1)
+
+     # Small weights (rounded values) for hand calculation
+     self.x_seq = self.get_rounded_matrix((seq_len, input_dim))  # self.rng.randint(0, 3, size=(seq_len, input_dim))
+     self.W_xh = self.get_rounded_matrix((input_dim, hidden_dim), -1, 2)
+     self.W_hh = self.get_rounded_matrix((hidden_dim, hidden_dim), -1, 2)
+     self.b_h = self.get_rounded_matrix((hidden_dim,), -1, 2)
+     self.h_0 = np.zeros(hidden_dim)
+
+     self.h_states = self.rnn_forward(self.x_seq, self.W_xh, self.W_hh, self.b_h, self.h_0)  # .reshape((seq_len,-1))
+
+     ## Answers:
+     # x_seq, W_xh, W_hh, b_h, h_0, h_states
+
+     self.answers["output_sequence"] = Answer.matrix(key="output_sequence", value=self.h_states)
+
+     return True
+
+   def get_body(self, **kwargs) -> ContentAST.Section:
+     body = ContentAST.Section()
+
+     body.add_element(
+       ContentAST.Paragraph([
+         ContentAST.Text("Given the information below about an RNN, please calculate the output sequence."),
+         "Assume that you are using a tanh activation function."
+       ])
+     )
+     body.add_element(
+       self.create_info_table(
+         {
+           ContentAST.Container(["Input sequence, ", ContentAST.Equation("x_{seq}", inline=True)]) : ContentAST.Matrix(self.x_seq),
+           ContentAST.Container(["Input weights, ", ContentAST.Equation("W_{xh}", inline=True)]) : ContentAST.Matrix(self.W_xh),
+           ContentAST.Container(["Hidden weights, ", ContentAST.Equation("W_{hh}", inline=True)]) : ContentAST.Matrix(self.W_hh),
+           ContentAST.Container(["Bias, ", ContentAST.Equation("b_{h}", inline=True)]) : ContentAST.Matrix(self.b_h),
+           ContentAST.Container(["Initial hidden state, ", ContentAST.Equation("h_{0}", inline=True)]) : ContentAST.Matrix(self.h_0),
+         }
+       )
+     )
+
+     body.add_element(ContentAST.LineBreak())
+
+     body.add_element(
+       self.answers["output_sequence"].get_ast_element(label="Hidden states")
+     )
+
+     return body
+
+   def get_explanation(self, **kwargs) -> ContentAST.Section:
+     explanation = ContentAST.Section()
+     digits = Answer.DEFAULT_ROUNDING_DIGITS
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "For an RNN forward pass, we compute the hidden state at each time step using:"
+       ])
+     )
+
+     explanation.add_element(
+       ContentAST.Equation(r"h_t = \tanh(x_t W_{xh} + h_{t-1} W_{hh} + b_h)")
+     )
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "Where the input contributes via ", ContentAST.Equation("W_{xh}", inline=True),
+         ", the previous hidden state contributes via ", ContentAST.Equation("W_{hh}", inline=True),
+         ", and ", ContentAST.Equation("b_h", inline=True), " is the bias."
+       ])
+     )
+
+     # Format arrays with proper rounding
+     def format_array(arr):
+       from QuizGenerator.misc import fix_negative_zero
+       if arr.ndim == 0:
+         return f"{fix_negative_zero(arr):.{digits}f}"
+       return "[" + ", ".join([f"{fix_negative_zero(x):.{digits}f}" for x in arr.flatten()]) + "]"
+
+     # Show detailed examples for first 2 timesteps (or just 1 if seq_len == 1)
+     seq_len = len(self.x_seq)
+     num_examples = min(2, seq_len)
+
+     explanation.add_element(ContentAST.Paragraph([""]))
+
+     for t in range(num_examples):
+       explanation.add_element(
+         ContentAST.Paragraph([
+           ContentAST.Text(f"Example: Timestep {t}", emphasis=True)
+         ])
+       )
+
+       # Compute step t
+       x_contribution = self.x_seq[t] @ self.W_xh
+       if t == 0:
+         h_prev = self.h_0
+         h_prev_label = 'h_{-1}'
+         h_prev_desc = " (initial state)"
+       else:
+         h_prev = self.h_states[t-1]
+         h_prev_label = f'h_{{{t-1}}}'
+         h_prev_desc = ""
+
+       h_contribution = h_prev @ self.W_hh
+       pre_activation = x_contribution + h_contribution + self.b_h
+       h_result = np.tanh(pre_activation)
+
+       explanation.add_element(
+         ContentAST.Paragraph([
+           "Input contribution: ",
+           ContentAST.Equation(f'x_{t} W_{{xh}}', inline=True),
+           f" = {format_array(x_contribution)}"
+         ])
+       )
+
+       explanation.add_element(
+         ContentAST.Paragraph([
+           "Hidden contribution: ",
+           ContentAST.Equation(f'{h_prev_label} W_{{hh}}', inline=True),
+           f"{h_prev_desc} = {format_array(h_contribution)}"
+         ])
+       )
+
+       explanation.add_element(
+         ContentAST.Paragraph([
+           f"Pre-activation: {format_array(pre_activation)}"
+         ])
+       )
+
+       explanation.add_element(
+         ContentAST.Paragraph([
+           "After tanh: ",
+           ContentAST.Equation(f'h_{t}', inline=True),
+           f" = {format_array(h_result)}"
+         ])
+       )
+
+       # Add visual separator between timesteps (except after the last one)
+       if t < num_examples - 1:
+         explanation.add_element(ContentAST.Paragraph([""]))
+
+     # Show complete output sequence (rounded)
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "Complete hidden state sequence (each row is one timestep):"
+       ])
+     )
+
+     explanation.add_element(
+       ContentAST.Matrix(np.round(self.h_states, digits))
+     )
+
+     return explanation
+
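
For reference, a hand-checkable sketch (not part of the package) of the recurrence rnn_forward implements, h_t = tanh(x_t W_xh + h_{t-1} W_hh + b_h); the 3-step, 1-dimensional inputs below are made-up values.

    import numpy as np

    x_seq = np.array([[0.5], [0.2], [0.9]])   # (seq_len, input_dim)
    W_xh = np.array([[1.0]])                  # (input_dim, hidden_dim)
    W_hh = np.array([[0.5]])                  # (hidden_dim, hidden_dim)
    b_h = np.array([0.1])                     # (hidden_dim,)
    h_t = np.zeros(1)                         # initial hidden state h_0

    states = []
    for x_t in x_seq:                         # mirrors the loop in rnn_forward above
        h_t = np.tanh(x_t @ W_xh + h_t @ W_hh + b_h)
        states.append(h_t)

    print(np.vstack(states))                  # (seq_len, hidden_dim), one row per timestep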
@@ -0,0 +1,201 @@
+ import abc
+ import logging
+ import math
+ import keras
+ import numpy as np
+
+ from QuizGenerator.misc import MatrixAnswer
+ from QuizGenerator.premade_questions.cst463.models.matrices import MatrixQuestion
+ from QuizGenerator.question import Question, QuestionRegistry, Answer
+ from QuizGenerator.contentast import ContentAST
+ from QuizGenerator.constants import MathRanges
+ from QuizGenerator.mixins import TableQuestionMixin
+
+ log = logging.getLogger(__name__)
+
+
+ @QuestionRegistry.register("cst463.word2vec.skipgram")
+ class word2vec__skipgram(MatrixQuestion, TableQuestionMixin):
+
+   @staticmethod
+   def skipgram_predict(center_emb, context_embs):
+     """
+     center_emb: (embed_dim,) - center word embedding
+     context_embs: (num_contexts, embed_dim) - context candidate embeddings
+
+     Returns: (logits, probs) - each of shape (num_contexts,)
+     """
+     # Compute dot products (logits)
+     logits = context_embs @ center_emb
+
+     # Softmax
+     exp_logits = np.exp(logits)
+     probs = exp_logits / exp_logits.sum()
+
+     return logits, probs
+
+   def refresh(self, *args, **kwargs):
+     super().refresh(*args, **kwargs)
+     self.rng = np.random.RandomState(kwargs.get("rng_seed", None))
+
+     embed_dim = kwargs.get("embed_dim", 3)
+     num_contexts = kwargs.get("num_contexts", 3)
+
+     # Vocabulary pool
+     vocab = ['cat', 'dog', 'run', 'jump', 'happy', 'sad', 'tree', 'house',
+              'walk', 'sleep', 'fast', 'slow', 'big', 'small']
+
+     # Sample words
+     self.selected_words = self.rng.choice(vocab, size=num_contexts + 1, replace=False)
+     self.center_word = self.selected_words[0]
+     self.context_words = self.selected_words[1:]
+
+     # Small embeddings for hand calculation
+     self.center_emb = self.get_rounded_matrix((embed_dim,), -2, 3)
+     self.context_embs = self.get_rounded_matrix((num_contexts, embed_dim), -2, 3)
+
+     self.logits, self.probs = self.skipgram_predict(self.center_emb, self.context_embs)
+
+     ## Answers:
+     # center_word, center_emb, context_words, context_embs, logits, probs
+     self.answers["logits"] = Answer.vector_value(key="logits", value=self.logits)
+     self.answers["center_word"] = Answer.string(key="center_word", value=self.center_word)
+
+     return True
+
+   def get_body(self, **kwargs) -> ContentAST.Section:
+     body = ContentAST.Section()
+
+     body.add_element(
+       ContentAST.Paragraph([
+         f"Given the center word `{self.center_word}` with embedding {self.center_emb}, compute the skip-gram probabilities for each context word and identify the most likely one."
+       ])
+     )
+     body.add_elements([
+       ContentAST.Paragraph([ContentAST.Text(f"`{w}` : "), str(e)]) for w, e in zip(self.context_words, self.context_embs)
+     ])
+
+     body.add_elements([
+       ContentAST.LineBreak(),
+       self.answers["logits"].get_ast_element("Logits"),
+       ContentAST.LineBreak(),
+       self.answers["center_word"].get_ast_element("Center word")
+     ])
+
+     log.debug(f"logits: {self.logits}")
+     log.debug(f"probs: {self.probs}")
+
+     return body
+
+   def get_explanation(self, **kwargs) -> ContentAST.Section:
+     explanation = ContentAST.Section()
+     digits = Answer.DEFAULT_ROUNDING_DIGITS
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "In the skip-gram model, we predict context words given a center word by computing dot products between embeddings and applying softmax."
+       ])
+     )
+
+     # Step 1: Show embeddings
+     explanation.add_element(
+       ContentAST.Paragraph([
+         ContentAST.Text("Step 1: Given embeddings", emphasis=True)
+       ])
+     )
+
+     # Format center embedding
+     center_emb_str = "[" + ", ".join([f"{x:.{digits}f}" for x in self.center_emb]) + "]"
+     explanation.add_element(
+       ContentAST.Paragraph([
+         f"Center word `{self.center_word}`: {center_emb_str}"
+       ])
+     )
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "Context words:"
+       ])
+     )
+
+     for i, (word, emb) in enumerate(zip(self.context_words, self.context_embs)):
+       emb_str = "[" + ", ".join([f"{x:.2f}" for x in emb]) + "]"
+       explanation.add_element(
+         ContentAST.Paragraph([
+           f"`{word}`: {emb_str}"
+         ])
+       )
+
+     # Step 2: Compute logits (dot products)
+     explanation.add_element(
+       ContentAST.Paragraph([
+         ContentAST.Text("Step 2: Compute logits (dot products)", emphasis=True)
+       ])
+     )
+
+     # Show ONE example
+     explanation.add_element(
+       ContentAST.Paragraph([
+         f"Example: Logit for `{self.context_words[0]}`"
+       ])
+     )
+
+     context_emb = self.context_embs[0]
+     dot_product_terms = " + ".join([f"({self.center_emb[j]:.2f} \\times {context_emb[j]:.2f})"
+                                     for j in range(len(self.center_emb))])
+     logit_val = self.logits[0]
+
+     explanation.add_element(
+       ContentAST.Equation(f"{dot_product_terms} = {logit_val:.2f}")
+     )
+
+     logits_str = "[" + ", ".join([f"{x:.2f}" for x in self.logits]) + "]"
+     explanation.add_element(
+       ContentAST.Paragraph([
+         f"All logits: {logits_str}"
+       ])
+     )
+
+     # Step 3: Apply softmax
+     explanation.add_element(
+       ContentAST.Paragraph([
+         ContentAST.Text("Step 3: Apply softmax to get probabilities", emphasis=True)
+       ])
+     )
+
+     exp_logits = np.exp(self.logits)
+     sum_exp = exp_logits.sum()
+
+     exp_terms = " + ".join([f"e^{{{l:.{digits}f}}}" for l in self.logits])
+
+     explanation.add_element(
+       ContentAST.Equation(f"\\text{{denominator}} = {exp_terms} = {sum_exp:.{digits}f}")
+     )
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         "Probabilities:"
+       ])
+     )
+
+     for i, (word, prob) in enumerate(zip(self.context_words, self.probs)):
+       explanation.add_element(
+         ContentAST.Equation(f"P(\\text{{{word}}}) = \\frac{{e^{{{self.logits[i]:.{digits}f}}}}}{{{sum_exp:.{digits}f}}} = {prob:.{digits}f}")
+       )
+
+     # Step 4: Identify most likely
+     most_likely_idx = np.argmax(self.probs)
+     most_likely_word = self.context_words[most_likely_idx]
+
+     explanation.add_element(
+       ContentAST.Paragraph([
+         ContentAST.Text("Conclusion:", emphasis=True),
+         f" The most likely context word is `{most_likely_word}` with probability {self.probs[most_likely_idx]:.{digits}f}"
+       ])
+     )
+
+     return explanation
+
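
Finally, a sketch (not package code) of the dot-product-plus-softmax computation in skipgram_predict, with made-up embeddings small enough to verify by hand.

    import numpy as np

    center_emb = np.array([1.0, -1.0, 2.0])            # (embed_dim,)
    context_embs = np.array([[1.0, 0.0, 1.0],          # (num_contexts, embed_dim)
                             [0.0, 2.0, -1.0],
                             [2.0, 1.0, 0.0]])

    logits = context_embs @ center_emb                  # one dot product per context word -> [3., -4., 1.]
    probs = np.exp(logits) / np.exp(logits).sum()       # softmax over the logits

    print(logits, probs.round(4))
    print(int(np.argmax(probs)))                        # index of the most likely context word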