QuizGenerator 0.6.3-py3-none-any.whl → 0.7.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- QuizGenerator/contentast.py +2191 -2193
- QuizGenerator/misc.py +1 -1
- QuizGenerator/mixins.py +64 -64
- QuizGenerator/premade_questions/basic.py +16 -16
- QuizGenerator/premade_questions/cst334/languages.py +26 -26
- QuizGenerator/premade_questions/cst334/math_questions.py +42 -42
- QuizGenerator/premade_questions/cst334/memory_questions.py +124 -124
- QuizGenerator/premade_questions/cst334/persistence_questions.py +48 -48
- QuizGenerator/premade_questions/cst334/process.py +38 -38
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +45 -45
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +34 -34
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +53 -53
- QuizGenerator/premade_questions/cst463/gradient_descent/misc.py +2 -2
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +65 -65
- QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +39 -39
- QuizGenerator/premade_questions/cst463/models/attention.py +36 -36
- QuizGenerator/premade_questions/cst463/models/cnns.py +26 -26
- QuizGenerator/premade_questions/cst463/models/rnns.py +36 -36
- QuizGenerator/premade_questions/cst463/models/text.py +32 -32
- QuizGenerator/premade_questions/cst463/models/weight_counting.py +15 -15
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +124 -124
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +161 -161
- QuizGenerator/question.py +41 -41
- QuizGenerator/quiz.py +7 -7
- QuizGenerator/regenerate.py +114 -13
- QuizGenerator/typst_utils.py +2 -2
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/METADATA +1 -1
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/RECORD +31 -31
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/WHEEL +0 -0
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.6.3.dist-info → quizgenerator-0.7.1.dist-info}/licenses/LICENSE +0 -0
QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py

@@ -12,7 +12,7 @@ from typing import List, Tuple, Dict, Any
 import matplotlib.pyplot as plt
 import matplotlib.patches as mpatches
 
-
+import QuizGenerator.contentast as ca
 from QuizGenerator.question import Question, QuestionRegistry
 from QuizGenerator.mixins import TableQuestionMixin, BodyTemplatesMixin
 from ..models.matrices import MatrixQuestion
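Nearly every hunk that follows is driven by this one import change: 0.7.1 replaces direct imports from `QuizGenerator.contentast` with a module alias, so each content-AST reference gains a `ca.` prefix. A minimal sketch of the migration pattern; the exact 0.6.3 import list is an assumption, since the removed line is truncated in this diff view:

```python
# 0.6.3 style (assumed; the removed import line is not fully shown above):
# from QuizGenerator.contentast import Section, Paragraph, Equation, AnswerTypes

# 0.7.1 style, as added above:
import QuizGenerator.contentast as ca

# Call sites then change mechanically:
#   body = Section()              ->  body = ca.Section()
#   Equation("x_1", inline=True)  ->  ca.Equation("x_1", inline=True)
```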
@@ -252,7 +252,7 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
           include_training_context: If True, include target, loss, etc. (for backprop questions)
 
         Returns:
-
+            ca.TableGroup with network parameters in two side-by-side tables
         """
         # Left table: Inputs & Weights
         left_data = []
@@ -261,7 +261,7 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
         # Input values
         for i in range(self.num_inputs):
             left_data.append([
-
+                ca.Equation(f"x_{i+1}", inline=True),
                 f"{self.X[i]:.1f}" # Inputs are always integers or 1 decimal
             ])
 
@@ -269,14 +269,14 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
         for j in range(self.num_hidden):
             for i in range(self.num_inputs):
                 left_data.append([
-
+                    ca.Equation(f"w_{{{j+1}{i+1}}}", inline=True),
                     f"{self.W1[j, i]:.{self.param_digits}f}"
                 ])
 
         # Weights from hidden to output
         for i in range(self.num_hidden):
             left_data.append([
-
+                ca.Equation(f"w_{i+3}", inline=True),
                 f"{self.W2[0, i]:.{self.param_digits}f}"
             ])
 
@@ -288,14 +288,14 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
         if self.use_bias:
             for j in range(self.num_hidden):
                 right_data.append([
-
+                    ca.Equation(f"b_{j+1}", inline=True),
                     f"{self.b1[j]:.{self.param_digits}f}"
                 ])
 
         # Output bias
         if self.use_bias:
             right_data.append([
-
+                ca.Equation(r"b_{out}", inline=True),
                 f"{self.b2[0]:.{self.param_digits}f}"
             ])
 
@@ -303,14 +303,14 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
         if include_activations and self.a1 is not None:
             for i in range(self.num_hidden):
                 right_data.append([
-
+                    ca.Equation(f"h_{i+1}", inline=True),
                     f"{self.a1[i]:.4f}"
                 ])
 
         # Output activation (if computed and requested)
         if include_activations and self.a2 is not None:
             right_data.append([
-
+                ca.Equation(r"\hat{y}", inline=True),
                 f"{self.a2[0]:.4f}"
             ])
 
@@ -318,20 +318,20 @@ class SimpleNeuralNetworkBase(MatrixQuestion, abc.ABC):
         if include_training_context:
             if self.y_target is not None:
                 right_data.append([
-
+                    ca.Equation("y", inline=True),
                     f"{int(self.y_target)}" # Binary target (0 or 1)
                 ])
 
             if self.loss is not None:
                 right_data.append([
-
+                    ca.Equation("L", inline=True),
                     f"{self.loss:.4f}"
                 ])
 
         # Create table group
-        table_group =
-        table_group.add_table(
-        table_group.add_table(
+        table_group = ca.TableGroup()
+        table_group.add_table(ca.Table(data=left_data))
+        table_group.add_table(ca.Table(data=right_data))
 
         return table_group
 
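To make the two-table layout concrete, here is a runnable sketch of how the left table's rows are assembled, using plain lists in place of `ca.Table` and `ca.TableGroup` (whose constructors are only partially visible in this diff); all parameter values are made up:

```python
# Made-up parameters for the 2-2-1 network the questions describe.
X = [1.0, 2.0]                      # inputs x_1, x_2
W1 = [[0.5, -0.3], [0.8, 0.1]]      # w_{ji}: input i -> hidden j
W2 = [[0.7, -0.6]]                  # w_3, w_4: hidden -> output

left_data = []
for i, x in enumerate(X):           # input rows: x_1, x_2
    left_data.append([f"x_{i+1}", f"{x:.1f}"])
for j in range(2):                  # input-to-hidden rows: w_11 .. w_22
    for i in range(2):
        left_data.append([f"w_{j+1}{i+1}", f"{W1[j][i]:.2f}"])
for i in range(2):                  # hidden-to-output rows: w_3, w_4
    left_data.append([f"w_{i+3}", f"{W2[0][i]:.2f}"])

for label, value in left_data:
    print(f"{label} = {value}")
```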
@@ -566,18 +566,18 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
         # Hidden layer activations
         for i in range(self.num_hidden):
             key = f"h{i+1}"
-            self.answers[key] = AnswerTypes.Float(float(self.a1[i]), label=f"h_{i + 1}")
+            self.answers[key] = ca.AnswerTypes.Float(float(self.a1[i]), label=f"h_{i + 1}")
 
         # Output
-        self.answers["y_pred"] = AnswerTypes.Float(float(self.a2[0]), label="ŷ")
+        self.answers["y_pred"] = ca.AnswerTypes.Float(float(self.a2[0]), label="ŷ")
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         # Question description
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"Given the neural network below with {self._get_activation_name()} activation "
             f"in the hidden layer and sigmoid activation in the output layer (for binary classification), "
             f"calculate the forward pass for the given input values."
@@ -585,7 +585,7 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
 
         # Network diagram
         body.add_element(
-
+            ca.Picture(
                 img_data=self._generate_network_diagram(show_weights=True, show_activations=False),
                 caption=f"Neural network architecture"
             )
@@ -595,7 +595,7 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
         body.add_element(self._generate_parameter_table(include_activations=False))
 
         # Activation function
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"**Hidden layer activation:** {self._get_activation_name()}"
         ]))
 
@@ -605,25 +605,25 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
 
         answers.append(self.answers["y_pred"])
 
-        body.add_element(
+        body.add_element(ca.AnswerBlock(answers))
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
+        explanation = ca.Section()
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "To solve this problem, we need to compute the forward pass through the network."
         ]))
 
         # Hidden layer calculations
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 1: Calculate hidden layer pre-activations**"
         ]))
 
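These hunks also show the file's recurring two-tier interface: `_get_body` builds the section and returns it together with the collected answers, while `get_body` keeps the old single-value signature. A minimal sketch of that shape, with plain lists standing in for the `ca` types (an assumption; only the type annotations are visible here):

```python
from typing import Any, List, Tuple

class QuestionShape:
    """Sketch of the _get_body / get_body pairing seen throughout this diff."""

    def _get_body(self, **kwargs) -> Tuple[List[Any], List[Any]]:
        body: List[Any] = []       # stands in for ca.Section()
        answers: List[Any] = []    # stands in for collected ca.Answer objects
        body.append("question text")
        return body, answers

    def get_body(self, **kwargs) -> List[Any]:
        """Backward compatible interface: callers that ignore answers still work."""
        body, _ = self._get_body(**kwargs)
        return body

print(QuestionShape().get_body())
```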
@@ -637,35 +637,35 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
             if self.use_bias:
                 z_calc += f" + {self.b1[i]:.{self.param_digits}f}"
 
-            explanation.add_element(
+            explanation.add_element(ca.Equation(
                 f"z_{i+1} = {z_calc} = {self.z1[i]:.4f}",
                 inline=False
             ))
 
         # Hidden layer activations
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             f"**Step 2: Apply {self._get_activation_name()} activation**"
         ]))
 
         for i in range(self.num_hidden):
             if self.activation_function == self.ACTIVATION_SIGMOID:
-                explanation.add_element(
+                explanation.add_element(ca.Equation(
                     f"h_{i+1} = \\sigma(z_{i+1}) = \\frac{{1}}{{1 + e^{{-{self.z1[i]:.4f}}}}} = {self.a1[i]:.4f}",
                     inline=False
                 ))
             elif self.activation_function == self.ACTIVATION_RELU:
-                explanation.add_element(
+                explanation.add_element(ca.Equation(
                     f"h_{i+1} = \\text{{ReLU}}(z_{i+1}) = \\max(0, {self.z1[i]:.4f}) = {self.a1[i]:.4f}",
                     inline=False
                 ))
             else:
-                explanation.add_element(
+                explanation.add_element(ca.Equation(
                     f"h_{i+1} = z_{i+1} = {self.a1[i]:.4f}",
                     inline=False
                 ))
 
         # Output layer
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 3: Calculate output (with sigmoid activation)**"
         ]))
 
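The three branches above render sigmoid, ReLU, or identity activations. A quick numeric check of the same three formulas, with made-up pre-activations:

```python
import numpy as np

def sigmoid(z):
    # sigma(z) = 1 / (1 + e^{-z}), matching the rendered equation
    return 1.0 / (1.0 + np.exp(-z))

z1 = np.array([0.64, -0.27])    # made-up hidden pre-activations
print(sigmoid(z1))              # sigmoid branch: h_i = sigma(z_i)
print(np.maximum(0.0, z1))      # ReLU branch:    h_i = max(0, z_i)
print(z1)                       # linear branch:  h_i = z_i
```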
@@ -677,24 +677,24 @@ class ForwardPassQuestion(SimpleNeuralNetworkBase):
         if self.use_bias:
             z_out_calc += f" + {self.b2[0]:.{self.param_digits}f}"
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"z_{{out}} = {z_out_calc} = {self.z2[0]:.4f}",
             inline=False
         ))
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\hat{{y}} = \\sigma(z_{{out}}) = \\frac{{1}}{{1 + e^{{-{self.z2[0]:.4f}}}}} = {self.a2[0]:.4f}",
             inline=False
         ))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "(Note: The output layer uses sigmoid activation for binary classification, "
             "so the output is between 0 and 1, representing the probability of class 1)"
         ]))
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
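Putting the ForwardPassQuestion steps together, a self-contained numpy sketch of the 2-2-1 forward pass these equations walk through (all parameter values are made up):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X  = np.array([1.0, 2.0])       # inputs x_1, x_2
W1 = np.array([[0.5, -0.3],     # w_11, w_12
               [0.8,  0.1]])    # w_21, w_22
b1 = np.array([0.1, -0.2])
W2 = np.array([[0.7, -0.6]])    # w_3, w_4
b2 = np.array([0.05])

z1 = W1 @ X + b1                # Step 1: hidden pre-activations z_1, z_2
h  = sigmoid(z1)                # Step 2: hidden activations (sigmoid branch)
z_out = W2 @ h + b2             # Step 3: output pre-activation
y_hat = sigmoid(z_out)          # output layer is always sigmoid here
print(z1, h, z_out, y_hat)
```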
@@ -743,20 +743,20 @@ class BackpropGradientQuestion(SimpleNeuralNetworkBase):
         # Gradient for W2 (hidden to output)
         for i in range(self.num_hidden):
             key = f"dL_dw2_{i}"
-            self.answers[key] = AnswerTypes.Float(self._compute_gradient_W2(i), label=f"∂L/∂w_{i + 3}")
+            self.answers[key] = ca.AnswerTypes.Float(self._compute_gradient_W2(i), label=f"∂L/∂w_{i + 3}")
 
         # Gradient for W1 (input to hidden) - pick first hidden neuron
         for j in range(self.num_inputs):
             key = f"dL_dw1_0{j}"
-            self.answers[key] = AnswerTypes.Float(self._compute_gradient_W1(0, j), label=f"∂L/∂w_1{j + 1}")
+            self.answers[key] = ca.AnswerTypes.Float(self._compute_gradient_W1(0, j), label=f"∂L/∂w_1{j + 1}")
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         # Question description
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"Given the neural network below with {self._get_activation_name()} activation "
             f"in the hidden layer and sigmoid activation in the output layer (for binary classification), "
             f"a forward pass has been completed with the values shown. "
@@ -765,7 +765,7 @@ class BackpropGradientQuestion(SimpleNeuralNetworkBase):
 
         # Network diagram
         body.add_element(
-
+            ca.Picture(
                 img_data=self._generate_network_diagram(show_weights=True, show_activations=False),
                 caption=f"Neural network architecture"
             )
@@ -775,11 +775,11 @@ class BackpropGradientQuestion(SimpleNeuralNetworkBase):
         body.add_element(self._generate_parameter_table(include_activations=True, include_training_context=True))
 
         # Activation function
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"**Hidden layer activation:** {self._get_activation_name()}"
         ]))
 
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "**Calculate the following gradients:**"
         ]))
 
@@ -791,64 +791,64 @@ class BackpropGradientQuestion(SimpleNeuralNetworkBase):
         for j in range(self.num_inputs):
             answers.append(self.answers[f"dL_dw1_0{j}"])
 
-        body.add_element(
+        body.add_element(ca.AnswerBlock(answers))
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
+        explanation = ca.Section()
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "To solve this problem, we use the chain rule to compute gradients via backpropagation."
         ]))
 
         # Output layer gradient
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 1: Compute output layer gradient**"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "For binary cross-entropy loss with sigmoid output activation, "
             "the gradient with respect to the pre-activation simplifies beautifully:"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\frac{{\\partial L}}{{\\partial z_{{out}}}} = \\hat{{y}} - y = {self.a2[0]:.4f} - {int(self.y_target)} = {self.dL_dz2:.4f}",
             inline=False
         ))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "(This elegant result comes from combining the BCE loss derivative and sigmoid activation derivative)"
         ]))
 
         # W2 gradients
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 2: Gradients for hidden-to-output weights**"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "Using the chain rule:"
         ]))
 
         for i in range(self.num_hidden):
             grad = self._compute_gradient_W2(i)
-            explanation.add_element(
+            explanation.add_element(ca.Equation(
                 f"\\frac{{\\partial L}}{{\\partial w_{i+3}}} = \\frac{{\\partial L}}{{\\partial z_{{out}}}} \\cdot \\frac{{\\partial z_{{out}}}}{{\\partial w_{i+3}}} = {self.dL_dz2:.4f} \\cdot {self.a1[i]:.4f} = {grad:.4f}",
                 inline=False
             ))
 
         # W1 gradients
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 3: Gradients for input-to-hidden weights**"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "First, compute the gradient flowing back to hidden layer:"
         ]))
 
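The "simplifies beautifully" claim in Step 1 is easy to verify: with L = -[y log(ŷ) + (1-y) log(1-ŷ)] and ŷ = σ(z_out), the factors ∂L/∂ŷ = -y/ŷ + (1-y)/(1-ŷ) and ∂ŷ/∂z_out = ŷ(1-ŷ) multiply out to ŷ - y. A numeric check with made-up values:

```python
import numpy as np

y, z_out = 1.0, 0.3                             # made-up target and pre-activation
y_hat = 1.0 / (1.0 + np.exp(-z_out))

dL_dyhat = -y / y_hat + (1 - y) / (1 - y_hat)   # BCE derivative w.r.t. y_hat
dyhat_dz = y_hat * (1 - y_hat)                  # sigmoid derivative
print(dL_dyhat * dyhat_dz)                      # chain-rule product ...
print(y_hat - y)                                # ... equals y_hat - y
```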
@@ -867,14 +867,14 @@ class BackpropGradientQuestion(SimpleNeuralNetworkBase):
             else:
                 act_deriv_str = f"1"
 
-            explanation.add_element(
+            explanation.add_element(ca.Equation(
                 f"\\frac{{\\partial L}}{{\\partial w_{{1{j+1}}}}} = \\frac{{\\partial L}}{{\\partial z_{{out}}}} \\cdot w_{3} \\cdot {act_deriv_str} \\cdot x_{j+1} = {self.dL_dz2:.4f} \\cdot {dz2_da1:.4f} \\cdot {da1_dz1:.4f} \\cdot {self.X[j]:.1f} = {grad:.4f}",
                 inline=False
             ))
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
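The full Step 3 chain can be sanity-checked against a finite difference. A standalone sketch for ∂L/∂w_11 in a bias-free 2-2-1 sigmoid network (all values made up; `_compute_gradient_W1` itself is not shown in this diff):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

X, y = np.array([1.0, 2.0]), 1.0
W2 = np.array([[0.7, -0.6]])

def loss(w11):
    """Forward pass + BCE with only w_11 varying (no biases, sigmoid hidden)."""
    W1 = np.array([[w11, -0.3], [0.8, 0.1]])
    h = sigmoid(W1 @ X)
    y_hat = sigmoid(W2 @ h)[0]
    return -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

w11 = 0.5
W1 = np.array([[w11, -0.3], [0.8, 0.1]])
h = sigmoid(W1 @ X)
y_hat = sigmoid(W2 @ h)[0]

# Chain rule, exactly as rendered: (y_hat - y) * w_3 * h_1(1 - h_1) * x_1
analytic = (y_hat - y) * W2[0, 0] * h[0] * (1 - h[0]) * X[0]

eps = 1e-6                      # central finite difference for comparison
numeric = (loss(w11 + eps) - loss(w11 - eps)) / (2 * eps)
print(analytic, numeric)        # the two values should match closely
```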
@@ -920,31 +920,31 @@ class EnsembleAveragingQuestion(Question):
 
         # Mean prediction
         mean_pred = np.mean(self.predictions)
-        self.answers["mean"] = AnswerTypes.Float(float(mean_pred), label="Mean (average)")
+        self.answers["mean"] = ca.AnswerTypes.Float(float(mean_pred), label="Mean (average)")
 
         # Median (optional, but useful)
         median_pred = np.median(self.predictions)
-        self.answers["median"] = AnswerTypes.Float(float(median_pred), label="Median")
+        self.answers["median"] = ca.AnswerTypes.Float(float(median_pred), label="Median")
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         # Question description
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"You have trained {self.num_models} different regression models on the same dataset. "
             f"For a particular test input, each model produces the following predictions:"
         ]))
 
         # Show predictions
         pred_list = ", ".join([f"{p:.1f}" for p in self.predictions])
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"Model predictions: {pred_list}"
         ]))
 
         # Question
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "To create an ensemble, calculate the combined prediction using the following methods:"
         ]))
 
@@ -952,38 +952,38 @@ class EnsembleAveragingQuestion(Question):
         answers.append(self.answers["mean"])
         answers.append(self.answers["median"])
 
-        body.add_element(
+        body.add_element(ca.AnswerBlock(answers))
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
+        explanation = ca.Section()
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "Ensemble methods combine predictions from multiple models to create a more robust prediction."
         ]))
 
         # Mean calculation
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Mean (Bagging approach):**"
         ]))
 
         pred_sum = " + ".join([f"{p:.1f}" for p in self.predictions])
         mean_val = np.mean(self.predictions)
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\text{{mean}} = \\frac{{{pred_sum}}}{{{self.num_models}}} = \\frac{{{sum(self.predictions):.1f}}}{{{self.num_models}}} = {mean_val:.4f}",
             inline=False
         ))
 
         # Median calculation
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Median:**"
         ]))
 
@@ -991,26 +991,26 @@ class EnsembleAveragingQuestion(Question):
         sorted_str = ", ".join([f"{p:.1f}" for p in sorted_preds])
         median_val = np.median(self.predictions)
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             f"Sorted predictions: {sorted_str}"
         ]))
 
         if self.num_models % 2 == 1:
             mid_idx = self.num_models // 2
-            explanation.add_element(
+            explanation.add_element(ca.Paragraph([
                 f"Middle value (position {mid_idx + 1}): {median_val:.1f}"
             ]))
         else:
             mid_idx1 = self.num_models // 2 - 1
             mid_idx2 = self.num_models // 2
-            explanation.add_element(
+            explanation.add_element(ca.Paragraph([
                 f"Average of middle two values (positions {mid_idx1 + 1} and {mid_idx2 + 1}): "
                 f"({sorted_preds[mid_idx1]:.1f} + {sorted_preds[mid_idx2]:.1f}) / 2 = {median_val:.1f}"
             ]))
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
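Both ensemble branches map directly onto numpy; a runnable check with made-up predictions:

```python
import numpy as np

preds = np.array([3.2, 4.1, 2.8, 3.9, 4.5])   # made-up model outputs

print(np.mean(preds))      # bagging-style average: 18.5 / 5 = 3.7
print(np.median(preds))    # odd count: middle of the sorted values = 3.9

# An even count takes the else branch: average the two middle values.
print(np.median(np.array([3.2, 4.1, 2.8, 3.9])))   # (3.2 + 3.9) / 2 = 3.55
```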
@@ -1083,26 +1083,26 @@ class EndToEndTrainingQuestion(SimpleNeuralNetworkBase):
         self.answers = {}
 
         # Forward pass answers
-        self.answers["y_pred"] = AnswerTypes.Float(float(self.a2[0]), label="1. Forward Pass - Network output ŷ")
+        self.answers["y_pred"] = ca.AnswerTypes.Float(float(self.a2[0]), label="1. Forward Pass - Network output ŷ")
 
         # Loss answer
-        self.answers["loss"] = AnswerTypes.Float(float(self.loss), label="2. Loss")
+        self.answers["loss"] = ca.AnswerTypes.Float(float(self.loss), label="2. Loss")
 
         # Gradient answers (for key weights)
-        self.answers["grad_w3"] = AnswerTypes.Float(self._compute_gradient_W2(0), label="3. Gradient ∂L/∂w₃")
-        self.answers["grad_w11"] = AnswerTypes.Float(self._compute_gradient_W1(0, 0), label="4. Gradient ∂L/∂w₁₁")
+        self.answers["grad_w3"] = ca.AnswerTypes.Float(self._compute_gradient_W2(0), label="3. Gradient ∂L/∂w₃")
+        self.answers["grad_w11"] = ca.AnswerTypes.Float(self._compute_gradient_W1(0, 0), label="4. Gradient ∂L/∂w₁₁")
 
         # Updated weight answers
-        self.answers["new_w3"] = AnswerTypes.Float(float(self.new_W2[0, 0]), label="5. Updated w₃:")
-        self.answers["new_w11"] = AnswerTypes.Float(float(self.new_W1[0, 0]), label="6. Updated w₁₁:")
+        self.answers["new_w3"] = ca.AnswerTypes.Float(float(self.new_W2[0, 0]), label="5. Updated w₃:")
+        self.answers["new_w11"] = ca.AnswerTypes.Float(float(self.new_W1[0, 0]), label="6. Updated w₁₁:")
 
-    def _get_body(self, **kwargs) -> Tuple[
+    def _get_body(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question body and collect answers."""
-        body =
+        body = ca.Section()
         answers = []
 
         # Question description
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"Given the neural network below with {self._get_activation_name()} activation "
             f"in the hidden layer and sigmoid activation in the output layer (for binary classification), "
             f"perform one complete training step (forward pass, loss calculation, "
@@ -1111,34 +1111,34 @@ class EndToEndTrainingQuestion(SimpleNeuralNetworkBase):
 
         # Network diagram
         body.add_element(
-
+            ca.Picture(
                 img_data=self._generate_network_diagram(show_weights=True, show_activations=False)
             )
         )
 
         # Training parameters
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "**Training parameters:**"
         ]))
 
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "Input: ",
-
+            ca.Equation(f"x_1 = {self.X[0]:.1f}", inline=True),
             ", ",
-
+            ca.Equation(f"x_2 = {self.X[1]:.1f}", inline=True)
         ]))
 
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "Target: ",
-
+            ca.Equation(f"y = {int(self.y_target)}", inline=True)
         ]))
 
-        body.add_element(
+        body.add_element(ca.Paragraph([
             "Learning rate: ",
-
+            ca.Equation(f"\\alpha = {self.learning_rate}", inline=True)
         ]))
 
-        body.add_element(
+        body.add_element(ca.Paragraph([
             f"**Hidden layer activation:** {self._get_activation_name()}"
         ]))
 
@@ -1153,98 +1153,98 @@ class EndToEndTrainingQuestion(SimpleNeuralNetworkBase):
         answers.append(self.answers["new_w3"])
         answers.append(self.answers["new_w11"])
 
-        body.add_element(
+        body.add_element(ca.AnswerBlock(answers))
 
         return body, answers
 
-    def get_body(self, **kwargs) ->
+    def get_body(self, **kwargs) -> ca.Section:
         """Build question body (backward compatible interface)."""
         body, _ = self._get_body(**kwargs)
         return body
 
-    def _get_explanation(self, **kwargs) -> Tuple[
+    def _get_explanation(self, **kwargs) -> Tuple[ca.Section, List[ca.Answer]]:
         """Build question explanation."""
-        explanation =
+        explanation = ca.Section()
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "This problem requires performing one complete training iteration. Let's go through each step."
         ]))
 
         # Step 1: Forward pass
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 1: Forward Pass**"
         ]))
 
         # Hidden layer
         z1_0 = self.W1[0, 0] * self.X[0] + self.W1[0, 1] * self.X[1] + self.b1[0]
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
            f"z_1 = w_{{11}} x_1 + w_{{12}} x_2 + b_1 = {self.W1[0,0]:.{self.param_digits}f} \\cdot {self.X[0]:.1f} + {self.W1[0,1]:.{self.param_digits}f} \\cdot {self.X[1]:.1f} + {self.b1[0]:.{self.param_digits}f} = {self.z1[0]:.4f}",
             inline=False
         ))
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"h_1 = {self._get_activation_name()}(z_1) = {self.a1[0]:.4f}",
             inline=False
         ))
 
         # Similarly for h2 (abbreviated)
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"h_2 = {self.a1[1]:.4f} \\text{{ (calculated similarly)}}",
             inline=False
        ))
 
         # Output (pre-activation)
         z2 = self.W2[0, 0] * self.a1[0] + self.W2[0, 1] * self.a1[1] + self.b2[0]
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"z_{{out}} = w_3 h_1 + w_4 h_2 + b_2 = {self.W2[0,0]:.{self.param_digits}f} \\cdot {self.a1[0]:.4f} + {self.W2[0,1]:.{self.param_digits}f} \\cdot {self.a1[1]:.4f} + {self.b2[0]:.{self.param_digits}f} = {self.z2[0]:.4f}",
             inline=False
         ))
 
         # Output (sigmoid activation)
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\hat{{y}} = \\sigma(z_{{out}}) = \\frac{{1}}{{1 + e^{{-{self.z2[0]:.4f}}}}} = {self.a2[0]:.4f}",
             inline=False
         ))
 
         # Step 2: Loss
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 2: Calculate Loss (Binary Cross-Entropy)**"
         ]))
 
         # Show the full BCE formula first
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"L = -[y \\log(\\hat{{y}}) + (1-y) \\log(1-\\hat{{y}})]",
             inline=False
         ))
 
         # Then evaluate it
         if self.y_target == 1:
-            explanation.add_element(
+            explanation.add_element(ca.Equation(
                 f"L = -[1 \\cdot \\log({self.a2[0]:.4f}) + 0 \\cdot \\log(1-{self.a2[0]:.4f})] = -\\log({self.a2[0]:.4f}) = {self.loss:.4f}",
                 inline=False
             ))
         else:
-            explanation.add_element(
+            explanation.add_element(ca.Equation(
                 f"L = -[0 \\cdot \\log({self.a2[0]:.4f}) + 1 \\cdot \\log(1-{self.a2[0]:.4f})] = -\\log({1-self.a2[0]:.4f}) = {self.loss:.4f}",
                 inline=False
             ))
 
         # Step 3: Gradients
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 3: Compute Gradients**"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "For BCE with sigmoid, the output layer gradient simplifies to:"
         ]))
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\frac{{\\partial L}}{{\\partial z_{{out}}}} = \\hat{{y}} - y = {self.a2[0]:.4f} - {int(self.y_target)} = {self.dL_dz2:.4f}",
             inline=False
         ))
 
         grad_w3 = self._compute_gradient_W2(0)
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\frac{{\\partial L}}{{\\partial w_3}} = \\frac{{\\partial L}}{{\\partial z_{{out}}}} \\cdot h_1 = {self.dL_dz2:.4f} \\cdot {self.a1[0]:.4f} = {grad_w3:.4f}",
             inline=False
         ))
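Step 2's two branches simply evaluate the BCE formula at y = 1 or y = 0. A standalone check (the network output here is made up):

```python
import numpy as np

def bce(y, y_hat):
    """L = -[y log(y_hat) + (1 - y) log(1 - y_hat)]"""
    return -(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))

y_hat = 0.6457             # made-up network output
print(bce(1, y_hat))       # y = 1 branch: -log(0.6457) ~= 0.4374
print(bce(0, y_hat))       # y = 0 branch: -log(0.3543) ~= 1.0376
```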
@@ -1260,35 +1260,35 @@ class EndToEndTrainingQuestion(SimpleNeuralNetworkBase):
         else:
             act_deriv_str = f"1"
 
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"\\frac{{\\partial L}}{{\\partial w_{{11}}}} = \\frac{{\\partial L}}{{\\partial z_{{out}}}} \\cdot w_3 \\cdot {act_deriv_str} \\cdot x_1 = {self.dL_dz2:.4f} \\cdot {dz2_da1:.4f} \\cdot {da1_dz1:.4f} \\cdot {self.X[0]:.1f} = {grad_w11:.4f}",
             inline=False
         ))
 
         # Step 4: Weight updates
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "**Step 4: Update Weights**"
         ]))
 
         new_w3 = self.new_W2[0, 0]
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"w_3^{{new}} = w_3 - \\alpha \\frac{{\\partial L}}{{\\partial w_3}} = {self.W2[0,0]:.{self.param_digits}f} - {self.learning_rate} \\cdot {grad_w3:.4f} = {new_w3:.4f}",
             inline=False
         ))
 
         new_w11 = self.new_W1[0, 0]
-        explanation.add_element(
+        explanation.add_element(ca.Equation(
             f"w_{{11}}^{{new}} = w_{{11}} - \\alpha \\frac{{\\partial L}}{{\\partial w_{{11}}}} = {self.W1[0,0]:.{self.param_digits}f} - {self.learning_rate} \\cdot {grad_w11:.4f} = {new_w11:.4f}",
             inline=False
         ))
 
-        explanation.add_element(
+        explanation.add_element(ca.Paragraph([
             "These updated weights would be used in the next training iteration."
         ]))
 
         return explanation, []
 
-    def get_explanation(self, **kwargs) ->
+    def get_explanation(self, **kwargs) -> ca.Section:
         """Build question explanation (backward compatible interface)."""
         explanation, _ = self._get_explanation(**kwargs)
         return explanation
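Step 4 is one gradient-descent step per weight. A tiny standalone version of the two update equations (all numbers made up):

```python
alpha = 0.1                         # learning rate
w3, grad_w3 = 0.70, -0.1532         # current weight and its gradient
w11, grad_w11 = 0.50, 0.0421

w3_new = w3 - alpha * grad_w3       # 0.70 - 0.1 * (-0.1532) ~= 0.7153
w11_new = w11 - alpha * grad_w11    # 0.50 - 0.1 * 0.0421   ~= 0.4958
print(w3_new, w11_new)
```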