QuizGenerator 0.5.1-py3-none-any.whl → 0.6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- QuizGenerator/contentast.py +1056 -1231
- QuizGenerator/generate.py +174 -2
- QuizGenerator/misc.py +0 -6
- QuizGenerator/mixins.py +7 -8
- QuizGenerator/premade_questions/basic.py +3 -3
- QuizGenerator/premade_questions/cst334/languages.py +45 -51
- QuizGenerator/premade_questions/cst334/math_questions.py +9 -10
- QuizGenerator/premade_questions/cst334/memory_questions.py +39 -56
- QuizGenerator/premade_questions/cst334/persistence_questions.py +12 -27
- QuizGenerator/premade_questions/cst334/process.py +11 -22
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +11 -11
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +7 -7
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +6 -6
- QuizGenerator/premade_questions/cst463/gradient_descent/misc.py +2 -2
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +15 -19
- QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +149 -442
- QuizGenerator/premade_questions/cst463/models/attention.py +7 -8
- QuizGenerator/premade_questions/cst463/models/cnns.py +6 -7
- QuizGenerator/premade_questions/cst463/models/rnns.py +6 -6
- QuizGenerator/premade_questions/cst463/models/text.py +7 -8
- QuizGenerator/premade_questions/cst463/models/weight_counting.py +5 -9
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +22 -22
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +25 -25
- QuizGenerator/question.py +13 -14
- {quizgenerator-0.5.1.dist-info → quizgenerator-0.6.1.dist-info}/METADATA +1 -1
- {quizgenerator-0.5.1.dist-info → quizgenerator-0.6.1.dist-info}/RECORD +29 -29
- {quizgenerator-0.5.1.dist-info → quizgenerator-0.6.1.dist-info}/WHEEL +0 -0
- {quizgenerator-0.5.1.dist-info → quizgenerator-0.6.1.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.5.1.dist-info → quizgenerator-0.6.1.dist-info}/licenses/LICENSE +0 -0
QuizGenerator/generate.py
CHANGED
@@ -1,16 +1,19 @@
 #!env python
 import argparse
+from datetime import datetime
 import os
 import random
 import shutil
 import subprocess
 import tempfile
+import traceback
 import re
 from pathlib import Path
 from dotenv import load_dotenv
 from QuizGenerator.canvas.canvas_interface import CanvasInterface
 
 from QuizGenerator.quiz import Quiz
+from QuizGenerator.question import QuestionRegistry
 
 import logging
 log = logging.getLogger(__name__)
@@ -44,6 +47,12 @@ def parse_args():
   parser.add_argument("--num_pdfs", default=0, type=int, help="How many PDF quizzes to create")
   parser.add_argument("--latex", action="store_false", dest="typst", help="Use Typst instead of LaTeX for PDF generation")
 
+  # Testing flags
+  parser.add_argument("--test_all", type=int, default=0, metavar="N",
+                      help="Generate N variations of ALL registered questions to test they work correctly")
+  parser.add_argument("--strict", action="store_true",
+                      help="With --test_all, skip PDF/Canvas generation if any questions fail")
+
   subparsers = parser.add_subparsers(dest='command')
   test_parser = subparsers.add_parser("TEST")
 
@@ -63,8 +72,155 @@ def test():
   print("\n" + "="*60)
   print("TEST COMPLETE")
   print("="*60)
-
-
+
+
+def test_all_questions(
+    num_variations: int,
+    generate_pdf: bool = False,
+    use_typst: bool = True,
+    canvas_course=None,
+    strict: bool = False
+):
+  """
+  Test all registered questions by generating N variations of each.
+
+  This helps verify that all question types work correctly and can generate
+  valid output without errors.
+
+  Args:
+    num_variations: Number of variations to generate for each question type
+    generate_pdf: If True, generate a PDF with all successful questions
+    use_typst: If True, use Typst for PDF generation; otherwise use LaTeX
+    canvas_course: If provided, push a test quiz to this Canvas course
+    strict: If True, skip PDF/Canvas generation if any questions fail
+  """
+  # Ensure all premade questions are loaded
+  QuestionRegistry.load_premade_questions()
+
+  registered_questions = QuestionRegistry._registry
+  total_questions = len(registered_questions)
+
+  # Test defaults for questions that require external input
+  # These are "template" questions that can't work without content
+  TEST_DEFAULTS = {
+    'fromtext': {'text': 'Test question placeholder text.'},
+    'fromgenerator': {'generator': 'return "Generated test content"'},
+  }
+
+  print(f"\nTesting {total_questions} registered question types with {num_variations} variations each...")
+  print("=" * 70)
+
+  failed_questions = []
+  successful_questions = []
+  # Collect question instances for PDF/Canvas generation
+  test_question_instances = []
+
+  for i, (question_name, question_class) in enumerate(sorted(registered_questions.items()), 1):
+    print(f"\n[{i}/{total_questions}] Testing: {question_name}")
+    print(f"  Class: {question_class.__name__}")
+
+    question_failures = []
+
+    for variation in range(num_variations):
+      seed = variation * 1000  # Use different seeds for each variation
+      try:
+        # Get any test defaults for this question type
+        extra_kwargs = TEST_DEFAULTS.get(question_name, {})
+
+        # Create question instance with minimal required params
+        question = question_class(
+          name=f"{question_name} (v{variation+1})",
+          points_value=1.0,
+          **extra_kwargs
+        )
+
+        # Generate the question (this calls refresh and builds the AST)
+        question_ast = question.get_question(rng_seed=seed)
+
+        # Try rendering to both formats to catch format-specific issues
+        try:
+          question_ast.render("html")
+        except Exception as e:
+          tb = traceback.format_exc()
+          question_failures.append(f"  Variation {variation+1}: HTML render failed - {e}\n{tb}")
+          continue
+
+        try:
+          question_ast.render("typst")
+        except Exception as e:
+          tb = traceback.format_exc()
+          question_failures.append(f"  Variation {variation+1}: Typst render failed - {e}\n{tb}")
+          continue
+
+        # If we got here, the question works - save the instance
+        test_question_instances.append(question)
+
+      except Exception as e:
+        tb = traceback.format_exc()
+        question_failures.append(f"  Variation {variation+1}: Generation failed - {e}\n{tb}")
+
+    if question_failures:
+      print(f"  FAILED ({len(question_failures)}/{num_variations} variations)")
+      for failure in question_failures:
+        print(failure)
+      failed_questions.append((question_name, question_failures))
+    else:
+      print(f"  OK ({num_variations}/{num_variations} variations)")
+      successful_questions.append(question_name)
+
+  # Summary
+  print("\n" + "=" * 70)
+  print("TEST SUMMARY")
+  print("=" * 70)
+  print(f"Total question types: {total_questions}")
+  print(f"Successful: {len(successful_questions)}")
+  print(f"Failed: {len(failed_questions)}")
+
+  if failed_questions:
+    print("\nFailed questions:")
+    for name, failures in failed_questions:
+      print(f"  - {name}: {len(failures)} failures")
+
+  print("=" * 70)
+
+  # Generate PDF and/or push to Canvas if requested
+  if strict and failed_questions:
+    print("\n[STRICT MODE] Skipping PDF/Canvas generation due to failures")
+  elif (generate_pdf or canvas_course) and test_question_instances:
+    print(f"\nCreating test quiz with {len(test_question_instances)} questions...")
+
+    # Create a Quiz object with all successful questions
+    test_quiz = Quiz(
+      name="Test All Questions",
+      questions=test_question_instances,
+      practice=True
+    )
+
+    if generate_pdf:
+      print("Generating PDF...")
+      pdf_seed = 12345  # Fixed seed for reproducibility
+      if use_typst:
+        typst_text = test_quiz.get_quiz(rng_seed=pdf_seed).render("typst")
+        generate_typst(typst_text, remove_previous=True, name_prefix="test_all_questions")
+      else:
+        latex_text = test_quiz.get_quiz(rng_seed=pdf_seed).render_latex()
+        generate_latex(latex_text, remove_previous=True, name_prefix="test_all_questions")
+      print("PDF generated in out/ directory")
+
+    if canvas_course:
+      print("Pushing to Canvas...")
+      quiz_title = f"Test All Questions ({int(datetime.now().timestamp())} : {datetime.now().strftime('%b %d %I:%M%p')})"
+      canvas_course.push_quiz_to_canvas(
+        test_quiz,
+        num_variations=1,
+        title=quiz_title,
+        is_practice=True
+      )
+      print(f"Quiz '{quiz_title}' pushed to Canvas")
+
+  return len(failed_questions) == 0
+
+
 def generate_latex(latex_text, remove_previous=False, name_prefix=None):
   """
   Generate PDF from LaTeX source code.
@@ -269,6 +425,22 @@ def main():
     test()
     return
 
+  if args.test_all > 0:
+    # Set up Canvas course if course_id provided
+    canvas_course = None
+    if args.course_id:
+      canvas_interface = CanvasInterface(prod=args.prod)
+      canvas_course = canvas_interface.get_course(course_id=args.course_id)
+
+    success = test_all_questions(
+      args.test_all,
+      generate_pdf=True,
+      use_typst=getattr(args, 'typst', True),
+      canvas_course=canvas_course,
+      strict=args.strict
+    )
+    exit(0 if success else 1)
+
   # Clear any previous metrics
   PerformanceTracker.clear_metrics()
 
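The new test path can also be driven directly from Python rather than through the CLI flags. A minimal sketch, assuming QuizGenerator 0.6.1 is installed and its premade question modules import cleanly; the function name, signature, and boolean return value come from the hunk above, everything else is illustrative:

    from QuizGenerator.generate import test_all_questions

    # Generate 2 variations of every registered question type, render each to
    # HTML and Typst, and print a per-question pass/fail summary.
    ok = test_all_questions(2, generate_pdf=False, strict=True)
    print("all question types passed" if ok else "some question types failed")

From the command line, main() reaches the same code via --test_all N, with --strict suppressing PDF/Canvas output on failure and --course_id/--prod controlling the optional Canvas push.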
QuizGenerator/misc.py
CHANGED
@@ -21,9 +21,3 @@ def fix_negative_zero(value):
   if isinstance(value, (int, float)):
     return 0.0 if value == 0 else value
   return value
-
-
-# Backward compatibility: Answer and MatrixAnswer have moved to ContentAST
-# Re-export them here so existing imports continue to work
-Answer = ContentAST.Answer
-MatrixAnswer = ContentAST.MatrixAnswer
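With these aliases gone, code that still imports Answer or MatrixAnswer from QuizGenerator.misc will stop resolving against 0.6.1. A minimal migration sketch, assuming only the import location changed (the deleted lines show both names simply pointed at ContentAST attributes):

    # 0.5.1 and earlier, via the re-export misc.py used to provide:
    #   from QuizGenerator.misc import Answer, MatrixAnswer
    # 0.6.1: import the ContentAST namespace and use its attributes directly.
    from QuizGenerator.contentast import ContentAST

    Answer = ContentAST.Answer              # same classes the old aliases pointed at
    MatrixAnswer = ContentAST.MatrixAnswer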
QuizGenerator/mixins.py
CHANGED
@@ -6,7 +6,6 @@ These mixins provide reusable patterns for common question structures.
 
 import abc
 from typing import Dict, List, Any, Union
-from QuizGenerator.misc import Answer
 from QuizGenerator.contentast import ContentAST
 
 
@@ -68,13 +67,13 @@ class TableQuestionMixin:
     """
     answer_columns = answer_columns or []
 
-    def format_cell(row_data: Dict, column: str) -> Union[str, Answer]:
+    def format_cell(row_data: Dict, column: str) -> Union[str, ContentAST.Answer]:
       """Format a cell based on whether it should be an answer or plain data"""
       value = row_data.get(column, "")
 
       # If this column should contain answers and the value is an Answer object
       # Answer extends ContentAST.Leaf, so it can be used directly
-      if column in answer_columns and isinstance(value, Answer):
+      if column in answer_columns and isinstance(value, ContentAST.Answer):
        return value
       # If this column should contain answers but we have the answer key
       elif column in answer_columns and isinstance(value, str) and hasattr(self, 'answers'):
@@ -150,11 +149,11 @@ class TableQuestionMixin:
       ContentAST.Table with multiple answer blanks
     """
 
-    def process_cell_value(value: Any) -> Union[str, Answer]:
+    def process_cell_value(value: Any) -> Union[str, ContentAST.Answer]:
       """Convert cell values to appropriate display format"""
       # If it's already an Answer object, use it directly
       # Answer extends ContentAST.Leaf so it can be used in the AST
-      if isinstance(value, Answer):
+      if isinstance(value, ContentAST.Answer):
        return value
       # If it's a string that looks like an answer key, try to resolve it
       elif isinstance(value, str) and value.startswith("answer__") and hasattr(self, 'answers'):
@@ -369,9 +368,9 @@ class MultiPartQuestionMixin:
     Example:
       # For a 3-part question
       {
-        'a': Answer.integer('a', 5),
-        'b': Answer.integer('b', 12),
-        'c': Answer.integer('c', -3)
+        'a': ContentAST.Answer.integer('a', 5),
+        'b': ContentAST.Answer.integer('b', 12),
+        'c': ContentAST.Answer.integer('c', -3)
       }
     """
     if not self.is_multipart():
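The mixin edits above are mostly type-level: cells destined for answer columns are now checked against ContentAST.Answer instead of the removed misc.Answer alias. A small sketch of the kind of row data a TableQuestionMixin-based question might pass through these helpers; the column names and values are illustrative, and ContentAST.Answer.integer(...) is the call shown in the MultiPartQuestionMixin docstring above:

    from QuizGenerator.contentast import ContentAST

    row = {
      "Process": "P1",                                            # plain data cell
      "Completion time": ContentAST.Answer.integer("p1_done", 7), # cell rendered as an answer blank
    }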
QuizGenerator/premade_questions/basic.py
CHANGED
@@ -6,7 +6,7 @@ from typing import List, Dict, Any, Tuple
 import logging
 
 from QuizGenerator.contentast import *
-from QuizGenerator.question import Question, QuestionRegistry
+from QuizGenerator.question import Question, QuestionRegistry
 from QuizGenerator.mixins import TableQuestionMixin
 
 log = logging.getLogger(__name__)
@@ -25,8 +25,8 @@ class FromText(Question):
 
     return ContentAST.Section([ContentAST.Text(self.text)])
 
-  def get_answers(self, *args, **kwargs) -> Tuple[Answer.
-    return Answer.
+  def get_answers(self, *args, **kwargs) -> Tuple[ContentAST.Answer.CanvasAnswerKind, List[Dict[str,Any]]]:
+    return ContentAST.Answer.CanvasAnswerKind.ESSAY, []
 
 
 @QuestionRegistry.register()
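The FromText change above, together with the languages.py and math_questions.py hunks below, shows the answer API that 0.6.1 standardizes on: get_answers() returns a ContentAST.Answer.CanvasAnswerKind plus a list of Canvas answer dicts, individual options are built as ContentAST.Answer objects, and simple typed blanks use the AnswerTypes helpers. A condensed sketch of those patterns, with keyword names taken from the hunks and all values purely illustrative:

    import itertools
    from QuizGenerator.contentast import ContentAST, AnswerTypes

    # Multiple-answer options, as in ValidStringsInLanguageQuestion below.
    options = {
      "answer_good": ContentAST.Answer(
        value="abba",                                              # illustrative option text
        kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
        correct=True
      ),
      "answer_bad": ContentAST.Answer(
        value="abab",                                              # illustrative option text
        kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
        correct=False
      ),
    }
    # Flattened to the Canvas payload shape, as the new get_answers() does:
    canvas_answers = list(itertools.chain(*[a.get_for_canvas() for a in options.values()]))

    # Typed fill-in blanks, as in the math questions below; the label/unit
    # keywords match the calls in the hunks, the values here are made up.
    blanks = {
      "bits": AnswerTypes.Int(16, label="Number of bits in address", unit="bits"),
      "hex":  AnswerTypes.String("0x1f", label="Value in hex"),
      "amat": AnswerTypes.Float(1.75, label="Average Memory Access Time", unit="cycles"),
    }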
QuizGenerator/premade_questions/cst334/languages.py
CHANGED
@@ -6,9 +6,9 @@ import enum
 import itertools
 from typing import List, Dict, Optional, Tuple, Any
 
-from QuizGenerator.question import QuestionRegistry, Question
+from QuizGenerator.question import QuestionRegistry, Question
 
-from QuizGenerator.contentast import ContentAST
+from QuizGenerator.contentast import ContentAST, AnswerTypes
 
 import logging
 log = logging.getLogger(__name__)
@@ -265,60 +265,55 @@ class ValidStringsInLanguageQuestion(LanguageQuestion):
    self._select_random_grammar()
 
    self.answers = {}
-
-
-
-
-
-
-
-
-      )
-    }
+
+    # Create answers with proper ContentAST.Answer signature
+    # value is the generated string, correct indicates if it's a valid answer
+    good_string = self.grammar_good.generate(self.include_spaces)
+    self.answers["answer_good"] = ContentAST.Answer(
+      value=good_string,
+      kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
+      correct=True
     )
-
-    self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-      self.grammar_bad.generate(self.include_spaces, early_exit=True),
-      Answer.AnswerKind.MULTIPLE_ANSWER,
-      correct=False
-    )
-    })
-
+
+    bad_string = self.grammar_bad.generate(self.include_spaces)
+    self.answers["answer_bad"] = ContentAST.Answer(
+      value=bad_string,
+      kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
+      correct=False
+    )
+
+    bad_early_string = self.grammar_bad.generate(self.include_spaces, early_exit=True)
+    self.answers["answer_bad_early"] = ContentAST.Answer(
+      value=bad_early_string,
+      kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
+      correct=False
+    )
+
    answer_text_set = {a.value for a in self.answers.values()}
    num_tries = 0
    while len(self.answers) < 10 and num_tries < self.MAX_TRIES:
-
+
      correct = self.rng.choice([True, False])
      if not correct:
        early_exit = self.rng.choice([True, False])
      else:
        early_exit = False
-
-
-
-
-
-
-
-
-
-      )
-
-
-
+
+      generated_string = (
+        self.grammar_good
+        if correct or early_exit
+        else self.grammar_bad
+      ).generate(self.include_spaces, early_exit=early_exit)
+
+      is_correct = correct and not early_exit
+
+      if len(generated_string) < self.MAX_LENGTH and generated_string not in answer_text_set:
+        self.answers[f"answer_{num_tries}"] = ContentAST.Answer(
+          value=generated_string,
+          kind=ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER,
+          correct=is_correct
        )
+        answer_text_set.add(generated_string)
      num_tries += 1
 
    # Generate answers that will be used only for the latex version.
@@ -336,7 +331,6 @@ class ValidStringsInLanguageQuestion(LanguageQuestion):
      ])()
    )
 
-
  def _get_body(self, *args, **kwargs):
    """Build question body and collect answers."""
    answers = list(self.answers.values())
@@ -373,7 +367,7 @@ class ValidStringsInLanguageQuestion(LanguageQuestion):
    # For Latex-only, ask students to generate some more.
    body.add_element(
      ContentAST.OnlyLatex([
-        ContentAST.AnswerBlock([
+        ContentAST.AnswerBlock([AnswerTypes.String("", label="") for i in range(self.num_answer_blanks)])
      ])
    )
 
@@ -400,6 +394,6 @@ class ValidStringsInLanguageQuestion(LanguageQuestion):
    explanation, _ = self._get_explanation(*args, **kwargs)
    return explanation
 
-  def get_answers(self, *args, **kwargs) -> Tuple[Answer.
+  def get_answers(self, *args, **kwargs) -> Tuple[ContentAST.Answer.CanvasAnswerKind, List[Dict[str,Any]]]:
 
-    return Answer.
+    return ContentAST.Answer.CanvasAnswerKind.MULTIPLE_ANSWER, list(itertools.chain(*[a.get_for_canvas() for a in self.answers.values()]))
QuizGenerator/premade_questions/cst334/math_questions.py
CHANGED
@@ -3,8 +3,8 @@ import abc
 import logging
 import math
 
-from QuizGenerator.question import Question, QuestionRegistry
-from QuizGenerator.contentast import ContentAST
+from QuizGenerator.question import Question, QuestionRegistry
+from QuizGenerator.contentast import ContentAST, AnswerTypes
 from QuizGenerator.constants import MathRanges
 
 log = logging.getLogger(__name__)
@@ -31,10 +31,10 @@ class BitsAndBytes(MathQuestion):
    self.num_bytes = int(math.pow(2, self.num_bits))
 
    if self.from_binary:
-      self.answers = {"answer":
+      self.answers = {"answer": AnswerTypes.Int(self.num_bytes,
                       label="Address space size", unit="Bytes")}
    else:
-      self.answers = {"answer":
+      self.answers = {"answer": AnswerTypes.Int(self.num_bits,
                       label="Number of bits in address", unit="bits")}
 
  def _get_body(self, **kwargs):
@@ -111,10 +111,10 @@ class HexAndBinary(MathQuestion):
    self.binary_val = f"0b{self.value:0{4*self.number_of_hexits}b}"
 
    if self.from_binary:
-      self.answers['answer'] =
+      self.answers['answer'] = AnswerTypes.String(self.hex_val,
                                label="Value in hex")
    else:
-      self.answers['answer'] =
+      self.answers['answer'] = AnswerTypes.String(self.binary_val,
                                label="Value in binary")
 
  def _get_body(self, **kwargs):
@@ -223,8 +223,7 @@ class AverageMemoryAccessTime(MathQuestion):
    self.amat = self.hit_rate * self.hit_latency + (1 - self.hit_rate) * self.miss_latency
 
    self.answers = {
-      "amat":
-              label="Average Memory Access Time", unit="cycles")
+      "amat": AnswerTypes.Float(self.amat, label="Average Memory Access Time", unit="cycles")
    }
 
    # Finally, do the self.rngizing of the question, to avoid these being non-deterministic
@@ -244,7 +243,7 @@ class AverageMemoryAccessTime(MathQuestion):
      ContentAST.Paragraph([
        ContentAST.Text("Please calculate the Average Memory Access Time given the below information. "),
        ContentAST.Text(
-          f"Please round your answer to {Answer.DEFAULT_ROUNDING_DIGITS} decimal points. ",
+          f"Please round your answer to {ContentAST.Answer.DEFAULT_ROUNDING_DIGITS} decimal points. ",
          hide_from_latex=True
        )
      ])
@@ -295,7 +294,7 @@ class AverageMemoryAccessTime(MathQuestion):
        lhs="AMAT",
        rhs=[
          r"(hit\_rate)*(hit\_cost) + (1 - hit\_rate)*(miss\_cost)",
-          f"({self.hit_rate: 0.{Answer.DEFAULT_ROUNDING_DIGITS}f})*({self.hit_latency}) + ({1 - self.hit_rate: 0.{Answer.DEFAULT_ROUNDING_DIGITS}f})*({self.miss_latency}) = {self.amat: 0.{Answer.DEFAULT_ROUNDING_DIGITS}f}\\text{{cycles}}"
+          f"({self.hit_rate: 0.{ContentAST.Answer.DEFAULT_ROUNDING_DIGITS}f})*({self.hit_latency}) + ({1 - self.hit_rate: 0.{ContentAST.Answer.DEFAULT_ROUNDING_DIGITS}f})*({self.miss_latency}) = {self.amat: 0.{ContentAST.Answer.DEFAULT_ROUNDING_DIGITS}f}\\text{{cycles}}"
        ]
      )
    )