canvaslms 5.3-py3-none-any.whl → 5.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- canvaslms/cli/quizzes.nw +1042 -67
- canvaslms/cli/quizzes.py +711 -31
- {canvaslms-5.3.dist-info → canvaslms-5.4.dist-info}/METADATA +1 -1
- {canvaslms-5.3.dist-info → canvaslms-5.4.dist-info}/RECORD +7 -7
- {canvaslms-5.3.dist-info → canvaslms-5.4.dist-info}/WHEEL +0 -0
- {canvaslms-5.3.dist-info → canvaslms-5.4.dist-info}/entry_points.txt +0 -0
- {canvaslms-5.3.dist-info → canvaslms-5.4.dist-info}/licenses/LICENSE +0 -0
canvaslms/cli/quizzes.nw
CHANGED
|
@@ -1,46 +1,148 @@
|
|
|
1
1
|
\chapter{The \texttt{quizzes} command}
|
|
2
2
|
\label{quizzes-command}
|
|
3
3
|
\chapterprecis{%
|
|
4
|
-
This chapter was originally authored by GitHub Copilot and minimally reviewed
|
|
4
|
+
This chapter was originally authored by GitHub Copilot and minimally reviewed
|
|
5
5
|
and revised by Daniel Bosk.
|
|
6
6
|
Then later expanded on by Dan-Claude and, finally,
|
|
7
7
|
revised by Daniel Bosk.%
|
|
8
8
|
}
|
|
9
9
|
|
|
10
|
-
This chapter provides the subcommand [[quizzes]], which provides
|
|
11
|
-
Canvas quiz and survey
|
|
10
|
+
This chapter provides the subcommand [[quizzes]], which gives comprehensive
|
|
11
|
+
access to Canvas quiz and survey functionality. The command supports both
|
|
12
|
+
Classic Quizzes (the original Canvas quiz system) and New Quizzes (Quizzes.Next).
|
|
12
13
|
|
|
13
|
-
The [[quizzes]] command has
|
|
14
|
-
\begin{
|
|
15
|
-
\item
|
|
16
|
-
|
|
17
|
-
\item
|
|
18
|
-
\
|
|
14
|
+
The [[quizzes]] command has the following subcommands:
|
|
15
|
+
\begin{description}
|
|
16
|
+
\item[[[list]]] Lists all quizzes in a course (Classic, New Quizzes, and surveys)
|
|
17
|
+
\item[[[view]]] Displays full quiz content including questions and answers
|
|
18
|
+
\item[[[analyse]]] Summarizes quiz/survey results with statistics and AI-generated insights
|
|
19
|
+
\item[[[create]]] Creates a new quiz from JSON (settings and optionally questions)
|
|
20
|
+
\item[[[edit]]] Modifies quiz settings and instructions
|
|
21
|
+
\item[[[delete]]] Removes a quiz
|
|
22
|
+
\item[[[export]]] Exports a complete quiz to JSON for backup or migration
|
|
23
|
+
\item[[[items]]] Manages quiz questions (list, add, edit, delete, export)
|
|
24
|
+
\item[[[banks]]] Manages quiz item banks
|
|
25
|
+
\end{description}
|
|
19
26
|
|
|
20
|
-
The [[analyse]] subcommand supports two modes of operation:
|
|
21
|
-
\begin{enumerate}
|
|
22
|
-
\item Fetch quiz/survey data directly from Canvas by specifying the quiz.
|
|
23
|
-
This works reliably for Classic Quizzes and New Quizzes (Quizzes.Next).
|
|
24
|
-
The implementation uses the documented New Quiz Reports API.
|
|
25
|
-
\item Read and analyze a CSV file downloaded from Canvas.
|
|
26
|
-
This is the most reliable method for both Classic and New Quizzes.
|
|
27
|
-
\end{enumerate}
|
|
28
27
|
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
28
|
+
\section{Creating and Managing Quizzes}
|
|
29
|
+
|
|
30
|
+
The [[quizzes]] command provides a complete workflow for quiz backup, migration,
|
|
31
|
+
and duplication between courses.
|
|
32
|
+
|
|
33
|
+
\subsection{The export/create workflow}
|
|
34
|
+
|
|
35
|
+
The recommended workflow for duplicating or migrating quizzes is:
|
|
36
|
+
\begin{minted}{bash}
|
|
37
|
+
# 1. Export an existing quiz (with -I for importable format)
|
|
38
|
+
canvaslms quizzes export -c "Source Course" -a "Midterm Exam" -I > midterm.json
|
|
39
|
+
|
|
40
|
+
# 2. Create the quiz in another course
|
|
41
|
+
canvaslms quizzes create -c "Target Course" -f midterm.json
|
|
36
42
|
|
|
43
|
+
# Optionally change the title
|
|
44
|
+
canvaslms quizzes create -c "Target Course" -f midterm.json --title "New Title"
|
|
45
|
+
\end{minted}
|
|
46
|
+
|
|
47
|
+
This workflow exports both quiz settings (title, time limit, instructions, etc.)
|
|
48
|
+
and all questions in a single JSON file.
|
|
49
|
+
|
|
50
|
+
\subsection{Creating quizzes from scratch}
|
|
51
|
+
|
|
52
|
+
To create a new quiz from scratch:
|
|
53
|
+
\begin{minted}{bash}
|
|
54
|
+
# See the full JSON format with examples
|
|
55
|
+
canvaslms quizzes create --example > template.json
|
|
56
|
+
|
|
57
|
+
# Edit the template and create the quiz
|
|
58
|
+
canvaslms quizzes create -c "My Course" -f template.json
|
|
59
|
+
\end{minted}
|
|
37
60
|
|
|
38
|
-
|
|
61
|
+
The [[--example]] flag outputs complete examples for both New Quizzes and
|
|
62
|
+
Classic Quizzes, including all supported settings and question types.
|
|
63
|
+
|
|
64
|
+
\subsection{Advanced New Quiz settings}
|
|
65
|
+
\label{sec:advanced-quiz-settings}
|
|
66
|
+
|
|
67
|
+
New Quizzes support additional settings for controlling multiple attempts and
|
|
68
|
+
result visibility. These are specified in the [[quiz_settings]] object within
|
|
69
|
+
[[settings]]:
|
|
70
|
+
\begin{verbatim}
|
|
71
|
+
{
|
|
72
|
+
"quiz_type": "new",
|
|
73
|
+
"settings": {
|
|
74
|
+
"title": "Practice Quiz",
|
|
75
|
+
"quiz_settings": {
|
|
76
|
+
"multiple_attempts": { ... },
|
|
77
|
+
"result_view_settings": { ... }
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
\end{verbatim}
|
|
82
|
+
|
|
83
|
+
\paragraph{Multiple attempts with waiting periods.}
|
|
84
|
+
To allow students multiple attempts with a cooling period between attempts:
|
|
85
|
+
\begin{verbatim}
|
|
86
|
+
"multiple_attempts": {
|
|
87
|
+
"multiple_attempts_enabled": true,
|
|
88
|
+
"attempt_limit": null,
|
|
89
|
+
"score_to_keep": "latest",
|
|
90
|
+
"cooling_period": true,
|
|
91
|
+
"cooling_period_seconds": 3600
|
|
92
|
+
}
|
|
93
|
+
\end{verbatim}
|
|
94
|
+
Here, [[attempt_limit]] set to [[false]] means unlimited attempts. The
|
|
95
|
+
[[score_to_keep]] can be [[average]], [[first]], [[highest]] (default), or [[latest]]. Setting
|
|
96
|
+
[[cooling_period]] to [[true]] requires [[cooling_period_seconds]] to specify
|
|
97
|
+
the wait time (3600 seconds = 1 hour). Using [[latest]] with a cooling period
|
|
98
|
+
lets students ``build on their last result''---they see which questions they
|
|
99
|
+
got wrong and can retry without losing progress.
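For reference, a [[cooling_period_seconds]] of 1800 is 30 minutes, 43200 is 12 hours, and 86400 is 24 hours.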
|
|
100
|
+
|
|
101
|
+
\paragraph{Controlling what students see after submission.}
|
|
102
|
+
To show students their score but hide the correct answers:
|
|
103
|
+
\begin{verbatim}
|
|
104
|
+
"result_view_settings": {
|
|
105
|
+
"display_items": true,
|
|
106
|
+
"display_item_response": true,
|
|
107
|
+
"display_item_correct_answer": false,
|
|
108
|
+
"display_item_feedback": false,
|
|
109
|
+
"display_points_awarded": true,
|
|
110
|
+
"display_points_possible": true
|
|
111
|
+
}
|
|
112
|
+
\end{verbatim}
|
|
113
|
+
With these settings, students see their responses and points but cannot see
|
|
114
|
+
which answers were correct. This is useful for practice quizzes where you want
|
|
115
|
+
students to keep trying without revealing the answers.
|
|
116
|
+
|
|
117
|
+
You can also schedule when correct answers become visible using
|
|
118
|
+
[[display_correct_answer_at]] and [[hide_correct_answer_at]] with ISO 8601
|
|
119
|
+
timestamps.
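For example, a sketch (the timestamps are placeholders) that reveals the correct answers only during the week after the deadline:
\begin{verbatim}
"result_view_settings": {
  "display_item_correct_answer": true,
  "display_correct_answer_at": "2025-03-16T08:00:00Z",
  "hide_correct_answer_at": "2025-03-23T08:00:00Z"
}
\end{verbatim}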
|
|
120
|
+
|
|
121
|
+
\subsection{Adding questions separately}
|
|
122
|
+
|
|
123
|
+
You can also create an empty quiz and add questions separately:
|
|
124
|
+
\begin{minted}{bash}
|
|
125
|
+
# Create quiz with settings only
|
|
126
|
+
canvaslms quizzes create -c "My Course" --title "New Quiz" --type new
|
|
127
|
+
|
|
128
|
+
# Add questions from a JSON file
|
|
129
|
+
canvaslms quizzes items add -c "My Course" -a "New Quiz" -f questions.json
|
|
130
|
+
|
|
131
|
+
# See question format examples
|
|
132
|
+
canvaslms quizzes items add --example
|
|
133
|
+
\end{minted}
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
\section{Analyzing Quiz Results}
|
|
137
|
+
|
|
138
|
+
The [[quizzes analyse]] command provides statistical analysis and AI-generated
|
|
139
|
+
summaries of quiz and survey responses. It supports two modes: fetching data
|
|
140
|
+
directly from Canvas or analyzing a downloaded CSV file.
|
|
39
141
|
|
|
40
142
|
\subsection{Analyzing a CSV file}
|
|
41
143
|
|
|
42
|
-
|
|
43
|
-
|
|
144
|
+
The most reliable method is to download the Student Analysis Report CSV from
|
|
145
|
+
Canvas and analyze it locally:
|
|
44
146
|
\begin{minted}{bash}
|
|
45
147
|
canvaslms quizzes analyse --csv survey_results.csv
|
|
46
148
|
\end{minted}
|
|
@@ -216,6 +318,7 @@ subcommands:
|
|
|
216
318
|
\item[ [[add_create_command()]] ] Registers the [[quizzes create]] subcommand
|
|
217
319
|
\item[ [[add_edit_command()]] ] Registers the [[quizzes edit]] subcommand
|
|
218
320
|
\item[ [[add_delete_command()]] ] Registers the [[quizzes delete]] subcommand
|
|
321
|
+
\item[ [[add_export_command()]] ] Registers the [[quizzes export]] subcommand
|
|
219
322
|
\item[ [[add_items_command()]] ] Registers the [[quizzes items]] subcommand group
|
|
220
323
|
\item[ [[add_banks_command()]] ] Registers the [[quizzes banks]] subcommand group
|
|
221
324
|
\end{description}
|
|
@@ -269,6 +372,10 @@ def add_delete_command(subp):
|
|
|
269
372
|
"""Adds the quizzes delete subcommand to argparse parser subp"""
|
|
270
373
|
<<add quizzes delete command to subp>>
|
|
271
374
|
|
|
375
|
+
def add_export_command(subp):
|
|
376
|
+
"""Adds the quizzes export subcommand to argparse parser subp"""
|
|
377
|
+
<<add quizzes export command to subp>>
|
|
378
|
+
|
|
272
379
|
def add_items_command(subp):
|
|
273
380
|
"""Adds the quizzes items subcommand group to argparse parser subp"""
|
|
274
381
|
<<add quizzes items command to subp>>
|
|
@@ -288,7 +395,8 @@ The subcommands are organized by workflow:
|
|
|
288
395
|
content including questions (Chapter~\ref{quiz-view}).
|
|
289
396
|
\item[Analysis] [[analyse]] provides statistical summaries and AI-generated
|
|
290
397
|
insights for quiz/survey responses.
|
|
291
|
-
\item[Management] [[create]], [[edit]], and [[
|
|
398
|
+
\item[Management] [[create]], [[edit]], [[delete]], and [[export]] handle quiz
|
|
399
|
+
lifecycle, including backup and migration.
|
|
292
400
|
\item[Items] [[items]] and [[banks]] manage individual questions and item banks.
|
|
293
401
|
\end{description}
|
|
294
402
|
|
|
@@ -308,6 +416,7 @@ add_view_command(quizzes_subp)
|
|
|
308
416
|
add_create_command(quizzes_subp)
|
|
309
417
|
add_edit_command(quizzes_subp)
|
|
310
418
|
add_delete_command(quizzes_subp)
|
|
419
|
+
add_export_command(quizzes_subp)
|
|
311
420
|
add_items_command(quizzes_subp)
|
|
312
421
|
add_banks_command(quizzes_subp)
|
|
313
422
|
@
|
|
@@ -2287,55 +2396,108 @@ new quizzes.
|
|
|
2287
2396
|
\label{sec:quizzes-create}
|
|
2288
2397
|
|
|
2289
2398
|
The [[quizzes create]] command creates a new quiz in a course.
|
|
2290
|
-
Users can specify quiz settings via a JSON file
|
|
2399
|
+
Users can specify quiz settings and optionally questions via a JSON file.
|
|
2400
|
+
The JSON format supports the complete export/create workflow, allowing users
|
|
2401
|
+
to export a quiz with [[quizzes export]] and create a copy with this command.
|
|
2291
2402
|
|
|
2292
2403
|
<<add quizzes create command to subp>>=
|
|
2293
2404
|
create_parser = subp.add_parser("create",
|
|
2294
2405
|
help="Create a new quiz",
|
|
2295
|
-
description="""Create a new quiz in a course
|
|
2296
|
-
|
|
2406
|
+
description="""Create a new quiz in a course from a JSON file.
|
|
2407
|
+
|
|
2408
|
+
Use --example to see the full JSON format with all supported attributes.
|
|
2409
|
+
The JSON can include both quiz settings and questions, enabling a complete
|
|
2410
|
+
export/create workflow:
|
|
2411
|
+
|
|
2412
|
+
canvaslms quizzes export -c "Source Course" -a "Quiz" -I > quiz.json
|
|
2413
|
+
canvaslms quizzes create -c "Target Course" -f quiz.json
|
|
2414
|
+
|
|
2415
|
+
JSON STRUCTURE:
|
|
2416
|
+
{
|
|
2417
|
+
"quiz_type": "new" or "classic",
|
|
2418
|
+
"settings": { ... quiz settings ... },
|
|
2419
|
+
"items": [ ... ] (New Quizzes) or "questions": [ ... ] (Classic)
|
|
2420
|
+
}
|
|
2421
|
+
|
|
2422
|
+
SETTINGS FOR NEW QUIZZES (time_limit in seconds):
|
|
2423
|
+
title, instructions, time_limit, allowed_attempts, shuffle_questions,
|
|
2424
|
+
shuffle_answers, points_possible, due_at, unlock_at, lock_at
|
|
2425
|
+
|
|
2426
|
+
ADVANCED SETTINGS FOR NEW QUIZZES (in settings.quiz_settings):
|
|
2427
|
+
multiple_attempts: attempt_limit, score_to_keep, cooling_period_seconds
|
|
2428
|
+
result_view_settings: display_item_correct_answer, display_item_feedback, etc.
|
|
2429
|
+
|
|
2430
|
+
SETTINGS FOR CLASSIC QUIZZES (time_limit in minutes):
|
|
2431
|
+
title, description, quiz_type (assignment/practice_quiz/graded_survey/survey),
|
|
2432
|
+
time_limit, allowed_attempts, shuffle_questions, shuffle_answers,
|
|
2433
|
+
points_possible, published, due_at, unlock_at, lock_at,
|
|
2434
|
+
show_correct_answers, one_question_at_a_time, cant_go_back, access_code
|
|
2435
|
+
|
|
2436
|
+
For question format details, see: canvaslms quizzes items add --example""")
|
|
2297
2437
|
|
|
2298
2438
|
create_parser.set_defaults(func=create_command)
|
|
2299
2439
|
|
|
2300
2440
|
try:
|
|
2301
|
-
courses.add_course_option(create_parser, required=
|
|
2441
|
+
courses.add_course_option(create_parser, required=False)
|
|
2302
2442
|
except argparse.ArgumentError:
|
|
2303
2443
|
pass
|
|
2304
2444
|
|
|
2305
2445
|
create_parser.add_argument("-f", "--file",
|
|
2306
|
-
help="JSON file containing quiz settings",
|
|
2446
|
+
help="JSON file containing quiz settings and optionally questions",
|
|
2307
2447
|
type=str)
|
|
2308
2448
|
|
|
2309
2449
|
create_parser.add_argument("--type",
|
|
2310
2450
|
choices=["new", "classic"],
|
|
2311
|
-
default=
|
|
2451
|
+
default=None,
|
|
2312
2452
|
help="Quiz type: 'new' (New Quizzes) or 'classic' (Classic Quizzes). "
|
|
2313
|
-
"Default: new")
|
|
2453
|
+
"Auto-detected from JSON if not specified. Default: new")
|
|
2314
2454
|
|
|
2315
2455
|
create_parser.add_argument("--title", "-t",
|
|
2316
|
-
help="Quiz title (
|
|
2456
|
+
help="Quiz title (overrides title in JSON file)")
|
|
2457
|
+
|
|
2458
|
+
create_parser.add_argument("--example", "-E",
|
|
2459
|
+
action="store_true",
|
|
2460
|
+
help="Print example JSON for creating quizzes and exit")
|
|
2317
2461
|
@
|
|
2318
2462
|
|
|
2319
2463
|
|
|
2320
2464
|
\subsection{JSON format for quiz creation}
|
|
2321
2465
|
|
|
2322
|
-
The JSON file format
|
|
2323
|
-
|
|
2466
|
+
The JSON file format supports two structures: a simple settings-only format
|
|
2467
|
+
and a full format that includes questions.
|
|
2468
|
+
|
|
2469
|
+
\paragraph{Simple settings format.}
|
|
2470
|
+
For creating a quiz without questions (add questions later with
|
|
2471
|
+
[[quizzes items add]]):
|
|
2324
2472
|
\begin{verbatim}
|
|
2325
2473
|
{
|
|
2326
2474
|
"title": "Midterm Exam",
|
|
2327
2475
|
"instructions": "<p>Answer all questions.</p>",
|
|
2328
2476
|
"time_limit": 3600,
|
|
2329
|
-
"allowed_attempts": 2
|
|
2330
|
-
"shuffle_questions": true,
|
|
2331
|
-
"shuffle_answers": true,
|
|
2332
|
-
"points_possible": 100,
|
|
2333
|
-
"due_at": "2025-03-15T23:59:00Z"
|
|
2477
|
+
"allowed_attempts": 2
|
|
2334
2478
|
}
|
|
2335
2479
|
\end{verbatim}
|
|
2336
2480
|
|
|
2337
|
-
|
|
2338
|
-
(
|
|
2481
|
+
\paragraph{Full format with questions.}
|
|
2482
|
+
For creating a complete quiz including questions (the format produced by
|
|
2483
|
+
[[quizzes export]]):
|
|
2484
|
+
\begin{verbatim}
|
|
2485
|
+
{
|
|
2486
|
+
"quiz_type": "new",
|
|
2487
|
+
"settings": {
|
|
2488
|
+
"title": "Midterm Exam",
|
|
2489
|
+
"instructions": "<p>Answer all questions.</p>",
|
|
2490
|
+
"time_limit": 3600
|
|
2491
|
+
},
|
|
2492
|
+
"items": [ ... question items ... ]
|
|
2493
|
+
}
|
|
2494
|
+
\end{verbatim}
|
|
2495
|
+
|
|
2496
|
+
The command auto-detects the format: if a [[settings]] key exists, it uses the
|
|
2497
|
+
full format; otherwise it treats the entire JSON as settings.
|
|
2498
|
+
|
|
2499
|
+
For Classic Quizzes, the format uses [[description]] instead of [[instructions]],
|
|
2500
|
+
[[time_limit]] in minutes (not seconds), and [[questions]] instead of [[items]].
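A minimal sketch of the Classic variant (values are placeholders):
\begin{verbatim}
{
  "quiz_type": "classic",
  "settings": {
    "title": "Midterm Exam",
    "description": "<p>Answer all questions.</p>",
    "time_limit": 60
  },
  "questions": [ ... question objects ... ]
}
\end{verbatim}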
|
|
2339
2501
|
|
|
2340
2502
|
|
|
2341
2503
|
\subsection{Processing the create command}
|
|
@@ -2344,26 +2506,67 @@ The [[create_command]] function processes the create request, reading settings
|
|
|
2344
2506
|
from a JSON file if provided, then calling the appropriate API based on the
|
|
2345
2507
|
selected quiz type.
|
|
2346
2508
|
|
|
2509
|
+
When [[--example]] is provided, we print example JSON for both quiz types and
|
|
2510
|
+
exit immediately without requiring course or file arguments:
|
|
2511
|
+
\begin{minted}{bash}
|
|
2512
|
+
canvaslms quizzes create --example > quiz.json
|
|
2513
|
+
# Edit quiz.json to customize
|
|
2514
|
+
canvaslms quizzes create -c "My Course" -f quiz.json
|
|
2515
|
+
\end{minted}
|
|
2516
|
+
|
|
2517
|
+
The command supports two JSON formats:
|
|
2518
|
+
\begin{enumerate}
|
|
2519
|
+
\item \textbf{Full format} with [[settings]] key: used by [[quizzes export]]
|
|
2520
|
+
\item \textbf{Simple format} without [[settings]]: treats entire JSON as settings
|
|
2521
|
+
\end{enumerate}
|
|
2522
|
+
|
|
2523
|
+
If the JSON contains [[items]] (New Quizzes) or [[questions]] (Classic Quizzes),
|
|
2524
|
+
those are added after the quiz is created.
|
|
2525
|
+
|
|
2347
2526
|
<<functions>>=
|
|
2348
2527
|
def create_command(config, canvas, args):
|
|
2349
2528
|
"""Creates a new quiz in a course"""
|
|
2529
|
+
# Handle --example flag first (doesn't require course/file)
|
|
2530
|
+
if getattr(args, 'example', False):
|
|
2531
|
+
print_full_quiz_example_json()
|
|
2532
|
+
return
|
|
2533
|
+
|
|
2534
|
+
# Validate required arguments when not using --example
|
|
2535
|
+
if not getattr(args, 'course', None):
|
|
2536
|
+
canvaslms.cli.err(1, "Please specify -c/--course or use --example")
|
|
2537
|
+
if not getattr(args, 'file', None) and not getattr(args, 'title', None):
|
|
2538
|
+
canvaslms.cli.err(1, "Please specify -f/--file or --title or use --example")
|
|
2539
|
+
|
|
2350
2540
|
# Get the course
|
|
2351
2541
|
course_list = courses.process_course_option(canvas, args)
|
|
2352
2542
|
if len(course_list) != 1:
|
|
2353
2543
|
canvaslms.cli.err(1, "Please specify exactly one course for quiz creation")
|
|
2354
2544
|
course = course_list[0]
|
|
2355
2545
|
|
|
2356
|
-
# Read quiz
|
|
2357
|
-
|
|
2546
|
+
# Read quiz data from file or use defaults
|
|
2547
|
+
quiz_data = {}
|
|
2358
2548
|
if args.file:
|
|
2359
2549
|
try:
|
|
2360
2550
|
with open(args.file, 'r', encoding='utf-8') as f:
|
|
2361
|
-
|
|
2551
|
+
quiz_data = json.load(f)
|
|
2362
2552
|
except FileNotFoundError:
|
|
2363
2553
|
canvaslms.cli.err(1, f"File not found: {args.file}")
|
|
2364
2554
|
except json.JSONDecodeError as e:
|
|
2365
2555
|
canvaslms.cli.err(1, f"Invalid JSON in {args.file}: {e}")
|
|
2366
2556
|
|
|
2557
|
+
# Determine quiz type from args or JSON
|
|
2558
|
+
quiz_type = args.type
|
|
2559
|
+
if quiz_type is None:
|
|
2560
|
+
quiz_type = quiz_data.get('quiz_type', 'new')
|
|
2561
|
+
|
|
2562
|
+
# Extract settings: support both full format (with 'settings' key) and simple format
|
|
2563
|
+
if 'settings' in quiz_data:
|
|
2564
|
+
quiz_params = quiz_data['settings'].copy()
|
|
2565
|
+
else:
|
|
2566
|
+
# Simple format: entire JSON is settings (excluding items/questions)
|
|
2567
|
+
quiz_params = {k: v for k, v in quiz_data.items()
|
|
2568
|
+
if k not in ('quiz_type', 'items', 'questions')}
|
|
2569
|
+
|
|
2367
2570
|
# Command-line title overrides file
|
|
2368
2571
|
if args.title:
|
|
2369
2572
|
quiz_params['title'] = args.title
|
|
@@ -2372,15 +2575,35 @@ def create_command(config, canvas, args):
|
|
|
2372
2575
|
canvaslms.cli.err(1, "Quiz title is required (use --title or include in JSON)")
|
|
2373
2576
|
|
|
2374
2577
|
# Create the quiz
|
|
2375
|
-
|
|
2376
|
-
|
|
2578
|
+
requester = canvas._Canvas__requester
|
|
2579
|
+
if quiz_type == "new":
|
|
2580
|
+
quiz = create_new_quiz(course, requester, quiz_params)
|
|
2377
2581
|
else:
|
|
2378
2582
|
quiz = create_classic_quiz(course, quiz_params)
|
|
2379
2583
|
|
|
2380
|
-
if quiz:
|
|
2381
|
-
print(f"Created quiz: {quiz_params.get('title')} (ID: {quiz.get('id', 'unknown')})")
|
|
2382
|
-
else:
|
|
2584
|
+
if not quiz:
|
|
2383
2585
|
canvaslms.cli.err(1, "Failed to create quiz")
|
|
2586
|
+
|
|
2587
|
+
quiz_id = quiz.get('id', 'unknown')
|
|
2588
|
+
print(f"Created quiz: {quiz_params.get('title')} (ID: {quiz_id})")
|
|
2589
|
+
|
|
2590
|
+
# Add questions if present in JSON
|
|
2591
|
+
items = quiz_data.get('items', [])
|
|
2592
|
+
questions = quiz_data.get('questions', [])
|
|
2593
|
+
|
|
2594
|
+
if quiz_type == "new" and items:
|
|
2595
|
+
print(f"Adding {len(items)} question(s)...")
|
|
2596
|
+
success, failed = add_new_quiz_items(course, quiz_id, requester, items)
|
|
2597
|
+
print(f"Added {success} question(s), {failed} failed")
|
|
2598
|
+
elif quiz_type == "classic" and questions:
|
|
2599
|
+
# For classic quizzes, we need to get the quiz object to add questions
|
|
2600
|
+
try:
|
|
2601
|
+
quiz_obj = course.get_quiz(quiz_id)
|
|
2602
|
+
print(f"Adding {len(questions)} question(s)...")
|
|
2603
|
+
success, failed = add_classic_questions(quiz_obj, questions)
|
|
2604
|
+
print(f"Added {success} question(s), {failed} failed")
|
|
2605
|
+
except Exception as e:
|
|
2606
|
+
canvaslms.cli.warn(f"Failed to add questions: {e}")
|
|
2384
2607
|
@
|
|
2385
2608
|
|
|
2386
2609
|
|
|
@@ -2389,6 +2612,13 @@ def create_command(config, canvas, args):
|
|
|
2389
2612
|
The New Quizzes API uses a different endpoint than Classic Quizzes.
|
|
2390
2613
|
We make a direct POST request to [[/api/quiz/v1/courses/:id/quizzes]].
|
|
2391
2614
|
|
|
2615
|
+
The API expects nested parameters for [[quiz_settings]], which contains
|
|
2616
|
+
[[multiple_attempts]] and [[result_view_settings]] sub-structures. We need
|
|
2617
|
+
to flatten these into the format the API expects:
|
|
2618
|
+
\begin{verbatim}
|
|
2619
|
+
quiz[quiz_settings][multiple_attempts][cooling_period_seconds]=3600
|
|
2620
|
+
\end{verbatim}
|
|
2621
|
+
|
|
2392
2622
|
<<functions>>=
|
|
2393
2623
|
def create_new_quiz(course, requester, quiz_params):
|
|
2394
2624
|
"""Creates a New Quiz via the New Quizzes API
|
|
@@ -2396,17 +2626,15 @@ def create_new_quiz(course, requester, quiz_params):
|
|
|
2396
2626
|
Args:
|
|
2397
2627
|
course: Course object
|
|
2398
2628
|
requester: Canvas API requester for direct HTTP calls
|
|
2399
|
-
quiz_params: Dictionary of quiz parameters
|
|
2629
|
+
quiz_params: Dictionary of quiz parameters, may include nested quiz_settings
|
|
2400
2630
|
|
|
2401
2631
|
Returns:
|
|
2402
2632
|
Dictionary with created quiz data, or None on failure
|
|
2403
2633
|
"""
|
|
2404
2634
|
endpoint = f"courses/{course.id}/quizzes"
|
|
2405
2635
|
|
|
2406
|
-
# Build the request parameters
|
|
2407
|
-
params =
|
|
2408
|
-
for key, value in quiz_params.items():
|
|
2409
|
-
params[f'quiz[{key}]'] = value
|
|
2636
|
+
# Build the request parameters, handling nested quiz_settings
|
|
2637
|
+
params = build_new_quiz_api_params(quiz_params)
|
|
2410
2638
|
|
|
2411
2639
|
try:
|
|
2412
2640
|
response = requester.request(
|
|
@@ -2421,6 +2649,53 @@ def create_new_quiz(course, requester, quiz_params):
|
|
|
2421
2649
|
return None
|
|
2422
2650
|
@
|
|
2423
2651
|
|
|
2652
|
+
The [[build_new_quiz_api_params]] function handles the conversion of our
|
|
2653
|
+
nested dictionary structure into the flat parameter format required by the
|
|
2654
|
+
Canvas API. It recursively processes [[quiz_settings]] and its sub-structures.
|
|
2655
|
+
|
|
2656
|
+
<<functions>>=
|
|
2657
|
+
def build_new_quiz_api_params(quiz_params):
|
|
2658
|
+
"""Converts quiz parameters to Canvas API format
|
|
2659
|
+
|
|
2660
|
+
Handles nested structures like quiz_settings.multiple_attempts by
|
|
2661
|
+
flattening them into the format:
|
|
2662
|
+
quiz[quiz_settings][multiple_attempts][key]=value
|
|
2663
|
+
|
|
2664
|
+
Args:
|
|
2665
|
+
quiz_params: Dictionary with quiz parameters, may include nested dicts
|
|
2666
|
+
|
|
2667
|
+
Returns:
|
|
2668
|
+
Dictionary suitable for passing to requester.request()
|
|
2669
|
+
"""
|
|
2670
|
+
params = {}
|
|
2671
|
+
|
|
2672
|
+
for key, value in quiz_params.items():
|
|
2673
|
+
if value is None:
|
|
2674
|
+
continue
|
|
2675
|
+
|
|
2676
|
+
if key == 'quiz_settings' and isinstance(value, dict):
|
|
2677
|
+
# Handle nested quiz_settings structure
|
|
2678
|
+
for settings_key, settings_value in value.items():
|
|
2679
|
+
if settings_value is None:
|
|
2680
|
+
continue
|
|
2681
|
+
|
|
2682
|
+
if isinstance(settings_value, dict):
|
|
2683
|
+
# Handle doubly-nested structures like multiple_attempts, result_view_settings
|
|
2684
|
+
for nested_key, nested_value in settings_value.items():
|
|
2685
|
+
if nested_value is not None:
|
|
2686
|
+
param_key = f'quiz[quiz_settings][{settings_key}][{nested_key}]'
|
|
2687
|
+
params[param_key] = nested_value
|
|
2688
|
+
else:
|
|
2689
|
+
# Direct quiz_settings value (e.g., shuffle_answers)
|
|
2690
|
+
param_key = f'quiz[quiz_settings][{settings_key}]'
|
|
2691
|
+
params[param_key] = settings_value
|
|
2692
|
+
else:
|
|
2693
|
+
# Top-level quiz parameter
|
|
2694
|
+
params[f'quiz[{key}]'] = value
|
|
2695
|
+
|
|
2696
|
+
return params
|
|
2697
|
+
@
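To illustrate the flattening, here is a small input and the parameters it should produce (the expected output is written out by hand, not generated by running the code):
\begin{minted}{python}
params = build_new_quiz_api_params({
    "title": "Practice Quiz",
    "quiz_settings": {
        "shuffle_answers": True,
        "multiple_attempts": {"cooling_period_seconds": 3600},
    },
})
# Expected contents of params:
# {
#   "quiz[title]": "Practice Quiz",
#   "quiz[quiz_settings][shuffle_answers]": True,
#   "quiz[quiz_settings][multiple_attempts][cooling_period_seconds]": 3600,
# }
\end{minted}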
|
|
2698
|
+
|
|
2424
2699
|
|
|
2425
2700
|
\subsection{Creating a Classic Quiz}
|
|
2426
2701
|
|
|
@@ -2605,6 +2880,103 @@ QUIZ_SCHEMA = {
|
|
|
2605
2880
|
@
|
|
2606
2881
|
|
|
2607
2882
|
|
|
2883
|
+
\subsection{New Quiz settings schema}
|
|
2884
|
+
\label{sec:new-quiz-settings-schema}
|
|
2885
|
+
|
|
2886
|
+
New Quizzes use a more sophisticated settings structure than Classic Quizzes.
|
|
2887
|
+
The [[quiz_settings]] object contains nested structures for multiple attempts
|
|
2888
|
+
and result visibility. These settings are particularly important for formative
|
|
2889
|
+
assessments where students should be able to:
|
|
2890
|
+
\begin{itemize}
|
|
2891
|
+
\item Retry the quiz multiple times (with optional waiting periods)
|
|
2892
|
+
\item See their score but not the correct answers
|
|
2893
|
+
\item Build on their previous attempt rather than starting fresh
|
|
2894
|
+
\end{itemize}
|
|
2895
|
+
|
|
2896
|
+
\paragraph{Multiple attempts settings.}
|
|
2897
|
+
The [[multiple_attempts]] structure controls how many times students can take
|
|
2898
|
+
the quiz and what happens between attempts:
|
|
2899
|
+
|
|
2900
|
+
<<constants>>=
|
|
2901
|
+
NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA = {
|
|
2902
|
+
'multiple_attempts_enabled': {
|
|
2903
|
+
'default': False,
|
|
2904
|
+
'description': 'Whether multiple attempts are allowed'
|
|
2905
|
+
},
|
|
2906
|
+
'attempt_limit': {
|
|
2907
|
+
'default': True,
|
|
2908
|
+
'description': 'Whether there is a maximum number of attempts (False = unlimited)'
|
|
2909
|
+
},
|
|
2910
|
+
'max_attempts': {
|
|
2911
|
+
'default': 1,
|
|
2912
|
+
'description': 'Maximum number of attempts (only used if attempt_limit is True)'
|
|
2913
|
+
},
|
|
2914
|
+
'score_to_keep': {
|
|
2915
|
+
'default': 'highest',
|
|
2916
|
+
'description': 'Which score to keep: average, first, highest, or latest'
|
|
2917
|
+
},
|
|
2918
|
+
'cooling_period': {
|
|
2919
|
+
'default': False,
|
|
2920
|
+
'description': 'Whether to require a waiting period between attempts'
|
|
2921
|
+
},
|
|
2922
|
+
'cooling_period_seconds': {
|
|
2923
|
+
'default': None,
|
|
2924
|
+
'description': 'Required waiting time between attempts in seconds (e.g., 3600 = 1 hour)'
|
|
2925
|
+
},
|
|
2926
|
+
}
|
|
2927
|
+
@
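As an illustration only (the helper below is hypothetical, not part of the module), the schema's defaults could be merged with user-supplied settings like this:
\begin{minted}{python}
def apply_schema_defaults(user_settings, schema):
    """Hypothetical helper: fill in schema defaults for missing keys."""
    merged = {key: spec["default"] for key, spec in schema.items()}
    merged.update(user_settings or {})
    return merged

attempts = apply_schema_defaults({"score_to_keep": "latest"},
                                 NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA)
# attempts["score_to_keep"] == "latest"; every other key takes its default.
\end{minted}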
|
|
2928
|
+
|
|
2929
|
+
\paragraph{Result view settings.}
|
|
2930
|
+
The [[result_view_settings]] structure controls what students see after
|
|
2931
|
+
submitting the quiz. This is crucial for formative assessments where you want
|
|
2932
|
+
students to know their score but not memorize correct answers:
|
|
2933
|
+
|
|
2934
|
+
<<constants>>=
|
|
2935
|
+
NEW_QUIZ_RESULT_VIEW_SCHEMA = {
|
|
2936
|
+
'result_view_restricted': {
|
|
2937
|
+
'default': False,
|
|
2938
|
+
'description': 'Whether to restrict what students see in results'
|
|
2939
|
+
},
|
|
2940
|
+
'display_points_awarded': {
|
|
2941
|
+
'default': True,
|
|
2942
|
+
'description': 'Show points earned (requires result_view_restricted=True)'
|
|
2943
|
+
},
|
|
2944
|
+
'display_points_possible': {
|
|
2945
|
+
'default': True,
|
|
2946
|
+
'description': 'Show total points possible (requires result_view_restricted=True)'
|
|
2947
|
+
},
|
|
2948
|
+
'display_items': {
|
|
2949
|
+
'default': True,
|
|
2950
|
+
'description': 'Show questions in results (requires result_view_restricted=True)'
|
|
2951
|
+
},
|
|
2952
|
+
'display_item_response': {
|
|
2953
|
+
'default': True,
|
|
2954
|
+
'description': 'Show student responses (requires display_items=True)'
|
|
2955
|
+
},
|
|
2956
|
+
'display_item_response_qualifier': {
|
|
2957
|
+
'default': 'always',
|
|
2958
|
+
'description': 'When to show responses: always, once_per_attempt, after_last_attempt, once_after_last_attempt'
|
|
2959
|
+
},
|
|
2960
|
+
'display_item_response_correctness': {
|
|
2961
|
+
'default': True,
|
|
2962
|
+
'description': 'Show whether answers are correct/incorrect (requires display_item_response=True)'
|
|
2963
|
+
},
|
|
2964
|
+
'display_item_response_correctness_qualifier': {
|
|
2965
|
+
'default': 'always',
|
|
2966
|
+
'description': 'When to show correctness: always, after_last_attempt'
|
|
2967
|
+
},
|
|
2968
|
+
'display_item_correct_answer': {
|
|
2969
|
+
'default': True,
|
|
2970
|
+
'description': 'Show the correct answer (requires display_item_response_correctness=True)'
|
|
2971
|
+
},
|
|
2972
|
+
'display_item_feedback': {
|
|
2973
|
+
'default': True,
|
|
2974
|
+
'description': 'Show item feedback (requires display_items=True)'
|
|
2975
|
+
},
|
|
2976
|
+
}
|
|
2977
|
+
@
|
|
2978
|
+
|
|
2979
|
+
|
|
2608
2980
|
\subsection{Command-line interface}
|
|
2609
2981
|
|
|
2610
2982
|
The edit command takes course and quiz selection options. The [[-f]] option
|
|
@@ -2743,7 +3115,7 @@ def edit_quiz_interactive(quiz, requester, html_mode=False):
|
|
|
2743
3115
|
'updated', 'skipped', or 'error'
|
|
2744
3116
|
"""
|
|
2745
3117
|
# Extract current quiz attributes including instructions
|
|
2746
|
-
current_attrs = extract_quiz_attributes(quiz)
|
|
3118
|
+
current_attrs = extract_quiz_attributes(quiz, requester)
|
|
2747
3119
|
|
|
2748
3120
|
# Get content from editor - instructions becomes the body
|
|
2749
3121
|
result = content.get_content_from_editor(
|
|
@@ -2789,15 +3161,20 @@ the quiz object. We use [[QUIZ_SCHEMA]] to determine which attributes to
|
|
|
2789
3161
|
extract, and also include [[instructions]] separately (since it becomes the
|
|
2790
3162
|
body content, not a YAML attribute).
|
|
2791
3163
|
|
|
3164
|
+
For New Quizzes, we also extract [[quiz_settings]], which contains the
|
|
3165
|
+
important [[multiple_attempts]] and [[result_view_settings]] structures.
|
|
3166
|
+
|
|
2792
3167
|
<<functions>>=
|
|
2793
|
-
def extract_quiz_attributes(quiz):
|
|
3168
|
+
def extract_quiz_attributes(quiz, requester=None):
|
|
2794
3169
|
"""Extract editable attributes from a quiz object
|
|
2795
3170
|
|
|
2796
3171
|
Args:
|
|
2797
3172
|
quiz: Quiz object (New Quiz or Classic Quiz)
|
|
3173
|
+
requester: Canvas API requester (needed for New Quiz settings)
|
|
2798
3174
|
|
|
2799
3175
|
Returns:
|
|
2800
3176
|
Dictionary of attributes matching QUIZ_SCHEMA, plus 'instructions'
|
|
3177
|
+
and 'quiz_settings' (for New Quizzes)
|
|
2801
3178
|
"""
|
|
2802
3179
|
attrs = {}
|
|
2803
3180
|
|
|
@@ -2818,12 +3195,47 @@ def extract_quiz_attributes(quiz):
|
|
|
2818
3195
|
# Add instructions (not in schema, but needed for content_attr)
|
|
2819
3196
|
if is_new_quiz(quiz):
|
|
2820
3197
|
attrs['instructions'] = getattr(quiz, 'instructions', '') or ''
|
|
3198
|
+
# Fetch quiz_settings for New Quizzes
|
|
3199
|
+
if requester:
|
|
3200
|
+
quiz_settings = fetch_new_quiz_settings(quiz, requester)
|
|
3201
|
+
if quiz_settings:
|
|
3202
|
+
attrs['quiz_settings'] = quiz_settings
|
|
2821
3203
|
else:
|
|
2822
3204
|
attrs['instructions'] = getattr(quiz, 'description', '') or ''
|
|
2823
3205
|
|
|
2824
3206
|
return attrs
|
|
2825
3207
|
@
|
|
2826
3208
|
|
|
3209
|
+
\paragraph{Fetching New Quiz settings.}
|
|
3210
|
+
The New Quizzes API returns [[quiz_settings]] as part of the quiz object,
|
|
3211
|
+
but the [[canvasapi]] library may not expose all fields. We make a direct
|
|
3212
|
+
API call to get the complete settings.
|
|
3213
|
+
|
|
3214
|
+
<<functions>>=
|
|
3215
|
+
def fetch_new_quiz_settings(quiz, requester):
|
|
3216
|
+
"""Fetch quiz_settings from the New Quizzes API
|
|
3217
|
+
|
|
3218
|
+
Args:
|
|
3219
|
+
quiz: Quiz object (must have .id and .course attributes)
|
|
3220
|
+
requester: Canvas API requester
|
|
3221
|
+
|
|
3222
|
+
Returns:
|
|
3223
|
+
Dictionary with quiz_settings, or None if unavailable
|
|
3224
|
+
"""
|
|
3225
|
+
try:
|
|
3226
|
+
endpoint = f"courses/{quiz.course.id}/quizzes/{quiz.id}"
|
|
3227
|
+
response = requester.request(
|
|
3228
|
+
method='GET',
|
|
3229
|
+
endpoint=endpoint,
|
|
3230
|
+
_url="new_quizzes"
|
|
3231
|
+
)
|
|
3232
|
+
data = response.json()
|
|
3233
|
+
return data.get('quiz_settings', None)
|
|
3234
|
+
except Exception as e:
|
|
3235
|
+
canvaslms.cli.warn(f"Failed to fetch New Quiz settings: {e}")
|
|
3236
|
+
return None
|
|
3237
|
+
@
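A sketch of how the helper can be combined with the rest of the module (assuming a [[quiz]] object with [[.course]] set, obtained as elsewhere in this chapter):
\begin{minted}{python}
requester = canvas._Canvas__requester
settings = fetch_new_quiz_settings(quiz, requester)
if settings:
    attempts = settings.get("multiple_attempts", {})
    print(attempts.get("cooling_period_seconds"))
\end{minted}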
|
|
3238
|
+
|
|
2827
3239
|
|
|
2828
3240
|
\subsection{Applying quiz edits}
|
|
2829
3241
|
|
|
@@ -2880,7 +3292,7 @@ def quiz_attributes_to_api_params(attributes, is_new, html_body):
|
|
|
2880
3292
|
html_body: HTML content for instructions/description
|
|
2881
3293
|
|
|
2882
3294
|
Returns:
|
|
2883
|
-
Dictionary suitable for Canvas API
|
|
3295
|
+
Dictionary suitable for Canvas API (nested for New Quizzes)
|
|
2884
3296
|
"""
|
|
2885
3297
|
params = {}
|
|
2886
3298
|
|
|
@@ -2909,11 +3321,19 @@ def quiz_attributes_to_api_params(attributes, is_new, html_body):
|
|
|
2909
3321
|
continue
|
|
2910
3322
|
|
|
2911
3323
|
# Skip hide_results for New Quizzes: result visibility is controlled
|
|
2912
|
-
#
|
|
2913
|
-
# result view" setting), not via this API parameter.
|
|
3324
|
+
# through quiz_settings.result_view_settings, not this parameter.
|
|
2914
3325
|
if key == 'hide_results' and is_new:
|
|
2915
3326
|
continue
|
|
2916
3327
|
|
|
3328
|
+
# Pass through quiz_settings as-is for New Quizzes
|
|
3329
|
+
if key == 'quiz_settings' and is_new:
|
|
3330
|
+
params['quiz_settings'] = value
|
|
3331
|
+
continue
|
|
3332
|
+
|
|
3333
|
+
# Skip instructions - handled separately as body
|
|
3334
|
+
if key == 'instructions':
|
|
3335
|
+
continue
|
|
3336
|
+
|
|
2917
3337
|
params[key] = value
|
|
2918
3338
|
|
|
2919
3339
|
# Add body with appropriate field name (include even if empty to allow clearing)
|
|
@@ -2928,6 +3348,9 @@ def quiz_attributes_to_api_params(attributes, is_new, html_body):
|
|
|
2928
3348
|
|
|
2929
3349
|
\subsection{Updating a New Quiz}
|
|
2930
3350
|
|
|
3351
|
+
Updating a New Quiz uses the same nested parameter structure as creation.
|
|
3352
|
+
We reuse the [[build_new_quiz_api_params]] function to handle the conversion.
|
|
3353
|
+
|
|
2931
3354
|
<<functions>>=
|
|
2932
3355
|
def update_new_quiz(course, assignment_id, requester, quiz_params):
|
|
2933
3356
|
"""Updates a New Quiz via the New Quizzes API
|
|
@@ -2936,16 +3359,15 @@ def update_new_quiz(course, assignment_id, requester, quiz_params):
|
|
|
2936
3359
|
course: Course object
|
|
2937
3360
|
assignment_id: The quiz/assignment ID
|
|
2938
3361
|
requester: Canvas API requester
|
|
2939
|
-
quiz_params: Dictionary of parameters to update
|
|
3362
|
+
quiz_params: Dictionary of parameters to update, may include nested quiz_settings
|
|
2940
3363
|
|
|
2941
3364
|
Returns:
|
|
2942
3365
|
True on success, False on failure
|
|
2943
3366
|
"""
|
|
2944
3367
|
endpoint = f"courses/{course.id}/quizzes/{assignment_id}"
|
|
2945
3368
|
|
|
2946
|
-
|
|
2947
|
-
|
|
2948
|
-
params[f'quiz[{key}]'] = value
|
|
3369
|
+
# Build the request parameters, handling nested quiz_settings
|
|
3370
|
+
params = build_new_quiz_api_params(quiz_params)
|
|
2949
3371
|
|
|
2950
3372
|
try:
|
|
2951
3373
|
requester.request(
|
|
@@ -3104,6 +3526,213 @@ def delete_classic_quiz(quiz):
|
|
|
3104
3526
|
@
|
|
3105
3527
|
|
|
3106
3528
|
|
|
3529
|
+
\section{The [[quizzes export]] subcommand}
|
|
3530
|
+
\label{sec:quizzes-export}
|
|
3531
|
+
|
|
3532
|
+
The [[quizzes export]] command exports a complete quiz (settings and questions)
|
|
3533
|
+
to JSON format. The output is designed to be directly usable with
|
|
3534
|
+
[[quizzes create]], enabling a complete backup and migration workflow:
|
|
3535
|
+
\begin{minted}{bash}
|
|
3536
|
+
# Export a quiz from one course
|
|
3537
|
+
canvaslms quizzes export -c "Source Course" -a "Midterm" -I > midterm.json
|
|
3538
|
+
|
|
3539
|
+
# Create the same quiz in another course
|
|
3540
|
+
canvaslms quizzes create -c "Target Course" -f midterm.json
|
|
3541
|
+
\end{minted}
|
|
3542
|
+
|
|
3543
|
+
The [[--importable]] flag produces clean JSON suitable for import, stripping
|
|
3544
|
+
Canvas-specific IDs and metadata that would conflict when creating a new quiz.
|
|
3545
|
+
|
|
3546
|
+
<<add quizzes export command to subp>>=
|
|
3547
|
+
export_parser = subp.add_parser("export",
|
|
3548
|
+
help="Export a complete quiz to JSON",
|
|
3549
|
+
description="""Export a quiz (settings and questions) to JSON format.
|
|
3550
|
+
|
|
3551
|
+
The output can be directly used with 'quizzes create' to duplicate a quiz
|
|
3552
|
+
in another course or create a backup.
|
|
3553
|
+
|
|
3554
|
+
WORKFLOW EXAMPLE:
|
|
3555
|
+
# Export quiz from source course
|
|
3556
|
+
canvaslms quizzes export -c "Course A" -a "Quiz Name" -I > quiz.json
|
|
3557
|
+
|
|
3558
|
+
# Create identical quiz in target course
|
|
3559
|
+
canvaslms quizzes create -c "Course B" -f quiz.json
|
|
3560
|
+
|
|
3561
|
+
OUTPUT FORMAT:
|
|
3562
|
+
{
|
|
3563
|
+
"quiz_type": "new" or "classic",
|
|
3564
|
+
"settings": { ... quiz settings ... },
|
|
3565
|
+
"items": [ ... ] (New Quizzes) or "questions": [ ... ] (Classic)
|
|
3566
|
+
}
|
|
3567
|
+
|
|
3568
|
+
Use --importable/-I for clean JSON ready for 'quizzes create'.
|
|
3569
|
+
Without -I, the output includes Canvas IDs and metadata for reference.""")
|
|
3570
|
+
|
|
3571
|
+
export_parser.set_defaults(func=export_command)
|
|
3572
|
+
|
|
3573
|
+
try:
|
|
3574
|
+
courses.add_course_option(export_parser, required=True)
|
|
3575
|
+
except argparse.ArgumentError:
|
|
3576
|
+
pass
|
|
3577
|
+
|
|
3578
|
+
export_parser.add_argument("-a", "--assignment",
|
|
3579
|
+
required=True,
|
|
3580
|
+
help="Regex matching quiz title or Canvas ID")
|
|
3581
|
+
|
|
3582
|
+
export_parser.add_argument("--importable", "-I",
|
|
3583
|
+
action="store_true",
|
|
3584
|
+
help="Output clean JSON directly usable with 'quizzes create' command")
|
|
3585
|
+
|
|
3586
|
+
export_parser.add_argument("--include-banks", "-B",
|
|
3587
|
+
action="store_true",
|
|
3588
|
+
default=True,
|
|
3589
|
+
help="Include questions from referenced item banks (default: true)")
|
|
3590
|
+
|
|
3591
|
+
export_parser.add_argument("--no-banks",
|
|
3592
|
+
action="store_true",
|
|
3593
|
+
help="Don't expand item bank references")
|
|
3594
|
+
@
|
|
3595
|
+
|
|
3596
|
+
|
|
3597
|
+
\subsection{Processing the export command}
|
|
3598
|
+
|
|
3599
|
+
The export command finds the quiz, extracts its settings, and exports all
|
|
3600
|
+
questions. We reuse the existing [[export_new_quiz_items]] and
|
|
3601
|
+
[[export_classic_questions]] functions for question export.
|
|
3602
|
+
|
|
3603
|
+
<<functions>>=
|
|
3604
|
+
def export_command(config, canvas, args):
|
|
3605
|
+
"""Exports a complete quiz (settings + questions) to JSON"""
|
|
3606
|
+
# Find the quiz
|
|
3607
|
+
course_list = courses.process_course_option(canvas, args)
|
|
3608
|
+
quiz_list = list(filter_quizzes(course_list, args.assignment))
|
|
3609
|
+
|
|
3610
|
+
if not quiz_list:
|
|
3611
|
+
canvaslms.cli.err(1, f"No quiz found matching: {args.assignment}")
|
|
3612
|
+
|
|
3613
|
+
quiz = quiz_list[0]
|
|
3614
|
+
requester = canvas._Canvas__requester
|
|
3615
|
+
include_banks = args.include_banks and not args.no_banks
|
|
3616
|
+
importable = getattr(args, 'importable', False)
|
|
3617
|
+
|
|
3618
|
+
# Build the export structure
|
|
3619
|
+
if is_new_quiz(quiz):
|
|
3620
|
+
export = export_full_new_quiz(quiz, requester, include_banks, importable)
|
|
3621
|
+
else:
|
|
3622
|
+
export = export_full_classic_quiz(quiz, importable)
|
|
3623
|
+
|
|
3624
|
+
# Output as JSON
|
|
3625
|
+
print(json.dumps(export, indent=2, ensure_ascii=False))
|
|
3626
|
+
@
|
|
3627
|
+
|
|
3628
|
+
|
|
3629
|
+
\subsection{Exporting a complete New Quiz}
|
|
3630
|
+
|
|
3631
|
+
For New Quizzes, we extract settings from the quiz object and combine them
|
|
3632
|
+
with the items export. We also fetch and include [[quiz_settings]], which
|
|
3633
|
+
contains the important [[multiple_attempts]] and [[result_view_settings]]
|
|
3634
|
+
structures. The [[--importable]] flag triggers cleaning of Canvas-specific
|
|
3635
|
+
metadata.
|
|
3636
|
+
|
|
3637
|
+
<<functions>>=
|
|
3638
|
+
def export_full_new_quiz(quiz, requester, include_banks=True, importable=False):
|
|
3639
|
+
"""Exports a complete New Quiz with settings and items
|
|
3640
|
+
|
|
3641
|
+
Args:
|
|
3642
|
+
quiz: Quiz object (must have .id and .course attributes)
|
|
3643
|
+
requester: Canvas API requester
|
|
3644
|
+
include_banks: If True, expand Bank/BankEntry items to include bank questions
|
|
3645
|
+
importable: If True, clean output for direct import
|
|
3646
|
+
|
|
3647
|
+
Returns:
|
|
3648
|
+
Dictionary with quiz_type, settings (including quiz_settings), and items
|
|
3649
|
+
"""
|
|
3650
|
+
# Extract basic settings
|
|
3651
|
+
settings = {
|
|
3652
|
+
'title': getattr(quiz, 'title', ''),
|
|
3653
|
+
'instructions': getattr(quiz, 'instructions', '') or '',
|
|
3654
|
+
'time_limit': getattr(quiz, 'time_limit', None),
|
|
3655
|
+
'points_possible': getattr(quiz, 'points_possible', None),
|
|
3656
|
+
'due_at': getattr(quiz, 'due_at', None),
|
|
3657
|
+
'unlock_at': getattr(quiz, 'unlock_at', None),
|
|
3658
|
+
'lock_at': getattr(quiz, 'lock_at', None),
|
|
3659
|
+
}
|
|
3660
|
+
|
|
3661
|
+
# Fetch quiz_settings from the API (contains multiple_attempts, result_view_settings, etc.)
|
|
3662
|
+
quiz_settings = fetch_new_quiz_settings(quiz, requester)
|
|
3663
|
+
if quiz_settings:
|
|
3664
|
+
settings['quiz_settings'] = quiz_settings
|
|
3665
|
+
|
|
3666
|
+
# Get items
|
|
3667
|
+
items_export = export_new_quiz_items(quiz, requester, include_banks=include_banks)
|
|
3668
|
+
items = items_export.get('items', [])
|
|
3669
|
+
|
|
3670
|
+
# Clean for import if requested
|
|
3671
|
+
if importable:
|
|
3672
|
+
items_cleaned = clean_for_import({'items': items}, quiz_type='new_quiz')
|
|
3673
|
+
items = items_cleaned.get('items', [])
|
|
3674
|
+
|
|
3675
|
+
return {
|
|
3676
|
+
'quiz_type': 'new',
|
|
3677
|
+
'settings': settings,
|
|
3678
|
+
'items': items
|
|
3679
|
+
}
|
|
3680
|
+
@
|
|
3681
|
+
|
|
3682
|
+
|
|
3683
|
+
\subsection{Exporting a complete Classic Quiz}
|
|
3684
|
+
|
|
3685
|
+
For Classic Quizzes, we extract settings and questions using the canvasapi
|
|
3686
|
+
library's native methods.
|
|
3687
|
+
|
|
3688
|
+
<<functions>>=
|
|
3689
|
+
def export_full_classic_quiz(quiz, importable=False):
|
|
3690
|
+
"""Exports a complete Classic Quiz with settings and questions
|
|
3691
|
+
|
|
3692
|
+
Args:
|
|
3693
|
+
quiz: Quiz object
|
|
3694
|
+
importable: If True, clean output for direct import
|
|
3695
|
+
|
|
3696
|
+
Returns:
|
|
3697
|
+
Dictionary with quiz_type, settings, and questions
|
|
3698
|
+
"""
|
|
3699
|
+
# Extract settings
|
|
3700
|
+
settings = {
|
|
3701
|
+
'title': getattr(quiz, 'title', ''),
|
|
3702
|
+
'description': getattr(quiz, 'description', '') or '',
|
|
3703
|
+
'quiz_type': getattr(quiz, 'quiz_type', 'assignment'),
|
|
3704
|
+
'time_limit': getattr(quiz, 'time_limit', None),
|
|
3705
|
+
'allowed_attempts': getattr(quiz, 'allowed_attempts', 1),
|
|
3706
|
+
'shuffle_questions': getattr(quiz, 'shuffle_questions', False),
|
|
3707
|
+
'shuffle_answers': getattr(quiz, 'shuffle_answers', False),
|
|
3708
|
+
'points_possible': getattr(quiz, 'points_possible', None),
|
|
3709
|
+
'published': getattr(quiz, 'published', False),
|
|
3710
|
+
'due_at': getattr(quiz, 'due_at', None),
|
|
3711
|
+
'unlock_at': getattr(quiz, 'unlock_at', None),
|
|
3712
|
+
'lock_at': getattr(quiz, 'lock_at', None),
|
|
3713
|
+
'show_correct_answers': getattr(quiz, 'show_correct_answers', True),
|
|
3714
|
+
'one_question_at_a_time': getattr(quiz, 'one_question_at_a_time', False),
|
|
3715
|
+
'cant_go_back': getattr(quiz, 'cant_go_back', False),
|
|
3716
|
+
'access_code': getattr(quiz, 'access_code', None),
|
|
3717
|
+
}
|
|
3718
|
+
|
|
3719
|
+
# Get questions
|
|
3720
|
+
questions_export = export_classic_questions(quiz)
|
|
3721
|
+
questions = questions_export.get('questions', [])
|
|
3722
|
+
|
|
3723
|
+
# Clean for import if requested
|
|
3724
|
+
if importable:
|
|
3725
|
+
questions_cleaned = clean_for_import({'questions': questions}, quiz_type='classic')
|
|
3726
|
+
questions = questions_cleaned.get('questions', [])
|
|
3727
|
+
|
|
3728
|
+
return {
|
|
3729
|
+
'quiz_type': 'classic',
|
|
3730
|
+
'settings': settings,
|
|
3731
|
+
'questions': questions
|
|
3732
|
+
}
|
|
3733
|
+
@
|
|
3734
|
+
|
|
3735
|
+
|
|
3107
3736
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
|
3108
3737
|
\chapter{Viewing Quiz Content}
|
|
3109
3738
|
\label{quiz-view}
|
|
@@ -4889,6 +5518,226 @@ EXAMPLE_CLASSIC_QUIZ_JSON = {
|
|
|
4889
5518
|
}
|
|
4890
5519
|
@
|
|
4891
5520
|
|
|
5521
|
+
|
|
5522
|
+
\subsection{Full quiz JSON format for export/create workflow}
|
|
5523
|
+
|
|
5524
|
+
While the examples above show the format for adding \emph{questions} to an
|
|
5525
|
+
existing quiz, users often need to export a complete quiz (settings and
|
|
5526
|
+
questions) and create a copy elsewhere. The [[quizzes export]] and
|
|
5527
|
+
[[quizzes create]] commands use a unified format that combines quiz settings
|
|
5528
|
+
with questions.
|
|
5529
|
+
|
|
5530
|
+
The format wraps settings in a [[settings]] object and includes a [[quiz_type]]
|
|
5531
|
+
field so [[quizzes create]] knows which API to use:
|
|
5532
|
+
\begin{description}
|
|
5533
|
+
\item[[[quiz_type]]] Either [[new]] (New Quizzes) or [[classic]] (Classic
|
|
5534
|
+
Quizzes)
|
|
5535
|
+
\item[[[settings]]] Quiz settings like title, time limit, instructions
|
|
5536
|
+
\item[[[quiz_settings]]] For New Quizzes: nested object with [[multiple_attempts]]
|
|
5537
|
+
and [[result_view_settings]] for controlling attempts and what students see
|
|
5538
|
+
\item[[[items]]] For New Quizzes: array of question items
|
|
5539
|
+
\item[[[questions]]] For Classic Quizzes: array of questions
|
|
5540
|
+
\end{description}
|
|
5541
|
+
|
|
5542
|
+
\paragraph{New Quiz settings structure.}
|
|
5543
|
+
The [[quiz_settings]] object within [[settings]] controls advanced quiz behavior:
|
|
5544
|
+
\begin{description}
|
|
5545
|
+
\item[[[multiple_attempts]]] Controls multiple attempts, waiting periods between
|
|
5546
|
+
attempts, and which score to keep (see [[NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA]])
|
|
5547
|
+
\item[[[result_view_settings]]] Controls what students see after submission,
|
|
5548
|
+
including whether to show correct answers (see [[NEW_QUIZ_RESULT_VIEW_SCHEMA]])
|
|
5549
|
+
\item[[[shuffle_answers]]] Whether to randomize answer order
|
|
5550
|
+
\item[[[shuffle_questions]]] Whether to randomize question order
|
|
5551
|
+
\item[[[has_time_limit]]] Whether the quiz has a time limit
|
|
5552
|
+
\item[[[session_time_limit_in_seconds]]] Time limit in seconds
|
|
5553
|
+
\end{description}
|
|
5554
|
+
|
|
5555
|
+
<<constants>>=
|
|
5556
|
+
EXAMPLE_FULL_NEW_QUIZ_JSON = {
|
|
5557
|
+
"quiz_type": "new",
|
|
5558
|
+
"settings": {
|
|
5559
|
+
"title": "Example Practice Quiz",
|
|
5560
|
+
"instructions": "<p>This is a practice quiz to test your knowledge. "
|
|
5561
|
+
"You can retry multiple times with a 1-hour waiting period "
|
|
5562
|
+
"between attempts. Your latest score will be kept.</p>"
|
|
5563
|
+
"<p>You will see your score but not the correct answers, "
|
|
5564
|
+
"so you can keep practicing until you get them all right!</p>",
|
|
5565
|
+
"time_limit": 1800,
|
|
5566
|
+
"points_possible": 20,
|
|
5567
|
+
"due_at": None,
|
|
5568
|
+
"unlock_at": None,
|
|
5569
|
+
"lock_at": None,
|
|
5570
|
+
"quiz_settings": {
|
|
5571
|
+
# Randomization settings
|
|
5572
|
+
"shuffle_answers": True,
|
|
5573
|
+
"shuffle_questions": False,
|
|
5574
|
+
|
|
5575
|
+
# Time limit settings
|
|
5576
|
+
"has_time_limit": True,
|
|
5577
|
+
"session_time_limit_in_seconds": 1800,
|
|
5578
|
+
|
|
5579
|
+
# Question display settings
|
|
5580
|
+
"one_at_a_time_type": "none",
|
|
5581
|
+
"allow_backtracking": True,
|
|
5582
|
+
|
|
5583
|
+
# Calculator settings
|
|
5584
|
+
"calculator_type": "none",
|
|
5585
|
+
|
|
5586
|
+
# Access restrictions
|
|
5587
|
+
"filter_ip_address": False,
|
|
5588
|
+
"filters": {},
|
|
5589
|
+
"require_student_access_code": False,
|
|
5590
|
+
"student_access_code": None,
|
|
5591
|
+
|
|
5592
|
+
# Multiple attempts settings
|
|
5593
|
+
"multiple_attempts": {
|
|
5594
|
+
"multiple_attempts_enabled": True,
|
|
5595
|
+
"attempt_limit": False,
|
|
5596
|
+
"max_attempts": None,
|
|
5597
|
+
"score_to_keep": "latest",
|
|
5598
|
+
"cooling_period": True,
|
|
5599
|
+
"cooling_period_seconds": 3600
|
|
5600
|
+
},
|
|
5601
|
+
|
|
5602
|
+
# Result view settings - what students see after submission
|
|
5603
|
+
"result_view_settings": {
|
|
5604
|
+
"result_view_restricted": True,
|
|
5605
|
+
"display_points_awarded": True,
|
|
5606
|
+
"display_points_possible": True,
|
|
5607
|
+
"display_items": True,
|
|
5608
|
+
"display_item_response": True,
|
|
5609
|
+
"display_item_response_qualifier": "always",
|
|
5610
|
+
"display_item_response_correctness": True,
|
|
5611
|
+
"display_item_correct_answer": False,
|
|
5612
|
+
"display_item_feedback": False,
|
|
5613
|
+
"display_correct_answer_at": None,
|
|
5614
|
+
"hide_correct_answer_at": None
|
|
5615
|
+
}
|
|
5616
|
+
}
|
|
5617
|
+
},
|
|
5618
|
+
"items": [
|
|
5619
|
+
{
|
|
5620
|
+
"position": 1,
|
|
5621
|
+
"points_possible": 5,
|
|
5622
|
+
"entry": {
|
|
5623
|
+
"title": "Geography: Capital Cities",
|
|
5624
|
+
"item_body": "<p>What is the capital of Sweden?</p>",
|
|
5625
|
+
"interaction_type_slug": "choice",
|
|
5626
|
+
"scoring_algorithm": "Equivalence",
|
|
5627
|
+
"interaction_data": {
|
|
5628
|
+
"choices": [
|
|
5629
|
+
{"position": 1, "item_body": "<p>Stockholm</p>"},
|
|
5630
|
+
{"position": 2, "item_body": "<p>Gothenburg</p>"},
|
|
5631
|
+
{"position": 3, "item_body": "<p>Malmö</p>"},
|
|
5632
|
+
{"position": 4, "item_body": "<p>Uppsala</p>"}
|
|
5633
|
+
]
|
|
5634
|
+
},
|
|
5635
|
+
"scoring_data": {"value": 1}
|
|
5636
|
+
}
|
|
5637
|
+
},
|
|
5638
|
+
{
|
|
5639
|
+
"position": 2,
|
|
5640
|
+
"points_possible": 5,
|
|
5641
|
+
"entry": {
|
|
5642
|
+
"title": "Programming: Language Type",
|
|
5643
|
+
"item_body": "<p>Python is an interpreted programming language.</p>",
|
|
5644
|
+
"interaction_type_slug": "true-false",
|
|
5645
|
+
"scoring_algorithm": "Equivalence",
|
|
5646
|
+
"interaction_data": {
|
|
5647
|
+
"true_choice": "True",
|
|
5648
|
+
"false_choice": "False"
|
|
5649
|
+
},
|
|
5650
|
+
"scoring_data": {"value": True}
|
|
5651
|
+
}
|
|
5652
|
+
},
|
|
5653
|
+
{
|
|
5654
|
+
"position": 3,
|
|
5655
|
+
"points_possible": 5,
|
|
5656
|
+
"entry": {
|
|
5657
|
+
"title": "Math: Select All Correct",
|
|
5658
|
+
"item_body": "<p>Which of the following are prime numbers?</p>",
|
|
5659
|
+
"interaction_type_slug": "multi-answer",
|
|
5660
|
+
"scoring_algorithm": "AllOrNothing",
|
|
5661
|
+
"interaction_data": {
|
|
5662
|
+
"choices": [
|
|
5663
|
+
{"position": 1, "item_body": "<p>2</p>"},
|
|
5664
|
+
{"position": 2, "item_body": "<p>4</p>"},
|
|
5665
|
+
{"position": 3, "item_body": "<p>7</p>"},
|
|
5666
|
+
{"position": 4, "item_body": "<p>9</p>"},
|
|
5667
|
+
{"position": 5, "item_body": "<p>11</p>"}
|
|
5668
|
+
]
|
|
5669
|
+
},
|
|
5670
|
+
"scoring_data": {"value": [1, 3, 5]}
|
|
5671
|
+
}
|
|
5672
|
+
},
|
|
5673
|
+
{
|
|
5674
|
+
"position": 4,
|
|
5675
|
+
"points_possible": 5,
|
|
5676
|
+
"entry": {
|
|
5677
|
+
"title": "Programming: Output Question",
|
|
5678
|
+
"item_body": "<p>What does the following Python code print?</p>"
|
|
5679
|
+
"<pre>x = 5\nif x > 3:\n print('big')\nelse:\n print('small')</pre>",
|
|
5680
|
+
"interaction_type_slug": "choice",
|
|
5681
|
+
"scoring_algorithm": "Equivalence",
|
|
5682
|
+
"interaction_data": {
|
|
5683
|
+
"choices": [
|
|
5684
|
+
{"position": 1, "item_body": "<p>big</p>"},
|
|
5685
|
+
{"position": 2, "item_body": "<p>small</p>"},
|
|
5686
|
+
{"position": 3, "item_body": "<p>5</p>"},
|
|
5687
|
+
{"position": 4, "item_body": "<p>Nothing is printed</p>"}
|
|
5688
|
+
]
|
|
5689
|
+
},
|
|
5690
|
+
"scoring_data": {"value": 1}
|
|
5691
|
+
}
|
|
5692
|
+
}
|
|
5693
|
+
]
|
|
5694
|
+
}
|
|
5695
|
+
@
|
|
5696
|
+
|
|
5697
|
+
<<constants>>=
|
|
5698
|
+
EXAMPLE_FULL_CLASSIC_QUIZ_JSON = {
|
|
5699
|
+
"quiz_type": "classic",
|
|
5700
|
+
"settings": {
|
|
5701
|
+
"title": "Example Classic Quiz",
|
|
5702
|
+
"description": "<p>Answer all questions carefully. Time limit: 60 minutes.</p>",
|
|
5703
|
+
"quiz_type": "assignment",
|
|
5704
|
+
"time_limit": 60,
|
|
5705
|
+
"allowed_attempts": 2,
|
|
5706
|
+
"shuffle_questions": True,
|
|
5707
|
+
"shuffle_answers": True,
|
|
5708
|
+
"points_possible": 100,
|
|
5709
|
+
"published": False,
|
|
5710
|
+
"due_at": None,
|
|
5711
|
+
"unlock_at": None,
|
|
5712
|
+
"lock_at": None
|
|
5713
|
+
},
|
|
5714
|
+
"questions": [
|
|
5715
|
+
{
|
|
5716
|
+
"question_name": "Capital Question",
|
|
5717
|
+
"question_text": "<p>What is the capital of Sweden?</p>",
|
|
5718
|
+
"question_type": "multiple_choice_question",
|
|
5719
|
+
"points_possible": 5,
|
|
5720
|
+
"answers": [
|
|
5721
|
+
{"answer_text": "Stockholm", "answer_weight": 100},
|
|
5722
|
+
{"answer_text": "Gothenburg", "answer_weight": 0},
|
|
5723
|
+
{"answer_text": "Malmö", "answer_weight": 0}
|
|
5724
|
+
]
|
|
5725
|
+
},
|
|
5726
|
+
{
|
|
5727
|
+
"question_name": "True/False Question",
|
|
5728
|
+
"question_text": "<p>Python is an interpreted language.</p>",
|
|
5729
|
+
"question_type": "true_false_question",
|
|
5730
|
+
"points_possible": 5,
|
|
5731
|
+
"answers": [
|
|
5732
|
+
{"answer_text": "True", "answer_weight": 100},
|
|
5733
|
+
{"answer_text": "False", "answer_weight": 0}
|
|
5734
|
+
]
|
|
5735
|
+
}
|
|
5736
|
+
]
|
|
5737
|
+
}
|
|
5738
|
+
@
|
|
5739
|
+
|
|
5740
|
+
|
|
4892
5741
|
The [[print_example_json]] function outputs both formats with explanatory
|
|
4893
5742
|
headers, making it easy for users to copy the appropriate format for their quiz
|
|
4894
5743
|
type.
|
|
@@ -4920,6 +5769,100 @@ def print_example_json():
|
|
|
4920
5769
|
@
|
|
4921
5770
|
|
|
4922
5771
|
|
|
5772
|
+
Similarly, the [[print_full_quiz_example_json]] function outputs the full quiz
|
|
5773
|
+
format (settings plus questions) for use with [[quizzes create]] and
|
|
5774
|
+
[[quizzes export]].
|
|
5775
|
+
|
|
5776
|
+
<<functions>>=
|
|
5777
|
+
def print_full_quiz_example_json():
|
|
5778
|
+
"""Prints example JSON for full quiz creation (settings + questions)"""
|
|
5779
|
+
print("=" * 70)
|
|
5780
|
+
print("EXAMPLE JSON FOR CREATING NEW QUIZZES (Quizzes.Next)")
|
|
5781
|
+
print("=" * 70)
|
|
5782
|
+
print()
|
|
5783
|
+
print("This format includes both quiz settings and questions.")
|
|
5784
|
+
print("Save to a file and use with:")
|
|
5785
|
+
print(" canvaslms quizzes create -c COURSE -f quiz.json")
|
|
5786
|
+
print()
|
|
5787
|
+
print("This is the same format produced by 'quizzes export -I'.")
|
|
5788
|
+
print()
|
|
5789
|
+
print("BASIC SETTINGS:")
|
|
5790
|
+
print(" title - Quiz title")
|
|
5791
|
+
print(" instructions - HTML instructions shown to students")
|
|
5792
|
+
print(" time_limit - Time limit in SECONDS (or null)")
|
|
5793
|
+
print(" points_possible - Total points")
|
|
5794
|
+
print(" due_at/unlock_at/lock_at - ISO 8601 dates (or null)")
|
|
5795
|
+
print()
|
|
5796
|
+
print("QUIZ SETTINGS (in 'settings.quiz_settings'):")
|
|
5797
|
+
print()
|
|
5798
|
+
print(" Randomization:")
|
|
5799
|
+
print(" shuffle_answers: true/false - Randomize answer order")
|
|
5800
|
+
print(" shuffle_questions: true/false - Randomize question order")
|
|
5801
|
+
print()
|
|
5802
|
+
print(" Time limit:")
|
|
5803
|
+
print(" has_time_limit: true/false")
|
|
5804
|
+
print(" session_time_limit_in_seconds: number")
|
|
5805
|
+
print()
|
|
5806
|
+
print(" Question display:")
|
|
5807
|
+
print(" one_at_a_time_type: 'none' or 'question'")
|
|
5808
|
+
print(" allow_backtracking: true/false - Can go back to previous questions")
|
|
5809
|
+
print()
|
|
5810
|
+
print(" Calculator:")
|
|
5811
|
+
print(" calculator_type: 'none', 'basic', or 'scientific'")
|
|
5812
|
+
print()
|
|
5813
|
+
print(" Access restrictions:")
|
|
5814
|
+
print(" require_student_access_code: true/false")
|
|
5815
|
+
print(" student_access_code: 'password' or null")
|
|
5816
|
+
print(" filter_ip_address: true/false")
|
|
5817
|
+
print(" filters: {} or IP filter rules")
|
|
5818
|
+
print()
|
|
5819
|
+
print(" Multiple attempts:")
|
|
5820
|
+
print(" multiple_attempts_enabled: true/false")
|
|
5821
|
+
print(" attempt_limit: true/false (true = limited, false = unlimited)")
|
|
5822
|
+
print(" max_attempts: number or null")
|
|
5823
|
+
print(" score_to_keep: 'highest' or 'latest'")
|
|
5824
|
+
print(" cooling_period: true/false (require wait between attempts)")
|
|
5825
|
+
print(" cooling_period_seconds: seconds (e.g., 3600 = 1 hour)")
|
|
5826
|
+
print()
|
|
5827
|
+
print(" Result view (what students see after submission):")
|
|
5828
|
+
print(" result_view_restricted: true/false")
|
|
5829
|
+
print(" display_items: true/false - Show questions")
|
|
5830
|
+
print(" display_item_response: true/false - Show student's answers")
|
|
5831
|
+
print(" display_item_response_correctness: true/false - Show right/wrong")
|
|
5832
|
+
print(" display_item_correct_answer: true/false - Show correct answers")
|
|
5833
|
+
print(" display_item_feedback: true/false - Show per-question feedback")
|
|
5834
|
+
print(" display_points_awarded: true/false - Show points earned")
|
|
5835
|
+
print(" display_points_possible: true/false - Show max points")
|
|
5836
|
+
print(" display_correct_answer_at: ISO date or null - When to reveal")
|
|
5837
|
+
print(" hide_correct_answer_at: ISO date or null - When to hide")
|
|
5838
|
+
print()
|
|
5839
|
+
print("SCORING:")
|
|
5840
|
+
print(" Use position numbers (1, 2, 3...) to reference correct answers.")
|
|
5841
|
+
print(" UUIDs are generated automatically during import.")
|
|
5842
|
+
print()
|
|
5843
|
+
print(json.dumps(EXAMPLE_FULL_NEW_QUIZ_JSON, indent=2))
|
|
5844
|
+
print()
|
|
5845
|
+
print()
|
|
5846
|
+
print("=" * 70)
|
|
5847
|
+
print("EXAMPLE JSON FOR CREATING CLASSIC QUIZZES")
|
|
5848
|
+
print("=" * 70)
|
|
5849
|
+
print()
|
|
5850
|
+
print("Classic Quizzes use different field names and units.")
|
|
5851
|
+
print()
|
|
5852
|
+
print("Settings (time_limit in MINUTES for Classic Quizzes):")
|
|
5853
|
+
print(" title, description (not instructions), quiz_type,")
|
|
5854
|
+
print(" time_limit, allowed_attempts, shuffle_questions,")
|
|
5855
|
+
print(" shuffle_answers, points_possible, published,")
|
|
5856
|
+
print(" due_at, unlock_at, lock_at, show_correct_answers,")
|
|
5857
|
+
print(" one_question_at_a_time, cant_go_back, access_code")
|
|
5858
|
+
print()
|
|
5859
|
+
print("quiz_type values: assignment, practice_quiz, graded_survey, survey")
|
|
5860
|
+
print("answer_weight: 100 = correct, 0 = incorrect")
|
|
5861
|
+
print()
|
|
5862
|
+
print(json.dumps(EXAMPLE_FULL_CLASSIC_QUIZ_JSON, indent=2))
|
|
5863
|
+
@
|
|
5864
|
+
|
|
5865
|
+
|
|
4923
5866
|
\subsection{Processing the add command}
|
|
4924
5867
|
|
|
4925
5868
|
When [[--example]] is provided, we print the example JSON and exit immediately
|
|
@@ -5648,6 +6591,17 @@ The [[clean_interaction_data]] function strips UUIDs from choices, keeping only
|
|
|
5648
6591
|
the [[position]] and [[item_body]] fields. This makes the JSON human-readable
|
|
5649
6592
|
and avoids UUID conflicts when importing to a different quiz.
|
|
5650
6593
|
|
|
6594
|
+
Some question types use different structures:
|
|
6595
|
+
\begin{description}
|
|
6596
|
+
\item[choices] Multiple choice and multi-answer questions use a list of
|
|
6597
|
+
dictionaries with [[id]], [[position]], and [[item_body]].
|
|
6598
|
+
\item[answers] Matching questions use [[answers]] (a list of strings for the
|
|
6599
|
+
right column) and [[questions]] (a list of dicts for the left column).
|
|
6600
|
+
\end{description}
|
|
6601
|
+
|
|
6602
|
+
We handle both cases, preserving string arrays as-is while cleaning dict-based
|
|
6603
|
+
choices.
|
|
6604
|
+
|
|
5651
6605
|
<<functions>>=
|
|
5652
6606
|
def clean_interaction_data(interaction_data):
|
|
5653
6607
|
"""Removes UUIDs from interaction_data choices"""
|
|
@@ -5657,15 +6611,36 @@ def clean_interaction_data(interaction_data):
|
|
|
5657
6611
|
clean = dict(interaction_data)
|
|
5658
6612
|
|
|
5659
6613
|
# Handle choices array (multiple choice, multi-answer)
|
|
6614
|
+
# Choices are dicts with id, position, item_body
|
|
5660
6615
|
if 'choices' in clean:
|
|
5661
6616
|
clean_choices = []
|
|
5662
6617
|
for i, choice in enumerate(clean['choices']):
|
|
6618
|
+
# Skip if choice is not a dict (shouldn't happen, but be safe)
|
|
6619
|
+
if not isinstance(choice, dict):
|
|
6620
|
+
clean_choices.append(choice)
|
|
6621
|
+
continue
|
|
5663
6622
|
clean_choice = {'position': choice.get('position', i + 1)}
|
|
5664
6623
|
if 'item_body' in choice:
|
|
5665
6624
|
clean_choice['item_body'] = choice['item_body']
|
|
5666
6625
|
clean_choices.append(clean_choice)
|
|
5667
6626
|
clean['choices'] = clean_choices
|
|
5668
6627
|
|
|
6628
|
+
# Handle questions array (matching questions)
|
|
6629
|
+
# Questions are dicts with id, item_body - we keep item_body, drop id
|
|
6630
|
+
if 'questions' in clean:
|
|
6631
|
+
clean_questions = []
|
|
6632
|
+
for i, question in enumerate(clean['questions']):
|
|
6633
|
+
if not isinstance(question, dict):
|
|
6634
|
+
clean_questions.append(question)
|
|
6635
|
+
continue
|
|
6636
|
+
clean_q = {}
|
|
6637
|
+
if 'item_body' in question:
|
|
6638
|
+
clean_q['item_body'] = question['item_body']
|
|
6639
|
+
clean_questions.append(clean_q)
|
|
6640
|
+
clean['questions'] = clean_questions
|
|
6641
|
+
|
|
6642
|
+
# 'answers' is a list of strings (matching questions) - keep as-is
|
|
6643
|
+
|
|
5669
6644
|
return clean
|
|
5670
6645
|
@
|
|
5671
6646
|
|