canvaslms 5.8.tar.gz → 5.10.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {canvaslms-5.8 → canvaslms-5.10}/PKG-INFO +1 -1
- {canvaslms-5.8 → canvaslms-5.10}/pyproject.toml +1 -1
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/assignments.nw +2 -1
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/cli.nw +25 -2
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/content.nw +11 -3
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/discussions.nw +4 -2
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/pages.nw +2 -1
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/quizzes.nw +60 -5
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/results.nw +85 -45
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/users.nw +64 -1
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/conjunctavg.nw +50 -1
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/conjunctavgsurvey.nw +50 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/disjunctmax.nw +59 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/grades.nw +17 -3
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/maxgradesurvey.nw +58 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/participation.nw +27 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/tilkryLAB1.nw +47 -0
- {canvaslms-5.8 → canvaslms-5.10}/LICENSE +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/README.md +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/.circleci/config.yml +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/.git +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/.gitignore +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/CONTRIBUTING.md +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Dockerfile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Dockerfile.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Dockerfile.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/LICENSE +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Makefile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Makefile.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/Makefile.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/README.md +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/doc.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/doc.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/doc.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/exam.bib +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/exam.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/exam.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/exam.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/gitattributes +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/haskell.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/haskell.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/haskell.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/intro.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/latexmkrc +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/_minted/AAA0D43723DCC5BE9DB71A96B52C1142.highlight.minted +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/_minted/E5724293DA12A769F97F0E91498CAEEF.highlight.minted +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/_minted/FFC039B25D180E99FC9FEBDB2D42EBAF.highlight.minted +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/_minted/_7945F0071C510291A619CE1658F673CB.index.minted +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/_minted/default.style.minted +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.aux +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.bbl +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.bcf +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.blg +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.fdb_latexmk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.fls +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.log +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.out +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.pdf +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.run.xml +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.toc +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/ltxobj/makefiles.xdv +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/makefiles.bib +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/makefiles.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/miun.course.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/miun.depend.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/miun.docs.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/miun.port.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/miun.pub.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/noweb.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/noweb.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/noweb.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pkg.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pkg.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pkg.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/portability.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/portability.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/portability.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/preamble.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pub.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pub.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/pub.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/results.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/results.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/results.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/subdir.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/subdir.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/subdir.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/tex.bib +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/tex.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/tex.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/tex.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/transform.bib +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/transform.mk +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/transform.mk.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/makefiles/transform.tex +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/Makefile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/__init__.py +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/Makefile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/cache.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/calendar.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/courses.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/grade.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/login.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/modules.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/results.py.broken +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/submissions.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/cli/utils.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/Makefile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/grades/grades.py +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/hacks/Makefile +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/hacks/__init__.py +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/hacks/attachment_cache.nw +0 -0
- {canvaslms-5.8 → canvaslms-5.10}/src/canvaslms/hacks/canvasapi.nw +0 -0
@@ -1338,7 +1338,8 @@ content, accept it (which triggers the update), edit further, or discard changes
 title = attributes.get('name', assignment.name)
 result = canvaslms.cli.content.interactive_confirm_and_edit(
   title, body_content, attributes,
-  canvaslms.cli.content.ASSIGNMENT_SCHEMA, "Assignment"
+  canvaslms.cli.content.ASSIGNMENT_SCHEMA, "Assignment",
+  content_attr='description')

 if result is None:
   print("Discarded changes for this assignment.", file=sys.stderr)
@@ -356,6 +356,19 @@ argp.add_argument("-v", "--verbose",
 @


+\subsection{Cache control}
+
+Most commands speed up by caching the Canvas API object between runs.
+Occasionally you want to bypass the cache for a single command,
+for example right after creating or updating objects.
+
+<<add global options to argp>>=
+argp.add_argument("--no-cache",
+  action="store_true",
+  help="Do not read or write the persistent Canvas object cache")
+@
+
+
 \subsection{Logging in and setting up Canvas}

 Each subcommand will have its own module in the package.
@@ -445,15 +458,25 @@ Before creating a new Canvas object from scratch, we try to load it from the
 persistent cache.
 This can significantly speed up commands by reusing previously fetched course
 data, assignments, users, and submissions.
+If the cached object becomes stale (for example, right after creating a quiz),
+you can bypass the cache with [[--no-cache]].
+
+For example:
+\begin{verbatim}
+canvaslms --no-cache quizzes export -c "My Course" -a "My Quiz"
+\end{verbatim}
 <<try to load canvas from cache>>=
-
+if args.no_cache:
+  canvas = None
+else:
+  canvas = canvaslms.cli.cache.load_canvas_cache(token, hostname)
 @

 After successfully executing a command, we save the Canvas object to the cache
 for future use.
 We only save if the canvas object was actually used (not [[None]]).
 <<save canvas to cache after command>>=
-if canvas:
+if canvas and not args.no_cache:
   canvaslms.cli.cache.save_canvas_cache(canvas, token, hostname)
 @

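
Aside: a minimal sketch of the load/save guard this hunk introduces, with the cache and login helpers passed in as plain callables; the function and parameter names below are illustrative stand-ins, not the package's exact code.

    # Illustrative sketch of the --no-cache guard: skip both the cache read and
    # the cache write when the flag is set. load_cache, save_cache and
    # make_canvas are stand-ins for the package's cache and login helpers.
    def run_with_cache(args, token, hostname, load_cache, save_cache, make_canvas):
        canvas = None if args.no_cache else load_cache(token, hostname)
        if canvas is None:
            canvas = make_canvas(token, hostname)  # fresh Canvas API object
        # ... execute the chosen subcommand with canvas here ...
        if canvas and not args.no_cache:
            save_cache(canvas, token, hostname)
        return canvas
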
@@ -985,9 +985,14 @@ loop. Users can preview their content, choose to accept it, edit it further,
 or discard their changes.

 The function is generalized to work with any content type by accepting the
-schema for re-editing.
+schema for re-editing. When the user chooses to edit again, we need to pass
+the current content back to [[get_content_from_editor]]. Since that function
+expects content to be stored in the attributes dictionary under a named key
+(e.g., \enquote{message} for announcements, \enquote{body} for pages), we accept
+a [[content_attr]] parameter specifying which key to use.
 <<interactive functions>>=
-def interactive_confirm_and_edit(title, message, attributes, schema,
+def interactive_confirm_and_edit(title, message, attributes, schema,
+    content_type="Content", content_attr='message'):
   """Interactive loop for confirming or editing content.

   Args:
@@ -996,6 +1001,7 @@ def interactive_confirm_and_edit(title, message, attributes, schema, content_typ
     attributes: Current attributes
     schema: Schema for re-editing
     content_type: Type label for display
+    content_attr: Name of attribute that holds body content (for re-editing)

   Returns:
     Tuple of (attributes, message), or None if cancelled
@@ -1021,7 +1027,9 @@ def interactive_confirm_and_edit(title, message, attributes, schema, content_typ
     if choice in ['a', 'accept']:
       return current_attributes, current_message
     elif choice in ['e', 'edit']:
-
+      edit_attrs = current_attributes.copy()
+      edit_attrs[content_attr] = current_message
+      result = get_content_from_editor(schema, edit_attrs, content_attr=content_attr)
       if result is None:
         print("Editor cancelled or failed. Keeping previous content.", file=sys.stderr)
       else:
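
Aside: a small sketch of why [[content_attr]] matters; the helper below uses stand-in names and only shows how the edited body is stored back under the named key before the next editor round.

    # Illustrative only: the body lives under a content-type-specific key
    # ('message', 'body', 'description', 'instructions'), so re-editing has to
    # copy the current body into the attributes dict under that key.
    def prepare_reedit(attributes, current_body, content_attr):
        edit_attrs = dict(attributes)
        edit_attrs[content_attr] = current_body
        return edit_attrs

    print(prepare_reedit({"title": "Lab 1"}, "<p>Updated text</p>", "description"))
    # {'title': 'Lab 1', 'description': '<p>Updated text</p>'}
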
@@ -394,7 +394,8 @@ for course in course_list:

   result = canvaslms.cli.content.interactive_confirm_and_edit(
     title, message, attributes,
-    canvaslms.cli.content.ANNOUNCEMENT_SCHEMA, "Announcement"
+    canvaslms.cli.content.ANNOUNCEMENT_SCHEMA, "Announcement",
+    content_attr='message')
   if result is None:
     print("Cancelled.", file=sys.stderr)
     sys.exit(0)
@@ -775,7 +776,8 @@ After editing, we enter the interactive confirm loop.
 title = edited_attrs.get('title', announcement.title)
 result = canvaslms.cli.content.interactive_confirm_and_edit(
   title, body_content, edited_attrs,
-  canvaslms.cli.content.ANNOUNCEMENT_SCHEMA, "Announcement"
+  canvaslms.cli.content.ANNOUNCEMENT_SCHEMA, "Announcement",
+  content_attr='message')

 if result is None:
   print("Discarded changes.", file=sys.stderr)
@@ -744,7 +744,8 @@ content, accept it (which triggers the update), edit further, or discard changes
 title = attributes.get('title', full_page.title)
 result = canvaslms.cli.content.interactive_confirm_and_edit(
   title, body_content, attributes,
-  canvaslms.cli.content.PAGE_SCHEMA, "Page"
+  canvaslms.cli.content.PAGE_SCHEMA, "Page",
+  content_attr='body')

 if result is None:
   print("Discarded changes for this page.", file=sys.stderr)
@@ -95,8 +95,13 @@ Here, [[attempt_limit]] of [[null]] means unlimited attempts. The
 [[score_to_keep]] can be [[highest]] (default) or [[latest]]. Setting
 [[cooling_period]] to [[true]] requires [[cooling_period_seconds]] to specify
 the wait time (3600 seconds = 1 hour). Using [[latest]] with a cooling period
-
-
+supports a workflow where students can keep trying and learn from their
+previous attempt.
+
+Note that some New Quizzes settings are not reliably settable via the API.
+In particular, in our testing (KTH Canvas, February 2026),
+[[multiple_attempts.build_on_last_attempt]] appears to be ignored on quiz
+creation/import via the New Quizzes API.

 \paragraph{Controlling what students see after submission.}
 To show students their score but hide the correct answers:
@@ -2574,6 +2579,9 @@ def create_command(config, canvas, args):
   if args.title:
     quiz_params['title'] = args.title

+  if quiz_type == "new":
+    <<warn about ignored New Quiz settings>>
+
   if 'title' not in quiz_params:
     canvaslms.cli.err(1, "Quiz title is required (use --title or include in JSON)")

@@ -2622,6 +2630,41 @@ to flatten these into the format the API expects:
 quiz[quiz_settings][multiple_attempts][cooling_period_seconds]=3600
 \end{verbatim}

+\paragraph{Some settings cannot be set via the API.}
+Canvas does not reliably apply every [[quiz_settings]] field during quiz
+creation.
+
+In our testing (KTH Canvas, February 2026),
+[[multiple_attempts.build_on_last_attempt]] appears to be \emph{read-only via the
+New Quizzes API}: it shows up in exports when set in the Canvas web UI, but both
+[[POST]] (create) and [[PATCH]] (update) silently drop the field.
+
+The official New Quizzes API documentation does not list
+[[multiple_attempts.build_on_last_attempt]] as a supported field.
+
+This means the export/create workflow cannot round-trip that setting purely via
+API calls. We keep the schema and examples so the exported JSON is faithful,
+but users should expect that field to be ignored on import unless they set it
+manually in the UI.
+
+When [[quizzes create]] sees this field in input JSON, we emit a warning to make
+the limitation explicit.
+
+<<warn about ignored New Quiz settings>>=
+try:
+  build_on_last_attempt = (quiz_params
+    .get('quiz_settings', {})
+    .get('multiple_attempts', {})
+    .get('build_on_last_attempt', None))
+except Exception:
+  build_on_last_attempt = None
+
+if build_on_last_attempt is not None:
+  canvaslms.cli.warn(
+    "New Quizzes: quiz_settings.multiple_attempts.build_on_last_attempt "
+    "is ignored by the API on some Canvas instances; set it in the web UI")
+@
+
 <<functions>>=
 def create_new_quiz(course, requester, quiz_params):
   """Creates a New Quiz via the New Quizzes API
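
Aside: the nested [[dict.get]] chain in the warning chunk degrades gracefully when intermediate keys are missing; the surrounding try/except additionally guards against non-dict values. A self-contained sketch with made-up example data:

    import sys

    # Illustrative sketch: each .get() falls back to an empty dict, so missing
    # levels yield None instead of raising KeyError.
    def find_build_on_last_attempt(quiz_params):
        return (quiz_params
                .get('quiz_settings', {})
                .get('multiple_attempts', {})
                .get('build_on_last_attempt', None))

    params = {"quiz_settings": {"multiple_attempts": {"build_on_last_attempt": True}}}
    if find_build_on_last_attempt(params) is not None:
        print("warning: build_on_last_attempt may be ignored on import", file=sys.stderr)
    print(find_build_on_last_attempt({}))  # None, no exception
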
@@ -2646,7 +2689,9 @@ def create_new_quiz(course, requester, quiz_params):
       _url="new_quizzes",
       **params
     )
-
+    data = response.json()
+
+    return data
   except Exception as e:
     canvaslms.cli.warn(f"Failed to create New Quiz: {e}")
     return None
@@ -2918,6 +2963,10 @@ NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA = {
     'default': 'highest',
     'description': 'Which score to keep: average, first, highest, or latest'
   },
+  'build_on_last_attempt': {
+    'default': False,
+    'description': 'Whether students continue from their previous attempt (may be ignored by the API on import)'
+  },
   'cooling_period': {
     'default': False,
     'description': 'Whether to require a waiting period between attempts'
|
|
|
3426
3475
|
message=body,
|
|
3427
3476
|
attributes=attributes,
|
|
3428
3477
|
schema=QUIZ_SCHEMA,
|
|
3429
|
-
content_type="Quiz"
|
|
3478
|
+
content_type="Quiz",
|
|
3479
|
+
content_attr='instructions'
|
|
3430
3480
|
)
|
|
3431
3481
|
|
|
3432
3482
|
if result is None:
|
|
@@ -6276,6 +6326,11 @@ The [[quiz_settings]] object within [[settings]] controls advanced quiz behavior
 \item[[[session_time_limit_in_seconds]]] Time limit in seconds
 \end{description}

+Some fields may be present in exports but ignored when creating a quiz via the
+API.
+In particular, see the note in \cref{sec:advanced-quiz-settings} about
+[[multiple_attempts.build_on_last_attempt]].
+
 <<constants>>=
 EXAMPLE_FULL_NEW_QUIZ_JSON = {
   "quiz_type": "new",
@@ -6319,6 +6374,7 @@ EXAMPLE_FULL_NEW_QUIZ_JSON = {
     "attempt_limit": False,
     "max_attempts": None,
     "score_to_keep": "latest",
+    "build_on_last_attempt": True,
     "cooling_period": True,
     "cooling_period_seconds": 3600
   },
@@ -7898,4 +7954,3 @@ def get_bank_items(requester, bank_id):
     # Item Banks API often isn't accessible - this is expected
     return []
 @
-
@@ -569,33 +569,46 @@ The available summary functions and the default one can be found in
 \subsection{Producing a list of missing assignments}

 Now we want to look at the missing option.
-If the user supplies this option, we want to produce a list of missing
-assignments.
-
-
-
+If the user supplies this option, we want to produce a list of missing
+assignments instead of grades.
+
+Previously, this option could take an optional module name, creating a confusing
+interface where both [[--missing module]] and [[-S module]] could specify
+modules.
+We simplify this: [[--missing]] is now a simple flag, and the module specified
+via [[-S]] provides the [[missing_assignments]] function if it has one.
+If the [[-S]] module doesn't provide [[missing_assignments]], we fall back to
+the default implementation in this module.
+
+This design follows the principle that each grading module knows best what
+\enquote{missing} means for its grading policy:
+\begin{description}
+\item[Conjunctive modules] (all must pass): Any assignment without a passing
+grade is missing.
+\item[Disjunctive modules] (at least one must pass): Assignments are only
+\enquote{missing} if the student has NO passing grades at all.
+\end{description}
 <<add option for missing assignments>>=
-<<define [[default_missing_module]]>>
 results_parser.add_argument("--missing",
-
-  const=default_missing_module, default=None,
+  action="store_true",
   help="Produce a list of missing assignments instead of grades. "
-  "
-  "
-  "function `missing_assignments(assignments_list, users_list). "
+  "Uses the summary module's missing_assignments() if available, "
+  "otherwise uses the default implementation. "
   <<missing module behaviour>>
   "This option only has effect when working with assignment groups.")
 @

-
-
+Since [[--missing]] is now a simple flag, we use the summary module (specified
+via [[-S]]) to provide the [[missing_assignments]] function.
+If the summary module doesn't have one, we fall back to this module's default.
 <<load the correct missing module as [[missing]]>>=
 if args.missing:
-
-
-
-
-
+  <<load the correct summary module as [[summary]]>>
+  if hasattr(summary, 'missing_assignments'):
+    missing = summary
+  else:
+    import canvaslms.cli.results
+    missing = canvaslms.cli.results
 @

 Now, to the main part of the problem.
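
Aside: a hedged sketch of the dispatch just described, resolving which module supplies [[missing_assignments]]; the loader function and module-name arguments below are assumptions for illustration, not the package's exact mechanism.

    import importlib

    # Prefer the summary module's own missing_assignments(); otherwise fall back
    # to the default implementation in canvaslms.cli.results. Illustrative only.
    def resolve_missing_provider(summary_module_name,
                                 default_module_name="canvaslms.cli.results"):
        summary = importlib.import_module(summary_module_name)
        if hasattr(summary, "missing_assignments"):
            return summary
        return importlib.import_module(default_module_name)

    # e.g. resolve_missing_provider("canvaslms.grades.conjunctavg")
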
@@ -624,8 +637,18 @@ for user, assignment, reason in missing_results:
 \subsubsection{The default missing module}

 We'll now cover a default function for the missing assignments.
-We'll put it in the same module as the [[results]] CLI command, not in a
+We'll put it in the same module as the [[results]] CLI command, not in a
 separate module.
+
+The function accepts an optional [[is_passing]] callback that determines whether
+a grade counts as passing.
+This allows grading modules to reuse this shared implementation while providing
+their own definition of what \enquote{passing} means.
+For example, [[conjunctavg]] considers only A--E and P as passing, while
+[[conjunctavgsurvey]] also accepts numeric grades.
+
+If no [[is_passing]] callback is provided, we fall back to regex matching
+against [[passing_regex]].
 <<functions>>=
 def missing_assignments(assignments_list, users_list,
   <<optional [[missing_assignments]] args>>):
@@ -638,8 +661,6 @@ def missing_assignments(assignments_list, users_list,
   for assignment in assignments_list:
     <<skip if [[assignment]] is optional>>
     <<if [[assignment]] is missing for [[user]], yield it>>
-<<define [[default_missing_module]]>>=
-default_missing_module = "canvaslms.cli.results"
 @

 We'll add [[<<optional [[missing_assignments]] args>>]] to the function to make
@@ -663,10 +684,13 @@ We don't want to make it sound like an optional assignment is mandatory.
 @

 This gives us something like this.
+We check if the grade passes using either the [[is_passing]] callback (if
+provided) or the [[passing_regex]].
 <<if [[assignment]] is missing for [[user]], yield it>>=
 try:
   submission = assignment.get_submission(user)
 except canvasapi.exceptions.ResourceDoesNotExist:
+  yield user, assignment, "no submission exists"
   continue

 if submission is None:
@@ -677,45 +701,61 @@ elif submission.grade is None:
     f"submitted on {canvaslms.cli.utils.format_local_time(submission.submitted_at)}, but not graded"
   else:
     yield user, assignment, "not done"
-
-if
-
-
-
-
-
-
-
+else:
+  <<check if grade passes using callback or regex>>
+  if not grade_passes:
+    if hasattr(submission, 'submitted_at') and submission.submitted_at and \
+        hasattr(submission, 'graded_at') and submission.graded_at and \
+        submission.submitted_at > submission.graded_at:
+      yield user, assignment, \
+        f"not a passing grade ({submission.grade}), resubmission not graded"
+    else:
+      yield user, assignment, \
+        f"not a passing grade ({submission.grade})"
+<<check if grade passes using callback or regex>>=
+if is_passing is not None:
+  grade_passes = is_passing(submission.grade)
+else:
+  grade_passes = filter_grade(submission.grade, passing_regex)
 @

-Now, we need that [[passing_regex]], so we can add it to the optional
+Now, we need that [[passing_regex]], so we can add it to the optional
 arguments, with a default value (same as above).
 We add the most common grading scales.
-But we also add number scores, which can be used for mandatory surveys and the
+But we also add number scores, which can be used for mandatory surveys and the
 like.
 <<optional [[missing_assignments]] args>>=
 passing_regex=PASSING_REGEX,
 @

-Next, if we want to be able to skip optional assignments, we can add an
+Next, if we want to be able to skip optional assignments, we can add an
 optional argument for that.
 <<optional [[missing_assignments]] args>>=
-optional_assignments
+optional_assignments=None,
+@
+
+Finally, we add the [[is_passing]] callback.
+If provided, this function takes a grade and returns [[True]] if it's passing.
+This lets grading modules define their own semantics for what constitutes a
+passing grade.
+<<optional [[missing_assignments]] args>>=
+is_passing=None,
 @

 This allows us to make the call to the function as follows.
-We check if it's the default function or not
-arguments from the CLI
+We check if it's the default function or not.
+If it is, we pass additional arguments from the CLI.
+If not, the module provides its own implementation with module-specific
+semantics (which may or may not use these arguments).
+
+We always pass [[passing_regex]] and [[optional_assignments]] since these are
+CLI-level concerns that any module might want to honor.
 <<let [[missing_results]] be the result of [[missing.missing_assignments]]>>=
-
-
-
-
-
-  )
-else:
-  missing_results = missing.missing_assignments(
-    assignments_list, users_list)
+missing_results = missing.missing_assignments(
+  assignments_list, users_list,
+  passing_regex=args.filter_grades,
+  optional_assignments=args.optional_assignments
+)
 @

 All that is missing now is the optional assignments argument for the parser.
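
Aside: the callback-or-regex decision in [[<<check if grade passes using callback or regex>>]] boils down to the following; the regex and the helper are illustrative stand-ins, not the package's actual [[PASSING_REGEX]] or [[filter_grade]].

    import re

    # Stand-in for the package's filter_grade/PASSING_REGEX pair (illustrative).
    def grade_passes(grade, passing_regex=r"^(A|B|C|D|E|P|complete)$", is_passing=None):
        if is_passing is not None:      # a grading module supplied its own rule
            return bool(is_passing(grade))
        return grade is not None and re.match(passing_regex, str(grade)) is not None

    print(grade_passes("C"))                          # True via the regex fallback
    print(grade_passes("7"))                          # False via the regex fallback
    print(grade_passes("7", is_passing=str.isdigit))  # True via the callback
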
@@ -746,6 +746,47 @@ def test_list_users_sets_course_attribute(
   assert result[0].course == mock_course
 @

+\subsubsection{Testing [[process_user_option]] with role filtering}
+
+This is a regression test for a bug where [[args.role]] (a string) was passed
+directly to [[filter_users]], causing character-by-character iteration that
+matched all enrollment types.
+
+We need to verify that [[process_user_option]] correctly converts the single
+role string from argparse into a list before passing it to [[filter_users]].
+<<test functions>>=
+def test_process_user_option_filters_by_role(
+    mock_course, mock_student, mock_ta
+):
+  """Test that process_user_option correctly filters users by role"""
+  # Setup mock course and canvas
+  mock_course.get_users = Mock(return_value=[mock_student, mock_ta])
+  mock_canvas = Mock()
+  mock_canvas.get_courses = Mock(return_value=[mock_course])
+
+  # Setup args with role="student" (as a string, like argparse provides)
+  mock_args = Mock()
+  mock_args.course = ".*"
+  mock_args.user = ".*"
+  mock_args.role = "student" # Single string, not a list
+
+  # Call process_user_option
+  result = users_module.process_user_option(mock_canvas, mock_args)
+
+  # Verify only student is returned, not TA
+  assert len(result) == 1
+  assert result[0].id == 100 # Alice Student
+
+  # Now test with TA role
+  mock_course.get_users = Mock(return_value=[mock_student, mock_ta])
+  mock_args.role = "ta"
+  result = users_module.process_user_option(mock_canvas, mock_args)
+
+  # Verify only TA is returned
+  assert len(result) == 1
+  assert result[0].id == 200 # Bob TA
+@
+
 Second, we provide the most general function, [[filter_users]], which takes a
 list of courses, a list of Canvas roles and a regex as arguments.
 It returns the matching users.
@@ -934,13 +975,35 @@ When processing this option, we need to filter by course first, so we use the
 processing from the [[courses]] module to get the list of courses matching the
 courses options.
 Then we simply filter all users.
+
+\subsubsection{Converting role string to list}
+
+The [[--role]] option (defined in [[add_user_roles_option]]) accepts a single
+role string like [["student"]] or [["ta"]] via argparse.
+However, [[filter_users]] and [[list_users]] expect a list of role names,
+since they need to iterate over roles when checking if any role matches a
+user's enrollments.
+
+If we mistakenly pass a string directly to [[filter_users]], Python will
+iterate over individual characters (for example, [["student"]] becomes
+[['s']], [['t']], [['u']], [['d']], [['e']], [['n']], [['t']]) when checking
+roles.
+This causes all enrollment types to match, since characters like [['t']],
+[['a']], [['e']], [['n']] appear in [["StudentEnrollment"]],
+[["TaEnrollment"]], [["TeacherEnrollment"]], and so on.
+
+Therefore, we must convert [[args.role]] to a single-element list before
+passing it to [[filter_users]].
 <<functions>>=
 def process_user_option(canvas, args):
   """Processes the user option from command line, returns a list of users"""
+  # args.role is a single string (e.g., "student"), but filter_users expects
+  # a list of role names. Convert it to a single-element list.
+  roles_list = [args.role] if args.role else []
   user_list = list(filter_users(
     courses.process_course_option(canvas, args),
     args.user,
-    roles=
+    roles=roles_list))
   if not user_list:
     raise canvaslms.cli.EmptyListError("No users found matching the criteria")
   return user_list
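
Aside: a tiny demonstration of the failure mode described above, using a hypothetical substring-style role check (the real [[filter_users]] matching logic may differ).

    # Iterating a string yields characters, so a role check that loops over
    # `roles` matches far too much; wrapping the string in a list fixes it.
    enrollment_type = "TeacherEnrollment"

    buggy = any(r.lower() in enrollment_type.lower() for r in "student")    # chars 's','t',...
    fixed = any(r.lower() in enrollment_type.lower() for r in ["student"])

    print(buggy)  # True  -- e.g. 't' occurs in "teacherenrollment"
    print(fixed)  # False -- "student" is not a substring of "TeacherEnrollment"
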
@@ -133,7 +133,7 @@ graders += results.all_graders(submission)

 \subsection{Computing averages}

-To compute the average for the A--E grades; we will convert the grades into
+To compute the average for the A--E grades; we will convert the grades into
 integers, compute the average, round the value to an integer and convert back.
 <<helper functions>>=
 def a2e_average(grades):
@@ -153,3 +153,52 @@ def int_to_grade(int_grade):
   return grade_map_inv[int_grade]
 @

+
+\subsection{Finding missing assignments}
+
+For conjunctive average grading, a student is \enquote{missing} any assignment
+that doesn't have a passing grade.
+Since ALL assignments must pass to get a grade, each individual failure
+prevents the student from completing the group.
+
+This is in contrast to disjunctive grading (see [[disjunctmax]]), where
+a student only needs ONE passing grade---there, we wouldn't report
+individual failing assignments as \enquote{missing} if the student already
+passed via another assignment.
+
+We define what counts as a passing grade for this module.
+A grade is passing if it's one of A--E, P, or \enquote{complete}.
+<<helper functions>>=
+def is_passing_grade(grade):
+  """
+  Returns True if grade is passing for conjunctive A-E grading.
+  """
+  if grade is None:
+    return False
+  if grade in ["A", "B", "C", "D", "E", "P"]:
+    return True
+  if isinstance(grade, str) and grade.casefold() == "complete":
+    return True
+  return False
+@
+
+We reuse the shared [[missing_assignments]] implementation from
+[[canvaslms.cli.results]], providing our module-specific [[is_passing_grade]]
+function as a callback.
+<<helper functions>>=
+def missing_assignments(assignments_list, users_list,
+                        passing_regex=None,
+                        optional_assignments=None):
+  """
+  Returns missing assignments for conjunctive average grading.
+
+  Any assignment without a passing grade (A-E, P, or complete) is missing.
+  """
+  from canvaslms.cli import results
+  return results.missing_assignments(
+    assignments_list, users_list,
+    optional_assignments=optional_assignments,
+    is_passing=is_passing_grade
+  )
+@
+
@@ -126,3 +126,53 @@ For who graded, we simply extract the list of graders from the submissions.
   graders += results.all_graders(submission)
 @

+
+\subsection{Finding missing assignments}
+
+For conjunctive average with surveys, the definition of \enquote{passing} is
+broader than plain [[conjunctavg]]: numeric grades (points, percentages) also
+count as passing.
+This accommodates mandatory surveys that aren't graded A--F but still need to
+be completed.
+
+We define [[is_passing_grade]] to accept A--E, P, complete, and any numeric
+value.
+<<helper functions>>=
+def is_passing_grade(grade):
+  """
+  Returns True if grade is passing (includes numeric grades).
+  """
+  if grade is None:
+    return False
+  if grade in ["A", "B", "C", "D", "E", "P"]:
+    return True
+  if isinstance(grade, str):
+    if grade.casefold() == "complete":
+      return True
+    # Numeric grades (points, percentages) count as passing
+    if (grade.isdigit()
+        or grade.replace('.', '', 1).isdigit()
+        or grade.replace('%', '', 1).isdigit()):
+      return True
+  return False
+@
+
+We reuse the shared implementation with our broader [[is_passing_grade]].
+<<helper functions>>=
+def missing_assignments(assignments_list, users_list,
+                        passing_regex=None,
+                        optional_assignments=None):
+  """
+  Returns missing assignments for conjunctive average with surveys.
+
+  Any assignment without a passing grade (A-E, P, complete, or numeric) is
+  missing.
+  """
+  from canvaslms.cli import results
+  return results.missing_assignments(
+    assignments_list, users_list,
+    optional_assignments=optional_assignments,
+    is_passing=is_passing_grade
+  )
+@
+
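
Aside: the practical difference between the two [[is_passing_grade]] variants added in [[conjunctavg.nw]] and [[conjunctavgsurvey.nw]] is only the numeric case; a condensed, illustrative comparison (not the modules' exact code):

    # Condensed re-statement of the two passing-grade predicates above.
    def passes_conjunctavg(grade):
        return grade in ["A", "B", "C", "D", "E", "P"] or (
            isinstance(grade, str) and grade.casefold() == "complete")

    def passes_conjunctavgsurvey(grade):
        if passes_conjunctavg(grade):
            return True
        return isinstance(grade, str) and (
            grade.isdigit()
            or grade.replace('.', '', 1).isdigit()
            or grade.replace('%', '', 1).isdigit())

    print(passes_conjunctavg("85"), passes_conjunctavgsurvey("85"))  # False True
    print(passes_conjunctavg("P"), passes_conjunctavgsurvey("P"))    # True True
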