canvaslms-5.9-py3-none-any.whl → canvaslms-5.10-py3-none-any.whl
- canvaslms/cli/__init__.py +10 -2
- canvaslms/cli/cli.nw +25 -2
- canvaslms/cli/quizzes.nw +58 -4
- canvaslms/cli/quizzes.py +24 -1
- canvaslms/cli/results.nw +85 -45
- canvaslms/cli/results.py +50 -44
- canvaslms/cli/users.nw +64 -1
- canvaslms/cli/users.py +4 -1
- canvaslms/grades/__init__.py +14 -0
- canvaslms/grades/conjunctavg.nw +50 -1
- canvaslms/grades/conjunctavg.py +31 -0
- canvaslms/grades/conjunctavgsurvey.nw +50 -0
- canvaslms/grades/conjunctavgsurvey.py +40 -0
- canvaslms/grades/disjunctmax.nw +59 -0
- canvaslms/grades/disjunctmax.py +38 -0
- canvaslms/grades/grades.nw +17 -3
- canvaslms/grades/maxgradesurvey.nw +58 -0
- canvaslms/grades/maxgradesurvey.py +41 -0
- canvaslms/grades/participation.nw +27 -0
- canvaslms/grades/participation.py +18 -0
- canvaslms/grades/tilkryLAB1.nw +47 -0
- canvaslms/grades/tilkryLAB1.py +36 -0
- {canvaslms-5.9.dist-info → canvaslms-5.10.dist-info}/METADATA +1 -1
- {canvaslms-5.9.dist-info → canvaslms-5.10.dist-info}/RECORD +27 -27
- {canvaslms-5.9.dist-info → canvaslms-5.10.dist-info}/WHEEL +0 -0
- {canvaslms-5.9.dist-info → canvaslms-5.10.dist-info}/entry_points.txt +0 -0
- {canvaslms-5.9.dist-info → canvaslms-5.10.dist-info}/licenses/LICENSE +0 -0
canvaslms/cli/__init__.py
CHANGED

@@ -118,6 +118,11 @@ def main():
         default=0,
         help="Increase verbosity: -v=INFO, -vv=DEBUG, -vvv=all library debug",
     )
+    argp.add_argument(
+        "--no-cache",
+        action="store_true",
+        help="Do not read or write the persistent Canvas object cache",
+    )
     canvaslms.cli.login.add_command(subp)
     canvaslms.cli.courses.add_command(subp)
     canvaslms.cli.modules.add_command(subp)
@@ -177,7 +182,10 @@ def main():
     if "://" not in hostname:
         hostname = f"https://{hostname}"

-    canvas = canvaslms.cli.cache.load_canvas_cache(token, hostname)
+    if args.no_cache:
+        canvas = None
+    else:
+        canvas = canvaslms.cli.cache.load_canvas_cache(token, hostname)

     if not canvas:
         canvas = Canvas(hostname, token)
@@ -206,7 +214,7 @@ def main():
     if args.func:
         try:
             args.func(config, canvas, args)
-            if canvas:
+            if canvas and not args.no_cache:
                 canvaslms.cli.cache.save_canvas_cache(canvas, token, hostname)
         except EmptyListError as e:
             if args.quiet == 0:
canvaslms/cli/cli.nw
CHANGED

@@ -356,6 +356,19 @@ argp.add_argument("-v", "--verbose",
 @


+\subsection{Cache control}
+
+Most commands speed up by caching the Canvas API object between runs.
+Occasionally you want to bypass the cache for a single command,
+for example right after creating or updating objects.
+
+<<add global options to argp>>=
+argp.add_argument("--no-cache",
+    action="store_true",
+    help="Do not read or write the persistent Canvas object cache")
+@
+
+
 \subsection{Logging in and setting up Canvas}

 Each subcommand will have its own module in the package.
@@ -445,15 +458,25 @@ Before creating a new Canvas object from scratch, we try to load it from the
 persistent cache.
 This can significantly speed up commands by reusing previously fetched course
 data, assignments, users, and submissions.
+If the cached object becomes stale (for example, right after creating a quiz),
+you can bypass the cache with [[--no-cache]].
+
+For example:
+\begin{verbatim}
+canvaslms --no-cache quizzes export -c "My Course" -a "My Quiz"
+\end{verbatim}
 <<try to load canvas from cache>>=
-canvas = canvaslms.cli.cache.load_canvas_cache(token, hostname)
+if args.no_cache:
+    canvas = None
+else:
+    canvas = canvaslms.cli.cache.load_canvas_cache(token, hostname)
 @

 After successfully executing a command, we save the Canvas object to the cache
 for future use.
 We only save if the canvas object was actually used (not [[None]]).
 <<save canvas to cache after command>>=
-if canvas:
+if canvas and not args.no_cache:
     canvaslms.cli.cache.save_canvas_cache(canvas, token, hostname)
 @
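The hunks above call canvaslms.cli.cache.load_canvas_cache and save_canvas_cache without showing their definitions. A minimal sketch of what such helpers could look like, assuming a pickle file keyed by hostname and a token hash; the cache directory and file layout here are assumptions for illustration, not the package's actual implementation:

    import hashlib
    import os
    import pickle

    # Assumed location; the real canvaslms.cli.cache may store this elsewhere.
    CACHE_DIR = os.path.expanduser("~/.cache/canvaslms")

    def _cache_path(token, hostname):
        # Key the file on hostname plus a token hash so that switching
        # accounts or servers never yields another session's objects.
        key = hashlib.sha256(f"{hostname}:{token}".encode()).hexdigest()
        return os.path.join(CACHE_DIR, f"canvas-{key}.pickle")

    def load_canvas_cache(token, hostname):
        """Return a cached Canvas object, or None if no usable cache exists."""
        try:
            with open(_cache_path(token, hostname), "rb") as cache_file:
                return pickle.load(cache_file)
        except (OSError, pickle.PickleError):
            return None  # missing or corrupt cache: caller builds a fresh Canvas

    def save_canvas_cache(canvas, token, hostname):
        """Persist the Canvas object for the next run."""
        os.makedirs(CACHE_DIR, exist_ok=True)
        with open(_cache_path(token, hostname), "wb") as cache_file:
            pickle.dump(canvas, cache_file)

Note that --no-cache skips both calls: a stale cache file is neither read nor overwritten, so a later run without the flag sees the old cached object again.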
canvaslms/cli/quizzes.nw
CHANGED

@@ -95,8 +95,13 @@ Here, [[attempt_limit]] of [[null]] means unlimited attempts. The
 [[score_to_keep]] can be [[highest]] (default) or [[latest]]. Setting
 [[cooling_period]] to [[true]] requires [[cooling_period_seconds]] to specify
 the wait time (3600 seconds = 1 hour). Using [[latest]] with a cooling period
-
-
+supports a workflow where students can keep trying and learn from their
+previous attempt.
+
+Note that some New Quizzes settings are not reliably settable via the API.
+In particular, in our testing (KTH Canvas, February 2026),
+[[multiple_attempts.build_on_last_attempt]] appears to be ignored on quiz
+creation/import via the New Quizzes API.

 \paragraph{Controlling what students see after submission.}
 To show students their score but hide the correct answers:
@@ -2574,6 +2579,9 @@ def create_command(config, canvas, args):
     if args.title:
         quiz_params['title'] = args.title

+    if quiz_type == "new":
+        <<warn about ignored New Quiz settings>>
+
     if 'title' not in quiz_params:
         canvaslms.cli.err(1, "Quiz title is required (use --title or include in JSON)")
@@ -2622,6 +2630,41 @@ to flatten these into the format the API expects:
 quiz[quiz_settings][multiple_attempts][cooling_period_seconds]=3600
 \end{verbatim}

+\paragraph{Some settings cannot be set via the API.}
+Canvas does not reliably apply every [[quiz_settings]] field during quiz
+creation.
+
+In our testing (KTH Canvas, February 2026),
+[[multiple_attempts.build_on_last_attempt]] appears to be \emph{read-only via the
+New Quizzes API}: it shows up in exports when set in the Canvas web UI, but both
+[[POST]] (create) and [[PATCH]] (update) silently drop the field.
+
+The official New Quizzes API documentation does not list
+[[multiple_attempts.build_on_last_attempt]] as a supported field.
+
+This means the export/create workflow cannot round-trip that setting purely via
+API calls. We keep the schema and examples so the exported JSON is faithful,
+but users should expect that field to be ignored on import unless they set it
+manually in the UI.
+
+When [[quizzes create]] sees this field in input JSON, we emit a warning to make
+the limitation explicit.
+
+<<warn about ignored New Quiz settings>>=
+try:
+    build_on_last_attempt = (quiz_params
+        .get('quiz_settings', {})
+        .get('multiple_attempts', {})
+        .get('build_on_last_attempt', None))
+except Exception:
+    build_on_last_attempt = None
+
+if build_on_last_attempt is not None:
+    canvaslms.cli.warn(
+        "New Quizzes: quiz_settings.multiple_attempts.build_on_last_attempt "
+        "is ignored by the API on some Canvas instances; set it in the web UI")
+@
+
 <<functions>>=
 def create_new_quiz(course, requester, quiz_params):
     """Creates a New Quiz via the New Quizzes API
@@ -2646,7 +2689,9 @@ def create_new_quiz(course, requester, quiz_params):
             _url="new_quizzes",
             **params
         )
-
+        data = response.json()
+
+        return data
     except Exception as e:
         canvaslms.cli.warn(f"Failed to create New Quiz: {e}")
         return None
@@ -2918,6 +2963,10 @@ NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA = {
         'default': 'highest',
         'description': 'Which score to keep: average, first, highest, or latest'
     },
+    'build_on_last_attempt': {
+        'default': False,
+        'description': 'Whether students continue from their previous attempt (may be ignored by the API on import)'
+    },
     'cooling_period': {
         'default': False,
         'description': 'Whether to require a waiting period between attempts'
@@ -6277,6 +6326,11 @@ The [[quiz_settings]] object within [[settings]] controls advanced quiz behavior
 \item[[[session_time_limit_in_seconds]]] Time limit in seconds
 \end{description}

+Some fields may be present in exports but ignored when creating a quiz via the
+API.
+In particular, see the note in \cref{sec:advanced-quiz-settings} about
+[[multiple_attempts.build_on_last_attempt]].
+
 <<constants>>=
 EXAMPLE_FULL_NEW_QUIZ_JSON = {
     "quiz_type": "new",
@@ -6320,6 +6374,7 @@ EXAMPLE_FULL_NEW_QUIZ_JSON = {
     "attempt_limit": False,
     "max_attempts": None,
     "score_to_keep": "latest",
+    "build_on_last_attempt": True,
     "cooling_period": True,
     "cooling_period_seconds": 3600
 },
@@ -7899,4 +7954,3 @@ def get_bank_items(requester, bank_id):
     # Item Banks API often isn't accessible - this is expected
     return []
 @
-
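The hunk above references flattening nested [[quiz_settings]] into Rails-style form keys such as quiz[quiz_settings][multiple_attempts][cooling_period_seconds]=3600. A minimal sketch of that flattening; the helper name flatten_params is hypothetical, not taken from the package:

    def flatten_params(obj, prefix="quiz"):
        """Flatten a nested dict into Rails-style bracketed form keys."""
        flat = {}
        for key, value in obj.items():
            name = f"{prefix}[{key}]"
            if isinstance(value, dict):
                # Recurse, extending the bracketed prefix one level.
                flat.update(flatten_params(value, prefix=name))
            else:
                flat[name] = value
        return flat

    # Reproduces the form key shown in the verbatim example above.
    settings = {"quiz_settings": {"multiple_attempts": {"cooling_period_seconds": 3600}}}
    assert flatten_params(settings) == {
        "quiz[quiz_settings][multiple_attempts][cooling_period_seconds]": 3600
    }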
canvaslms/cli/quizzes.py
CHANGED

@@ -143,6 +143,10 @@ NEW_QUIZ_MULTIPLE_ATTEMPTS_SCHEMA = {
         "default": "highest",
         "description": "Which score to keep: average, first, highest, or latest",
     },
+    "build_on_last_attempt": {
+        "default": False,
+        "description": "Whether students continue from their previous attempt (may be ignored by the API on import)",
+    },
     "cooling_period": {
         "default": False,
         "description": "Whether to require a waiting period between attempts",
@@ -645,6 +649,7 @@ EXAMPLE_FULL_NEW_QUIZ_JSON = {
     "attempt_limit": False,
     "max_attempts": None,
     "score_to_keep": "latest",
+    "build_on_last_attempt": True,
     "cooling_period": True,
     "cooling_period_seconds": 3600,
 },
@@ -2105,6 +2110,22 @@ def create_command(config, canvas, args):
     if args.title:
         quiz_params["title"] = args.title

+    if quiz_type == "new":
+        try:
+            build_on_last_attempt = (
+                quiz_params.get("quiz_settings", {})
+                .get("multiple_attempts", {})
+                .get("build_on_last_attempt", None)
+            )
+        except Exception:
+            build_on_last_attempt = None
+
+        if build_on_last_attempt is not None:
+            canvaslms.cli.warn(
+                "New Quizzes: quiz_settings.multiple_attempts.build_on_last_attempt "
+                "is ignored by the API on some Canvas instances; set it in the web UI"
+            )
+
     if "title" not in quiz_params:
         canvaslms.cli.err(1, "Quiz title is required (use --title or include in JSON)")
@@ -2160,7 +2181,9 @@ def create_new_quiz(course, requester, quiz_params):
         response = requester.request(
             method="POST", endpoint=endpoint, _url="new_quizzes", **params
         )
-
+        data = response.json()
+
+        return data
     except Exception as e:
         canvaslms.cli.warn(f"Failed to create New Quiz: {e}")
         return None
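To see when the new build_on_last_attempt warning fires, here is the check from create_command exercised standalone; warn is a stub standing in for canvaslms.cli.warn, and the quiz_params dict is sample input shaped like EXAMPLE_FULL_NEW_QUIZ_JSON:

    def warn(msg):
        # Stand-in for canvaslms.cli.warn in this sketch.
        print(f"warning: {msg}")

    quiz_params = {
        "title": "My Quiz",
        "quiz_settings": {
            "multiple_attempts": {
                "score_to_keep": "latest",
                "build_on_last_attempt": True,  # present, so the warning fires
            }
        },
    }

    try:
        build_on_last_attempt = (
            quiz_params.get("quiz_settings", {})
            .get("multiple_attempts", {})
            .get("build_on_last_attempt", None)
        )
    except Exception:
        # Hand-written JSON may put a non-dict where a dict is expected;
        # calling .get on a string raises AttributeError, which lands here.
        build_on_last_attempt = None

    if build_on_last_attempt is not None:
        warn(
            "New Quizzes: quiz_settings.multiple_attempts.build_on_last_attempt "
            "is ignored by the API on some Canvas instances; set it in the web UI"
        )

The check warns whenever the field is present, even if it is explicitly false, since the API drops it either way.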
canvaslms/cli/results.nw
CHANGED

@@ -569,33 +569,46 @@ The available summary functions and the default one can be found in
 \subsection{Producing a list of missing assignments}

 Now we want to look at the missing option.
-If the user supplies this option, we want to produce a list of missing
-assignments.
-
-
-
+If the user supplies this option, we want to produce a list of missing
+assignments instead of grades.
+
+Previously, this option could take an optional module name, creating a confusing
+interface where both [[--missing module]] and [[-S module]] could specify
+modules.
+We simplify this: [[--missing]] is now a simple flag, and the module specified
+via [[-S]] provides the [[missing_assignments]] function if it has one.
+If the [[-S]] module doesn't provide [[missing_assignments]], we fall back to
+the default implementation in this module.
+
+This design follows the principle that each grading module knows best what
+\enquote{missing} means for its grading policy:
+\begin{description}
+\item[Conjunctive modules] (all must pass): Any assignment without a passing
+grade is missing.
+\item[Disjunctive modules] (at least one must pass): Assignments are only
+\enquote{missing} if the student has NO passing grades at all.
+\end{description}
 <<add option for missing assignments>>=
-<<define [[default_missing_module]]>>
 results_parser.add_argument("--missing",
-
-    const=default_missing_module, default=None,
+    action="store_true",
     help="Produce a list of missing assignments instead of grades. "
-        "
-        "
-        "function `missing_assignments(assignments_list, users_list). "
+        "Uses the summary module's missing_assignments() if available, "
+        "otherwise uses the default implementation. "
     <<missing module behaviour>>
     "This option only has effect when working with assignment groups.")
 @

-
-
+Since [[--missing]] is now a simple flag, we use the summary module (specified
+via [[-S]]) to provide the [[missing_assignments]] function.
+If the summary module doesn't have one, we fall back to this module's default.
 <<load the correct missing module as [[missing]]>>=
 if args.missing:
-
-
-
-
-
+    <<load the correct summary module as [[summary]]>>
+    if hasattr(summary, 'missing_assignments'):
+        missing = summary
+    else:
+        import canvaslms.cli.results
+        missing = canvaslms.cli.results
 @

 Now, to the main part of the problem.
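A minimal sketch of the two policies just described, using plain string grades and hypothetical helper names (none of these are from the package):

    PASSING = {"A", "B", "C", "D", "E", "P"}

    def missing_conjunctive(assignments, grade_of):
        # All must pass: every assignment without a passing grade is missing.
        return [a for a in assignments if grade_of.get(a) not in PASSING]

    def missing_disjunctive(assignments, grade_of):
        # At least one must pass: nothing is missing once any grade passes.
        if any(grade_of.get(a) in PASSING for a in assignments):
            return []
        return list(assignments)

    grades = {"lab1": "P", "lab2": "F"}
    assert missing_conjunctive(["lab1", "lab2"], grades) == ["lab2"]
    assert missing_disjunctive(["lab1", "lab2"], grades) == []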
@@ -624,8 +637,18 @@ for user, assignment, reason in missing_results:
 \subsubsection{The default missing module}

 We'll now cover a default function for the missing assignments.
-We'll put it in the same module as the [[results]] CLI command, not in a
+We'll put it in the same module as the [[results]] CLI command, not in a
 separate module.
+
+The function accepts an optional [[is_passing]] callback that determines whether
+a grade counts as passing.
+This allows grading modules to reuse this shared implementation while providing
+their own definition of what \enquote{passing} means.
+For example, [[conjunctavg]] considers only A--E and P as passing, while
+[[conjunctavgsurvey]] also accepts numeric grades.
+
+If no [[is_passing]] callback is provided, we fall back to regex matching
+against [[passing_regex]].
 <<functions>>=
 def missing_assignments(assignments_list, users_list,
     <<optional [[missing_assignments]] args>>):
@@ -638,8 +661,6 @@ def missing_assignments(assignments_list, users_list,
     for assignment in assignments_list:
         <<skip if [[assignment]] is optional>>
         <<if [[assignment]] is missing for [[user]], yield it>>
-<<define [[default_missing_module]]>>=
-default_missing_module = "canvaslms.cli.results"
 @

 We'll add [[<<optional [[missing_assignments]] args>>]] to the function to make
@@ -663,10 +684,13 @@ We don't want to make it sound like an optional assignment is mandatory.
 @

 This gives us something like this.
+We check if the grade passes using either the [[is_passing]] callback (if
+provided) or the [[passing_regex]].
 <<if [[assignment]] is missing for [[user]], yield it>>=
 try:
     submission = assignment.get_submission(user)
 except canvasapi.exceptions.ResourceDoesNotExist:
+    yield user, assignment, "no submission exists"
     continue

 if submission is None:
@@ -677,45 +701,61 @@ elif submission.grade is None:
         f"submitted on {canvaslms.cli.utils.format_local_time(submission.submitted_at)}, but not graded"
     else:
         yield user, assignment, "not done"
-
-    if
-
-
-
-
-
-
+else:
+    <<check if grade passes using callback or regex>>
+    if not grade_passes:
+        if hasattr(submission, 'submitted_at') and submission.submitted_at and \
+                hasattr(submission, 'graded_at') and submission.graded_at and \
+                submission.submitted_at > submission.graded_at:
+            yield user, assignment, \
+                f"not a passing grade ({submission.grade}), resubmission not graded"
+        else:
+            yield user, assignment, \
+                f"not a passing grade ({submission.grade})"
+<<check if grade passes using callback or regex>>=
+if is_passing is not None:
+    grade_passes = is_passing(submission.grade)
+else:
+    grade_passes = filter_grade(submission.grade, passing_regex)
 @

-Now, we need that [[passing_regex]], so we can add it to the optional
+Now, we need that [[passing_regex]], so we can add it to the optional
 arguments, with a default value (same as above).
 We add the most common grading scales.
-But we also add number scores, which can be used for mandatory surveys and the
+But we also add number scores, which can be used for mandatory surveys and the
 like.
 <<optional [[missing_assignments]] args>>=
 passing_regex=PASSING_REGEX,
 @

-Next, if we want to be able to skip optional assignments, we can add an
+Next, if we want to be able to skip optional assignments, we can add an
 optional argument for that.
 <<optional [[missing_assignments]] args>>=
-optional_assignments
+optional_assignments=None,
+@
+
+Finally, we add the [[is_passing]] callback.
+If provided, this function takes a grade and returns [[True]] if it's passing.
+This lets grading modules define their own semantics for what constitutes a
+passing grade.
+<<optional [[missing_assignments]] args>>=
+is_passing=None,
 @

 This allows us to make the call to the function as follows.
-We check if it's the default function or not
-arguments from the CLI
+We check if it's the default function or not.
+If it is, we pass additional arguments from the CLI.
+If not, the module provides its own implementation with module-specific
+semantics (which may or may not use these arguments).
+
+We always pass [[passing_regex]] and [[optional_assignments]] since these are
+CLI-level concerns that any module might want to honor.
 <<let [[missing_results]] be the result of [[missing.missing_assignments]]>>=
-
-
-
-
-
-    )
-else:
-    missing_results = missing.missing_assignments(
-        assignments_list, users_list)
+missing_results = missing.missing_assignments(
+    assignments_list, users_list,
+    passing_regex=args.filter_grades,
+    optional_assignments=args.optional_assignments
+)
 @

 All that is missing now is the optional assignments argument for the parser.
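Putting the [[is_passing]] hook to use: a sketch of how a grading module might delegate to the shared implementation while supplying its own notion of passing, modeled on the conjunctavgsurvey description above (the regex and module body are assumptions, not the package's code):

    import re

    import canvaslms.cli.results

    def is_passing(grade):
        # A--E and P pass; bare numeric scores (e.g. survey points) also count.
        return grade is not None and \
            re.fullmatch(r"[A-EP]|\d+(\.\d+)?", str(grade)) is not None

    def missing_assignments(assignments_list, users_list, **kwargs):
        # Reuse the shared generator, overriding only the passing test;
        # passing_regex and optional_assignments pass through untouched.
        kwargs["is_passing"] = is_passing
        return canvaslms.cli.results.missing_assignments(
            assignments_list, users_list, **kwargs)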
canvaslms/cli/results.py
CHANGED

@@ -124,23 +124,25 @@ def summarize_assignment_groups(canvas, args):
     if args.missing:
         if args.missing:
             try:
-
+                summary = load_module(args.summary_module)
             except Exception as err:
                 canvaslms.cli.err(
                     1,
-                    f"Error loading
+                    f"Error loading summary module "
+                    f"'{args.summary_module}': {err}",
                 )
-
-
-
-
-
-
-
-
-
-
-
+            if hasattr(summary, "missing_assignments"):
+                missing = summary
+            else:
+                import canvaslms.cli.results
+
+                missing = canvaslms.cli.results
+            missing_results = missing.missing_assignments(
+                assignments_list,
+                users_list,
+                passing_regex=args.filter_grades,
+                optional_assignments=args.optional_assignments,
+            )
         for user, assignment, reason in missing_results:
             yield [
                 course.course_code,
@@ -198,23 +200,25 @@ def summarize_modules(canvas, args):
     if args.missing:
         if args.missing:
             try:
-
+                summary = load_module(args.summary_module)
             except Exception as err:
                 canvaslms.cli.err(
                     1,
-                    f"Error loading
+                    f"Error loading summary module "
+                    f"'{args.summary_module}': {err}",
                 )
-
-
-
-
-
-
-
-
-
-
-
+            if hasattr(summary, "missing_assignments"):
+                missing = summary
+            else:
+                import canvaslms.cli.results
+
+                missing = canvaslms.cli.results
+            missing_results = missing.missing_assignments(
+                assignments_list,
+                users_list,
+                passing_regex=args.filter_grades,
+                optional_assignments=args.optional_assignments,
+            )
         for user, assignment, reason in missing_results:
             yield [
                 course.course_code,
@@ -338,6 +342,7 @@ def missing_assignments(
     users_list,
     passing_regex=PASSING_REGEX,
     optional_assignments=None,
+    is_passing=None,
 ):
     """
     Returns tuples of missing assignments.
@@ -362,6 +367,7 @@ def missing_assignments(
     try:
         submission = assignment.get_submission(user)
     except canvasapi.exceptions.ResourceDoesNotExist:
+        yield user, assignment, "no submission exists"
         continue

     if submission is None:
@@ -371,17 +377,22 @@ def missing_assignments(
             yield user, assignment, f"submitted on {canvaslms.cli.utils.format_local_time(submission.submitted_at)}, but not graded"
         else:
             yield user, assignment, "not done"
-
-        if
-
-            and submission.submitted_at
-            and hasattr(submission, "graded_at")
-            and submission.graded_at
-            and submission.submitted_at > submission.graded_at
-        ):
-            yield user, assignment, f"not a passing grade ({submission.grade}), resubmission not graded"
+    else:
+        if is_passing is not None:
+            grade_passes = is_passing(submission.grade)
         else:
-
+            grade_passes = filter_grade(submission.grade, passing_regex)
+        if not grade_passes:
+            if (
+                hasattr(submission, "submitted_at")
+                and submission.submitted_at
+                and hasattr(submission, "graded_at")
+                and submission.graded_at
+                and submission.submitted_at > submission.graded_at
+            ):
+                yield user, assignment, f"not a passing grade ({submission.grade}), resubmission not graded"
+            else:
+                yield user, assignment, f"not a passing grade ({submission.grade})"


 def add_command(subp):
@@ -432,17 +443,12 @@ def add_command(subp):
     + """. \
 Or provide a path to your own Python file.""",
     )
-    default_missing_module = "canvaslms.cli.results"
     results_parser.add_argument(
         "--missing",
-
-        nargs="?",
-        const=default_missing_module,
-        default=None,
+        action="store_true",
         help="Produce a list of missing assignments instead of grades. "
-        "
-        "
-        "function `missing_assignments(assignments_list, users_list). "
+        "Uses the summary module's missing_assignments() if available, "
+        "otherwise uses the default implementation. "
        "The default module checks if all things are graded or submitted. "
        "This option only has effect when working with assignment groups.",
    )