PyKubeGrader 0.1.12.tar.gz → 0.1.14.tar.gz
- {pykubegrader-0.1.12/src/PyKubeGrader.egg-info → pykubegrader-0.1.14}/PKG-INFO +1 -1
- {pykubegrader-0.1.12 → pykubegrader-0.1.14/src/PyKubeGrader.egg-info}/PKG-INFO +1 -1
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/SOURCES.txt +2 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/build/api_notebook_builder.py +39 -12
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/build/build_folder.py +68 -9
- pykubegrader-0.1.14/src/pykubegrader/initialize.py +108 -0
- pykubegrader-0.1.14/src/pykubegrader/log_parser/parse.ipynb +214 -0
- pykubegrader-0.1.14/src/pykubegrader/log_parser/parse.py +184 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/telemetry.py +39 -9
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/validate.py +31 -22
- pykubegrader-0.1.12/src/pykubegrader/initialize.py +0 -65
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/.coveragerc +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/.github/workflows/main.yml +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/.gitignore +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/.readthedocs.yml +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/AUTHORS.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/CHANGELOG.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/CONTRIBUTING.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/LICENSE.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/README.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/Makefile +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/_static/Drexel_blue_Logo_square_Dark.png +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/_static/Drexel_blue_Logo_square_Light.png +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/_static/custom.css +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/authors.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/changelog.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/conf.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/contributing.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/index.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/license.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/readme.rst +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/docs/requirements.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/examples/.responses.json +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/examples/true_false.ipynb +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/pyproject.toml +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/setup.cfg +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/setup.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/dependency_links.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/entry_points.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/not-zip-safe +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/requires.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/top_level.txt +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/__init__.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/utils.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/__init__.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/multiple_choice.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/reading_question.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/select_many.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/student_info.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/style.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/true_false.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets/types_question.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets_base/__init__.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets_base/multi_select.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets_base/reading.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/widgets_base/select.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/tests/conftest.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/tests/import_test.py +0 -0
- {pykubegrader-0.1.12 → pykubegrader-0.1.14}/tox.ini +0 -0
{pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/PyKubeGrader.egg-info/SOURCES.txt
@@ -39,6 +39,8 @@ src/pykubegrader/utils.py
 src/pykubegrader/validate.py
 src/pykubegrader/build/api_notebook_builder.py
 src/pykubegrader/build/build_folder.py
+src/pykubegrader/log_parser/parse.ipynb
+src/pykubegrader/log_parser/parse.py
 src/pykubegrader/widgets/__init__.py
 src/pykubegrader/widgets/multiple_choice.py
 src/pykubegrader/widgets/reading_question.py
{pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/build/api_notebook_builder.py
@@ -1,23 +1,25 @@
+import ast
+import json
+import re
+import shutil
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Optional
-
+
 import nbformat
-import json
-import re
-import shutil
-import ast
 
 
 @dataclass
 class FastAPINotebookBuilder:
     notebook_path: str
     temp_notebook: Optional[str] = None
+    assignment_tag: Optional[str] = ""
 
     def __post_init__(self):
         self.root_path, self.filename = FastAPINotebookBuilder.get_filename_and_root(
             self.notebook_path
         )
+        self.total_points = 0
         self.run()
 
     def run(self):
@@ -34,6 +36,8 @@ class FastAPINotebookBuilder:
         self.add_api_code()
 
     def add_api_code(self):
+        self.compute_max_points_free_response()
+
         for i, (cell_index, cell_dict) in enumerate(self.assertion_tests_dict.items()):
             print(
                 f"Processing cell {cell_index + 1}, {i} of {len(self.assertion_tests_dict)}"
@@ -75,19 +79,32 @@ class FastAPINotebookBuilder:
                 ["earned_points = float(os.environ.get('EARNED_POINTS', 0))\n"]
             )
             updated_cell_source.extend(["earned_points += score\n"])
+
+            short_filename = self.filename.split(".")[0].replace("_temp", "")
             updated_cell_source.extend(
-                [
+                [
+                    f'log_variable("{short_filename}",f"{{score}}, {{max_score}}", question_id)\n'
+                ]
             )
             updated_cell_source.extend(
                 ["os.environ['EARNED_POINTS'] = str(earned_points)\n"]
             )
 
-            # cell_source = FastAPINotebookBuilder.insert_list_at_index(
-            #     cell_source, updated_cell_source, last_import_line_ind + 1
-            # )
-
             self.replace_cell_source(cell_index, updated_cell_source)
 
+    def compute_max_points_free_response(self):
+        for i, (cell_index, cell_dict) in enumerate(self.assertion_tests_dict.items()):
+            # gets the question name from the first cell to not double count
+            if cell_dict["is_first"]:
+                # get the max points for the question
+                max_question_points = sum(
+                    cell["points"]
+                    for cell in self.assertion_tests_dict.values()
+                    if cell["question"] == cell_dict["question"]
+                )
+
+                self.total_points += max_question_points
+
     def construct_first_cell_question_header(self, cell_dict):
         max_question_points = sum(
             cell["points"]
@@ -95,9 +112,19 @@ class FastAPINotebookBuilder:
             if cell["question"] == cell_dict["question"]
         )
 
-        first_cell_header = ["max_question_points = 
+        first_cell_header = [f"max_question_points = str({max_question_points})\n"]
         first_cell_header.append("earned_points = 0 \n")
         first_cell_header.append("os.environ['EARNED_POINTS'] = str(earned_points)\n")
+        first_cell_header.append(
+            f"os.environ['TOTAL_POINTS_FREE_RESPONSE'] = str({self.total_points})\n"
+        )
+
+        short_filename = self.filename.split(".")[0].replace("_temp", "")
+        first_cell_header.extend(
+            [
+                f'log_variable("total-points",f"{self.assignment_tag}, {short_filename}", {self.total_points})\n'
+            ]
+        )
 
         return first_cell_header
 
@@ -202,7 +229,7 @@ class FastAPINotebookBuilder:
             "    ensure_responses,\n",
             "    log_variable,\n",
             "    score_question,\n",
-            "
+            "    submit_question,\n",
             "    telemetry,\n",
             "    update_responses,\n",
             ")\n",
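For orientation, the footer that add_api_code() now injects into each grading cell behaves roughly like the following sketch. The score, max_score, and question_id values and the local log_variable stand-in are illustrative assumptions; in generated notebooks they come from the grading cell itself and from pykubegrader.telemetry.

import os

def log_variable(assignment_name, value, info_type):
    # Stand-in (assumed) for pykubegrader.telemetry.log_variable, which records
    # "<assignment_name>, <info_type>, <value>, <timestamp>" to the reduced log.
    print(f"{assignment_name}, {info_type}, {value}")

# Illustrative per-question values; real ones are produced by the assertion cell.
score, max_score, question_id = 0.5, 1.0, "question-operators-mario-dining-1"
short_filename = "17_operators_q"

earned_points = float(os.environ.get("EARNED_POINTS", 0))
earned_points += score
log_variable(short_filename, f"{score}, {max_score}", question_id)
os.environ["EARNED_POINTS"] = str(earned_points)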
{pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/build/build_folder.py
@@ -8,7 +8,9 @@ import shutil
 import subprocess
 import sys
 from dataclasses import dataclass, field
+
 import nbformat
+
 from .api_notebook_builder import FastAPINotebookBuilder
 
 
@@ -19,12 +21,14 @@ class NotebookProcessor:
 
     Attributes:
         root_folder (str): The root directory containing notebooks to process.
+        assignment_tag (str): Tag for the assignment being processed.
         solutions_folder (str): The directory where processed notebooks and solutions are stored.
        verbose (bool): Flag for verbose output to the console.
         log (bool): Flag to enable or disable logging.
     """
 
     root_folder: str
+    assignment_tag: str = field(default="")
     solutions_folder: str = field(init=False)
     verbose: bool = False
     log: bool = True
@@ -61,6 +65,8 @@ class NotebookProcessor:
         )  # Create a logger instance specific to this module
         self.logger = logger  # Assign the logger instance to the class for use in instance methods
 
+        self.total_point_log = {}
+
     def process_notebooks(self):
         """
         Recursively processes Jupyter notebooks in a given folder and its subfolders.
@@ -115,6 +121,12 @@ class NotebookProcessor:
                 # Process the notebook if it meets the criteria
                 self._process_single_notebook(notebook_path)
 
+        # Write the dictionary to a JSON file
+        with open(f"{self.solutions_folder}/total_points.json", "w") as json_file:
+            json.dump(
+                self.total_point_log, json_file, indent=4
+            )  # `indent=4` for pretty formatting
+
     def _print_and_log(self, message):
         """
         Logs a message and optionally prints it to the console.
@@ -167,6 +179,11 @@ class NotebookProcessor:
             None
         """
 
+        self.select_many_total_points = 0
+        self.mcq_total_points = 0
+        self.tf_total_points = 0
+        self.otter_total_points = 0
+
         print(f"Processing notebook: {notebook_path}")
 
         logging.info(f"Processing notebook: {notebook_path}")
@@ -211,7 +228,7 @@ class NotebookProcessor:
         if any([solution_path_1, solution_path_2, solution_path_3]) is not None:
             solution_path = solution_path_1 or solution_path_2 or solution_path_3
 
-            student_notebook = self.free_response_parser(
+            student_notebook, self.otter_total_points = self.free_response_parser(
                 temp_notebook_path, notebook_subfolder, notebook_name
             )
 
@@ -274,11 +291,32 @@ class NotebookProcessor:
                 os.path.join(questions_folder_jbook, question_file_name_sanitized),
             )
 
+        total_points = (
+            self.select_many_total_points
+            + self.mcq_total_points
+            + self.tf_total_points
+            + self.otter_total_points
+        )
+
+        self.total_point_log.update({notebook_name: total_points})
+
     def free_response_parser(
         self, temp_notebook_path, notebook_subfolder, notebook_name
     ):
         if self.has_assignment(temp_notebook_path, "# ASSIGNMENT CONFIG"):
             # TODO: This is hardcoded for now, but should be in a configuration file.
+            client_private_key = os.path.join(
+                os.path.dirname(temp_notebook_path),
+                ".client_private_key.bin",
+            )
+            server_public_key = os.path.join(
+                os.path.dirname(temp_notebook_path),
+                ".server_public_key.bin",
+            )
+
+            shutil.copy("./keys/.client_private_key.bin", client_private_key)
+            shutil.copy("./keys/.server_public_key.bin", server_public_key)
+
             client_private_key = os.path.join(
                 notebook_subfolder,
                 ".client_private_key.bin",
@@ -291,7 +329,9 @@ class NotebookProcessor:
             shutil.copy("./keys/.client_private_key.bin", client_private_key)
             shutil.copy("./keys/.server_public_key.bin", server_public_key)
 
-            FastAPINotebookBuilder(
+            out = FastAPINotebookBuilder(
+                notebook_path=temp_notebook_path, assignment_tag=self.assignment_tag
+            )
 
             debug_notebook = os.path.join(
                 notebook_subfolder,
@@ -336,10 +376,10 @@ class NotebookProcessor:
             os.remove(client_private_key)
             os.remove(server_public_key)
 
-            return student_notebook
+            return student_notebook, out.total_points
         else:
             NotebookProcessor.add_initialization_code(temp_notebook_path)
-            return None
+            return None, 0
 
     @staticmethod
     def remove_assignment_config_cells(notebook_path):
@@ -388,7 +428,9 @@ class NotebookProcessor:
 
             for data_ in data:
                 # Generate the solution file
-                self.generate_solution_MCQ(
+                self.mcq_total_points = self.generate_solution_MCQ(
+                    data, output_file=solution_path
+                )
 
                 question_path = (
                     f"{new_notebook_path.replace(".ipynb", "")}_questions.py"
@@ -428,7 +470,9 @@ class NotebookProcessor:
 
             # for data_ in data:
             # Generate the solution file
-            self.generate_solution_MCQ(
+            self.tf_total_points = self.generate_solution_MCQ(
+                data, output_file=solution_path
+            )
 
             question_path = f"{new_notebook_path.replace(".ipynb", "")}_questions.py"
 
@@ -464,7 +508,9 @@ class NotebookProcessor:
 
             # for data_ in data:
             # Generate the solution file
-            self.generate_solution_MCQ(
+            self.select_many_total_points = self.generate_solution_MCQ(
+                data, output_file=solution_path
+            )
 
             question_path = f"{new_notebook_path.replace(".ipynb", "")}_questions.py"
 
@@ -671,12 +717,14 @@ class NotebookProcessor:
         if hasattr(existing_module, "total_points"):
             total_points.extend(existing_module.total_points)
 
+        question_points = 0
         # Process new question data and update solutions and total_points
         for question_set in data_list:
             for key, question_data in question_set.items():
                 solution_key = f"q{question_data['question number']}-{question_data['subquestion_number']}-{key}"
                 solutions[solution_key] = question_data["solution"]
                 total_points.extend([question_data["points"]])
+                question_points += question_data["points"]
 
         # Write updated total_points and solutions back to the file
         with open(output_file, "w", encoding="utf-8") as f:
@@ -689,6 +737,8 @@ class NotebookProcessor:
                 f.write(f'    "{key}": {repr(solution)},\n')
             f.write("}\n")
 
+        return question_points
+
 def extract_MCQ(ipynb_file):
     """
     Extracts questions from markdown cells and organizes them as a nested dictionary,
@@ -1635,9 +1685,18 @@ def main():
     parser.add_argument(
         "root_folder", type=str, help="Path to the root folder to process"
     )
-    args = parser.parse_args()
 
-
+    parser.add_argument(
+        "--assignment-tag",
+        type=str,
+        help="assignment-tag used for calculating grades",
+        default="Reading-Week-X",
+    )
+
+    args = parser.parse_args()
+    processor = NotebookProcessor(
+        root_folder=args.root_folder, assignment_tag=args.assignment_tag
+    )
     processor.process_notebooks()
 
 
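A minimal sketch of driving the updated NotebookProcessor directly from Python, mirroring main() above; the folder path and tag value are illustrative, not taken from the package.

from pykubegrader.build.build_folder import NotebookProcessor

processor = NotebookProcessor(
    root_folder="./week1_readings",   # illustrative path
    assignment_tag="week1-readings",  # forwarded to FastAPINotebookBuilder
)
processor.process_notebooks()  # also writes total_points.json into the solutions folder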
pykubegrader-0.1.14/src/pykubegrader/initialize.py (new file)
@@ -0,0 +1,108 @@
+import os
+import shutil
+from pathlib import Path
+
+import panel as pn
+import requests
+from IPython import get_ipython
+
+from .telemetry import ensure_responses, log_variable, telemetry, update_responses
+
+
+def initialize_assignment(
+    name: str,
+    url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
+    verbose: bool = False,
+) -> dict:
+    """
+    Initialize an assignment in a Jupyter environment.
+
+    Args:
+        name (str): The name of the assignment.
+        url (str): The URL of the API server.
+        verbose (bool): Whether to print detailed initialization information.
+
+    Returns:
+        dict: The responses dictionary after initialization.
+
+    Raises:
+        Exception: If the environment is unsupported or initialization fails.
+    """
+
+    ipython = get_ipython()
+    if ipython is None:
+        raise Exception("Setup unsuccessful. Are you in a Jupyter environment?")
+
+    try:
+        move_dotfiles()
+        ipython.events.register("pre_run_cell", telemetry)
+    except Exception as e:
+        raise Exception(f"Failed to register telemetry: {e}")
+
+    jhub_user = os.getenv("JUPYTERHUB_USER")
+    if jhub_user is None:
+        raise Exception("Setup unsuccessful. Are you on JupyterHub?")
+
+    try:
+        seed = hash(jhub_user) % 1000
+        update_responses(key="seed", value=seed)
+
+        update_responses(key="assignment", value=name)
+        update_responses(key="jhub_user", value=jhub_user)
+
+        log_variable("Student Info", jhub_user, seed)
+
+        responses = ensure_responses()
+        # TODO: Add more checks here?
+        assert isinstance(responses.get("seed"), int), "Seed not set"
+
+        pn.extension(silent=True)
+
+        # Check connection to API server
+        params = {"jhub_user": responses["jhub_user"]}
+        response = requests.get(url, params=params)
+        if verbose:
+            print(f"status code: {response.status_code}")
+            data = response.json()
+            for k, v in data.items():
+                print(f"{k}: {v}")
+    except Exception as e:
+        raise Exception(f"Failed to initialize assignment: {e}")
+
+    print("Assignment successfully initialized")
+    if verbose:
+        print(f"Assignment: {name}")
+        print(f"Username: {jhub_user}")
+
+    return responses
+
+
+#
+# Helper functions
+#
+
+
+def move_dotfiles():
+    """
+    Move essential dotfiles from a fixed source directory to the current working directory.
+
+    Raises:
+        FileNotFoundError: If a source file is missing.
+        Exception: If copying fails for any other reason.
+    """
+    source_dir = Path("/opt/dotfiles")
+    target_dir = Path.cwd()
+
+    files_to_copy = [".client_private_key.bin", ".server_public_key.bin"]
+
+    for file_name in files_to_copy:
+        source_file = source_dir / file_name
+        target_file = target_dir / file_name
+
+        if not source_file.exists():
+            raise FileNotFoundError(f"Key file not found: {source_file}")
+
+        try:
+            shutil.copy2(source_file, target_file)
+        except Exception as e:
+            raise Exception(f"Failed to copy {source_file} to {target_file}: {e}")
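A typical first cell of a student notebook using the new module might look like this sketch; the assignment name is illustrative, and the call raises outside a Jupyter/JupyterHub environment.

from pykubegrader.initialize import initialize_assignment

responses = initialize_assignment("17_operators_q", verbose=True)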
pykubegrader-0.1.14/src/pykubegrader/log_parser/parse.ipynb (new file)
@@ -0,0 +1,214 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from .parse import LogParser\n",
+    "\n",
+    "# ----------------- Example usage -----------------\n",
+    "\n",
+    "if __name__ == \"__main__\":\n",
+    "    log_lines = [\n",
+    "        \"Student Info, 449, jovyan, 2024-12-27 20:55:12\",\n",
+    "        \"total-points, 4.0, week1-readings, 17_operators_q, 2024-12-27 20:55:23\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-1, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-2, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-3, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-4, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-5, 0, 1.0, 2024-12-27 20:55:23\",\n",
+    "        \"total-points, 4.0, week1-readings, 17_operators_q, 2024-12-27 20:55:42\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-1, 0.5, 0.5, 2024-12-27 20:55:42\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-2, 0.5, 0.5, 2024-12-27 20:55:42\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:42\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-4, 0.5, 0.5, 2024-12-27 20:55:42\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-5, 1.0, 1.0, 2024-12-27 20:55:42\",\n",
+    "        \"total-points, 2.0, week1-readings, 17_operators_q, 2024-12-27 20:55:47\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-1, 0.5, 0.5, 2024-12-27 20:55:47\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-2, 0, 0.5, 2024-12-27 20:55:47\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:47\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-4, 0, 0.5, 2024-12-27 20:55:47\",\n",
+    "        \"17_operators_q, question-operators-mario-dining-5, 0, 1.0, 2024-12-27 20:55:47\",\n",
+    "        \"19_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:47\",\n",
+    "    ]\n",
+    "\n",
+    "    parser = LogParser(log_lines=log_lines, week_tag=\"week1-readings\")\n",
+    "    parser.parse_logs()\n",
+    "    parser.calculate_total_scores()\n",
+    "    results = parser.get_results()\n",
+    "\n",
+    "    print(\"Student Information:\")\n",
+    "    print(results[\"student_information\"])\n",
+    "\n",
+    "    print(\"\\nAssignment Information:\")\n",
+    "    for assignment, info in results[\"assignment_information\"].items():\n",
+    "        print(f\"\\nAssignment Tag: {assignment}\")\n",
+    "        print(f\"Latest Timestamp: {info['latest_timestamp']}\")\n",
+    "        print(f\"Total Score: {info['total_score']}\")\n",
+    "        print(f\"Max Points: {info['max_points']}\")\n",
+    "\n",
+    "    print(\"\\nAssignment Scores:\")\n",
+    "    for assignment, score_info in results[\"assignment_scores\"].items():\n",
+    "        print(f\"\\nAssignment Tag: {assignment}\")\n",
+    "        print(f\"Total Score Earned: {score_info['total_score']}\")\n",
+    "        print(\"Questions:\")\n",
+    "        for q_tag, q_data in score_info[\"questions\"].items():\n",
+    "            print(f\" {q_tag}:\")\n",
+    "            print(f\" score_earned: {q_data['score_earned']}\")\n",
+    "            print(f\" score_possible: {q_data['score_possible']}\")\n",
+    "            print(f\" timestamp: {q_data['timestamp']}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "log_lines = [\n",
+    "    # Student Info\n",
+    "    \"Student Info, 449, jovyan, 2024-12-27 20:55:12\",\n",
+    "    # Week 1 Assignment: 17_operators_q\n",
+    "    \"total-points, 3.0, week1-readings, 17_operators_q, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-1, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-2, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-4, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-5, 1.0, 1.0, 2024-12-27 20:55:23\",\n",
+    "    # Week 1 Assignment: 18_advanced_q\n",
+    "    \"total-points, 4.0, week1-readings, 18_advanced_q, 2024-12-27 20:56:00\",\n",
+    "    \"18_advanced_q, question-advanced-problem-1, 1.0, 1.0, 2024-12-27 20:56:00\",\n",
+    "    \"18_advanced_q, question-advanced-problem-2, 1.0, 1.0, 2024-12-27 20:56:00\",\n",
+    "    \"18_advanced_q, question-advanced-problem-3, 0.5, 1.0, 2024-12-27 20:56:00\",\n",
+    "    \"18_advanced_q, question-advanced-problem-4, 0.5, 1.0, 2024-12-27 20:56:00\",\n",
+    "    # Week 2 Assignment: 19_concepts_q\n",
+    "    \"total-points, 5.0, week2-concepts, 19_concepts_q, 2024-12-28 20:57:00\",\n",
+    "    \"19_concepts_q, question-concepts-basic-1, 0.5, 1.0, 2024-12-28 20:57:00\",\n",
+    "    \"19_concepts_q, question-concepts-basic-2, 0.5, 1.0, 2024-12-28 20:57:00\",\n",
+    "    \"19_concepts_q, question-concepts-basic-3, 0.5, 1.0, 2024-12-28 20:57:00\",\n",
+    "    \"19_concepts_q, question-concepts-basic-4, 0.5, 1.0, 2024-12-28 20:57:00\",\n",
+    "    \"19_concepts_q, question-concepts-basic-5, 1.0, 1.0, 2024-12-28 20:57:00\",\n",
+    "]\n",
+    "\n",
+    "\n",
+    "parser = LogParser(log_lines=log_lines, week_tag=\"week1-readings\")\n",
+    "parser.parse_logs()\n",
+    "parser.calculate_total_scores()\n",
+    "results = parser.get_results()\n",
+    "\n",
+    "results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "log_lines = [\n",
+    "    # Student Info\n",
+    "    \"Student Info, 550, jovyan2, 2024-12-27 20:55:12\",\n",
+    "    # Week 1 Assignment: 17_operators_q (Initial Attempt)\n",
+    "    \"total-points, 3.0, week1-readings,17_operators_q, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-1, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-2, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-4, 0, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-5, 0, 1.0, 2024-12-27 20:55:23\",\n",
+    "    # Week 1 Assignment: 17_operators_q (Re-attempt)\n",
+    "    \"total-points, 3.0, week1-readings,17_operators_q, 2024-12-27 21:00:00\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-1, 0.5, 0.5, 2024-12-27 21:00:00\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-2, 0.5, 0.5, 2024-12-27 21:00:00\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-4, 0.5, 0.5, 2024-12-27 21:00:00\",\n",
+    "    # Week 1 Assignment: 18_challenging_q\n",
+    "    \"total-points, 5.0, week1-readings,18_challenging_q, 2024-12-27 21:05:00\",\n",
+    "    \"18_challenging_q, question-challenging-problem-1, 1.0, 1.0, 2024-12-27 21:05:00\",\n",
+    "    \"18_challenging_q, question-challenging-problem-2, 0.5, 1.0, 2024-12-27 21:05:00\",\n",
+    "    \"18_challenging_q, question-challenging-problem-3, 1.0, 1.0, 2024-12-27 21:05:00\",\n",
+    "    \"18_challenging_q, question-challenging-problem-4, 1.0, 1.0, 2024-12-27 21:05:00\",\n",
+    "    \"18_challenging_q, question-challenging-problem-5, 0, 1.0, 2024-12-27 21:05:00\",\n",
+    "]\n",
+    "\n",
+    "parser = LogParser(log_lines=log_lines, week_tag=\"week1-readings\")\n",
+    "parser.parse_logs()\n",
+    "parser.calculate_total_scores()\n",
+    "results = parser.get_results()\n",
+    "\n",
+    "results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "log_lines = [\n",
+    "    # Student Info\n",
+    "    \"Student Info, 660, jovyan3, 2024-12-27 20:55:12\",\n",
+    "    # Week 1 Assignment: skipped some questions\n",
+    "    \"total-points, 4.0, week1-readings,17_operators_q, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-1, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-3, 0.5, 0.5, 2024-12-27 20:55:23\",\n",
+    "    \"17_operators_q, question-operators-mario-dining-5, 1.0, 1.0, 2024-12-27 20:55:23\",\n",
+    "    \"total-points, 4.0, week1-readings,18_operators_q, 2024-12-27 20:55:23\",\n",
+    "    # Week 2 Assignment: all questions attempted\n",
+    "    \"total-points, 5.0, week2-math,20_math_q, 2024-12-28 20:55:23\",\n",
+    "    \"20_math_q, question-math-basic-1, 1.0, 1.0, 2024-12-28 20:55:23\",\n",
+    "    \"20_math_q, question-math-basic-2, 0.5, 1.0, 2024-12-28 20:55:23\",\n",
+    "    \"20_math_q, question-math-basic-3, 0.5, 1.0, 2024-12-28 20:55:23\",\n",
+    "    \"20_math_q, question-math-basic-4, 1.0, 1.0, 2024-12-28 20:55:23\",\n",
+    "    \"20_math_q, question-math-basic-5, 0.5, 1.0, 2024-12-28 20:55:23\",\n",
+    "    # Week 3 Assignment: some skipped, partial scores\n",
+    "    \"total-points, 4.0, week3-concepts,21_concepts_q, 2024-12-29 20:55:23\",\n",
+    "    \"21_concepts_q, question-concepts-basic-1, 0.5, 1.0, 2024-12-29 20:55:23\",\n",
+    "    \"21_concepts_q, question-concepts-basic-2, 0.5, 1.0, 2024-12-29 20:55:23\",\n",
+    "]\n",
+    "\n",
+    "parser = LogParser(log_lines=log_lines, week_tag=\"week1-readings\")\n",
+    "parser.parse_logs()\n",
+    "parser.calculate_total_scores()\n",
+    "results = parser.get_results()\n",
+    "\n",
+    "results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "engr131_dev",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
pykubegrader-0.1.14/src/pykubegrader/log_parser/parse.py (new file)
@@ -0,0 +1,184 @@
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional
+
+
+@dataclass
+class LogParser:
+    """
+    A class for parsing chronological logs and extracting information.
+    Handles both assignment info and question-level details.
+    """
+
+    log_lines: List[str]
+    week_tag: Optional[str] = None
+    student_info: Dict[str, str] = field(default_factory=dict)
+    assignments: Dict[str, Dict] = field(default_factory=dict)
+
+    def parse_logs(self):
+        """
+        Main method to parse logs and populate student_info and assignments.
+        """
+        unique_students = set()
+
+        self._find_all_questions()
+
+        for line in reversed(
+            self.log_lines
+        ):  # Process in reverse to get the most recent entries first
+            if self._is_student_info(line):
+                self._process_student_info(line, unique_students)
+            elif (
+                any(item in line for item in self.all_questions)
+                and "total-points" in line
+            ):
+                self._process_assignment_header(line)
+
+        # process assignment entries after all headers have been processed
+        for line in reversed(self.log_lines):
+            if (
+                any(item in line for item in self.all_questions)
+                and "total-points" not in line
+            ):
+                self._process_assignment_entry(line)
+
+    def _find_all_questions(self):
+        """
+        Finds all questions in the log_lines and returns a list of them.
+        """
+        questions = []
+        for line in self.log_lines:
+            if self.week_tag in line:
+                parts = line.split(",")
+                question_tag = parts[3].strip()
+                if question_tag not in questions:
+                    questions.append(question_tag)
+        self.all_questions = questions
+
+    def _is_student_info(self, line: str) -> bool:
+        """
+        Checks if the line contains student information.
+        """
+        return line.startswith("Student Info")
+
+    def _process_student_info(self, line: str, unique_students: set):
+        """
+        Processes a line containing student information.
+        Raises an error if multiple unique students are found.
+        """
+        parts = line.split(", ")
+        # Example: "Student Info, 790, jovyan, 2024-12-27 19:40:10"
+        student_name = parts[2].strip()
+        unique_students.add(student_name)
+
+        if len(unique_students) > 1:
+            raise ValueError(
+                f"Error: Multiple unique student names found: {unique_students}"
+            )
+
+        # Only set student_info once
+        if not self.student_info:
+            self.student_info = {
+                "student_id": parts[1].strip(),
+                "username": student_name,
+                "timestamp": parts[3].strip(),
+            }
+
+    def _process_assignment_header(self, line: str):
+        parts = line.split(",")
+        assignment_tag = parts[0].strip()
+        if assignment_tag.startswith("total-points"):
+            # Handle total-points lines as assignment info
+            total_points_value = self._extract_total_points(parts)
+            timestamp = parts[-1].strip()
+            notebook_name = parts[3].strip()
+
+            if notebook_name not in self.assignments:
+                self.assignments[notebook_name] = {
+                    "max_points": total_points_value,
+                    "notebook": notebook_name,
+                    "assignment": self.week_tag,
+                    "total_score": 0.0,
+                    "latest_timestamp": timestamp,
+                    "questions": {},  # Ensure 'questions' key is initialized
+                }
+            elif self.assignments[notebook_name]["latest_timestamp"] < timestamp:
+                self.assignments[notebook_name]["max_points"] = total_points_value
+                self.assignments[notebook_name]["latest_timestamp"] = timestamp
+
+    def _process_assignment_entry(self, line: str):
+        """
+        Processes a line containing an assignment entry.
+        Adds it to the assignments dictionary.
+        """
+        parts = line.split(",")
+        assignment_tag = parts[0].strip()
+        question_tag = parts[1].strip()
+        score_earned = float(parts[2].strip()) if len(parts) > 2 else 0.0
+        score_possible = float(parts[3].strip()) if len(parts) > 3 else 0.0
+        timestamp = parts[-1].strip()
+
+        # Ensure assignment entry exists
+        if assignment_tag not in self.assignments:
+            self.assignments[assignment_tag] = {
+                "questions": {},
+                "total_score": 0.0,
+                "latest_timestamp": timestamp,
+            }
+
+        # Add or update the question with the most recent timestamp
+        questions = self.assignments[assignment_tag]["questions"]
+        if (
+            question_tag not in questions
+            or timestamp > questions[question_tag]["timestamp"]
+        ):
+            questions[question_tag] = {
+                "score_earned": score_earned,
+                "score_possible": score_possible,
+                "timestamp": timestamp,
+            }
+
+        # Update the latest timestamp if this one is more recent
+        if timestamp > self.assignments[assignment_tag]["latest_timestamp"]:
+            self.assignments[assignment_tag]["latest_timestamp"] = timestamp
+
+    def _extract_total_points(self, parts: List[str]) -> Optional[float]:
+        """
+        Extracts the total-points value from the parts array of a total-points line.
+        """
+        try:
+            return float(parts[1].strip())
+        except (ValueError, IndexError):
+            return None
+
+    def calculate_total_scores(self):
+        """
+        Calculates total scores for each assignment by summing the 'score_earned'
+        of its questions, and sets 'total_points' if it was not specified.
+        """
+        for assignment, data in self.assignments.items():
+            # Sum of all question score_earned
+            total_score = sum(q["score_earned"] for q in data["questions"].values())
+            data["total_score"] = total_score
+
+    def get_results(self) -> Dict[str, Dict]:
+        """
+        Returns the parsed results as a hierarchical dictionary with three sections:
+        """
+        return {
+            "student_information": self.student_info,
+            "assignment_information": {
+                assignment: {
+                    "latest_timestamp": data["latest_timestamp"],
+                    "total_score": data["total_score"],
+                    "max_points": data.get("max_points", 0.0),
+                }
+                for assignment, data in self.assignments.items()
+            },
+            "assignment_scores": {
+                assignment: {
+                    "questions": data["questions"],
+                    "total_score": data["total_score"],
+                }
+                for assignment, data in self.assignments.items()
+            },
+        }
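For a standalone run outside the example notebook above, a minimal sketch using the installed package path; the log lines are copied from the example notebook.

from pykubegrader.log_parser.parse import LogParser

log_lines = [
    "Student Info, 449, jovyan, 2024-12-27 20:55:12",
    "total-points, 4.0, week1-readings, 17_operators_q, 2024-12-27 20:55:42",
    "17_operators_q, question-operators-mario-dining-1, 0.5, 0.5, 2024-12-27 20:55:42",
]

parser = LogParser(log_lines=log_lines, week_tag="week1-readings")
parser.parse_logs()
parser.calculate_total_scores()
print(parser.get_results())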
{pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/telemetry.py
@@ -11,8 +11,28 @@ from IPython.core.interactiveshell import ExecutionInfo
 from requests import Response
 from requests.auth import HTTPBasicAuth
 
-#
-logging.
+# Logger for .output_code.log
+logger_code = logging.getLogger("code_logger")
+logger_code.setLevel(logging.INFO)
+
+file_handler_code = logging.FileHandler(".output_code.log")
+file_handler_code.setLevel(logging.INFO)
+
+# formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+# file_handler_code.setFormatter(formatter)
+
+logger_code.addHandler(file_handler_code)
+
+# Logger for .output_reduced.log
+logger_reduced = logging.getLogger("reduced_logger")
+logger_reduced.setLevel(logging.INFO)
+
+file_handler_reduced = logging.FileHandler(".output_reduced.log")
+file_handler_reduced.setLevel(logging.INFO)
+
+# file_handler_reduced.setFormatter(formatter)
+
+logger_reduced.addHandler(file_handler_reduced)
 
 #
 # Local functions
@@ -51,20 +71,30 @@ def ensure_responses() -> dict:
     return responses
 
 
-def log_encrypted(message: str) -> None:
+def log_encrypted(logger: logging.Logger, message: str) -> None:
+    """
+    Logs an encrypted version of the given message using the provided logger.
+
+    Args:
+        logger (object): The logger object used to log the encrypted message.
+        message (str): The message to be encrypted and logged.
+
+    Returns:
+        None
+    """
     encrypted_b64 = encrypt_to_b64(message)
-
+    logger.info(f"Encrypted Output: {encrypted_b64}")
 
 
-def log_variable(value, info_type) -> None:
+def log_variable(assignment_name, value, info_type) -> None:
     timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    message = f"{info_type}, {value}, {timestamp}"
-    log_encrypted(message)
+    message = f"{assignment_name}, {info_type}, {value}, {timestamp}"
+    log_encrypted(logger_reduced, message)
 
 
 def telemetry(info: ExecutionInfo) -> None:
     cell_content = info.raw_cell
-    log_encrypted(f"code run: {cell_content}")
+    log_encrypted(logger_code, f"code run: {cell_content}")
 
 
 def update_responses(key: str, value) -> dict:
@@ -119,7 +149,7 @@ def score_question(
     return res
 
 
-def
+def submit_question(
     student_email: str,
     term: str,
     assignment: str,
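With the new signature, log_variable(assignment_name, value, info_type) writes an encrypted line to .output_reduced.log whose plaintext has the form "<assignment_name>, <info_type>, <value>, <timestamp>", which is the format the log parser above consumes. A sketch with illustrative values:

from pykubegrader.telemetry import log_variable

# Plaintext of the logged line will look like
# "17_operators_q, question-operators-mario-dining-5, 1.0, 1.0, 2024-12-27 20:55:42"
log_variable("17_operators_q", "1.0, 1.0", "question-operators-mario-dining-5")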
{pykubegrader-0.1.12 → pykubegrader-0.1.14}/src/pykubegrader/validate.py
@@ -32,28 +32,7 @@ def validate_logfile(
     # Generate box from private and public keys
     key_box = generate_keys()
 
-
-    encrypted_lines = logfile.readlines()
-
-    decrypted_log: list[str] = []
-    for line in encrypted_lines:
-        if "Encrypted Output: " in line:
-            trimmed = line.split("Encrypted Output: ")[1].strip()
-            decoded = base64.b64decode(trimmed)
-            decrypted = key_box.decrypt(decoded).decode()
-            decrypted_log.append(decrypted)
-
-    # Decoding the log file
-    # data_: list[str] = drexel_jupyter_logger.decode_log_file(self.filepath, key=key)
-    # _loginfo = str(decrypted_log)
-
-    # Where possible, we should work with this reduced list of relevant entries
-    # Here we take only lines with student info or question scores
-    log_reduced = [
-        entry
-        for entry in decrypted_log
-        if re.match(r"info,", entry) or re.match(r"q\d+_\d+,", entry)
-    ]
+    decrypted_log, log_reduced = read_logfile(filepath, key_box)
 
     # For debugging; to be commented out
     # with open(".output_reduced.log", "w") as f:
@@ -246,6 +225,36 @@ def validate_logfile(
     submission_message(response)
 
 
+def read_logfile(filepath, key_box=None) -> tuple[list[str], list[str]]:
+    if key_box is None:
+        key_box = generate_keys()
+
+    with open(filepath, "r") as logfile:
+        encrypted_lines = logfile.readlines()
+
+    decrypted_log: list[str] = []
+    for line in encrypted_lines:
+        if "Encrypted Output: " in line:
+            trimmed = line.split("Encrypted Output: ")[1].strip()
+            decoded = base64.b64decode(trimmed)
+            decrypted = key_box.decrypt(decoded).decode()
+            decrypted_log.append(decrypted)
+
+    # Decoding the log file
+    # data_: list[str] = drexel_jupyter_logger.decode_log_file(self.filepath, key=key)
+    # _loginfo = str(decrypted_log)
+
+    # Where possible, we should work with this reduced list of relevant entries
+    # Here we take only lines with student info or question scores
+    log_reduced = [
+        entry
+        for entry in decrypted_log
+        if re.match(r"info,", entry) or re.match(r"q\d+_\d+,", entry)
+    ]
+
+    return decrypted_log, log_reduced
+
+
 #
 # Helper functions
 #
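A sketch of the refactored helper in use; the path is illustrative, and the file is expected to contain the "Encrypted Output: ..." lines written by the telemetry loggers.

from pykubegrader.validate import read_logfile

decrypted_log, log_reduced = read_logfile(".output_reduced.log")
print(len(decrypted_log), "decrypted entries;", len(log_reduced), "info/score entries")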
pykubegrader-0.1.12/src/pykubegrader/initialize.py (removed)
@@ -1,65 +0,0 @@
-import json
-import os
-from typing import Optional
-
-import panel as pn
-from IPython import get_ipython
-import requests
-from .telemetry import telemetry, update_responses, ensure_responses
-
-
-def initialize_assignment(
-    name: str,
-    verbose: Optional[bool] = False,
-    url: Optional[str] = "https://engr-131-api.eastus.cloudapp.azure.com/",
-) -> None:
-    ipython = get_ipython()
-    if ipython is None:
-        print("Setup unsuccessful. Are you in a Jupyter environment?")
-        return
-
-    try:
-        ipython.events.register("pre_run_cell", telemetry)
-    except TypeError as e:
-        print(f"Failed to register telemetry: {e}")
-        return
-
-    jhub_user = os.getenv("JUPYTERHUB_USER")
-    if jhub_user is None:
-        print("Setup unsuccessful. Are you on JupyterHub?")
-        return
-
-    try:
-        seed = hash(jhub_user) % 1000
-        update_responses(key="seed", value=seed)
-        update_responses(key="assignment", value=name)
-        update_responses(key="jhub_user", value=jhub_user)
-
-    except (TypeError, json.JSONDecodeError) as e:
-        print(f"Failed to initialize assignment: {e}")
-        return
-
-    # extract responses
-    responses = ensure_responses()
-
-    # TODO: Add more checks here??
-    assert isinstance(responses.get("seed"), int), "valid seed not found in responses"
-
-    pn.extension(silent=True)
-
-    if verbose:
-        print("Assignment successfully initialized")
-        print(f"Assignment: {name}")
-        print(f"Username: {jhub_user}")
-
-    # Checks connectivity to the API
-    params = {"jhub_user": responses["jhub_user"]}
-    response = requests.get(url, params=params)
-    if verbose:
-        print(f"status code: {response.status_code}")
-        data = response.json()
-        for k, v in data.items():
-            print(f"{k}: {v}")
-
-    print("Assignment successfully initialized")
-    return responses