PyKubeGrader 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/METADATA +1 -1
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/RECORD +11 -10
- pykubegrader/build/api_notebook_builder.py +492 -0
- pykubegrader/build/build_folder.py +207 -81
- pykubegrader/initialize.py +37 -11
- pykubegrader/telemetry.py +26 -6
- pykubegrader/validate.py +88 -76
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/LICENSE.txt +0 -0
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/WHEEL +0 -0
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/entry_points.txt +0 -0
- {PyKubeGrader-0.1.8.dist-info → PyKubeGrader-0.1.10.dist-info}/top_level.txt +0 -0
@@ -1,9 +1,10 @@
|
|
1
1
|
pykubegrader/__init__.py,sha256=AoAkdfIjDDZGWLlsIRENNq06L9h46kDGBIE8vRmsCfg,311
|
2
|
-
pykubegrader/initialize.py,sha256=
|
3
|
-
pykubegrader/telemetry.py,sha256=
|
2
|
+
pykubegrader/initialize.py,sha256=SREGdFK8kqnk8RYTtaTO5LKZiK2Y4YxBNjrALl5jaNo,1956
|
3
|
+
pykubegrader/telemetry.py,sha256=Zkap_ml7hWz7akBVzE-zqri_K-AkBSTEhv0IE3VM9iY,3943
|
4
4
|
pykubegrader/utils.py,sha256=dKw6SyRYU3DWRgD3xER7wq-C9e1daWPkqr901LpcwiQ,642
|
5
|
-
pykubegrader/validate.py,sha256=
|
6
|
-
pykubegrader/build/
|
5
|
+
pykubegrader/validate.py,sha256=PeT6Gx4ZOQYyogG4nb3TD9YfEkAmf__fR1aOOB3ZBWo,10705
|
6
|
+
pykubegrader/build/api_notebook_builder.py,sha256=vtRA9lDCc-PXN-y56wEXS-h_aKXDU_ChS-1_ooiKCOk,18975
|
7
|
+
pykubegrader/build/build_folder.py,sha256=HkyzQAPUJBB4VkRLllllyy1eqbe2EMa-NREVy8qbSeg,63417
|
7
8
|
pykubegrader/widgets/__init__.py,sha256=s3ky3eJDa1RedFVdpKxmqv6mHBYpOSL9Z6qThSH9cbs,303
|
8
9
|
pykubegrader/widgets/multiple_choice.py,sha256=NjD3-uXSnibpUQ0mO3hRp_O-rynFyl0Dz6IXE4tnCRI,2078
|
9
10
|
pykubegrader/widgets/reading_question.py,sha256=y30_swHwzH8LrT8deWTnxctAAmR8BSxTlXAqMgUrAT4,3031
|
@@ -16,9 +17,9 @@ pykubegrader/widgets_base/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-
|
|
16
17
|
pykubegrader/widgets_base/multi_select.py,sha256=u50IOhYxC_S_gq31VnFPLdbNajk_SUWhaqlMSJxhqVQ,3439
|
17
18
|
pykubegrader/widgets_base/reading.py,sha256=4uTLmlPzCwxVzufFhPjM7W19uMGguRb6y4eAV3x-zAc,5314
|
18
19
|
pykubegrader/widgets_base/select.py,sha256=h1S5StcbX8S-Wiyga4fVDhPbVvRxffwaqyVbiiuInRs,2743
|
19
|
-
PyKubeGrader-0.1.
|
20
|
-
PyKubeGrader-0.1.
|
21
|
-
PyKubeGrader-0.1.
|
22
|
-
PyKubeGrader-0.1.
|
23
|
-
PyKubeGrader-0.1.
|
24
|
-
PyKubeGrader-0.1.
|
20
|
+
PyKubeGrader-0.1.10.dist-info/LICENSE.txt,sha256=YTp-Ewc8Kems8PJEE27KnBPFnZSxoWvSg7nnknzPyYw,1546
|
21
|
+
PyKubeGrader-0.1.10.dist-info/METADATA,sha256=3lNx_8olR4Gf2iX_Sy8jzbx-xZzrgv_YmhB-eqo9CXU,2665
|
22
|
+
PyKubeGrader-0.1.10.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
|
23
|
+
PyKubeGrader-0.1.10.dist-info/entry_points.txt,sha256=Kd4Bh-i3hc4qlnLU1p0nc8yPw9cC5AQGOtkk2eLGnQw,78
|
24
|
+
PyKubeGrader-0.1.10.dist-info/top_level.txt,sha256=e550Klfze6higFxER1V62fnGOcIgiKRbsrl9CC4UdtQ,13
|
25
|
+
PyKubeGrader-0.1.10.dist-info/RECORD,,
|
@@ -0,0 +1,492 @@
|
|
1
|
+
from dataclasses import dataclass
|
2
|
+
from pathlib import Path
|
3
|
+
from typing import Optional
|
4
|
+
import json
|
5
|
+
import nbformat
|
6
|
+
import json
|
7
|
+
import re
|
8
|
+
import shutil
|
9
|
+
|
10
|
+
|
11
|
+
@dataclass
class FastAPINotebookBuilder:
    """Rewrites an Otter-style notebook so its test cells score themselves and
    report results through the pykubegrader telemetry API.

    Construction immediately parses the notebook's test cells
    (:meth:`question_dict`) and injects grading/telemetry code into each of
    them (:meth:`add_api_code`).

    Attributes:
        notebook_path: Path to the ``.ipynb`` file to process.
        temp_notebook: When not ``None``, the builder copies the notebook to a
            ``*_temp.ipynb`` sibling and edits the copy; otherwise the
            notebook is modified in place. After ``run()`` this holds the path
            actually being edited.
    """

    notebook_path: str
    temp_notebook: Optional[str] = None

    def __post_init__(self):
        self.root_path, self.filename = FastAPINotebookBuilder.get_filename_and_root(
            self.notebook_path
        )
        self.run()

    def run(self):
        """Sets up the working copy, parses the tests, and injects API code."""
        # Working on a "_temp" copy leaves the original untouched -- here for
        # easy debugging.
        if self.temp_notebook is not None:
            shutil.copy(
                self.notebook_path, self.notebook_path.replace(".ipynb", "_temp.ipynb")
            )
            self.temp_notebook = self.notebook_path.replace(".ipynb", "_temp.ipynb")
        else:
            self.temp_notebook = self.notebook_path

        self.assertion_tests_dict = self.question_dict()
        self.add_api_code()

    def add_api_code(self):
        """Injects telemetry imports, score bookkeeping, and logging into every
        test cell found by :meth:`question_dict`."""
        for i, (cell_index, cell_dict) in enumerate(self.assertion_tests_dict.items()):
            print(
                f"Processing cell {cell_index + 1}, {i} of {len(self.assertion_tests_dict)}"
            )

            cell = self.get_cell(cell_index)
            cell_source = FastAPINotebookBuilder.add_import_statements_to_tests(
                cell["source"]
            )

            last_import_line_ind = FastAPINotebookBuilder.find_last_import_line(
                cell_source
            )

            updated_cell_source = []
            updated_cell_source.extend(cell_source[: last_import_line_ind + 1])
            # Only the first cell of a question initializes the running totals.
            if cell_dict["is_first"]:
                updated_cell_source.extend(
                    self.construct_first_cell_question_header(cell_dict)
                )
            updated_cell_source.extend(["\n"])
            updated_cell_source.extend(
                FastAPINotebookBuilder.construct_question_info(cell_dict)
            )
            updated_cell_source.extend(
                FastAPINotebookBuilder.construct_update_responses(cell_dict)
            )

            updated_cell_source.extend(cell_source[last_import_line_ind + 1 :])
            updated_cell_source.extend(["\n"])

            updated_cell_source.extend(
                FastAPINotebookBuilder.construct_graders(cell_dict)
            )
            updated_cell_source.extend(["\n"])
            # Accumulate earned points across cells via the environment so the
            # total survives independent cell executions.
            updated_cell_source.extend(
                ["earned_points = float(os.environ.get('EARNED_POINTS', 0))\n"]
            )
            updated_cell_source.extend(["earned_points += score\n"])
            updated_cell_source.extend(
                [f'log_variable(f"{{score}}, {{max_score}}", question_id)\n']
            )
            updated_cell_source.extend(
                ["os.environ['EARNED_POINTS'] = str(earned_points)\n"]
            )

            self.replace_cell_source(cell_index, updated_cell_source)

    def construct_first_cell_question_header(self, cell_dict):
        """Builds the lines that initialize a question's point totals.

        Args:
            cell_dict (dict): Metadata for one test cell (needs ``question``).

        Returns:
            list[str]: Source lines setting ``max_question_points`` (summed
            over all cells of the same question) and zeroing the totals.
        """
        max_question_points = sum(
            cell["points"]
            for cell in self.assertion_tests_dict.values()
            if cell["question"] == cell_dict["question"]
        )

        first_cell_header = ["max_question_points = " + str(max_question_points) + "\n"]
        first_cell_header.append("earned_points = 0 \n")
        first_cell_header.append("os.environ['EARNED_POINTS'] = str(earned_points)\n")

        return first_cell_header

    @staticmethod
    def construct_update_responses(cell_dict):
        """Builds ``update_responses`` calls for each logged variable.

        Args:
            cell_dict (dict): Metadata with a ``logging_variables`` list.

        Returns:
            list[str]: One ``responses = update_responses(...)`` line per
            logged variable. (The generated code relies on ``question_id``
            being defined earlier in the cell by ``construct_question_info``.)
        """
        update_responses = []
        for logging_variable in cell_dict["logging_variables"]:
            update_responses.append(
                f"responses = update_responses(question_id, {logging_variable})\n"
            )

        return update_responses

    @staticmethod
    def split_list_at_marker(input_list, marker="# END TEST CONFIG"):
        """
        Splits a list into two parts at the specified marker string.

        Args:
            input_list (list): The list to split.
            marker (str): The string at which to split the list.

        Returns:
            tuple: A tuple containing two lists. The first list contains the
                elements up to and including the marker; the second contains
                the elements after the marker, skipping the one element
                immediately following it (historically a blank line).
        """
        if marker in input_list:
            index = input_list.index(marker)
            return input_list[: index + 1], input_list[index + 2 :]
        else:
            # Marker absent: everything before, nothing after.
            return (input_list, [])

    @staticmethod
    def construct_graders(cell_dict):
        """Builds the scoring ``if`` statement from a cell's assertions.

        Args:
            cell_dict (dict): Metadata with ``assertions`` and ``points``.

        Returns:
            list[str]: Lines awarding ``points`` when every assertion holds.
        """
        # Bug fix: an empty assertion list previously generated the invalid
        # statement "if :"; fall back to a condition that always passes.
        condition = " and ".join(f"({test})" for test in cell_dict["assertions"])
        if not condition:
            condition = "True"

        added_code = ["if " + condition + ":\n"]
        added_code.append(f"    score = {cell_dict['points']}\n")

        return added_code

    @staticmethod
    def construct_question_info(cell_dict):
        """Builds the per-cell identification lines.

        Args:
            cell_dict (dict): Metadata with ``question``, ``test_number``,
                and ``points``.

        Returns:
            list[str]: Lines defining ``question_id`` (``<question>-<n>``),
            ``max_score``, and a zeroed ``score``.
        """
        question_id = cell_dict["question"] + "-" + str(cell_dict["test_number"])

        question_info = [f'question_id = "{question_id}"' + "\n"]
        question_info.append(f'max_score = {cell_dict["points"]}\n')
        question_info.append("score = 0\n")

        return question_info

    @staticmethod
    def insert_list_at_index(
        original_list, insert_list, index, line_break=True, inplace_line_break=True
    ):
        """
        Inserts a list into another list at a specific index.

        Args:
            original_list (list): The original list.
            insert_list (list): The list to insert.
            index (int): The position at which to insert the new list.
            line_break (bool): Surround the insertion with blank lines.
            inplace_line_break (bool): Append "\\n" to each inserted element.

        Returns:
            list: A single combined list with the second list inserted at the
            specified index.
        """
        if inplace_line_break:
            insert_list = [s + "\n" for s in insert_list]

        if line_break:
            if inplace_line_break:
                insert_list = ["\n"] + insert_list
            else:
                insert_list = ["\n"] + insert_list + ["\n"]

        return original_list[:index] + insert_list + original_list[index:]

    @staticmethod
    def add_import_statements_to_tests(cell_source):
        """
        Adds the telemetry import statements to a test cell.

        The imports are inserted on the line after the "# END TEST CONFIG"
        marker, preceded by a blank line for readability.

        Args:
            cell_source (list): The cell's source lines.

        Returns:
            list: The (mutated) source lines. When the marker is absent the
            source is returned unchanged -- the original implementation fell
            off the end and returned ``None``, crashing the caller.
        """
        end_test_config_line = "# END TEST CONFIG"

        # Imports to add
        imports = [
            "from pykubegrader.telemetry import (\n",
            "    ensure_responses,\n",
            "    log_variable,\n",
            "    score_question,\n",
            "    submit_question_new,\n",
            "    telemetry,\n",
            "    update_responses,\n",
            ")\n",
            "import os\n",
        ]

        for i, line in enumerate(cell_source):
            if end_test_config_line in line:
                # Insert the imports immediately after the current line.
                cell_source[i + 1 : i + 1] = ["\n"] + imports
                return cell_source

        # Bug fix: marker not found -- return the source unchanged instead of
        # implicitly returning None.
        return cell_source

    def extract_first_cell(self):
        """Returns the first cell of the working notebook, or ``None`` if the
        notebook has no cells."""
        with open(self.temp_notebook, "r", encoding="utf-8") as f:
            notebook = json.load(f)
        if "cells" in notebook and len(notebook["cells"]) > 0:
            return notebook["cells"][0]
        else:
            return None

    @staticmethod
    def get_filename_and_root(path):
        """Resolves *path* and returns its ``(parent_directory, filename)``."""
        path_obj = Path(path).resolve()  # Resolve to an absolute path
        return path_obj.parent, path_obj.name

    def get_cell(self, cell_index):
        """Returns the cell at *cell_index* from the working notebook, or
        ``None`` when the index is out of range."""
        with open(self.temp_notebook, "r", encoding="utf-8") as f:
            notebook = json.load(f)
        if "cells" in notebook and len(notebook["cells"]) > cell_index:
            return notebook["cells"][cell_index]
        else:
            return None

    def replace_cell_source(self, cell_index, new_source):
        """
        Replace the source code of a specific Jupyter notebook cell.

        Args:
            cell_index (int): Index of the cell to be modified (0-based).
            new_source: New source to replace the cell's content.

        Raises:
            IndexError: If *cell_index* is out of range for the notebook.
        """
        # Load the notebook
        with open(self.temp_notebook, "r", encoding="utf-8") as f:
            notebook = nbformat.read(f, as_version=4)

        # Check if the cell index is valid
        if cell_index >= len(notebook.cells) or cell_index < 0:
            raise IndexError(
                f"Cell index {cell_index} is out of range for this notebook."
            )

        # Replace the source code of the specified cell
        notebook.cells[cell_index]["source"] = new_source

        # Save the notebook
        with open(self.temp_notebook, "w", encoding="utf-8") as f:
            nbformat.write(notebook, f)
        print(f"Updated notebook saved to {self.temp_notebook}")

    @staticmethod
    def find_last_import_line(cell_source):
        """
        Finds the index of the last line with an import statement in a list of
        code lines, including multiline import statements.

        Args:
            cell_source (list): List of strings representing the code lines.

        Returns:
            int: The index of the last line with an import statement, or -1 if
            no import is found.
        """
        last_import_index = -1
        is_multiline_import = False  # True while inside a multiline import

        for i, line in enumerate(cell_source):
            stripped_line = line.strip()

            if is_multiline_import:
                # Continue tracking multiline import
                if stripped_line.endswith("\\") or (
                    stripped_line and not stripped_line.endswith(")")
                ):
                    last_import_index = i  # Update to current line
                    continue
                else:
                    is_multiline_import = False  # End of multiline import
                    last_import_index = i  # Closing line is still the import

            # Check for single-line or start of multiline imports
            if stripped_line.startswith("import") or stripped_line.startswith("from"):
                last_import_index = i
                # A trailing backslash or an open paren starts a multiline form
                if stripped_line.endswith("\\") or "(" in stripped_line:
                    is_multiline_import = True

        return last_import_index

    @staticmethod
    def extract_log_variables(cell):
        """Extracts the ``log_variables: [a, b, ...]`` list from a cell's
        source, returning the variable names (or an empty list)."""
        if "source" in cell:
            for line in cell["source"]:
                # Look for the log_variables pattern
                match = re.search(r"log_variables:\s*\[(.*?)\]", line)
                if match:
                    # Split the variables by comma and strip whitespace
                    return [var.strip() for var in match.group(1).split(",")]
        return []

    @staticmethod
    def tag_questions(cells_dict):
        """
        Adds 'is_first' and 'is_last' boolean flags to the cells based on their
        position within the group of the same question, plus a 1-based
        'test_number' within the group. All cells get all three keys.

        Args:
            cells_dict (dict): A dictionary where keys are cell IDs and values
                are cell details.

        Returns:
            dict: The modified dictionary with the flags added.

        Raises:
            ValueError: If the input (or any cell) is not a dictionary.
            KeyError: If any cell lacks a 'question' key.
        """
        if not isinstance(cells_dict, dict):
            raise ValueError("Input must be a dictionary.")

        # Ensure all cells have the expected structure
        for key, cell in cells_dict.items():
            if not isinstance(cell, dict):
                raise ValueError(f"Cell {key} is not a dictionary.")
            if "question" not in cell:
                raise KeyError(f"Cell {key} is missing the 'question' key.")

        # Group the keys by question name (insertion order is preserved)
        question_groups = {}
        for key, cell in cells_dict.items():
            question_groups.setdefault(cell.get("question"), []).append(key)

        # Add 'is_first', 'is_last', and 'test_number' to all cells
        for keys in question_groups.values():
            for i, key in enumerate(keys):
                cells_dict[key]["is_first"] = i == 0
                cells_dict[key]["is_last"] = i == len(keys) - 1
                cells_dict[key]["test_number"] = i + 1

        return cells_dict

    def question_dict(self):
        """Parses the working notebook and returns per-test-cell metadata.

        Scans raw cells for ``# BEGIN QUESTION`` blocks (to learn the current
        question name) and code cells starting with ``\"\"\" # BEGIN TEST
        CONFIG`` (to extract assertions, comments, points, and logged
        variables).

        Returns:
            dict: ``{cell_index: {"assertions", "comments", "question",
            "points", "logging_variables", "is_first", "is_last",
            "test_number"}}``.

        Raises:
            FileNotFoundError: If the working notebook does not exist.
        """
        notebook_path = Path(self.temp_notebook)
        if not notebook_path.exists():
            raise FileNotFoundError(f"The file {notebook_path} does not exist.")

        with open(notebook_path, "r", encoding="utf-8") as f:
            notebook = json.load(f)

        results_dict = {}
        # Bug fix: default to None so a test cell appearing before any
        # "# BEGIN QUESTION" raw cell raises no NameError.
        question_name = None

        for cell_index, cell in enumerate(notebook.get("cells", [])):
            if cell.get("cell_type") == "raw":
                source = "".join(cell.get("source", ""))
                if source.strip().startswith("# BEGIN QUESTION"):
                    name_match = re.search(r"name:\s*(.*)", source)
                    question_name = (
                        name_match.group(1).strip() if name_match else None
                    )

            elif cell.get("cell_type") == "code":
                source = "".join(cell.get("source", ""))

                if source.strip().startswith('""" # BEGIN TEST CONFIG'):
                    logging_variables = FastAPINotebookBuilder.extract_log_variables(
                        cell
                    )

                    # Extract assert statements using a more robust approach
                    assertions = []
                    comments = []

                    # Split the source into lines for processing
                    lines = source.split("\n")
                    i = 0
                    while i < len(lines):
                        line = lines[i].strip()
                        if line.startswith("assert"):
                            # Initialize assertion collection
                            assertion_lines = []
                            comment = None

                            # Handle the first line
                            first_line = line[6:].strip()  # Remove 'assert' keyword
                            assertion_lines.append(first_line)

                            # Stack to track parentheses
                            paren_stack = []
                            for char in first_line:
                                if char == "(":
                                    paren_stack.append(char)
                                elif char == ")":
                                    if paren_stack:
                                        paren_stack.pop()

                            # Continue collecting lines while parens are open
                            current_line = i + 1
                            while paren_stack and current_line < len(lines):
                                next_line = lines[current_line].strip()
                                assertion_lines.append(next_line)

                                for char in next_line:
                                    if char == "(":
                                        paren_stack.append(char)
                                    elif char == ")":
                                        if paren_stack:
                                            paren_stack.pop()

                                current_line += 1

                            # Join the assertion lines and clean up
                            full_assertion = " ".join(assertion_lines)

                            # Extract the trailing message if it exists
                            # (handles both f-strings and regular strings)
                            comment_match = re.search(
                                r',\s*(?:f?["\'])(.*?)(?:["\'])\s*(?:\)|$)',
                                full_assertion,
                            )
                            if comment_match:
                                comment = comment_match.group(1).strip()
                                # Remove the message from the assertion
                                full_assertion = full_assertion[
                                    : comment_match.start()
                                ].strip()

                            # Ensure proper parentheses closure
                            open_count = full_assertion.count("(")
                            close_count = full_assertion.count(")")
                            if open_count > close_count:
                                full_assertion += ")" * (open_count - close_count)

                            # Clean up the assertion
                            if full_assertion.startswith(
                                "("
                            ) and not full_assertion.endswith(")"):
                                full_assertion += ")"

                            assertions.append(full_assertion)
                            comments.append(comment)

                            # Update the line counter
                            i = current_line
                        else:
                            i += 1

                    # Extract points value
                    points_line = next(
                        (line for line in source.split("\n") if "points:" in line), None
                    )
                    points_value = None
                    if points_line:
                        try:
                            points_value = float(points_line.split(":")[-1].strip())
                        except ValueError:
                            points_value = None

                    # Add to results dictionary
                    results_dict[cell_index] = {
                        "assertions": assertions,
                        "comments": comments,
                        "question": question_name,
                        "points": points_value,
                        "logging_variables": logging_variables,
                    }

        results_dict = FastAPINotebookBuilder.tag_questions(results_dict)

        return results_dict
|
@@ -8,8 +8,8 @@ import shutil
|
|
8
8
|
import subprocess
|
9
9
|
import sys
|
10
10
|
from dataclasses import dataclass, field
|
11
|
-
|
12
11
|
import nbformat
|
12
|
+
from .api_notebook_builder import FastAPINotebookBuilder
|
13
13
|
|
14
14
|
|
15
15
|
@dataclass
|
@@ -166,6 +166,8 @@ class NotebookProcessor:
|
|
166
166
|
Returns:
|
167
167
|
None
|
168
168
|
"""
|
169
|
+
|
170
|
+
print(f"Processing notebook: {notebook_path}")
|
169
171
|
|
170
172
|
logging.info(f"Processing notebook: {notebook_path}")
|
171
173
|
notebook_name = os.path.splitext(os.path.basename(notebook_path))[0]
|
@@ -196,6 +198,149 @@ class NotebookProcessor:
|
|
196
198
|
else:
|
197
199
|
self._print_and_log(f"Notebook already in destination: {new_notebook_path}")
|
198
200
|
|
201
|
+
solution_path_1, question_path = self.multiple_choice_parser(
|
202
|
+
temp_notebook_path, new_notebook_path
|
203
|
+
)
|
204
|
+
solution_path_2, question_path = self.true_false_parser(
|
205
|
+
temp_notebook_path, new_notebook_path
|
206
|
+
)
|
207
|
+
solution_path_3, question_path = self.select_many_parser(
|
208
|
+
temp_notebook_path, new_notebook_path
|
209
|
+
)
|
210
|
+
|
211
|
+
if any([solution_path_1, solution_path_2, solution_path_3]) is not None:
|
212
|
+
solution_path = solution_path_1 or solution_path_2 or solution_path_3
|
213
|
+
|
214
|
+
student_notebook = self.free_response_parser(
|
215
|
+
temp_notebook_path, notebook_subfolder, notebook_name
|
216
|
+
)
|
217
|
+
|
218
|
+
# If Otter does not run, move the student file to the main directory
|
219
|
+
if student_notebook is None:
|
220
|
+
path_ = shutil.copy(temp_notebook_path, self.root_folder)
|
221
|
+
self._print_and_log(
|
222
|
+
f"Copied and cleaned student notebook: {path_} -> {self.root_folder}"
|
223
|
+
)
|
224
|
+
|
225
|
+
# Move the solution file to the autograder folder
|
226
|
+
if solution_path is not None:
|
227
|
+
# gets importable file name
|
228
|
+
importable_file_name = sanitize_string(
|
229
|
+
os.path.splitext(os.path.basename(solution_path))[0]
|
230
|
+
)
|
231
|
+
|
232
|
+
# Move the solution file to the autograder folder
|
233
|
+
os.rename(
|
234
|
+
solution_path,
|
235
|
+
os.path.join(autograder_path, f"{importable_file_name}.py"),
|
236
|
+
)
|
237
|
+
|
238
|
+
if question_path is not None:
|
239
|
+
shutil.move(question_path, student_path)
|
240
|
+
|
241
|
+
# Remove the temp copy of the notebook
|
242
|
+
os.remove(temp_notebook_path)
|
243
|
+
|
244
|
+
# Remove all postfix from filenames in dist
|
245
|
+
NotebookProcessor.remove_postfix(autograder_path, "_solutions")
|
246
|
+
NotebookProcessor.remove_postfix(student_path, "_questions")
|
247
|
+
NotebookProcessor.remove_postfix(self.root_folder, "_temp")
|
248
|
+
|
249
|
+
### CODE TO ENSURE THAT STUDENT NOTEBOOK IS IMPORTABLE
|
250
|
+
if question_path is not None:
|
251
|
+
# question_root_path = os.path.dirname(question_path)
|
252
|
+
question_file_name = os.path.basename(question_path)
|
253
|
+
question_file_name_sanitized = sanitize_string(
|
254
|
+
question_file_name.replace("_questions", "")
|
255
|
+
)
|
256
|
+
if question_file_name_sanitized.endswith("_py"):
|
257
|
+
question_file_name_sanitized = question_file_name_sanitized[:-3] + ".py"
|
258
|
+
|
259
|
+
# Rename the file
|
260
|
+
os.rename(
|
261
|
+
os.path.join(
|
262
|
+
student_path, question_file_name.replace("_questions", "")
|
263
|
+
),
|
264
|
+
os.path.join(student_path, question_file_name_sanitized),
|
265
|
+
)
|
266
|
+
|
267
|
+
# Ensure the "questions" folder exists
|
268
|
+
questions_folder_jbook = os.path.join(self.root_folder, "questions")
|
269
|
+
os.makedirs(questions_folder_jbook, exist_ok=True)
|
270
|
+
|
271
|
+
# Copy the renamed file to the "questions" folder
|
272
|
+
shutil.copy(
|
273
|
+
os.path.join(student_path, question_file_name_sanitized),
|
274
|
+
os.path.join(questions_folder_jbook, question_file_name_sanitized),
|
275
|
+
)
|
276
|
+
|
277
|
+
def free_response_parser(
    self, temp_notebook_path, notebook_subfolder, notebook_name
):
    """Run the Otter-assign pipeline on a notebook containing an
    "# ASSIGNMENT CONFIG" block and distribute the generated student copy.

    Steps (when the config marker is present): copy the encryption keys into
    the notebook folder, inject the pykubegrader API code
    (FastAPINotebookBuilder), run otter assign into ``dist``, add the
    initialization code to the student notebook, clean it, strip "_temp"
    markers from both student and autograder notebooks, copy the student
    notebook to the course root, and finally remove the keys.

    Args:
        temp_notebook_path (str): Path to the working copy of the notebook.
        notebook_subfolder (str): Folder holding this notebook's artifacts.
        notebook_name (str): Notebook filename without extension.

    Returns:
        str | None: Path to the generated student notebook, or ``None`` when
        the notebook has no "# ASSIGNMENT CONFIG" block (in that case only the
        initialization code is added to the working copy).
    """
    if self.has_assignment(temp_notebook_path, "# ASSIGNMENT CONFIG"):

        # TODO: This is hardcoded for now, but should be in a configuration file.
        client_private_key = os.path.join(
            notebook_subfolder,
            "client_private_key.bin",
        )
        server_public_key = os.path.join(
            notebook_subfolder,
            "server_public_key.bin",
        )

        # NOTE(review): assumes the process is launched from a directory
        # containing ./keys -- confirm against the CLI entry point.
        shutil.copy("./keys/client_private_key.bin", client_private_key)
        shutil.copy("./keys/server_public_key.bin", server_public_key)

        # Instantiation runs the builder's full pipeline as a side effect.
        FastAPINotebookBuilder(notebook_path=temp_notebook_path)

        self.run_otter_assign(
            temp_notebook_path, os.path.join(notebook_subfolder, "dist")
        )

        student_notebook = os.path.join(
            notebook_subfolder, "dist", "student", f"{notebook_name}.ipynb"
        )

        NotebookProcessor.add_initialization_code(student_notebook)

        self.clean_notebook(student_notebook)

        NotebookProcessor.replace_temp_in_notebook(
            student_notebook, student_notebook
        )
        autograder_notebook = os.path.join(
            notebook_subfolder, "dist", "autograder", f"{notebook_name}.ipynb"
        )
        NotebookProcessor.replace_temp_in_notebook(
            autograder_notebook, autograder_notebook
        )
        shutil.copy(student_notebook, self.root_folder)
        self._print_and_log(
            f"Copied and cleaned student notebook: {student_notebook} -> {self.root_folder}"
        )

        # Remove the keys so they are not shipped with the assignment.
        os.remove(client_private_key)
        os.remove(server_public_key)

        return student_notebook
    else:
        NotebookProcessor.add_initialization_code(temp_notebook_path)
        return None
|
331
|
+
|
332
|
+
@staticmethod
def add_initialization_code(notebook_path):
    """Wire the pykubegrader initialization into a notebook's first code cell.

    Prepends the ``initialize_assignment`` import to the cell and appends a
    call that registers the assignment under the notebook's base filename.
    The notebook is modified in place.

    Args:
        notebook_path (str): Path to the ``.ipynb`` file to modify.
    """
    # finds the first code cell
    index, cell = find_first_code_cell(notebook_path)
    # NOTE(review): assumes at least one code cell exists --
    # find_first_code_cell returns (None, None) otherwise; confirm callers.
    cell = cell['source']
    import_text = "from pykubegrader.initialize import initialize_assignment\n"
    cell = f"{import_text}\n" + cell
    # The assignment name is the notebook filename without its extension.
    cell += f'\nresponses = initialize_assignment("{os.path.splitext(os.path.basename(notebook_path))[0]}")\n'
    replace_cell_source(notebook_path, index, cell)
|
341
|
+
|
342
|
+
def multiple_choice_parser(self, temp_notebook_path, new_notebook_path):
|
343
|
+
|
199
344
|
### Parse the notebook for multiple choice questions
|
200
345
|
if self.has_assignment(temp_notebook_path, "# BEGIN MULTIPLE CHOICE"):
|
201
346
|
self._print_and_log(
|
@@ -229,6 +374,11 @@ class NotebookProcessor:
|
|
229
374
|
data, markers, temp_notebook_path, temp_notebook_path
|
230
375
|
)
|
231
376
|
|
377
|
+
return solution_path, question_path
|
378
|
+
else:
|
379
|
+
return None, None
|
380
|
+
|
381
|
+
def true_false_parser(self, temp_notebook_path, new_notebook_path):
|
232
382
|
### Parse the notebook for TF questions
|
233
383
|
if self.has_assignment(temp_notebook_path, "# BEGIN TF"):
|
234
384
|
markers = ("# BEGIN TF", "# END TF")
|
@@ -260,6 +410,11 @@ class NotebookProcessor:
|
|
260
410
|
data, markers, temp_notebook_path, temp_notebook_path
|
261
411
|
)
|
262
412
|
|
413
|
+
return solution_path, question_path
|
414
|
+
else:
|
415
|
+
return None, None
|
416
|
+
|
417
|
+
def select_many_parser(self, temp_notebook_path, new_notebook_path):
|
263
418
|
### Parse the notebook for select_many questions
|
264
419
|
if self.has_assignment(temp_notebook_path, "# BEGIN SELECT MANY"):
|
265
420
|
markers = ("# BEGIN SELECT MANY", "# END SELECT MANY")
|
@@ -291,86 +446,9 @@ class NotebookProcessor:
|
|
291
446
|
data, markers, temp_notebook_path, temp_notebook_path
|
292
447
|
)
|
293
448
|
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
)
|
298
|
-
student_notebook = os.path.join(
|
299
|
-
notebook_subfolder, "dist", "student", f"{notebook_name}.ipynb"
|
300
|
-
)
|
301
|
-
self.clean_notebook(student_notebook)
|
302
|
-
NotebookProcessor.replace_temp_in_notebook(
|
303
|
-
student_notebook, student_notebook
|
304
|
-
)
|
305
|
-
autograder_notebook = os.path.join(
|
306
|
-
notebook_subfolder, "dist", "autograder", f"{notebook_name}.ipynb"
|
307
|
-
)
|
308
|
-
NotebookProcessor.replace_temp_in_notebook(
|
309
|
-
autograder_notebook, autograder_notebook
|
310
|
-
)
|
311
|
-
shutil.copy(student_notebook, self.root_folder)
|
312
|
-
self._print_and_log(
|
313
|
-
f"Copied and cleaned student notebook: {student_notebook} -> {self.root_folder}"
|
314
|
-
)
|
315
|
-
|
316
|
-
# If Otter does not run, move the student file to the main directory
|
317
|
-
if "student_notebook" not in locals():
|
318
|
-
path_ = shutil.copy(temp_notebook_path, self.root_folder)
|
319
|
-
self._print_and_log(
|
320
|
-
f"Copied and cleaned student notebook: {path_} -> {self.root_folder}"
|
321
|
-
)
|
322
|
-
|
323
|
-
# Move the solution file to the autograder folder
|
324
|
-
if "solution_path" in locals():
|
325
|
-
# gets importable file name
|
326
|
-
importable_file_name = sanitize_string(
|
327
|
-
os.path.splitext(os.path.basename(solution_path))[0]
|
328
|
-
)
|
329
|
-
|
330
|
-
# Move the solution file to the autograder folder
|
331
|
-
os.rename(
|
332
|
-
solution_path,
|
333
|
-
os.path.join(autograder_path, f"{importable_file_name}.py"),
|
334
|
-
)
|
335
|
-
|
336
|
-
if "question_path" in locals():
|
337
|
-
shutil.move(question_path, student_path)
|
338
|
-
|
339
|
-
# Remove the temp copy of the notebook
|
340
|
-
os.remove(temp_notebook_path)
|
341
|
-
|
342
|
-
# Remove all postfix from filenames in dist
|
343
|
-
NotebookProcessor.remove_postfix(autograder_path, "_solutions")
|
344
|
-
NotebookProcessor.remove_postfix(student_path, "_questions")
|
345
|
-
NotebookProcessor.remove_postfix(self.root_folder, "_temp")
|
346
|
-
|
347
|
-
### CODE TO ENSURE THAT STUDENT NOTEBOOK IS IMPORTABLE
|
348
|
-
if "question_path" in locals():
|
349
|
-
# question_root_path = os.path.dirname(question_path)
|
350
|
-
question_file_name = os.path.basename(question_path)
|
351
|
-
question_file_name_sanitized = sanitize_string(
|
352
|
-
question_file_name.replace("_questions", "")
|
353
|
-
)
|
354
|
-
if question_file_name_sanitized.endswith("_py"):
|
355
|
-
question_file_name_sanitized = question_file_name_sanitized[:-3] + ".py"
|
356
|
-
|
357
|
-
# Rename the file
|
358
|
-
os.rename(
|
359
|
-
os.path.join(
|
360
|
-
student_path, question_file_name.replace("_questions", "")
|
361
|
-
),
|
362
|
-
os.path.join(student_path, question_file_name_sanitized),
|
363
|
-
)
|
364
|
-
|
365
|
-
# Ensure the "questions" folder exists
|
366
|
-
questions_folder_jbook = os.path.join(self.root_folder, "questions")
|
367
|
-
os.makedirs(questions_folder_jbook, exist_ok=True)
|
368
|
-
|
369
|
-
# Copy the renamed file to the "questions" folder
|
370
|
-
shutil.copy(
|
371
|
-
os.path.join(student_path, question_file_name_sanitized),
|
372
|
-
os.path.join(questions_folder_jbook, question_file_name_sanitized),
|
373
|
-
)
|
449
|
+
return solution_path, question_path
|
450
|
+
else:
|
451
|
+
return None, None
|
374
452
|
|
375
453
|
@staticmethod
|
376
454
|
def replace_temp_in_notebook(input_file, output_file):
|
@@ -1475,6 +1553,54 @@ def sanitize_string(input_string):
|
|
1475
1553
|
return sanitized
|
1476
1554
|
|
1477
1555
|
|
1556
|
+
def find_first_code_cell(notebook_path):
|
1557
|
+
"""
|
1558
|
+
Finds the first Python code cell in a Jupyter notebook and its index.
|
1559
|
+
|
1560
|
+
Args:
|
1561
|
+
notebook_path (str): Path to the Jupyter notebook file.
|
1562
|
+
|
1563
|
+
Returns:
|
1564
|
+
tuple: A tuple containing the index of the first code cell and the cell dictionary,
|
1565
|
+
or (None, None) if no code cell is found.
|
1566
|
+
"""
|
1567
|
+
# Load the notebook
|
1568
|
+
with open(notebook_path, "r", encoding="utf-8") as f:
|
1569
|
+
notebook = nbformat.read(f, as_version=4)
|
1570
|
+
|
1571
|
+
# Iterate through the cells to find the first code cell
|
1572
|
+
for index, cell in enumerate(notebook.get("cells", [])):
|
1573
|
+
if cell.get("cell_type") == "code":
|
1574
|
+
return index, cell # Return the index and the first code cell
|
1575
|
+
|
1576
|
+
return None, None # No code cell found
|
1577
|
+
|
1578
|
+
|
1579
|
+
def replace_cell_source(notebook_path, cell_index, new_source):
|
1580
|
+
"""
|
1581
|
+
Replace the source code of a specific Jupyter notebook cell.
|
1582
|
+
|
1583
|
+
Args:
|
1584
|
+
cell_index (int): Index of the cell to be modified (0-based).
|
1585
|
+
new_source (str): New source code to replace the cell's content.
|
1586
|
+
"""
|
1587
|
+
# Load the notebook
|
1588
|
+
with open(notebook_path, "r", encoding="utf-8") as f:
|
1589
|
+
notebook = nbformat.read(f, as_version=4)
|
1590
|
+
|
1591
|
+
# Check if the cell index is valid
|
1592
|
+
if cell_index >= len(notebook.cells) or cell_index < 0:
|
1593
|
+
raise IndexError(
|
1594
|
+
f"Cell index {cell_index} is out of range for this notebook."
|
1595
|
+
)
|
1596
|
+
|
1597
|
+
# Replace the source code of the specified cell
|
1598
|
+
notebook.cells[cell_index]["source"] = new_source
|
1599
|
+
|
1600
|
+
# Save the notebook
|
1601
|
+
with open(notebook_path, "w", encoding="utf-8") as f:
|
1602
|
+
nbformat.write(notebook, f)
|
1603
|
+
|
1478
1604
|
def main():
|
1479
1605
|
parser = argparse.ArgumentParser(
|
1480
1606
|
description="Recursively process Jupyter notebooks with '# ASSIGNMENT CONFIG', move them to a solutions folder, and run otter assign."
|
pykubegrader/initialize.py
CHANGED
@@ -1,13 +1,18 @@
|
|
1
1
|
import json
|
2
2
|
import os
|
3
|
+
from typing import Optional
|
3
4
|
|
4
5
|
import panel as pn
|
5
6
|
from IPython import get_ipython
|
7
|
+
import requests
|
8
|
+
from .telemetry import telemetry, update_responses, ensure_responses
|
6
9
|
|
7
|
-
from .telemetry import telemetry, update_responses
|
8
10
|
|
9
11
|
|
10
|
-
def initialize_assignment(name: str
|
12
|
+
def initialize_assignment(name: str,
|
13
|
+
verbose: Optional[bool] = False,
|
14
|
+
url: Optional[str] = "https://engr-131-api.eastus.cloudapp.azure.com/") -> None:
|
15
|
+
|
11
16
|
ipython = get_ipython()
|
12
17
|
if ipython is None:
|
13
18
|
print("Setup unsuccessful. Are you in a Jupyter environment?")
|
@@ -24,19 +29,40 @@ def initialize_assignment(name: str) -> None:
|
|
24
29
|
print("Setup unsuccessful. Are you on JupyterHub?")
|
25
30
|
return
|
26
31
|
|
27
|
-
pn.extension(silent=True)
|
28
|
-
|
29
32
|
try:
|
30
|
-
|
33
|
+
seed = hash(jhub_user) % 1000
|
34
|
+
update_responses(key="seed", value=seed)
|
35
|
+
update_responses(key="assignment", value=name)
|
36
|
+
update_responses(key="jhub_user", value=jhub_user)
|
31
37
|
|
32
|
-
seed = responses.get("seed")
|
33
|
-
if seed is None:
|
34
|
-
new_seed = hash(jhub_user) % 1000
|
35
|
-
responses = update_responses(key="seed", value=new_seed)
|
36
38
|
except (TypeError, json.JSONDecodeError) as e:
|
37
39
|
print(f"Failed to initialize assignment: {e}")
|
38
40
|
return
|
41
|
+
|
42
|
+
|
43
|
+
# extract responses
|
44
|
+
responses = ensure_responses()
|
45
|
+
|
46
|
+
# TODO: Add more checks here??
|
47
|
+
assert isinstance(responses.get('seed'), int), "valid seed not found in responses"
|
48
|
+
|
49
|
+
pn.extension(silent=True)
|
50
|
+
|
51
|
+
if verbose:
|
52
|
+
print("Assignment successfully initialized")
|
53
|
+
print(f"Assignment: {name}")
|
54
|
+
print(f"Username: {jhub_user}")
|
55
|
+
|
39
56
|
|
57
|
+
|
58
|
+
# Checks connectivity to the API
|
59
|
+
params = { "jhub_user": responses["jhub_user"] }
|
60
|
+
response = requests.get(url, params=params)
|
61
|
+
if verbose:
|
62
|
+
print(f"status code: {response.status_code}")
|
63
|
+
data = response.json()
|
64
|
+
for k, v in data.items():
|
65
|
+
print(f"{k}: {v}")
|
66
|
+
|
40
67
|
print("Assignment successfully initialized")
|
41
|
-
|
42
|
-
print(f"Username: {jhub_user}")
|
68
|
+
return responses
|
pykubegrader/telemetry.py
CHANGED
@@ -3,6 +3,7 @@ import datetime
|
|
3
3
|
import json
|
4
4
|
import logging
|
5
5
|
import os
|
6
|
+
from typing import Optional
|
6
7
|
|
7
8
|
import nacl.public
|
8
9
|
import requests
|
@@ -35,19 +36,20 @@ def encrypt_to_b64(message: str) -> str:
|
|
35
36
|
|
36
37
|
|
37
38
|
def ensure_responses() -> dict:
|
39
|
+
|
38
40
|
with open(".responses.json", "a") as _:
|
39
41
|
pass
|
40
42
|
|
41
|
-
|
43
|
+
responses = {}
|
42
44
|
|
43
45
|
try:
|
44
46
|
with open(".responses.json", "r") as f:
|
45
|
-
|
47
|
+
responses = json.load(f)
|
46
48
|
except json.JSONDecodeError:
|
47
49
|
with open(".responses.json", "w") as f:
|
48
|
-
json.dump(
|
49
|
-
|
50
|
-
return
|
50
|
+
json.dump(responses, f)
|
51
|
+
|
52
|
+
return responses
|
51
53
|
|
52
54
|
|
53
55
|
def log_encrypted(message: str) -> None:
|
@@ -94,12 +96,13 @@ def update_responses(key: str, value) -> dict:
|
|
94
96
|
#
|
95
97
|
|
96
98
|
|
99
|
+
# If we instead call this with **responses
|
97
100
|
def score_question(
|
98
101
|
student_email: str,
|
99
|
-
term: str,
|
100
102
|
assignment: str,
|
101
103
|
question: str,
|
102
104
|
submission: str,
|
105
|
+
term: str = "winter_2025",
|
103
106
|
base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
|
104
107
|
) -> Response:
|
105
108
|
url = base_url + "/live-scorer"
|
@@ -140,3 +143,20 @@ def submit_question_new(
|
|
140
143
|
res = requests.post(url, json=payload, auth=HTTPBasicAuth("student", "capture"))
|
141
144
|
|
142
145
|
return res
|
146
|
+
|
147
|
+
|
148
|
+
# TODO: refine function
|
149
|
+
def verify_server(
|
150
|
+
jhub_user: Optional[str] = None,
|
151
|
+
url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
|
152
|
+
) -> str:
|
153
|
+
params = {"jhub_user": jhub_user} if jhub_user else {}
|
154
|
+
res = requests.get(url, params=params)
|
155
|
+
message = f"status code: {res.status_code}"
|
156
|
+
return message
|
157
|
+
|
158
|
+
|
159
|
+
# TODO: implement function; or maybe not?
|
160
|
+
# At least improve other one
|
161
|
+
def score_question_improved(question_name: str, responses: dict) -> dict:
|
162
|
+
return {}
|
pykubegrader/validate.py
CHANGED
@@ -10,6 +10,10 @@ import numpy as np
|
|
10
10
|
import requests
|
11
11
|
from requests.auth import HTTPBasicAuth
|
12
12
|
|
13
|
+
#
|
14
|
+
# Primary function
|
15
|
+
#
|
16
|
+
|
13
17
|
|
14
18
|
def validate_logfile(
|
15
19
|
filepath: str,
|
@@ -18,59 +22,54 @@ def validate_logfile(
|
|
18
22
|
free_response_questions=0,
|
19
23
|
username="student",
|
20
24
|
password="capture",
|
21
|
-
|
22
|
-
login_url="https://engr-131-api.eastus.cloudapp.azure.com/login",
|
25
|
+
base_url="https://engr-131-api.eastus.cloudapp.azure.com",
|
23
26
|
) -> None:
|
24
27
|
login_data = {
|
25
28
|
"username": username,
|
26
29
|
"password": password,
|
27
30
|
}
|
28
31
|
|
29
|
-
|
30
|
-
|
31
|
-
server_priv_key = nacl.public.PrivateKey(server_private_key_bytes)
|
32
|
-
|
33
|
-
with open("client_public_key.bin", "rb") as pub_file:
|
34
|
-
client_public_key_bytes = pub_file.read()
|
35
|
-
client_pub_key = nacl.public.PublicKey(client_public_key_bytes)
|
36
|
-
|
37
|
-
box = nacl.public.Box(server_priv_key, client_pub_key)
|
32
|
+
# Generate box from private and public keys
|
33
|
+
key_box = generate_keys()
|
38
34
|
|
39
35
|
with open(filepath, "r") as logfile:
|
40
36
|
encrypted_lines = logfile.readlines()
|
41
37
|
|
42
|
-
|
38
|
+
decrypted_log: list[str] = []
|
43
39
|
for line in encrypted_lines:
|
44
40
|
if "Encrypted Output: " in line:
|
45
41
|
trimmed = line.split("Encrypted Output: ")[1].strip()
|
46
42
|
decoded = base64.b64decode(trimmed)
|
47
|
-
decrypted =
|
48
|
-
|
43
|
+
decrypted = key_box.decrypt(decoded).decode()
|
44
|
+
decrypted_log.append(decrypted)
|
49
45
|
|
50
46
|
# Decoding the log file
|
51
47
|
# data_: list[str] = drexel_jupyter_logger.decode_log_file(self.filepath, key=key)
|
52
|
-
_loginfo = str(
|
48
|
+
# _loginfo = str(decrypted_log)
|
53
49
|
|
54
50
|
# Where possible, we should work with this reduced list of relevant entries
|
55
|
-
|
51
|
+
# Here we take only lines with student info or question scores
|
52
|
+
log_reduced = [
|
56
53
|
entry
|
57
|
-
for entry in
|
54
|
+
for entry in decrypted_log
|
58
55
|
if re.match(r"info,", entry) or re.match(r"q\d+_\d+,", entry)
|
59
56
|
]
|
60
57
|
|
61
58
|
# For debugging; to be commented out
|
62
|
-
with open(".output_reduced.log", "w") as f:
|
63
|
-
|
59
|
+
# with open(".output_reduced.log", "w") as f:
|
60
|
+
# f.writelines(f"{item}\n" for item in log_reduced)
|
64
61
|
|
65
|
-
# Initialize
|
66
|
-
question_max_scores = question_max_scores
|
62
|
+
# Initialize question scores based on max scores
|
67
63
|
question_scores = {key: 0 for key in question_max_scores}
|
68
64
|
|
69
|
-
#
|
65
|
+
# Iterate over log to find the last entries for student info fields
|
70
66
|
# This gets the student name etc.
|
71
67
|
last_entries: dict[str, str | float] = {}
|
72
|
-
for entry in
|
68
|
+
for entry in log_reduced:
|
69
|
+
# Split on commas and strip whitespace
|
73
70
|
parts = [part.strip() for part in entry.split(",")]
|
71
|
+
|
72
|
+
# This just overwrites, so the last iteration sticks
|
74
73
|
if parts[0] == "info" and len(parts) == 4:
|
75
74
|
field_name = parts[1]
|
76
75
|
field_value = parts[2]
|
@@ -85,35 +84,29 @@ def validate_logfile(
|
|
85
84
|
"Your log file is not for the correct assignment. Please submit the correct log file."
|
86
85
|
)
|
87
86
|
|
87
|
+
# TODO: Revisit this; we may no longer require as much info
|
88
88
|
required_student_info = ["drexel_id", "first_name", "last_name", "drexel_email"]
|
89
|
-
|
90
89
|
for field in required_student_info:
|
91
90
|
if last_entries.get(field) is None:
|
92
|
-
sys.exit(
|
93
|
-
"You must submit your student information before you start the exam. Please submit your information and try again."
|
94
|
-
)
|
91
|
+
sys.exit("Missing required student information")
|
95
92
|
|
96
93
|
# Initialize code and data lists
|
97
|
-
|
98
|
-
|
94
|
+
log_execution: list[str] = []
|
95
|
+
log_data: list[str] = []
|
99
96
|
|
100
97
|
# Splitting the data into code and responses
|
101
|
-
for entry in
|
98
|
+
for entry in decrypted_log:
|
102
99
|
# Splitting the data into code and responses
|
103
100
|
if "code run:" in entry:
|
104
|
-
|
101
|
+
log_execution.append(entry)
|
105
102
|
else:
|
106
|
-
|
107
|
-
|
108
|
-
# Checks to see if the drexel_jupyter_logger is in the code
|
109
|
-
# If it is, the student might have tried to look at the solutions
|
110
|
-
# Commenting this out, since we're switching to asymmetric encryption
|
111
|
-
# flag = any("drexel_jupyter_logger" in item for item in code)
|
103
|
+
log_data.append(entry)
|
112
104
|
|
113
105
|
# Extracting timestamps and converting them to datetime objects
|
106
|
+
# TODO: Check why we're using log_reduced instead of decrypted_log
|
114
107
|
timestamps = [
|
115
108
|
datetime.strptime(row.split(",")[-1].strip(), "%Y-%m-%d %H:%M:%S")
|
116
|
-
for row in
|
109
|
+
for row in log_reduced
|
117
110
|
]
|
118
111
|
|
119
112
|
# Getting the earliest and latest times
|
@@ -122,51 +115,20 @@ def validate_logfile(
|
|
122
115
|
delta = max(timestamps) - min(timestamps)
|
123
116
|
minutes_rounded = round(delta.total_seconds() / 60, 2)
|
124
117
|
last_entries["elapsed_minutes"] = minutes_rounded
|
125
|
-
# last_entries["flag"] = flag
|
126
118
|
|
127
119
|
# Collect student info dict
|
128
|
-
|
120
|
+
student_info = {key.upper(): value for key, value in last_entries.items()}
|
129
121
|
|
130
122
|
# Write info dict to info.json
|
123
|
+
# TODO: Try/except block here?
|
131
124
|
with open("info.json", "w") as file:
|
132
|
-
print("Writing to info.json")
|
133
|
-
json.dump(
|
134
|
-
|
135
|
-
def get_last_entry(data: list[str], field_name: str) -> str:
|
136
|
-
for entry in data[::-1]:
|
137
|
-
parts = [part.strip() for part in entry.split(",")]
|
138
|
-
if parts[0] == field_name:
|
139
|
-
return entry
|
140
|
-
return ""
|
141
|
-
|
142
|
-
def get_entries_len(data: list[str], question_number: int) -> int:
|
143
|
-
"""function to get the unique entries by length
|
144
|
-
|
145
|
-
Args:
|
146
|
-
data (list): list of all the data records
|
147
|
-
question_number (int): question number to evaluate
|
148
|
-
|
149
|
-
Returns:
|
150
|
-
int: length of the unique entries
|
151
|
-
"""
|
152
|
-
|
153
|
-
# Set for unique qN_* values
|
154
|
-
unique_qN_values = set()
|
155
|
-
|
156
|
-
for entry in data:
|
157
|
-
if entry.startswith(f"q{question_number}_"):
|
158
|
-
# Split the string by commas and get the value part
|
159
|
-
parts = [part.strip() for part in entry.split(",")]
|
160
|
-
# The value is the third element after splitting (?)
|
161
|
-
value = parts[0].split("_")[1]
|
162
|
-
unique_qN_values.add(value)
|
163
|
-
|
164
|
-
return len(unique_qN_values) + 1
|
125
|
+
# print("Writing to info.json")
|
126
|
+
json.dump(student_info, file)
|
165
127
|
|
166
128
|
# Modified list comprehension to filter as per the criteria
|
167
129
|
free_response = [
|
168
130
|
entry
|
169
|
-
for entry in
|
131
|
+
for entry in log_reduced
|
170
132
|
if entry.startswith("q")
|
171
133
|
and entry.split("_")[0][1:].isdigit()
|
172
134
|
and int(entry.split("_")[0][1:]) > free_response_questions
|
@@ -180,8 +142,8 @@ def validate_logfile(
|
|
180
142
|
# Collect entries for each question in a list.
|
181
143
|
entries = [
|
182
144
|
entry
|
183
|
-
for j in range(1, get_entries_len(
|
184
|
-
if (entry := get_last_entry(
|
145
|
+
for j in range(1, get_entries_len(log_data, i))
|
146
|
+
if (entry := get_last_entry(log_data, f"q{i}_{j}")) != ""
|
185
147
|
]
|
186
148
|
|
187
149
|
# Store the list of entries in the dictionary, keyed by question number.
|
@@ -246,6 +208,7 @@ def validate_logfile(
|
|
246
208
|
print("Writing to results.json")
|
247
209
|
json.dump(result_structure, file, indent=4)
|
248
210
|
|
211
|
+
login_url = f"{base_url}/login"
|
249
212
|
verify_login(login_data, login_url)
|
250
213
|
|
251
214
|
# The file to be uploaded. Ensure the path is correct.
|
@@ -269,6 +232,8 @@ def validate_logfile(
|
|
269
232
|
"file": (file_path, open(file_path, "rb")),
|
270
233
|
}
|
271
234
|
|
235
|
+
post_url = f"{base_url}/upload-score"
|
236
|
+
|
272
237
|
# Make the POST request with data and files
|
273
238
|
response = requests.post(
|
274
239
|
url=post_url,
|
@@ -286,6 +251,53 @@ def validate_logfile(
|
|
286
251
|
#
|
287
252
|
|
288
253
|
|
254
|
+
def generate_keys() -> nacl.public.Box:
|
255
|
+
with open("server_private_key.bin", "rb") as priv_file:
|
256
|
+
server_private_key_bytes = priv_file.read()
|
257
|
+
server_priv_key = nacl.public.PrivateKey(server_private_key_bytes)
|
258
|
+
|
259
|
+
with open("client_public_key.bin", "rb") as pub_file:
|
260
|
+
client_public_key_bytes = pub_file.read()
|
261
|
+
client_pub_key = nacl.public.PublicKey(client_public_key_bytes)
|
262
|
+
|
263
|
+
box = nacl.public.Box(server_priv_key, client_pub_key)
|
264
|
+
|
265
|
+
return box
|
266
|
+
|
267
|
+
|
268
|
+
def get_entries_len(data: list[str], question_number: int) -> int:
|
269
|
+
"""function to get the unique entries by length
|
270
|
+
|
271
|
+
Args:
|
272
|
+
data (list): list of all the data records
|
273
|
+
question_number (int): question number to evaluate
|
274
|
+
|
275
|
+
Returns:
|
276
|
+
int: length of the unique entries
|
277
|
+
"""
|
278
|
+
|
279
|
+
# Set for unique qN_* values
|
280
|
+
unique_qN_values = set()
|
281
|
+
|
282
|
+
for entry in data:
|
283
|
+
if entry.startswith(f"q{question_number}_"):
|
284
|
+
# Split the string by commas and get the value part
|
285
|
+
parts = [part.strip() for part in entry.split(",")]
|
286
|
+
# The value is the third element after splitting (?)
|
287
|
+
value = parts[0].split("_")[1]
|
288
|
+
unique_qN_values.add(value)
|
289
|
+
|
290
|
+
return len(unique_qN_values) + 1
|
291
|
+
|
292
|
+
|
293
|
+
def get_last_entry(data: list[str], field_name: str) -> str:
|
294
|
+
for entry in data[::-1]:
|
295
|
+
parts = [part.strip() for part in entry.split(",")]
|
296
|
+
if parts[0] == field_name:
|
297
|
+
return entry
|
298
|
+
return ""
|
299
|
+
|
300
|
+
|
289
301
|
def submission_message(response) -> None:
|
290
302
|
if response.status_code == 200:
|
291
303
|
print("Data successfully uploaded to the server")
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|