PyKubeGrader-0.0.4-py3-none-any.whl
- PyKubeGrader-0.0.4.dist-info/LICENSE.txt +28 -0
- PyKubeGrader-0.0.4.dist-info/METADATA +69 -0
- PyKubeGrader-0.0.4.dist-info/RECORD +17 -0
- PyKubeGrader-0.0.4.dist-info/WHEEL +5 -0
- PyKubeGrader-0.0.4.dist-info/top_level.txt +1 -0
- pykubegrader/__init__.py +16 -0
- pykubegrader/widgets/info_widget.py +108 -0
- pykubegrader/widgets/mc_widget.py +72 -0
- pykubegrader/widgets/misc.py +29 -0
- pykubegrader/widgets/multi_select_base.py +99 -0
- pykubegrader/widgets/reading_base.py +168 -0
- pykubegrader/widgets/reading_widget.py +84 -0
- pykubegrader/widgets/select_base.py +69 -0
- pykubegrader/widgets/select_many_widget.py +101 -0
- pykubegrader/widgets/telemetry.py +132 -0
- pykubegrader/widgets/types_widget.py +77 -0
- pykubegrader/widgets/validate.py +311 -0
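The one substantive module in this release is pykubegrader/widgets/validate.py (311 lines), shown in full below: it decrypts a student's telemetry log with the course key pair, re-scores the answers, and uploads the result to a grading endpoint. As orientation, a call might look like the sketch below; the log path, assignment id, and per-question maxima are illustrative values rather than anything shipped in the wheel, and the function also expects server_private_key.bin and client_public_key.bin to be present in the working directory.

    from pykubegrader.widgets.validate import validate_logfile

    # Hypothetical values for illustration; post_url and login_url default to the
    # localhost endpoints hard-coded in the function signature below.
    validate_logfile(
        filepath="output.log",              # encrypted telemetry log
        assignment_id="week1_quiz",         # must match the "assignment" info entry in the log
        question_max_scores={1: 10, 2: 5},  # maximum points per question number
        free_response_questions=2,
    )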
pykubegrader/widgets/validate.py
@@ -0,0 +1,311 @@
import base64
import json
import os
import re
import sys
from datetime import datetime

import nacl.public
import numpy as np
import requests
from requests.auth import HTTPBasicAuth


def validate_logfile(
    filepath: str,
    assignment_id: str,
    question_max_scores: dict[int, int],
    free_response_questions=0,
    username="student",
    password="capture",
    post_url="http://localhost:8000/upload-score",
    login_url="http://localhost:8000/login",
) -> None:
    login_data = {
        "username": username,
        "password": password,
    }

    with open("server_private_key.bin", "rb") as priv_file:
        server_private_key_bytes = priv_file.read()
    server_priv_key = nacl.public.PrivateKey(server_private_key_bytes)

    with open("client_public_key.bin", "rb") as pub_file:
        client_public_key_bytes = pub_file.read()
    client_pub_key = nacl.public.PublicKey(client_public_key_bytes)

    box = nacl.public.Box(server_priv_key, client_pub_key)

    with open(filepath, "r") as logfile:
        encrypted_lines = logfile.readlines()

    data_: list[str] = []
    for line in encrypted_lines:
        if "Encrypted Output: " in line:
            trimmed = line.split("Encrypted Output: ")[1].strip()
            decoded = base64.b64decode(trimmed)
            decrypted = box.decrypt(decoded).decode()
            data_.append(decrypted)

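    # Each decrypted entry appears to be a comma-separated record whose last
    # field is a "%Y-%m-%d %H:%M:%S" timestamp, e.g. (illustrative, inferred
    # from the parsing below rather than from package documentation):
    #   "info, drexel_email, abc123@drexel.edu, 2025-01-01 10:00:00"
    #   "q1_2, 3, 2025-01-01 10:05:00"
    # "info" records carry student/assignment metadata; "qN_M" records carry
    # the score for sub-question M of question N.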
    # Decoding the log file
    # data_: list[str] = drexel_jupyter_logger.decode_log_file(self.filepath, key=key)
    _loginfo = str(data_)

    # Where possible, we should work with this reduced list of relevant entries
    data_reduced = [
        entry
        for entry in data_
        if re.match(r"info,", entry) or re.match(r"q\d+_\d+,", entry)
    ]

    # For debugging; to be commented out
    with open(".output_reduced.log", "w") as f:
        f.writelines(f"{item}\n" for item in data_reduced)

    # Initialize the per-question running scores at zero
    question_scores = {key: 0 for key in question_max_scores}

    # Parsing the data to find the last entries for required fields
    # This gets the student name etc.
    last_entries: dict[str, str | float] = {}
    for entry in data_reduced:
        parts = [part.strip() for part in entry.split(",")]
        if parts[0] == "info" and len(parts) == 4:
            field_name = parts[1]
            field_value = parts[2]
            last_entries[field_name] = field_value

    # For debugging; to be commented out
    # print(f"Keys in last_entries dict: {last_entries.keys()}")

    # Check if the assignment id is in the log file
    if "assignment" not in last_entries or assignment_id != last_entries["assignment"]:
        sys.exit(
            "Your log file is not for the correct assignment. Please submit the correct log file."
        )

    required_student_info = ["drexel_id", "first_name", "last_name", "drexel_email"]

    for field in required_student_info:
        if last_entries.get(field) is None:
            sys.exit(
                "You must submit your student information before you start the exam. Please submit your information and try again."
            )

    # Initialize code and data lists
    code: list[str] = []
    data: list[str] = []

    # Splitting the entries into code runs and responses
    for entry in data_:
        if "code run:" in entry:
            code.append(entry)
        else:
            data.append(entry)

    # Checks to see if the drexel_jupyter_logger is in the code
    # If it is, the student might have tried to look at the solutions
    # Commenting this out, since we're switching to asymmetric encryption
    # flag = any("drexel_jupyter_logger" in item for item in code)

    # Extracting timestamps and converting them to datetime objects
    timestamps = [
        datetime.strptime(row.split(",")[-1].strip(), "%Y-%m-%d %H:%M:%S")
        for row in data_reduced
    ]

    # Getting the earliest and latest times
    last_entries["start_time"] = min(timestamps).strftime("%Y-%m-%d %H:%M:%S")
    last_entries["end_time"] = max(timestamps).strftime("%Y-%m-%d %H:%M:%S")
    delta = max(timestamps) - min(timestamps)
    minutes_rounded = round(delta.total_seconds() / 60, 2)
    last_entries["elapsed_minutes"] = minutes_rounded
    # last_entries["flag"] = flag

    # Collect student info dict
    student_information = {key.upper(): value for key, value in last_entries.items()}

    # Write info dict to info.json
    with open("info.json", "w") as file:
        print("Writing to info.json")
        json.dump(student_information, file)

    def get_last_entry(data: list[str], field_name: str) -> str:
        for entry in data[::-1]:
            parts = [part.strip() for part in entry.split(",")]
            if parts[0] == field_name:
                return entry
        return ""

    def get_len_of_entries(data, question_number) -> int:
        """Count the unique sub-question entries recorded for a question.

        Args:
            data (list): list of all the data records
            question_number (int): question number to evaluate

        Returns:
            int: number of unique sub-question IDs, plus one, so the result
                can be used directly as an exclusive upper bound for range()
        """

        # Set for unique qN_* values
        unique_qN_values = set()

        for entry in data:
            if entry.startswith(f"q{question_number}_"):
                # Split the string by commas and get the identifier part
                parts = [part.strip() for part in entry.split(",")]
                # The sub-question index is the part after the underscore in "qN_M"
                value = parts[0].split("_")[1]
                unique_qN_values.add(value)

        return len(unique_qN_values) + 1

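    # Illustration (assumed entries, for clarity only): if the log contains
    # records for "q1_1" and "q1_2", then get_len_of_entries(data, 1) returns
    # 3, so range(1, 3) below visits sub-questions q1_1 and q1_2, and
    # get_last_entry picks the most recent record for each of them.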
    # Keep every entry for questions numbered above the free-response count
    free_response = [
        entry
        for entry in data_
        if entry.startswith("q")
        and entry.split("_")[0][1:].isdigit()
        and int(entry.split("_")[0][1:]) > free_response_questions
    ]

    # Initialize a list to hold the selected question entries.
    q_entries = []

    # Iterate over the number of free response questions.
    for i in range(1, free_response_questions + 1):
        # Collect the last recorded entry for each sub-question.
        entries = [
            entry
            for j in range(1, get_len_of_entries(data, i))
            if (entry := get_last_entry(data, f"q{i}_{j}")) != ""
        ]

        # Append this question's entries to the running list.
        q_entries += entries

    q_entries += free_response

    # Parse the data
    parsed_data: list[list[str]] = [
        [part.strip() for part in line.split(",")] for line in q_entries
    ]

    unique_question_IDs = set(row[0] for row in parsed_data)

    # Initialize a dictionary to hold the maximum score for each unique value
    max_scores = {unique_value: 0 for unique_value in unique_question_IDs}

    # Loop through each row in the data
    for score_entry in parsed_data:
        unique_value = score_entry[0]
        score = int(score_entry[1])
        # possible_score = float(row[3])
        # Update the score if it's higher than the current maximum
        if score > max_scores[unique_value]:
            max_scores[unique_value] = score

    # Loop through the max_scores dictionary and sum scores for each question
    for unique_value, score in max_scores.items():
        # Extract question number (assuming it's the number immediately after 'q')
        question_number = int(unique_value.split("_")[0][1:])
        question_scores[question_number] += score

    # Sorting the dictionary by keys
    question_max_scores = {
        key: int(np.round(question_max_scores[key]))
        for key in sorted(question_max_scores)
    }

    # Sorting the dictionary by keys
    question_scores = {
        key: int(np.round(question_scores[key])) for key in sorted(question_scores)
    }

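    # Worked example (hypothetical scores): if q1_1 was attempted twice with
    # scores 0 and 2, and q1_2 once with score 1, max_scores keeps
    # {"q1_1": 2, "q1_2": 1}, so question_scores[1] becomes 3 and is reported
    # alongside question_max_scores[1] in results.json below.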
    # Creating the dictionary structure
    result_structure: dict[str, list[dict]] = {
        "tests": [],
    }

    # Adding entries for each question
    for question_number in question_scores.keys():
        question_entry = {
            "name": f"Question {question_number}",
            "score": question_scores[question_number],
            "max_score": question_max_scores[question_number],
            # "visibility": "visible",
            # "output": "",
        }
        result_structure["tests"].append(question_entry)

    # Write results dict to results.json
    with open("results.json", "w") as file:
        print("Writing to results.json")
        json.dump(result_structure, file, indent=4)

    login_(login_data, login_url)

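    # For reference, the results.json written above has a shape that resembles
    # the Gradescope autograder results format (values are placeholders):
    #   {"tests": [{"name": "Question 1", "score": 3, "max_score": 10}, ...]}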
    # The file to be uploaded. Ensure the path is correct.
    file_path = "results.json"

    # Construct data payload as a dict
    final_data = {
        "assignment": assignment_id,
        "student_email": last_entries.get("drexel_email"),
        # "original_file_name": file_path,
        "start_time": last_entries["start_time"],
        "end_time": last_entries["end_time"],
        # "flag": last_entries["flag"],
        # "submission_mechanism": "jupyter_notebook",
        # "log_file": loginfo,
        "scores": result_structure["tests"],
    }

    # Files to be uploaded. The key should match the name expected by the server.
    # Commented out along with the `files=` argument below: the scores are
    # currently sent as JSON only, and opening the file here would leave the
    # handle dangling.
    # _files = {
    #     "file": (file_path, open(file_path, "rb")),
    # }

    # Make the POST request with the JSON payload
    response = requests.post(
        url=post_url,
        json=final_data,
        # files=files,
        auth=HTTPBasicAuth(login_data["username"], login_data["password"]),
    )

    # Print messages for the user
    submission_message(response)

def login_(login_data, login_url):
    login_response = requests.post(
        login_url, auth=HTTPBasicAuth(login_data["username"], login_data["password"])
    )

    if login_response.status_code == 200:
        print("Login successful")
    else:
        raise Exception("Login failed")

def submission_message(response) -> None:
    if response.status_code == 200:
        print("Data successfully uploaded to the server")
        print(response.text)
    else:
        print(f"Failed to upload data. Status code: {response.status_code}")
        print(response.text)
        print(
            "There is something wrong with your log file or your submission. Please contact an instructor for help."
        )

    if os.path.exists("results.json"):
        # os.remove("results.json")
        # Let's keep results.json for now, for debugging
        pass
    else:
        print("results.json was not present")
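For context on the encryption scheme validate.py expects, here is a minimal sketch of the writing side. The two key-file names and the "Encrypted Output: " line prefix are taken from validate_logfile itself; the key generation, the sample record text, and the output.log path are purely illustrative assumptions.

    import base64

    import nacl.public

    # Illustrative key generation; in a real course deployment the key pairs
    # would be provisioned ahead of time rather than created here.
    server_key = nacl.public.PrivateKey.generate()
    client_key = nacl.public.PrivateKey.generate()

    # validate_logfile() reads these two files from its working directory.
    with open("server_private_key.bin", "wb") as f:
        f.write(server_key.encode())
    with open("client_public_key.bin", "wb") as f:
        f.write(client_key.public_key.encode())

    # The writer encrypts with Box(client_private, server_public); the reader's
    # Box(server_private, client_public) derives the same shared key, so the
    # decryption loop in validate_logfile can recover the record.
    client_box = nacl.public.Box(client_key, server_key.public_key)
    record = "info, drexel_email, abc123@drexel.edu, 2025-01-01 10:00:00"  # assumed record layout
    ciphertext = client_box.encrypt(record.encode())

    with open("output.log", "a") as log:
        log.write("Encrypted Output: " + base64.b64encode(ciphertext).decode() + "\n")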