PyKubeGrader 0.1.21__tar.gz → 0.1.23__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (63) hide show
  1. {pykubegrader-0.1.21/src/PyKubeGrader.egg-info → pykubegrader-0.1.23}/PKG-INFO +1 -1
  2. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/examples/true_false.ipynb +2 -2
  3. {pykubegrader-0.1.21 → pykubegrader-0.1.23/src/PyKubeGrader.egg-info}/PKG-INFO +1 -1
  4. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/SOURCES.txt +4 -0
  5. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/build/api_notebook_builder.py +2 -2
  6. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/build/build_folder.py +30 -21
  7. pykubegrader-0.1.23/src/pykubegrader/graders/__init__.py +1 -0
  8. pykubegrader-0.1.23/src/pykubegrader/graders/late_assignments.py +45 -0
  9. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/initialize.py +4 -0
  10. pykubegrader-0.1.23/src/pykubegrader/log_parser/__init__.py +1 -0
  11. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/log_parser/parse.py +15 -15
  12. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/telemetry.py +28 -65
  13. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/utils.py +2 -2
  14. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/validate.py +7 -7
  15. pykubegrader-0.1.23/src/pykubegrader/widgets/__init__.py +1 -0
  16. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/true_false.py +2 -2
  17. pykubegrader-0.1.23/src/pykubegrader/widgets_base/__init__.py +1 -0
  18. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets_base/multi_select.py +3 -1
  19. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets_base/reading.py +3 -1
  20. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets_base/select.py +3 -1
  21. pykubegrader-0.1.21/src/pykubegrader/widgets/__init__.py +0 -19
  22. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/.coveragerc +0 -0
  23. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/.github/workflows/main.yml +0 -0
  24. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/.gitignore +0 -0
  25. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/.readthedocs.yml +0 -0
  26. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/AUTHORS.rst +0 -0
  27. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/CHANGELOG.rst +0 -0
  28. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/CONTRIBUTING.rst +0 -0
  29. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/LICENSE.txt +0 -0
  30. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/README.rst +0 -0
  31. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/Makefile +0 -0
  32. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/_static/Drexel_blue_Logo_square_Dark.png +0 -0
  33. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/_static/Drexel_blue_Logo_square_Light.png +0 -0
  34. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/_static/custom.css +0 -0
  35. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/authors.rst +0 -0
  36. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/changelog.rst +0 -0
  37. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/conf.py +0 -0
  38. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/contributing.rst +0 -0
  39. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/index.rst +0 -0
  40. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/license.rst +0 -0
  41. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/readme.rst +0 -0
  42. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/docs/requirements.txt +0 -0
  43. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/examples/.responses.json +0 -0
  44. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/pyproject.toml +0 -0
  45. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/setup.cfg +0 -0
  46. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/setup.py +0 -0
  47. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/dependency_links.txt +0 -0
  48. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/entry_points.txt +0 -0
  49. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/not-zip-safe +0 -0
  50. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/requires.txt +0 -0
  51. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/PyKubeGrader.egg-info/top_level.txt +0 -0
  52. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/__init__.py +0 -0
  53. {pykubegrader-0.1.21/src/pykubegrader/widgets_base → pykubegrader-0.1.23/src/pykubegrader/build}/__init__.py +0 -0
  54. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/log_parser/parse.ipynb +0 -0
  55. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/multiple_choice.py +0 -0
  56. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/reading_question.py +0 -0
  57. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/select_many.py +0 -0
  58. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/student_info.py +0 -0
  59. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/style.py +0 -0
  60. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/src/pykubegrader/widgets/types_question.py +0 -0
  61. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/tests/conftest.py +0 -0
  62. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/tests/import_test.py +0 -0
  63. {pykubegrader-0.1.21 → pykubegrader-0.1.23}/tox.ini +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: PyKubeGrader
3
- Version: 0.1.21
3
+ Version: 0.1.23
4
4
  Summary: Add a short description here!
5
5
  Home-page: https://github.com/pyscaffold/pyscaffold/
6
6
  Author: jagar2
@@ -6,7 +6,7 @@
6
6
  "metadata": {},
7
7
  "outputs": [],
8
8
  "source": [
9
- "from pykubegrader.widgets.true_false import TFQuestion, TrueFalse_style"
9
+ "from pykubegrader.widgets.true_false import TFQuestion, TFStyle"
10
10
  ]
11
11
  },
12
12
  {
@@ -19,7 +19,7 @@
19
19
  " def __init__(\n",
20
20
  " self,\n",
21
21
  " title=\"Respond with True or False\",\n",
22
- " style=TrueFalse_style,\n",
22
+ " style=TFStyle,\n",
23
23
  " question_number=2,\n",
24
24
  " keys=[\"MC1\", \"MC2\", \"MC3\", \"MC4\"],\n",
25
25
  " descriptions=[\n",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: PyKubeGrader
3
- Version: 0.1.21
3
+ Version: 0.1.23
4
4
  Summary: Add a short description here!
5
5
  Home-page: https://github.com/pyscaffold/pyscaffold/
6
6
  Author: jagar2
@@ -37,8 +37,12 @@ src/pykubegrader/initialize.py
37
37
  src/pykubegrader/telemetry.py
38
38
  src/pykubegrader/utils.py
39
39
  src/pykubegrader/validate.py
40
+ src/pykubegrader/build/__init__.py
40
41
  src/pykubegrader/build/api_notebook_builder.py
41
42
  src/pykubegrader/build/build_folder.py
43
+ src/pykubegrader/graders/__init__.py
44
+ src/pykubegrader/graders/late_assignments.py
45
+ src/pykubegrader/log_parser/__init__.py
42
46
  src/pykubegrader/log_parser/parse.ipynb
43
47
  src/pykubegrader/log_parser/parse.py
44
48
  src/pykubegrader/widgets/__init__.py
@@ -93,7 +93,7 @@ class FastAPINotebookBuilder:
93
93
  self.replace_cell_source(cell_index, updated_cell_source)
94
94
 
95
95
  def compute_max_points_free_response(self):
96
- for i, (cell_index, cell_dict) in enumerate(self.assertion_tests_dict.items()):
96
+ for cell_dict in self.assertion_tests_dict.values():
97
97
  # gets the question name from the first cell to not double count
98
98
  if cell_dict["is_first"]:
99
99
  # get the max points for the question
@@ -380,7 +380,7 @@ class FastAPINotebookBuilder:
380
380
  question_groups[question].append(key)
381
381
 
382
382
  # Add 'is_first' and 'is_last' flags to all cells
383
- for question, keys in question_groups.items():
383
+ for keys in question_groups.values():
384
384
  test_number = 1
385
385
  for i, key in enumerate(keys):
386
386
  cells_dict[key]["is_first"] = i == 0
@@ -58,6 +58,11 @@ class NotebookProcessor:
58
58
  format="%(asctime)s - %(levelname)s - %(message)s", # Log message format: timestamp, level, and message
59
59
  )
60
60
 
61
+ self.assignmet_type = self.assignment_tag.split("-")[0].lower()
62
+
63
+ week_num = self.assignment_tag.split("-")[-1]
64
+ self.week = f"week_{week_num}"
65
+
61
66
  # Initialize a global logger for the class
62
67
  global logger
63
68
  logger = logging.getLogger(
@@ -354,7 +359,9 @@ class NotebookProcessor:
354
359
  notebook_subfolder, "dist", "student", f"{notebook_name}.ipynb"
355
360
  )
356
361
 
357
- NotebookProcessor.add_initialization_code(student_notebook)
362
+ NotebookProcessor.add_initialization_code(
363
+ student_notebook, self.week, self.assignmet_type
364
+ )
358
365
 
359
366
  self.clean_notebook(student_notebook)
360
367
 
@@ -378,7 +385,9 @@ class NotebookProcessor:
378
385
 
379
386
  return student_notebook, out.total_points
380
387
  else:
381
- NotebookProcessor.add_initialization_code(temp_notebook_path)
388
+ NotebookProcessor.add_initialization_code(
389
+ temp_notebook_path, self.week, self.assignmet_type
390
+ )
382
391
  return None, 0
383
392
 
384
393
  @staticmethod
@@ -399,13 +408,13 @@ class NotebookProcessor:
399
408
  nbformat.write(notebook, f)
400
409
 
401
410
  @staticmethod
402
- def add_initialization_code(notebook_path):
411
+ def add_initialization_code(notebook_path, week, assignment_type):
403
412
  # finds the first code cell
404
413
  index, cell = find_first_code_cell(notebook_path)
405
414
  cell = cell["source"]
406
415
  import_text = "from pykubegrader.initialize import initialize_assignment\n"
407
416
  cell = f"{import_text}\n" + cell
408
- cell += f'\nresponses = initialize_assignment("{os.path.splitext(os.path.basename(notebook_path))[0]}")\n'
417
+ cell += f'\nresponses = initialize_assignment("{os.path.splitext(os.path.basename(notebook_path))[0]}", "{week}", "{assignment_type}" )\n'
409
418
  replace_cell_source(notebook_path, index, cell)
410
419
 
411
420
  def multiple_choice_parser(self, temp_notebook_path, new_notebook_path):
@@ -429,7 +438,7 @@ class NotebookProcessor:
429
438
  for data_ in data:
430
439
  # Generate the solution file
431
440
  self.mcq_total_points = self.generate_solution_MCQ(
432
- data, output_file=solution_path
441
+ data_, output_file=solution_path
433
442
  )
434
443
 
435
444
  question_path = (
@@ -1435,7 +1444,7 @@ def generate_mcq_file(data_dict, output_file="mc_questions.py"):
1435
1444
 
1436
1445
  for question_dict in data_dict:
1437
1446
  with open(output_file, "a", encoding="utf-8") as f:
1438
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1447
+ for i, q_value in enumerate(question_dict.values()):
1439
1448
  if i == 0:
1440
1449
  # Write the MCQuestion class
1441
1450
  f.write(
@@ -1451,7 +1460,7 @@ def generate_mcq_file(data_dict, output_file="mc_questions.py"):
1451
1460
  break
1452
1461
 
1453
1462
  keys = []
1454
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1463
+ for q_value in question_dict.values():
1455
1464
  # Write keys
1456
1465
  keys.append(
1457
1466
  f"q{q_value['question number']}-{q_value['subquestion_number']}-{q_value['name']}"
@@ -1460,20 +1469,20 @@ def generate_mcq_file(data_dict, output_file="mc_questions.py"):
1460
1469
  f.write(f" keys={keys},\n")
1461
1470
 
1462
1471
  options = []
1463
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1472
+ for q_value in question_dict.values():
1464
1473
  # Write options
1465
1474
  options.append(q_value["OPTIONS"])
1466
1475
 
1467
1476
  f.write(f" options={options},\n")
1468
1477
 
1469
1478
  descriptions = []
1470
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1479
+ for q_value in question_dict.values():
1471
1480
  # Write descriptions
1472
1481
  descriptions.append(q_value["question_text"])
1473
1482
  f.write(f" descriptions={descriptions},\n")
1474
1483
 
1475
1484
  points = []
1476
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1485
+ for q_value in question_dict.values():
1477
1486
  # Write points
1478
1487
  points.append(q_value["points"])
1479
1488
 
@@ -1506,7 +1515,7 @@ def generate_select_many_file(data_dict, output_file="select_many_questions.py")
1506
1515
 
1507
1516
  for question_dict in data_dict:
1508
1517
  with open(output_file, "a", encoding="utf-8") as f:
1509
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1518
+ for i, q_value in enumerate(question_dict.values()):
1510
1519
  if i == 0:
1511
1520
  # Write the MCQuestion class
1512
1521
  f.write(
@@ -1522,7 +1531,7 @@ def generate_select_many_file(data_dict, output_file="select_many_questions.py")
1522
1531
  break
1523
1532
 
1524
1533
  keys = []
1525
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1534
+ for q_value in question_dict.values():
1526
1535
  # Write keys
1527
1536
  keys.append(
1528
1537
  f"q{q_value['question number']}-{q_value['subquestion_number']}-{q_value['name']}"
@@ -1531,20 +1540,20 @@ def generate_select_many_file(data_dict, output_file="select_many_questions.py")
1531
1540
  f.write(f" keys={keys},\n")
1532
1541
 
1533
1542
  descriptions = []
1534
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1543
+ for q_value in question_dict.values():
1535
1544
  # Write descriptions
1536
1545
  descriptions.append(q_value["question_text"])
1537
1546
  f.write(f" descriptions={descriptions},\n")
1538
1547
 
1539
1548
  options = []
1540
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1549
+ for q_value in question_dict.values():
1541
1550
  # Write options
1542
1551
  options.append(q_value["OPTIONS"])
1543
1552
 
1544
1553
  f.write(f" options={options},\n")
1545
1554
 
1546
1555
  points = []
1547
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1556
+ for q_value in question_dict.values():
1548
1557
  # Write points
1549
1558
  points.append(q_value["points"])
1550
1559
 
@@ -1572,7 +1581,7 @@ def generate_tf_file(data_dict, output_file="tf_questions.py"):
1572
1581
 
1573
1582
  # Define header lines
1574
1583
  header_lines = [
1575
- "from pykubegrader.widgets.true_false import TFQuestion, TrueFalse_style\n",
1584
+ "from pykubegrader.widgets.true_false import TFQuestion, TFStyle\n",
1576
1585
  "import pykubegrader.initialize\n",
1577
1586
  "import panel as pn\n\n",
1578
1587
  "pn.extension()\n\n",
@@ -1583,7 +1592,7 @@ def generate_tf_file(data_dict, output_file="tf_questions.py"):
1583
1592
 
1584
1593
  for question_dict in data_dict:
1585
1594
  with open(output_file, "a", encoding="utf-8") as f:
1586
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1595
+ for i, q_value in enumerate(question_dict.values()):
1587
1596
  if i == 0:
1588
1597
  # Write the MCQuestion class
1589
1598
  f.write(
@@ -1592,14 +1601,14 @@ def generate_tf_file(data_dict, output_file="tf_questions.py"):
1592
1601
  f.write(" def __init__(self):\n")
1593
1602
  f.write(" super().__init__(\n")
1594
1603
  f.write(f" title=f'{q_value['question_text']}',\n")
1595
- f.write(" style=TrueFalse_style,\n")
1604
+ f.write(" style=TFStyle,\n")
1596
1605
  f.write(
1597
1606
  f" question_number={q_value['question number']},\n"
1598
1607
  )
1599
1608
  break
1600
1609
 
1601
1610
  keys = []
1602
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1611
+ for q_value in question_dict.values():
1603
1612
  # Write keys
1604
1613
  keys.append(
1605
1614
  f"q{q_value['question number']}-{q_value['subquestion_number']}-{q_value['name']}"
@@ -1608,13 +1617,13 @@ def generate_tf_file(data_dict, output_file="tf_questions.py"):
1608
1617
  f.write(f" keys={keys},\n")
1609
1618
 
1610
1619
  descriptions = []
1611
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1620
+ for q_value in question_dict.values():
1612
1621
  # Write descriptions
1613
1622
  descriptions.append(q_value["question_text"])
1614
1623
  f.write(f" descriptions={descriptions},\n")
1615
1624
 
1616
1625
  points = []
1617
- for i, (q_key, q_value) in enumerate(question_dict.items()):
1626
+ for q_value in question_dict.values():
1618
1627
  # Write points
1619
1628
  points.append(q_value["points"])
1620
1629
 
@@ -0,0 +1,45 @@
1
+ import datetime
2
+
3
+ import numpy as np
4
+
5
+
6
+ def calculate_late_submission(
7
+ due: str,
8
+ submitted: str,
9
+ Q0: int = 100,
10
+ Q_min: int = 40,
11
+ k: float = 6.88e-5,
12
+ ) -> float:
13
+ """
14
+ Calculate the percentage value based on an exponential decay model
15
+ with respect to a due date, using datetime string inputs.
16
+
17
+ Parameters:
18
+ - due (str): The due date as a string in the format "%Y-%m-%d %H:%M:%S".
19
+ - submitted (str): The comparison date as a string in the format "%Y-%m-%d %H:%M:%S".
20
+ - Q0 (int): Initial value (default is 100).
21
+ - Q_min (int): Minimum value (default is 40).
22
+ - k (float): Decay constant per minute (default is 6.88e-5).
23
+
24
+ Returns:
25
+ - float: The percentage value after decay, bounded between Q_min and Q0.
26
+ """
27
+
28
+ # Convert datetime strings to UNIX timestamps
29
+ due_date = datetime.datetime.strptime(due, "%Y-%m-%d %H:%M:%S")
30
+ submitted_date = datetime.datetime.strptime(submitted, "%Y-%m-%d %H:%M:%S")
31
+
32
+ # Calculate time difference in seconds
33
+ time_difference = (submitted_date - due_date).total_seconds()
34
+
35
+ # Convert time difference from seconds to minutes
36
+ time_in_minutes = time_difference / 60.0
37
+
38
+ # Calculate the exponential decay
39
+ Q: float = Q0 * np.exp(-k * time_in_minutes)
40
+
41
+ # Apply floor and ceiling conditions
42
+ Q = np.maximum(Q, Q_min)
43
+ Q = np.minimum(Q, Q0)
44
+
45
+ return Q
@@ -11,6 +11,8 @@ from .telemetry import ensure_responses, log_variable, telemetry, update_respons
11
11
 
12
12
  def initialize_assignment(
13
13
  name: str,
14
+ week: int,
15
+ assignment_type: str,
14
16
  url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
15
17
  verbose: bool = False,
16
18
  ) -> dict:
@@ -46,6 +48,8 @@ def initialize_assignment(
46
48
  try:
47
49
  seed = hash(jhub_user) % 1000
48
50
  update_responses(key="seed", value=seed)
51
+ update_responses(key="week", value=week)
52
+ update_responses(key="assignment_type", value=assignment_type)
49
53
 
50
54
  update_responses(key="assignment", value=name)
51
55
  update_responses(key="jhub_user", value=jhub_user)
@@ -1,5 +1,5 @@
1
1
  from dataclasses import dataclass, field
2
- from typing import Dict, List, Optional
2
+ from typing import Optional
3
3
 
4
4
 
5
5
  @dataclass
@@ -9,16 +9,16 @@ class LogParser:
9
9
  Handles both assignment info and question-level details.
10
10
  """
11
11
 
12
- log_lines: List[str]
12
+ log_lines: list[str]
13
13
  week_tag: Optional[str] = None
14
- student_info: Dict[str, str] = field(default_factory=dict)
15
- assignments: Dict[str, Dict] = field(default_factory=dict)
14
+ student_info: dict[str, str] = field(default_factory=dict)
15
+ assignments: dict[str, dict] = field(default_factory=dict)
16
16
 
17
- def parse_logs(self):
17
+ def parse_logs(self) -> None:
18
18
  """
19
19
  Main method to parse logs and populate student_info and assignments.
20
20
  """
21
- unique_students = set()
21
+ unique_students: set[str] = set()
22
22
 
23
23
  self._find_all_questions()
24
24
 
@@ -41,13 +41,13 @@ class LogParser:
41
41
  ):
42
42
  self._process_assignment_entry(line)
43
43
 
44
- def _find_all_questions(self):
44
+ def _find_all_questions(self) -> None:
45
45
  """
46
46
  Finds all questions in the log_lines and returns a list of them.
47
47
  """
48
48
  questions = []
49
49
  for line in self.log_lines:
50
- if self.week_tag in line:
50
+ if self.week_tag and self.week_tag in line:
51
51
  parts = line.split(",")
52
52
  question_tag = parts[3].strip()
53
53
  if question_tag not in questions:
@@ -60,7 +60,7 @@ class LogParser:
60
60
  """
61
61
  return line.startswith("Student Info")
62
62
 
63
- def _process_student_info(self, line: str, unique_students: set):
63
+ def _process_student_info(self, line: str, unique_students: set) -> None:
64
64
  """
65
65
  Processes a line containing student information.
66
66
  Raises an error if multiple unique students are found.
@@ -83,7 +83,7 @@ class LogParser:
83
83
  "timestamp": parts[3].strip(),
84
84
  }
85
85
 
86
- def _process_assignment_header(self, line: str):
86
+ def _process_assignment_header(self, line: str) -> None:
87
87
  parts = line.split(",")
88
88
  assignment_tag = parts[0].strip()
89
89
  if assignment_tag.startswith("total-points"):
@@ -105,7 +105,7 @@ class LogParser:
105
105
  self.assignments[notebook_name]["max_points"] = total_points_value
106
106
  self.assignments[notebook_name]["latest_timestamp"] = timestamp
107
107
 
108
- def _process_assignment_entry(self, line: str):
108
+ def _process_assignment_entry(self, line: str) -> None:
109
109
  """
110
110
  Processes a line containing an assignment entry.
111
111
  Adds it to the assignments dictionary.
@@ -141,7 +141,7 @@ class LogParser:
141
141
  if timestamp > self.assignments[assignment_tag]["latest_timestamp"]:
142
142
  self.assignments[assignment_tag]["latest_timestamp"] = timestamp
143
143
 
144
- def _extract_total_points(self, parts: List[str]) -> Optional[float]:
144
+ def _extract_total_points(self, parts: list[str]) -> Optional[float]:
145
145
  """
146
146
  Extracts the total-points value from the parts array of a total-points line.
147
147
  """
@@ -150,17 +150,17 @@ class LogParser:
150
150
  except (ValueError, IndexError):
151
151
  return None
152
152
 
153
- def calculate_total_scores(self):
153
+ def calculate_total_scores(self) -> None:
154
154
  """
155
155
  Calculates total scores for each assignment by summing the 'score_earned'
156
156
  of its questions, and sets 'total_points' if it was not specified.
157
157
  """
158
- for assignment, data in self.assignments.items():
158
+ for data in self.assignments.values():
159
159
  # Sum of all question score_earned
160
160
  total_score = sum(q["score_earned"] for q in data["questions"].values())
161
161
  data["total_score"] = total_score
162
162
 
163
- def get_results(self) -> Dict[str, Dict]:
163
+ def get_results(self) -> dict[str, dict]:
164
164
  """
165
165
  Returns the parsed results as a hierarchical dictionary with three sections:
166
166
  """
@@ -11,27 +11,24 @@ from IPython.core.interactiveshell import ExecutionInfo
11
11
  from requests import Response
12
12
  from requests.auth import HTTPBasicAuth
13
13
 
14
- # Logger for .output_code.log
14
+ #
15
+ # Logging setup
16
+ #
17
+
18
+ # Logger for cell execution
15
19
  logger_code = logging.getLogger("code_logger")
16
20
  logger_code.setLevel(logging.INFO)
17
21
 
18
22
  file_handler_code = logging.FileHandler(".output_code.log")
19
23
  file_handler_code.setLevel(logging.INFO)
20
-
21
- # formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
22
- # file_handler_code.setFormatter(formatter)
23
-
24
24
  logger_code.addHandler(file_handler_code)
25
25
 
26
- # Logger for .output_reduced.log
26
+ # Logger for question scores etc.
27
27
  logger_reduced = logging.getLogger("reduced_logger")
28
28
  logger_reduced.setLevel(logging.INFO)
29
29
 
30
30
  file_handler_reduced = logging.FileHandler(".output_reduced.log")
31
31
  file_handler_reduced.setLevel(logging.INFO)
32
-
33
- # file_handler_reduced.setFormatter(formatter)
34
-
35
32
  logger_reduced.addHandler(file_handler_reduced)
36
33
 
37
34
  #
@@ -55,7 +52,7 @@ def encrypt_to_b64(message: str) -> str:
55
52
  return encrypted_b64
56
53
 
57
54
 
58
- def ensure_responses() -> dict:
55
+ def ensure_responses() -> dict[str, Any]:
59
56
  with open(".responses.json", "a") as _:
60
57
  pass
61
58
 
@@ -125,28 +122,34 @@ def update_responses(key: str, value) -> dict:
125
122
  #
126
123
 
127
124
 
128
- # If we instead call this with **responses
125
+ # TODO: Improve error handling
129
126
  def score_question(
130
- student_email: str,
131
- assignment: str,
132
- question: str,
133
- submission: str,
134
127
  term: str = "winter_2025",
135
- base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
136
- ) -> Response:
128
+ base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com",
129
+ ) -> None:
137
130
  url = base_url + "/live-scorer"
138
131
 
139
- payload = {
140
- "student_email": student_email,
132
+ responses = ensure_responses()
133
+
134
+ payload: dict[str, Any] = {
135
+ "student_email": f'{responses["jhub_user"]}@drexel.edu',
141
136
  "term": term,
142
- "assignment": assignment,
143
- "question": question,
144
- "responses": submission,
137
+ "week": responses["week"],
138
+ "assignment": responses["assignment_type"],
139
+ "question": f'_{responses["assignment"]}',
140
+ "responses": responses,
145
141
  }
146
142
 
147
143
  res = requests.post(url, json=payload, auth=HTTPBasicAuth("student", "capture"))
148
144
 
149
- return res
145
+ res_data: dict[str, tuple[float, float]] = res.json()
146
+
147
+ for question, (points_earned, max_points) in res_data.items():
148
+ log_variable(
149
+ assignment_name=responses["assignment"],
150
+ value=f"{points_earned}, {max_points}",
151
+ info_type=question,
152
+ )
150
153
 
151
154
 
152
155
  def submit_question(
@@ -157,7 +160,7 @@ def submit_question(
157
160
  responses: dict,
158
161
  score: dict,
159
162
  base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
160
- ):
163
+ ) -> Response:
161
164
  url = base_url + "/submit-question"
162
165
 
163
166
  payload = {
@@ -174,7 +177,7 @@ def submit_question(
174
177
  return res
175
178
 
176
179
 
177
- # TODO: refine function
180
+ # TODO: Refine
178
181
  def verify_server(
179
182
  jhub_user: Optional[str] = None,
180
183
  url: str = "https://engr-131-api.eastus.cloudapp.azure.com/",
@@ -183,43 +186,3 @@ def verify_server(
183
186
  res = requests.get(url, params=params)
184
187
  message = f"status code: {res.status_code}"
185
188
  return message
186
-
187
-
188
- # TODO: implement function; or maybe not?
189
- # At least improve other one
190
- def score_question_improved(
191
- week: str,
192
- assignment_category: str,
193
- term: str = "winter_2025",
194
- base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com",
195
- ) -> None:
196
- url = base_url + "/live-scorer"
197
-
198
- responses = ensure_responses()
199
-
200
- payload: dict[str, Any] = {
201
- "student_email": f'{responses["jhub_user"]}@drexel.edu',
202
- "term": term,
203
- "week": week,
204
- "assignment": assignment_category,
205
- "question": f'_{responses["assignment"]}',
206
- "responses": responses,
207
- }
208
-
209
- res = requests.post(url, json=payload, auth=HTTPBasicAuth("student", "capture"))
210
-
211
- res_data = res.json()
212
- # max_points, points_earned = res_data["max_points"], res_data["points_earned"]
213
- # log_variable(
214
- # assignment_name=responses["assignment"],
215
- # value=f"{points_earned}, {max_points}",
216
- # info_type="score",
217
- # )
218
-
219
- # res_data is now dict[str, tuple[float, float]]
220
- for question, (points_earned, max_points) in res_data.items():
221
- log_variable(
222
- assignment_name=responses["assignment"],
223
- value=f"{points_earned}, {max_points}",
224
- info_type=question,
225
- )
@@ -1,5 +1,5 @@
1
1
  import random
2
- from typing import Tuple
2
+ from typing import Optional, Tuple
3
3
 
4
4
  import panel as pn
5
5
 
@@ -8,7 +8,7 @@ def list_of_lists(options: list) -> bool:
8
8
  return all(isinstance(elem, list) for elem in options)
9
9
 
10
10
 
11
- def shuffle_options(options, seed: int):
11
+ def shuffle_options(options: list[Optional[str]], seed: int) -> list[Optional[str]]:
12
12
  random.seed(seed)
13
13
  random.shuffle(options)
14
14
 
@@ -19,10 +19,10 @@ def validate_logfile(
19
19
  filepath: str,
20
20
  assignment_id: str,
21
21
  question_max_scores: dict[int, int],
22
- free_response_questions=0,
23
- username="student",
24
- password="capture",
25
- base_url="https://engr-131-api.eastus.cloudapp.azure.com",
22
+ free_response_questions: int = 0,
23
+ username: str = "student",
24
+ password: str = "capture",
25
+ base_url: str = "https://engr-131-api.eastus.cloudapp.azure.com",
26
26
  ) -> None:
27
27
  login_data = {
28
28
  "username": username,
@@ -225,7 +225,7 @@ def validate_logfile(
225
225
  submission_message(response)
226
226
 
227
227
 
228
- def read_logfile(filepath, key_box=None) -> tuple[list[str], list[str]]:
228
+ def read_logfile(filepath: str, key_box=None) -> tuple[list[str], list[str]]:
229
229
  if key_box is None:
230
230
  key_box = generate_keys()
231
231
 
@@ -307,7 +307,7 @@ def get_last_entry(data: list[str], field_name: str) -> str:
307
307
  return ""
308
308
 
309
309
 
310
- def submission_message(response) -> None:
310
+ def submission_message(response: requests.Response) -> None:
311
311
  if response.status_code == 200:
312
312
  print("Data successfully uploaded to the server")
313
313
  print(response.text)
@@ -326,7 +326,7 @@ def submission_message(response) -> None:
326
326
  print("results.json was not present")
327
327
 
328
328
 
329
- def verify_login(login_data, login_url):
329
+ def verify_login(login_data: dict[str, str], login_url: str) -> None:
330
330
  login_response = requests.post(
331
331
  login_url, auth=HTTPBasicAuth(login_data["username"], login_data["password"])
332
332
  )
@@ -13,7 +13,7 @@ pn.extension(design="material", global_css=[drexel_colors], raw_css=[raw_css])
13
13
  #
14
14
 
15
15
 
16
- def TrueFalse_style(
16
+ def TFStyle(
17
17
  descriptions: List[str],
18
18
  options: List[str] | List[List[str]],
19
19
  initial_vals: List[str],
@@ -71,7 +71,7 @@ class TFQuestion(SelectQuestion):
71
71
  def __init__(
72
72
  self,
73
73
  title="Select if the statement is True or False",
74
- style=TrueFalse_style,
74
+ style=TFStyle,
75
75
  question_number=2,
76
76
  keys=["MC1", "MC2", "MC3", "MC4"],
77
77
  options=None,
@@ -3,7 +3,7 @@ from typing import Callable, Tuple
3
3
 
4
4
  import panel as pn
5
5
 
6
- from ..telemetry import ensure_responses, update_responses
6
+ from ..telemetry import ensure_responses, score_question, update_responses
7
7
  from ..utils import shuffle_questions
8
8
  from ..widgets.style import drexel_colors, raw_css
9
9
 
@@ -104,6 +104,8 @@ class MultiSelectQuestion:
104
104
 
105
105
  self.record_responses(responses_flat)
106
106
 
107
+ score_question() # Debugging; update later
108
+
107
109
  def record_responses(self, responses_flat: list[bool]) -> None:
108
110
  for key, value in zip(self.keys, responses_flat):
109
111
  update_responses(key, value)
@@ -3,7 +3,7 @@ from typing import Optional
3
3
 
4
4
  import panel as pn
5
5
 
6
- from ..telemetry import ensure_responses, update_responses
6
+ from ..telemetry import ensure_responses, score_question, update_responses
7
7
  from ..utils import shuffle_options
8
8
 
9
9
 
@@ -162,6 +162,8 @@ class ReadingPython:
162
162
  i += 1
163
163
  update_responses(f"q{self.question_number}_{i}", exec_val)
164
164
 
165
+ score_question() # Debugging; update later
166
+
165
167
  print("Responses recorded successfully")
166
168
 
167
169
  def show(self):
@@ -3,7 +3,7 @@ from typing import Callable, Tuple
3
3
 
4
4
  import panel as pn
5
5
 
6
- from ..telemetry import ensure_responses, update_responses
6
+ from ..telemetry import ensure_responses, score_question, update_responses
7
7
  from ..utils import shuffle_questions
8
8
  from ..widgets.style import drexel_colors
9
9
 
@@ -71,6 +71,8 @@ class SelectQuestion:
71
71
  for key, value in selections.items():
72
72
  update_responses(key, value)
73
73
 
74
+ score_question() # Debugging; update later
75
+
74
76
  # Temporarily change button text to indicate submission
75
77
  self.submit_button.name = "Responses Submitted"
76
78
  time.sleep(1)
@@ -1,19 +0,0 @@
1
- # Auto-generated __init__.py
2
-
3
- from . import (
4
- multiple_choice,
5
- reading_question,
6
- select_many,
7
- student_info,
8
- true_false,
9
- types_question,
10
- )
11
-
12
- __all__ = [
13
- "select_many",
14
- "multiple_choice",
15
- "true_false",
16
- "reading_question",
17
- "student_info",
18
- "types_question",
19
- ]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes