assignment-codeval 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
File without changes
@@ -0,0 +1,75 @@
1
+ import datetime
2
+ import sys
3
+ from functools import cache
4
+ from typing import NamedTuple
5
+ from configparser import ConfigParser
6
+
7
+ import click
8
+ from canvasapi import Canvas
9
+ from canvasapi.current_user import CurrentUser
10
+
11
+ from assignment_codeval.commons import error, info, errorWithException
12
+
13
# Lightweight pairing of an authenticated Canvas client and its current user,
# returned by connect_to_canvas().
CanvasConnection = NamedTuple('CanvasConnection', [('canvas', Canvas), ('user', CurrentUser)])
14
+
15
+
16
def _check_config(parser, section, key):
    """Exit (status 1) unless ``parser[section][key]`` exists.

    Args:
        parser: ConfigParser carrying a ``config_file`` attribute used in
            error messages.
        section: section name expected in the config file.
        key: option name expected inside that section.
    """
    section_present = section in parser
    if not section_present:
        error(f"did not find [{section}] section in {parser.config_file}.")
        sys.exit(1)
    if key in parser[section]:
        return
    error(f"did not find {key} in [{section}] in {parser.config_file}.")
    sys.exit(1)
23
+
24
+
25
def connect_to_canvas():
    """Connect to Canvas using the url/token from the codeval.ini app config.

    Returns:
        CanvasConnection: (canvas, user) for the authenticated current user.

    Raises:
        SystemExit: if required config keys are missing (via _check_config).
        EnvironmentError: via errorWithException if Canvas cannot be reached.
    """
    parser = ConfigParser()
    config_file = click.get_app_dir("codeval.ini")
    parser.read(config_file)
    # Stash the path so error messages can name the file that was read.
    parser.config_file = config_file

    for key in ['url', 'token']:
        _check_config(parser, 'SERVER', key)
    try:
        canvas = Canvas(parser['SERVER']['url'], parser['SERVER']['token'])
        user = canvas.get_current_user()
        info(f"connected to canvas as {user.name} ({user.id})")
        return CanvasConnection(canvas, user)
    except Exception:
        # BUG FIX: was a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        errorWithException("there was a problem accessing canvas.")
40
+
41
+
42
@cache
def get_course(canvas, name, is_active=True):
    """Return the single course whose name contains `name` (cached).

    Exits with status 2 when no course, or more than one course, matches —
    after listing the available/conflicting course names.
    """
    matches = get_courses(canvas, name, is_active)
    if not matches:
        error(f'no courses found that contain {name}. options are:')
        for course in get_courses(canvas, "", is_active):
            error(fr" {course.name}")
        sys.exit(2)
    if len(matches) > 1:
        error(f"multiple courses found for {name}: {[c.name for c in matches]}")
        for course in matches:
            error(f" {course.name}")
        sys.exit(2)
    return matches[0]
57
+
58
+
59
def get_courses(canvas, name: str, is_active=True, is_finished=False):
    """Return the teacher's courses whose names contain `name`.

    Courses without start/end dates are treated as currently running.
    Each matching course gets `start`/`end` attributes attached.
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    matching = []
    for course in canvas.get_courses(enrollment_type="teacher"):
        start = getattr(course, "start_at_date", now)
        end = getattr(course, "end_at_date", now)
        # is_active keeps only courses whose window contains "now".
        if is_active and not (start <= now <= end):
            continue
        if is_finished and end < now:
            continue
        if name not in course.name:
            continue
        course.start = start
        course.end = end
        matching.append(course)
    return matching
@@ -0,0 +1,17 @@
1
import click
from assignment_codeval.evaluate import run_evaluation
from assignment_codeval.github_connect import github_setup_repo
from assignment_codeval.submissions import download_submissions, upload_submission_comments


@click.group()
def cli():
    """Top-level click command group for the assignment-codeval CLI."""
    pass

# Register the subcommands implemented in the sibling modules.
cli.add_command(run_evaluation)
cli.add_command(download_submissions)
cli.add_command(upload_submission_comments)
cli.add_command(github_setup_repo)

if __name__ == "__main__":
    cli()
@@ -0,0 +1,51 @@
1
+ from typing import NoReturn
2
+
3
+ import click
4
+ import time
5
+ import dataclasses
6
+
7
+
8
+ @dataclasses.dataclass(init=True, repr=True, frozen=True)
9
+ class _Config():
10
+ """Global configuration object for the CLI"""
11
+ show_debug: bool
12
+ dry_run: bool
13
+ force: bool
14
+ copy_tmpdir: bool
15
+
16
+ # static global config instance
17
+ _instance: '_Config' = None
18
+
19
+
20
def get_config():
    """Return the process-wide _Config, creating a default one on first use.

    The default is show_debug=False, dry_run=True, force=False,
    copy_tmpdir=False (dry-run by default is the safe choice).
    """
    instance = _Config._instance
    if instance is None:
        instance = _Config(False, True, False, False)
        _Config._instance = instance
    return instance
24
+
25
+
26
def set_config(show_debug, dry_run, force, copy_tmpdir):
    """Replace the process-wide _Config and return the new instance."""
    instance = _Config(show_debug, dry_run, force, copy_tmpdir)
    _Config._instance = instance
    return instance
29
+
30
+
31
+ def _now():
32
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
33
+
34
+
35
def debug(message):
    """Echo a magenta debug line, but only when show_debug is enabled."""
    if not get_config().show_debug:
        return
    click.echo(click.style(f"{_now()} D {message}", fg='magenta'))
38
+
39
def error(message):
    """Echo a red error line with a UTC timestamp."""
    styled = click.style(f"{_now()} E {message}", fg='red')
    click.echo(styled)
41
+
42
def errorWithException(message) -> NoReturn:
    """Log `message` as an error, then raise it as an EnvironmentError."""
    failure = EnvironmentError(message)
    error(message)
    raise failure
45
+
46
def info(message):
    """Echo a blue informational line with a UTC timestamp."""
    line = f"{_now()} I {message}"
    click.echo(click.style(line, fg='blue'))
48
+
49
+
50
def warn(message):
    """Echo a yellow warning line with a UTC timestamp."""
    line = f"{_now()} W {message}"
    click.echo(click.style(line, fg='yellow'))
@@ -0,0 +1,676 @@
1
+ #! /usr/bin/python3
2
+
3
+ import os
4
+ import re
5
+ import subprocess
6
+ import sys
7
+ import traceback
8
+ import threading
9
+ import time
10
+
11
+ import click
12
+
13
+ from assignment_codeval.file_utils import unzip
14
+
15
+ ###########################################################
16
+ # Globals
17
+ ###########################################################
18
+
19
+
20
# Command line of the currently queued test case; "" means none pending.
test_args = ""
# Flat list of file names queued for `cmp` comparison (appended in pairs).
cmps = []
# Per-test-case timeout in seconds (overridden by the TO tag).
timeout_val = 10
# Expected exit code for the current test case; -1 disables the check.
expected_exit_code = -1
# Index of the test case currently being processed.
test_case_count = 0
# Hint text associated with the current test case (HINT tag).
test_case_hint = ""
# Total number of T/HT tags found in the spec file.
test_case_total = 0
# Running pass/fail counters.
num_passed = 0
num_failed = 0
# NOTE(review): is_hidden_testcase and is_verbose appear unused in this
# module — the hidden flag actually used is the rebound name
# `test_case_hidden`; confirm before removing.
is_hidden_testcase = False
is_verbose = False
# NOTE(review): this variable appears unused; the code writes to a file
# literally named "compilelog" instead.
compilelog = []
32
+
33
+ ###########################################################
34
+ # Specification Tags to Function Mapping
35
+ ###########################################################
36
+
37
+
38
def compile_code(compile_command):
    """Specifies the command to compile the submission code (C tag).

    Flushes any pending test case first. On a non-zero exit status the
    compile log (or its head and tail, when long) is printed and the
    program exits with status 1.

    Arguments:
        compile_command: the command to compile the submission code with

    Returns:
        None
    """
    if test_case_count != 0:
        check_test()

    # Run the compile command, capturing stdout+stderr in "compilelog".
    with open("compilelog", "w") as outfile:
        compile_popen = subprocess.Popen(
            compile_command, shell=True, stdout=outfile, stderr=outfile, text=True
        )
        # BUG FIX: communicate() was being called with the Popen object as
        # its stdin-input argument; it takes no input here.
        compile_popen.communicate()

    if compile_popen.returncode:
        with open("compilelog", "r") as infile:
            compile_log = infile.readlines()

        if len(compile_log) <= 20:
            # Short log: print it whole.
            # BUG FIX: the old code always printed both head[:10] and
            # tail[-10:], duplicating lines whenever the log had <= 20 lines.
            for line in compile_log:
                print(line, end="")
        else:
            # Long log: print the first and last 10 lines with an ellipsis.
            for line in compile_log[:10]:
                print(line, end="")
            print("...", end="")
            for line in compile_log[-10:]:
                print(line, end="")

        sys.exit(1)
74
+
75
+
76
def check_function(args):
    """Handle the CF tag: require that a function is used in given files.

    Arguments:
        args: "<function_name> <file> [<file> ...]" — a grep for the
            function name followed by '(' decides PASSED/FAILED.

    Returns:
        None
    """
    check_test()
    tokens = args.split()
    function_name, files = tokens[0], tokens[1:]

    # Suppress grep's own output; only its exit status matters.
    grep = subprocess.Popen(
        ["grep", f"[^[:alpha:]]{function_name}[[:space:]]*("] + files,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    grep.communicate()

    # grep exits non-zero when no match was found.
    if grep.returncode:
        print(f"Not using {function_name} FAILED")
    else:
        print(f"Used {function_name} PASSED")
104
+
105
+
106
def check_not_function(args):
    """Handle the NCF tag: require that a function is NOT used in given files.

    Arguments:
        args: "<function_name> <file> [<file> ...]" — a grep for the
            function name followed by '(' decides PASSED/FAILED.

    Returns:
        None
    """
    check_test()
    args = args.split()
    function_name = args[0]
    files = args[1:]

    # Suppress grep's own output; only its exit status matters.
    function_popen = subprocess.Popen(
        ["grep", f"[^[:alpha:]]{function_name}[[:space:]]*("] + files,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    function_popen.communicate()
    # grep exits non-zero when the function was NOT found — the passing
    # outcome for this tag. BUG FIX: the old messages described the
    # opposite condition ("used{fn} PASSED" / "not using {fn} FAILED").
    if function_popen.returncode:
        print(f"Not using {function_name} PASSED")
    else:
        print(f"Used {function_name} FAILED")
134
+
135
+
136
def run_command(command):
    """Handle the CMD tag: run a shell command, echoing it first.

    Output is not captured and the exit status is ignored.

    Arguments:
        command: the command to run

    Returns:
        None
    """
    check_test()

    print(command)
    proc = subprocess.Popen(command, shell=True)
    proc.communicate()
151
+
152
+
153
def run_command_noerror(command):
    """Handle the TCMD tag: run a command as a test case; abort on failure.

    Counts as a test case. If the command exits non-zero, every file in
    evaluationLogs/ is dumped and the evaluation exits with status 1.

    Arguments:
        command: the command to run

    Returns:
        None
    """
    check_test()

    # Run as test case
    global test_case_count
    test_case_count += 1
    print(f"Test case count {test_case_count} of {test_case_total}")

    # Execute without suppressing output
    command_popen = subprocess.Popen(command, shell=True)
    command_popen.communicate()

    if command_popen.returncode:
        print("FAILED")
        # BUG FIX: os.listdir returns bare names, so the files must be
        # opened relative to evaluationLogs/ (and the directory may not
        # exist when no diff has been logged yet).
        if os.path.isdir("evaluationLogs"):
            for file in os.listdir("evaluationLogs"):
                with open(os.path.join("evaluationLogs", file), "r") as infile:
                    file_lines = infile.readlines()
                # Print entire file
                print("\n".join(file_lines))

        # Exit entire program with error
        sys.exit(1)
    else:
        print("PASSED")
185
+
186
+
187
def compare(file1, file2):
    """Handle the CMP tag: queue two files for comparison with cmp.

    The pair is appended flat onto the global `cmps` list, which
    check_test() consumes when the test case runs.

    Arguments:
        file1: The first file to compare
        file2: The second file to compare

    Returns:
        None
    """
    cmps.extend((file1, file2))
199
+
200
+
201
def test_case(test_case_command):
    """Will be followed by the command to run to test the submission.

    Arguments:
        test_case_command: the command to run the submission

    Returns:
        None
    """
    # Flush any previously queued test case before starting a new one.
    check_test()

    # Clear hint
    # NOTE(review): this rebinds the module-level name `hint`, which is also
    # the HINT tag handler function. tag_func_map, built at import time,
    # still holds the original function object so dispatch keeps working,
    # but the module global is clobbered — confirm nothing calls hint() by
    # name after this runs.
    global hint
    hint = ""

    # Set new test case command
    global test_args
    test_args = test_case_command

    # Increment test cases
    global test_case_count
    test_case_count += 1

    # Set test case hidden
    # NOTE(review): this rebinds the `test_case_hidden` function name to a
    # boolean flag; the unused global `is_hidden_testcase` was presumably
    # intended for this purpose.
    global test_case_hidden
    test_case_hidden = False
227
+
228
+
229
def test_case_hidden(test_case_command):
    """Will be followed by the command to run to test the submission. Test case is hidden.

    Arguments:
        test_case_command: the command to run the submission

    Returns:
        None
    """
    # Flush any previously queued test case before starting a new one.
    check_test()

    # Clear hint
    # NOTE(review): this rebinds the module-level name `hint`, which is also
    # the HINT tag handler function; dispatch still works because
    # tag_func_map captured the function object at import time.
    global hint
    hint = ""

    # Set new test case command
    global test_args
    test_args = test_case_command

    # Increment test cases
    global test_case_count
    test_case_count += 1

    # Set hidden test case
    # NOTE(review): after the first T/HT tag this function's own name is
    # rebound to a boolean; dispatch survives via tag_func_map's saved
    # reference, but the function cannot be called by name afterwards.
    global test_case_hidden
    test_case_hidden = True
255
+
256
+
257
def supply_input(inputs):
    """Handle the I tag: append stdin text for the next test case.

    Input accumulates verbatim (no newline added) in the "fileinput"
    scratch file.

    Arguments:
        inputs: text to feed to the test case's stdin

    Returns:
        None
    """
    with open("fileinput", "a") as sink:
        sink.write(inputs)
268
+
269
+
270
def supply_input_file(input_file):
    """Handle the IF tag: append the contents of a file as test input.

    Lines are copied into the "fileinput" scratch file.

    Arguments:
        input_file: file to read test-case input from

    Returns:
        None
    """
    with open(input_file, "r") as source:
        lines = source.readlines()

    with open("fileinput", "a") as sink:
        sink.writelines(lines)
284
+
285
+
286
def check_output(outputs):
    """Handle the O tag: append one expected-stdout line (newline added).

    Expected output accumulates in the "expectedoutput" scratch file.

    Arguments:
        outputs: one line of expected stdout (without trailing newline)

    Returns:
        None
    """
    with open("expectedoutput", "a") as sink:
        sink.write(outputs + "\n")
298
+
299
+
300
def check_output_file(output_file):
    """Handle the OF tag: append expected stdout read from a file.

    Lines are copied into the "expectedoutput" scratch file.

    Arguments:
        output_file: file to read the expected output from

    Returns:
        None
    """
    with open(output_file, "r") as source:
        lines = source.readlines()

    with open("expectedoutput", "a") as sink:
        sink.writelines(lines)
314
+
315
+
316
def check_error(error_output):
    """Handle the E tag: append expected stderr text (no newline added).

    Expected stderr accumulates in the "expectederror" scratch file.

    Arguments:
        error_output: expected stderr text for the test case

    Returns:
        None
    """
    with open("expectederror", "a") as sink:
        sink.write(error_output)
327
+
328
+
329
def hint(hints):
    """Handle the HINT tag: remember hint text for the current test case.

    Arguments:
        hints: hint text shown when the test case fails

    Returns:
        None
    """
    global test_case_hint
    test_case_hint = hints
340
+
341
+
342
def timeout(timeout_sec):
    """Handle the TO tag: set the per-test-case time limit in seconds.

    Arguments:
        timeout_sec: time limit in seconds, as a decimal string

    Returns:
        None
    """
    global timeout_val
    timeout_val = float(timeout_sec)
353
+
354
+
355
def exit_code(test_case_exit_code):
    """Handle the X tag: set the expected exit code for the next test case.

    The module-level default of -1 disables the check.

    Arguments:
        test_case_exit_code: the expected exit code, as a decimal string

    Returns:
        None
    """
    global expected_exit_code
    # BUG FIX: exit codes are integers; float() silently accepted values
    # like "1.5" that can never match a real return code.
    expected_exit_code = int(test_case_exit_code)
366
+
367
+
368
def start_server(timeout_sec, kill_timeout_sec, *server_cmd):
    """Handle the SS tag: start a background server for the test cases.

    Arguments:
        timeout_sec: seconds to sleep so the server can come up
        kill_timeout_sec: seconds after which the server is killed
        server_cmd: the command (and its arguments) that starts the server

    Returns:
        None
    """

    print(
        f'Starting server with command: {" ".join(server_cmd)} and sleeping for: {timeout_sec}. Will kill server '
        f'after {kill_timeout_sec} seconds.'
    )

    # Send output to the compile log, running in the background.
    # BUG FIX: with shell=True the command must be a single string; passing
    # the argument tuple ran only its first element as the shell command.
    with open("compilelog", "w") as outfile:
        server_popen = subprocess.Popen(
            " ".join(server_cmd), shell=True, stdout=outfile, stderr=outfile, text=True
        )

    print(f"Server pid: {server_popen.pid}. Sleeping for {timeout_sec} seconds.")
    # Block for timeout_sec so that the server can start.
    time.sleep(float(timeout_sec))

    # Kill the server after the kill timeout.
    def kill_server(pid):
        print(f"Killing {pid}")
        subprocess.Popen(
            ["kill", "-9", f"{pid}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

    # BUG FIX: Timer takes its callback arguments as an iterable via `args=`;
    # the old `*[pid]` made the bare pid the `args` parameter itself (an
    # int), which crashed when the timer fired.
    kill_timer = threading.Timer(
        float(kill_timeout_sec), kill_server, args=(server_popen.pid,)
    )
    kill_timer.daemon = True
    kill_timer.start()
408
+
409
+
410
"""
Here is where the tags are mapped to functions.
Any tags that are added or changed must be modified here.
Assume that everything will be passed to these functions as a string, account for
this in the function itself.
"""
# Maps a spec-file tag (the first word of a line) to its handler; each
# handler receives the remainder of the line as a single string.
tag_func_map = {
    "C": compile_code,            # compile the submission
    "CF": check_function,         # require a function to be used
    "NCF": check_not_function,    # require a function NOT to be used
    "CMD": run_command,           # run a command, ignore its status
    "TCMD": run_command_noerror,  # run a command as a pass/fail test
    "CMP": compare,               # queue two files for cmp
    "T": test_case,               # start a visible test case
    "HT": test_case_hidden,       # start a hidden test case
    "I": supply_input,            # literal stdin input
    "IF": supply_input_file,      # stdin input from a file
    "O": check_output,            # expected stdout line
    "OF": check_output_file,      # expected stdout from a file
    "E": check_error,             # expected stderr text
    "HINT": hint,                 # hint shown on failure
    "TO": timeout,                # per-test timeout (seconds)
    "X": exit_code,               # expected exit code
    "SS": start_server,           # start a background server
}
435
+
436
+
437
def setup():
    """Reset all per-test scratch files to empty.

    Delegates deletion to cleanup(), then recreates each file empty.
    """
    scratch_files = (
        "compilelog",
        "difflog",
        "expectedoutput",
        "expectederror",
        "fileinput",
        "yourerror",
        "youroutput",
    )
    cleanup()
    # Recreate every scratch file as an empty file.
    for name in scratch_files:
        with open(name, "w"):
            pass
451
+
452
+
453
def parse_tags(tags: list[str]):
    """Parse spec-file lines and dispatch each recognized tag to its handler.

    Lines that do not look like "TAG args", or whose tag is unknown, are
    skipped silently; handlers that reject their arguments are reported.

    Arguments:
        tags (list[str]): raw lines from the spec file

    Returns:
        None
    """
    tag_pattern = r"([A-Z]+) (.*)"
    for raw_line in tags:
        parsed = re.match(tag_pattern, raw_line)
        if parsed is None:
            # Not in "TAG args" form; skip.
            continue

        tag, tag_args = parsed.group(1), parsed.group(2)

        # Dispatch through the tag-to-function mapping.
        try:
            tag_func_map[tag](tag_args)
        except KeyError:
            # Unknown tag; skip it.
            continue
        except (TypeError, ValueError):
            traceback.print_exc()
            print(f"Invalid arguments for tag {tag} {tag_args}")
482
+
483
+
484
def parse_diff(diff_lines: list[str]):
    """Write a readable version of unified-diff output to the evaluation log.

    Hunk headers ("@@ ...") are dropped, the "---"/"+++" file-header lines
    are rewritten as human-readable labels, and all other lines are copied
    through to evaluationLogs/logOfDiff.

    Arguments:
        diff_lines (list[str]): lines as produced by `diff -U1`

    Returns:
        None
    """
    os.makedirs("evaluationLogs", exist_ok=True)
    # Directly write into logOfDiff rather than use redirection
    with open("evaluationLogs/logOfDiff", "w") as outfile:
        for line in diff_lines:
            # BUG FIX: the old code compared a *list slice* to the strings
            # "---"/"+++" (never equal) and compared the whole first token
            # to "@" (so "@@" hunk headers leaked through). Compare the
            # first whitespace-delimited token instead.
            first_token = line.split(" ")[0]

            if first_token.startswith("@"):
                # Skip hunk headers like "@@ -1,3 +1,4 @@".
                continue

            if first_token == "---":
                # diff's "old file" header names the student's output;
                # strip the "--- " prefix and the trailing timestamp.
                student_output_file = line[3:-37]
                outfile.write(f"Your output file: {student_output_file}")
            elif first_token == "+++":
                # diff's "new file" header names the expected output.
                expected_output_file = line[3:-37]
                outfile.write(f"Expected output file: {expected_output_file}")
            else:
                outfile.write(line)
514
+
515
+
516
def check_test():
    """Run the currently queued test case, if any, and report PASS/FAIL.

    Consumes the state accumulated by the T/HT/I/O/E/TO/X/CMP handlers:
    runs the command with the prepared stdin, diffs stdout/stderr against
    the expected files, checks the exit code, and runs any queued cmp
    pairs. On failure the evaluation logs are dumped (unless the test is
    hidden) and the program exits with status 2; on success the scratch
    state is reset for the next test.
    """
    global test_args
    if test_args == "":
        return

    print(f"Test case {test_case_count} of {test_case_total}")
    passed = True

    with open("fileinput", "r") as fileinput, open(
        "youroutput", "w"
    ) as youroutput, open("yourerror", "w") as yourerror:
        test_exec = subprocess.Popen(
            test_args, shell=True, stdin=fileinput, stdout=youroutput, stderr=yourerror
        )

        # Timeout handling
        try:
            test_exec.communicate(timeout=timeout_val)
        except subprocess.TimeoutExpired:
            # BUG FIX: subprocess raises TimeoutExpired, not TimeoutError;
            # the old clause never matched, so a slow submission crashed
            # the whole evaluation. Kill the child before moving on.
            test_exec.kill()
            test_exec.communicate()
            print(f"Took more than {timeout_val} seconds to run. FAIL")
            passed = False

    # Diff stdout against the expected output ...
    with open("difflog", "w") as outfile:
        diff_popen = subprocess.Popen(
            "diff -U1 -a ./youroutput ./expectedoutput | cat -te | head -22",
            shell=True,
            stdout=outfile,
            stderr=outfile,
            text=True,
        )
        diff_popen.communicate()

    # ... and append the stderr diff to the same log.
    with open("difflog", "a") as outfile:
        diff_popen = subprocess.Popen(
            "diff -U1 -a ./yourerror ./expectederror | cat -te | head -22",
            shell=True,
            stdout=outfile,
            stderr=outfile,
            text=True,
        )
        diff_popen.communicate()

    # Read back both diffs; any output at all means a mismatch.
    with open("difflog", "r") as infile:
        diff_lines = infile.readlines()

    if len(diff_lines):
        passed = False
        parse_diff(diff_lines)

    # Exit code handling (-1 means "don't check").
    if expected_exit_code != -1 and test_exec.returncode != expected_exit_code:
        passed = False
        print(
            f" Exit Code failure: expected {expected_exit_code} got {test_exec.returncode}"
        )

    # Compare queued file pairs with cmp, without suppressing its output.
    # BUG FIX: `cmps` holds file names appended in pairs; iterating it one
    # element at a time invoked `cmp` with a single file, which always
    # fails regardless of content.
    for file1, file2 in zip(cmps[0::2], cmps[1::2]):
        cmp_popen = subprocess.Popen(["cmp", file1, file2])
        cmp_popen.communicate()
        if cmp_popen.returncode:
            passed = False
            break

    # BUG FIX: remember the command before clearing it so the failure
    # report below can still show what was run.
    command_ran = test_args
    test_args = ""

    # Pass/fail bookkeeping
    if passed:
        global num_passed
        num_passed += 1
        print("Passed")
    else:
        global num_failed
        num_failed += 1
        print("FAILED")

        # Hidden test cases reveal only the hint, not the command or logs.
        # NOTE(review): the original's indentation was ambiguous here;
        # hiding the details for HT cases matches the tag's intent — confirm.
        if test_case_hidden:
            print(" Test Case is Hidden")
            if hint:
                print(f"HINT: {hint}")
        else:
            if hint:
                print(f"HINT: {hint}")

            print(f" Command ran: {command_ran}")
            # The log directory only exists when a diff mismatch occurred.
            if os.path.isdir("evaluationLogs"):
                for file in os.listdir("evaluationLogs"):
                    with open("evaluationLogs/" + file, "r") as infile:
                        file_lines = infile.readlines()
                    # Print entire file
                    print("".join(file_lines))

        cleanup()
        # Exit program after failed test case
        sys.exit(2)

    # reinitialize test variables and files here
    setup()
621
+
622
+
623
def cleanup():
    """Delete all per-test scratch files and the evaluationLogs directory."""
    scratch_files = (
        "compilelog",
        "difflog",
        "expectedoutput",
        "expectederror",
        "fileinput",
        "yourerror",
        "youroutput",
    )

    # Remove the diff log directory (and its single known file) if present.
    if os.path.exists("evaluationLogs"):
        if os.path.exists("evaluationLogs/logOfDiff"):
            os.remove("evaluationLogs/logOfDiff")
        os.rmdir("evaluationLogs")

    for name in scratch_files:
        if os.path.exists(name):
            os.remove(name)
642
+
643
+
644
@click.command()
@click.argument("codeval_file", type=click.Path(exists=True))
def run_evaluation(codeval_file):
    """Run the evaluation described by a codeval spec file.

    First pass counts the T/HT test cases and unpacks any Z-tagged zip
    files; second pass dispatches every tag in order. The final pending
    test case is then flushed and the scratch files removed.
    """
    start_time_seconds = time.time()

    setup()

    # First pass: count test cases and extract support zips.
    global test_case_total
    with open(codeval_file, "r") as infile:
        testcases = infile.readlines()
        for testcase in testcases:
            parts = testcase.split(" ", 1)
            tag = parts[0]
            if tag == "T" or tag == "HT":
                test_case_total += 1
            elif tag == "Z":
                filename = parts[1].strip()
                # BUG FIX: the old condition was inverted and ineffective —
                # os.path.join with an *absolute* second argument returns it
                # unchanged, so no path was ever actually resolved. Resolve
                # RELATIVE names against the spec file's directory.
                if os.path.isabs(filename):
                    file = filename
                else:
                    file = os.path.join(os.path.dirname(codeval_file), filename)
                unzip(file, ".")

    # Second pass: execute the tags in order.
    with open(codeval_file, "r") as infile:
        testcases = infile.readlines()
        parse_tags(testcases)

    # Flush the last queued test case.
    check_test()

    # cleanup
    cleanup()

    end_time_seconds = time.time()
    print(f"took {end_time_seconds - start_time_seconds} seconds")
@@ -0,0 +1,62 @@
1
+ import os
2
+ import shutil
3
+ import sys
4
+ import subprocess
5
+
6
+ import requests
7
+ import zipfile
8
+ from assignment_codeval.commons import debug, error
9
+
10
+
11
def download_attachment(directory, attachment):
    """Download a Canvas attachment into `directory`.

    Arguments:
        directory: destination directory (relative to the current directory)
        attachment: Canvas attachment dict with 'display_name' and 'url'

    Returns:
        the path of the downloaded file (directory/display_name)
    """
    fname = attachment['display_name']
    # BUG FIX: write to the destination path directly instead of the old
    # os.chdir()/os.chdir(back) dance, which left the process in the wrong
    # working directory if anything raised in between.
    target = os.path.join(directory, fname)
    durl = attachment['url']
    with requests.get(durl) as response:
        if response.status_code != 200:
            error(f'error {response.status_code} fetching {durl}')
        with open(target, "wb") as fd:
            for chunk in response.iter_content():
                fd.write(chunk)

    return target
28
+
29
+
30
def unzip(filepath, dir, delete=False):
    """Extract a zip archive into `dir`, restoring unix permission bits.

    Arguments:
        filepath: path of the zip archive
        dir: destination directory
        delete: when True, remove the archive after extraction
    """
    with zipfile.ZipFile(filepath) as archive:
        for entry in archive.infolist():
            archive.extract(entry.filename, path=dir)
            debug(f"extracting {entry.filename}")
            extracted = os.path.join(dir, entry.filename)
            current = os.stat(extracted)
            # Merge the permission bits stored in the zip entry (high 16
            # bits of external_attr) into whatever extract() produced, so
            # e.g. the user executable bit survives extraction.
            mode = (current.st_mode | (entry.external_attr >> 16)) & 0o777
            os.chmod(extracted, mode)

    debug(f"{filepath} extracted to {dir}.")
    if delete:
        os.remove(filepath)
        debug(f"{filepath} deleted.")
45
+
46
+
47
def set_acls(temp_dir):
    """Set ACLs for the temporary directory.

    On macOS this is a recursive chmod; elsewhere a default ACL is added
    with setfacl so files created later are world-accessible too.
    """
    if sys.platform == 'darwin':
        acl_cmd = ["chmod", "-R", "o+rwx", temp_dir]
    else:
        acl_cmd = ["setfacl", "-d", "-m", "o::rwx", temp_dir]
    subprocess.call(acl_cmd)
53
+
54
+
55
def copy_files_to_submission_dir(temp_fixed, temp_dir):
    """Populate a submission work dir with the fixed files and eval scripts."""
    shutil.copytree(temp_fixed, temp_dir, dirs_exist_ok=True)
    # Evaluation-harness support files copied alongside the submission.
    support_files = (
        ("../../evaluate.sh", "evaluate.sh"),
        ("evaluate.py", "evaluate.py"),
        ("../../runvalgrind.sh", "runvalgrind.sh"),
        ("../../parsediff", "parsediff"),
        ("../../parsevalgrind", "parsevalgrind"),
        ("../../checksql.sh", "checksql.sh"),
    )
    for source, name in support_files:
        shutil.copy(source, f"{temp_dir}/{name}")
@@ -0,0 +1,63 @@
1
+ import os.path
2
+ import subprocess
3
+ from configparser import ConfigParser
4
+
5
+ import click
6
+
7
+ from assignment_codeval.canvas_utils import get_course, connect_to_canvas
8
+ from assignment_codeval.commons import error, debug
9
+
10
+
11
@click.command()
@click.argument("course_name", metavar="COURSE")
@click.option("--github-field", help="GitHub field name in canvas profile", default="github", show_default=True)
def github_setup_repo(course_name, github_field):
    """
    Connect to a GitHub repository for a given course and assignment.

    COURSE can be a unique substring of the actual course name.

    The repository prefix is read from the [GITHUB] section of codeval.ini,
    keyed by the course name (with ':' and '=' removed); it should have a
    form similar to:

      git@github.com:<gh_classroom_account>/<gh_classroom_assignment>
    """
    canvas, user = connect_to_canvas()
    parser = ConfigParser()
    config_file = click.get_app_dir("codeval.ini")
    parser.read(config_file)
    # Stash the path so error messages can name the file that was read.
    parser.config_file = config_file
    course = get_course(canvas, course_name, True)

    # ConfigParser keys cannot contain ':' or '='; strip them from the
    # course name to form the lookup key.
    gh_key = course.name.replace(":", "").replace("=", "")
    if 'GITHUB' not in parser or gh_key not in parser['GITHUB']:
        error(f"did not find mapping for {gh_key} in GITHUB section of {parser.config_file}.")
        return

    gh_repo_prefix = parser['GITHUB'][gh_key]

    # NOTE(review): this loop rebinds `user` (the Canvas current user from
    # connect_to_canvas) to each enrolled student — confirm that is intended.
    users = course.get_users(include=["enrollments"])
    for user in users:
        ssid = user.login_id
        os.makedirs(ssid, exist_ok=True)
        # Per-student clone/pull results are recorded in gh_result.txt.
        with open(f"{ssid}/gh_result.txt", "w") as fd:
            profile = user.get_profile(include=["links", "link"])
            gh_links = None
            if 'links' in profile:
                # Keep profile links whose title matches the requested
                # GitHub field name (case-insensitive).
                gh_links = [m['url'] for m in profile['links'] if m['title'].lower() == github_field.lower()]
            if not gh_links:
                print(f"no {github_field} link found in canvas profile", file=fd)
                continue
            if len(gh_links) != 1:
                print(f"multiple {github_field} links found in canvas profile", file=fd)
                continue
            gh_url = gh_links[0]
            # The GitHub username is the last path component of the link.
            gh_id = gh_url.rstrip('/').rsplit('/', 1)[-1]
            repo_url = f"{gh_repo_prefix}-{gh_id}.git"
            if os.path.exists(f'{ssid}/repo'):
                print(f"pulling {repo_url}", file=fd)
                rc = subprocess.run(['git', 'pull'], cwd=f'{ssid}/repo', stdout=fd, stderr=subprocess.STDOUT)
            else:
                print(f"cloning {repo_url}", file=fd)
                rc = subprocess.run(['git', 'clone', repo_url, f'{ssid}/repo'], stdout=fd, stderr=subprocess.STDOUT)
            if rc.returncode != 0:
                error(f"error {rc.returncode} connecting to github repo for {ssid} using {repo_url}")
@@ -0,0 +1,156 @@
1
+ import os
2
+ import re
3
+ import sys
4
+ from datetime import datetime, timezone
5
+ from functools import cache
6
+
7
+ import click
8
+ import requests
9
+
10
+ from assignment_codeval.canvas_utils import connect_to_canvas, get_course
11
+ from assignment_codeval.commons import debug, error, info, warn
12
+
13
+
14
@click.command()
@click.argument("submissions_dir", metavar="SUBMISSIONS_DIR")
@click.option("--codeval-prefix", help="prefix for codeval comments", default="codeval: ", show_default=True)
def upload_submission_comments(submissions_dir, codeval_prefix):
    """
    Upload comments for submissions from a directory.

    the submissions_dir specifies a directory that has comments to upload stored as:

    COURSE/ASSIGNMENT/STUDENT_ID/comments.txt

    if the file comments.txt.sent exists, the comment has already been uploaded and will be skipped.
    """
    (canvas, user) = connect_to_canvas()
    clean_submissions_dir = submissions_dir.rstrip('/')
    for dirpath, dirnames, filenames in os.walk(clean_submissions_dir):
        # Only directories exactly three levels deep match:
        # SUBMISSIONS_DIR/COURSE/ASSIGNMENT/STUDENT_ID
        match = re.match(fr'^{clean_submissions_dir}/([^/]+)/([^/]+)/([^/]+)$', dirpath)
        if match:
            course_name = match.group(1)
            assignment_name = match.group(2)
            student_id = match.group(3)
            if "comments.txt" in filenames:
                if "comments.txt.sent" in filenames:
                    info(f"skipping already uploaded comments for {student_id} in {course_name}: {assignment_name}")
                else:
                    info(f"uploading comments for {student_id} in {course_name}: {assignment_name}")
                    course = get_course(canvas, course_name)
                    assignment = get_assignment(course, assignment_name)
                    with open(f"{dirpath}/comments.txt", "r") as fd:
                        comment = fd.read()
                    # nulls seem to be very problematic for canvas
                    comment = comment.replace("\0", "\\0")
                    # BUG FIX: str.strip() returns a new string; the old bare
                    # `comment.strip()` call discarded its result.
                    comment = comment.strip()
                    submission = get_submissions_by_id(assignment).get(student_id)
                    if submission:
                        submission.edit(comment={'text_comment': f'{codeval_prefix} {comment}'})
                    else:
                        warn(f"no submission found for {student_id} in {course_name}: {assignment_name}")
                    # Record the upload time so this comment is not re-sent.
                    with open(f"{dirpath}/comments.txt.sent", "w") as fd:
                        fd.write(datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'))
54
+
55
+
56
@click.command()
@click.argument("course_name", metavar="COURSE")
@click.argument("assignment_name", metavar="ASSIGNMENT")
@click.option("--target-dir", help="directory to download submissions to", default='./submissions', show_default=True)
@click.option("--only-uncommented", is_flag=True, help="only download submissions without codeval comments since last submission")
@click.option("--codeval-prefix", help="prefix for codeval comments", default="codeval: ", show_default=True)
@click.option("--include-empty", is_flag=True, help="include empty submissions")
def download_submissions(course_name, assignment_name, target_dir, only_uncommented, codeval_prefix, include_empty):
    """
    Download submissions for a given assignment in a course from Canvas.

    the COURSE and ASSIGNMENT arguments can be partial names.
    """
    (canvas, user) = connect_to_canvas()

    course = get_course(canvas, course_name)
    assignment = get_assignment(course, assignment_name)
    submission_dir = os.path.join(target_dir, course.name, assignment.name)
    os.makedirs(submission_dir, exist_ok=True)

    for submission in assignment.get_submissions(include=["submission_comments", "user"]):
        # Skip students who never submitted unless explicitly included.
        if not submission.attempt and not include_empty:
            continue
        # Timestamps of existing codeval comments, sorted oldest..newest.
        submission_comments = [c['created_at'] for c in submission.submission_comments if
                               'comment' in c and c['comment'].startswith(codeval_prefix)]
        submission_comments.sort()
        if submission_comments:
            last_comment_date = submission_comments[-1]
        else:
            last_comment_date = None
        # NOTE(review): this compares timestamp strings lexicographically —
        # valid only if both are ISO-8601 in the same zone; confirm against
        # the Canvas API response format.
        if only_uncommented and last_comment_date and submission.submitted_at <= last_comment_date:
            continue

        student_id = submission.user['login_id']
        student_submission_dir = os.path.join(submission_dir, student_id)
        os.makedirs(student_submission_dir, exist_ok=True)

        # Record per-submission metadata next to the downloaded files.
        metapath = os.path.join(student_submission_dir, "metadata.txt")
        with open(metapath, "w") as fd:
            print(f"""id={student_id}
course={course.name}
assignment={assignment.name}
attempt={submission.attempt}
late={submission.late}
date={submission.submitted_at}
last_comment={last_comment_date}""", file=fd)
        # Text-entry submissions arrive in `body`.
        body = submission.body
        if body:
            filepath = os.path.join(student_submission_dir, "content.txt")
            with open(filepath, "w") as fd:
                fd.write(body)
            debug(f"Downloaded content for student {student_id} to {filepath}")

        # File-upload submissions arrive as an attachment dict.
        if hasattr(submission, "attachment"):
            attachment = submission.attachment
            fname = attachment['display_name']
            prefix = os.path.splitext(fname)[0]
            suffix = os.path.splitext(fname)[1]
            durl = attachment['url']
            filepath = os.path.join(student_submission_dir, f"{prefix}{suffix}")

            with requests.get(durl) as response:
                if response.status_code != 200:
                    error(f'error {response.status_code} fetching {durl}')
                with open(filepath, "wb") as fd:
                    for chunk in response.iter_content():
                        fd.write(chunk)

            debug(f"Downloaded submission for student {student_id} to {filepath}")

    return submission_dir
127
+
128
+
129
@cache
def get_assignment(course, assignment_name):
    """Return the single assignment matching `assignment_name` (cached).

    Matching is a case-insensitive substring test; an exact name match
    breaks ties. Exits with status 2 when zero or several assignments match.
    """
    matches = [a for a in course.get_assignments() if assignment_name.lower() in a.name.lower()]
    if not matches:
        error(f'no assignments found that contain {assignment_name}. options are:')
        for option in course.get_assignments():
            error(fr" {option.name}")
        sys.exit(2)
    if len(matches) > 1:
        exact = [a for a in matches if a.name == assignment_name]
        if len(exact) == 1:
            matches = exact
        else:
            error(f"multiple assignments found for {assignment_name}: {[a.name for a in matches]}")
            for option in matches:
                error(f" {option.name}")
            sys.exit(2)
    return matches[0]
148
+
149
+
150
@cache
def get_submissions_by_id(assignment):
    """Return a dict mapping student login_id -> submission (cached)."""
    return {
        submission.user['login_id']: submission
        for submission in assignment.get_submissions(include=["user"])
    }
@@ -0,0 +1,241 @@
1
+ Metadata-Version: 2.4
2
+ Name: assignment-codeval
3
+ Version: 0.0.1
4
+ Summary: CodEval for evaluating programming assignments
5
+ Requires-Python: >=3.7
6
+ Description-Content-Type: text/markdown
7
+ Requires-Dist: canvasapi==3.3.0
8
+ Requires-Dist: certifi==2021.10.8
9
+ Requires-Dist: charset-normalizer==2.0.9
10
+ Requires-Dist: click==8.2.1
11
+ Requires-Dist: configparser==5.2.0
12
+ Requires-Dist: idna==3.3
13
+ Requires-Dist: pytz==2021.3
14
+ Requires-Dist: requests==2.27.0
15
+ Requires-Dist: urllib3==1.26.7
16
+ Requires-Dist: pymongo==4.3.3
17
+ Requires-Dist: markdown==3.4.1
18
+
19
+ # CodEval
20
+
21
+ Currently CodEval has 3 main components:
22
+ ## 1. Test Simple I/O Programming Assignments on Canvas
23
+ ### codeval.ini contents
24
+ ```
25
+ [SERVER]
26
+ url=<canvas API>
27
+ token=<canvas token>
28
+ [RUN]
29
+ precommand=
30
+ command=
31
+ ```
32
+
33
+ Refer to a sample codeval.ini file [here](samples/codeval.ini)
34
+
35
+ ### Command to run:
36
+ `python3 codeval.py grade-submissions <a unique part of course name> [FLAGS]`
37
+ Example:
38
+ If the course name on Canvas is CS 149 - Operating Systems, the command can be:
39
+ `python3 codeval.py CS\ 149`
40
+ or
41
+ `python3 codeval.py "Operating Systems"`
42
+ Use a part of the course name that can uniquely identify the course on Canvas.
43
+
44
+ ### Flags
45
+ - **--dry-run/--no-dry-run** (Optional)
46
+ - Default: --dry-run
47
+ - Do not update the results on Canvas. Print the results to the terminal instead.
48
+ - **--verbose/--no-verbose** (Optional)
49
+ - Default: --no-verbose
50
+ - Show detailed logs
51
+ - **--force/--no-force** (Optional)
52
+ - Default: --no-force
53
+ - Grade submissions even if already graded
54
+ - **--copytmpdir/--no-copytmpdir** (Optional)
55
+ - Default: --no-copytmpdir
56
+ - Copy temporary directory content to current directory for debugging
57
+
58
+ ### Specification Tags
59
+ Tags used in a spec file (\<course name>.codeval)
60
+
61
+ | Tag | Meaning | Function |
62
+ |---|---|---|
63
+ | C | Compile Code | Specifies the command to compile the submission code |
64
+ | CTO | Compile Timeout | Timeout in seconds for the compile command to run |
65
+ | RUN | Run Script | Specifies the script to use to evaluate the specification file. Defaults to evaluate.sh. |
66
+ | Z | Download Zip | Will be followed by zip files to download from Canvas to use when running the test cases. |
67
+ | CF | Check Function | Will be followed by a function name and a list of files to check to ensure that the function is used by one of those files. |
68
+ | CMD/TCMD | Run Command | Will be followed by a command to run. The TCMD will cause the evaluation to fail if the command exits with an error. |
69
+ | CMP | Compare | Will be followed by two files to compare. |
70
+ | T/HT | Test Case | Will be followed by the command to run to test the submission. |
71
+ | I/IF | Supply Input | Specifies the input for a test case. The IF version will read the input from a file. |
72
+ | O/OF | Check Output | Specifies the expected output for a test case. The OF version will read from a file. |
73
+ | E | Check Error | Specifies the expected error output for a test case. |
74
+ | TO | Timeout | Specifies the time limit in seconds for a test case to run. Defaults to 20 seconds. |
75
+ | X | Exit Code | Specifies the expected exit code for a test case. Defaults to zero. |
76
+ | SS | Start Server | Command containing timeout (wait until server starts), kill timeout (wait to kill the server), and the command to start a server |
77
+
78
+ Refer to a sample spec file [here](samples/assignment-name.codeval)
79
+
80
+ ## 2. Test Distributed Programming Assignments
81
+ ### (or complex non I/O programs)
82
+ ### codeval.ini contents
83
+ ```
84
+ [SERVER]
85
+ url=<canvas API>
86
+ token=<canvas token>
87
+ [RUN]
88
+ precommand=
89
+ command=
90
+ dist_command=
91
+ host_ip=
92
+ [MONGO]
93
+ url=
94
+ db=
95
+ ```
96
+
97
+ Refer to a sample codeval.ini file [here](samples/codeval.ini)
98
+
99
+ ### Command to run
100
+ is the same as the [command in #1](#command-to-run):
101
+ `python3 codeval.py grade-submissions <a unique part of course name> [FLAGS]`
102
+
103
+ ### Distributed Specification Tags
104
+
105
+ | Tag | Meaning | Function |
106
+ |---|---|---|
107
+ | --DT-- | Distributed Tests Begin | Marks the beginning of distributed tests. Is used to determine if the spec file has distributed tests |
108
+ | GTO | Global timeout | A total timeout for all distributed tests, for each of homogenous and heterogenous tests. Homogenous tests = GTO value. Heterogenous tests = 2 * GTO value |
109
+ | PORTS | Exposed ports count | Maximum number of ports needed to expose per docker container |
110
+ | ECMD/ECMDT SYNC/ASYNC | External Command | Command that runs in the a controller container, emulating a host machine. ECMDT: Evaluation fails if command returns an error. SYNC: CodEval waits for command to execute or fail. ASYNC: CodEval doesn't wait for command to execute, failure is checked if ECMDT |
111
+ | DTC $int [HOM] [HET] | Distributed Test Config Group | Signifies the start of a new group of Distributed tests. Replace $int with the number of containers that need to be started for the test group. HOM denotes homogenous tests, i.e., user's own submissions will be executed in the containers. HET denotes heterogenous tests, i.e., a combination of $int - 1 other users' and current user's submissions will be executed in the containers. Can enter either HOM or HET or both |
112
+ | ICMD/ICMDT SYNC/ASYNC */n1,n2,n3... | Internal Command | Command that runs in each of the containers. ICMDT: Evaluation fails if command returns an error. SYNC: wait for command to execute or fail. ASYNC: Don't wait for command to execute, failure is checked if ICMDT *: run command in all the containers. n1,n2..nx: Run command in containers indexed n1,n2..nx only. Containers follow zero-based indexing |
113
+ | TESTCMD | Test Command | Command run on the host machine to validate the submission(s) |
114
+ | --DTCLEAN-- | Cleanup Commands | Commands to execute after the tests have completed or failed. Can contain only ECMD or ECMDT |
115
+
116
+ ### Special placeholders in commands
117
+ | Placeholder | Usage |
118
+ | --- | --- |
119
+ | TEMP_DIR | used in ECMD/ECMDT to be replaced by the temporary directory generated by CodEval during execution |
120
+ | HOST_IP | used in ECMD/ECMDT/ICMD/ICMDT to be replaced by the host's IP specified in codeval.ini |
121
+ | USERNAME | used in ICMD/ICMDT to be replaced by the user's username whose submission is being evaluated |
122
+ | PORT_$int | used in ICMD/ICMDT to be replaced by a port number assigned to the running docker container. $int needs to be < PORT value in the specification |
123
+
124
+ Refer to a sample spec file [here](samples/assignment-name.codeval)
125
+
126
+ ### Notes
127
+ - The config file `codeval.ini` needs to contain the extra entries only if the tag `--DT--` exists in the specification file
128
+ - Distributed tests need a running mongodb service to persist the progress of students running heterogenous tests
129
+
130
+
131
+ ## 3. Test SQL Assignments
132
+ ### codeval.ini contents
133
+ ```
134
+ [SERVER]
135
+ url=<canvas API>
136
+ token=<canvas token>
137
+ [RUN]
138
+ precommand=
139
+ command=
140
+ dist_command=
141
+ host_ip=
142
+ sql_command=
143
+ ```
144
+
145
+ Refer to a sample codeval.ini file [here](SQL/samples/codeval.ini)
146
+
147
+ ### Command to run
148
+ is the same as the [command in #1](#command-to-run):
149
+ `python3 codeval.py grade-submissions <a unique part of course name> [FLAGS]`
150
+
151
+ ### SQL Specification Tags
152
+
153
+ | Tag | Meaning | Function |
154
+ |------------------|-------------------------|----------------------------------------------------------------------------------------------|
155
+ | --SQL-- | SQL Tests Begin | Marks the beginning of SQL tests. Is used to determine if the spec file has SQL based tests |
156
+ | INSERT | Insert rows in DB | Insert rows in the SQL database using files/ individual insert queries. |
157
+ | CONDITIONPRESENT | Check condition in file | Validate submission files for a required condition to be present in submissions. |
158
+ | SCHEMACHECK | External Command | Validate submission files for database related checks like constraints. |
159
+ | TSQL | SQL Test | Marks the SQL test; takes input as a file or an individual query and runs it on submission files. |
160
+
161
+ Refer to a sample spec file [here](SQL/samples/ASSIGNMENT:CREATE.codeval)
162
+
163
+ ### Notes
164
+ - The config file `codeval.ini` needs to contain the extra entries only if the tag `--SQL--` exists in the specification file
165
+ - SQL tests need a separate container image to run SQL tests in MYSQL.
166
+
167
+
168
+ ## 4. Create an assignment on Canvas
169
+
170
+ ### Command to create the assignment:
171
+ **Syntax:** `python3 codeval.py create-assignment <course_name> <specification_file> [ --dry-run/--no-dry-run ] [ --verbose/--no-verbose ] [ --group_name ]`
172
+ **Example:** `python3 codeval.py create-assignment "Practice1" 'a_big_bag_of_strings.txt' --no-dry-run --verbose --group_name "exam 2"`
173
+
174
+ ### Command to grade the assignment:
175
+ **Syntax:** `python3 codeval.py grade-submissions <course_name> [ --dry-run/--no-dry-run ] [ --verbose/--no-verbose ] [ --force/--no-force][--copytmpdir/--no-copytmpdir]`
176
+ **Example:** `python3 codeval.py grade-submissions "Practice1" --no-dry-run --force --verbose`
177
+
178
+ **New tags introduced are :**
179
+
180
+ CRT_HW START <Assignment_name>
181
+
182
+ CRT_HW END
183
+
184
+ DISCSN_URL
185
+
186
+ EXMPLS <no_of_test_cases>
187
+
188
+ URL_OF_HW "file_name"
189
+
190
+ ### MODIFICATIONS REQUIRED IN THE SPECIFICATION FILE.
191
+ 1) Start the specification file with the tag CRT_HW START followed by a space followed by the name of assignment.
192
+ ``` For ex: CRT_HW START Hello World```
193
+ 2) The following lines after the first line will contain the description of the assignment in Markdown format.
194
+ 3) The description ends with the last line containing just the tag CRT_HW END .
195
+ ``` For ex: CRT_HW END ```
196
+ 4) After this tag, the content for grading the submission begins.
197
+
198
+ Addition of the Discussion Topic in the assignment description.
199
+ 1) Insert the tag DISCUSSION_LINK wherever you want the corresponding discussion topic's link to appear.
200
+ ```For ex: To access the discussion topic for this assignment you go here DISCUSSION_LINK```
201
+
202
+ #### Addition of sample examples in the assignment description.
203
+ 1) Insert the tag EXMPLS followed by single space followed by the value.
204
+ Here value is the number of test cases to be displayed as sample examples.
205
+ At maximum it will print all the non hidden test cases.
206
+ For ex: EXMPLS 5
207
+ #### Addition of the links to the files uploaded in the Codeval folder in the assignment description.
208
+ 1) In order to add hyperlink to a file the markdown format is as follows:
209
+ [file_name_to_be_displayed](Url_of_the_file)
210
+ Here, in the parentheses where the URL is required, insert the tag
211
+ URL_OF_HW followed by space followed by the file name of the file required to be linked in double quotes.
212
+ For ex: URL_OF_HW "file name.extension"
213
+ Note: The file should be present in the Codeval folder.
214
+
215
+ ### UPLOAD THE REQUIRED FILES IN CODEVAL FOLDER IN FILES SECTION.
216
+ 1) Create a folder called `assignmentFiles` which should contain all the necessary files including
217
+ the specification file.
218
+
219
+ ### EXAMPLE OF THE SPECIFICATION FILE.
220
+
221
+ CRT_HW START Bag Of Strings
222
+ # Description
223
+ ## Problem Statement
224
+ - This Is An Example For The Description Of The Assignment In Markdown.
225
+ - To Download The File [Hello_World](URL_OF_HW "Helloworld.Txt")
226
+
227
+ ## Sample Examples
228
+ EXMPLS 3
229
+
230
+ ## Discussion Topic
231
+ Here Is The Link To The Discussion Topic: DISCSN_URL
232
+
233
+ ### Rubric
234
+ | Cases | Points|
235
+ | ----- |----- |
236
+ | Base Points | 50 |
237
+
238
+ CRT_HW END
239
+
240
+ C cc -o bigbag --std=gnu11 bigbag.c
241
+
@@ -0,0 +1,13 @@
1
+ assignment_codeval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ assignment_codeval/canvas_utils.py,sha256=LpNOYznMagA6t9_Dm8-liSgcDQE2BZpR_RdCazS2nco,2536
3
+ assignment_codeval/cli.py,sha256=lfToqPg6s4txTKvFHhLTiSgp5QozgHtjojjmz22MiFc,448
4
+ assignment_codeval/commons.py,sha256=iVCayl1AVeoJBRkF2ye2037qoM457eEdRd54F51ePlM,1209
5
+ assignment_codeval/evaluate.py,sha256=CptkqpZXiy_mVLN_mt5pgGhEf2En7t9RwBVwSeQvyBc,17024
6
+ assignment_codeval/file_utils.py,sha256=9wHuZACuU-1oMQqmwfFbN95WExVO2UPui7m_y9g9my8,2081
7
+ assignment_codeval/github_connect.py,sha256=Xhgee4b-a-3iVpy0TKmXalJdhOW3bkeWnSgaAn9LN_U,2688
8
+ assignment_codeval/submissions.py,sha256=_I1ylksbzrAfTfCT3KxjSMivmHdPE40SrABOykOTEN0,6968
9
+ assignment_codeval-0.0.1.dist-info/METADATA,sha256=n0knrTyC_QfkEsHTE-7mLUOE4Ty7HWkC12bysfZ5COk,11328
10
+ assignment_codeval-0.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
11
+ assignment_codeval-0.0.1.dist-info/entry_points.txt,sha256=Pk0eAhsXBpCsgkv6HYLSHTRf5x_zUf-Lg8KK9KsK2B8,66
12
+ assignment_codeval-0.0.1.dist-info/top_level.txt,sha256=a_P8W2IWjAELxKlTV2t4rLwWVoTxElxUMvWgWjR_1n4,19
13
+ assignment_codeval-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ assignment-codeval = assignment_codeval.cli:cli
@@ -0,0 +1 @@
1
+ assignment_codeval