QuizGenerator 0.7.1__py3-none-any.whl → 0.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- QuizGenerator/contentast.py +48 -15
- QuizGenerator/generate.py +2 -1
- QuizGenerator/mixins.py +14 -100
- QuizGenerator/premade_questions/basic.py +24 -29
- QuizGenerator/premade_questions/cst334/languages.py +100 -99
- QuizGenerator/premade_questions/cst334/math_questions.py +112 -122
- QuizGenerator/premade_questions/cst334/memory_questions.py +621 -621
- QuizGenerator/premade_questions/cst334/persistence_questions.py +137 -163
- QuizGenerator/premade_questions/cst334/process.py +312 -328
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +34 -35
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +41 -36
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +48 -41
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +285 -521
- QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +149 -126
- QuizGenerator/premade_questions/cst463/models/attention.py +44 -50
- QuizGenerator/premade_questions/cst463/models/cnns.py +43 -47
- QuizGenerator/premade_questions/cst463/models/matrices.py +61 -11
- QuizGenerator/premade_questions/cst463/models/rnns.py +48 -50
- QuizGenerator/premade_questions/cst463/models/text.py +65 -67
- QuizGenerator/premade_questions/cst463/models/weight_counting.py +47 -46
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +100 -156
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +93 -141
- QuizGenerator/question.py +310 -202
- QuizGenerator/quiz.py +8 -5
- QuizGenerator/regenerate.py +14 -6
- {quizgenerator-0.7.1.dist-info → quizgenerator-0.8.1.dist-info}/METADATA +30 -2
- {quizgenerator-0.7.1.dist-info → quizgenerator-0.8.1.dist-info}/RECORD +30 -30
- {quizgenerator-0.7.1.dist-info → quizgenerator-0.8.1.dist-info}/WHEEL +0 -0
- {quizgenerator-0.7.1.dist-info → quizgenerator-0.8.1.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.7.1.dist-info → quizgenerator-0.8.1.dist-info}/licenses/LICENSE +0 -0
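Reading across the hunks below, the common thread is that the question classes drop their stateful `__init__`/`refresh()` plumbing in favor of classmethod builders (`_build_context`, `_build_body`, `_build_explanation`) that derive everything from an explicitly seeded `random.Random`. A minimal sketch of that pattern, using hypothetical names (`DemoQuestion`, `MAX_VALUE`) that are not part of the package:

    import random

    class DemoQuestion:
        MAX_VALUE = 10  # assumed constant, for illustration only

        @classmethod
        def _build_context(cls, *, rng_seed=None, **kwargs):
            # All randomness flows through one seeded generator, so the same
            # rng_seed regenerates exactly the same question.
            rng = random.Random(rng_seed)
            value = rng.randint(0, cls.MAX_VALUE)
            return {"value": value, "answer": value * 2}

    # Same seed, same context: regenerable for grading or re-rendering.
    assert DemoQuestion._build_context(rng_seed=7) == DemoQuestion._build_context(rng_seed=7)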
@@ -8,6 +8,7 @@ import enum
 import io
 import logging
 import os
+import random
 import uuid
 from typing import List

@@ -119,7 +120,8 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 def has_started(self) -> bool:
 return self.response_time is None

-
+@classmethod
+def get_workload(cls, rng, num_jobs, *args, **kwargs) -> List[SchedulingQuestion.Job]:
 """Makes a guaranteed interesting workload by following rules
 1. First job to arrive is the longest
 2. At least 2 other jobs arrive in its runtime
@@ -132,9 +134,9 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 # First create a job that is relatively long-running and arrives first.
 # Set arrival time to something fairly low
-job0_arrival =
+job0_arrival = rng.randint(0, int(0.25 * cls.MAX_ARRIVAL_TIME))
 # Set duration to something fairly long
-job0_duration =
+job0_duration = rng.randint(int(cls.MAX_JOB_DURATION * 0.75), cls.MAX_JOB_DURATION)

 # Next, let's create a job that will test whether we are preemptive or not.
 # The core characteristics of this job are that it:
@@ -146,24 +148,24 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 # duration:
 # lower: self.MIN_JOB_DURATION
 # upper:
-job1_arrival =
+job1_arrival = rng.randint(
 job0_arrival + 1, # Make sure we start _after_ job0
-job0_arrival + job0_duration -
+job0_arrival + job0_duration - cls.MIN_JOB_DURATION - 2 # Make sure we always have enough time for job1 & job2
 )
-job1_duration =
-
+job1_duration = rng.randint(
+cls.MIN_JOB_DURATION + 1, # default minimum and leave room for job2
 job0_arrival + job0_duration - job1_arrival - 1 # Make sure our job ends _at least_ before job0 would end
 )

 # Finally, we want to differentiate between STCF and SJF
 # So, if we don't preempt job0 we want to make it be a tough choice between the next 2 jobs when it completes.
 # This means we want a job that arrives _before_ job0 finishes, after job1 enters, and is shorter than job1
-job2_arrival =
+job2_arrival = rng.randint(
 job1_arrival + 1, # Make sure we arrive after job1 so we subvert FIFO
 job0_arrival + job0_duration - 1 # ...but before job0 would exit the system
 )
-job2_duration =
-
+job2_duration = rng.randint(
+cls.MIN_JOB_DURATION, # Make sure it's at least the minimum.
 job1_duration - 1, # Make sure it's shorter than job1
 )

@@ -177,12 +179,12 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 # Add more jobs as necessary, if more than 3 are requested
 if num_jobs > 3:
 job_tuples.extend([
-(
+(rng.randint(0, cls.MAX_ARRIVAL_TIME), rng.randint(cls.MIN_JOB_DURATION, cls.MAX_JOB_DURATION))
 for _ in range(num_jobs - 3)
 ])

 # Shuffle jobs so they are in a random order
-
+rng.shuffle(job_tuples)

 # Make workload from job tuples
 workload = []
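The comments above spell out the workload constraints: job0 arrives early and runs long, job1 arrives and could finish inside job0's runtime (so it exposes preemption), and job2 arrives after job1 but is shorter (so SJF and STCF disagree). A standalone restatement of that constraint arithmetic, with the bounds below as assumed placeholders rather than the package's real constants:

    import random

    MAX_ARRIVAL_TIME = 10   # assumed bounds, not the package's actual values
    MIN_JOB_DURATION = 2
    MAX_JOB_DURATION = 20

    def sketch_workload(rng: random.Random):
        # job0: arrives early, runs long
        job0_arrival = rng.randint(0, int(0.25 * MAX_ARRIVAL_TIME))
        job0_duration = rng.randint(int(0.75 * MAX_JOB_DURATION), MAX_JOB_DURATION)
        # job1: starts after job0 arrives and would finish before job0 does
        job1_arrival = rng.randint(job0_arrival + 1,
                                   job0_arrival + job0_duration - MIN_JOB_DURATION - 2)
        job1_duration = rng.randint(MIN_JOB_DURATION + 1,
                                    job0_arrival + job0_duration - job1_arrival - 1)
        # job2: arrives after job1, before job0 ends, and is shorter than job1
        job2_arrival = rng.randint(job1_arrival + 1, job0_arrival + job0_duration - 1)
        job2_duration = rng.randint(MIN_JOB_DURATION, job1_duration - 1)
        return [(job0_arrival, job0_duration), (job1_arrival, job1_duration),
                (job2_arrival, job2_duration)]

    print(sketch_workload(random.Random(0)))  # three (arrival, duration) tuples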
@@ -197,30 +199,38 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 return workload

-
+@classmethod
+def run_simulation(
+cls,
+jobs_to_run: List[SchedulingQuestion.Job],
+selector,
+preemptable,
+time_quantum=None,
+scheduler_algorithm=None
+):
 curr_time = 0
 selected_job: SchedulingQuestion.Job | None = None
-
-
-
+
+timeline = collections.defaultdict(list)
+timeline[curr_time].append("Simulation Start")
 for job in jobs_to_run:
-
-
+timeline[job.arrival_time].append(f"Job{job.job_id} arrived")
+
 while len(jobs_to_run) > 0:
 possible_time_slices = []
-
+
 # Get the jobs currently in the system
 available_jobs = list(filter(
 (lambda j: j.arrival_time <= curr_time),
 jobs_to_run
 ))
-
+
 # Get the jobs that will enter the system in the future
 future_jobs : List[SchedulingQuestion.Job] = list(filter(
 (lambda j: j.arrival_time > curr_time),
 jobs_to_run
 ))
-
+
 # Check whether there are jobs in the system already
 if len(available_jobs) > 0:
 # Use the selector to identify what job we are going to run
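The rewritten `run_simulation` also threads a `timeline` through the loop: a `collections.defaultdict(list)` keyed by timestamp that accumulates human-readable event strings, which the explanation later renders as a table. A tiny sketch of that logging pattern (the event strings here are invented for illustration):

    import collections

    timeline = collections.defaultdict(list)
    timeline[0].append("Simulation Start")
    timeline[0].append("Job0 arrived")
    timeline[3].append("Job1 arrived")

    for t in sorted(timeline.keys()):
        print(f"{t:05.2f}s: " + "; ".join(timeline[t]))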
@@ -229,13 +239,16 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 key=(lambda j: selector(j, curr_time))
 )
 if selected_job.has_started():
-
+timeline[curr_time].append(
+f"Starting Job{selected_job.job_id} "
+f"(resp = {curr_time - selected_job.arrival_time:0.{cls.ROUNDING_DIGITS}f}s)"
+)
 # We start the job that we selected
-selected_job.run(curr_time, (
-
+selected_job.run(curr_time, (scheduler_algorithm == cls.Kind.RoundRobin))
+
 # We could run to the end of the job
 possible_time_slices.append(selected_job.time_remaining(curr_time))
-
+
 # Check if we are preemptable or if we haven't found any time slices yet
 if preemptable or len(possible_time_slices) == 0:
 # Then when a job enters we could stop the current task
@@ -244,32 +257,37 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 future_jobs,
 key=(lambda j: j.arrival_time)
 )
-possible_time_slices.append(
-
+possible_time_slices.append((next_arrival.arrival_time - curr_time))
+
 if time_quantum is not None:
 possible_time_slices.append(time_quantum)
-
-
+
 ## Now we pick the minimum
 try:
 next_time_slice = min(possible_time_slices)
 except ValueError:
 log.error("No jobs available to schedule")
 break
-if
+if scheduler_algorithm != SchedulingQuestion.Kind.RoundRobin:
 if selected_job is not None:
-
+timeline[curr_time].append(
+f"Running Job{selected_job.job_id} "
+f"for {next_time_slice:0.{cls.ROUNDING_DIGITS}f}s"
+)
 else:
-
+timeline[curr_time].append(f"(No job running)")
 curr_time += next_time_slice
-
+
 # We stop the job we selected, and potentially mark it as complete
 if selected_job is not None:
-selected_job.stop(curr_time, (
+selected_job.stop(curr_time, (scheduler_algorithm == cls.Kind.RoundRobin))
 if selected_job.is_complete(curr_time):
-
+timeline[curr_time].append(
+f"Completed Job{selected_job.job_id} "
+f"(TAT = {selected_job.turnaround_time:0.{cls.ROUNDING_DIGITS}f}s)"
+)
 selected_job = None
-
+
 # Filter out completed jobs
 jobs_to_run : List[SchedulingQuestion.Job] = list(filter(
 (lambda j: not j.is_complete(curr_time)),
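Each iteration above reduces to picking the shortest candidate time slice: run the selected job to completion, or (if preemption is allowed) only until the next arrival, or for one quantum. A minimal restatement of that choice, not the package's actual helper:

    def next_time_slice(remaining, next_arrival_in=None, time_quantum=None, preemptable=True):
        # Candidate slice lengths, mirroring the loop above.
        candidates = [remaining]                       # run to the end of the job
        if preemptable and next_arrival_in is not None:
            candidates.append(next_arrival_in)         # stop when the next job arrives
        if time_quantum is not None:
            candidates.append(time_quantum)            # round-robin style quantum
        return min(candidates)

    # e.g. STCF with 5 units of work left and an arrival 2 units away
    print(next_time_slice(5, next_arrival_in=2))  # -> 2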
@@ -277,103 +295,93 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 ))
 if len(jobs_to_run) == 0:
 break
-
-def __init__(self, num_jobs=3, scheduler_kind=None, *args, **kwargs):
-# Preserve question-specific params for QR code config BEFORE calling super().__init__()
-kwargs['num_jobs'] = num_jobs
-
-# Register the regenerable choice using the mixin
-self.register_choice('scheduler_kind', SchedulingQuestion.Kind, scheduler_kind, kwargs)

-
-
-
-def
-
-
-
+return timeline
+
+@classmethod
+def _build_context(cls, *, rng_seed=None, **kwargs):
+rng = random.Random(rng_seed)
+num_jobs = kwargs.get("num_jobs", 3)
+
+scheduler_kind = kwargs.get("scheduler_kind")
+if scheduler_kind is None:
+scheduler_algorithm = rng.choice(list(cls.Kind))
+config_params = {"scheduler_kind": scheduler_algorithm.name}
+else:
+if isinstance(scheduler_kind, cls.Kind):
+scheduler_algorithm = scheduler_kind
+else:
+scheduler_algorithm = cls.get_kind_from_string(str(scheduler_kind))
+config_params = {"scheduler_kind": scheduler_algorithm.name}

-
-# Note: We ignore the parent's return value since we need to generate the workload first
-super().refresh(*args, **kwargs)
+jobs = cls.get_workload(rng, num_jobs)

-
-self.scheduler_algorithm = self.get_choice('scheduler_kind', SchedulingQuestion.Kind)
-
-# Get workload jobs
-jobs = self.get_workload(self.num_jobs)
-
-# Run simulations different depending on which algorithm we chose
-match self.scheduler_algorithm:
+match scheduler_algorithm:
 case SchedulingQuestion.Kind.ShortestDuration:
-
+timeline = cls.run_simulation(
 jobs_to_run=jobs,
 selector=(lambda j, curr_time: (j.duration, j.job_id)),
 preemptable=False,
-time_quantum=None
+time_quantum=None,
+scheduler_algorithm=scheduler_algorithm
 )
 case SchedulingQuestion.Kind.ShortestTimeRemaining:
-
+timeline = cls.run_simulation(
 jobs_to_run=jobs,
 selector=(lambda j, curr_time: (j.time_remaining(curr_time), j.job_id)),
 preemptable=True,
-time_quantum=None
+time_quantum=None,
+scheduler_algorithm=scheduler_algorithm
 )
 case SchedulingQuestion.Kind.RoundRobin:
-
+timeline = cls.run_simulation(
 jobs_to_run=jobs,
 selector=(lambda j, curr_time: (j.last_run, j.job_id)),
 preemptable=True,
-time_quantum=1e-05
+time_quantum=1e-05,
+scheduler_algorithm=scheduler_algorithm
 )
 case _:
-
+timeline = cls.run_simulation(
 jobs_to_run=jobs,
 selector=(lambda j, curr_time: (j.arrival_time, j.job_id)),
 preemptable=False,
-time_quantum=None
+time_quantum=None,
+scheduler_algorithm=scheduler_algorithm
 )
-
-
-self.job_stats = {
+
+job_stats = {
 i : {
-"arrival_time" : job.arrival_time,
-"duration" : job.duration,
-"Response" : job.response_time,
-"TAT" : job.turnaround_time,
+"arrival_time" : job.arrival_time,
+"duration" : job.duration,
+"Response" : job.response_time,
+"TAT" : job.turnaround_time,
 "state_changes" : [job.arrival_time] + job.state_change_times + [job.arrival_time + job.turnaround_time],
 }
 for (i, job) in enumerate(jobs)
 }
-
+overall_stats = {
 "Response" : sum([job.response_time for job in jobs]) / len(jobs),
 "TAT" : sum([job.turnaround_time for job in jobs]) / len(jobs)
 }
-
-# todo: make this less convoluted
-self.average_response = self.overall_stats["Response"]
-self.average_tat = self.overall_stats["TAT"]
-
-for job_id in sorted(self.job_stats.keys()):
-self.answers.update({
-f"answer__response_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["Response"]),
-f"answer__turnaround_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["TAT"]),
-})
-self.answers.update({
-"answer__average_response_time": ca.AnswerTypes.Float(
-sum([job.response_time for job in jobs]) / len(jobs),
-label="Overall average response time"
-),
-"answer__average_turnaround_time": ca.AnswerTypes.Float(
-sum([job.turnaround_time for job in jobs]) / len(jobs),
-label="Overall average TAT"
-)
-})

-
-
+return {
+"num_jobs": num_jobs,
+"scheduler_algorithm": scheduler_algorithm,
+"job_stats": job_stats,
+"overall_stats": overall_stats,
+"timeline": timeline,
+"_config_params": config_params,
+}
+
+@classmethod
+def is_interesting_ctx(cls, context) -> bool:
+duration_sum = sum([context["job_stats"][job_id]['duration'] for job_id in context["job_stats"].keys()])
+tat_sum = sum([context["job_stats"][job_id]['TAT'] for job_id in context["job_stats"].keys()])
+return (tat_sum >= duration_sum * 1.1)

-
+@classmethod
+def _build_body(cls, context):
 """
 Build question body and collect answers.
 Returns:
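Because `_build_context` draws everything from `random.Random(rng_seed)`, the same seed should reproduce the same workload, scheduler choice, and answers. A usage sketch; the import path is inferred from the file listing above and not verified:

    from QuizGenerator.premade_questions.cst334.process import SchedulingQuestion

    ctx_a = SchedulingQuestion._build_context(rng_seed=1234, num_jobs=3)
    ctx_b = SchedulingQuestion._build_context(rng_seed=1234, num_jobs=3)
    # Expect the same scheduler and identical job stats for a fixed seed.
    print(ctx_a["scheduler_algorithm"], ctx_a["job_stats"] == ctx_b["job_stats"])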
@@ -384,28 +392,35 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 # Create table data for scheduling results
 table_rows = []
-for job_id in sorted(
+for job_id in sorted(context["job_stats"].keys()):
+response_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["Response"])
+tat_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["TAT"])
+answers.append(response_answer)
+answers.append(tat_answer)
 table_rows.append({
 "Job ID": f"Job{job_id}",
-"Arrival":
-"Duration":
-"Response Time":
-"TAT":
+"Arrival": context["job_stats"][job_id]["arrival_time"],
+"Duration": context["job_stats"][job_id]["duration"],
+"Response Time": response_answer,
+"TAT": tat_answer
 })
-# Collect answers for this job
-answers.append(self.answers[f"answer__response_time_job{job_id}"])
-answers.append(self.answers[f"answer__turnaround_time_job{job_id}"])

 # Create table using mixin
-scheduling_table =
+scheduling_table = cls.create_answer_table(
 headers=["Job ID", "Arrival", "Duration", "Response Time", "TAT"],
 data_rows=table_rows,
 answer_columns=["Response Time", "TAT"]
 )

 # Collect average answers
-avg_response_answer =
-
+avg_response_answer = ca.AnswerTypes.Float(
+context["overall_stats"]["Response"],
+label="Overall average response time"
+)
+avg_tat_answer = ca.AnswerTypes.Float(
+context["overall_stats"]["TAT"],
+label="Overall average TAT"
+)
 answers.append(avg_response_answer)
 answers.append(avg_tat_answer)

@@ -414,26 +429,16 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 # Use mixin to create complete body
 intro_text = (
-f"Given the below information, compute the required values if using **{
+f"Given the below information, compute the required values if using **{context['scheduler_algorithm']}** scheduling. "
 f"Break any ties using the job number."
 )

-
-f"Please format answer as fractions, mixed numbers, or numbers rounded to a maximum of {ca.Answer.DEFAULT_ROUNDING_DIGITS} digits after the decimal. "
-"Examples of appropriately formatted answers would be `0`, `3/2`, `1 1/3`, `1.6667`, and `1.25`. "
-"Note that answers that can be rounded to whole numbers should be, rather than being left in fractional form."
-])])
-
-body = self.create_fill_in_table_body(intro_text, instructions, scheduling_table)
+body = cls.create_fill_in_table_body(intro_text, None, scheduling_table)
 body.add_element(average_block)
-return body, answers
-
-def get_body(self, *args, **kwargs) -> ca.Section:
-"""Build question body (backward compatible interface)."""
-body, _ = self._get_body(*args, **kwargs)
 return body

-
+@classmethod
+def _build_explanation(cls, context):
 """
 Build question explanation.
 Returns:
@@ -443,7 +448,7 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 explanation.add_element(
 ca.Paragraph([
-f"To calculate the overall Turnaround and Response times using {
+f"To calculate the overall Turnaround and Response times using {context['scheduler_algorithm']} "
 f"we want to first start by calculating the respective target and response times of all of our individual jobs."
 ])
 )
@@ -458,7 +463,7 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM

 explanation.add_element(
 ca.Paragraph([
-f"For each of our {len(
+f"For each of our {len(context['job_stats'].keys())} jobs, we can make these calculations.",
 ])
 )

@@ -468,21 +473,21 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 "For turnaround time (TAT) this would be:"
 ] + [
 f"Job{job_id}_TAT "
-f"= {
-f"- {
-f"= {
-for job_id in sorted(
+f"= {context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f} "
+f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+f"= {context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}"
+for job_id in sorted(context['job_stats'].keys())
 ])
 )

 summation_line = ' + '.join([
-f"{
+f"{context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}" for job_id in sorted(context['job_stats'].keys())
 ])
 explanation.add_element(
 ca.Paragraph([
 f"We then calculate the average of these to find the average TAT time",
-f"Avg(TAT) = ({summation_line}) / ({len(
-f"= {
+f"Avg(TAT) = ({summation_line}) / ({len(context['job_stats'].keys())}) "
+f"= {context['overall_stats']['TAT']:0.{cls.ROUNDING_DIGITS}f}",
 ])
 )

@@ -493,22 +498,22 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 "For response time this would be:"
 ] + [
 f"Job{job_id}_response "
-f"= {
-f"- {
-f"= {
-for job_id in sorted(
+f"= {context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f} "
+f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+f"= {context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f}"
+for job_id in sorted(context['job_stats'].keys())
 ])
 )

 summation_line = ' + '.join([
-f"{
+f"{context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f}" for job_id in sorted(context['job_stats'].keys())
 ])
 explanation.add_element(
 ca.Paragraph([
 f"We then calculate the average of these to find the average Response time",
 f"Avg(Response) "
-f"= ({summation_line}) / ({len(
-f"= {
+f"= ({summation_line}) / ({len(context['job_stats'].keys())}) "
+f"= {context['overall_stats']['Response']:0.{cls.ROUNDING_DIGITS}f}",
 "\n",
 ])
 )
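The generated explanation is just per-job arithmetic followed by an average: TAT is completion minus arrival, response is first start minus arrival. A worked example with made-up numbers:

    # Made-up arrival/start/completion times for three jobs.
    jobs = {
        0: {"arrival": 0, "start": 0,  "completion": 10},
        1: {"arrival": 2, "start": 10, "completion": 14},
        2: {"arrival": 3, "start": 14, "completion": 16},
    }

    tat = {j: v["completion"] - v["arrival"] for j, v in jobs.items()}   # {0: 10, 1: 12, 2: 13}
    resp = {j: v["start"] - v["arrival"] for j, v in jobs.items()}       # {0: 0, 1: 8, 2: 11}
    print(sum(tat.values()) / len(tat))     # Avg(TAT) = 35 / 3 = 11.67
    print(sum(resp.values()) / len(resp))   # Avg(Response) = 19 / 3 = 6.33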
@@ -517,42 +522,36 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 ca.Table(
 headers=["Time", "Events"],
 data=[
-[f"{t:02.{
-for t in sorted(
+[f"{t:02.{cls.ROUNDING_DIGITS}f}s"] + ['\n'.join(context['timeline'][t])]
+for t in sorted(context['timeline'].keys())
 ]
 )
 )

 explanation.add_element(
 ca.Picture(
-img_data=
+img_data=cls.make_image(context),
 caption="Process Scheduling Overview"
 )
 )

-return explanation, []
-
-def get_explanation(self, **kwargs) -> ca.Section:
-"""Build question explanation (backward compatible interface)."""
-explanation, _ = self._get_explanation(**kwargs)
 return explanation

-
-
-tat_sum = sum([self.job_stats[job_id]['TAT'] for job_id in self.job_stats.keys()])
-return (tat_sum >= duration_sum * 1.1)
-
-def make_image(self):
-
-fig, ax = plt.subplots(1, 1, figsize=self.IMAGE_FIGSIZE, dpi=self.IMAGE_DPI)
+@classmethod
+def make_image(cls, context):

-
+fig, ax = plt.subplots(1, 1, figsize=cls.IMAGE_FIGSIZE, dpi=cls.IMAGE_DPI)
+
+job_stats = context["job_stats"]
+scheduler_algorithm = context["scheduler_algorithm"]
+
+for x_loc in set([t for job_id in job_stats.keys() for t in job_stats[job_id]["state_changes"] ]):
 ax.axvline(x_loc, zorder=0)
-plt.text(x_loc + 0, len(
-
-if
-for y_loc, job_id in enumerate(sorted(
-for i, (start, stop) in enumerate(zip(
+plt.text(x_loc + 0, len(job_stats.keys())-0.3, f'{x_loc:0.{cls.ROUNDING_DIGITS}f}s', rotation=90)
+
+if scheduler_algorithm != cls.Kind.RoundRobin:
+for y_loc, job_id in enumerate(sorted(job_stats.keys(), reverse=True)):
+for i, (start, stop) in enumerate(zip(job_stats[job_id]["state_changes"], job_stats[job_id]["state_changes"][1:])):
 ax.barh(
 y = [y_loc],
 left = [start],
@@ -563,19 +562,19 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 )
 else:
 job_deltas = collections.defaultdict(int)
-for job_id in
-job_deltas[
-job_deltas[
+for job_id in job_stats.keys():
+job_deltas[job_stats[job_id]["state_changes"][0]] += 1
+job_deltas[job_stats[job_id]["state_changes"][1]] -= 1

 regimes_ranges = zip(sorted(job_deltas.keys()), sorted(job_deltas.keys())[1:])

 for (low, high) in regimes_ranges:
 jobs_in_range = [
-i for i, job_id in enumerate(list(
+i for i, job_id in enumerate(list(job_stats.keys())[::-1])
 if
-(
+(job_stats[job_id]["state_changes"][0] <= low)
 and
-(
+(job_stats[job_id]["state_changes"][1] >= high)
 ]

 if len(jobs_in_range) == 0: continue
@@ -584,15 +583,15 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 y = jobs_in_range,
 left = [low for _ in jobs_in_range],
 width = [high - low for _ in jobs_in_range],
-color=f"{ 1 - ((len(jobs_in_range) - 1) / (len(
+color=f"{ 1 - ((len(jobs_in_range) - 1) / (len(job_stats.keys())))}",
 )

 # Plot the overall TAT
 ax.barh(
-y = [i for i in range(len(
-left = [
-width = [
-tick_label = [f"Job{job_id}" for job_id in sorted(
+y = [i for i in range(len(job_stats))][::-1],
+left = [job_stats[job_id]["arrival_time"] for job_id in sorted(job_stats.keys())],
+width = [job_stats[job_id]["TAT"] for job_id in sorted(job_stats.keys())],
+tick_label = [f"Job{job_id}" for job_id in sorted(job_stats.keys())],
 color=(0,0,0,0),
 edgecolor='black',
 linewidth=2,
@@ -603,20 +602,24 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 # Save to BytesIO object instead of a file
 buffer = io.BytesIO()
 plt.tight_layout()
-plt.savefig(buffer, format='png', dpi=
+plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight', pad_inches=0.2)
 plt.close(fig)

 # Reset buffer position to the beginning
 buffer.seek(0)
 return buffer

-
+@classmethod
+def make_image_file(cls, context, image_dir="imgs"):

-image_buffer =
+image_buffer = cls.make_image(context)

 # Original file-saving logic
 if not os.path.exists(image_dir): os.mkdir(image_dir)
-image_path = os.path.join(
+image_path = os.path.join(
+image_dir,
+f"{str(context['scheduler_algorithm']).replace(' ', '_')}-{uuid.uuid4()}.png"
+)

 with open(image_path, 'wb') as fid:
 fid.write(image_buffer.getvalue())
@@ -649,45 +652,19 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 run_intervals: List[tuple] = dataclasses.field(default_factory=list)
 max_queue_level: int = 0

-
-
-num_jobs: int = DEFAULT_NUM_JOBS,
-num_queues: int = DEFAULT_NUM_QUEUES,
-min_job_length: int = MIN_DURATION,
-max_job_length: int = MAX_DURATION,
-boost_interval: int | None = None,
-boost_interval_range: List[int] | None = None,
-*args,
-**kwargs
-):
-kwargs["num_jobs"] = num_jobs
-kwargs["num_queues"] = num_queues
-kwargs["min_job_length"] = min_job_length
-kwargs["max_job_length"] = max_job_length
-if boost_interval is not None:
-kwargs["boost_interval"] = boost_interval
-if boost_interval_range is not None:
-kwargs["boost_interval_range"] = boost_interval_range
-super().__init__(*args, **kwargs)
-self.num_jobs = num_jobs
-self.num_queues = num_queues
-self.min_job_length = min_job_length
-self.max_job_length = max_job_length
-self.boost_interval = boost_interval
-self.boost_interval_range = boost_interval_range
-
-def get_workload(self, num_jobs: int) -> List[MLFQQuestion.Job]:
+@classmethod
+def get_workload(cls, rng, num_jobs: int, min_job_length: int, max_job_length: int) -> List[MLFQQuestion.Job]:
 arrivals = [0]
 if num_jobs > 1:
 arrivals.extend(
-
+rng.randint(cls.MIN_ARRIVAL, cls.MAX_ARRIVAL)
 for _ in range(num_jobs - 1)
 )
 if max(arrivals) == 0:
-arrivals[-1] =
+arrivals[-1] = rng.randint(1, cls.MAX_ARRIVAL)

 durations = [
-
+rng.randint(min_job_length, max_job_length)
 for _ in range(num_jobs)
 ]

@@ -703,7 +680,8 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 )
 return jobs

-
+@staticmethod
+def _normalize_queue_params(values: List[int] | None, num_queues: int) -> List[int]:
 if values is None:
 return []
 values = list(values)
@@ -711,21 +689,83 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 values.append(values[-1])
 return values[:num_queues]

+@classmethod
+def _build_context(cls, *, rng_seed=None, **kwargs):
+rng = random.Random(rng_seed)
+num_jobs = kwargs.get("num_jobs", cls.DEFAULT_NUM_JOBS)
+num_queues = kwargs.get("num_queues", cls.DEFAULT_NUM_QUEUES)
+min_job_length = kwargs.get("min_job_length", cls.MIN_DURATION)
+max_job_length = kwargs.get("max_job_length", cls.MAX_DURATION)
+boost_interval = kwargs.get("boost_interval", None)
+boost_interval_range = kwargs.get("boost_interval_range", None)
+
+if boost_interval is None and boost_interval_range:
+low, high = boost_interval_range
+boost_interval = rng.randint(low, high)
+
+jobs = cls.get_workload(rng, num_jobs, min_job_length, max_job_length)
+
+queue_quantums = [2**(num_queues - 1 - i) for i in range(num_queues)]
+queue_quantums = cls._normalize_queue_params(queue_quantums, num_queues)
+queue_quantums = [int(q) for q in queue_quantums]
+
+queue_allotments = [None] + [
+queue_quantums[i] * 2 for i in range(1, num_queues)
+]
+queue_allotments = cls._normalize_queue_params(queue_allotments, num_queues)
+queue_allotments = [
+int(allotment) if allotment is not None else None
+for allotment in queue_allotments
+]
+queue_allotments[0] = None
+
+timeline, boost_times, jobs = cls.run_simulation(
+jobs,
+queue_quantums,
+queue_allotments,
+boost_interval
+)
+
+job_stats = {
+job.job_id: {
+"arrival_time": job.arrival_time,
+"duration": job.duration,
+"Response": job.response_time,
+"TAT": job.turnaround_time,
+"run_intervals": list(job.run_intervals),
+}
+for job in jobs
+}
+
+return {
+"num_jobs": num_jobs,
+"num_queues": num_queues,
+"min_job_length": min_job_length,
+"max_job_length": max_job_length,
+"boost_interval": boost_interval,
+"queue_quantums": queue_quantums,
+"queue_allotments": queue_allotments,
+"timeline": timeline,
+"boost_times": boost_times,
+"job_stats": job_stats,
+}
+
+@classmethod
 def run_simulation(
-
+cls,
 jobs: List[MLFQQuestion.Job],
 queue_quantums: List[int],
 queue_allotments: List[int | None],
 boost_interval: int | None,
 ) -> None:
-
-
+timeline = collections.defaultdict(list)
+boost_times = []
 pending = sorted(jobs, key=lambda j: (j.arrival_time, j.job_id))
 queues = [collections.deque() for _ in range(len(queue_quantums))]
 completed = set()

 curr_time = pending[0].arrival_time if pending else 0
-
+timeline[curr_time].append("Simulation Start")
 next_boost_time = None
 if boost_interval is not None:
 next_boost_time = boost_interval
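The MLFQ context above derives per-queue parameters rather than taking them as arguments: the quantum doubles with each step down in priority (the highest-priority queue, Q(n-1), gets a quantum of 1) and each queue's allotment is twice its quantum, with the lowest queue's allotment left unlimited. A restatement of that derivation; `queue_params` is an illustrative helper, not a function in the package:

    def queue_params(num_queues: int):
        # Index 0 is Q0 (lowest priority); index n-1 is the highest-priority queue.
        quantums = [2 ** (num_queues - 1 - i) for i in range(num_queues)]
        allotments = [None] + [quantums[i] * 2 for i in range(1, num_queues)]
        return quantums, allotments

    print(queue_params(3))  # ([4, 2, 1], [None, 4, 2])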
@@ -738,7 +778,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 job.time_in_queue = 0
 job.remaining_quantum = None
 queues[-1].append(job)
-
+timeline[job.arrival_time].append(
 f"Job{job.job_id} arrived (dur = {job.duration})"
 )

@@ -750,17 +790,17 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 if running_job is not None and running_job.remaining_time > 0:
 jobs_to_boost.append(running_job)
 if not jobs_to_boost:
-
+boost_times.append(curr_time)
 return
 for job in sorted(jobs_to_boost, key=lambda j: j.job_id):
 job.queue_level = len(queues) - 1
 job.time_in_queue = 0
 job.remaining_quantum = None
 queues[-1].append(job)
-
+timeline[curr_time].append(
 f"Boosted all jobs to Q{len(queues) - 1}"
 )
-
+boost_times.append(curr_time)

 enqueue_arrivals(curr_time)

@@ -778,7 +818,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 if next_times:
 next_time = min(next_times)
 if next_time > curr_time:
-
+timeline[curr_time].append("CPU idle")
 curr_time = next_time
 enqueue_arrivals(curr_time)
 while next_boost_time is not None and curr_time >= next_boost_time:
@@ -807,7 +847,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 job.response_time = curr_time - job.arrival_time

 if slice_duration > 0:
-
+timeline[curr_time].append(
 f"Running Job{job.job_id} in Q{q_idx} for {slice_duration}"
 )
 job.run_intervals.append((curr_time, curr_time + slice_duration, q_idx))
@@ -827,7 +867,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 job.turnaround_time = curr_time - job.arrival_time
 job.remaining_quantum = None
 completed.add(job.job_id)
-
+timeline[curr_time].append(
 f"Completed Job{job.job_id} (TAT = {job.turnaround_time})"
 )
 continue
@@ -845,7 +885,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 job.time_in_queue = 0
 job.remaining_quantum = None
 queues[q_idx - 1].append(job)
-
+timeline[curr_time].append(
 f"Demoted Job{job.job_id} to Q{q_idx - 1}"
 )
 continue
@@ -857,70 +897,18 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 job.remaining_quantum = None
 queues[q_idx].append(job)

-
-super().refresh(*args, **kwargs)
-
-self.num_jobs = kwargs.get("num_jobs", self.num_jobs)
-self.num_queues = kwargs.get("num_queues", self.num_queues)
-self.min_job_length = kwargs.get("min_job_length", self.min_job_length)
-self.max_job_length = kwargs.get("max_job_length", self.max_job_length)
-self.boost_interval = kwargs.get("boost_interval", self.boost_interval)
-self.boost_interval_range = kwargs.get(
-"boost_interval_range",
-self.boost_interval_range
-)
-if self.boost_interval is None and self.boost_interval_range:
-low, high = self.boost_interval_range
-self.boost_interval = self.rng.randint(low, high)
-
-jobs = self.get_workload(self.num_jobs)
-
-queue_quantums = [2**(self.num_queues - 1 - i) for i in range(self.num_queues)]
-queue_quantums = self._normalize_queue_params(queue_quantums, self.num_queues)
-queue_quantums = [int(q) for q in queue_quantums]
-
-queue_allotments = [None] + [
-queue_quantums[i] * 2 for i in range(1, self.num_queues)
-]
-queue_allotments = self._normalize_queue_params(queue_allotments, self.num_queues)
-queue_allotments = [
-int(allotment) if allotment is not None else None
-for allotment in queue_allotments
-]
-queue_allotments[0] = None
-
-self.queue_quantums = queue_quantums
-self.queue_allotments = queue_allotments
-
-self.run_simulation(jobs, queue_quantums, queue_allotments, self.boost_interval)
+return timeline, boost_times, jobs

-
-
-"arrival_time": job.arrival_time,
-"duration": job.duration,
-"Response": job.response_time,
-"TAT": job.turnaround_time,
-"run_intervals": list(job.run_intervals),
-}
-for job in jobs
-}
-
-for job_id in sorted(self.job_stats.keys()):
-self.answers.update({
-f"answer__turnaround_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["TAT"])
-})
-
-return self.is_interesting()
-
-def _get_body(self, *args, **kwargs):
+@classmethod
+def _build_body(cls, context):
 answers: List[ca.Answer] = []

 queue_rows = []
-for i in reversed(range(
-allotment =
+for i in reversed(range(context["num_queues"])):
+allotment = context["queue_allotments"][i]
 queue_rows.append([
 f"Q{i}",
-
+context["queue_quantums"][i],
 "infinite" if allotment is None else allotment
 ])
 queue_table = ca.Table(
@@ -929,16 +917,17 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 )

 table_rows = []
-for job_id in sorted(
+for job_id in sorted(context["job_stats"].keys()):
+tat_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["TAT"])
+answers.append(tat_answer)
 table_rows.append({
 "Job ID": f"Job{job_id}",
-"Arrival":
-"Duration":
-"TAT":
+"Arrival": context["job_stats"][job_id]["arrival_time"],
+"Duration": context["job_stats"][job_id]["duration"],
+"TAT": tat_answer,
 })
-answers.append(self.answers[f"answer__turnaround_time_job{job_id}"])

-scheduling_table =
+scheduling_table = cls.create_answer_table(
 headers=["Job ID", "Arrival", "Duration", "TAT"],
 data_rows=table_rows,
 answer_columns=["TAT"]
@@ -959,21 +948,18 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 body = ca.Section()
 body.add_element(ca.Paragraph([intro_text]))
 body.add_element(queue_table)
-if
+if context["boost_interval"] is not None:
 body.add_element(ca.Paragraph([
-f"Every {
-f"Q{
+f"Every {context['boost_interval']} time units, all jobs are boosted to "
+f"Q{context['num_queues'] - 1}. After a boost, scheduling restarts with the "
 "lowest job number in that queue."
 ]))
 body.add_element(ca.Paragraph([instructions]))
 body.add_element(scheduling_table)
-return body, answers
-
-def get_body(self, *args, **kwargs) -> ca.Section:
-body, _ = self._get_body(*args, **kwargs)
 return body

-
+@classmethod
+def _build_explanation(cls, context):
 explanation = ca.Section()

 explanation.add_element(
@@ -988,10 +974,10 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 "For each job:"
 ] + [
 f"Job{job_id}_TAT = "
-f"{
-f"- {
-f"= {
-for job_id in sorted(
+f"{context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f} "
+f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+f"= {context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}"
+for job_id in sorted(context['job_stats'].keys())
 ])
 )

@@ -999,10 +985,10 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 ca.Table(
 headers=["Time", "Events"],
 data=[
-[f"{t:0.{
-for t in sorted(
+[f"{t:0.{cls.ROUNDING_DIGITS}f}s"] + ['\n'.join(events)]
+for t in sorted(context['timeline'].keys())
 if (events := [
-event for event in
+event for event in context['timeline'][t]
 if (
 "arrived" in event
 or "Demoted" in event
@@ -1018,41 +1004,39 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):

 explanation.add_element(
 ca.Picture(
-img_data=
+img_data=cls.make_image(context),
 caption="MLFQ Scheduling Overview"
 )
 )

-return explanation, []
-
-def get_explanation(self, **kwargs) -> ca.Section:
-explanation, _ = self._get_explanation(**kwargs)
 return explanation

-
-
+@classmethod
+def make_image(cls, context):
+fig, ax = plt.subplots(1, 1, figsize=cls.IMAGE_FIGSIZE, dpi=cls.IMAGE_DPI)

-
+job_stats = context["job_stats"]
+num_jobs = len(job_stats)
 if num_jobs == 0:
 buffer = io.BytesIO()
 plt.tight_layout()
-plt.savefig(buffer, format='png', dpi=
+plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight')
 plt.close(fig)
 buffer.seek(0)
 return buffer

 job_colors = {
 job_id: str(0.15 + 0.7 * (idx / max(1, num_jobs - 1)))
-for idx, job_id in enumerate(sorted(
+for idx, job_id in enumerate(sorted(job_stats.keys()))
 }
 job_lane = {
 job_id: idx
-for idx, job_id in enumerate(sorted(
+for idx, job_id in enumerate(sorted(job_stats.keys(), reverse=True))
 }
 lanes_per_queue = num_jobs

-for job_id in sorted(
-for start, stop, queue_level in
+for job_id in sorted(job_stats.keys()):
+for start, stop, queue_level in job_stats[job_id]["run_intervals"]:
 y_loc = queue_level * lanes_per_queue + job_lane[job_id]
 ax.barh(
 y=[y_loc],
@@ -1063,7 +1047,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 color=job_colors[job_id]
 )

-for queue_idx in range(
+for queue_idx in range(context["num_queues"]):
 if queue_idx % 2 == 1:
 ax.axhspan(
 queue_idx * lanes_per_queue - 0.5,
|
|
|
1074
1058
|
)
|
|
1075
1059
|
|
|
1076
1060
|
arrival_times = sorted({
|
|
1077
|
-
|
|
1078
|
-
for job_id in
|
|
1061
|
+
job_stats[job_id]["arrival_time"]
|
|
1062
|
+
for job_id in job_stats.keys()
|
|
1079
1063
|
})
|
|
1080
1064
|
bottom_label_y = -0.1
|
|
1081
1065
|
for arrival_time in arrival_times:
|
|
@@ -1083,7 +1067,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
|
|
|
1083
1067
|
ax.text(
|
|
1084
1068
|
arrival_time + 0.2,
|
|
1085
1069
|
bottom_label_y,
|
|
1086
|
-
f"{arrival_time:0.{
|
|
1070
|
+
f"{arrival_time:0.{cls.ROUNDING_DIGITS}f}s",
|
|
1087
1071
|
color='0.2',
|
|
1088
1072
|
rotation=90,
|
|
1089
1073
|
ha='left',
|
|
@@ -1091,37 +1075,37 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
|
|
|
1091
1075
|
)
|
|
1092
1076
|
|
|
1093
1077
|
completion_times = sorted({
|
|
1094
|
-
|
|
1095
|
-
for job_id in
|
|
1078
|
+
job_stats[job_id]["arrival_time"] + job_stats[job_id]["TAT"]
|
|
1079
|
+
for job_id in job_stats.keys()
|
|
1096
1080
|
})
|
|
1097
1081
|
for completion_time in completion_times:
|
|
1098
1082
|
ax.axvline(completion_time, color='red', linewidth=1.5, zorder=0)
|
|
1099
1083
|
ax.text(
|
|
1100
1084
|
completion_time - 0.6,
|
|
1101
|
-
|
|
1102
|
-
f"{completion_time:0.{
|
|
1085
|
+
context["num_queues"] * lanes_per_queue - 0.5,
|
|
1086
|
+
f"{completion_time:0.{cls.ROUNDING_DIGITS}f}s",
|
|
1103
1087
|
color='red',
|
|
1104
1088
|
rotation=90,
|
|
1105
1089
|
ha='center',
|
|
1106
1090
|
va='top'
|
|
1107
1091
|
)
|
|
1108
1092
|
|
|
1109
|
-
for boost_time in sorted(set(
|
|
1093
|
+
for boost_time in sorted(set(context["boost_times"])):
|
|
1110
1094
|
ax.axvline(boost_time, color='tab:blue', linestyle='--', linewidth=1.2, zorder=0)
|
|
1111
1095
|
|
|
1112
1096
|
tick_positions = [
|
|
1113
1097
|
q * lanes_per_queue + (lanes_per_queue - 1) / 2
|
|
1114
|
-
for q in range(
|
|
1098
|
+
for q in range(context["num_queues"])
|
|
1115
1099
|
]
|
|
1116
1100
|
ax.set_yticks(tick_positions)
|
|
1117
|
-
ax.set_yticklabels([f"Q{i}" for i in range(
|
|
1118
|
-
ax.set_ylim(-0.5,
|
|
1101
|
+
ax.set_yticklabels([f"Q{i}" for i in range(context["num_queues"])])
|
|
1102
|
+
ax.set_ylim(-0.5, context["num_queues"] * lanes_per_queue - 0.5)
|
|
1119
1103
|
ax.set_xlim(xmin=0)
|
|
1120
1104
|
ax.set_xlabel("Time")
|
|
1121
1105
|
|
|
1122
1106
|
buffer = io.BytesIO()
|
|
1123
1107
|
plt.tight_layout()
|
|
1124
|
-
plt.savefig(buffer, format='png', dpi=
|
|
1108
|
+
plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight')
|
|
1125
1109
|
plt.close(fig)
|
|
1126
1110
|
buffer.seek(0)
|
|
1127
1111
|
return buffer
|