QuizGenerator 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- QuizGenerator/contentast.py +6 -6
- QuizGenerator/generate.py +2 -1
- QuizGenerator/mixins.py +14 -100
- QuizGenerator/premade_questions/basic.py +24 -29
- QuizGenerator/premade_questions/cst334/languages.py +100 -99
- QuizGenerator/premade_questions/cst334/math_questions.py +112 -122
- QuizGenerator/premade_questions/cst334/memory_questions.py +621 -621
- QuizGenerator/premade_questions/cst334/persistence_questions.py +137 -163
- QuizGenerator/premade_questions/cst334/process.py +312 -322
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_calculation.py +34 -35
- QuizGenerator/premade_questions/cst463/gradient_descent/gradient_descent_questions.py +41 -36
- QuizGenerator/premade_questions/cst463/gradient_descent/loss_calculations.py +48 -41
- QuizGenerator/premade_questions/cst463/math_and_data/matrix_questions.py +285 -520
- QuizGenerator/premade_questions/cst463/math_and_data/vector_questions.py +149 -126
- QuizGenerator/premade_questions/cst463/models/attention.py +44 -50
- QuizGenerator/premade_questions/cst463/models/cnns.py +43 -47
- QuizGenerator/premade_questions/cst463/models/matrices.py +61 -11
- QuizGenerator/premade_questions/cst463/models/rnns.py +48 -50
- QuizGenerator/premade_questions/cst463/models/text.py +65 -67
- QuizGenerator/premade_questions/cst463/models/weight_counting.py +47 -46
- QuizGenerator/premade_questions/cst463/neural-network-basics/neural_network_questions.py +100 -156
- QuizGenerator/premade_questions/cst463/tensorflow-intro/tensorflow_questions.py +93 -141
- QuizGenerator/question.py +273 -202
- QuizGenerator/quiz.py +8 -5
- QuizGenerator/regenerate.py +128 -19
- {quizgenerator-0.7.0.dist-info → quizgenerator-0.8.0.dist-info}/METADATA +30 -2
- {quizgenerator-0.7.0.dist-info → quizgenerator-0.8.0.dist-info}/RECORD +30 -30
- {quizgenerator-0.7.0.dist-info → quizgenerator-0.8.0.dist-info}/WHEEL +0 -0
- {quizgenerator-0.7.0.dist-info → quizgenerator-0.8.0.dist-info}/entry_points.txt +0 -0
- {quizgenerator-0.7.0.dist-info → quizgenerator-0.8.0.dist-info}/licenses/LICENSE +0 -0
@@ -8,6 +8,7 @@ import enum
 import io
 import logging
 import os
+import random
 import uuid
 from typing import List
 
@@ -119,7 +120,8 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
     def has_started(self) -> bool:
       return self.response_time is None
 
-
+  @classmethod
+  def get_workload(cls, rng, num_jobs, *args, **kwargs) -> List[SchedulingQuestion.Job]:
     """Makes a guaranteed interesting workload by following rules
     1. First job to arrive is the longest
     2. At least 2 other jobs arrive in its runtime
@@ -132,9 +134,9 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
     # First create a job that is relatively long-running and arrives first.
     # Set arrival time to something fairly low
-    job0_arrival =
+    job0_arrival = rng.randint(0, int(0.25 * cls.MAX_ARRIVAL_TIME))
     # Set duration to something fairly long
-    job0_duration =
+    job0_duration = rng.randint(int(cls.MAX_JOB_DURATION * 0.75), cls.MAX_JOB_DURATION)
 
     # Next, let's create a job that will test whether we are preemptive or not.
     # The core characteristics of this job are that it:
@@ -146,24 +148,24 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
     # duration:
     #   lower: self.MIN_JOB_DURATION
     #   upper:
-    job1_arrival =
+    job1_arrival = rng.randint(
       job0_arrival + 1, # Make sure we start _after_ job0
-      job0_arrival + job0_duration -
+      job0_arrival + job0_duration - cls.MIN_JOB_DURATION - 2 # Make sure we always have enough time for job1 & job2
     )
-    job1_duration =
-
+    job1_duration = rng.randint(
+      cls.MIN_JOB_DURATION + 1, # default minimum and leave room for job2
       job0_arrival + job0_duration - job1_arrival - 1 # Make sure our job ends _at least_ before job0 would end
     )
 
     # Finally, we want to differentiate between STCF and SJF
     # So, if we don't preempt job0 we want to make it be a tough choice between the next 2 jobs when it completes.
     # This means we want a job that arrives _before_ job0 finishes, after job1 enters, and is shorter than job1
-    job2_arrival =
+    job2_arrival = rng.randint(
       job1_arrival + 1, # Make sure we arrive after job1 so we subvert FIFO
       job0_arrival + job0_duration - 1 # ...but before job0 would exit the system
     )
-    job2_duration =
-
+    job2_duration = rng.randint(
+      cls.MIN_JOB_DURATION, # Make sure it's at least the minimum.
       job1_duration - 1, # Make sure it's shorter than job1
     )
 
@@ -177,12 +179,12 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
     # Add more jobs as necessary, if more than 3 are requested
     if num_jobs > 3:
       job_tuples.extend([
-        (
+        (rng.randint(0, cls.MAX_ARRIVAL_TIME), rng.randint(cls.MIN_JOB_DURATION, cls.MAX_JOB_DURATION))
         for _ in range(num_jobs - 3)
       ])
 
     # Shuffle jobs so they are in a random order
-
+    rng.shuffle(job_tuples)
 
     # Make workload from job tuples
     workload = []
@@ -197,30 +199,38 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
     return workload
 
-
+  @classmethod
+  def run_simulation(
+    cls,
+    jobs_to_run: List[SchedulingQuestion.Job],
+    selector,
+    preemptable,
+    time_quantum=None,
+    scheduler_algorithm=None
+  ):
     curr_time = 0
     selected_job: SchedulingQuestion.Job | None = None
-
-
-
+
+    timeline = collections.defaultdict(list)
+    timeline[curr_time].append("Simulation Start")
     for job in jobs_to_run:
-
-
+      timeline[job.arrival_time].append(f"Job{job.job_id} arrived")
+
     while len(jobs_to_run) > 0:
       possible_time_slices = []
-
+
       # Get the jobs currently in the system
       available_jobs = list(filter(
         (lambda j: j.arrival_time <= curr_time),
         jobs_to_run
       ))
-
+
       # Get the jobs that will enter the system in the future
       future_jobs : List[SchedulingQuestion.Job] = list(filter(
         (lambda j: j.arrival_time > curr_time),
         jobs_to_run
       ))
-
+
       # Check whether there are jobs in the system already
       if len(available_jobs) > 0:
         # Use the selector to identify what job we are going to run
@@ -229,13 +239,16 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
           key=(lambda j: selector(j, curr_time))
         )
         if selected_job.has_started():
-
+          timeline[curr_time].append(
+            f"Starting Job{selected_job.job_id} "
+            f"(resp = {curr_time - selected_job.arrival_time:0.{cls.ROUNDING_DIGITS}f}s)"
+          )
         # We start the job that we selected
-        selected_job.run(curr_time, (
-
+        selected_job.run(curr_time, (scheduler_algorithm == cls.Kind.RoundRobin))
+
         # We could run to the end of the job
         possible_time_slices.append(selected_job.time_remaining(curr_time))
-
+
       # Check if we are preemptable or if we haven't found any time slices yet
       if preemptable or len(possible_time_slices) == 0:
         # Then when a job enters we could stop the current task
@@ -244,32 +257,37 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
           future_jobs,
           key=(lambda j: j.arrival_time)
         )
-        possible_time_slices.append(
-
+        possible_time_slices.append((next_arrival.arrival_time - curr_time))
+
       if time_quantum is not None:
         possible_time_slices.append(time_quantum)
-
-
+
       ## Now we pick the minimum
       try:
         next_time_slice = min(possible_time_slices)
       except ValueError:
         log.error("No jobs available to schedule")
         break
-      if
+      if scheduler_algorithm != SchedulingQuestion.Kind.RoundRobin:
         if selected_job is not None:
-
+          timeline[curr_time].append(
+            f"Running Job{selected_job.job_id} "
+            f"for {next_time_slice:0.{cls.ROUNDING_DIGITS}f}s"
+          )
         else:
-
+          timeline[curr_time].append(f"(No job running)")
       curr_time += next_time_slice
-
+
       # We stop the job we selected, and potentially mark it as complete
       if selected_job is not None:
-        selected_job.stop(curr_time, (
+        selected_job.stop(curr_time, (scheduler_algorithm == cls.Kind.RoundRobin))
         if selected_job.is_complete(curr_time):
-
+          timeline[curr_time].append(
+            f"Completed Job{selected_job.job_id} "
+            f"(TAT = {selected_job.turnaround_time:0.{cls.ROUNDING_DIGITS}f}s)"
+          )
           selected_job = None
-
+
       # Filter out completed jobs
       jobs_to_run : List[SchedulingQuestion.Job] = list(filter(
         (lambda j: not j.is_complete(curr_time)),
@@ -277,103 +295,93 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
       ))
       if len(jobs_to_run) == 0:
         break
-
-  def __init__(self, num_jobs=3, scheduler_kind=None, *args, **kwargs):
-    # Preserve question-specific params for QR code config BEFORE calling super().__init__()
-    kwargs['num_jobs'] = num_jobs
-
-    # Register the regenerable choice using the mixin
-    self.register_choice('scheduler_kind', SchedulingQuestion.Kind, scheduler_kind, kwargs)
 
-
-
-
-  def
-
-
-
+    return timeline
+
+  @classmethod
+  def _build_context(cls, *, rng_seed=None, **kwargs):
+    rng = random.Random(rng_seed)
+    num_jobs = kwargs.get("num_jobs", 3)
+
+    scheduler_kind = kwargs.get("scheduler_kind")
+    if scheduler_kind is None:
+      scheduler_algorithm = rng.choice(list(cls.Kind))
+      config_params = {"scheduler_kind": scheduler_algorithm.name}
+    else:
+      if isinstance(scheduler_kind, cls.Kind):
+        scheduler_algorithm = scheduler_kind
+      else:
+        scheduler_algorithm = cls.get_kind_from_string(str(scheduler_kind))
+      config_params = {"scheduler_kind": scheduler_algorithm.name}
 
-
-    # Note: We ignore the parent's return value since we need to generate the workload first
-    super().refresh(*args, **kwargs)
+    jobs = cls.get_workload(rng, num_jobs)
 
-
-    self.scheduler_algorithm = self.get_choice('scheduler_kind', SchedulingQuestion.Kind)
-
-    # Get workload jobs
-    jobs = self.get_workload(self.num_jobs)
-
-    # Run simulations different depending on which algorithm we chose
-    match self.scheduler_algorithm:
+    match scheduler_algorithm:
       case SchedulingQuestion.Kind.ShortestDuration:
-
+        timeline = cls.run_simulation(
          jobs_to_run=jobs,
          selector=(lambda j, curr_time: (j.duration, j.job_id)),
          preemptable=False,
-          time_quantum=None
+          time_quantum=None,
+          scheduler_algorithm=scheduler_algorithm
        )
      case SchedulingQuestion.Kind.ShortestTimeRemaining:
-
+        timeline = cls.run_simulation(
          jobs_to_run=jobs,
          selector=(lambda j, curr_time: (j.time_remaining(curr_time), j.job_id)),
          preemptable=True,
-          time_quantum=None
+          time_quantum=None,
+          scheduler_algorithm=scheduler_algorithm
        )
      case SchedulingQuestion.Kind.RoundRobin:
-
+        timeline = cls.run_simulation(
          jobs_to_run=jobs,
          selector=(lambda j, curr_time: (j.last_run, j.job_id)),
          preemptable=True,
-          time_quantum=1e-05
+          time_quantum=1e-05,
+          scheduler_algorithm=scheduler_algorithm
        )
      case _:
-
+        timeline = cls.run_simulation(
          jobs_to_run=jobs,
          selector=(lambda j, curr_time: (j.arrival_time, j.job_id)),
          preemptable=False,
-          time_quantum=None
+          time_quantum=None,
+          scheduler_algorithm=scheduler_algorithm
        )
-
-
-    self.job_stats = {
+
+    job_stats = {
      i : {
-        "arrival_time" : job.arrival_time,
-        "duration" : job.duration,
-        "Response" : job.response_time,
-        "TAT" : job.turnaround_time,
+        "arrival_time" : job.arrival_time,
+        "duration" : job.duration,
+        "Response" : job.response_time,
+        "TAT" : job.turnaround_time,
        "state_changes" : [job.arrival_time] + job.state_change_times + [job.arrival_time + job.turnaround_time],
      }
      for (i, job) in enumerate(jobs)
    }
-
+    overall_stats = {
      "Response" : sum([job.response_time for job in jobs]) / len(jobs),
      "TAT" : sum([job.turnaround_time for job in jobs]) / len(jobs)
    }
-
-    # todo: make this less convoluted
-    self.average_response = self.overall_stats["Response"]
-    self.average_tat = self.overall_stats["TAT"]
-
-    for job_id in sorted(self.job_stats.keys()):
-      self.answers.update({
-        f"answer__response_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["Response"]),
-        f"answer__turnaround_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["TAT"]),
-      })
-    self.answers.update({
-      "answer__average_response_time": ca.AnswerTypes.Float(
-        sum([job.response_time for job in jobs]) / len(jobs),
-        label="Overall average response time"
-      ),
-      "answer__average_turnaround_time": ca.AnswerTypes.Float(
-        sum([job.turnaround_time for job in jobs]) / len(jobs),
-        label="Overall average TAT"
-      )
-    })
 
-
-
+    return {
+      "num_jobs": num_jobs,
+      "scheduler_algorithm": scheduler_algorithm,
+      "job_stats": job_stats,
+      "overall_stats": overall_stats,
+      "timeline": timeline,
+      "_config_params": config_params,
+    }
+
+  @classmethod
+  def is_interesting_ctx(cls, context) -> bool:
+    duration_sum = sum([context["job_stats"][job_id]['duration'] for job_id in context["job_stats"].keys()])
+    tat_sum = sum([context["job_stats"][job_id]['TAT'] for job_id in context["job_stats"].keys()])
+    return (tat_sum >= duration_sum * 1.1)
 
-
+  @classmethod
+  def _build_body(cls, context):
    """
    Build question body and collect answers.
    Returns:
@@ -384,28 +392,35 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
     # Create table data for scheduling results
     table_rows = []
-    for job_id in sorted(
+    for job_id in sorted(context["job_stats"].keys()):
+      response_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["Response"])
+      tat_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["TAT"])
+      answers.append(response_answer)
+      answers.append(tat_answer)
       table_rows.append({
         "Job ID": f"Job{job_id}",
-        "Arrival":
-        "Duration":
-        "Response Time":
-        "TAT":
+        "Arrival": context["job_stats"][job_id]["arrival_time"],
+        "Duration": context["job_stats"][job_id]["duration"],
+        "Response Time": response_answer,
+        "TAT": tat_answer
       })
-      # Collect answers for this job
-      answers.append(self.answers[f"answer__response_time_job{job_id}"])
-      answers.append(self.answers[f"answer__turnaround_time_job{job_id}"])
 
     # Create table using mixin
-    scheduling_table =
+    scheduling_table = cls.create_answer_table(
       headers=["Job ID", "Arrival", "Duration", "Response Time", "TAT"],
       data_rows=table_rows,
       answer_columns=["Response Time", "TAT"]
     )
 
     # Collect average answers
-    avg_response_answer =
-
+    avg_response_answer = ca.AnswerTypes.Float(
+      context["overall_stats"]["Response"],
+      label="Overall average response time"
+    )
+    avg_tat_answer = ca.AnswerTypes.Float(
+      context["overall_stats"]["TAT"],
+      label="Overall average TAT"
+    )
     answers.append(avg_response_answer)
     answers.append(avg_tat_answer)
 
@@ -414,7 +429,7 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
     # Use mixin to create complete body
     intro_text = (
-      f"Given the below information, compute the required values if using **{
+      f"Given the below information, compute the required values if using **{context['scheduler_algorithm']}** scheduling. "
      f"Break any ties using the job number."
    )
 
@@ -424,16 +439,12 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
      "Note that answers that can be rounded to whole numbers should be, rather than being left in fractional form."
    ])])
 
-    body =
+    body = cls.create_fill_in_table_body(intro_text, instructions, scheduling_table)
    body.add_element(average_block)
-    return body, answers
-
-  def get_body(self, *args, **kwargs) -> ca.Section:
-    """Build question body (backward compatible interface)."""
-    body, _ = self._get_body(*args, **kwargs)
    return body
 
-
+  @classmethod
+  def _build_explanation(cls, context):
    """
    Build question explanation.
    Returns:
@@ -443,7 +454,7 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
     explanation.add_element(
       ca.Paragraph([
-        f"To calculate the overall Turnaround and Response times using {
+        f"To calculate the overall Turnaround and Response times using {context['scheduler_algorithm']} "
        f"we want to first start by calculating the respective target and response times of all of our individual jobs."
      ])
    )
@@ -458,7 +469,7 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
 
    explanation.add_element(
      ca.Paragraph([
-        f"For each of our {len(
+        f"For each of our {len(context['job_stats'].keys())} jobs, we can make these calculations.",
      ])
    )
 
@@ -468,21 +479,21 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
        "For turnaround time (TAT) this would be:"
      ] + [
        f"Job{job_id}_TAT "
-        f"= {
-        f"- {
-        f"= {
-        for job_id in sorted(
+        f"= {context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f} "
+        f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+        f"= {context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}"
+        for job_id in sorted(context['job_stats'].keys())
      ])
    )
 
    summation_line = ' + '.join([
-      f"{
+      f"{context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}" for job_id in sorted(context['job_stats'].keys())
    ])
    explanation.add_element(
      ca.Paragraph([
        f"We then calculate the average of these to find the average TAT time",
-        f"Avg(TAT) = ({summation_line}) / ({len(
-        f"= {
+        f"Avg(TAT) = ({summation_line}) / ({len(context['job_stats'].keys())}) "
+        f"= {context['overall_stats']['TAT']:0.{cls.ROUNDING_DIGITS}f}",
      ])
    )
 
@@ -493,22 +504,22 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
        "For response time this would be:"
      ] + [
        f"Job{job_id}_response "
-        f"= {
-        f"- {
-        f"= {
-        for job_id in sorted(
+        f"= {context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f} "
+        f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+        f"= {context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f}"
+        for job_id in sorted(context['job_stats'].keys())
      ])
    )
 
    summation_line = ' + '.join([
-      f"{
+      f"{context['job_stats'][job_id]['Response']:0.{cls.ROUNDING_DIGITS}f}" for job_id in sorted(context['job_stats'].keys())
    ])
    explanation.add_element(
      ca.Paragraph([
        f"We then calculate the average of these to find the average Response time",
        f"Avg(Response) "
-        f"= ({summation_line}) / ({len(
-        f"= {
+        f"= ({summation_line}) / ({len(context['job_stats'].keys())}) "
+        f"= {context['overall_stats']['Response']:0.{cls.ROUNDING_DIGITS}f}",
        "\n",
      ])
    )
@@ -517,42 +528,36 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
       ca.Table(
         headers=["Time", "Events"],
         data=[
-          [f"{t:02.{
-          for t in sorted(
+          [f"{t:02.{cls.ROUNDING_DIGITS}f}s"] + ['\n'.join(context['timeline'][t])]
+          for t in sorted(context['timeline'].keys())
        ]
      )
    )
 
    explanation.add_element(
      ca.Picture(
-        img_data=
+        img_data=cls.make_image(context),
        caption="Process Scheduling Overview"
      )
    )
 
-    return explanation, []
-
-  def get_explanation(self, **kwargs) -> ca.Section:
-    """Build question explanation (backward compatible interface)."""
-    explanation, _ = self._get_explanation(**kwargs)
    return explanation
 
-
-
-    tat_sum = sum([self.job_stats[job_id]['TAT'] for job_id in self.job_stats.keys()])
-    return (tat_sum >= duration_sum * 1.1)
-
-  def make_image(self):
+  @classmethod
+  def make_image(cls, context):
 
-    fig, ax = plt.subplots(1, 1, figsize=
-
-
+    fig, ax = plt.subplots(1, 1, figsize=cls.IMAGE_FIGSIZE, dpi=cls.IMAGE_DPI)
+
+    job_stats = context["job_stats"]
+    scheduler_algorithm = context["scheduler_algorithm"]
+
+    for x_loc in set([t for job_id in job_stats.keys() for t in job_stats[job_id]["state_changes"] ]):
      ax.axvline(x_loc, zorder=0)
-      plt.text(x_loc + 0, len(
-
-    if
-      for y_loc, job_id in enumerate(sorted(
-        for i, (start, stop) in enumerate(zip(
+      plt.text(x_loc + 0, len(job_stats.keys())-0.3, f'{x_loc:0.{cls.ROUNDING_DIGITS}f}s', rotation=90)
+
+    if scheduler_algorithm != cls.Kind.RoundRobin:
+      for y_loc, job_id in enumerate(sorted(job_stats.keys(), reverse=True)):
+        for i, (start, stop) in enumerate(zip(job_stats[job_id]["state_changes"], job_stats[job_id]["state_changes"][1:])):
          ax.barh(
            y = [y_loc],
            left = [start],
@@ -563,19 +568,19 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
          )
    else:
      job_deltas = collections.defaultdict(int)
-      for job_id in
-        job_deltas[
-        job_deltas[
+      for job_id in job_stats.keys():
+        job_deltas[job_stats[job_id]["state_changes"][0]] += 1
+        job_deltas[job_stats[job_id]["state_changes"][1]] -= 1
 
      regimes_ranges = zip(sorted(job_deltas.keys()), sorted(job_deltas.keys())[1:])
 
      for (low, high) in regimes_ranges:
        jobs_in_range = [
-          i for i, job_id in enumerate(list(
+          i for i, job_id in enumerate(list(job_stats.keys())[::-1])
          if
-            (
+            (job_stats[job_id]["state_changes"][0] <= low)
          and
-            (
+            (job_stats[job_id]["state_changes"][1] >= high)
        ]
 
        if len(jobs_in_range) == 0: continue
@@ -584,15 +589,15 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
          y = jobs_in_range,
          left = [low for _ in jobs_in_range],
          width = [high - low for _ in jobs_in_range],
-          color=f"{ 1 - ((len(jobs_in_range) - 1) / (len(
+          color=f"{ 1 - ((len(jobs_in_range) - 1) / (len(job_stats.keys())))}",
        )
 
    # Plot the overall TAT
    ax.barh(
-      y = [i for i in range(len(
-      left = [
-      width = [
-      tick_label = [f"Job{job_id}" for job_id in sorted(
+      y = [i for i in range(len(job_stats))][::-1],
+      left = [job_stats[job_id]["arrival_time"] for job_id in sorted(job_stats.keys())],
+      width = [job_stats[job_id]["TAT"] for job_id in sorted(job_stats.keys())],
+      tick_label = [f"Job{job_id}" for job_id in sorted(job_stats.keys())],
      color=(0,0,0,0),
      edgecolor='black',
      linewidth=2,
@@ -603,20 +608,24 @@ class SchedulingQuestion(ProcessQuestion, RegenerableChoiceMixin, TableQuestionM
     # Save to BytesIO object instead of a file
     buffer = io.BytesIO()
     plt.tight_layout()
-    plt.savefig(buffer, format='png', dpi=
+    plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight', pad_inches=0.2)
     plt.close(fig)
 
     # Reset buffer position to the beginning
     buffer.seek(0)
     return buffer
 
-
+  @classmethod
+  def make_image_file(cls, context, image_dir="imgs"):
 
-    image_buffer =
+    image_buffer = cls.make_image(context)
 
     # Original file-saving logic
     if not os.path.exists(image_dir): os.mkdir(image_dir)
-    image_path = os.path.join(
+    image_path = os.path.join(
+      image_dir,
+      f"{str(context['scheduler_algorithm']).replace(' ', '_')}-{uuid.uuid4()}.png"
+    )
 
     with open(image_path, 'wb') as fid:
       fid.write(image_buffer.getvalue())
@@ -649,45 +658,19 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
     run_intervals: List[tuple] = dataclasses.field(default_factory=list)
     max_queue_level: int = 0
 
-
-
-    num_jobs: int = DEFAULT_NUM_JOBS,
-    num_queues: int = DEFAULT_NUM_QUEUES,
-    min_job_length: int = MIN_DURATION,
-    max_job_length: int = MAX_DURATION,
-    boost_interval: int | None = None,
-    boost_interval_range: List[int] | None = None,
-    *args,
-    **kwargs
-  ):
-    kwargs["num_jobs"] = num_jobs
-    kwargs["num_queues"] = num_queues
-    kwargs["min_job_length"] = min_job_length
-    kwargs["max_job_length"] = max_job_length
-    if boost_interval is not None:
-      kwargs["boost_interval"] = boost_interval
-    if boost_interval_range is not None:
-      kwargs["boost_interval_range"] = boost_interval_range
-    super().__init__(*args, **kwargs)
-    self.num_jobs = num_jobs
-    self.num_queues = num_queues
-    self.min_job_length = min_job_length
-    self.max_job_length = max_job_length
-    self.boost_interval = boost_interval
-    self.boost_interval_range = boost_interval_range
-
-  def get_workload(self, num_jobs: int) -> List[MLFQQuestion.Job]:
+  @classmethod
+  def get_workload(cls, rng, num_jobs: int, min_job_length: int, max_job_length: int) -> List[MLFQQuestion.Job]:
     arrivals = [0]
     if num_jobs > 1:
       arrivals.extend(
-
+        rng.randint(cls.MIN_ARRIVAL, cls.MAX_ARRIVAL)
         for _ in range(num_jobs - 1)
       )
     if max(arrivals) == 0:
-      arrivals[-1] =
+      arrivals[-1] = rng.randint(1, cls.MAX_ARRIVAL)
 
     durations = [
-
+      rng.randint(min_job_length, max_job_length)
       for _ in range(num_jobs)
     ]
 
@@ -703,7 +686,8 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
     )
     return jobs
 
-
+  @staticmethod
+  def _normalize_queue_params(values: List[int] | None, num_queues: int) -> List[int]:
     if values is None:
       return []
     values = list(values)
@@ -711,21 +695,83 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
       values.append(values[-1])
     return values[:num_queues]
 
+  @classmethod
+  def _build_context(cls, *, rng_seed=None, **kwargs):
+    rng = random.Random(rng_seed)
+    num_jobs = kwargs.get("num_jobs", cls.DEFAULT_NUM_JOBS)
+    num_queues = kwargs.get("num_queues", cls.DEFAULT_NUM_QUEUES)
+    min_job_length = kwargs.get("min_job_length", cls.MIN_DURATION)
+    max_job_length = kwargs.get("max_job_length", cls.MAX_DURATION)
+    boost_interval = kwargs.get("boost_interval", None)
+    boost_interval_range = kwargs.get("boost_interval_range", None)
+
+    if boost_interval is None and boost_interval_range:
+      low, high = boost_interval_range
+      boost_interval = rng.randint(low, high)
+
+    jobs = cls.get_workload(rng, num_jobs, min_job_length, max_job_length)
+
+    queue_quantums = [2**(num_queues - 1 - i) for i in range(num_queues)]
+    queue_quantums = cls._normalize_queue_params(queue_quantums, num_queues)
+    queue_quantums = [int(q) for q in queue_quantums]
+
+    queue_allotments = [None] + [
+      queue_quantums[i] * 2 for i in range(1, num_queues)
+    ]
+    queue_allotments = cls._normalize_queue_params(queue_allotments, num_queues)
+    queue_allotments = [
+      int(allotment) if allotment is not None else None
+      for allotment in queue_allotments
+    ]
+    queue_allotments[0] = None
+
+    timeline, boost_times, jobs = cls.run_simulation(
+      jobs,
+      queue_quantums,
+      queue_allotments,
+      boost_interval
+    )
+
+    job_stats = {
+      job.job_id: {
+        "arrival_time": job.arrival_time,
+        "duration": job.duration,
+        "Response": job.response_time,
+        "TAT": job.turnaround_time,
+        "run_intervals": list(job.run_intervals),
+      }
+      for job in jobs
+    }
+
+    return {
+      "num_jobs": num_jobs,
+      "num_queues": num_queues,
+      "min_job_length": min_job_length,
+      "max_job_length": max_job_length,
+      "boost_interval": boost_interval,
+      "queue_quantums": queue_quantums,
+      "queue_allotments": queue_allotments,
+      "timeline": timeline,
+      "boost_times": boost_times,
+      "job_stats": job_stats,
+    }
+
+  @classmethod
   def run_simulation(
-
+    cls,
     jobs: List[MLFQQuestion.Job],
     queue_quantums: List[int],
     queue_allotments: List[int | None],
     boost_interval: int | None,
   ) -> None:
-
-
+    timeline = collections.defaultdict(list)
+    boost_times = []
     pending = sorted(jobs, key=lambda j: (j.arrival_time, j.job_id))
     queues = [collections.deque() for _ in range(len(queue_quantums))]
     completed = set()
 
     curr_time = pending[0].arrival_time if pending else 0
-
+    timeline[curr_time].append("Simulation Start")
     next_boost_time = None
     if boost_interval is not None:
       next_boost_time = boost_interval
@@ -738,7 +784,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
       job.time_in_queue = 0
       job.remaining_quantum = None
       queues[-1].append(job)
-
+      timeline[job.arrival_time].append(
         f"Job{job.job_id} arrived (dur = {job.duration})"
       )
 
@@ -750,17 +796,17 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
       if running_job is not None and running_job.remaining_time > 0:
         jobs_to_boost.append(running_job)
       if not jobs_to_boost:
-
+        boost_times.append(curr_time)
         return
       for job in sorted(jobs_to_boost, key=lambda j: j.job_id):
         job.queue_level = len(queues) - 1
         job.time_in_queue = 0
         job.remaining_quantum = None
         queues[-1].append(job)
-
+      timeline[curr_time].append(
        f"Boosted all jobs to Q{len(queues) - 1}"
      )
-
+      boost_times.append(curr_time)
 
      enqueue_arrivals(curr_time)
 
@@ -778,7 +824,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
      if next_times:
        next_time = min(next_times)
        if next_time > curr_time:
-
+          timeline[curr_time].append("CPU idle")
        curr_time = next_time
        enqueue_arrivals(curr_time)
        while next_boost_time is not None and curr_time >= next_boost_time:
@@ -807,7 +853,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
        job.response_time = curr_time - job.arrival_time
 
      if slice_duration > 0:
-
+        timeline[curr_time].append(
          f"Running Job{job.job_id} in Q{q_idx} for {slice_duration}"
        )
        job.run_intervals.append((curr_time, curr_time + slice_duration, q_idx))
@@ -827,7 +873,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
        job.turnaround_time = curr_time - job.arrival_time
        job.remaining_quantum = None
        completed.add(job.job_id)
-
+        timeline[curr_time].append(
          f"Completed Job{job.job_id} (TAT = {job.turnaround_time})"
        )
        continue
@@ -845,7 +891,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
        job.time_in_queue = 0
        job.remaining_quantum = None
        queues[q_idx - 1].append(job)
-
+        timeline[curr_time].append(
          f"Demoted Job{job.job_id} to Q{q_idx - 1}"
        )
        continue
@@ -857,70 +903,18 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
         job.remaining_quantum = None
         queues[q_idx].append(job)
 
-
-    super().refresh(*args, **kwargs)
-
-    self.num_jobs = kwargs.get("num_jobs", self.num_jobs)
-    self.num_queues = kwargs.get("num_queues", self.num_queues)
-    self.min_job_length = kwargs.get("min_job_length", self.min_job_length)
-    self.max_job_length = kwargs.get("max_job_length", self.max_job_length)
-    self.boost_interval = kwargs.get("boost_interval", self.boost_interval)
-    self.boost_interval_range = kwargs.get(
-      "boost_interval_range",
-      self.boost_interval_range
-    )
-    if self.boost_interval is None and self.boost_interval_range:
-      low, high = self.boost_interval_range
-      self.boost_interval = self.rng.randint(low, high)
-
-    jobs = self.get_workload(self.num_jobs)
-
-    queue_quantums = [2**(self.num_queues - 1 - i) for i in range(self.num_queues)]
-    queue_quantums = self._normalize_queue_params(queue_quantums, self.num_queues)
-    queue_quantums = [int(q) for q in queue_quantums]
+    return timeline, boost_times, jobs
 
-
-
-    ]
-    queue_allotments = self._normalize_queue_params(queue_allotments, self.num_queues)
-    queue_allotments = [
-      int(allotment) if allotment is not None else None
-      for allotment in queue_allotments
-    ]
-    queue_allotments[0] = None
-
-    self.queue_quantums = queue_quantums
-    self.queue_allotments = queue_allotments
-
-    self.run_simulation(jobs, queue_quantums, queue_allotments, self.boost_interval)
-
-    self.job_stats = {
-      job.job_id: {
-        "arrival_time": job.arrival_time,
-        "duration": job.duration,
-        "Response": job.response_time,
-        "TAT": job.turnaround_time,
-        "run_intervals": list(job.run_intervals),
-      }
-      for job in jobs
-    }
-
-    for job_id in sorted(self.job_stats.keys()):
-      self.answers.update({
-        f"answer__turnaround_time_job{job_id}": ca.AnswerTypes.Float(self.job_stats[job_id]["TAT"])
-      })
-
-    return self.is_interesting()
-
-  def _get_body(self, *args, **kwargs):
+  @classmethod
+  def _build_body(cls, context):
    answers: List[ca.Answer] = []
 
    queue_rows = []
-    for i in reversed(range(
-      allotment =
+    for i in reversed(range(context["num_queues"])):
+      allotment = context["queue_allotments"][i]
      queue_rows.append([
        f"Q{i}",
-
+        context["queue_quantums"][i],
        "infinite" if allotment is None else allotment
      ])
    queue_table = ca.Table(
@@ -929,16 +923,17 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
     )
 
     table_rows = []
-    for job_id in sorted(
+    for job_id in sorted(context["job_stats"].keys()):
+      tat_answer = ca.AnswerTypes.Float(context["job_stats"][job_id]["TAT"])
+      answers.append(tat_answer)
       table_rows.append({
         "Job ID": f"Job{job_id}",
-        "Arrival":
-        "Duration":
-        "TAT":
+        "Arrival": context["job_stats"][job_id]["arrival_time"],
+        "Duration": context["job_stats"][job_id]["duration"],
+        "TAT": tat_answer,
       })
-      answers.append(self.answers[f"answer__turnaround_time_job{job_id}"])
 
-    scheduling_table =
+    scheduling_table = cls.create_answer_table(
       headers=["Job ID", "Arrival", "Duration", "TAT"],
       data_rows=table_rows,
       answer_columns=["TAT"]
@@ -946,7 +941,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 
     intro_text = (
       "Assume an MLFQ scheduler with round-robin inside each queue. "
-      f"New jobs enter the highest-priority queue (Q{
+      f"New jobs enter the highest-priority queue (Q{context['num_queues'] - 1}) "
      "and a job is demoted after using its total allotment for that queue. "
      "If a higher-priority job arrives, it preempts any lower-priority job."
    )
@@ -959,21 +954,18 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
    body = ca.Section()
    body.add_element(ca.Paragraph([intro_text]))
    body.add_element(queue_table)
-    if
+    if context["boost_interval"] is not None:
      body.add_element(ca.Paragraph([
-        f"Every {
-        f"Q{
+        f"Every {context['boost_interval']} time units, all jobs are boosted to "
+        f"Q{context['num_queues'] - 1}. After a boost, scheduling restarts with the "
        "lowest job number in that queue."
      ]))
    body.add_element(ca.Paragraph([instructions]))
    body.add_element(scheduling_table)
-    return body, answers
-
-  def get_body(self, *args, **kwargs) -> ca.Section:
-    body, _ = self._get_body(*args, **kwargs)
    return body
 
-
+  @classmethod
+  def _build_explanation(cls, context):
    explanation = ca.Section()
 
    explanation.add_element(
@@ -988,10 +980,10 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
         "For each job:"
       ] + [
         f"Job{job_id}_TAT = "
-        f"{
-        f"- {
-        f"= {
-        for job_id in sorted(
+        f"{context['job_stats'][job_id]['arrival_time'] + context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f} "
+        f"- {context['job_stats'][job_id]['arrival_time']:0.{cls.ROUNDING_DIGITS}f} "
+        f"= {context['job_stats'][job_id]['TAT']:0.{cls.ROUNDING_DIGITS}f}"
+        for job_id in sorted(context['job_stats'].keys())
      ])
    )
 
@@ -999,10 +991,10 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
      ca.Table(
        headers=["Time", "Events"],
        data=[
-          [f"{t:0.{
-          for t in sorted(
+          [f"{t:0.{cls.ROUNDING_DIGITS}f}s"] + ['\n'.join(events)]
+          for t in sorted(context['timeline'].keys())
          if (events := [
-            event for event in
+            event for event in context['timeline'][t]
            if (
              "arrived" in event
              or "Demoted" in event
@@ -1018,41 +1010,39 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
 
     explanation.add_element(
       ca.Picture(
-        img_data=
+        img_data=cls.make_image(context),
        caption="MLFQ Scheduling Overview"
      )
    )
 
-    return explanation, []
-
-  def get_explanation(self, **kwargs) -> ca.Section:
-    explanation, _ = self._get_explanation(**kwargs)
    return explanation
 
-
-
+  @classmethod
+  def make_image(cls, context):
+    fig, ax = plt.subplots(1, 1, figsize=cls.IMAGE_FIGSIZE, dpi=cls.IMAGE_DPI)
 
-
+    job_stats = context["job_stats"]
+    num_jobs = len(job_stats)
    if num_jobs == 0:
      buffer = io.BytesIO()
      plt.tight_layout()
-      plt.savefig(buffer, format='png', dpi=
+      plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight')
      plt.close(fig)
      buffer.seek(0)
      return buffer
 
    job_colors = {
      job_id: str(0.15 + 0.7 * (idx / max(1, num_jobs - 1)))
-      for idx, job_id in enumerate(sorted(
+      for idx, job_id in enumerate(sorted(job_stats.keys()))
    }
    job_lane = {
      job_id: idx
-      for idx, job_id in enumerate(sorted(
+      for idx, job_id in enumerate(sorted(job_stats.keys(), reverse=True))
    }
    lanes_per_queue = num_jobs
 
-    for job_id in sorted(
-      for start, stop, queue_level in
+    for job_id in sorted(job_stats.keys()):
+      for start, stop, queue_level in job_stats[job_id]["run_intervals"]:
        y_loc = queue_level * lanes_per_queue + job_lane[job_id]
        ax.barh(
          y=[y_loc],
@@ -1063,7 +1053,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
          color=job_colors[job_id]
        )
 
-    for queue_idx in range(
+    for queue_idx in range(context["num_queues"]):
      if queue_idx % 2 == 1:
        ax.axhspan(
          queue_idx * lanes_per_queue - 0.5,
@@ -1074,8 +1064,8 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
        )
 
    arrival_times = sorted({
-
-      for job_id in
+      job_stats[job_id]["arrival_time"]
+      for job_id in job_stats.keys()
    })
    bottom_label_y = -0.1
    for arrival_time in arrival_times:
@@ -1083,7 +1073,7 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
      ax.text(
        arrival_time + 0.2,
        bottom_label_y,
-        f"{arrival_time:0.{
+        f"{arrival_time:0.{cls.ROUNDING_DIGITS}f}s",
        color='0.2',
        rotation=90,
        ha='left',
@@ -1091,37 +1081,37 @@ class MLFQQuestion(ProcessQuestion, TableQuestionMixin, BodyTemplatesMixin):
      )
 
    completion_times = sorted({
-
-      for job_id in
+      job_stats[job_id]["arrival_time"] + job_stats[job_id]["TAT"]
+      for job_id in job_stats.keys()
    })
    for completion_time in completion_times:
      ax.axvline(completion_time, color='red', linewidth=1.5, zorder=0)
      ax.text(
        completion_time - 0.6,
-
-        f"{completion_time:0.{
+        context["num_queues"] * lanes_per_queue - 0.5,
+        f"{completion_time:0.{cls.ROUNDING_DIGITS}f}s",
        color='red',
        rotation=90,
        ha='center',
        va='top'
      )
 
-    for boost_time in sorted(set(
+    for boost_time in sorted(set(context["boost_times"])):
      ax.axvline(boost_time, color='tab:blue', linestyle='--', linewidth=1.2, zorder=0)
 
    tick_positions = [
      q * lanes_per_queue + (lanes_per_queue - 1) / 2
-      for q in range(
+      for q in range(context["num_queues"])
    ]
    ax.set_yticks(tick_positions)
-    ax.set_yticklabels([f"Q{i}" for i in range(
-    ax.set_ylim(-0.5,
+    ax.set_yticklabels([f"Q{i}" for i in range(context["num_queues"])])
+    ax.set_ylim(-0.5, context["num_queues"] * lanes_per_queue - 0.5)
    ax.set_xlim(xmin=0)
    ax.set_xlabel("Time")
 
    buffer = io.BytesIO()
    plt.tight_layout()
-    plt.savefig(buffer, format='png', dpi=
+    plt.savefig(buffer, format='png', dpi=cls.IMAGE_DPI, bbox_inches='tight')
    plt.close(fig)
    buffer.seek(0)
    return buffer