job-shop-lib 1.0.0a2__py3-none-any.whl → 1.0.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- job_shop_lib/_job_shop_instance.py +119 -55
- job_shop_lib/_operation.py +18 -7
- job_shop_lib/_schedule.py +13 -15
- job_shop_lib/_scheduled_operation.py +17 -18
- job_shop_lib/dispatching/__init__.py +4 -0
- job_shop_lib/dispatching/_dispatcher.py +36 -47
- job_shop_lib/dispatching/_dispatcher_observer_config.py +15 -2
- job_shop_lib/dispatching/_factories.py +10 -2
- job_shop_lib/dispatching/_ready_operation_filters.py +80 -0
- job_shop_lib/dispatching/feature_observers/_composite_feature_observer.py +0 -1
- job_shop_lib/dispatching/feature_observers/_factory.py +21 -18
- job_shop_lib/dispatching/feature_observers/_is_completed_observer.py +1 -0
- job_shop_lib/dispatching/feature_observers/_is_ready_observer.py +1 -1
- job_shop_lib/dispatching/rules/_dispatching_rule_solver.py +44 -25
- job_shop_lib/dispatching/rules/_dispatching_rules_functions.py +9 -9
- job_shop_lib/generation/_general_instance_generator.py +33 -34
- job_shop_lib/generation/_instance_generator.py +14 -17
- job_shop_lib/generation/_transformations.py +11 -8
- job_shop_lib/graphs/__init__.py +3 -0
- job_shop_lib/graphs/_build_disjunctive_graph.py +41 -3
- job_shop_lib/graphs/graph_updaters/_graph_updater.py +11 -13
- job_shop_lib/graphs/graph_updaters/_residual_graph_updater.py +17 -20
- job_shop_lib/reinforcement_learning/__init__.py +16 -7
- job_shop_lib/reinforcement_learning/_multi_job_shop_graph_env.py +69 -57
- job_shop_lib/reinforcement_learning/_single_job_shop_graph_env.py +43 -32
- job_shop_lib/reinforcement_learning/_types_and_constants.py +2 -2
- job_shop_lib/visualization/__init__.py +29 -10
- job_shop_lib/visualization/_gantt_chart_creator.py +122 -84
- job_shop_lib/visualization/_gantt_chart_video_and_gif_creation.py +68 -37
- job_shop_lib/visualization/_plot_disjunctive_graph.py +382 -0
- job_shop_lib/visualization/{_gantt_chart.py → _plot_gantt_chart.py} +78 -14
- {job_shop_lib-1.0.0a2.dist-info → job_shop_lib-1.0.0a4.dist-info}/METADATA +15 -3
- {job_shop_lib-1.0.0a2.dist-info → job_shop_lib-1.0.0a4.dist-info}/RECORD +36 -36
- {job_shop_lib-1.0.0a2.dist-info → job_shop_lib-1.0.0a4.dist-info}/WHEEL +1 -1
- job_shop_lib/visualization/_disjunctive_graph.py +0 -210
- /job_shop_lib/visualization/{_agent_task_graph.py → _plot_agent_task_graph.py} +0 -0
- {job_shop_lib-1.0.0a2.dist-info → job_shop_lib-1.0.0a4.dist-info}/LICENSE +0 -0
job_shop_lib/dispatching/_ready_operation_filters.py
CHANGED
@@ -15,6 +15,86 @@ ReadyOperationsFilter = Callable[
 ]
 
 
+def filter_non_idle_machines(
+    dispatcher: Dispatcher, operations: list[Operation]
+) -> list[Operation]:
+    """Filters out all the operations associated with non-idle machines.
+
+    A machine is considered idle if there are no ongoing operations
+    currently scheduled on it. This filter removes operations that are
+    associated with machines that are busy (i.e., have at least one
+    uncompleted operation).
+
+    Utilizes :meth:``Dispatcher.ongoing_operations()`` to determine machine
+    statuses.
+
+    Args:
+        dispatcher: The dispatcher object.
+        operations: The list of operations to filter.
+
+    Returns:
+        The list of operations that are associated with idle machines.
+    """
+    current_time = dispatcher.min_start_time(operations)
+    non_idle_machines = _get_non_idle_machines(dispatcher, current_time)
+
+    # Filter operations to keep those that are associated with at least one
+    # idle machine
+    filtered_operations: list[Operation] = []
+    for operation in operations:
+        if all(
+            machine_id in non_idle_machines
+            for machine_id in operation.machines
+        ):
+            continue
+        filtered_operations.append(operation)
+
+    return filtered_operations
+
+
+def _get_non_idle_machines(
+    dispatcher: Dispatcher, current_time: int
+) -> set[int]:
+    """Returns the set of machine ids that are currently busy (i.e., have at
+    least one uncompleted operation)."""
+
+    non_idle_machines = set()
+    for machine_schedule in dispatcher.schedule.schedule:
+        for scheduled_operation in reversed(machine_schedule):
+            is_completed = scheduled_operation.end_time <= current_time
+            if is_completed:
+                break
+            non_idle_machines.add(scheduled_operation.machine_id)
+
+    return non_idle_machines
+
+
+def filter_non_immediate_operations(
+    dispatcher: Dispatcher, operations: list[Operation]
+) -> list[Operation]:
+    """Filters out all the operations that can't start immediately.
+
+    An operation can start immediately if its earliest start time is the
+    current time.
+
+    The current time is determined by the minimum start time of the
+    operations.
+
+    Args:
+        dispatcher: The dispatcher object.
+        operations: The list of operations to filter.
+    """
+
+    min_start_time = dispatcher.min_start_time(operations)
+    immediate_operations: list[Operation] = []
+    for operation in operations:
+        start_time = dispatcher.earliest_start_time(operation)
+        if start_time == min_start_time:
+            immediate_operations.append(operation)
+
+    return immediate_operations
+
+
 def filter_dominated_operations(
     dispatcher: Dispatcher, operations: list[Operation]
 ) -> list[Operation]:
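
For orientation (not part of the diff): the new filters share the signature (dispatcher, operations) -> list[Operation], so they can be chained by hand. A minimal sketch, assuming the filters are re-exported from job_shop_lib.dispatching (the dispatching/__init__.py change listed above suggests this) and using a throwaway two-job instance:

from job_shop_lib import JobShopInstance, Operation
from job_shop_lib.dispatching import (  # assumed re-exports
    Dispatcher,
    filter_non_idle_machines,
    filter_non_immediate_operations,
)

# Toy instance: Operation(machine_id, duration); names are placeholders.
jobs = [
    [Operation(0, 3), Operation(1, 2)],
    [Operation(1, 4), Operation(0, 1)],
]
instance = JobShopInstance(jobs, name="toy_example")
dispatcher = Dispatcher(instance)

# Each filter narrows down the currently available operations.
candidates = dispatcher.available_operations()
candidates = filter_non_idle_machines(dispatcher, candidates)
candidates = filter_non_immediate_operations(dispatcher, candidates)
print([op.operation_id for op in candidates])
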
job_shop_lib/dispatching/feature_observers/_composite_feature_observer.py
CHANGED
@@ -193,7 +193,6 @@ if __name__ == "__main__":
             dispatcher=dispatcher_,
         )
         for observer_type in feature_observer_types_
-        if not observer_type == FeatureObserverType.COMPOSITE
         # and not FeatureObserverType.EARLIEST_START_TIME
     ]
     composite_observer_ = CompositeFeatureObserver(
job_shop_lib/dispatching/feature_observers/_factory.py
CHANGED
@@ -1,4 +1,4 @@
-"""Contains factory functions for creating
+"""Contains factory functions for creating :class:`FeatureObserver`s."""
 
 from enum import Enum
 
@@ -18,8 +18,12 @@ from job_shop_lib.dispatching.feature_observers import (
 class FeatureObserverType(str, Enum):
     """Enumeration of the different feature observers.
 
-    Each
-    to create the
+    Each :class:`FeatureObserver` is associated with a string value that can be
+    used to create the :class:`FeatureObserver` using the factory function.
+
+    It does not include the :class:`CompositeFeatureObserver` class since this
+    observer is often managed separately from the others. For example, a
+    common use case is to create a list of feature observers and pass them to
     """
 
     IS_READY = "is_ready"
@@ -29,7 +33,6 @@ class FeatureObserverType(str, Enum):
     POSITION_IN_JOB = "position_in_job"
     REMAINING_OPERATIONS = "remaining_operations"
     IS_COMPLETED = "is_completed"
-    COMPOSITE = "composite"
 
 
 # FeatureObserverConfig = DispatcherObserverConfig[
@@ -43,7 +46,7 @@ FeatureObserverConfig = (
 
 
 def feature_observer_factory(
-
+    feature_observer_type: (
         str
         | FeatureObserverType
         | type[FeatureObserver]
@@ -51,29 +54,29 @@ def feature_observer_factory(
     ),
     **kwargs,
 ) -> FeatureObserver:
-    """Creates and returns a
-
+    """Creates and returns a :class:`FeatureObserver` based on the specified
+    :class:`FeatureObserver` type.
 
     Args:
         feature_creator_type:
-            The type of
+            The type of :class:`FeatureObserver` to create.
        **kwargs:
-            Additional keyword arguments to pass to the
-
+            Additional keyword arguments to pass to the
+            :class:`FeatureObserver` constructor.
 
     Returns:
-        A
+        A :class:`FeatureObserver` instance.
     """
-    if isinstance(
+    if isinstance(feature_observer_type, DispatcherObserverConfig):
         return feature_observer_factory(
-
-            **
+            feature_observer_type.class_type,
+            **feature_observer_type.kwargs,
             **kwargs,
         )
     # if the instance is of type type[FeatureObserver] we can just
     # call the object constructor with the keyword arguments
-    if isinstance(
-        return
+    if isinstance(feature_observer_type, type):
+        return feature_observer_type(**kwargs)
 
     mapping: dict[FeatureObserverType, type[FeatureObserver]] = {
         FeatureObserverType.IS_READY: IsReadyObserver,
@@ -84,5 +87,5 @@ def feature_observer_factory(
         FeatureObserverType.REMAINING_OPERATIONS: RemainingOperationsObserver,
         FeatureObserverType.IS_COMPLETED: IsCompletedObserver,
     }
-
-    return
+    feature_observer = mapping[feature_observer_type] # type: ignore[index]
+    return feature_observer(**kwargs)
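
A hypothetical sketch of the factory after this change (not part of the diff); it assumes the observers accept the dispatcher as a keyword argument forwarded through **kwargs:

from job_shop_lib import JobShopInstance, Operation
from job_shop_lib.dispatching import Dispatcher
from job_shop_lib.dispatching.feature_observers import (
    FeatureObserverType,
    feature_observer_factory,
)

instance = JobShopInstance([[Operation(0, 2), Operation(1, 3)]])
dispatcher = Dispatcher(instance)

# A string, an enum member, or the observer class itself are all accepted;
# COMPOSITE is no longer an enum member after this change.
observer_a = feature_observer_factory("is_ready", dispatcher=dispatcher)
observer_b = feature_observer_factory(
    FeatureObserverType.IS_COMPLETED, dispatcher=dispatcher
)
print(type(observer_a).__name__, type(observer_b).__name__)
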
job_shop_lib/dispatching/feature_observers/_is_ready_observer.py
CHANGED
@@ -29,5 +29,5 @@ class IsReadyObserver(FeatureObserver):
         self.initialize_features()
 
     def _get_ready_operations(self) -> list[int]:
-        available_operations = self.dispatcher.
+        available_operations = self.dispatcher.available_operations()
         return [operation.operation_id for operation in available_operations]
job_shop_lib/dispatching/rules/_dispatching_rule_solver.py
CHANGED
@@ -1,12 +1,14 @@
 """Home of the `DispatchingRuleSolver` class."""
 
-from collections.abc import Callable
+from collections.abc import Callable, Iterable
 
 from job_shop_lib import JobShopInstance, Schedule, Operation, BaseSolver
 from job_shop_lib.dispatching import (
     ready_operations_filter_factory,
     Dispatcher,
     ReadyOperationsFilterType,
+    ReadyOperationsFilter,
+    create_composite_operation_filter,
 )
 from job_shop_lib.dispatching.rules import (
     dispatching_rule_factory,
@@ -30,6 +32,35 @@ class DispatchingRuleSolver(BaseSolver):
         pruning_function:
             The pruning function to use. It is used to initialize the
             dispatcher object internally when calling the solve method.
+
+    Args:
+        dispatching_rule:
+            The dispatching rule to use. It can be a string with the name
+            of the dispatching rule, a class`DispatchingRuleType` enum member,
+            or a callable that takes a dispatcher and returns the operation to
+            be dispatched next.
+        machine_chooser:
+            The machine chooser to use. It can be a string with the name
+            of the machine chooser, a :class:`MachineChooserType` member, or a
+            callable that takes a dispatcher and an operation and returns
+            the machine id where the operation will be dispatched.
+        ready_operations_filter:
+            The ready operations filter to use. It can be either:
+
+            - a string with the name of the pruning function
+            - a :class`ReadyOperationsFilterType` enum member.
+            - a callable that takes a dispatcher and a list of operations
+              and returns a list of operations that should be considered
+              for dispatching,
+            - a list with names or actual ready operations filters to be used.
+              If a list is provided, a composite filter will be created
+              using the specified filters.
+
+    .. seealso::
+        - :func:`job_shop_lib.dispatching.rules.dispatching_rule_factory`
+        - :func:`job_shop_lib.dispatching.rules.machine_chooser_factory`
+        - :func:`~job_shop_lib.dispatching.ready_operations_filter_factory`
+        - :func:`~job_shop_lib.dispatching.create_composite_operation_filter`
     """
 
     def __init__(
@@ -41,32 +72,16 @@ class DispatchingRuleSolver(BaseSolver):
             str | Callable[[Dispatcher, Operation], int]
         ) = MachineChooserType.FIRST,
         ready_operations_filter: (
-            str
-
+            Iterable[ReadyOperationsFilter | str | ReadyOperationsFilterType]
+            | str
+            | ReadyOperationsFilterType
+            | ReadyOperationsFilter
             | None
-        ) =
+        ) = (
+            ReadyOperationsFilterType.DOMINATED_OPERATIONS,
+            ReadyOperationsFilterType.NON_IDLE_MACHINES,
+        ),
     ):
-        """Initializes the solver with the given dispatching rule, machine
-        chooser and pruning function.
-
-        Args:
-            dispatching_rule:
-                The dispatching rule to use. It can be a string with the name
-                of the dispatching rule, a DispatchingRule enum member, or a
-                callable that takes a dispatcher and returns the operation to
-                be dispatched next.
-            machine_chooser:
-                The machine chooser to use. It can be a string with the name
-                of the machine chooser, a MachineChooser enum member, or a
-                callable that takes a dispatcher and an operation and returns
-                the machine id where the operation will be dispatched.
-            ready_operations_filter:
-                The ready operations filter to use. It can be a string with
-                the name of the pruning function, a PruningFunction enum
-                member, or a callable that takes a dispatcher and a list of
-                operations and returns a list of operations that should be
-                considered for dispatching.
-        """
         if isinstance(dispatching_rule, str):
            dispatching_rule = dispatching_rule_factory(dispatching_rule)
         if isinstance(machine_chooser, str):
@@ -75,6 +90,10 @@ class DispatchingRuleSolver(BaseSolver):
             ready_operations_filter = ready_operations_filter_factory(
                 ready_operations_filter
             )
+        if isinstance(ready_operations_filter, Iterable):
+            ready_operations_filter = create_composite_operation_filter(
+                ready_operations_filter
+            )
 
         self.dispatching_rule = dispatching_rule
         self.machine_chooser = machine_chooser
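
A usage sketch (not part of the diff) of the widened ready_operations_filter parameter; the string names are assumed to mirror the enum members referenced above, and the instance is a placeholder:

from job_shop_lib import JobShopInstance, Operation
from job_shop_lib.dispatching.rules import DispatchingRuleSolver

instance = JobShopInstance(
    [[Operation(0, 3), Operation(1, 2)], [Operation(1, 4), Operation(0, 1)]]
)

# Passing a list builds a composite filter, roughly what the new default
# (DOMINATED_OPERATIONS + NON_IDLE_MACHINES) does internally.
solver = DispatchingRuleSolver(
    dispatching_rule="most_work_remaining",
    ready_operations_filter=["dominated_operations", "non_idle_machines"],
)
schedule = solver.solve(instance)
print(schedule.makespan())
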
job_shop_lib/dispatching/rules/_dispatching_rules_functions.py
CHANGED
@@ -21,7 +21,7 @@ from job_shop_lib.dispatching.feature_observers import (
 def shortest_processing_time_rule(dispatcher: Dispatcher) -> Operation:
     """Dispatches the operation with the shortest duration."""
     return min(
-        dispatcher.
+        dispatcher.available_operations(),
         key=lambda operation: operation.duration,
     )
 
@@ -29,7 +29,7 @@ def shortest_processing_time_rule(dispatcher: Dispatcher) -> Operation:
 def first_come_first_served_rule(dispatcher: Dispatcher) -> Operation:
     """Dispatches the operation with the lowest position in job."""
     return min(
-        dispatcher.
+        dispatcher.available_operations(),
         key=lambda operation: operation.position_in_job,
     )
 
@@ -41,7 +41,7 @@ def most_work_remaining_rule(dispatcher: Dispatcher) -> Operation:
         job_remaining_work[operation.job_id] += operation.duration
 
     return max(
-        dispatcher.
+        dispatcher.available_operations(),
         key=lambda operation: job_remaining_work[operation.job_id],
     )
 
@@ -53,14 +53,14 @@ def most_operations_remaining_rule(dispatcher: Dispatcher) -> Operation:
         job_remaining_operations[operation.job_id] += 1
 
     return max(
-        dispatcher.
+        dispatcher.available_operations(),
         key=lambda operation: job_remaining_operations[operation.job_id],
     )
 
 
 def random_operation_rule(dispatcher: Dispatcher) -> Operation:
     """Dispatches a random operation."""
-    return random.choice(dispatcher.
+    return random.choice(dispatcher.available_operations())
 
 
 def score_based_rule(
@@ -80,7 +80,7 @@ def score_based_rule(
     def rule(dispatcher: Dispatcher) -> Operation:
         scores = score_function(dispatcher)
         return max(
-            dispatcher.
+            dispatcher.available_operations(),
             key=lambda operation: scores[operation.job_id],
         )
 
@@ -102,7 +102,7 @@ def score_based_rule_with_tie_breaker(
     """
 
     def rule(dispatcher: Dispatcher) -> Operation:
-        candidates = dispatcher.
+        candidates = dispatcher.available_operations()
         for scoring_function in score_functions:
             scores = scoring_function(dispatcher)
             best_score = max(scores)
@@ -126,7 +126,7 @@ def shortest_processing_time_score(dispatcher: Dispatcher) -> list[int]:
     """Scores each job based on the duration of the next operation."""
     num_jobs = dispatcher.instance.num_jobs
     scores = [0] * num_jobs
-    for operation in dispatcher.
+    for operation in dispatcher.available_operations():
         scores[operation.job_id] = -operation.duration
     return scores
 
@@ -135,7 +135,7 @@ def first_come_first_served_score(dispatcher: Dispatcher) -> list[int]:
     """Scores each job based on the position of the next operation."""
     num_jobs = dispatcher.instance.num_jobs
     scores = [0] * num_jobs
-    for operation in dispatcher.
+    for operation in dispatcher.available_operations():
         scores[operation.job_id] = operation.operation_id
     return scores
 
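
Since every rule above now draws candidates from dispatcher.available_operations(), a custom rule is just a callable Dispatcher -> Operation over that list. A small sketch (not part of the diff):

from job_shop_lib import Operation
from job_shop_lib.dispatching import Dispatcher
from job_shop_lib.dispatching.rules import DispatchingRuleSolver


def longest_processing_time_rule(dispatcher: Dispatcher) -> Operation:
    """Dispatches the available operation with the longest duration."""
    return max(
        dispatcher.available_operations(),
        key=lambda operation: operation.duration,
    )


# A callable is accepted directly as the dispatching rule.
solver = DispatchingRuleSolver(dispatching_rule=longest_processing_time_rule)
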
job_shop_lib/generation/_general_instance_generator.py
CHANGED
@@ -17,36 +17,58 @@ class GeneralInstanceGenerator(InstanceGenerator):
     durations, and more.
 
     The class supports both single instance generation and iteration over
-    multiple instances, controlled by the
-    implements the iterator protocol, allowing it to be used in a
+    multiple instances, controlled by the ``iteration_limit`` parameter. It
+    implements the iterator protocol, allowing it to be used in a ``for`` loop.
 
     Note:
         When used as an iterator, the generator will produce instances until it
-        reaches the specified
-        it will continue indefinitely.
+        reaches the specified ``iteration_limit``. If ``iteration_limit`` is
+        ``None``, it will continue indefinitely.
 
     Attributes:
         num_jobs_range:
             The range of the number of jobs to generate. If a single
-            int is provided, it is used as both the minimum and maximum.
+            ``int`` is provided, it is used as both the minimum and maximum.
         duration_range:
             The range of durations for each operation.
         num_machines_range:
             The range of the number of machines available. If a
-            single int is provided, it is used as both the minimum and
+            single ``int`` is provided, it is used as both the minimum and
+            maximum.
         machines_per_operation:
             Specifies how many machines each operation
-            can be assigned to. If a single int is provided, it is used for
+            can be assigned to. If a single ``int`` is provided, it is used for
             all operations.
         allow_less_jobs_than_machines:
-            If True
-            less than the number of machines.
+            If ``True``, allows generating instances where the number of jobs
+            is less than the number of machines.
         allow_recirculation:
-            If True
+            If ``True``, a job can visit the same machine more than once.
         name_suffix:
             A suffix to append to each instance's name for identification.
         seed:
             Seed for the random number generator to ensure reproducibility.
+
+    Args:
+        num_jobs:
+            The range of the number of jobs to generate.
+        num_machines:
+            The range of the number of machines available.
+        duration_range:
+            The range of durations for each operation.
+        allow_less_jobs_than_machines:
+            Allows instances with fewer jobs than machines.
+        allow_recirculation:
+            Allows jobs to visit the same machine multiple times.
+        machines_per_operation:
+            Specifies how many machines each operation can be assigned to.
+            If a single ``int`` is provided, it is used for all operations.
+        name_suffix:
+            Suffix for instance names.
+        seed:
+            Seed for the random number generator.
+        iteration_limit:
+            Maximum number of instances to generate in iteration mode.
     """
 
     def __init__( # pylint: disable=too-many-arguments
@@ -61,29 +83,6 @@ class GeneralInstanceGenerator(InstanceGenerator):
         seed: int | None = None,
         iteration_limit: int | None = None,
     ):
-        """Initializes the instance generator with the given parameters.
-
-        Args:
-            num_jobs:
-                The range of the number of jobs to generate.
-            num_machines:
-                The range of the number of machines available.
-            duration_range:
-                The range of durations for each operation.
-            allow_less_jobs_than_machines:
-                Allows instances with fewer jobs than machines.
-            allow_recirculation:
-                Allows jobs to visit the same machine multiple times.
-            machines_per_operation:
-                Specifies how many machines each operation can be assigned to.
-                If a single int is provided, it is used for all operations.
-            name_suffix:
-                Suffix for instance names.
-            seed:
-                Seed for the random number generator.
-            iteration_limit:
-                Maximum number of instances to generate in iteration mode.
-        """
         super().__init__(
             num_jobs=num_jobs,
             num_machines=num_machines,
@@ -153,7 +152,7 @@ class GeneralInstanceGenerator(InstanceGenerator):
         Args:
             available_machines:
                 A list of available machine_ids to choose from.
-                If None
+                If ``None``, all machines are available.
         """
         duration = random.randint(*self.duration_range)
 
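
A sketch of the documented behaviour (not part of the diff): single-shot generation plus bounded iteration, assuming the generator is re-exported from job_shop_lib.generation:

from job_shop_lib.generation import GeneralInstanceGenerator

generator = GeneralInstanceGenerator(
    num_jobs=(3, 5),
    num_machines=(3, 4),
    duration_range=(1, 10),
    seed=42,
    iteration_limit=5,
)

single_instance = generator.generate()  # one random instance
for instance in generator:  # stops after iteration_limit instances
    print(instance.name, instance.num_jobs, instance.num_machines)
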
job_shop_lib/generation/_instance_generator.py
CHANGED
@@ -32,6 +32,20 @@ class InstanceGenerator(abc.ABC):
             A suffix to append to each instance's name for identification.
         seed:
             Seed for the random number generator to ensure reproducibility.
+
+    Args:
+        num_jobs:
+            The range of the number of jobs to generate.
+        num_machines:
+            The range of the number of machines available.
+        duration_range:
+            The range of durations for each operation.
+        name_suffix:
+            Suffix for instance names.
+        seed:
+            Seed for the random number generator.
+        iteration_limit:
+            Maximum number of instances to generate in iteration mode.
     """
 
     def __init__( # pylint: disable=too-many-arguments
@@ -43,23 +57,6 @@ class InstanceGenerator(abc.ABC):
         seed: int | None = None,
         iteration_limit: int | None = None,
     ):
-        """Initializes the instance generator with the given parameters.
-
-        Args:
-            num_jobs:
-                The range of the number of jobs to generate.
-            num_machines:
-                The range of the number of machines available.
-            duration_range:
-                The range of durations for each operation.
-            name_suffix:
-                Suffix for instance names.
-            seed:
-                Seed for the random number generator.
-            iteration_limit:
-                Maximum number of instances to generate in iteration mode.
-        """
-
         if isinstance(num_jobs, int):
             num_jobs = (num_jobs, num_jobs)
         if isinstance(num_machines, int):
job_shop_lib/generation/_transformations.py
CHANGED
@@ -111,7 +111,17 @@ class AddDurationNoise(Transformation):
 
 
 class RemoveJobs(Transformation):
     """Removes jobs randomly until the number of jobs is within a specified
-    range.
+    range.
+
+    Args:
+        min_jobs:
+            The minimum number of jobs to remain in the instance.
+        max_jobs:
+            The maximum number of jobs to remain in the instance.
+        target_jobs:
+            If specified, the number of jobs to remain in the
+            instance. Overrides ``min_jobs`` and ``max_jobs``.
+    """
 
     def __init__(
         self,
@@ -120,13 +130,6 @@ class RemoveJobs(Transformation):
         target_jobs: int | None = None,
         suffix: str | None = None,
     ):
-        """
-        Args:
-            min_jobs: The minimum number of jobs to remain in the instance.
-            max_jobs: The maximum number of jobs to remain in the instance.
-            target_jobs: If specified, the number of jobs to remain in the
-                instance. Overrides min_jobs and max_jobs.
-        """
         if suffix is None:
             suffix = f"_jobs={min_jobs}-{max_jobs}"
         super().__init__(suffix=suffix)
job_shop_lib/graphs/__init__.py
CHANGED
@@ -7,6 +7,7 @@ The main classes and functions available in this package are:
     Node
     NodeType
     build_disjunctive_graph
+    build_solved_disjunctive_graph
     build_agent_task_graph
     build_complete_agent_task_graph
     build_agent_task_graph_with_jobs
@@ -18,6 +19,7 @@ from job_shop_lib.graphs._node import Node
 from job_shop_lib.graphs._job_shop_graph import JobShopGraph, NODE_ATTR
 from job_shop_lib.graphs._build_disjunctive_graph import (
     build_disjunctive_graph,
+    build_solved_disjunctive_graph,
     add_disjunctive_edges,
     add_conjunctive_edges,
     add_source_sink_nodes,
@@ -62,4 +64,5 @@ __all__ = [
     "add_global_node",
     "add_machine_global_edges",
     "add_job_global_edges",
+    "build_solved_disjunctive_graph",
 ]
job_shop_lib/graphs/_build_disjunctive_graph.py
CHANGED
@@ -18,14 +18,15 @@ each disjunctive edge such that the overall processing time is minimized.
 
 import itertools
 
-from job_shop_lib import JobShopInstance
+from job_shop_lib import JobShopInstance, Schedule
 from job_shop_lib.graphs import JobShopGraph, EdgeType, NodeType, Node
 
 
 def build_disjunctive_graph(instance: JobShopInstance) -> JobShopGraph:
     """Builds and returns a disjunctive graph for the given job shop instance.
 
-    This function creates a complete disjunctive graph from a
+    This function creates a complete disjunctive graph from a
+    :JobShopInstance.
     It starts by initializing a JobShopGraph object and proceeds by adding
     disjunctive edges between operations using the same machine, conjunctive
     edges between successive operations in the same job, and finally, special
@@ -40,7 +41,7 @@ def build_disjunctive_graph(instance: JobShopInstance) -> JobShopGraph:
         the graph.
 
     Returns:
-
+        A :class:`JobShopGraph` object representing the disjunctive graph
         of the job shop scheduling problem.
     """
     graph = JobShopGraph(instance)
@@ -51,6 +52,43 @@ def build_disjunctive_graph(instance: JobShopInstance) -> JobShopGraph:
     return graph
 
 
+def build_solved_disjunctive_graph(schedule: Schedule) -> JobShopGraph:
+    """Builds and returns a disjunctive graph for the given solved schedule.
+
+    This function constructs a disjunctive graph from the given schedule,
+    keeping only the disjunctive edges that represent the chosen ordering
+    of operations on each machine as per the solution schedule.
+
+    Args:
+        schedule (Schedule): The solved schedule that contains the sequencing
+            of operations on each machine.
+
+    Returns:
+        A JobShopGraph object representing the disjunctive graph
+        of the solved job shop scheduling problem.
+    """
+    # Build the base disjunctive graph from the job shop instance
+    graph = JobShopGraph(schedule.instance)
+    add_conjunctive_edges(graph)
+    add_source_sink_nodes(graph)
+    add_source_sink_edges(graph)
+
+    # Iterate over each machine and add only the edges that match the solution
+    # order
+    for machine_schedule in schedule.schedule:
+        for i, scheduled_operation in enumerate(machine_schedule):
+            if i + 1 >= len(machine_schedule):
+                break
+            next_scheduled_operation = machine_schedule[i + 1]
+            graph.add_edge(
+                scheduled_operation.operation.operation_id,
+                next_scheduled_operation.operation.operation_id,
+                type=EdgeType.DISJUNCTIVE,
+            )
+
+    return graph
+
+
 def add_disjunctive_edges(graph: JobShopGraph) -> None:
     """Adds disjunctive edges to the graph."""
 
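
A sketch (not part of the diff) of the new build_solved_disjunctive_graph entry point, combining it with the dispatching-rule solver shown earlier; the instance is a placeholder:

from job_shop_lib import JobShopInstance, Operation
from job_shop_lib.dispatching.rules import DispatchingRuleSolver
from job_shop_lib.graphs import build_solved_disjunctive_graph

instance = JobShopInstance(
    [[Operation(0, 2), Operation(1, 3)], [Operation(1, 2), Operation(0, 1)]]
)
schedule = DispatchingRuleSolver().solve(instance)

# Only the machine orderings chosen by the schedule remain as disjunctive
# edges; this assumes JobShopGraph exposes its underlying networkx graph
# as the `graph` attribute.
solved_graph = build_solved_disjunctive_graph(schedule)
print(solved_graph.graph.number_of_edges())
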