toil 7.0.0__py3-none-any.whl → 8.1.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (197) hide show
  1. toil/__init__.py +124 -86
  2. toil/batchSystems/__init__.py +1 -0
  3. toil/batchSystems/abstractBatchSystem.py +137 -77
  4. toil/batchSystems/abstractGridEngineBatchSystem.py +211 -101
  5. toil/batchSystems/awsBatch.py +237 -128
  6. toil/batchSystems/cleanup_support.py +22 -16
  7. toil/batchSystems/contained_executor.py +30 -26
  8. toil/batchSystems/gridengine.py +85 -49
  9. toil/batchSystems/htcondor.py +164 -87
  10. toil/batchSystems/kubernetes.py +622 -386
  11. toil/batchSystems/local_support.py +17 -12
  12. toil/batchSystems/lsf.py +132 -79
  13. toil/batchSystems/lsfHelper.py +13 -11
  14. toil/batchSystems/mesos/__init__.py +41 -29
  15. toil/batchSystems/mesos/batchSystem.py +288 -149
  16. toil/batchSystems/mesos/executor.py +77 -49
  17. toil/batchSystems/mesos/test/__init__.py +31 -23
  18. toil/batchSystems/options.py +39 -29
  19. toil/batchSystems/registry.py +53 -19
  20. toil/batchSystems/singleMachine.py +293 -123
  21. toil/batchSystems/slurm.py +651 -155
  22. toil/batchSystems/torque.py +46 -32
  23. toil/bus.py +141 -73
  24. toil/common.py +784 -397
  25. toil/cwl/__init__.py +1 -1
  26. toil/cwl/cwltoil.py +1137 -534
  27. toil/cwl/utils.py +17 -22
  28. toil/deferred.py +62 -41
  29. toil/exceptions.py +5 -3
  30. toil/fileStores/__init__.py +5 -5
  31. toil/fileStores/abstractFileStore.py +88 -57
  32. toil/fileStores/cachingFileStore.py +711 -247
  33. toil/fileStores/nonCachingFileStore.py +113 -75
  34. toil/job.py +1031 -349
  35. toil/jobStores/abstractJobStore.py +387 -243
  36. toil/jobStores/aws/jobStore.py +772 -412
  37. toil/jobStores/aws/utils.py +161 -109
  38. toil/jobStores/conftest.py +1 -0
  39. toil/jobStores/fileJobStore.py +289 -151
  40. toil/jobStores/googleJobStore.py +137 -70
  41. toil/jobStores/utils.py +36 -15
  42. toil/leader.py +614 -269
  43. toil/lib/accelerators.py +115 -18
  44. toil/lib/aws/__init__.py +55 -28
  45. toil/lib/aws/ami.py +122 -87
  46. toil/lib/aws/iam.py +284 -108
  47. toil/lib/aws/s3.py +31 -0
  48. toil/lib/aws/session.py +204 -58
  49. toil/lib/aws/utils.py +290 -213
  50. toil/lib/bioio.py +13 -5
  51. toil/lib/compatibility.py +11 -6
  52. toil/lib/conversions.py +83 -49
  53. toil/lib/docker.py +131 -103
  54. toil/lib/dockstore.py +379 -0
  55. toil/lib/ec2.py +322 -209
  56. toil/lib/ec2nodes.py +174 -105
  57. toil/lib/encryption/_dummy.py +5 -3
  58. toil/lib/encryption/_nacl.py +10 -6
  59. toil/lib/encryption/conftest.py +1 -0
  60. toil/lib/exceptions.py +26 -7
  61. toil/lib/expando.py +4 -2
  62. toil/lib/ftp_utils.py +217 -0
  63. toil/lib/generatedEC2Lists.py +127 -19
  64. toil/lib/history.py +1271 -0
  65. toil/lib/history_submission.py +681 -0
  66. toil/lib/humanize.py +6 -2
  67. toil/lib/io.py +121 -12
  68. toil/lib/iterables.py +4 -2
  69. toil/lib/memoize.py +12 -8
  70. toil/lib/misc.py +83 -18
  71. toil/lib/objects.py +2 -2
  72. toil/lib/resources.py +19 -7
  73. toil/lib/retry.py +125 -87
  74. toil/lib/threading.py +282 -80
  75. toil/lib/throttle.py +15 -14
  76. toil/lib/trs.py +390 -0
  77. toil/lib/web.py +38 -0
  78. toil/options/common.py +850 -402
  79. toil/options/cwl.py +185 -90
  80. toil/options/runner.py +50 -0
  81. toil/options/wdl.py +70 -19
  82. toil/provisioners/__init__.py +111 -46
  83. toil/provisioners/abstractProvisioner.py +322 -157
  84. toil/provisioners/aws/__init__.py +62 -30
  85. toil/provisioners/aws/awsProvisioner.py +980 -627
  86. toil/provisioners/clusterScaler.py +541 -279
  87. toil/provisioners/gceProvisioner.py +283 -180
  88. toil/provisioners/node.py +147 -79
  89. toil/realtimeLogger.py +34 -22
  90. toil/resource.py +137 -75
  91. toil/server/app.py +127 -61
  92. toil/server/celery_app.py +3 -1
  93. toil/server/cli/wes_cwl_runner.py +84 -55
  94. toil/server/utils.py +56 -31
  95. toil/server/wes/abstract_backend.py +64 -26
  96. toil/server/wes/amazon_wes_utils.py +21 -15
  97. toil/server/wes/tasks.py +121 -63
  98. toil/server/wes/toil_backend.py +142 -107
  99. toil/server/wsgi_app.py +4 -3
  100. toil/serviceManager.py +58 -22
  101. toil/statsAndLogging.py +183 -65
  102. toil/test/__init__.py +263 -179
  103. toil/test/batchSystems/batchSystemTest.py +438 -195
  104. toil/test/batchSystems/batch_system_plugin_test.py +18 -7
  105. toil/test/batchSystems/test_gridengine.py +173 -0
  106. toil/test/batchSystems/test_lsf_helper.py +67 -58
  107. toil/test/batchSystems/test_slurm.py +265 -49
  108. toil/test/cactus/test_cactus_integration.py +20 -22
  109. toil/test/cwl/conftest.py +39 -0
  110. toil/test/cwl/cwlTest.py +375 -72
  111. toil/test/cwl/measure_default_memory.cwl +12 -0
  112. toil/test/cwl/not_run_required_input.cwl +29 -0
  113. toil/test/cwl/optional-file.cwl +18 -0
  114. toil/test/cwl/scatter_duplicate_outputs.cwl +40 -0
  115. toil/test/docs/scriptsTest.py +60 -34
  116. toil/test/jobStores/jobStoreTest.py +412 -235
  117. toil/test/lib/aws/test_iam.py +116 -48
  118. toil/test/lib/aws/test_s3.py +16 -9
  119. toil/test/lib/aws/test_utils.py +5 -6
  120. toil/test/lib/dockerTest.py +118 -141
  121. toil/test/lib/test_conversions.py +113 -115
  122. toil/test/lib/test_ec2.py +57 -49
  123. toil/test/lib/test_history.py +212 -0
  124. toil/test/lib/test_misc.py +12 -5
  125. toil/test/lib/test_trs.py +161 -0
  126. toil/test/mesos/MesosDataStructuresTest.py +23 -10
  127. toil/test/mesos/helloWorld.py +7 -6
  128. toil/test/mesos/stress.py +25 -20
  129. toil/test/options/options.py +7 -2
  130. toil/test/provisioners/aws/awsProvisionerTest.py +293 -140
  131. toil/test/provisioners/clusterScalerTest.py +440 -250
  132. toil/test/provisioners/clusterTest.py +81 -42
  133. toil/test/provisioners/gceProvisionerTest.py +174 -100
  134. toil/test/provisioners/provisionerTest.py +25 -13
  135. toil/test/provisioners/restartScript.py +5 -4
  136. toil/test/server/serverTest.py +188 -141
  137. toil/test/sort/restart_sort.py +137 -68
  138. toil/test/sort/sort.py +134 -66
  139. toil/test/sort/sortTest.py +91 -49
  140. toil/test/src/autoDeploymentTest.py +140 -100
  141. toil/test/src/busTest.py +20 -18
  142. toil/test/src/checkpointTest.py +8 -2
  143. toil/test/src/deferredFunctionTest.py +49 -35
  144. toil/test/src/dockerCheckTest.py +33 -26
  145. toil/test/src/environmentTest.py +20 -10
  146. toil/test/src/fileStoreTest.py +538 -271
  147. toil/test/src/helloWorldTest.py +7 -4
  148. toil/test/src/importExportFileTest.py +61 -31
  149. toil/test/src/jobDescriptionTest.py +32 -17
  150. toil/test/src/jobEncapsulationTest.py +2 -0
  151. toil/test/src/jobFileStoreTest.py +74 -50
  152. toil/test/src/jobServiceTest.py +187 -73
  153. toil/test/src/jobTest.py +120 -70
  154. toil/test/src/miscTests.py +19 -18
  155. toil/test/src/promisedRequirementTest.py +82 -36
  156. toil/test/src/promisesTest.py +7 -6
  157. toil/test/src/realtimeLoggerTest.py +6 -6
  158. toil/test/src/regularLogTest.py +71 -37
  159. toil/test/src/resourceTest.py +80 -49
  160. toil/test/src/restartDAGTest.py +36 -22
  161. toil/test/src/resumabilityTest.py +9 -2
  162. toil/test/src/retainTempDirTest.py +45 -14
  163. toil/test/src/systemTest.py +12 -8
  164. toil/test/src/threadingTest.py +44 -25
  165. toil/test/src/toilContextManagerTest.py +10 -7
  166. toil/test/src/userDefinedJobArgTypeTest.py +8 -5
  167. toil/test/src/workerTest.py +33 -16
  168. toil/test/utils/toilDebugTest.py +70 -58
  169. toil/test/utils/toilKillTest.py +4 -5
  170. toil/test/utils/utilsTest.py +239 -102
  171. toil/test/wdl/wdltoil_test.py +789 -148
  172. toil/test/wdl/wdltoil_test_kubernetes.py +37 -23
  173. toil/toilState.py +52 -26
  174. toil/utils/toilConfig.py +13 -4
  175. toil/utils/toilDebugFile.py +44 -27
  176. toil/utils/toilDebugJob.py +85 -25
  177. toil/utils/toilDestroyCluster.py +11 -6
  178. toil/utils/toilKill.py +8 -3
  179. toil/utils/toilLaunchCluster.py +251 -145
  180. toil/utils/toilMain.py +37 -16
  181. toil/utils/toilRsyncCluster.py +27 -14
  182. toil/utils/toilSshCluster.py +45 -22
  183. toil/utils/toilStats.py +75 -36
  184. toil/utils/toilStatus.py +226 -119
  185. toil/utils/toilUpdateEC2Instances.py +3 -1
  186. toil/version.py +6 -6
  187. toil/wdl/utils.py +5 -5
  188. toil/wdl/wdltoil.py +3528 -1053
  189. toil/worker.py +370 -149
  190. toil-8.1.0b1.dist-info/METADATA +178 -0
  191. toil-8.1.0b1.dist-info/RECORD +259 -0
  192. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/WHEEL +1 -1
  193. toil-7.0.0.dist-info/METADATA +0 -158
  194. toil-7.0.0.dist-info/RECORD +0 -244
  195. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/LICENSE +0 -0
  196. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/entry_points.txt +0 -0
  197. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/top_level.txt +0 -0
toil/lib/ec2.py CHANGED
@@ -1,25 +1,29 @@
1
1
  import logging
2
2
  import time
3
3
  from base64 import b64encode
4
- from operator import itemgetter
5
- from typing import Dict, Iterable, List, Optional, Union, TYPE_CHECKING, Generator, Callable, Mapping, Any
6
-
7
- import botocore.client
8
- from boto3.resources.base import ServiceResource
4
+ from collections.abc import Generator, Iterable, Mapping
5
+ from typing import TYPE_CHECKING, Any, Callable, Optional, Union
9
6
 
10
7
  from toil.lib.aws.session import establish_boto3_session
11
8
  from toil.lib.aws.utils import flatten_tags
12
9
  from toil.lib.exceptions import panic
13
- from toil.lib.retry import (ErrorCondition,
14
- get_error_code,
15
- get_error_message,
16
- old_retry,
17
- retry)
18
-
19
- from mypy_boto3_ec2.client import EC2Client
20
- from mypy_boto3_autoscaling.client import AutoScalingClient
21
- from mypy_boto3_ec2.type_defs import SpotInstanceRequestTypeDef, DescribeInstancesResultTypeDef, InstanceTypeDef
22
- from mypy_boto3_ec2.service_resource import EC2ServiceResource, Instance
10
+ from toil.lib.retry import (
11
+ ErrorCondition,
12
+ get_error_code,
13
+ get_error_message,
14
+ old_retry,
15
+ retry,
16
+ )
17
+
18
+ if TYPE_CHECKING:
19
+ from mypy_boto3_autoscaling.client import AutoScalingClient
20
+ from mypy_boto3_ec2.client import EC2Client
21
+ from mypy_boto3_ec2.service_resource import EC2ServiceResource, Instance
22
+ from mypy_boto3_ec2.type_defs import (
23
+ DescribeInstancesResultTypeDef,
24
+ InstanceTypeDef,
25
+ SpotInstanceRequestTypeDef,
26
+ )
23
27
 
24
28
  a_short_time = 5
25
29
  a_long_time = 60 * 60
@@ -30,47 +34,55 @@ class UserError(RuntimeError):
30
34
  def __init__(self, message=None, cause=None):
31
35
  if (message is None) == (cause is None):
32
36
  raise RuntimeError("Must pass either message or cause.")
33
- super().__init__(
34
- message if cause is None else cause.message)
37
+ super().__init__(message if cause is None else cause.message)
35
38
 
36
39
 
37
40
  def not_found(e):
38
41
  try:
39
- return get_error_code(e).endswith('.NotFound')
42
+ return get_error_code(e).endswith(".NotFound")
40
43
  except ValueError:
41
44
  # Not the right kind of error
42
45
  return False
43
46
 
44
47
 
45
48
  def inconsistencies_detected(e):
46
- if get_error_code(e) == 'InvalidGroup.NotFound':
49
+ if get_error_code(e) == "InvalidGroup.NotFound":
47
50
  return True
48
51
  m = get_error_message(e).lower()
49
- matches = ('invalid iam instance profile' in m) or ('no associated iam roles' in m)
52
+ matches = ("invalid iam instance profile" in m) or ("no associated iam roles" in m)
50
53
  return matches
51
54
 
52
55
 
53
56
  # We also define these error categories for the new retry decorator
54
- INCONSISTENCY_ERRORS = [ErrorCondition(boto_error_codes=['InvalidGroup.NotFound']),
55
- ErrorCondition(error_message_must_include='Invalid IAM Instance Profile'),
56
- ErrorCondition(error_message_must_include='no associated IAM Roles')]
57
+ INCONSISTENCY_ERRORS = [
58
+ ErrorCondition(boto_error_codes=["InvalidGroup.NotFound"]),
59
+ ErrorCondition(error_message_must_include="Invalid IAM Instance Profile"),
60
+ ErrorCondition(error_message_must_include="no associated IAM Roles"),
61
+ ]
57
62
 
58
63
 
59
64
  def retry_ec2(t=a_short_time, retry_for=10 * a_short_time, retry_while=not_found):
60
- return old_retry(delays=(t, t, t * 2, t * 4),
61
- timeout=retry_for,
62
- predicate=retry_while)
65
+ return old_retry(
66
+ delays=(t, t, t * 2, t * 4), timeout=retry_for, predicate=retry_while
67
+ )
63
68
 
64
69
 
65
70
  class UnexpectedResourceState(Exception):
66
71
  def __init__(self, resource, to_state, state):
67
72
  super().__init__(
68
- "Expected state of %s to be '%s' but got '%s'" %
69
- (resource, to_state, state))
70
-
71
-
72
- def wait_transition(boto3_ec2: EC2Client, resource: InstanceTypeDef, from_states: Iterable[str], to_state: str,
73
- state_getter: Callable[[InstanceTypeDef], str]=lambda x: x.get('State').get('Name')):
73
+ "Expected state of %s to be '%s' but got '%s'" % (resource, to_state, state)
74
+ )
75
+
76
+
77
+ def wait_transition(
78
+ boto3_ec2: "EC2Client",
79
+ resource: "InstanceTypeDef",
80
+ from_states: Iterable[str],
81
+ to_state: str,
82
+ state_getter: Callable[["InstanceTypeDef"], str] = lambda x: x.get("State").get(
83
+ "Name"
84
+ ),
85
+ ):
74
86
  """
75
87
  Wait until the specified EC2 resource (instance, image, volume, ...) transitions from any
76
88
  of the given 'from' states to the specified 'to' state. If the instance is found in a state
@@ -88,53 +100,73 @@ def wait_transition(boto3_ec2: EC2Client, resource: InstanceTypeDef, from_states
88
100
  for attempt in retry_ec2():
89
101
  with attempt:
90
102
  described = boto3_ec2.describe_instances(InstanceIds=[instance_id])
91
- resource = described["Reservations"][0]["Instances"][0] # there should only be one requested
103
+ resource = described["Reservations"][0]["Instances"][
104
+ 0
105
+ ] # there should only be one requested
92
106
  state = state_getter(resource)
93
107
  if state != to_state:
94
108
  raise UnexpectedResourceState(resource, to_state, state)
95
109
 
96
110
 
97
- def wait_instances_running(boto3_ec2: EC2Client, instances: Iterable[InstanceTypeDef]) -> Generator[InstanceTypeDef, None, None]:
111
+ def wait_instances_running(
112
+ boto3_ec2: "EC2Client", instances: Iterable["InstanceTypeDef"]
113
+ ) -> Generator["InstanceTypeDef", None, None]:
98
114
  """
99
115
  Wait until no instance in the given iterable is 'pending'. Yield every instance that
100
116
  entered the running state as soon as it does.
101
117
 
102
- :param EC2Client boto3_ec2: the EC2 connection to use for making requests
103
- :param Iterable[InstanceTypeDef] instances: the instances to wait on
104
- :rtype: Iterable[InstanceTypeDef]
118
+ :param boto3_ec2: the EC2 connection to use for making requests
119
+ :param instances: the instances to wait on
105
120
  """
106
121
  running_ids = set()
107
122
  other_ids = set()
108
123
  while True:
109
124
  pending_ids = set()
110
125
  for i in instances:
111
- i: InstanceTypeDef
112
- if i['State']['Name'] == 'pending':
113
- pending_ids.add(i['InstanceId'])
114
- elif i['State']['Name'] == 'running':
115
- if i['InstanceId'] in running_ids:
116
- raise RuntimeError("An instance was already added to the list of running instance IDs. Maybe there is a duplicate.")
117
- running_ids.add(i['InstanceId'])
126
+ i: "InstanceTypeDef"
127
+ if i["State"]["Name"] == "pending":
128
+ pending_ids.add(i["InstanceId"])
129
+ elif i["State"]["Name"] == "running":
130
+ if i["InstanceId"] in running_ids:
131
+ raise RuntimeError(
132
+ "An instance was already added to the list of running instance IDs. Maybe there is a duplicate."
133
+ )
134
+ running_ids.add(i["InstanceId"])
118
135
  yield i
119
136
  else:
120
- if i['InstanceId'] in other_ids:
121
- raise RuntimeError("An instance was already added to the list of other instances. Maybe there is a duplicate.")
122
- other_ids.add(i['InstanceId'])
137
+ if i["InstanceId"] in other_ids:
138
+ raise RuntimeError(
139
+ "An instance was already added to the list of other instances. Maybe there is a duplicate."
140
+ )
141
+ other_ids.add(i["InstanceId"])
123
142
  yield i
124
- logger.info('%i instance(s) pending, %i running, %i other.',
125
- *list(map(len, (pending_ids, running_ids, other_ids))))
143
+ logger.info(
144
+ "%i instance(s) pending, %i running, %i other.",
145
+ *list(map(len, (pending_ids, running_ids, other_ids))),
146
+ )
126
147
  if not pending_ids:
127
148
  break
128
149
  seconds = max(a_short_time, min(len(pending_ids), 10 * a_short_time))
129
- logger.info('Sleeping for %is', seconds)
150
+ logger.info("Sleeping for %is", seconds)
130
151
  time.sleep(seconds)
131
152
  for attempt in retry_ec2():
132
153
  with attempt:
133
- described_instances = boto3_ec2.describe_instances(InstanceIds=list(pending_ids))
134
- instances = [instance for reservation in described_instances["Reservations"] for instance in reservation["Instances"]]
135
-
136
-
137
- def wait_spot_requests_active(boto3_ec2: EC2Client, requests: Iterable[SpotInstanceRequestTypeDef], timeout: float = None, tentative: bool = False) -> Iterable[List[SpotInstanceRequestTypeDef]]:
154
+ described_instances = boto3_ec2.describe_instances(
155
+ InstanceIds=list(pending_ids)
156
+ )
157
+ instances = [
158
+ instance
159
+ for reservation in described_instances["Reservations"]
160
+ for instance in reservation["Instances"]
161
+ ]
162
+
163
+
164
+ def wait_spot_requests_active(
165
+ boto3_ec2: "EC2Client",
166
+ requests: Iterable["SpotInstanceRequestTypeDef"],
167
+ timeout: float = None,
168
+ tentative: bool = False,
169
+ ) -> Iterable[list["SpotInstanceRequestTypeDef"]]:
138
170
  """
139
171
  Wait until no spot request in the given iterator is in the 'open' state or, optionally,
140
172
  a timeout occurs. Yield spot requests as soon as they leave the 'open' state.
@@ -157,54 +189,67 @@ def wait_spot_requests_active(boto3_ec2: EC2Client, requests: Iterable[SpotInsta
157
189
  open_ids = None
158
190
 
159
191
  def cancel() -> None:
160
- logger.warning('Cancelling remaining %i spot requests.', len(open_ids))
192
+ logger.warning("Cancelling remaining %i spot requests.", len(open_ids))
161
193
  boto3_ec2.cancel_spot_instance_requests(SpotInstanceRequestIds=list(open_ids))
162
194
 
163
195
  def spot_request_not_found(e: Exception) -> bool:
164
- return get_error_code(e) == 'InvalidSpotInstanceRequestID.NotFound'
196
+ return get_error_code(e) == "InvalidSpotInstanceRequestID.NotFound"
165
197
 
166
198
  try:
167
199
  while True:
168
200
  open_ids, eval_ids, fulfill_ids = set(), set(), set()
169
201
  batch = []
170
202
  for r in requests:
171
- r: SpotInstanceRequestTypeDef # pycharm thinks it is a string
172
- if r['State'] == 'open':
173
- open_ids.add(r['InstanceId'])
174
- if r['Status'] == 'pending-evaluation':
175
- eval_ids.add(r['InstanceId'])
176
- elif r['Status'] == 'pending-fulfillment':
177
- fulfill_ids.add(r['InstanceId'])
203
+ r: "SpotInstanceRequestTypeDef" # pycharm thinks it is a string
204
+ if r["State"] == "open":
205
+ open_ids.add(r["InstanceId"])
206
+ if r["Status"] == "pending-evaluation":
207
+ eval_ids.add(r["InstanceId"])
208
+ elif r["Status"] == "pending-fulfillment":
209
+ fulfill_ids.add(r["InstanceId"])
178
210
  else:
179
211
  logger.info(
180
- 'Request %s entered status %s indicating that it will not be '
181
- 'fulfilled anytime soon.', r['InstanceId'], r['Status'])
182
- elif r['State'] == 'active':
183
- if r['InstanceId'] in active_ids:
184
- raise RuntimeError("A request was already added to the list of active requests. Maybe there are duplicate requests.")
185
- active_ids.add(r['InstanceId'])
212
+ "Request %s entered status %s indicating that it will not be "
213
+ "fulfilled anytime soon.",
214
+ r["InstanceId"],
215
+ r["Status"],
216
+ )
217
+ elif r["State"] == "active":
218
+ if r["InstanceId"] in active_ids:
219
+ raise RuntimeError(
220
+ "A request was already added to the list of active requests. Maybe there are duplicate requests."
221
+ )
222
+ active_ids.add(r["InstanceId"])
186
223
  batch.append(r)
187
224
  else:
188
- if r['InstanceId'] in other_ids:
189
- raise RuntimeError("A request was already added to the list of other IDs. Maybe there are duplicate requests.")
190
- other_ids.add(r['InstanceId'])
225
+ if r["InstanceId"] in other_ids:
226
+ raise RuntimeError(
227
+ "A request was already added to the list of other IDs. Maybe there are duplicate requests."
228
+ )
229
+ other_ids.add(r["InstanceId"])
191
230
  batch.append(r)
192
231
  if batch:
193
232
  yield batch
194
- logger.info('%i spot requests(s) are open (%i of which are pending evaluation and %i '
195
- 'are pending fulfillment), %i are active and %i are in another state.',
196
- *list(map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids))))
233
+ logger.info(
234
+ "%i spot requests(s) are open (%i of which are pending evaluation and %i "
235
+ "are pending fulfillment), %i are active and %i are in another state.",
236
+ *list(
237
+ map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids))
238
+ ),
239
+ )
197
240
  if not open_ids or tentative and not eval_ids and not fulfill_ids:
198
241
  break
199
242
  sleep_time = 2 * a_short_time
200
243
  if timeout is not None and time.time() + sleep_time >= timeout:
201
- logger.warning('Timed out waiting for spot requests.')
244
+ logger.warning("Timed out waiting for spot requests.")
202
245
  break
203
- logger.info('Sleeping for %is', sleep_time)
246
+ logger.info("Sleeping for %is", sleep_time)
204
247
  time.sleep(sleep_time)
205
248
  for attempt in retry_ec2(retry_while=spot_request_not_found):
206
249
  with attempt:
207
- requests = boto3_ec2.describe_spot_instance_requests(SpotInstanceRequestIds=list(open_ids))
250
+ requests = boto3_ec2.describe_spot_instance_requests(
251
+ SpotInstanceRequestIds=list(open_ids)
252
+ )
208
253
  except BaseException:
209
254
  if open_ids:
210
255
  with panic(logger):
@@ -215,24 +260,37 @@ def wait_spot_requests_active(boto3_ec2: EC2Client, requests: Iterable[SpotInsta
215
260
  cancel()
216
261
 
217
262
 
218
- def create_spot_instances(boto3_ec2: EC2Client, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None) -> Generator[DescribeInstancesResultTypeDef, None, None]:
263
+ def create_spot_instances(
264
+ boto3_ec2: "EC2Client",
265
+ price,
266
+ image_id,
267
+ spec,
268
+ num_instances=1,
269
+ timeout=None,
270
+ tentative=False,
271
+ tags=None,
272
+ ) -> Generator["DescribeInstancesResultTypeDef", None, None]:
219
273
  """
220
274
  Create instances on the spot market.
221
275
  """
222
276
 
223
277
  def spotRequestNotFound(e):
224
- return getattr(e, 'error_code', None) == "InvalidSpotInstanceRequestID.NotFound"
225
-
226
- spec['LaunchSpecification'].update({'ImageId': image_id}) # boto3 image id is in the launch specification
227
- for attempt in retry_ec2(retry_for=a_long_time,
228
- retry_while=inconsistencies_detected):
278
+ return getattr(e, "error_code", None) == "InvalidSpotInstanceRequestID.NotFound"
279
+
280
+ spec["LaunchSpecification"].update(
281
+ {"ImageId": image_id}
282
+ ) # boto3 image id is in the launch specification
283
+ for attempt in retry_ec2(
284
+ retry_for=a_long_time, retry_while=inconsistencies_detected
285
+ ):
229
286
  with attempt:
230
287
  requests_dict = boto3_ec2.request_spot_instances(
231
- SpotPrice=price, InstanceCount=num_instances, **spec)
232
- requests = requests_dict['SpotInstanceRequests']
288
+ SpotPrice=price, InstanceCount=num_instances, **spec
289
+ )
290
+ requests = requests_dict["SpotInstanceRequests"]
233
291
 
234
292
  if tags is not None:
235
- for requestID in (request['SpotInstanceRequestId'] for request in requests):
293
+ for requestID in (request["SpotInstanceRequestId"] for request in requests):
236
294
  for attempt in retry_ec2(retry_while=spotRequestNotFound):
237
295
  with attempt:
238
296
  boto3_ec2.create_tags(Resources=[requestID], Tags=tags)
@@ -240,21 +298,21 @@ def create_spot_instances(boto3_ec2: EC2Client, price, image_id, spec, num_insta
240
298
  num_active, num_other = 0, 0
241
299
  # noinspection PyUnboundLocalVariable,PyTypeChecker
242
300
  # request_spot_instances's type annotation is wrong
243
- for batch in wait_spot_requests_active(boto3_ec2,
244
- requests,
245
- timeout=timeout,
246
- tentative=tentative):
301
+ for batch in wait_spot_requests_active(
302
+ boto3_ec2, requests, timeout=timeout, tentative=tentative
303
+ ):
247
304
  instance_ids = []
248
305
  for request in batch:
249
- request: SpotInstanceRequestTypeDef
250
- if request["State"] == 'active':
306
+ request: "SpotInstanceRequestTypeDef"
307
+ if request["State"] == "active":
251
308
  instance_ids.append(request["InstanceId"])
252
309
  num_active += 1
253
310
  else:
254
311
  logger.info(
255
- 'Request %s in unexpected state %s.',
312
+ "Request %s in unexpected state %s.",
256
313
  request["InstanceId"],
257
- request["State"])
314
+ request["State"],
315
+ )
258
316
  num_other += 1
259
317
  if instance_ids:
260
318
  # This next line is the reason we batch. It's so we can get multiple instances in
@@ -263,40 +321,47 @@ def create_spot_instances(boto3_ec2: EC2Client, price, image_id, spec, num_insta
263
321
  for attempt in retry_ec2():
264
322
  with attempt:
265
323
  # Increase hop limit from 1 to use Instance Metadata V2
266
- boto3_ec2.modify_instance_metadata_options(InstanceId=instance_id, HttpPutResponseHopLimit=3)
324
+ boto3_ec2.modify_instance_metadata_options(
325
+ InstanceId=instance_id, HttpPutResponseHopLimit=3
326
+ )
267
327
  yield boto3_ec2.describe_instances(InstanceIds=instance_ids)
268
328
  if not num_active:
269
- message = 'None of the spot requests entered the active state'
329
+ message = "None of the spot requests entered the active state"
270
330
  if tentative:
271
- logger.warning(message + '.')
331
+ logger.warning(message + ".")
272
332
  else:
273
333
  raise RuntimeError(message)
274
334
  if num_other:
275
- logger.warning('%i request(s) entered a state other than active.', num_other)
335
+ logger.warning("%i request(s) entered a state other than active.", num_other)
276
336
 
277
337
 
278
- def create_ondemand_instances(boto3_ec2: EC2Client, image_id: str, spec: Mapping[str, Any], num_instances: int=1) -> List[InstanceTypeDef]:
338
+ def create_ondemand_instances(
339
+ boto3_ec2: "EC2Client",
340
+ image_id: str,
341
+ spec: Mapping[str, Any],
342
+ num_instances: int = 1,
343
+ ) -> list["InstanceTypeDef"]:
279
344
  """
280
345
  Requests the RunInstances EC2 API call but accounts for the race between recently created
281
346
  instance profiles, IAM roles and an instance creation that refers to them.
282
-
283
- :rtype: List[InstanceTypeDef]
284
347
  """
285
- instance_type = spec['InstanceType']
286
- logger.info('Creating %s instance(s) ... ', instance_type)
348
+ instance_type = spec["InstanceType"]
349
+ logger.info("Creating %s instance(s) ... ", instance_type)
287
350
  boto_instance_list = []
288
- for attempt in retry_ec2(retry_for=a_long_time,
289
- retry_while=inconsistencies_detected):
351
+ for attempt in retry_ec2(
352
+ retry_for=a_long_time, retry_while=inconsistencies_detected
353
+ ):
290
354
  with attempt:
291
- boto_instance_list: List[InstanceTypeDef] = boto3_ec2.run_instances(ImageId=image_id,
292
- MinCount=num_instances,
293
- MaxCount=num_instances,
294
- **spec)['Instances']
355
+ boto_instance_list: list["InstanceTypeDef"] = boto3_ec2.run_instances(
356
+ ImageId=image_id, MinCount=num_instances, MaxCount=num_instances, **spec
357
+ )["Instances"]
295
358
 
296
359
  return boto_instance_list
297
360
 
298
361
 
299
- def increase_instance_hop_limit(boto3_ec2: EC2Client, boto_instance_list: List[InstanceTypeDef]) -> None:
362
+ def increase_instance_hop_limit(
363
+ boto3_ec2: "EC2Client", boto_instance_list: list["InstanceTypeDef"]
364
+ ) -> None:
300
365
  """
301
366
  Increase the default HTTP hop limit, as we are running Toil and Kubernetes inside a Docker container, so the default
302
367
  hop limit of 1 will not be enough when grabbing metadata information with ec2_metadata
@@ -307,11 +372,13 @@ def increase_instance_hop_limit(boto3_ec2: EC2Client, boto_instance_list: List[I
307
372
  :return:
308
373
  """
309
374
  for boto_instance in boto_instance_list:
310
- instance_id = boto_instance['InstanceId']
375
+ instance_id = boto_instance["InstanceId"]
311
376
  for attempt in retry_ec2():
312
377
  with attempt:
313
378
  # Increase hop limit from 1 to use Instance Metadata V2
314
- boto3_ec2.modify_instance_metadata_options(InstanceId=instance_id, HttpPutResponseHopLimit=3)
379
+ boto3_ec2.modify_instance_metadata_options(
380
+ InstanceId=instance_id, HttpPutResponseHopLimit=3
381
+ )
315
382
 
316
383
 
317
384
  def prune(bushy: dict) -> dict:
@@ -328,33 +395,37 @@ def prune(bushy: dict) -> dict:
328
395
 
329
396
  # We need a module-level client to get the dynamically-generated error types to
330
397
  # catch, and to wait on IAM items.
331
- iam_client = establish_boto3_session().client('iam')
398
+ iam_client = establish_boto3_session().client("iam")
332
399
 
333
400
 
334
401
  # exception is generated by a factory so we weirdly need a client instance to reference it
335
- @retry(errors=[iam_client.exceptions.NoSuchEntityException],
336
- intervals=[1, 1, 2, 4, 8, 16, 32, 64])
402
+ @retry(
403
+ errors=[iam_client.exceptions.NoSuchEntityException],
404
+ intervals=[1, 1, 2, 4, 8, 16, 32, 64],
405
+ )
337
406
  def wait_until_instance_profile_arn_exists(instance_profile_arn: str):
338
407
  # TODO: We have no guarantee that the ARN contains the name.
339
- instance_profile_name = instance_profile_arn.split(':instance-profile/')[-1]
408
+ instance_profile_name = instance_profile_arn.split(":instance-profile/")[-1]
340
409
  logger.debug("Checking for instance profile %s...", instance_profile_name)
341
410
  iam_client.get_instance_profile(InstanceProfileName=instance_profile_name)
342
411
  logger.debug("Instance profile found")
343
412
 
344
413
 
345
414
@retry(intervals=[5, 5, 10, 20, 20, 20, 20], errors=INCONSISTENCY_ERRORS)
def create_instances(
    ec2_resource: "EC2ServiceResource",
    image_id: str,
    key_name: str,
    instance_type: str,
    num_instances: int = 1,
    security_group_ids: Optional[list] = None,
    user_data: Optional[Union[str, bytes]] = None,
    block_device_map: Optional[list[dict]] = None,
    instance_profile_arn: Optional[str] = None,
    placement_az: Optional[str] = None,
    # NOTE(review): annotation fixed from `str = None` — default is None, so
    # the parameter is Optional (matches create_launch_template's signature).
    subnet_id: Optional[str] = None,
    tags: Optional[dict[str, str]] = None,
) -> list["Instance"]:
    """
    Replaces create_ondemand_instances. Uses boto3 and returns a list of Boto3 instance dicts.

    Launches ``num_instances`` on-demand instances of ``instance_type`` from
    ``image_id``. Optional arguments that are ``None`` are pruned from the
    request before it is sent.

    :param ec2_resource: boto3 EC2 service resource used to launch instances.
    :param instance_profile_arn: if given, the function first waits for the
        ARN to become visible in IAM (eventual consistency) before using it.
    :param user_data: instance user data; a ``str`` is encoded to UTF-8 bytes
        (the resource API handles base64 encoding itself).

    Tags, if given, are applied to the instances, and all volumes.
    """
    logger.info("Creating %s instance(s) ... ", instance_type)

    if isinstance(user_data, str):
        user_data = user_data.encode("utf-8")

    request = {
        "ImageId": image_id,
        "MinCount": num_instances,
        "MaxCount": num_instances,
        "KeyName": key_name,
        "SecurityGroupIds": security_group_ids,
        "InstanceType": instance_type,
        "UserData": user_data,
        "BlockDeviceMappings": block_device_map,
        "SubnetId": subnet_id,
        # Metadata V2 defaults hops to 1, which is an issue when running inside a docker container
        # https://github.com/adamchainz/ec2-metadata?tab=readme-ov-file#instance-metadata-service-version-2
        "MetadataOptions": {"HttpPutResponseHopLimit": 3},
    }

    if instance_profile_arn:
        # We could just retry when we get an error because the ARN doesn't
        # exist yet, but we might as well wait for it proactively.
        wait_until_instance_profile_arn_exists(instance_profile_arn)

        # Add it to the request
        request["IamInstanceProfile"] = {"Arn": instance_profile_arn}

    if placement_az:
        request["Placement"] = {"AvailabilityZone": placement_az}

    if tags:
        # Tag everything when we make it.
        flat_tags = flatten_tags(tags)
        request["TagSpecifications"] = [
            {"ResourceType": "instance", "Tags": flat_tags},
            {"ResourceType": "volume", "Tags": flat_tags},
        ]

    # prune() drops keys whose values are None so boto3 doesn't reject them.
    return ec2_resource.create_instances(**prune(request))
404
479
 
405
480
 
406
481
@retry(intervals=[5, 5, 10, 20, 20, 20, 20], errors=INCONSISTENCY_ERRORS)
def create_launch_template(
    ec2_client: "EC2Client",
    template_name: str,
    image_id: str,
    key_name: str,
    instance_type: str,
    security_group_ids: Optional[list] = None,
    user_data: Optional[Union[str, bytes]] = None,
    block_device_map: Optional[list[dict]] = None,
    instance_profile_arn: Optional[str] = None,
    placement_az: Optional[str] = None,
    subnet_id: Optional[str] = None,
    tags: Optional[dict[str, str]] = None,
) -> str:
    """
    Creates a launch template with the given name for launching instances with the given parameters.

    Optional arguments that are ``None`` are pruned from the template before
    it is sent. Tags, if given, are applied to the launch template itself and,
    via the template, to launched instances and their volumes.

    :param user_data: instance user data, as ``str`` or ``bytes``. Unlike the
        create_instances resource API, the launch template API requires
        UserData to be base64-encoded text, so it is encoded here.
    :param instance_profile_arn: if given, the function first waits for the
        ARN to become visible in IAM (eventual consistency) before using it.
    :return: the LaunchTemplateId of the created template.
    """
    logger.info("Creating launch template for %s instances ... ", instance_type)

    if isinstance(user_data, str):
        # Make sure we have bytes
        user_data = user_data.encode("utf-8")
    if isinstance(user_data, bytes):
        # Then base64 and decode back to str. This must happen for bytes
        # input too; previously only str input was encoded, so raw bytes
        # reached the API un-encoded and would be rejected.
        user_data = b64encode(user_data).decode("utf-8")

    template = {
        "ImageId": image_id,
        "KeyName": key_name,
        "SecurityGroupIds": security_group_ids,
        "InstanceType": instance_type,
        "UserData": user_data,
        "BlockDeviceMappings": block_device_map,
        "SubnetId": subnet_id,
        # Increase hop limit from 1 to use Instance Metadata V2
        "MetadataOptions": {"HttpPutResponseHopLimit": 3},
    }

    if instance_profile_arn:
        # We could just retry when we get an error because the ARN doesn't
        # exist yet, but we might as well wait for it proactively.
        wait_until_instance_profile_arn_exists(instance_profile_arn)

        # Add it to the request
        template["IamInstanceProfile"] = {"Arn": instance_profile_arn}

    if placement_az:
        template["Placement"] = {"AvailabilityZone": placement_az}

    flat_tags = []
    if tags:
        # Tag everything when we make it.
        flat_tags = flatten_tags(tags)
        template["TagSpecifications"] = [
            {"ResourceType": "instance", "Tags": flat_tags},
            {"ResourceType": "volume", "Tags": flat_tags},
        ]

    request = {
        "LaunchTemplateData": prune(template),
        "LaunchTemplateName": template_name,
    }

    if tags:
        request["TagSpecifications"] = [
            {"ResourceType": "launch-template", "Tags": flat_tags}
        ]

    return ec2_client.create_launch_template(**request)["LaunchTemplate"][
        "LaunchTemplateId"
    ]
479
565
 
480
566
 
481
567
  @retry(intervals=[5, 5, 10, 20, 20, 20, 20], errors=INCONSISTENCY_ERRORS)
482
- def create_auto_scaling_group(autoscaling_client: AutoScalingClient,
483
- asg_name: str,
484
- launch_template_ids: Dict[str, str],
485
- vpc_subnets: List[str],
486
- min_size: int,
487
- max_size: int,
488
- instance_types: Optional[Iterable[str]] = None,
489
- spot_bid: Optional[float] = None,
490
- spot_cheapest: bool = False,
491
- tags: Optional[Dict[str, str]] = None) -> None:
568
+ def create_auto_scaling_group(
569
+ autoscaling_client: "AutoScalingClient",
570
+ asg_name: str,
571
+ launch_template_ids: dict[str, str],
572
+ vpc_subnets: list[str],
573
+ min_size: int,
574
+ max_size: int,
575
+ instance_types: Optional[Iterable[str]] = None,
576
+ spot_bid: Optional[float] = None,
577
+ spot_cheapest: bool = False,
578
+ tags: Optional[dict[str, str]] = None,
579
+ ) -> None:
492
580
  """
493
581
  Create a new Auto Scaling Group with the given name (which is also its
494
582
  unique identifier).
@@ -521,19 +609,26 @@ def create_auto_scaling_group(autoscaling_client: AutoScalingClient,
521
609
  """
522
610
 
523
611
  if instance_types is None:
524
- instance_types: List[str] = []
612
+ instance_types: list[str] = []
525
613
 
526
614
  if instance_types is not None and len(instance_types) > 20:
527
- raise RuntimeError(f"Too many instance types ({len(instance_types)}) in group; AWS supports only 20.")
615
+ raise RuntimeError(
616
+ f"Too many instance types ({len(instance_types)}) in group; AWS supports only 20."
617
+ )
528
618
 
529
619
  if len(vpc_subnets) == 0:
530
- raise RuntimeError("No VPC subnets specified to launch into; not clear where to put instances")
620
+ raise RuntimeError(
621
+ "No VPC subnets specified to launch into; not clear where to put instances"
622
+ )
531
623
 
532
624
  def get_launch_template_spec(instance_type):
533
625
  """
534
626
  Get a LaunchTemplateSpecification for the given instance type.
535
627
  """
536
- return {'LaunchTemplateId': launch_template_ids[instance_type], 'Version': '$Default'}
628
+ return {
629
+ "LaunchTemplateId": launch_template_ids[instance_type],
630
+ "Version": "$Default",
631
+ }
537
632
 
538
633
  # We always write the ASG with a MixedInstancesPolicy even when we have only one type.
539
634
  # And we use a separate launch template for every instance type, and apply it as an override.
@@ -542,24 +637,42 @@ def create_auto_scaling_group(autoscaling_client: AutoScalingClient,
542
637
  # We need to use a launch template per instance type so that different
543
638
  # instance types with specified EBS storage size overrides will get their
544
639
  # storage.
545
- mip = {'LaunchTemplate': {'LaunchTemplateSpecification': get_launch_template_spec(next(iter(instance_types))), # noqa
546
- 'Overrides': [{'InstanceType': t, 'LaunchTemplateSpecification': get_launch_template_spec(t)} for t in instance_types]}} # noqa
640
+ mip = {
641
+ "LaunchTemplate": {
642
+ "LaunchTemplateSpecification": get_launch_template_spec(
643
+ next(iter(instance_types))
644
+ ), # noqa
645
+ "Overrides": [
646
+ {
647
+ "InstanceType": t,
648
+ "LaunchTemplateSpecification": get_launch_template_spec(t),
649
+ }
650
+ for t in instance_types
651
+ ],
652
+ }
653
+ } # noqa
547
654
 
548
655
  if spot_bid is not None:
549
656
  # Ask for spot instances by saying everything above base capacity of 0 should be spot.
550
- mip['InstancesDistribution'] = {'OnDemandPercentageAboveBaseCapacity': 0,
551
- 'SpotAllocationStrategy': 'capacity-optimized' if not spot_cheapest else 'lowest-price',
552
- 'SpotMaxPrice': str(spot_bid)}
553
-
554
- asg = {'AutoScalingGroupName': asg_name,
555
- 'MixedInstancesPolicy': prune(mip),
556
- 'MinSize': min_size,
557
- 'MaxSize': max_size,
558
- 'VPCZoneIdentifier': ','.join(vpc_subnets)}
657
+ mip["InstancesDistribution"] = {
658
+ "OnDemandPercentageAboveBaseCapacity": 0,
659
+ "SpotAllocationStrategy": (
660
+ "capacity-optimized" if not spot_cheapest else "lowest-price"
661
+ ),
662
+ "SpotMaxPrice": str(spot_bid),
663
+ }
664
+
665
+ asg = {
666
+ "AutoScalingGroupName": asg_name,
667
+ "MixedInstancesPolicy": prune(mip),
668
+ "MinSize": min_size,
669
+ "MaxSize": max_size,
670
+ "VPCZoneIdentifier": ",".join(vpc_subnets),
671
+ }
559
672
 
560
673
  if tags:
561
674
  # Tag the ASG itself.
562
- asg['Tags'] = flatten_tags(tags)
675
+ asg["Tags"] = flatten_tags(tags)
563
676
 
564
677
  logger.debug("Creating Autoscaling Group across subnets: %s", vpc_subnets)
565
678