toil 6.1.0__py3-none-any.whl → 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. toil/__init__.py +1 -232
  2. toil/batchSystems/abstractBatchSystem.py +22 -13
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +59 -45
  4. toil/batchSystems/awsBatch.py +8 -8
  5. toil/batchSystems/contained_executor.py +4 -5
  6. toil/batchSystems/gridengine.py +1 -1
  7. toil/batchSystems/htcondor.py +5 -5
  8. toil/batchSystems/kubernetes.py +25 -11
  9. toil/batchSystems/local_support.py +3 -3
  10. toil/batchSystems/lsf.py +2 -2
  11. toil/batchSystems/mesos/batchSystem.py +4 -4
  12. toil/batchSystems/mesos/executor.py +3 -2
  13. toil/batchSystems/options.py +9 -0
  14. toil/batchSystems/singleMachine.py +11 -10
  15. toil/batchSystems/slurm.py +64 -22
  16. toil/batchSystems/torque.py +1 -1
  17. toil/bus.py +7 -3
  18. toil/common.py +36 -13
  19. toil/cwl/cwltoil.py +365 -312
  20. toil/deferred.py +1 -1
  21. toil/fileStores/abstractFileStore.py +17 -17
  22. toil/fileStores/cachingFileStore.py +2 -2
  23. toil/fileStores/nonCachingFileStore.py +1 -1
  24. toil/job.py +228 -60
  25. toil/jobStores/abstractJobStore.py +18 -10
  26. toil/jobStores/aws/jobStore.py +280 -218
  27. toil/jobStores/aws/utils.py +57 -29
  28. toil/jobStores/conftest.py +2 -2
  29. toil/jobStores/fileJobStore.py +2 -2
  30. toil/jobStores/googleJobStore.py +3 -4
  31. toil/leader.py +72 -24
  32. toil/lib/aws/__init__.py +26 -10
  33. toil/lib/aws/iam.py +2 -2
  34. toil/lib/aws/session.py +62 -22
  35. toil/lib/aws/utils.py +73 -37
  36. toil/lib/conversions.py +5 -1
  37. toil/lib/ec2.py +118 -69
  38. toil/lib/expando.py +1 -1
  39. toil/lib/io.py +14 -2
  40. toil/lib/misc.py +1 -3
  41. toil/lib/resources.py +55 -21
  42. toil/lib/retry.py +12 -5
  43. toil/lib/threading.py +2 -2
  44. toil/lib/throttle.py +1 -1
  45. toil/options/common.py +27 -24
  46. toil/provisioners/__init__.py +9 -3
  47. toil/provisioners/abstractProvisioner.py +9 -7
  48. toil/provisioners/aws/__init__.py +20 -15
  49. toil/provisioners/aws/awsProvisioner.py +406 -329
  50. toil/provisioners/gceProvisioner.py +2 -2
  51. toil/provisioners/node.py +13 -5
  52. toil/server/app.py +1 -1
  53. toil/statsAndLogging.py +58 -16
  54. toil/test/__init__.py +27 -12
  55. toil/test/batchSystems/batchSystemTest.py +40 -33
  56. toil/test/batchSystems/batch_system_plugin_test.py +79 -0
  57. toil/test/batchSystems/test_slurm.py +1 -1
  58. toil/test/cwl/cwlTest.py +8 -91
  59. toil/test/cwl/seqtk_seq.cwl +1 -1
  60. toil/test/docs/scriptsTest.py +10 -13
  61. toil/test/jobStores/jobStoreTest.py +33 -49
  62. toil/test/lib/aws/test_iam.py +2 -2
  63. toil/test/provisioners/aws/awsProvisionerTest.py +51 -34
  64. toil/test/provisioners/clusterTest.py +90 -8
  65. toil/test/server/serverTest.py +2 -2
  66. toil/test/src/autoDeploymentTest.py +1 -1
  67. toil/test/src/dockerCheckTest.py +2 -1
  68. toil/test/src/environmentTest.py +125 -0
  69. toil/test/src/fileStoreTest.py +1 -1
  70. toil/test/src/jobDescriptionTest.py +18 -8
  71. toil/test/src/jobTest.py +1 -1
  72. toil/test/src/realtimeLoggerTest.py +4 -0
  73. toil/test/src/workerTest.py +52 -19
  74. toil/test/utils/toilDebugTest.py +61 -3
  75. toil/test/utils/utilsTest.py +20 -18
  76. toil/test/wdl/wdltoil_test.py +24 -71
  77. toil/test/wdl/wdltoil_test_kubernetes.py +77 -0
  78. toil/toilState.py +68 -9
  79. toil/utils/toilDebugJob.py +153 -26
  80. toil/utils/toilLaunchCluster.py +12 -2
  81. toil/utils/toilRsyncCluster.py +7 -2
  82. toil/utils/toilSshCluster.py +7 -3
  83. toil/utils/toilStats.py +2 -1
  84. toil/utils/toilStatus.py +97 -51
  85. toil/version.py +10 -10
  86. toil/wdl/wdltoil.py +318 -51
  87. toil/worker.py +96 -69
  88. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/LICENSE +25 -0
  89. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/METADATA +55 -21
  90. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/RECORD +93 -90
  91. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/WHEEL +1 -1
  92. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/entry_points.txt +0 -0
  93. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/top_level.txt +0 -0
toil/lib/throttle.py CHANGED
@@ -43,7 +43,7 @@ class LocalThrottle:
     thread as necessary to ensure that no less than the configured minimum interval has
     passed since the last invocation of this method in the current thread returned True.
 
-    If the wait parameter is False, this method immediatly returns True (if at least the
+    If the wait parameter is False, this method immediately returns True (if at least the
     configured minimum interval has passed since the last time this method returned True in
     the current thread) or False otherwise.
     """
toil/options/common.py CHANGED
@@ -7,7 +7,7 @@ import logging
 
 from ruamel.yaml import YAML
 
-from toil.lib.conversions import bytes2human, human2bytes, strtobool
+from toil.lib.conversions import bytes2human, human2bytes, strtobool, opt_strtobool
 
 from toil.batchSystems.options import add_all_batchsystem_options
 from toil.provisioners import parse_node_types
@@ -137,8 +137,14 @@ def make_open_interval_action(min: Union[int, float], max: Optional[Union[int, f
             func = fC(min, max)
             try:
                 if not func(values):
-                    raise parser.error(
-                        f"{option_string} ({values}) must be within the range: [{min}, {'infinity' if max is None else max})")
+                    if max is None:
+                        raise parser.error(
+                            f"{option_string} ({values}) must be at least {min}"
+                        )
+                    else:
+                        raise parser.error(
+                            f"{option_string} ({values}) must be at least {min} and strictly less than {max})"
+                        )
             except AssertionError:
                 raise RuntimeError(f"The {option_string} option has an invalid value: {values}")
             setattr(namespace, self.dest, values)
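To illustrate the reworded errors: make_open_interval_action(min, max) builds an argparse action that accepts values in [min, max), and --maxCores is registered with it later in this file. A hedged sketch (the stand-alone parser here is illustrative, not Toil's own option setup):

    from argparse import ArgumentParser
    from toil.options.common import make_open_interval_action

    parser = ArgumentParser()
    parser.add_argument('--maxCores', type=int, action=make_open_interval_action(1))

    parser.parse_args(['--maxCores', '8'])  # accepted: 8 >= 1
    parser.parse_args(['--maxCores', '0'])  # exits with "--maxCores (0) must be at least 1"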
@@ -173,7 +179,7 @@ JOBSTORE_HELP = ("The location of the job store for the workflow. "
                  "store must be accessible by all worker nodes. Depending on the desired "
                  "job store implementation, the location should be formatted according to "
                  "one of the following schemes:\n\n"
-                 "file:<path> where <path> points to a directory on the file systen\n\n"
+                 "file:<path> where <path> points to a directory on the file system\n\n"
                  "aws:<region>:<prefix> where <region> is the name of an AWS region like "
                  "us-west-2 and <prefix> will be prepended to the names of any top-level "
                  "AWS resources in use by job store, e.g. S3 buckets.\n\n "
@@ -200,14 +206,6 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
     config.add_argument('--config', dest='config', is_config_file_arg=True, default=None, metavar="PATH",
                         help="Get options from a config file.")
 
-    def convert_bool(b: str) -> bool:
-        """Convert a string representation of bool to bool"""
-        return bool(strtobool(b))
-
-    def opt_strtobool(b: Optional[str]) -> Optional[bool]:
-        """Convert an optional string representation of bool to None or bool"""
-        return b if b is None else convert_bool(b)
-
     add_logging_options(parser)
     parser.register("type", "bool", parseBool)  # Custom type for arg=True/False.
 
@@ -353,7 +351,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                                   "them from being modified externally. When set to False, as long as caching is enabled, "
                                   "Toil will protect the file automatically by changing the permissions to read-only."
                                   "default=%(default)s")
-    link_imports.add_argument("--symlinkImports", dest="symlinkImports", type=convert_bool, default=True,
+    link_imports.add_argument("--symlinkImports", dest="symlinkImports", type=strtobool, default=True,
                               metavar="BOOL", help=link_imports_help)
     move_exports = file_store_options.add_mutually_exclusive_group()
     move_exports_help = ('When using a filesystem based job store, output files are by default moved to the '
@@ -361,7 +359,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                          'location. Setting this option to True instead copies the files into the output directory. '
                          'Applies to filesystem-based job stores only.'
                          'default=%(default)s')
-    move_exports.add_argument("--moveOutputs", dest="moveOutputs", type=convert_bool, default=False, metavar="BOOL",
+    move_exports.add_argument("--moveOutputs", dest="moveOutputs", type=strtobool, default=False, metavar="BOOL",
                               help=move_exports_help)
 
     caching = file_store_options.add_mutually_exclusive_group()
@@ -471,11 +469,11 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                                      "This is useful for heterogeneous jobs where some tasks require much more "
                                      "disk than others.")
 
-    autoscaling_options.add_argument("--metrics", dest="metrics", default=False, type=convert_bool, metavar="BOOL",
+    autoscaling_options.add_argument("--metrics", dest="metrics", default=False, type=strtobool, metavar="BOOL",
                                      help="Enable the prometheus/grafana dashboard for monitoring CPU/RAM usage, "
                                           "queue size, and issued jobs.")
     autoscaling_options.add_argument("--assumeZeroOverhead", dest="assume_zero_overhead", default=False,
-                                     type=convert_bool, metavar="BOOL",
+                                     type=strtobool, metavar="BOOL",
                                      help="Ignore scheduler and OS overhead and assume jobs can use every last byte "
                                           "of memory and disk on a node when autoscaling.")
 
@@ -546,7 +544,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                                   help=resource_help_msg.format('default', 'accelerators', accelerators_note, []))
     resource_options.add_argument('--defaultPreemptible', '--defaultPreemptable', dest='defaultPreemptible',
                                   metavar='BOOL',
-                                  type=convert_bool, nargs='?', const=True, default=False,
+                                  type=strtobool, nargs='?', const=True, default=False,
                                   help='Make all jobs able to run on preemptible (spot) nodes by default.')
     resource_options.add_argument('--maxCores', dest='maxCores', default=SYS_MAX_SIZE, metavar='INT', type=int,
                                   action=make_open_interval_action(1),
@@ -571,10 +569,10 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                              f"labeling job failed. default={1}")
     job_options.add_argument("--enableUnlimitedPreemptibleRetries", "--enableUnlimitedPreemptableRetries",
                              dest="enableUnlimitedPreemptibleRetries",
-                             type=convert_bool, default=False, metavar="BOOL",
+                             type=strtobool, default=False, metavar="BOOL",
                              help="If set, preemptible failures (or any failure due to an instance getting "
                                   "unexpectedly terminated) will not count towards job failures and --retryCount.")
-    job_options.add_argument("--doubleMem", dest="doubleMem", type=convert_bool, default=False, metavar="BOOL",
+    job_options.add_argument("--doubleMem", dest="doubleMem", type=strtobool, default=False, metavar="BOOL",
                              help="If set, batch jobs which die to reaching memory limit on batch schedulers "
                                   "will have their memory doubled and they will be retried. The remaining "
                                   "retry count will be reduced by 1. Currently supported by LSF.")
@@ -588,6 +586,11 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                              help=f"Period of time to wait (in seconds) between checking for missing/overlong jobs, "
                                   f"that is jobs which get lost by the batch system. Expert parameter. "
                                   f"default=%(default)s")
+    job_options.add_argument("--jobStoreTimeout", dest="job_store_timeout", default=30, type=float,
+                             action=make_open_interval_action(0), metavar="FLOAT",
+                             help=f"Maximum time (in seconds) to wait for a job's update to the job store "
+                                  f"before declaring it failed. default=%(default)s")
+
 
     # Log management options
     log_options = parser.add_argument_group(
@@ -612,7 +615,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
     log_options.add_argument("--writeLogsGzip", dest="writeLogsGzip", nargs='?', action='store', default=None,
                              const=os.getcwd(), metavar="OPT_PATH",
                              help="Identical to --writeLogs except the logs files are gzipped on the leader.")
-    log_options.add_argument("--writeLogsFromAllJobs", dest="writeLogsFromAllJobs", type=convert_bool,
+    log_options.add_argument("--writeLogsFromAllJobs", dest="writeLogsFromAllJobs", type=strtobool,
                              default=False, metavar="BOOL",
                              help="Whether to write logs from all jobs (including the successful ones) without "
                                   "necessarily setting the log level to 'debug'. Ensure that either --writeLogs "
@@ -620,7 +623,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
     log_options.add_argument("--writeMessages", dest="write_messages", default=None,
                              type=lambda x: None if x is None else os.path.abspath(x), metavar="PATH",
                              help="File to send messages from the leader's message bus to.")
-    log_options.add_argument("--realTimeLogging", dest="realTimeLogging", type=convert_bool, default=False,
+    log_options.add_argument("--realTimeLogging", dest="realTimeLogging", type=strtobool, default=False,
                              help="Enable real-time logging from workers to leader")
 
     # Misc options
@@ -628,12 +631,12 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
         title="Toil miscellaneous options.",
         description="Everything else."
     )
-    misc_options.add_argument('--disableChaining', dest='disableChaining', type=convert_bool, default=False,
+    misc_options.add_argument('--disableChaining', dest='disableChaining', type=strtobool, default=False,
                               metavar="BOOL",
                               help="Disables chaining of jobs (chaining uses one job's resource allocation "
                                    "for its successor job if possible).")
     misc_options.add_argument("--disableJobStoreChecksumVerification", dest="disableJobStoreChecksumVerification",
-                              default=False, type=convert_bool, metavar="BOOL",
+                              default=False, type=strtobool, metavar="BOOL",
                               help="Disables checksum verification for files transferred to/from the job store. "
                                    "Checksum verification is a safety check to ensure the data is not corrupted "
                                    "during transfer. Currently only supported for non-streaming AWS files.")
@@ -685,7 +688,7 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                              action=make_open_interval_action(0.0), metavar="FLOAT",
                              help=f"Interval of time service jobs wait between polling for the existence of the "
                                   f"keep-alive flag. Default: {60.0}")
-    misc_options.add_argument('--forceDockerAppliance', dest='forceDockerAppliance', type=convert_bool, default=False,
+    misc_options.add_argument('--forceDockerAppliance', dest='forceDockerAppliance', type=strtobool, default=False,
                               metavar="BOOL",
                               help='Disables sanity checking the existence of the docker image specified by '
                                    'TOIL_APPLIANCE_SELF, which Toil uses to provision mesos for autoscaling.')
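The BOOL-typed options above now use strtobool (and opt_strtobool) imported from toil.lib.conversions instead of the local helpers deleted in the @@ -200,14 hunk. A rough sketch of what the imported helpers must do, reconstructed from the removed definitions (the module's actual implementations may differ in detail):

    from typing import Optional

    def strtobool(val: str) -> bool:
        """Accept the usual true/false spellings, as the removed convert_bool did via a distutils-style strtobool."""
        lowered = val.lower()
        if lowered in ('y', 'yes', 't', 'true', 'on', '1'):
            return True
        if lowered in ('n', 'no', 'f', 'false', 'off', '0'):
            return False
        raise ValueError(f"invalid truth value {val!r}")

    def opt_strtobool(b: Optional[str]) -> Optional[bool]:
        """Convert an optional string representation of bool to None or bool."""
        return b if b is None else strtobool(b)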
toil/provisioners/__init__.py CHANGED
@@ -32,6 +32,7 @@ def cluster_factory(
     nodeStorage: int = 50,
     nodeStorageOverrides: Optional[List[str]] = None,
     sseKey: Optional[str] = None,
+    enable_fuse: bool = False
 ) -> Union["AWSProvisioner", "GCEProvisioner"]:
     """
     Find and instantiate the appropriate provisioner instance to make clusters in the given cloud.
@@ -51,14 +52,14 @@
         except ImportError:
             logger.error('The aws extra must be installed to use this provisioner')
             raise
-        return AWSProvisioner(clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides, sseKey)
+        return AWSProvisioner(clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides, sseKey, enable_fuse)
     elif provisioner == 'gce':
         try:
             from toil.provisioners.gceProvisioner import GCEProvisioner
         except ImportError:
             logger.error('The google extra must be installed to use this provisioner')
             raise
-        return GCEProvisioner(clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides, sseKey)
+        return GCEProvisioner(clusterName, clusterType, zone, nodeStorage, nodeStorageOverrides, sseKey, enable_fuse)
     else:
         raise RuntimeError("Invalid provisioner '%s'" % provisioner)
 
@@ -174,9 +175,14 @@ def check_valid_node_types(provisioner, node_types: List[Tuple[Set[str], Optiona
 
 class NoSuchClusterException(Exception):
     """Indicates that the specified cluster does not exist."""
-    def __init__(self, cluster_name):
+    def __init__(self, cluster_name: str) -> None:
         super().__init__(f"The cluster '{cluster_name}' could not be found")
 
+
+class NoSuchZoneException(Exception):
+    """Indicates that a valid zone could not be found."""
+    def __init__(self) -> None:
+        super().__init__(f"No valid zone could be found!")
+
 
 class ClusterTypeNotSupportedException(Exception):
     """Indicates that a provisioner does not support a given cluster type."""
toil/provisioners/abstractProvisioner.py CHANGED
@@ -137,6 +137,7 @@ class AbstractProvisioner(ABC):
                  zone: Optional[str] = None,
                  nodeStorage: int = 50,
                  nodeStorageOverrides: Optional[List[str]] = None,
+                 enable_fuse: bool = False
                  ) -> None:
         """
         Initialize provisioner.
@@ -162,11 +163,14 @@
         for override in nodeStorageOverrides or []:
             nodeShape, storageOverride = override.split(':')
             self._nodeStorageOverrides[nodeShape] = int(storageOverride)
-        self._leaderPrivateIP = None
+        self._leaderPrivateIP: Optional[str] = None
         # This will hold an SSH public key for Mesos clusters, or the
         # Kubernetes joining information as a dict for Kubernetes clusters.
         self._leaderWorkerAuthentication = None
 
+        # Whether or not to use FUSE on the cluster. If true, the cluster's Toil containers will be launched in privileged mode
+        self.enable_fuse = enable_fuse
+
         if clusterName:
             # Making a new cluster
             self.createClusterSettings()
@@ -812,14 +816,12 @@
             -v /opt:/opt \\
             -v /etc/kubernetes:/etc/kubernetes \\
             -v /etc/kubernetes/admin.conf:/root/.kube/config \\
-            # Pass in a path to use for singularity image caching into the container
+            {"-e TOIL_KUBERNETES_PRIVILEGED=True --privileged" if self.enable_fuse else
+             "--security-opt seccomp=unconfined --security-opt systempaths=unconfined"} \\
             -e TOIL_KUBERNETES_HOST_PATH=/var/lib/toil \\
+            # Pass in a path to use for singularity image caching into the container
             -e SINGULARITY_CACHEDIR=/var/lib/toil/singularity \\
             -e MINIWDL__SINGULARITY__IMAGE_CACHE=/var/lib/toil/miniwdl \\
-            # These rules are necessary in order to get user namespaces working
-            # https://github.com/apptainer/singularity/issues/5806
-            --security-opt seccomp=unconfined \\
-            --security-opt systempaths=unconfined \\
             --name=toil_{role} \\
             {applianceSelf()} \\
             {entryPointArgs}
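Rendered on its own, the conditional added above picks between two sets of docker run flags (this stand-alone function is illustrative, not part of the diff):

    def fuse_flags(enable_fuse: bool) -> str:
        # Mirrors the f-string expression in the hunk above.
        return ("-e TOIL_KUBERNETES_PRIVILEGED=True --privileged" if enable_fuse else
                "--security-opt seccomp=unconfined --security-opt systempaths=unconfined")

    print(fuse_flags(True))   # privileged mode, so FUSE mounts work inside the appliance
    print(fuse_flags(False))  # the pre-existing relaxations for user namespaces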
@@ -1236,7 +1238,7 @@
             WantedBy=multi-user.target
             ''').format(**values))
 
-    def _getIgnitionUserData(self, role, keyPath=None, preemptible=False, architecture='amd64'):
+    def _getIgnitionUserData(self, role: str, keyPath: Optional[str] = None, preemptible: bool = False, architecture: str = 'amd64') -> str:
         """
         Return the text (not bytes) user data to pass to a provisioned node.
 
toil/provisioners/aws/__init__.py CHANGED
@@ -18,6 +18,8 @@ from operator import attrgetter
 from statistics import mean, stdev
 from typing import List, Optional
 
+from botocore.client import BaseClient
+
 from toil.lib.aws import (get_aws_zone_from_boto,
                           get_aws_zone_from_environment,
                           get_aws_zone_from_environment_region,
@@ -27,8 +29,10 @@ logger = logging.getLogger(__name__)
 
 ZoneTuple = namedtuple('ZoneTuple', ['name', 'price_deviation'])
 
+
 def get_aws_zone_from_spot_market(spotBid: Optional[float], nodeType: Optional[str],
-                                  boto2_ec2: Optional["boto.connection.AWSAuthConnection"], zone_options: Optional[List[str]]) -> Optional[str]:
+                                  boto3_ec2: Optional[BaseClient], zone_options: Optional[List[str]]) -> \
+        Optional[str]:
     """
     If a spot bid, node type, and Boto2 EC2 connection are specified, picks a
     zone where instances are easy to buy from the zones in the region of the
@@ -40,21 +44,22 @@ def get_aws_zone_from_spot_market(spotBid: Optional[float], nodeType: Optional[s
     """
     if spotBid:
         # if spot bid is present, all the other parameters must be as well
-        assert bool(spotBid) == bool(nodeType) == bool(boto2_ec2)
+        assert bool(spotBid) == bool(nodeType) == bool(boto3_ec2)
         # if the zone is unset and we are using the spot market, optimize our
        # choice based on the spot history
 
         if zone_options is None:
             # We can use all the zones in the region
-            zone_options = [z.name for z in boto2_ec2.get_all_zones()]
+            zone_options = [z.name for z in boto3_ec2.describe_availability_zones()]
 
-        return optimize_spot_bid(boto2_ec2, instance_type=nodeType, spot_bid=float(spotBid), zone_options=zone_options)
+        return optimize_spot_bid(boto3_ec2, instance_type=nodeType, spot_bid=float(spotBid), zone_options=zone_options)
     else:
         return None
 
 
 def get_best_aws_zone(spotBid: Optional[float] = None, nodeType: Optional[str] = None,
-                      boto2_ec2: Optional["boto.connection.AWSAuthConnection"] = None, zone_options: Optional[List[str]] = None) -> Optional[str]:
+                      boto3_ec2: Optional[BaseClient] = None,
+                      zone_options: Optional[List[str]] = None) -> Optional[str]:
     """
     Get the right AWS zone to use.
 
@@ -81,12 +86,13 @@ def get_best_aws_zone(spotBid: Optional[float] = None, nodeType: Optional[str] =
     """
     return get_aws_zone_from_environment() or \
         get_aws_zone_from_metadata() or \
-        get_aws_zone_from_spot_market(spotBid, nodeType, boto2_ec2, zone_options) or \
+        get_aws_zone_from_spot_market(spotBid, nodeType, boto3_ec2, zone_options) or \
         get_aws_zone_from_environment_region() or \
         get_aws_zone_from_boto()
 
 
-def choose_spot_zone(zones: List[str], bid: float, spot_history: List['boto.ec2.spotpricehistory.SpotPriceHistory']) -> str:
+def choose_spot_zone(zones: List[str], bid: float,
+                     spot_history: List['boto.ec2.spotpricehistory.SpotPriceHistory']) -> str:
     """
     Returns the zone to put the spot request based on, in order of priority:
 
@@ -137,7 +143,7 @@ def choose_spot_zone(zones: List[str], bid: float, spot_history: List['boto.ec2.
     return min(markets_under_bid or markets_over_bid, key=attrgetter('price_deviation')).name
 
 
-def optimize_spot_bid(boto2_ec2, instance_type, spot_bid, zone_options: List[str]):
+def optimize_spot_bid(boto3_ec2: BaseClient, instance_type: str, spot_bid: float, zone_options: List[str]):
     """
     Check whether the bid is in line with history and makes an effort to place
     the instance in a sensible zone.
@@ -145,7 +151,7 @@ def optimize_spot_bid(boto2_ec2, instance_type, spot_bid, zone_options: List[str
     :param zone_options: The collection of allowed zones to consider, within
         the region associated with the Boto2 connection.
     """
-    spot_history = _get_spot_history(boto2_ec2, instance_type)
+    spot_history = _get_spot_history(boto3_ec2, instance_type)
     if spot_history:
         _check_spot_bid(spot_bid, spot_history)
         most_stable_zone = choose_spot_zone(zone_options, spot_bid, spot_history)
@@ -183,20 +189,19 @@ def _check_spot_bid(spot_bid, spot_history):
     average = mean([datum.price for datum in spot_history])
     if spot_bid > average * 2:
         logger.warning("Your bid $ %f is more than double this instance type's average "
-                   "spot price ($ %f) over the last week", spot_bid, average)
+                       "spot price ($ %f) over the last week", spot_bid, average)
 
 
-def _get_spot_history(boto2_ec2, instance_type):
+def _get_spot_history(boto3_ec2: BaseClient, instance_type: str):
    """
     Returns list of 1,000 most recent spot market data points represented as SpotPriceHistory
     objects. Note: The most recent object/data point will be first in the list.
 
     :rtype: list[SpotPriceHistory]
     """
-
     one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
-    spot_data = boto2_ec2.get_spot_price_history(start_time=one_week_ago.isoformat(),
-                                                 instance_type=instance_type,
-                                                 product_description="Linux/UNIX")
+    spot_data = boto3_ec2.describe_spot_price_history(StartTime=one_week_ago.isoformat(),
+                                                      InstanceTypes=[instance_type],
+                                                      ProductDescriptions=["Linux/UNIX"])
     spot_data.sort(key=attrgetter("timestamp"), reverse=True)
     return spot_data
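For reference, a minimal stand-alone example of the boto3 API this migration targets (region and instance type are placeholders; note that boto3's describe_spot_price_history returns a dict whose 'SpotPriceHistory' key holds the actual data points):

    import datetime
    import boto3

    ec2 = boto3.client('ec2', region_name='us-west-2')  # placeholder region
    start = datetime.datetime.now() - datetime.timedelta(days=7)
    response = ec2.describe_spot_price_history(
        StartTime=start,
        InstanceTypes=['m5.large'],            # placeholder instance type
        ProductDescriptions=['Linux/UNIX'],
    )
    points = response['SpotPriceHistory']      # each point has SpotPrice, Timestamp, AvailabilityZone
    points.sort(key=lambda p: p['Timestamp'], reverse=True)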