yellowdog-python-examples 8.3.0__py3-none-any.whl → 8.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. yellowdog_cli/__init__.py +1 -1
  2. yellowdog_cli/abort.py +11 -11
  3. yellowdog_cli/admin.py +2 -2
  4. yellowdog_cli/boost.py +3 -3
  5. yellowdog_cli/cancel.py +9 -9
  6. yellowdog_cli/cloudwizard.py +4 -4
  7. yellowdog_cli/compare.py +6 -6
  8. yellowdog_cli/create.py +54 -52
  9. yellowdog_cli/delete.py +10 -10
  10. yellowdog_cli/download.py +15 -15
  11. yellowdog_cli/finish.py +9 -9
  12. yellowdog_cli/instantiate.py +19 -17
  13. yellowdog_cli/list.py +41 -41
  14. yellowdog_cli/provision.py +28 -28
  15. yellowdog_cli/remove.py +28 -26
  16. yellowdog_cli/resize.py +9 -9
  17. yellowdog_cli/show.py +25 -25
  18. yellowdog_cli/shutdown.py +9 -9
  19. yellowdog_cli/submit.py +29 -29
  20. yellowdog_cli/terminate.py +8 -8
  21. yellowdog_cli/upload.py +11 -11
  22. yellowdog_cli/utils/cloudwizard_aws.py +32 -32
  23. yellowdog_cli/utils/cloudwizard_azure.py +27 -27
  24. yellowdog_cli/utils/cloudwizard_common.py +12 -10
  25. yellowdog_cli/utils/cloudwizard_gcp.py +8 -8
  26. yellowdog_cli/utils/csv_data.py +7 -7
  27. yellowdog_cli/utils/entity_utils.py +20 -20
  28. yellowdog_cli/utils/follow_utils.py +5 -5
  29. yellowdog_cli/utils/interactive.py +8 -8
  30. yellowdog_cli/utils/load_config.py +11 -11
  31. yellowdog_cli/utils/load_resources.py +4 -4
  32. yellowdog_cli/utils/misc_utils.py +3 -3
  33. yellowdog_cli/utils/printing.py +10 -9
  34. yellowdog_cli/utils/provision_utils.py +2 -2
  35. yellowdog_cli/utils/settings.py +1 -0
  36. yellowdog_cli/utils/start_hold_common.py +7 -7
  37. yellowdog_cli/utils/submit_utils.py +5 -5
  38. yellowdog_cli/utils/upload_utils.py +3 -3
  39. yellowdog_cli/utils/variables.py +5 -5
  40. yellowdog_cli/utils/wrapper.py +7 -7
  41. {yellowdog_python_examples-8.3.0.dist-info → yellowdog_python_examples-8.3.1.dist-info}/METADATA +2 -2
  42. yellowdog_python_examples-8.3.1.dist-info/RECORD +65 -0
  43. yellowdog_python_examples-8.3.0.dist-info/RECORD +0 -65
  44. {yellowdog_python_examples-8.3.0.dist-info → yellowdog_python_examples-8.3.1.dist-info}/WHEEL +0 -0
  45. {yellowdog_python_examples-8.3.0.dist-info → yellowdog_python_examples-8.3.1.dist-info}/entry_points.txt +0 -0
  46. {yellowdog_python_examples-8.3.0.dist-info → yellowdog_python_examples-8.3.1.dist-info}/licenses/LICENSE +0 -0
  47. {yellowdog_python_examples-8.3.0.dist-info → yellowdog_python_examples-8.3.1.dist-info}/top_level.txt +0 -0
yellowdog_cli/submit.py CHANGED
@@ -50,8 +50,8 @@ from yellowdog_cli.utils.misc_utils import format_yd_name, generate_id, link_ent
50
50
  from yellowdog_cli.utils.printing import (
51
51
  WorkRequirementSnapshot,
52
52
  print_error,
53
+ print_info,
53
54
  print_json,
54
- print_log,
55
55
  print_numbered_strings,
56
56
  print_warning,
57
57
  )
@@ -181,7 +181,7 @@ def main():
181
181
  )
182
182
 
183
183
  wr_data_file = relpath(wr_data_file)
184
- print_log(f"Loading Work Requirement data from: '{wr_data_file}'")
184
+ print_info(f"Loading Work Requirement data from: '{wr_data_file}'")
185
185
 
186
186
  # JSON file
187
187
  if wr_data_file.lower().endswith("json"):
@@ -303,7 +303,7 @@ def submit_work_requirement(
303
303
  )
304
304
  if task_group_count > 1:
305
305
  if len(wr_data[TASK_GROUPS]) == 1:
306
- print_log(
306
+ print_info(
307
307
  f"Expanding number of Task Groups to '{TASK_GROUP_COUNT}="
308
308
  f"{task_group_count}'"
309
309
  )
@@ -341,15 +341,15 @@ def submit_work_requirement(
341
341
  work_requirement = CLIENT.work_client.add_work_requirement(work_requirement)
342
342
  if ARGS_PARSER.quiet:
343
343
  print(work_requirement.id)
344
- print_log(
344
+ print_info(
345
345
  "Created "
346
346
  f"{link_entity(CONFIG_COMMON.url, work_requirement)} "
347
347
  f"('{CONFIG_COMMON.namespace}/{work_requirement.name}')"
348
348
  )
349
- print_log(f"YellowDog ID is '{work_requirement.id}'")
349
+ print_info(f"YellowDog ID is '{work_requirement.id}'")
350
350
  if ARGS_PARSER.hold:
351
351
  CLIENT.work_client.hold_work_requirement(work_requirement)
352
- print_log("Work Requirement status is set to 'HELD'")
352
+ print_info("Work Requirement status is set to 'HELD'")
353
353
  else:
354
354
  global WR_SNAPSHOT
355
355
  WR_SNAPSHOT.set_work_requirement(work_requirement)
@@ -581,7 +581,7 @@ def create_task_group(
581
581
  tag=task_group_data.get(TASK_GROUP_TAG, None),
582
582
  )
583
583
 
584
- print_log(f"Generated Task Group '{task_group_name}'")
584
+ print_info(f"Generated Task Group '{task_group_name}'")
585
585
  return task_group
586
586
 
587
587
 
@@ -614,7 +614,7 @@ def add_tasks_to_task_group(
614
614
  if task_group_task_count is not None:
615
615
  if num_tasks == 1 and task_group_task_count > 1:
616
616
  # Expand the number of Tasks to match the specified Task count
617
- print_log(
617
+ print_info(
618
618
  f"Expanding number of Tasks in Task Group '{task_group.name}' to"
619
619
  f" '{TASK_COUNT}={task_group_task_count}' Tasks"
620
620
  )
@@ -636,7 +636,7 @@ def add_tasks_to_task_group(
636
636
  num_tasks = len(tasks) if task_count is None else task_count
637
637
  num_task_batches: int = ceil(num_tasks / TASK_BATCH_SIZE)
638
638
  if num_task_batches > 1 and not ARGS_PARSER.dry_run:
639
- print_log(
639
+ print_info(
640
640
  f"Adding Tasks to Task Group '{task_group.name}' in "
641
641
  f"{num_task_batches} batches (batch size = {TASK_BATCH_SIZE})"
642
642
  )
@@ -666,7 +666,7 @@ def add_tasks_to_task_group(
666
666
  # Single batch or sequential batch submission
667
667
  if parallel_upload_threads == 1 or num_task_batches == 1:
668
668
  if num_task_batches > 1:
669
- print_log(f"Uploading {num_task_batches} Task batches sequentially")
669
+ print_info(f"Uploading {num_task_batches} Task batches sequentially")
670
670
  for batch_number in range(num_task_batches):
671
671
  if ARGS_PARSER.pause_between_batches is not None and num_task_batches > 1:
672
672
  pause_between_batches(
@@ -704,7 +704,7 @@ def add_tasks_to_task_group(
704
704
  "Option 'pause-between-batches/-P' is ignored for parallel batch uploads"
705
705
  )
706
706
  max_workers = min(num_task_batches, parallel_upload_threads)
707
- print_log(
707
+ print_info(
708
708
  f"Submitting Task batches using {max_workers} parallel submission threads"
709
709
  )
710
710
  with ThreadPoolExecutor(max_workers=max_workers) as executor:
@@ -744,12 +744,12 @@ def add_tasks_to_task_group(
744
744
 
745
745
  if not ARGS_PARSER.dry_run:
746
746
  if num_submitted_tasks > 0:
747
- print_log(
747
+ print_info(
748
748
  f"Added a total of {num_submitted_tasks:,d} Task(s) to Task Group"
749
749
  f" '{task_group.name}'"
750
750
  )
751
751
  else:
752
- print_log(f"No Tasks added to Task Group '{task_group.name}'")
752
+ print_info(f"No Tasks added to Task Group '{task_group.name}'")
753
753
 
754
754
 
755
755
  def generate_batch_of_tasks_for_task_group(
@@ -1126,7 +1126,7 @@ def submit_batch_of_tasks_to_task_group(
1126
1126
 
1127
1127
  def report_success():
1128
1128
  if num_task_batches > 1:
1129
- print_log(
1129
+ print_info(
1130
1130
  f"Batch {batch_number_str} :"
1131
1131
  f" Added {len(tasks_list):,d} Task(s) {task_range_str}to Work Requirement Task"
1132
1132
  f" Group '{task_group.name}'"
@@ -1160,7 +1160,7 @@ def submit_batch_of_tasks_to_task_group(
1160
1160
  warning_already_displayed = True
1161
1161
 
1162
1162
  if attempts < MAX_BATCH_SUBMIT_ATTEMPTS - 1:
1163
- print_log(
1163
+ print_info(
1164
1164
  f"Retrying submission of batch {batch_number_str} "
1165
1165
  f"(retry attempt {attempts + 1} of {MAX_BATCH_SUBMIT_ATTEMPTS - 1})"
1166
1166
  )
@@ -1227,7 +1227,7 @@ def follow_progress_old(work_requirement: WorkRequirement) -> None:
1227
1227
  .result()
1228
1228
  )
1229
1229
  if work_requirement.status != WorkRequirementStatus.COMPLETED:
1230
- print_log(f"Work Requirement did not complete: {work_requirement.status}")
1230
+ print_info(f"Work Requirement did not complete: {work_requirement.status}")
1231
1231
 
1232
1232
 
1233
1233
  def follow_progress(work_requirement: WorkRequirement) -> None:
@@ -1236,7 +1236,7 @@ def follow_progress(work_requirement: WorkRequirement) -> None:
1236
1236
  Replacement for the SDK version above.
1237
1237
  """
1238
1238
  if not ARGS_PARSER.dry_run:
1239
- print_log("Following Work Requirement event stream")
1239
+ print_info("Following Work Requirement event stream")
1240
1240
  follow_events(work_requirement.id, YDIDType.WORK_REQUIREMENT)
1241
1241
 
1242
1242
 
@@ -1249,7 +1249,7 @@ def on_update(work_req: WorkRequirement):
1249
1249
  for task_group in work_req.taskGroups:
1250
1250
  completed += task_group.taskSummary.statusCounts[TaskStatus.COMPLETED]
1251
1251
  total += task_group.taskSummary.taskCount
1252
- print_log(
1252
+ print_info(
1253
1253
  f"Work Requirement is {work_req.status} with {completed}/{total} "
1254
1254
  "completed Tasks"
1255
1255
  )
@@ -1285,7 +1285,7 @@ def deduplicate_inputs(task_inputs: List[TaskInput]) -> List[TaskInput]:
1285
1285
  if task_input.source == TaskInputSource.TASK_NAMESPACE
1286
1286
  else task_input.namespace
1287
1287
  )
1288
- print_log(
1288
+ print_info(
1289
1289
  f"Removing '{task_input.verification}' duplicate:"
1290
1290
  f" '{namespace}{NAMESPACE_OBJECT_STORE_PREFIX_SEPARATOR}{task_input.objectNamePattern}'"
1291
1291
  )
@@ -1635,9 +1635,9 @@ def submit_json_raw(wr_file: str):
1635
1635
 
1636
1636
  if ARGS_PARSER.dry_run:
1637
1637
  # This will show the results of any variable substitutions
1638
- print_log("Dry-run: Printing JSON Work Requirement specification:")
1638
+ print_info("Dry-run: Printing JSON Work Requirement specification:")
1639
1639
  print_json(wr_data)
1640
- print_log("Dry-run: Complete")
1640
+ print_info("Dry-run: Complete")
1641
1641
  return
1642
1642
 
1643
1643
  # Extract Tasks from Task Groups
@@ -1661,7 +1661,7 @@ def submit_json_raw(wr_file: str):
1661
1661
 
1662
1662
  if response.status_code == 200:
1663
1663
  wr_id = jsons.loads(response.text)["id"]
1664
- print_log(
1664
+ print_info(
1665
1665
  f"Created Work Requirement '{wr_data['namespace']}/{wr_name}' ({wr_id})"
1666
1666
  )
1667
1667
  if ARGS_PARSER.quiet:
@@ -1672,7 +1672,7 @@ def submit_json_raw(wr_file: str):
1672
1672
 
1673
1673
  if ARGS_PARSER.hold:
1674
1674
  CLIENT.work_client.hold_work_requirement_by_id(wr_id)
1675
- print_log("Work Requirement status set to 'HELD'")
1675
+ print_info("Work Requirement status set to 'HELD'")
1676
1676
 
1677
1677
  # Submit Tasks to the Work Requirement
1678
1678
  # Collect 'VERIFY_AT_START' files
@@ -1694,18 +1694,18 @@ def submit_json_raw(wr_file: str):
1694
1694
  # Warn about VERIFY_AT_START files & halt to allow upload or
1695
1695
  # Work Requirement cancellation
1696
1696
  if ARGS_PARSER.quiet is False and len(verify_at_start_files) != 0:
1697
- print_log(
1697
+ print_info(
1698
1698
  "The following files may be required ('VERIFY_AT_START') "
1699
1699
  "before Tasks are submitted, or the Tasks will fail."
1700
1700
  )
1701
- print_log(
1701
+ print_info(
1702
1702
  "You now have an opportunity to upload the required files "
1703
1703
  "before Tasks are submitted:"
1704
1704
  )
1705
1705
  print()
1706
1706
  print_numbered_strings(sorted(list(verify_at_start_files)))
1707
1707
  if not confirmed("Proceed now (y), or Cancel Work Requirement (n)?"):
1708
- print_log(f"Cancelling Work Requirement '{wr_name}'")
1708
+ print_info(f"Cancelling Work Requirement '{wr_name}'")
1709
1709
  CLIENT.work_client.cancel_work_requirement_by_id(wr_id)
1710
1710
  return
1711
1711
 
@@ -1713,7 +1713,7 @@ def submit_json_raw(wr_file: str):
1713
1713
  for task_group_name, task_list in task_lists.items():
1714
1714
  num_batches = ceil(len(task_list) / TASK_BATCH_SIZE)
1715
1715
  max_workers = min(num_batches, ARGS_PARSER.parallel_batches)
1716
- print_log(
1716
+ print_info(
1717
1717
  f"Submitting task batches using {max_workers} parallel submission thread(s)"
1718
1718
  )
1719
1719
  with ThreadPoolExecutor(max_workers=max_workers) as executor:
@@ -1738,7 +1738,7 @@ def submit_json_raw(wr_file: str):
1738
1738
 
1739
1739
  executor.shutdown()
1740
1740
  num_submitted_tasks = sum([x.result() for x in executors])
1741
- print_log(
1741
+ print_info(
1742
1742
  f"Added a total of {num_submitted_tasks} Task(s) to Task Group '{task_group_name}'"
1743
1743
  )
1744
1744
 
@@ -1767,7 +1767,7 @@ def submit_json_task_batch(
1767
1767
  )
1768
1768
 
1769
1769
  if response.status_code == 200:
1770
- print_log(
1770
+ print_info(
1771
1771
  f"Added {len(task_batch)} Task(s) to Task Group "
1772
1772
  f"'{task_group_name}' (Batch {formatted_number_str(batch_number, num_batches)} "
1773
1773
  f"of {num_batches})"
@@ -19,7 +19,7 @@ from yellowdog_cli.utils.entity_utils import (
19
19
  from yellowdog_cli.utils.follow_utils import follow_ids
20
20
  from yellowdog_cli.utils.interactive import confirmed, select
21
21
  from yellowdog_cli.utils.misc_utils import link_entity
22
- from yellowdog_cli.utils.printing import print_error, print_log, print_warning
22
+ from yellowdog_cli.utils.printing import print_error, print_info, print_warning
23
23
  from yellowdog_cli.utils.wrapper import ARGS_PARSER, CLIENT, CONFIG_COMMON, main_wrapper
24
24
  from yellowdog_cli.utils.ydid_utils import YDIDType, get_ydid_type
25
25
 
@@ -38,7 +38,7 @@ def main():
38
38
  terminate_cr_by_name_or_id(ARGS_PARSER.compute_requirement_names)
39
39
  return
40
40
 
41
- print_log(
41
+ print_info(
42
42
  "Terminating Compute Requirements in "
43
43
  f"namespace '{CONFIG_COMMON.namespace}' with tags "
44
44
  f"including '{CONFIG_COMMON.name_tag}'"
@@ -72,7 +72,7 @@ def main():
72
72
  )
73
73
  )
74
74
  terminated_count += 1
75
- print_log(
75
+ print_info(
76
76
  f"Terminated {link_entity(CONFIG_COMMON.url, compute_requirement_summary)}"
77
77
  )
78
78
  except Exception as e:
@@ -81,11 +81,11 @@ def main():
81
81
  )
82
82
 
83
83
  if terminated_count > 0:
84
- print_log(f"Terminated {terminated_count} Compute Requirement(s)")
84
+ print_info(f"Terminated {terminated_count} Compute Requirement(s)")
85
85
  if ARGS_PARSER.follow:
86
86
  follow_ids([cr.id for cr in selected_compute_requirement_summaries])
87
87
  else:
88
- print_log("No Compute Requirements terminated")
88
+ print_info("No Compute Requirements terminated")
89
89
 
90
90
 
91
91
  def terminate_cr_by_name_or_id(names_or_ids: List[str]):
@@ -106,11 +106,11 @@ def terminate_cr_by_name_or_id(names_or_ids: List[str]):
106
106
  )
107
107
  continue
108
108
  else:
109
- print_log(f"Found Compute Requirement ID: {compute_requirement_id}")
109
+ print_info(f"Found Compute Requirement ID: {compute_requirement_id}")
110
110
  compute_requirement_ids.append(compute_requirement_id)
111
111
 
112
112
  if len(compute_requirement_ids) == 0:
113
- print_log("No Compute Requirements to terminate")
113
+ print_info("No Compute Requirements to terminate")
114
114
  return
115
115
 
116
116
  if not confirmed(
@@ -123,7 +123,7 @@ def terminate_cr_by_name_or_id(names_or_ids: List[str]):
123
123
  CLIENT.compute_client.terminate_compute_requirement_by_id(
124
124
  compute_requirement_id
125
125
  )
126
- print_log(f"Terminated '{compute_requirement_id}'")
126
+ print_info(f"Terminated '{compute_requirement_id}'")
127
127
  except Exception as e:
128
128
  print_error(f"Failed to terminate '{compute_requirement_id}': ({e})")
129
129
 
yellowdog_cli/upload.py CHANGED
@@ -17,7 +17,7 @@ from yellowdog_client.object_store.model import FileTransferStatus
17
17
  from yellowdog_client.object_store.upload import UploadBatchBuilder
18
18
 
19
19
  from yellowdog_cli.utils.misc_utils import unpack_namespace_in_prefix
20
- from yellowdog_cli.utils.printing import print_batch_upload_files, print_log
20
+ from yellowdog_cli.utils.printing import print_batch_upload_files, print_info
21
21
  from yellowdog_cli.utils.upload_utils import upload_file
22
22
  from yellowdog_cli.utils.wrapper import ARGS_PARSER, CLIENT, CONFIG_COMMON, main_wrapper
23
23
 
@@ -27,7 +27,7 @@ def main():
27
27
  if ARGS_PARSER.content_path is not None and ARGS_PARSER.content_path != "":
28
28
  try:
29
29
  chdir(ARGS_PARSER.content_path)
30
- print_log(
30
+ print_info(
31
31
  "Uploading files relative to local directory:"
32
32
  f" '{ARGS_PARSER.content_path}'"
33
33
  )
@@ -42,12 +42,12 @@ def main():
42
42
  )
43
43
 
44
44
  if ARGS_PARSER.batch: # Use the batch uploader
45
- print_log(
45
+ print_info(
46
46
  f"Batch uploading Using Object Store namespace '{namespace}' (prefix"
47
47
  f" '{prefix}' is ignored for batch upload)"
48
48
  )
49
49
  if ARGS_PARSER.recursive or ARGS_PARSER.flatten:
50
- print_log(
50
+ print_info(
51
51
  "Warning: '--recursive', '--flatten-upload-paths' options are ignored"
52
52
  " for batch upload"
53
53
  )
@@ -66,7 +66,7 @@ def main():
66
66
  file_pattern = f".\\{file_pattern}"
67
67
  else:
68
68
  file_pattern = f"./{file_pattern}"
69
- print_log(f"Uploading files matching '{file_pattern}'")
69
+ print_info(f"Uploading files matching '{file_pattern}'")
70
70
 
71
71
  upload_batch_builder: UploadBatchBuilder = (
72
72
  CLIENT.object_store_client.build_upload_batch()
@@ -87,20 +87,20 @@ def main():
87
87
  )
88
88
  CLIENT.object_store_client.start_transfers()
89
89
  futures.wait((future,))
90
- print_log("Batch upload complete")
90
+ print_info("Batch upload complete")
91
91
  else:
92
- print_log(f"No objects matching '{file_pattern}'")
92
+ print_info(f"No objects matching '{file_pattern}'")
93
93
  return
94
94
 
95
95
  # Use the sequential uploader
96
- print_log(f"Using Object Store namespace '{namespace}' and prefix '{prefix}'")
96
+ print_info(f"Using Object Store namespace '{namespace}' and prefix '{prefix}'")
97
97
  files_set = set(ARGS_PARSER.files)
98
98
  if os_name == "nt":
99
99
  # Windows wildcard expansion (not done natively by the Windows shell)
100
100
  files_set = {f for files in files_set for f in glob(files)}
101
101
 
102
102
  if len(files_set) == 0:
103
- print_log("No files to upload")
103
+ print_info("No files to upload")
104
104
  return
105
105
 
106
106
  added_files_set = set()
@@ -124,7 +124,7 @@ def main():
124
124
  files_set = files_set.union(added_files_set).difference(removed_dirs_set)
125
125
 
126
126
  if ARGS_PARSER.flatten:
127
- print_log("Flattening upload paths")
127
+ print_info("Flattening upload paths")
128
128
 
129
129
  uploaded_file_count = 0
130
130
  for file in files_set:
@@ -140,7 +140,7 @@ def main():
140
140
  is True
141
141
  ):
142
142
  uploaded_file_count += 1
143
- print_log(f"Uploaded {uploaded_file_count} files")
143
+ print_info(f"Uploaded {uploaded_file_count} files")
144
144
 
145
145
 
146
146
  # Standalone entry point
@@ -20,7 +20,7 @@ from yellowdog_cli.utils.cloudwizard_aws_types import (
20
20
  )
21
21
  from yellowdog_cli.utils.cloudwizard_common import CommonCloudConfig
22
22
  from yellowdog_cli.utils.interactive import confirmed, select
23
- from yellowdog_cli.utils.printing import print_error, print_log, print_warning
23
+ from yellowdog_cli.utils.printing import print_error, print_info, print_warning
24
24
  from yellowdog_cli.utils.settings import RN_SOURCE_TEMPLATE, RN_STORAGE_CONFIGURATION
25
25
 
26
26
  IAM_USER_NAME = "yellowdog-cloudwizard-user"
@@ -225,7 +225,7 @@ class AWSConfig(CommonCloudConfig):
225
225
  """
226
226
  Create the required assets in the AWS account, for use with YellowDog.
227
227
  """
228
- print_log("Inserting YellowDog-created assets into the AWS account")
228
+ print_info("Inserting YellowDog-created assets into the AWS account")
229
229
  iam_client = boto3.client("iam", region_name=self.region_name)
230
230
  self._create_iam_user(iam_client)
231
231
  self._create_iam_policy(iam_client)
@@ -238,7 +238,7 @@ class AWSConfig(CommonCloudConfig):
238
238
  """
239
239
  Load the required AWS IDs that are non-constants.
240
240
  """
241
- print_log("Querying AWS account for existing assets")
241
+ print_info("Querying AWS account for existing assets")
242
242
  iam_client = boto3.client("iam", region_name=self.region_name)
243
243
 
244
244
  # Get the IAM Policy ARN
@@ -283,7 +283,7 @@ class AWSConfig(CommonCloudConfig):
283
283
  """
284
284
  Remove the Cloud Wizard assets in the AWS account.
285
285
  """
286
- print_log("Removing all YellowDog-created assets in the AWS account")
286
+ print_info("Removing all YellowDog-created assets in the AWS account")
287
287
  iam_client = boto3.client("iam", region_name=self.region_name)
288
288
  self._delete_s3_bucket()
289
289
  self._delete_access_keys(iam_client)
@@ -297,10 +297,10 @@ class AWSConfig(CommonCloudConfig):
297
297
  Create the YellowDog resources and save the resource definition file.
298
298
  """
299
299
 
300
- print_log("Creating resources in the YellowDog account")
300
+ print_info("Creating resources in the YellowDog account")
301
301
 
302
302
  # Select Compute Source Templates
303
- print_log(
303
+ print_info(
304
304
  "Please select the AWS availability zones for which to create YellowDog"
305
305
  " Compute Source Templates"
306
306
  )
@@ -334,7 +334,7 @@ class AWSConfig(CommonCloudConfig):
334
334
  return
335
335
 
336
336
  # Create Compute Source Templates
337
- print_log("Creating YellowDog Compute Source Templates")
337
+ print_info("Creating YellowDog Compute Source Templates")
338
338
  create_resources(self._source_template_resources)
339
339
 
340
340
  # Create Compute Requirement Templates
@@ -358,7 +358,7 @@ class AWSConfig(CommonCloudConfig):
358
358
  print_error("No access keys loaded; can't create Credential")
359
359
 
360
360
  # Create namespace configuration (Keyring/Credential creation must come first)
361
- print_log(
361
+ print_info(
362
362
  "Creating YellowDog Namespace Configuration"
363
363
  f" 'S3:{self._get_s3_bucket_name()}' -> '{self._namespace}'"
364
364
  )
@@ -407,9 +407,9 @@ class AWSConfig(CommonCloudConfig):
407
407
  """
408
408
  Collect network information about the enabled regions and AZs.
409
409
  """
410
- print_log("Gathering network information for all AWS regions")
410
+ print_info("Gathering network information for all AWS regions")
411
411
  for region in AWS_ALL_REGIONS:
412
- print_log(f"Gathering network information for region '{region}'")
412
+ print_info(f"Gathering network information for region '{region}'")
413
413
  ec2_client = boto3.client("ec2", region_name=region)
414
414
 
415
415
  # Collect the default security group for the region
@@ -417,7 +417,7 @@ class AWSConfig(CommonCloudConfig):
417
417
  response = ec2_client.describe_security_groups(Filters=[])
418
418
  except ClientError as e:
419
419
  if "AuthFailure" in str(e):
420
- print_log(
420
+ print_info(
421
421
  f"Region '{region}' is not enabled (AuthFailure when fetching"
422
422
  " security groups)"
423
423
  )
@@ -460,7 +460,7 @@ class AWSConfig(CommonCloudConfig):
460
460
  response = iam_client.create_user(UserName=IAM_USER_NAME)
461
461
  arn = response["User"]["Arn"]
462
462
  user_id = response["User"]["UserId"]
463
- print_log(f"Created IAM user '{IAM_USER_NAME}' ({arn})")
463
+ print_info(f"Created IAM user '{IAM_USER_NAME}' ({arn})")
464
464
 
465
465
  except ClientError as e:
466
466
  if "EntityAlreadyExists" in str(e):
@@ -491,7 +491,7 @@ class AWSConfig(CommonCloudConfig):
491
491
 
492
492
  try:
493
493
  iam_client.delete_user(UserName=IAM_USER_NAME)
494
- print_log(f"Deleted IAM user '{IAM_USER_NAME}'")
494
+ print_info(f"Deleted IAM user '{IAM_USER_NAME}'")
495
495
  except ClientError as e:
496
496
  if "NoSuchEntity" in str(e):
497
497
  print_warning(f"No user '{IAM_USER_NAME}' to delete")
@@ -507,7 +507,7 @@ class AWSConfig(CommonCloudConfig):
507
507
  PolicyName=IAM_POLICY_NAME, PolicyDocument=json.dumps(YELLOWDOG_POLICY)
508
508
  )
509
509
  self._iam_policy_arn = response["Policy"]["Arn"]
510
- print_log(
510
+ print_info(
511
511
  f"Created IAM Policy '{IAM_POLICY_NAME}' ({self._iam_policy_arn})"
512
512
  )
513
513
  except ClientError as e:
@@ -540,7 +540,7 @@ class AWSConfig(CommonCloudConfig):
540
540
 
541
541
  try:
542
542
  iam_client.delete_policy(PolicyArn=self._iam_policy_arn)
543
- print_log(f"Deleted IAM policy '{IAM_POLICY_NAME}'")
543
+ print_info(f"Deleted IAM policy '{IAM_POLICY_NAME}'")
544
544
  except ClientError as e:
545
545
  if "NoSuchEntity" in str(e):
546
546
  print_warning(
@@ -563,7 +563,7 @@ class AWSConfig(CommonCloudConfig):
563
563
  iam_client.attach_user_policy(
564
564
  UserName=IAM_USER_NAME, PolicyArn=self._iam_policy_arn
565
565
  )
566
- print_log(
566
+ print_info(
567
567
  f"Attached IAM policy '{IAM_POLICY_NAME}' to user '{IAM_USER_NAME}'"
568
568
  )
569
569
  except ClientError as e:
@@ -589,7 +589,7 @@ class AWSConfig(CommonCloudConfig):
589
589
  iam_client.detach_user_policy(
590
590
  UserName=IAM_USER_NAME, PolicyArn=self._iam_policy_arn
591
591
  )
592
- print_log(
592
+ print_info(
593
593
  f"Detached IAM policy '{IAM_POLICY_NAME}' from user '{IAM_USER_NAME}'"
594
594
  )
595
595
  except ClientError as e:
@@ -617,12 +617,12 @@ class AWSConfig(CommonCloudConfig):
617
617
  response["AccessKey"]["SecretAccessKey"],
618
618
  )
619
619
  self._access_keys.append(access_key)
620
- print_log(
620
+ print_info(
621
621
  f"Created AWS_ACCESS_KEY_ID='{access_key.access_key_id}' for user"
622
622
  f" '{IAM_USER_NAME}'"
623
623
  )
624
624
  if self._show_secrets:
625
- print_log(
625
+ print_info(
626
626
  f" AWS_SECRET_ACCESS_KEY='{access_key.secret_access_key}'"
627
627
  )
628
628
  except ClientError as e:
@@ -646,7 +646,7 @@ class AWSConfig(CommonCloudConfig):
646
646
  iam_client.delete_access_key(
647
647
  UserName=IAM_USER_NAME, AccessKeyId=access_key.access_key_id
648
648
  )
649
- print_log(f"Deleted access key '{access_key.access_key_id}'")
649
+ print_info(f"Deleted access key '{access_key.access_key_id}'")
650
650
  except ClientError as e:
651
651
  if "NoSuchEntity" in str(e):
652
652
  print_warning(
@@ -669,7 +669,7 @@ class AWSConfig(CommonCloudConfig):
669
669
  AWSServiceName="spot.amazonaws.com",
670
670
  Description=EC2_SPOT_SERVICE_LINKED_ROLE_NAME,
671
671
  )
672
- print_log(
672
+ print_info(
673
673
  f"Added service linked role '{EC2_SPOT_SERVICE_LINKED_ROLE_NAME}' to"
674
674
  " the AWS account"
675
675
  )
@@ -700,7 +700,7 @@ class AWSConfig(CommonCloudConfig):
700
700
  iam_client.delete_service_linked_role(
701
701
  RoleName=EC2_SPOT_SERVICE_LINKED_ROLE_NAME
702
702
  )
703
- print_log(
703
+ print_info(
704
704
  f"Deleted service linked role '{EC2_SPOT_SERVICE_LINKED_ROLE_NAME}'"
705
705
  " from AWS account"
706
706
  )
@@ -738,7 +738,7 @@ class AWSConfig(CommonCloudConfig):
738
738
  )
739
739
  else:
740
740
  s3_client.create_bucket(Bucket=s3_bucket_name)
741
- print_log(
741
+ print_info(
742
742
  f"Created S3 bucket '{s3_bucket_name}' in region '{self.region_name}'"
743
743
  )
744
744
  except ClientError as e:
@@ -759,11 +759,11 @@ class AWSConfig(CommonCloudConfig):
759
759
  Bucket=s3_bucket_name,
760
760
  Policy=self._generate_s3_bucket_policy(),
761
761
  )
762
- print_log(f"Attached policy to S3 bucket '{s3_bucket_name}'")
762
+ print_info(f"Attached policy to S3 bucket '{s3_bucket_name}'")
763
763
  return
764
764
  except ClientError as e:
765
765
  if "MalformedPolicy" in str(e):
766
- print_log(
766
+ print_info(
767
767
  f"Waiting {retry_interval}s for S3 bucket to be ready for"
768
768
  f" policy attachment (Attempt {index + 1} of"
769
769
  f" {max_retries}) ..."
@@ -793,12 +793,12 @@ class AWSConfig(CommonCloudConfig):
793
793
  s3_client.delete_objects(
794
794
  Bucket=s3_bucket_name, Delete={"Objects": objects_to_delete}
795
795
  )
796
- print_log(
796
+ print_info(
797
797
  f"Deleted {len(objects_to_delete)} object(s) in S3 bucket"
798
798
  f" '{s3_bucket_name}'"
799
799
  )
800
800
  else:
801
- print_log(f"No objects to delete in S3 bucket '{s3_bucket_name}'")
801
+ print_info(f"No objects to delete in S3 bucket '{s3_bucket_name}'")
802
802
 
803
803
  except ClientError as e:
804
804
  if "NoSuchBucket" in str(e):
@@ -829,7 +829,7 @@ class AWSConfig(CommonCloudConfig):
829
829
  return
830
830
  try:
831
831
  s3_client.delete_bucket(Bucket=s3_bucket_name)
832
- print_log(f"Deleted S3 bucket '{s3_bucket_name}'")
832
+ print_info(f"Deleted S3 bucket '{s3_bucket_name}'")
833
833
  except ClientError as e:
834
834
  if "NoSuchBucket" in str(e):
835
835
  print_warning(f"No S3 bucket '{s3_bucket_name}' to delete")
@@ -848,7 +848,7 @@ class AWSConfig(CommonCloudConfig):
848
848
  GroupId=security_group.id,
849
849
  IpPermissions=ingress_rule,
850
850
  )
851
- print_log(
851
+ print_info(
852
852
  f"Added {rule_name} inbound rule to security group"
853
853
  f" '{security_group.name}' ('{security_group.id}') in region"
854
854
  f" '{ec2_client.meta.region_name}'"
@@ -879,7 +879,7 @@ class AWSConfig(CommonCloudConfig):
879
879
  GroupId=security_group.id,
880
880
  IpPermissions=ingress_rule,
881
881
  )
882
- print_log(
882
+ print_info(
883
883
  f"Removed inbound {rule_name} rule from security group"
884
884
  f" '{security_group.name}' ('{security_group.id}') in region"
885
885
  f" '{ec2_client.meta.region_name}' (if present)"
@@ -1018,10 +1018,10 @@ class AWSConfig(CommonCloudConfig):
1018
1018
  )
1019
1019
  except ClientError as e:
1020
1020
  if "DryRunOperation" in str(e):
1021
- print_log(f"Validated AWS access key '{access_key.access_key_id}'")
1021
+ print_info(f"Validated AWS access key '{access_key.access_key_id}'")
1022
1022
  return True
1023
1023
  elif "AuthFailure" in str(e):
1024
- print_log(
1024
+ print_info(
1025
1025
  f"Waiting {retry_interval_seconds}s for AWS access key to"
1026
1026
  f" become valid for EC2 (attempt {index + 1} of"
1027
1027
  f" {max_retries}) ..."