paasta-tools 1.30.9__py3-none-any.whl → 1.35.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of paasta-tools might be problematic. Click here for more details.

Files changed (98)
  1. paasta_tools/__init__.py +1 -1
  2. paasta_tools/api/api_docs/swagger.json +5 -0
  3. paasta_tools/cli/cmds/autoscale.py +2 -0
  4. paasta_tools/cli/cmds/check.py +2 -0
  5. paasta_tools/cli/cmds/cook_image.py +2 -0
  6. paasta_tools/cli/cmds/get_docker_image.py +2 -0
  7. paasta_tools/cli/cmds/get_image_version.py +2 -0
  8. paasta_tools/cli/cmds/get_latest_deployment.py +2 -0
  9. paasta_tools/cli/cmds/info.py +5 -1
  10. paasta_tools/cli/cmds/itest.py +2 -0
  11. paasta_tools/cli/cmds/list_namespaces.py +2 -0
  12. paasta_tools/cli/cmds/local_run.py +116 -24
  13. paasta_tools/cli/cmds/logs.py +2 -0
  14. paasta_tools/cli/cmds/mark_for_deployment.py +12 -2
  15. paasta_tools/cli/cmds/mesh_status.py +2 -1
  16. paasta_tools/cli/cmds/push_to_registry.py +2 -0
  17. paasta_tools/cli/cmds/remote_run.py +10 -0
  18. paasta_tools/cli/cmds/rollback.py +5 -1
  19. paasta_tools/cli/cmds/secret.py +4 -2
  20. paasta_tools/cli/cmds/security_check.py +2 -0
  21. paasta_tools/cli/cmds/spark_run.py +4 -0
  22. paasta_tools/cli/cmds/status.py +35 -8
  23. paasta_tools/cli/cmds/validate.py +296 -19
  24. paasta_tools/cli/cmds/wait_for_deployment.py +2 -0
  25. paasta_tools/cli/schemas/autoscaling_schema.json +3 -2
  26. paasta_tools/cli/schemas/eks_schema.json +23 -1
  27. paasta_tools/cli/schemas/smartstack_schema.json +12 -0
  28. paasta_tools/cli/utils.py +2 -1
  29. paasta_tools/contrib/paasta_update_soa_memcpu.py +10 -14
  30. paasta_tools/generate_deployments_for_service.py +2 -0
  31. paasta_tools/instance/hpa_metrics_parser.py +3 -5
  32. paasta_tools/instance/kubernetes.py +58 -25
  33. paasta_tools/kubernetes/application/controller_wrappers.py +23 -2
  34. paasta_tools/kubernetes/remote_run.py +2 -2
  35. paasta_tools/kubernetes_tools.py +37 -66
  36. paasta_tools/long_running_service_tools.py +8 -1
  37. paasta_tools/paastaapi/model/kubernetes_version.py +3 -0
  38. paasta_tools/setup_prometheus_adapter_config.py +82 -0
  39. paasta_tools/tron_tools.py +3 -0
  40. paasta_tools/utils.py +26 -9
  41. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/generate_deployments_for_service.py +2 -0
  42. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_update_soa_memcpu.py +10 -14
  43. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_prometheus_adapter_config.py +82 -0
  44. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/METADATA +4 -4
  45. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/RECORD +98 -98
  46. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/apply_external_resources.py +0 -0
  47. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/bounce_log_latency_parser.py +0 -0
  48. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_autoscaler_max_instances.py +0 -0
  49. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_cassandracluster_services_replication.py +0 -0
  50. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_flink_services_health.py +0 -0
  51. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_kubernetes_api.py +0 -0
  52. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_kubernetes_services_replication.py +0 -0
  53. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_manual_oapi_changes.sh +0 -0
  54. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_oom_events.py +0 -0
  55. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_orphans.py +0 -0
  56. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/check_spark_jobs.py +0 -0
  57. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_cr.py +0 -0
  58. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_crd.py +0 -0
  59. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_jobs.py +0 -0
  60. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/create_dynamodb_table.py +0 -0
  61. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/create_paasta_playground.py +0 -0
  62. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/delete_kubernetes_deployments.py +0 -0
  63. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/emit_allocated_cpu_metrics.py +0 -0
  64. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/generate_all_deployments +0 -0
  65. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/generate_authenticating_services.py +0 -0
  66. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/generate_services_file.py +0 -0
  67. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/generate_services_yaml.py +0 -0
  68. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/get_running_task_allocation.py +0 -0
  69. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/habitat_fixer.py +0 -0
  70. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/ide_helper.py +0 -0
  71. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/is_pod_healthy_in_proxy.py +0 -0
  72. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/is_pod_healthy_in_smartstack.py +0 -0
  73. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/kill_bad_containers.py +0 -0
  74. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/kubernetes_remove_evicted_pods.py +0 -0
  75. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/mass-deploy-tag.sh +0 -0
  76. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/mock_patch_checker.py +0 -0
  77. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_cleanup_remote_run_resources.py +0 -0
  78. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_cleanup_stale_nodes.py +0 -0
  79. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_deploy_tron_jobs +0 -0
  80. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_execute_docker_command.py +0 -0
  81. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_secrets_sync.py +0 -0
  82. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/paasta_tabcomplete.sh +0 -0
  83. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/render_template.py +0 -0
  84. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/rightsizer_soaconfigs_update.py +0 -0
  85. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/service_shard_remove.py +0 -0
  86. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/service_shard_update.py +0 -0
  87. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_istio_mesh.py +0 -0
  88. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_cr.py +0 -0
  89. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_crd.py +0 -0
  90. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_internal_crd.py +0 -0
  91. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_job.py +0 -0
  92. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/shared_ip_check.py +0 -0
  93. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/synapse_srv_namespaces_fact.py +0 -0
  94. {paasta_tools-1.30.9.data → paasta_tools-1.35.8.data}/scripts/timeouts_metrics_prom.py +0 -0
  95. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/WHEEL +0 -0
  96. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/entry_points.txt +0 -0
  97. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/licenses/LICENSE +0 -0
  98. {paasta_tools-1.30.9.dist-info → paasta_tools-1.35.8.dist-info}/top_level.txt +0 -0
@@ -22,6 +22,7 @@ from datetime import datetime
22
22
  from functools import lru_cache
23
23
  from functools import partial
24
24
  from glob import glob
25
+ from pathlib import Path
25
26
  from typing import Any
26
27
  from typing import Callable
27
28
  from typing import cast
@@ -34,6 +35,9 @@ from typing import Union
34
35
 
35
36
  import pytz
36
37
  from croniter import croniter
38
+ from environment_tools.type_utils import available_location_types
39
+ from environment_tools.type_utils import compare_types
40
+ from environment_tools.type_utils import convert_location_type
37
41
  from jsonschema import Draft4Validator
38
42
  from jsonschema import exceptions
39
43
  from jsonschema import FormatChecker
@@ -64,6 +68,7 @@ from paasta_tools.long_running_service_tools import METRICS_PROVIDER_PISCINA
64
68
  from paasta_tools.long_running_service_tools import METRICS_PROVIDER_PROMQL
65
69
  from paasta_tools.long_running_service_tools import METRICS_PROVIDER_UWSGI
66
70
  from paasta_tools.long_running_service_tools import METRICS_PROVIDER_UWSGI_V2
71
+ from paasta_tools.long_running_service_tools import METRICS_PROVIDER_WORKER_LOAD
67
72
  from paasta_tools.secret_tools import get_secret_name_from_ref
68
73
  from paasta_tools.secret_tools import is_secret_ref
69
74
  from paasta_tools.secret_tools import is_shared_secret
@@ -120,6 +125,7 @@ SCHEMA_TYPES = {
120
125
  "rollback", # automatic rollbacks during deployments
121
126
  "tron", # batch workloads
122
127
  "eks", # eks workloads
128
+ "smartstack", # mesh configs
123
129
  "autotuned_defaults/kubernetes",
124
130
  "autotuned_defaults/cassandracluster",
125
131
  }
@@ -163,9 +169,33 @@ INVALID_AUTOSCALING_FIELDS = {
163
169
  "desired_active_requests_per_replica",
164
170
  "prometheus-adapter-config",
165
171
  },
172
+ METRICS_PROVIDER_WORKER_LOAD: {
173
+ "desired_active_requests_per_replica",
174
+ "prometheus-adapter-config",
175
+ },
166
176
  METRICS_PROVIDER_PROMQL: {"desired_active_requests_per_replica"},
167
177
  }
168
178
 
179
+ # Listener names in Envoy cannot exceed 128 characters and use the
180
+ # following format:
181
+ # service_name.namespace.listener
182
+ # This naming scheme leaves 128 - 10 = 118 characters
183
+ # for the Smartstack service name and namespace.
184
+ # See COREBACK-6303 for more context.
185
+ MAX_ENVOY_NAME_LEN = 118
186
+
187
+ # Socket names cannot exceed 108 characters, and the longest socket
188
+ # name generated by HAProxy uses the following format:
189
+ # /var/run/synapse/sockets/service_name.namespace.LONGPID.sock.tmp
190
+ # This naming scheme leaves 108 - 43 = 65 characters combined for the
191
+ # Smartstack service name and namespace. We leave a generous buffer
192
+ # to arrive at a maximum name length of 55, in case e.g. the .sock
193
+ # suffix is renamed to a longer name for certain sockets.
194
+ # See SMTSTK-204 for more context.
195
+ # NOTE: the above is mostly still true - but the path we use is now /var/run/envoy/sockets/...
196
+ # so we may want to adjust this a tad in the future ;)
197
+ MAX_SMARTSTACK_NAME_LEN = 55
198
+
169
199
 
170
200
  class ConditionConfig(TypedDict, total=False):
171
201
  """
@@ -339,11 +369,9 @@ def get_config_file_dict(file_path: str, use_ruamel: bool = False) -> Dict[Any,
339
369
  # sets disk: 100 -> an instance uses that template and overwrites
340
370
  # it with disk: 1000)
341
371
  ruamel_loader.allow_duplicate_keys = True
342
- # we want to actually expand out all anchors so that we still get
343
- # comments from the original block
344
- ruamel_loader.Constructor.flatten_mapping = (
345
- SafeConstructor.flatten_mapping
346
- )
372
+ # Note: we do NOT use flatten_mapping here because it breaks simple
373
+ # override patterns. For nested merge patterns, we load a flattened
374
+ # version separately for comment detection only.
347
375
  return ruamel_loader.load(config_file)
348
376
  else:
349
377
  return yaml.safe_load(config_file)
@@ -430,6 +458,8 @@ def add_subparser(subparsers):
430
458
  "--service",
431
459
  required=False,
432
460
  help="Service that you want to validate. Like 'example_service'.",
461
+ # strip any potential trailing / for folks tab-completing directories
462
+ type=lambda x: x.rstrip("/"),
433
463
  ).completer = lazy_choices_completer(list_services)
434
464
  validate_parser.add_argument(
435
465
  "-v",
@@ -607,7 +637,22 @@ def validate_unique_instance_names(service_path):
607
637
  return check_passed
608
638
 
609
639
 
610
- def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]:
640
def _get_config_flattened(file_path: str) -> CommentedMap:
    """Round-trip-load a YAML config with merge keys flattened.

    Flattening merge mappings (``<<:``) lets comment detection see keys that
    an instance inherits through nested anchor/merge patterns.
    """
    contents = get_file_contents(file_path)
    loader = YAML(typ="rt")
    # soaconfigs templates intentionally duplicate keys (template + override)
    loader.allow_duplicate_keys = True
    # expand merge keys the way the safe loader would
    loader.Constructor.flatten_mapping = SafeConstructor.flatten_mapping
    return loader.load(contents)
647
+
648
+
649
+ def _get_comments_for_key(
650
+ data: CommentedMap,
651
+ key: Any,
652
+ full_config: Optional[Dict[Any, Any]] = None,
653
+ key_value: Any = None,
654
+ full_config_flattened: Optional[Dict[Any, Any]] = None,
655
+ ) -> Optional[str]:
611
656
  # this is a little weird, but ruamel is returning a list that looks like:
612
657
  # [None, None, CommentToken(...), None] for some reason instead of just a
613
658
  # single string
@@ -624,6 +669,31 @@ def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]:
624
669
 
625
670
  raw_comments = [*_flatten_comments(data.ca.items.get(key, []))]
626
671
  if not raw_comments:
672
+ # If we didn't find a comment in the instance itself, check if this key
673
+ # might be inherited from a template. Look for ANY other instance/template
674
+ # in the config that has the same key with the same value and a comment.
675
+ if full_config is not None and key_value is not None:
676
+ for config_key, config_value in full_config.items():
677
+ if isinstance(config_value, CommentedMap):
678
+ if config_value.get(key) == key_value:
679
+ other_comments = [
680
+ *_flatten_comments(config_value.ca.items.get(key, []))
681
+ ]
682
+ if other_comments:
683
+ return "".join(other_comments)
684
+
685
+ # If still not found and we have a flattened config, check there
686
+ # (flattened config is needed for nested merges)
687
+ if full_config_flattened is not None:
688
+ for config_key, config_value in full_config_flattened.items():
689
+ if isinstance(config_value, CommentedMap):
690
+ if config_value.get(key) == key_value:
691
+ flattened_comments = [
692
+ *_flatten_comments(config_value.ca.items.get(key, []))
693
+ ]
694
+ if flattened_comments:
695
+ return "".join(flattened_comments)
696
+
627
697
  # return None so that we don't return an empty string below if there really aren't
628
698
  # any comments
629
699
  return None
@@ -772,12 +842,13 @@ def validate_autoscaling_configs(service_path: str) -> bool:
772
842
 
773
843
  # we need access to the comments, so we need to read the config with ruamel to be able
774
844
  # to actually get them in a "nice" automated fashion
845
+ config_file_path = os.path.join(
846
+ soa_dir,
847
+ service,
848
+ f"{instance_config.get_instance_type()}-{cluster}.yaml",
849
+ )
775
850
  config = get_config_file_dict(
776
- os.path.join(
777
- soa_dir,
778
- service,
779
- f"{instance_config.get_instance_type()}-{cluster}.yaml",
780
- ),
851
+ config_file_path,
781
852
  use_ruamel=True,
782
853
  )
783
854
  if config[instance].get("cpus") is None:
@@ -792,8 +863,15 @@ def validate_autoscaling_configs(service_path: str) -> bool:
792
863
  # cpu autoscaled, but using autotuned values - can skip
793
864
  continue
794
865
 
866
+ # Load flattened config for comment detection (handles nested merges)
867
+ config_flattened = _get_config_flattened(config_file_path)
868
+
795
869
  cpu_comment = _get_comments_for_key(
796
- data=config[instance], key="cpus"
870
+ data=config[instance],
871
+ key="cpus",
872
+ full_config=config,
873
+ key_value=config[instance].get("cpus"),
874
+ full_config_flattened=config_flattened,
797
875
  )
798
876
  # we could probably have a separate error message if there's a comment that doesn't match
799
877
  # the ack pattern, but that seems like overkill - especially for something that could cause
@@ -935,12 +1013,13 @@ def validate_cpu_burst(service_path: str) -> bool:
935
1013
  if is_k8s_service and not should_skip_cpu_burst_validation:
936
1014
  # we need access to the comments, so we need to read the config with ruamel to be able
937
1015
  # to actually get them in a "nice" automated fashion
1016
+ config_file_path = os.path.join(
1017
+ soa_dir,
1018
+ service,
1019
+ f"{instance_config.get_instance_type()}-{cluster}.yaml",
1020
+ )
938
1021
  config = get_config_file_dict(
939
- os.path.join(
940
- soa_dir,
941
- service,
942
- f"{instance_config.get_instance_type()}-{cluster}.yaml",
943
- ),
1022
+ config_file_path,
944
1023
  use_ruamel=True,
945
1024
  )
946
1025
 
@@ -951,8 +1030,15 @@ def validate_cpu_burst(service_path: str) -> bool:
951
1030
  # under the threshold - can also skip
952
1031
  continue
953
1032
 
1033
+ # Load flattened config for comment detection (handles nested merges)
1034
+ config_flattened = _get_config_flattened(config_file_path)
1035
+
954
1036
  burst_comment = _get_comments_for_key(
955
- data=config[instance], key="cpu_burst_add"
1037
+ data=config[instance],
1038
+ key="cpu_burst_add",
1039
+ full_config=config,
1040
+ key_value=config[instance].get("cpu_burst_add"),
1041
+ full_config_flattened=config_flattened,
956
1042
  )
957
1043
  # we could probably have a separate error message if there's a comment that doesn't match
958
1044
  # the ack pattern, but that seems like overkill - especially for something that could cause
@@ -978,6 +1064,191 @@ def validate_cpu_burst(service_path: str) -> bool:
978
1064
  return returncode
979
1065
 
980
1066
 
1067
def _check_smartstack_name_length_envoy(service: str, namespace: str) -> None:
    """Raise ValueError when the combined service + namespace length would
    overflow Envoy's listener-name limit (``<service>.<namespace>.listener``).
    """
    combined_length = len(service) + len(namespace)
    if combined_length <= MAX_ENVOY_NAME_LEN:
        return
    raise ValueError(
        "Service name and namespace exceeds max listener name length in Envoy. Note that the full listener name "
        'is "{}.{}.listener". Please rename so that the combined length of the service name and namespace does '
        "not exceed {} characters".format(service, namespace, MAX_ENVOY_NAME_LEN),
    )
1077
+
1078
+
1079
def _check_smartstack_name_length(service: str, namespace: str) -> None:
    """Raise ValueError when the combined service + namespace length would
    overflow the HAProxy socket-name limit (MAX_SMARTSTACK_NAME_LEN).
    """
    # NOTE(review): the example socket name below uses ".sock.LONGPID.tmp"
    # while the comment on MAX_SMARTSTACK_NAME_LEN says ".LONGPID.sock.tmp" -
    # confirm which ordering HAProxy actually generates
    if len(service) + len(namespace) <= MAX_SMARTSTACK_NAME_LEN:
        return
    socket_name = "/var/run/synapse/sockets/{}.{}.sock.LONGPID.tmp".format(
        service,
        namespace,
    )
    raise ValueError(
        "Name exceeds max socket name length. Note that the full socket name under the HAProxy naming scheme "
        'is "{}". Please rename so that the combined length of the service name and namespace does not '
        "exceed {} characters".format(socket_name, MAX_SMARTSTACK_NAME_LEN),
    )
1093
+
1094
+
1095
@lru_cache()
def _get_etc_services() -> list[str]:
    """Return the raw lines of /etc/services (read once, then cached)."""
    return Path("/etc/services").read_text().splitlines()
1099
+
1100
+
1101
@lru_cache()
def _get_etc_services_entry(port_lookup: int) -> str | None:
    """Return the service name registered for ``port_lookup`` in /etc/services.

    Returns None when no active entry mentions the port. Lines that are blank,
    malformed, or commented out are ignored - previously a commented-out entry
    (e.g. "#foo 80/tcp") could be returned as if it were in use.
    """
    # entries look like "http    80/tcp    www"; hoist the "port/" prefix
    # out of the loop so we only format it once
    prefix = f"{port_lookup}/"
    for entry in _get_etc_services():
        fields = entry.split()
        # skip blank/short lines and full-line comments instead of relying
        # on IndexError for control flow
        if len(fields) < 2 or fields[0].startswith("#"):
            continue
        if fields[1].startswith(prefix):
            return fields[0]
    return None
1113
+
1114
+
1115
+ def _check_proxy_port_in_use(service: str, namespace: str, port: int) -> bool:
1116
+ if port is None:
1117
+ return False
1118
+
1119
+ # TODO(luisp): this should probably check the distributed /nail/etc/services
1120
+ # smartstack.yamls OR we should more automatically manage /etc/services
1121
+ etc_services_entry = _get_etc_services_entry(port)
1122
+ if etc_services_entry is None:
1123
+ return False
1124
+ elif f"{service}.{namespace}" == etc_services_entry:
1125
+ return False
1126
+ else:
1127
+ raise ValueError(
1128
+ (
1129
+ "port {} is already in use by {} according to /etc/services, it cannot be used by "
1130
+ "{}.{}. Please either pick a different port or update /etc/services via puppet"
1131
+ ).format(port, etc_services_entry, service, namespace),
1132
+ )
1133
+
1134
+
1135
def _check_advertise_discover(
    smartstack_data: dict[str, Any]
) -> None:  # XXX: we should use a TypedDict here
    """Need to ensure a few properties about smartstack files
    1) discover is a member of advertise
    2) discovery and advertise contain valid locations
    3) extra_advertise contains valid locations
    4) rhs of extra_advertise are >= discover type
    """

    def assert_valid_type(location_type: str) -> None:
        # location types (e.g. region) are whatever environment_tools reports
        if location_type not in available_location_types():
            raise ValueError(
                'Location type "{}" not a valid Yelp location type'.format(
                    location_type,
                ),
            )

    def assert_valid_location(location_string: str) -> None:
        # location strings look like "type:location", e.g. "region:uswest1"
        try:
            typ, loc = location_string.split(":")
            # converting a location to its own type should presumably yield
            # exactly one result for a real location - TODO confirm against
            # environment_tools.convert_location_type
            assert len(convert_location_type(loc, typ, typ)) == 1
        except Exception:
            # any failure (bad split, bad conversion, failed assert) means
            # the string isn't a real location
            raise ValueError(
                'Location string "{}" not a valid Yelp location'.format(
                    location_string,
                ),
            )

    # both advertise and discover default to "region" when unset
    advertise = smartstack_data.get("advertise", ["region"])
    discover = smartstack_data.get("discover", "region")
    if discover not in advertise:
        raise ValueError(
            'discover key "{}" not a member of advertise "{}"'.format(
                discover,
                advertise,
            ),
        )
    for location_type in [discover] + advertise:
        assert_valid_type(location_type)

    # extra_advertise maps a source location string to a list of destinations
    extra_advertisements = smartstack_data.get("extra_advertise", {})
    for source, destinations in extra_advertisements.items():
        assert_valid_location(source)
        for destination in destinations:
            assert_valid_location(destination)
            dest_type = destination.split(":")[0]
            # NOTE(review): compare_types(a, b) > 0 appears to mean "a is less
            # general than b" - confirm against environment_tools semantics
            if compare_types(dest_type, discover) > 0:
                raise ValueError(
                    'Right hand side "{}" less general than discover type "{}". Your advertisement would potentially '
                    "result in more hosts seeing your service than intended. Please change the type of your RHS to be "
                    ">= the discover type".format(destination, discover),
                )
1188
+
1189
+
1190
+ def _check_smartstack_valid_proxy(proxied_through: str, soa_dir: str) -> None:
1191
+ """Checks whether its parameter is a valid Smartstack namespace. Can be used for proxied_through or clb_proxy."""
1192
+ proxy_service, proxy_namespace = proxied_through.split(".")
1193
+ proxy_smartstack_filename = Path(soa_dir) / proxy_service / "smartstack.yaml"
1194
+ try:
1195
+ yaml_data = get_config_file_dict(proxy_smartstack_filename)
1196
+ if proxy_namespace not in yaml_data:
1197
+ raise ValueError(
1198
+ f"{proxied_through} is not a valid Smartstack namespace to proxy through: "
1199
+ f"{proxy_namespace} not found in {proxy_smartstack_filename}.",
1200
+ )
1201
+ except FileNotFoundError:
1202
+ raise ValueError(
1203
+ f"{proxied_through} is not a valid Smartstack namespace to proxy through: "
1204
+ f"{proxy_smartstack_filename} does not exist.",
1205
+ )
1206
+
1207
+
1208
+ def _check_smartstack_proxied_through(
1209
+ smartstack_data: dict[str, Any],
1210
+ soa_dir: str,
1211
+ ) -> None: # XXX: we should use a TypedDict here
1212
+ """Checks the proxied_through field of a Smartstack namespace refers to another valid Smartstack namespace"""
1213
+ if "proxied_through" not in smartstack_data:
1214
+ return
1215
+
1216
+ proxied_through = smartstack_data["proxied_through"]
1217
+ _check_smartstack_valid_proxy(proxied_through, soa_dir)
1218
+
1219
+
1220
def validate_smartstack(service_path: str) -> bool:
    """Validate a service's smartstack.yaml, if it has one.

    Returns True when the file is absent (not every service is
    mesh-registered) or when every namespace passes validation; returns False
    for an empty file. The per-namespace helpers raise ValueError on failure.
    """
    smartstack_path = os.path.join(service_path, "smartstack.yaml")
    if not os.path.exists(smartstack_path):
        # not every service is mesh-registered, exit early if this is the case
        return True

    config = get_config_file_dict(smartstack_path)
    if not config:
        print(
            failure(
                "smartstack.yaml is empty - if this service is not mesh-registered, please remove this file.",
                "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
            )
        )
        return False

    soa_dir, service = path_to_soa_dir_service(service_path)
    for namespace, namespace_config in config.items():
        # XXX(luisp): these should all really either return bools or be enforced in the schema
        # ...i'm mostly leaving these as-is since i'm trying to remove some internal code that
        # duplicates a bunch of paasta validate checks (i.e., py-gitolite)
        _check_smartstack_name_length_envoy(service, namespace)
        # use .get() so namespaces without a proxy_port don't blow up with a
        # KeyError before we can run the remaining checks
        proxy_port = namespace_config.get("proxy_port")
        if proxy_port:
            _check_smartstack_name_length(service, namespace)
            _check_proxy_port_in_use(service, namespace, proxy_port)
        _check_advertise_discover(namespace_config)
        _check_smartstack_proxied_through(namespace_config, soa_dir)

    print(success("All SmartStack configs are valid"))
    return True
1250
+
1251
+
981
1252
  def paasta_validate_soa_configs(
982
1253
  service: str, service_path: str, verbose: bool = False
983
1254
  ) -> bool:
@@ -1000,6 +1271,7 @@ def paasta_validate_soa_configs(
1000
1271
  validate_secrets,
1001
1272
  validate_min_max_instances,
1002
1273
  validate_cpu_burst,
1274
+ validate_smartstack,
1003
1275
  ]
1004
1276
 
1005
1277
  # NOTE: we're explicitly passing a list comprehension to all()
@@ -1013,8 +1285,13 @@ def paasta_validate(args):
1013
1285
 
1014
1286
  :param args: argparse.Namespace obj created from sys.args by cli
1015
1287
  """
1016
- service_path = get_service_path(args.service, args.yelpsoa_config_root)
1017
1288
  service = args.service or guess_service_name()
1289
+ service_path = get_service_path(service, args.yelpsoa_config_root)
1290
+
1291
+ # not much we can do if we have no path to inspect ;)
1292
+ if not service_path:
1293
+ return 1
1294
+
1018
1295
  if not paasta_validate_soa_configs(service, service_path, args.verbose):
1019
1296
  print("Invalid configs found. Please try again.")
1020
1297
  return 1
@@ -102,6 +102,8 @@ def add_subparser(subparsers):
102
102
  help="Name of the service which you wish to wait for deployment. "
103
103
  'Leading "services-" will be stripped.',
104
104
  required=True,
105
+ # strip any potential trailing / for folks tab-completing directories
106
+ type=lambda x: x.rstrip("/"),
105
107
  ).completer = lazy_choices_completer(list_services)
106
108
  list_parser.add_argument(
107
109
  "-t",
@@ -10,8 +10,9 @@
10
10
  "cpu",
11
11
  "piscina",
12
12
  "gunicorn",
13
- "arbitrary_promql",
14
- "active-requests"
13
+ "active-requests",
14
+ "arbitrary-promql",
15
+ "worker-load"
15
16
  ]
16
17
  },
17
18
  "decision_policy": {
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "$schema": "http://json-schema.org/draft-04/schema#",
3
- "description": "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html#kubernetes-clustername-yaml",
3
+ "description": "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html#eks-clustername-yaml",
4
4
  "type": "object",
5
5
  "additionalProperties": false,
6
6
  "minProperties": 1,
@@ -955,6 +955,28 @@
955
955
  },
956
956
  "max_skew": {
957
957
  "type": "integer"
958
+ },
959
+ "match_label_keys": {
960
+ "type": "array",
961
+ "items": {
962
+ "allOf": [
963
+ {
964
+ "type": "string",
965
+ "pattern": "^([a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?/)?[a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?$",
966
+ "maxLength": 63
967
+ },
968
+ {
969
+ "not": {
970
+ "enum": [
971
+ "paasta.yelp.com/service",
972
+ "paasta.yelp.com/instance"
973
+ ]
974
+ }
975
+ }
976
+ ],
977
+ "$comment": "Valid Kubernetes label key: optional prefix (DNS subdomain) followed by '/' and name segment (max 63 chars each)"
978
+ },
979
+ "uniqueItems": true
958
980
  }
959
981
  },
960
982
  "required": []
@@ -62,6 +62,11 @@
62
62
  "minimum": 0,
63
63
  "maximum": 60000
64
64
  },
65
+ "timeout_client_ms": {
66
+ "type": "integer",
67
+ "minimum": 0,
68
+ "maximum": 86400000
69
+ },
65
70
  "timeout_server_ms": {
66
71
  "type": "integer",
67
72
  "minimum": 0,
@@ -132,6 +137,10 @@
132
137
  "CLUSTER_PROVIDED"
133
138
  ]
134
139
  },
140
+ "choice_count": {
141
+ "type": "integer",
142
+ "minimum": 1
143
+ },
135
144
  "chaos": {
136
145
  "type": "object",
137
146
  "additionalProperties": {
@@ -156,6 +165,9 @@
156
165
  "proxied_through": {
157
166
  "type": "string"
158
167
  },
168
+ "clb_proxy": {
169
+ "type": "string"
170
+ },
159
171
  "fixed_delay": {
160
172
  "type": "object",
161
173
  "additionalProperties": {
paasta_tools/cli/utils.py CHANGED
@@ -577,7 +577,8 @@ def lazy_choices_completer(list_func):
577
577
 
578
578
  def figure_out_service_name(args, soa_dir=DEFAULT_SOA_DIR):
579
579
  """Figures out and validates the input service name"""
580
- service = args.service or guess_service_name()
580
+ # most cmds should be doing this rstrip already - but just in case this is called from some other path...
581
+ service = args.service.rstrip("/") if args.service else guess_service_name()
581
582
  try:
582
583
  validate_service_name(service, soa_dir=soa_dir)
583
584
  except NoSuchService as service_not_found:
@@ -181,6 +181,9 @@ def get_report_from_splunk(creds, app, filename, criteria_filter):
181
181
  "date": d["result"]["_time"].split(" ")[0],
182
182
  "instance": criteria.split(" ")[2],
183
183
  "money": d["result"].get("estimated_monthly_savings", 0),
184
+ "old_cpus": d["result"].get("current_cpus"),
185
+ "old_disk": d["result"].get("current_disk"),
186
+ "old_mem": d["result"].get("current_mem"),
184
187
  "owner": d["result"].get("service_owner", "Unavailable"),
185
188
  "project": d["result"].get("project", "Unavailable"),
186
189
  "service": criteria.split(" ")[0],
@@ -192,24 +195,17 @@ def get_report_from_splunk(creds, app, filename, criteria_filter):
192
195
  "max_instances": d["result"].get("suggested_max_instances"),
193
196
  "mem": d["result"].get("suggested_mem"),
194
197
  "min_instances": d["result"].get("suggested_min_instances"),
195
- "old_cpus": d["result"].get("current_cpus"),
196
- "old_disk": d["result"].get("current_disk"),
197
- "old_mem": d["result"].get("current_mem"),
198
198
  }
199
199
 
200
200
  # the report we get is all strings, so we need to convert them to the right types
201
201
  field_conversions = {
202
- "current_cpus": float,
203
- "suggested_cpu_burst_add": float,
204
- "suggested_cpus": float,
205
- "suggested_disk": int,
206
- "suggested_hacheck_cpus": float,
207
- "suggested_max_instances": int,
208
- "suggested_mem": int,
209
- "suggested_min_instances": int,
210
- # not quite sure why these are floats...they're ints in soaconfigs
211
- "current_disk": _force_str_to_int,
212
- "current_mem": _force_str_to_int,
202
+ "cpus": float,
203
+ "cpu_burst_add": float,
204
+ "disk": int,
205
+ "hacheck_cpus": float,
206
+ "max_instances": int,
207
+ "mem": int,
208
+ "min_instances": int,
213
209
  }
214
210
 
215
211
  # merge results if we've already seen rows for this service
@@ -101,6 +101,8 @@ def parse_args() -> argparse.Namespace:
101
101
  "--service",
102
102
  required=True,
103
103
  help="Service name to make the deployments.json for",
104
+ # strip any potential trailing / for folks tab-completing directories
105
+ type=lambda x: x.rstrip("/"),
104
106
  )
105
107
  args = parser.parse_args()
106
108
  return args
@@ -1,8 +1,6 @@
1
1
  from typing import Optional
2
2
 
3
- from kubernetes.client.models.v2beta2_object_metric_status import (
4
- V2beta2ObjectMetricStatus,
5
- )
3
+ from kubernetes.client import V2ObjectMetricStatus
6
4
  from mypy_extensions import TypedDict
7
5
 
8
6
 
@@ -102,7 +100,7 @@ class HPAMetricsParser:
102
100
  )
103
101
 
104
102
  def parse_object_metric(
105
- self, metric_spec: V2beta2ObjectMetricStatus, status: HPAMetricsDict
103
+ self, metric_spec: V2ObjectMetricStatus, status: HPAMetricsDict
106
104
  ) -> None:
107
105
  status["name"] = metric_spec.metric.name
108
106
  status["target_value"] = (
@@ -112,7 +110,7 @@ class HPAMetricsParser:
112
110
  )
113
111
 
114
112
  def parse_object_metric_current(
115
- self, metric_spec: V2beta2ObjectMetricStatus, status: HPAMetricsDict
113
+ self, metric_spec: V2ObjectMetricStatus, status: HPAMetricsDict
116
114
  ) -> None:
117
115
  status["name"] = metric_spec.metric.name
118
116
  status["current_value"] = (