paasta-tools 1.27.0__py3-none-any.whl → 1.35.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- paasta_tools/__init__.py +1 -1
- paasta_tools/api/api_docs/swagger.json +9 -1
- paasta_tools/api/tweens/auth.py +2 -1
- paasta_tools/api/views/instance.py +9 -2
- paasta_tools/api/views/remote_run.py +2 -0
- paasta_tools/async_utils.py +4 -1
- paasta_tools/bounce_lib.py +8 -5
- paasta_tools/check_services_replication_tools.py +10 -4
- paasta_tools/check_spark_jobs.py +1 -1
- paasta_tools/cli/cli.py +4 -4
- paasta_tools/cli/cmds/autoscale.py +2 -0
- paasta_tools/cli/cmds/check.py +2 -0
- paasta_tools/cli/cmds/cook_image.py +2 -0
- paasta_tools/cli/cmds/get_docker_image.py +2 -0
- paasta_tools/cli/cmds/get_image_version.py +2 -0
- paasta_tools/cli/cmds/get_latest_deployment.py +2 -0
- paasta_tools/cli/cmds/info.py +10 -3
- paasta_tools/cli/cmds/itest.py +2 -0
- paasta_tools/cli/cmds/list_namespaces.py +2 -0
- paasta_tools/cli/cmds/local_run.py +122 -27
- paasta_tools/cli/cmds/logs.py +31 -7
- paasta_tools/cli/cmds/mark_for_deployment.py +14 -4
- paasta_tools/cli/cmds/mesh_status.py +3 -2
- paasta_tools/cli/cmds/push_to_registry.py +2 -0
- paasta_tools/cli/cmds/remote_run.py +156 -12
- paasta_tools/cli/cmds/rollback.py +6 -2
- paasta_tools/cli/cmds/secret.py +4 -2
- paasta_tools/cli/cmds/security_check.py +2 -0
- paasta_tools/cli/cmds/spark_run.py +7 -3
- paasta_tools/cli/cmds/status.py +59 -29
- paasta_tools/cli/cmds/validate.py +325 -40
- paasta_tools/cli/cmds/wait_for_deployment.py +2 -0
- paasta_tools/cli/schemas/adhoc_schema.json +3 -0
- paasta_tools/cli/schemas/autoscaling_schema.json +3 -2
- paasta_tools/cli/schemas/eks_schema.json +24 -1
- paasta_tools/cli/schemas/kubernetes_schema.json +1 -0
- paasta_tools/cli/schemas/smartstack_schema.json +14 -0
- paasta_tools/cli/utils.py +34 -20
- paasta_tools/contrib/bounce_log_latency_parser.py +1 -1
- paasta_tools/contrib/check_orphans.py +1 -1
- paasta_tools/contrib/get_running_task_allocation.py +1 -1
- paasta_tools/contrib/ide_helper.py +14 -14
- paasta_tools/contrib/mock_patch_checker.py +1 -1
- paasta_tools/contrib/paasta_update_soa_memcpu.py +10 -14
- paasta_tools/contrib/render_template.py +1 -1
- paasta_tools/contrib/shared_ip_check.py +1 -1
- paasta_tools/generate_deployments_for_service.py +2 -0
- paasta_tools/instance/hpa_metrics_parser.py +3 -5
- paasta_tools/instance/kubernetes.py +70 -36
- paasta_tools/kubernetes/application/controller_wrappers.py +23 -2
- paasta_tools/kubernetes/remote_run.py +52 -25
- paasta_tools/kubernetes_tools.py +60 -69
- paasta_tools/long_running_service_tools.py +15 -5
- paasta_tools/mesos/master.py +1 -1
- paasta_tools/metrics/metastatus_lib.py +1 -25
- paasta_tools/metrics/metrics_lib.py +12 -3
- paasta_tools/paastaapi/__init__.py +1 -1
- paasta_tools/paastaapi/api/autoscaler_api.py +1 -1
- paasta_tools/paastaapi/api/default_api.py +1 -1
- paasta_tools/paastaapi/api/remote_run_api.py +1 -1
- paasta_tools/paastaapi/api/resources_api.py +1 -1
- paasta_tools/paastaapi/api/service_api.py +1 -1
- paasta_tools/paastaapi/api_client.py +1 -1
- paasta_tools/paastaapi/configuration.py +2 -2
- paasta_tools/paastaapi/exceptions.py +1 -1
- paasta_tools/paastaapi/model/adhoc_launch_history.py +1 -1
- paasta_tools/paastaapi/model/autoscaler_count_msg.py +1 -1
- paasta_tools/paastaapi/model/autoscaling_override.py +1 -1
- paasta_tools/paastaapi/model/deploy_queue.py +1 -1
- paasta_tools/paastaapi/model/deploy_queue_service_instance.py +1 -1
- paasta_tools/paastaapi/model/envoy_backend.py +1 -1
- paasta_tools/paastaapi/model/envoy_location.py +1 -1
- paasta_tools/paastaapi/model/envoy_status.py +1 -1
- paasta_tools/paastaapi/model/flink_cluster_overview.py +1 -1
- paasta_tools/paastaapi/model/flink_config.py +1 -1
- paasta_tools/paastaapi/model/flink_job.py +1 -1
- paasta_tools/paastaapi/model/flink_job_details.py +1 -1
- paasta_tools/paastaapi/model/flink_jobs.py +1 -1
- paasta_tools/paastaapi/model/float_and_error.py +1 -1
- paasta_tools/paastaapi/model/hpa_metric.py +1 -1
- paasta_tools/paastaapi/model/inline_object.py +1 -1
- paasta_tools/paastaapi/model/inline_response200.py +1 -1
- paasta_tools/paastaapi/model/inline_response2001.py +1 -1
- paasta_tools/paastaapi/model/inline_response202.py +1 -1
- paasta_tools/paastaapi/model/inline_response403.py +1 -1
- paasta_tools/paastaapi/model/instance_bounce_status.py +1 -1
- paasta_tools/paastaapi/model/instance_mesh_status.py +1 -1
- paasta_tools/paastaapi/model/instance_status.py +1 -1
- paasta_tools/paastaapi/model/instance_status_adhoc.py +1 -1
- paasta_tools/paastaapi/model/instance_status_cassandracluster.py +1 -1
- paasta_tools/paastaapi/model/instance_status_flink.py +1 -1
- paasta_tools/paastaapi/model/instance_status_kafkacluster.py +1 -1
- paasta_tools/paastaapi/model/instance_status_kubernetes.py +1 -1
- paasta_tools/paastaapi/model/instance_status_kubernetes_autoscaling_status.py +1 -1
- paasta_tools/paastaapi/model/instance_status_kubernetes_v2.py +1 -1
- paasta_tools/paastaapi/model/instance_status_tron.py +1 -1
- paasta_tools/paastaapi/model/instance_tasks.py +1 -1
- paasta_tools/paastaapi/model/integer_and_error.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_container.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_container_v2.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_healthcheck.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_pod.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_pod_event.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_pod_v2.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_replica_set.py +1 -1
- paasta_tools/paastaapi/model/kubernetes_version.py +4 -1
- paasta_tools/paastaapi/model/remote_run_outcome.py +1 -1
- paasta_tools/paastaapi/model/remote_run_start.py +4 -1
- paasta_tools/paastaapi/model/remote_run_stop.py +1 -1
- paasta_tools/paastaapi/model/remote_run_token.py +1 -1
- paasta_tools/paastaapi/model/resource.py +1 -1
- paasta_tools/paastaapi/model/resource_item.py +1 -1
- paasta_tools/paastaapi/model/resource_value.py +1 -1
- paasta_tools/paastaapi/model/smartstack_backend.py +1 -1
- paasta_tools/paastaapi/model/smartstack_location.py +1 -1
- paasta_tools/paastaapi/model/smartstack_status.py +1 -1
- paasta_tools/paastaapi/model/task_tail_lines.py +1 -1
- paasta_tools/paastaapi/model_utils.py +1 -1
- paasta_tools/paastaapi/rest.py +1 -1
- paasta_tools/remote_git.py +2 -2
- paasta_tools/run-paasta-api-in-dev-mode.py +2 -2
- paasta_tools/run-paasta-api-playground.py +2 -2
- paasta_tools/setup_kubernetes_job.py +43 -1
- paasta_tools/setup_prometheus_adapter_config.py +82 -0
- paasta_tools/setup_tron_namespace.py +2 -2
- paasta_tools/tron_tools.py +4 -1
- paasta_tools/utils.py +29 -11
- paasta_tools/yaml_tools.py +1 -1
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_orphans.py +1 -1
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_spark_jobs.py +1 -1
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/generate_deployments_for_service.py +2 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/get_running_task_allocation.py +1 -1
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/ide_helper.py +14 -14
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_update_soa_memcpu.py +10 -14
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_job.py +43 -1
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_prometheus_adapter_config.py +82 -0
- paasta_tools-1.35.8.dist-info/METADATA +79 -0
- {paasta_tools-1.27.0.dist-info → paasta_tools-1.35.8.dist-info}/RECORD +186 -191
- {paasta_tools-1.27.0.dist-info → paasta_tools-1.35.8.dist-info}/WHEEL +1 -1
- paasta_tools/frameworks/adhoc_scheduler.py +0 -71
- paasta_tools/frameworks/native_scheduler.py +0 -652
- paasta_tools/frameworks/task_store.py +0 -245
- paasta_tools/mesos_maintenance.py +0 -848
- paasta_tools/paasta_native_serviceinit.py +0 -21
- paasta_tools-1.27.0.dist-info/METADATA +0 -75
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/apply_external_resources.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/bounce_log_latency_parser.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_autoscaler_max_instances.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_cassandracluster_services_replication.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_flink_services_health.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_kubernetes_api.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_kubernetes_services_replication.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_manual_oapi_changes.sh +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/check_oom_events.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_cr.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_crd.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/cleanup_kubernetes_jobs.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/create_dynamodb_table.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/create_paasta_playground.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/delete_kubernetes_deployments.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/emit_allocated_cpu_metrics.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/generate_all_deployments +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/generate_authenticating_services.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/generate_services_file.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/generate_services_yaml.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/habitat_fixer.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/is_pod_healthy_in_proxy.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/is_pod_healthy_in_smartstack.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/kill_bad_containers.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/kubernetes_remove_evicted_pods.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/mass-deploy-tag.sh +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/mock_patch_checker.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_cleanup_remote_run_resources.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_cleanup_stale_nodes.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_deploy_tron_jobs +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_execute_docker_command.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_secrets_sync.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/paasta_tabcomplete.sh +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/render_template.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/rightsizer_soaconfigs_update.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/service_shard_remove.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/service_shard_update.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_istio_mesh.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_cr.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_crd.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/setup_kubernetes_internal_crd.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/shared_ip_check.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/synapse_srv_namespaces_fact.py +0 -0
- {paasta_tools-1.27.0.data → paasta_tools-1.35.8.data}/scripts/timeouts_metrics_prom.py +0 -0
- {paasta_tools-1.27.0.dist-info → paasta_tools-1.35.8.dist-info}/entry_points.txt +0 -0
- {paasta_tools-1.27.0.dist-info → paasta_tools-1.35.8.dist-info/licenses}/LICENSE +0 -0
- {paasta_tools-1.27.0.dist-info → paasta_tools-1.35.8.dist-info}/top_level.txt +0 -0
paasta_tools/cli/cmds/validate.py:

@@ -22,6 +22,7 @@ from datetime import datetime
 from functools import lru_cache
 from functools import partial
 from glob import glob
+from pathlib import Path
 from typing import Any
 from typing import Callable
 from typing import cast
@@ -34,6 +35,9 @@ from typing import Union
 
 import pytz
 from croniter import croniter
+from environment_tools.type_utils import available_location_types
+from environment_tools.type_utils import compare_types
+from environment_tools.type_utils import convert_location_type
 from jsonschema import Draft4Validator
 from jsonschema import exceptions
 from jsonschema import FormatChecker
@@ -64,6 +68,7 @@ from paasta_tools.long_running_service_tools import METRICS_PROVIDER_PISCINA
 from paasta_tools.long_running_service_tools import METRICS_PROVIDER_PROMQL
 from paasta_tools.long_running_service_tools import METRICS_PROVIDER_UWSGI
 from paasta_tools.long_running_service_tools import METRICS_PROVIDER_UWSGI_V2
+from paasta_tools.long_running_service_tools import METRICS_PROVIDER_WORKER_LOAD
 from paasta_tools.secret_tools import get_secret_name_from_ref
 from paasta_tools.secret_tools import is_secret_ref
 from paasta_tools.secret_tools import is_shared_secret
@@ -120,6 +125,7 @@ SCHEMA_TYPES = {
     "rollback",  # automatic rollbacks during deployments
     "tron",  # batch workloads
     "eks",  # eks workloads
+    "smartstack",  # mesh configs
     "autotuned_defaults/kubernetes",
     "autotuned_defaults/cassandracluster",
 }
@@ -163,9 +169,33 @@ INVALID_AUTOSCALING_FIELDS = {
         "desired_active_requests_per_replica",
         "prometheus-adapter-config",
     },
+    METRICS_PROVIDER_WORKER_LOAD: {
+        "desired_active_requests_per_replica",
+        "prometheus-adapter-config",
+    },
     METRICS_PROVIDER_PROMQL: {"desired_active_requests_per_replica"},
 }
 
+# Listener names in Envoy cannot exceed 128 characters and use the
+# following format:
+# service_name.namespace.listener
+# This naming scheme leaves 128 - 10 = 118 characters
+# for the Smartstack service name and namespace.
+# See COREBACK-6303 for more context.
+MAX_ENVOY_NAME_LEN = 118
+
+# Socket names cannot exceed 108 characters, and the longest socket
+# name generated by HAProxy uses the following format:
+# /var/run/synapse/sockets/service_name.namespace.LONGPID.sock.tmp
+# This naming scheme leaves 108 - 43 = 65 characters combined for the
+# Smartstack service name and namespace. We leave a generous buffer
+# to arrive at a maximum name length of 55, in case e.g. the .sock
+# suffix is renamed to a longer name for certain sockets.
+# See SMTSTK-204 for more context.
+# NOTE: the above is mostly still true - but the path we use is now /var/run/envoy/sockets/...
+# so we may want to adjust this a tad in the future ;)
+MAX_SMARTSTACK_NAME_LEN = 55
+
 
 class ConditionConfig(TypedDict, total=False):
     """
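The two constants above encode a simple length budget: an Envoy listener name is "service.namespace.listener", so two dots plus the literal "listener" consume 10 of the 128 allowed characters. A minimal sketch of that arithmetic (service and namespace names here are made up):

MAX_ENVOY_NAME_LEN = 118  # 128 - len("..listener")

def fits_envoy_listener(service: str, namespace: str) -> bool:
    # The generated listener name is "<service>.<namespace>.listener".
    return len(f"{service}.{namespace}.listener") <= 128

assert len("..listener") == 10
assert fits_envoy_listener("a" * 60, "b" * 58)      # 60 + 58 = 118, exactly at the limit
assert not fits_envoy_listener("a" * 60, "b" * 59)  # 119, over budget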
@@ -339,11 +369,9 @@ def get_config_file_dict(file_path: str, use_ruamel: bool = False) -> Dict[Any,
         # sets disk: 100 -> an instance uses that template and overwrites
         # it with disk: 1000)
         ruamel_loader.allow_duplicate_keys = True
-        # we
-        #
-
-            SafeConstructor.flatten_mapping
-        )
+        # Note: we do NOT use flatten_mapping here because it breaks simple
+        # override patterns. For nested merge patterns, we load a flattened
+        # version separately for comment detection only.
         return ruamel_loader.load(config_file)
     else:
         return yaml.safe_load(config_file)
@@ -430,6 +458,8 @@ def add_subparser(subparsers):
         "--service",
         required=False,
         help="Service that you want to validate. Like 'example_service'.",
+        # strip any potential trailing / for folks tab-completing directories
+        type=lambda x: x.rstrip("/"),
     ).completer = lazy_choices_completer(list_services)
     validate_parser.add_argument(
         "-v",
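The new type=lambda x: x.rstrip("/") converter simply normalizes tab-completed directory names before they reach the validator; a standalone sketch (parser setup and values are illustrative):

import argparse

parser = argparse.ArgumentParser()
# Same idea as the diff above: strip a trailing "/" left over from shell tab completion.
parser.add_argument("-s", "--service", type=lambda x: x.rstrip("/"))

args = parser.parse_args(["--service", "example_service/"])
assert args.service == "example_service"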
@@ -504,23 +534,32 @@ def validate_tron(service_path: str, verbose: bool = False) -> bool:
     for cluster in list_tron_clusters(service, soa_dir):
         if not validate_tron_namespace(service, cluster, soa_dir):
             returncode = False
-
-
-
-
-
-
-            )
-
-
-
-
-
+        # service config has been validated and cron schedules should be safe to parse
+        # TODO(TRON-1761): unify tron/paasta validate cron syntax validation
+        service_config = load_tron_service_config(
+            service=service, cluster=cluster, soa_dir=soa_dir
+        )
+        for config in service_config:
+            cron_expression = config.get_cron_expression()
+            if cron_expression:
+                try:
+                    upcoming_runs = get_upcoming_runs(config, cron_expression)
+                    if verbose:
+                        print(info_message(f"Upcoming runs for {config.get_name()}:"))
+                        for run in upcoming_runs:
+                            print(f"\t{run}")
+                except Exception as e:
+                    print(
+                        failure(
+                            f"Invalid schedule ({cron_expression}) for {config.get_name()}: {e}",
+                            "http://crontab.guru",
+                        )
+                    )
+                    returncode = False
     return returncode
 
 
-def print_upcoming_runs(config: TronJobConfig, cron_expression: str) -> None:
-    print(info_message(f"Upcoming runs for {config.get_name()}:"))
+def get_upcoming_runs(config: TronJobConfig, cron_expression: str) -> List[datetime]:
 
     config_tz = config.get_time_zone() or DEFAULT_TZ
 
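The cron check above ultimately leans on croniter through list_upcoming_runs (shown further down in this diff): a malformed expression raises when the iterator is built, which validate_tron turns into a failure() message. A minimal sketch of the same idea, with an example schedule and timezone:

from datetime import datetime
from typing import List

import pytz
from croniter import croniter

def list_upcoming_runs(cron_schedule: str, starting_from: datetime, num_runs: int = 5) -> List[datetime]:
    it = croniter(cron_schedule, starting_from)
    return [it.get_next(datetime) for _ in range(num_runs)]

start = pytz.timezone("US/Pacific").localize(datetime(2024, 1, 1))
print(list_upcoming_runs("0 4 * * *", start))  # the next five 04:00 runs
# An invalid expression such as "0 4 * *" raises inside croniter,
# which validate_tron reports with a pointer to http://crontab.guru.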
@@ -528,9 +567,7 @@ def print_upcoming_runs(config: TronJobConfig, cron_expression: str) -> None:
         cron_schedule=cron_expression,
         starting_from=pytz.timezone(config_tz).localize(datetime.today()),
     )
-
-    for run in next_cron_runs:
-        print(f"\t{run}")
+    return next_cron_runs
 
 
 def validate_tron_namespace(service, cluster, soa_dir, tron_dir=False):
@@ -566,7 +603,7 @@ def validate_paasta_objects(service_path):
         errors = "\n".join(messages)
         print(failure((f"There were failures validating {service}: {errors}"), ""))
     else:
-        print(success(
+        print(success("All PaaSTA Instances for are valid for all clusters"))
 
     return returncode
 
@@ -600,7 +637,22 @@ def validate_unique_instance_names(service_path):
     return check_passed
 
 
-def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]:
+def _get_config_flattened(file_path: str) -> CommentedMap:
+    """Load config with flatten_mapping enabled (for nested merge pattern comment detection)"""
+    config_file = get_file_contents(file_path)
+    ruamel_loader = YAML(typ="rt")
+    ruamel_loader.allow_duplicate_keys = True
+    ruamel_loader.Constructor.flatten_mapping = SafeConstructor.flatten_mapping
+    return ruamel_loader.load(config_file)
+
+
+def _get_comments_for_key(
+    data: CommentedMap,
+    key: Any,
+    full_config: Optional[Dict[Any, Any]] = None,
+    key_value: Any = None,
+    full_config_flattened: Optional[Dict[Any, Any]] = None,
+) -> Optional[str]:
     # this is a little weird, but ruamel is returning a list that looks like:
     # [None, None, CommentToken(...), None] for some reason instead of just a
     # single string
@@ -617,6 +669,31 @@ def _get_comments_for_key(data: CommentedMap, key: Any) -> Optional[str]:
 
     raw_comments = [*_flatten_comments(data.ca.items.get(key, []))]
     if not raw_comments:
+        # If we didn't find a comment in the instance itself, check if this key
+        # might be inherited from a template. Look for ANY other instance/template
+        # in the config that has the same key with the same value and a comment.
+        if full_config is not None and key_value is not None:
+            for config_key, config_value in full_config.items():
+                if isinstance(config_value, CommentedMap):
+                    if config_value.get(key) == key_value:
+                        other_comments = [
+                            *_flatten_comments(config_value.ca.items.get(key, []))
+                        ]
+                        if other_comments:
+                            return "".join(other_comments)
+
+        # If still not found and we have a flattened config, check there
+        # (flattened config is needed for nested merges)
+        if full_config_flattened is not None:
+            for config_key, config_value in full_config_flattened.items():
+                if isinstance(config_value, CommentedMap):
+                    if config_value.get(key) == key_value:
+                        flattened_comments = [
+                            *_flatten_comments(config_value.ca.items.get(key, []))
+                        ]
+                        if flattened_comments:
+                            return "".join(flattened_comments)
+
         # return None so that we don't return an empty string below if there really aren't
         # any comments
         return None
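The helper above walks ruamel's ca.items comment attachments; the following standalone sketch shows where those comments live (the YAML snippet, key names, and ticket reference are invented for illustration):

from ruamel.yaml import YAML

yaml_text = """\
_template: &template
  cpus: 2  # ack: intentionally overridden, see SOME-TICKET
myinstance:
  <<: *template
"""
data = YAML(typ="rt").load(yaml_text)

# Round-trip loading keeps end-of-line comments in each mapping's .ca.items,
# keyed by the field name, roughly as [None, None, CommentToken(...), None].
print(data["_template"].ca.items.get("cpus"))
# "myinstance" inherits cpus via the merge key, so it carries no comment of its own;
# that is exactly the case the full_config / flattened lookups above handle.
print(data["myinstance"].ca.items.get("cpus"))  # -> None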
@@ -734,7 +811,7 @@ def validate_autoscaling_configs(service_path: str) -> bool:
                     and configured_provider_count > 1
                 ):
                     raise AutoscalingValidationError(
-
+                        "cannot use bespoke autoscaling with HPA autoscaling"
                     )
                 if metrics_provider["type"] in seen_provider_types:
                     raise AutoscalingValidationError(
@@ -765,12 +842,13 @@ def validate_autoscaling_configs(service_path: str) -> bool:
 
             # we need access to the comments, so we need to read the config with ruamel to be able
             # to actually get them in a "nice" automated fashion
+            config_file_path = os.path.join(
+                soa_dir,
+                service,
+                f"{instance_config.get_instance_type()}-{cluster}.yaml",
+            )
             config = get_config_file_dict(
-                os.path.join(
-                    soa_dir,
-                    service,
-                    f"{instance_config.get_instance_type()}-{cluster}.yaml",
-                ),
+                config_file_path,
                 use_ruamel=True,
             )
             if config[instance].get("cpus") is None:
@@ -785,8 +863,15 @@ def validate_autoscaling_configs(service_path: str) -> bool:
                 # cpu autoscaled, but using autotuned values - can skip
                 continue
 
+            # Load flattened config for comment detection (handles nested merges)
+            config_flattened = _get_config_flattened(config_file_path)
+
             cpu_comment = _get_comments_for_key(
-                data=config[instance],
+                data=config[instance],
+                key="cpus",
+                full_config=config,
+                key_value=config[instance].get("cpus"),
+                full_config_flattened=config_flattened,
             )
             # we could probably have a separate error message if there's a comment that doesn't match
             # the ack pattern, but that seems like overkill - especially for something that could cause
@@ -801,7 +886,7 @@ def validate_autoscaling_configs(service_path: str) -> bool:
             ):
                 link = "y/override-cpu-autotune"
                 raise AutoscalingValidationError(
-
+                    "CPU override detected for a CPU-autoscaled instance; "
                     "see the following link for next steps:"
                 )
         except AutoscalingValidationError as e:
@@ -874,7 +959,7 @@ def check_secrets_for_instance(
 
 def list_upcoming_runs(
     cron_schedule: str, starting_from: datetime, num_runs: int = 5
-) -> List[
+) -> List[datetime]:
     iter = croniter(cron_schedule, starting_from)
     return [iter.get_next(datetime) for _ in range(num_runs)]
 
@@ -928,12 +1013,13 @@ def validate_cpu_burst(service_path: str) -> bool:
         if is_k8s_service and not should_skip_cpu_burst_validation:
             # we need access to the comments, so we need to read the config with ruamel to be able
             # to actually get them in a "nice" automated fashion
+            config_file_path = os.path.join(
+                soa_dir,
+                service,
+                f"{instance_config.get_instance_type()}-{cluster}.yaml",
+            )
             config = get_config_file_dict(
-                os.path.join(
-                    soa_dir,
-                    service,
-                    f"{instance_config.get_instance_type()}-{cluster}.yaml",
-                ),
+                config_file_path,
                 use_ruamel=True,
             )
 
|
|
@@ -944,8 +1030,15 @@ def validate_cpu_burst(service_path: str) -> bool:
|
|
|
944
1030
|
# under the threshold - can also skip
|
|
945
1031
|
continue
|
|
946
1032
|
|
|
1033
|
+
# Load flattened config for comment detection (handles nested merges)
|
|
1034
|
+
config_flattened = _get_config_flattened(config_file_path)
|
|
1035
|
+
|
|
947
1036
|
burst_comment = _get_comments_for_key(
|
|
948
|
-
data=config[instance],
|
|
1037
|
+
data=config[instance],
|
|
1038
|
+
key="cpu_burst_add",
|
|
1039
|
+
full_config=config,
|
|
1040
|
+
key_value=config[instance].get("cpu_burst_add"),
|
|
1041
|
+
full_config_flattened=config_flattened,
|
|
949
1042
|
)
|
|
950
1043
|
# we could probably have a separate error message if there's a comment that doesn't match
|
|
951
1044
|
# the ack pattern, but that seems like overkill - especially for something that could cause
|
|
@@ -971,6 +1064,191 @@ def validate_cpu_burst(service_path: str) -> bool:
     return returncode
 
 
+def _check_smartstack_name_length_envoy(service: str, namespace: str) -> None:
+    """Ensures that Smartstack service name and namespace does not
+    exceed the limit on the length of Envoy's listener names
+    """
+    if len(service) + len(namespace) > MAX_ENVOY_NAME_LEN:
+        raise ValueError(
+            "Service name and namespace exceeds max listener name length in Envoy. Note that the full listener name "
+            'is "{}.{}.listener". Please rename so that the combined length of the service name and namespace does '
+            "not exceed {} characters".format(service, namespace, MAX_ENVOY_NAME_LEN),
+        )
+
+
+def _check_smartstack_name_length(service: str, namespace: str) -> None:
+    """Ensure that Smartstack name does not
+    exceed limits on HAProxy socket name
+    """
+    if len(service + namespace) > MAX_SMARTSTACK_NAME_LEN:
+        socket_name = "/var/run/synapse/sockets/{}.{}.sock.LONGPID.tmp".format(
+            service,
+            namespace,
+        )
+        raise ValueError(
+            "Name exceeds max socket name length. Note that the full socket name under the HAProxy naming scheme "
+            'is "{}". Please rename so that the combined length of the service name and namespace does not '
+            "exceed {} characters".format(socket_name, MAX_SMARTSTACK_NAME_LEN),
+        )
+
+
+@lru_cache()
+def _get_etc_services() -> list[str]:
+    with open("/etc/services") as f:
+        return f.read().splitlines()
+
+
+@lru_cache()
+def _get_etc_services_entry(port_lookup: int) -> str | None:
+    entries = _get_etc_services()
+    for entry in entries:
+        try:
+            service = entry.split()[0]
+            port = entry.split()[1]
+            if port.startswith("%s/" % str(port_lookup)):
+                return service
+        except IndexError:
+            continue
+    return None
+
+
+def _check_proxy_port_in_use(service: str, namespace: str, port: int) -> bool:
+    if port is None:
+        return False
+
+    # TODO(luisp): this should probably check the distributed /nail/etc/services
+    # smartstack.yamls OR we should more automatically manage /etc/services
+    etc_services_entry = _get_etc_services_entry(port)
+    if etc_services_entry is None:
+        return False
+    elif f"{service}.{namespace}" == etc_services_entry:
+        return False
+    else:
+        raise ValueError(
+            (
+                "port {} is already in use by {} according to /etc/services, it cannot be used by "
+                "{}.{}. Please either pick a different port or update /etc/services via puppet"
+            ).format(port, etc_services_entry, service, namespace),
+        )
+
+
+def _check_advertise_discover(
+    smartstack_data: dict[str, Any]
+) -> None:  # XXX: we should use a TypedDict here
+    """Need to ensure a few properties about smartstack files
+    1) discover is a member of advertise
+    2) discovery and advertise contain valid locations
+    3) extra_advertise contains valid locations
+    4) rhs of extra_advertise are >= discover type
+    """
+
+    def assert_valid_type(location_type: str) -> None:
+        if location_type not in available_location_types():
+            raise ValueError(
+                'Location type "{}" not a valid Yelp location type'.format(
+                    location_type,
+                ),
+            )
+
+    def assert_valid_location(location_string: str) -> None:
+        try:
+            typ, loc = location_string.split(":")
+            assert len(convert_location_type(loc, typ, typ)) == 1
+        except Exception:
+            raise ValueError(
+                'Location string "{}" not a valid Yelp location'.format(
+                    location_string,
+                ),
+            )
+
+    advertise = smartstack_data.get("advertise", ["region"])
+    discover = smartstack_data.get("discover", "region")
+    if discover not in advertise:
+        raise ValueError(
+            'discover key "{}" not a member of advertise "{}"'.format(
+                discover,
+                advertise,
+            ),
+        )
+    for location_type in [discover] + advertise:
+        assert_valid_type(location_type)
+
+    extra_advertisements = smartstack_data.get("extra_advertise", {})
+    for source, destinations in extra_advertisements.items():
+        assert_valid_location(source)
+        for destination in destinations:
+            assert_valid_location(destination)
+            dest_type = destination.split(":")[0]
+            if compare_types(dest_type, discover) > 0:
+                raise ValueError(
+                    'Right hand side "{}" less general than discover type "{}". Your advertisement would potentially '
+                    "result in more hosts seeing your service than intended. Please change the type of your RHS to be "
+                    ">= the discover type".format(destination, discover),
+                )
+
+
+def _check_smartstack_valid_proxy(proxied_through: str, soa_dir: str) -> None:
+    """Checks whether its parameter is a valid Smartstack namespace. Can be used for proxied_through or clb_proxy."""
+    proxy_service, proxy_namespace = proxied_through.split(".")
+    proxy_smartstack_filename = Path(soa_dir) / proxy_service / "smartstack.yaml"
+    try:
+        yaml_data = get_config_file_dict(proxy_smartstack_filename)
+        if proxy_namespace not in yaml_data:
+            raise ValueError(
+                f"{proxied_through} is not a valid Smartstack namespace to proxy through: "
+                f"{proxy_namespace} not found in {proxy_smartstack_filename}.",
+            )
+    except FileNotFoundError:
+        raise ValueError(
+            f"{proxied_through} is not a valid Smartstack namespace to proxy through: "
+            f"{proxy_smartstack_filename} does not exist.",
+        )
+
+
+def _check_smartstack_proxied_through(
+    smartstack_data: dict[str, Any],
+    soa_dir: str,
+) -> None:  # XXX: we should use a TypedDict here
+    """Checks the proxied_through field of a Smartstack namespace refers to another valid Smartstack namespace"""
+    if "proxied_through" not in smartstack_data:
+        return
+
+    proxied_through = smartstack_data["proxied_through"]
+    _check_smartstack_valid_proxy(proxied_through, soa_dir)
+
+
+def validate_smartstack(service_path: str) -> bool:
+    if not os.path.exists(os.path.join(service_path, "smartstack.yaml")):
+        # not every service is mesh-registered, exit early if this is the case
+        return True
+
+    config = get_config_file_dict(os.path.join(service_path, "smartstack.yaml"))
+    if not config:
+        print(
+            failure(
+                "smartstack.yaml is empty - if this service is not mesh-registered, please remove this file.",
+                "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
+            )
+        )
+        return False
+
+    soa_dir, service = path_to_soa_dir_service(service_path)
+    for namespace, namespace_config in config.items():
+        # XXX(luisp): these should all really either return bools or be enforced in the schema
+        # ...i'm mostly leaving these as-is since i'm trying to remove some internal code that
+        # duplicates a bunch of paasta validate checks (i.e., py-gitolite)
+        _check_smartstack_name_length_envoy(service, namespace)
+        if namespace_config["proxy_port"]:
+            _check_smartstack_name_length(service, namespace)
+            proxy_port = namespace_config["proxy_port"]
+            _check_proxy_port_in_use(service, namespace, proxy_port)
+        _check_advertise_discover(namespace_config)
+        _check_smartstack_proxied_through(namespace_config, soa_dir)
+
+    print(success("All SmartStack configs are valid"))
+    return True
+
+
 def paasta_validate_soa_configs(
     service: str, service_path: str, verbose: bool = False
 ) -> bool:
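To see what these checks operate on: validate_smartstack iterates the namespaces of a service's smartstack.yaml, and _check_advertise_discover enforces that the discover level is one of the advertised levels. A rough sketch of the parsed input (service name, port, and namespace values below are made up, and real location strings must also pass the environment_tools lookups):

# Hypothetical parsed smartstack.yaml, as get_config_file_dict would return it:
config = {
    "main": {
        "proxy_port": 20973,
        "advertise": ["region", "superregion"],
        "discover": "region",
        "proxied_through": "spectre.main",  # must name an existing service.namespace
    }
}
# _check_advertise_discover accepts this shape because "region" is in advertise;
# changing discover to "habitat" while advertising only ["region", "superregion"]
# would raise the 'discover key ... not a member of advertise' ValueError.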
@@ -993,6 +1271,7 @@ def paasta_validate_soa_configs(
         validate_secrets,
         validate_min_max_instances,
         validate_cpu_burst,
+        validate_smartstack,
     ]
 
     # NOTE: we're explicitly passing a list comprehension to all()
@@ -1006,7 +1285,13 @@ def paasta_validate(args):
 
     :param args: argparse.Namespace obj created from sys.args by cli
     """
-    service_path = get_service_path(args.service, args.yelpsoa_config_root)
     service = args.service or guess_service_name()
+    service_path = get_service_path(service, args.yelpsoa_config_root)
+
+    # not much we can do if we have no path to inspect ;)
+    if not service_path:
+        return 1
+
     if not paasta_validate_soa_configs(service, service_path, args.verbose):
+        print("Invalid configs found. Please try again.")
         return 1
paasta_tools/cli/cmds/wait_for_deployment.py:

@@ -102,6 +102,8 @@ def add_subparser(subparsers):
         help="Name of the service which you wish to wait for deployment. "
         'Leading "services-" will be stripped.',
         required=True,
+        # strip any potential trailing / for folks tab-completing directories
+        type=lambda x: x.rstrip("/"),
     ).completer = lazy_choices_completer(list_services)
     list_parser.add_argument(
         "-t",
paasta_tools/cli/schemas/eks_schema.json:

@@ -1,6 +1,6 @@
 {
     "$schema": "http://json-schema.org/draft-04/schema#",
-    "description": "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html#
+    "description": "http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html#eks-clustername-yaml",
     "type": "object",
     "additionalProperties": false,
     "minProperties": 1,
@@ -889,6 +889,7 @@
     },
     "autotune_limits": {
         "type": "object",
+        "additionalProperties": false,
         "properties": {
             "cpus": {
                 "type": "object",
@@ -954,6 +955,28 @@
     },
     "max_skew": {
         "type": "integer"
+    },
+    "match_label_keys": {
+        "type": "array",
+        "items": {
+            "allOf": [
+                {
+                    "type": "string",
+                    "pattern": "^([a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?/)?[a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?$",
+                    "maxLength": 63
+                },
+                {
+                    "not": {
+                        "enum": [
+                            "paasta.yelp.com/service",
+                            "paasta.yelp.com/instance"
+                        ]
+                    }
+                }
+            ],
+            "$comment": "Valid Kubernetes label key: optional prefix (DNS subdomain) followed by '/' and name segment (max 63 chars each)"
+        },
+        "uniqueItems": true
     }
 },
 "required": []
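A quick way to see what the new match_label_keys item schema accepts, using the same Draft4Validator that validate.py already imports (the label keys below are just examples; the schema fragment is copied from the hunk above):

from jsonschema import Draft4Validator

item_schema = {
    "allOf": [
        {
            "type": "string",
            "pattern": "^([a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?/)?[a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?$",
            "maxLength": 63,
        },
        {"not": {"enum": ["paasta.yelp.com/service", "paasta.yelp.com/instance"]}},
    ]
}
validator = Draft4Validator(item_schema)

assert validator.is_valid("topology.kubernetes.io/zone")  # ordinary label key
assert not validator.is_valid("paasta.yelp.com/service")  # reserved PaaSTA label, rejected by the "not"/"enum" branch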
paasta_tools/cli/schemas/smartstack_schema.json:

@@ -37,6 +37,7 @@
     "enum": [
         "http",
         "https",
+        "http2",
         "tcp"
     ]
 },
@@ -61,6 +62,11 @@
     "minimum": 0,
     "maximum": 60000
 },
+"timeout_client_ms": {
+    "type": "integer",
+    "minimum": 0,
+    "maximum": 86400000
+},
 "timeout_server_ms": {
     "type": "integer",
     "minimum": 0,
@@ -117,6 +123,7 @@
     "enum": [
         "http",
         "https",
+        "http2",
         "tcp"
     ]
 },
@@ -130,6 +137,10 @@
         "CLUSTER_PROVIDED"
     ]
 },
+"choice_count": {
+    "type": "integer",
+    "minimum": 1
+},
 "chaos": {
     "type": "object",
     "additionalProperties": {
@@ -154,6 +165,9 @@
 "proxied_through": {
     "type": "string"
 },
+"clb_proxy": {
+    "type": "string"
+},
 "fixed_delay": {
     "type": "object",
     "additionalProperties": {