paasta-tools 1.21.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- k8s_itests/__init__.py +0 -0
- k8s_itests/test_autoscaling.py +23 -0
- k8s_itests/utils.py +38 -0
- paasta_tools/__init__.py +20 -0
- paasta_tools/adhoc_tools.py +142 -0
- paasta_tools/api/__init__.py +13 -0
- paasta_tools/api/api.py +330 -0
- paasta_tools/api/api_docs/swagger.json +2323 -0
- paasta_tools/api/client.py +106 -0
- paasta_tools/api/settings.py +33 -0
- paasta_tools/api/tweens/__init__.py +6 -0
- paasta_tools/api/tweens/auth.py +125 -0
- paasta_tools/api/tweens/profiling.py +108 -0
- paasta_tools/api/tweens/request_logger.py +124 -0
- paasta_tools/api/views/__init__.py +13 -0
- paasta_tools/api/views/autoscaler.py +100 -0
- paasta_tools/api/views/exception.py +45 -0
- paasta_tools/api/views/flink.py +73 -0
- paasta_tools/api/views/instance.py +395 -0
- paasta_tools/api/views/pause_autoscaler.py +71 -0
- paasta_tools/api/views/remote_run.py +113 -0
- paasta_tools/api/views/resources.py +76 -0
- paasta_tools/api/views/service.py +35 -0
- paasta_tools/api/views/version.py +25 -0
- paasta_tools/apply_external_resources.py +79 -0
- paasta_tools/async_utils.py +109 -0
- paasta_tools/autoscaling/__init__.py +0 -0
- paasta_tools/autoscaling/autoscaling_service_lib.py +57 -0
- paasta_tools/autoscaling/forecasting.py +106 -0
- paasta_tools/autoscaling/max_all_k8s_services.py +41 -0
- paasta_tools/autoscaling/pause_service_autoscaler.py +77 -0
- paasta_tools/autoscaling/utils.py +52 -0
- paasta_tools/bounce_lib.py +184 -0
- paasta_tools/broadcast_log_to_services.py +62 -0
- paasta_tools/cassandracluster_tools.py +210 -0
- paasta_tools/check_autoscaler_max_instances.py +212 -0
- paasta_tools/check_cassandracluster_services_replication.py +35 -0
- paasta_tools/check_flink_services_health.py +203 -0
- paasta_tools/check_kubernetes_api.py +57 -0
- paasta_tools/check_kubernetes_services_replication.py +141 -0
- paasta_tools/check_oom_events.py +244 -0
- paasta_tools/check_services_replication_tools.py +324 -0
- paasta_tools/check_spark_jobs.py +234 -0
- paasta_tools/cleanup_kubernetes_cr.py +138 -0
- paasta_tools/cleanup_kubernetes_crd.py +145 -0
- paasta_tools/cleanup_kubernetes_jobs.py +344 -0
- paasta_tools/cleanup_tron_namespaces.py +96 -0
- paasta_tools/cli/__init__.py +13 -0
- paasta_tools/cli/authentication.py +85 -0
- paasta_tools/cli/cli.py +260 -0
- paasta_tools/cli/cmds/__init__.py +13 -0
- paasta_tools/cli/cmds/autoscale.py +143 -0
- paasta_tools/cli/cmds/check.py +334 -0
- paasta_tools/cli/cmds/cook_image.py +147 -0
- paasta_tools/cli/cmds/get_docker_image.py +76 -0
- paasta_tools/cli/cmds/get_image_version.py +172 -0
- paasta_tools/cli/cmds/get_latest_deployment.py +93 -0
- paasta_tools/cli/cmds/info.py +155 -0
- paasta_tools/cli/cmds/itest.py +117 -0
- paasta_tools/cli/cmds/list.py +66 -0
- paasta_tools/cli/cmds/list_clusters.py +42 -0
- paasta_tools/cli/cmds/list_deploy_queue.py +171 -0
- paasta_tools/cli/cmds/list_namespaces.py +84 -0
- paasta_tools/cli/cmds/local_run.py +1396 -0
- paasta_tools/cli/cmds/logs.py +1601 -0
- paasta_tools/cli/cmds/mark_for_deployment.py +1988 -0
- paasta_tools/cli/cmds/mesh_status.py +174 -0
- paasta_tools/cli/cmds/pause_service_autoscaler.py +107 -0
- paasta_tools/cli/cmds/push_to_registry.py +275 -0
- paasta_tools/cli/cmds/remote_run.py +252 -0
- paasta_tools/cli/cmds/rollback.py +347 -0
- paasta_tools/cli/cmds/secret.py +549 -0
- paasta_tools/cli/cmds/security_check.py +59 -0
- paasta_tools/cli/cmds/spark_run.py +1400 -0
- paasta_tools/cli/cmds/start_stop_restart.py +401 -0
- paasta_tools/cli/cmds/status.py +2302 -0
- paasta_tools/cli/cmds/validate.py +1012 -0
- paasta_tools/cli/cmds/wait_for_deployment.py +275 -0
- paasta_tools/cli/fsm/__init__.py +13 -0
- paasta_tools/cli/fsm/autosuggest.py +82 -0
- paasta_tools/cli/fsm/template/README.md +8 -0
- paasta_tools/cli/fsm/template/cookiecutter.json +7 -0
- paasta_tools/cli/fsm/template/{{cookiecutter.service}}/kubernetes-PROD.yaml +91 -0
- paasta_tools/cli/fsm/template/{{cookiecutter.service}}/monitoring.yaml +20 -0
- paasta_tools/cli/fsm/template/{{cookiecutter.service}}/service.yaml +8 -0
- paasta_tools/cli/fsm/template/{{cookiecutter.service}}/smartstack.yaml +6 -0
- paasta_tools/cli/fsm_cmd.py +121 -0
- paasta_tools/cli/paasta_tabcomplete.sh +23 -0
- paasta_tools/cli/schemas/adhoc_schema.json +199 -0
- paasta_tools/cli/schemas/autoscaling_schema.json +91 -0
- paasta_tools/cli/schemas/autotuned_defaults/cassandracluster_schema.json +37 -0
- paasta_tools/cli/schemas/autotuned_defaults/kubernetes_schema.json +89 -0
- paasta_tools/cli/schemas/deploy_schema.json +173 -0
- paasta_tools/cli/schemas/eks_schema.json +970 -0
- paasta_tools/cli/schemas/kubernetes_schema.json +970 -0
- paasta_tools/cli/schemas/rollback_schema.json +160 -0
- paasta_tools/cli/schemas/service_schema.json +25 -0
- paasta_tools/cli/schemas/smartstack_schema.json +322 -0
- paasta_tools/cli/schemas/tron_schema.json +699 -0
- paasta_tools/cli/utils.py +1118 -0
- paasta_tools/clusterman.py +21 -0
- paasta_tools/config_utils.py +385 -0
- paasta_tools/contrib/__init__.py +0 -0
- paasta_tools/contrib/bounce_log_latency_parser.py +68 -0
- paasta_tools/contrib/check_manual_oapi_changes.sh +24 -0
- paasta_tools/contrib/check_orphans.py +306 -0
- paasta_tools/contrib/create_dynamodb_table.py +35 -0
- paasta_tools/contrib/create_paasta_playground.py +105 -0
- paasta_tools/contrib/emit_allocated_cpu_metrics.py +50 -0
- paasta_tools/contrib/get_running_task_allocation.py +346 -0
- paasta_tools/contrib/habitat_fixer.py +86 -0
- paasta_tools/contrib/ide_helper.py +316 -0
- paasta_tools/contrib/is_pod_healthy_in_proxy.py +139 -0
- paasta_tools/contrib/is_pod_healthy_in_smartstack.py +50 -0
- paasta_tools/contrib/kill_bad_containers.py +109 -0
- paasta_tools/contrib/mass-deploy-tag.sh +44 -0
- paasta_tools/contrib/mock_patch_checker.py +86 -0
- paasta_tools/contrib/paasta_update_soa_memcpu.py +520 -0
- paasta_tools/contrib/render_template.py +129 -0
- paasta_tools/contrib/rightsizer_soaconfigs_update.py +348 -0
- paasta_tools/contrib/service_shard_remove.py +157 -0
- paasta_tools/contrib/service_shard_update.py +373 -0
- paasta_tools/contrib/shared_ip_check.py +77 -0
- paasta_tools/contrib/timeouts_metrics_prom.py +64 -0
- paasta_tools/delete_kubernetes_deployments.py +89 -0
- paasta_tools/deployment_utils.py +44 -0
- paasta_tools/docker_wrapper.py +234 -0
- paasta_tools/docker_wrapper_imports.py +13 -0
- paasta_tools/drain_lib.py +351 -0
- paasta_tools/dump_locally_running_services.py +71 -0
- paasta_tools/eks_tools.py +119 -0
- paasta_tools/envoy_tools.py +373 -0
- paasta_tools/firewall.py +504 -0
- paasta_tools/firewall_logging.py +154 -0
- paasta_tools/firewall_update.py +172 -0
- paasta_tools/flink_tools.py +345 -0
- paasta_tools/flinkeks_tools.py +90 -0
- paasta_tools/frameworks/__init__.py +0 -0
- paasta_tools/frameworks/adhoc_scheduler.py +71 -0
- paasta_tools/frameworks/constraints.py +87 -0
- paasta_tools/frameworks/native_scheduler.py +652 -0
- paasta_tools/frameworks/native_service_config.py +301 -0
- paasta_tools/frameworks/task_store.py +245 -0
- paasta_tools/generate_all_deployments +9 -0
- paasta_tools/generate_authenticating_services.py +94 -0
- paasta_tools/generate_deployments_for_service.py +255 -0
- paasta_tools/generate_services_file.py +114 -0
- paasta_tools/generate_services_yaml.py +30 -0
- paasta_tools/hacheck.py +76 -0
- paasta_tools/instance/__init__.py +0 -0
- paasta_tools/instance/hpa_metrics_parser.py +122 -0
- paasta_tools/instance/kubernetes.py +1362 -0
- paasta_tools/iptables.py +240 -0
- paasta_tools/kafkacluster_tools.py +143 -0
- paasta_tools/kubernetes/__init__.py +0 -0
- paasta_tools/kubernetes/application/__init__.py +0 -0
- paasta_tools/kubernetes/application/controller_wrappers.py +476 -0
- paasta_tools/kubernetes/application/tools.py +90 -0
- paasta_tools/kubernetes/bin/__init__.py +0 -0
- paasta_tools/kubernetes/bin/kubernetes_remove_evicted_pods.py +164 -0
- paasta_tools/kubernetes/bin/paasta_cleanup_remote_run_resources.py +135 -0
- paasta_tools/kubernetes/bin/paasta_cleanup_stale_nodes.py +181 -0
- paasta_tools/kubernetes/bin/paasta_secrets_sync.py +758 -0
- paasta_tools/kubernetes/remote_run.py +558 -0
- paasta_tools/kubernetes_tools.py +4679 -0
- paasta_tools/list_kubernetes_service_instances.py +128 -0
- paasta_tools/list_tron_namespaces.py +60 -0
- paasta_tools/long_running_service_tools.py +678 -0
- paasta_tools/mac_address.py +44 -0
- paasta_tools/marathon_dashboard.py +0 -0
- paasta_tools/mesos/__init__.py +0 -0
- paasta_tools/mesos/cfg.py +46 -0
- paasta_tools/mesos/cluster.py +60 -0
- paasta_tools/mesos/exceptions.py +59 -0
- paasta_tools/mesos/framework.py +77 -0
- paasta_tools/mesos/log.py +48 -0
- paasta_tools/mesos/master.py +306 -0
- paasta_tools/mesos/mesos_file.py +169 -0
- paasta_tools/mesos/parallel.py +52 -0
- paasta_tools/mesos/slave.py +115 -0
- paasta_tools/mesos/task.py +94 -0
- paasta_tools/mesos/util.py +69 -0
- paasta_tools/mesos/zookeeper.py +37 -0
- paasta_tools/mesos_maintenance.py +848 -0
- paasta_tools/mesos_tools.py +1051 -0
- paasta_tools/metrics/__init__.py +0 -0
- paasta_tools/metrics/metastatus_lib.py +1110 -0
- paasta_tools/metrics/metrics_lib.py +217 -0
- paasta_tools/monitoring/__init__.py +13 -0
- paasta_tools/monitoring/check_k8s_api_performance.py +110 -0
- paasta_tools/monitoring_tools.py +652 -0
- paasta_tools/monkrelaycluster_tools.py +146 -0
- paasta_tools/nrtsearchservice_tools.py +143 -0
- paasta_tools/nrtsearchserviceeks_tools.py +68 -0
- paasta_tools/oom_logger.py +321 -0
- paasta_tools/paasta_deploy_tron_jobs +3 -0
- paasta_tools/paasta_execute_docker_command.py +123 -0
- paasta_tools/paasta_native_serviceinit.py +21 -0
- paasta_tools/paasta_service_config_loader.py +201 -0
- paasta_tools/paastaapi/__init__.py +29 -0
- paasta_tools/paastaapi/api/__init__.py +3 -0
- paasta_tools/paastaapi/api/autoscaler_api.py +302 -0
- paasta_tools/paastaapi/api/default_api.py +569 -0
- paasta_tools/paastaapi/api/remote_run_api.py +604 -0
- paasta_tools/paastaapi/api/resources_api.py +157 -0
- paasta_tools/paastaapi/api/service_api.py +1736 -0
- paasta_tools/paastaapi/api_client.py +818 -0
- paasta_tools/paastaapi/apis/__init__.py +22 -0
- paasta_tools/paastaapi/configuration.py +455 -0
- paasta_tools/paastaapi/exceptions.py +137 -0
- paasta_tools/paastaapi/model/__init__.py +5 -0
- paasta_tools/paastaapi/model/adhoc_launch_history.py +176 -0
- paasta_tools/paastaapi/model/autoscaler_count_msg.py +176 -0
- paasta_tools/paastaapi/model/deploy_queue.py +178 -0
- paasta_tools/paastaapi/model/deploy_queue_service_instance.py +194 -0
- paasta_tools/paastaapi/model/envoy_backend.py +185 -0
- paasta_tools/paastaapi/model/envoy_location.py +184 -0
- paasta_tools/paastaapi/model/envoy_status.py +181 -0
- paasta_tools/paastaapi/model/flink_cluster_overview.py +188 -0
- paasta_tools/paastaapi/model/flink_config.py +173 -0
- paasta_tools/paastaapi/model/flink_job.py +186 -0
- paasta_tools/paastaapi/model/flink_job_details.py +192 -0
- paasta_tools/paastaapi/model/flink_jobs.py +175 -0
- paasta_tools/paastaapi/model/float_and_error.py +173 -0
- paasta_tools/paastaapi/model/hpa_metric.py +176 -0
- paasta_tools/paastaapi/model/inline_object.py +170 -0
- paasta_tools/paastaapi/model/inline_response200.py +170 -0
- paasta_tools/paastaapi/model/inline_response2001.py +170 -0
- paasta_tools/paastaapi/model/instance_bounce_status.py +200 -0
- paasta_tools/paastaapi/model/instance_mesh_status.py +186 -0
- paasta_tools/paastaapi/model/instance_status.py +220 -0
- paasta_tools/paastaapi/model/instance_status_adhoc.py +187 -0
- paasta_tools/paastaapi/model/instance_status_cassandracluster.py +173 -0
- paasta_tools/paastaapi/model/instance_status_flink.py +173 -0
- paasta_tools/paastaapi/model/instance_status_kafkacluster.py +173 -0
- paasta_tools/paastaapi/model/instance_status_kubernetes.py +263 -0
- paasta_tools/paastaapi/model/instance_status_kubernetes_autoscaling_status.py +187 -0
- paasta_tools/paastaapi/model/instance_status_kubernetes_v2.py +197 -0
- paasta_tools/paastaapi/model/instance_status_tron.py +204 -0
- paasta_tools/paastaapi/model/instance_tasks.py +182 -0
- paasta_tools/paastaapi/model/integer_and_error.py +173 -0
- paasta_tools/paastaapi/model/kubernetes_container.py +178 -0
- paasta_tools/paastaapi/model/kubernetes_container_v2.py +219 -0
- paasta_tools/paastaapi/model/kubernetes_healthcheck.py +176 -0
- paasta_tools/paastaapi/model/kubernetes_pod.py +201 -0
- paasta_tools/paastaapi/model/kubernetes_pod_event.py +176 -0
- paasta_tools/paastaapi/model/kubernetes_pod_v2.py +213 -0
- paasta_tools/paastaapi/model/kubernetes_replica_set.py +185 -0
- paasta_tools/paastaapi/model/kubernetes_version.py +202 -0
- paasta_tools/paastaapi/model/remote_run_outcome.py +189 -0
- paasta_tools/paastaapi/model/remote_run_start.py +185 -0
- paasta_tools/paastaapi/model/remote_run_stop.py +176 -0
- paasta_tools/paastaapi/model/remote_run_token.py +173 -0
- paasta_tools/paastaapi/model/resource.py +187 -0
- paasta_tools/paastaapi/model/resource_item.py +187 -0
- paasta_tools/paastaapi/model/resource_value.py +176 -0
- paasta_tools/paastaapi/model/smartstack_backend.py +191 -0
- paasta_tools/paastaapi/model/smartstack_location.py +181 -0
- paasta_tools/paastaapi/model/smartstack_status.py +181 -0
- paasta_tools/paastaapi/model/task_tail_lines.py +176 -0
- paasta_tools/paastaapi/model_utils.py +1879 -0
- paasta_tools/paastaapi/models/__init__.py +62 -0
- paasta_tools/paastaapi/rest.py +287 -0
- paasta_tools/prune_completed_pods.py +220 -0
- paasta_tools/puppet_service_tools.py +59 -0
- paasta_tools/py.typed +1 -0
- paasta_tools/remote_git.py +127 -0
- paasta_tools/run-paasta-api-in-dev-mode.py +57 -0
- paasta_tools/run-paasta-api-playground.py +51 -0
- paasta_tools/secret_providers/__init__.py +66 -0
- paasta_tools/secret_providers/vault.py +214 -0
- paasta_tools/secret_tools.py +277 -0
- paasta_tools/setup_istio_mesh.py +353 -0
- paasta_tools/setup_kubernetes_cr.py +412 -0
- paasta_tools/setup_kubernetes_crd.py +138 -0
- paasta_tools/setup_kubernetes_internal_crd.py +154 -0
- paasta_tools/setup_kubernetes_job.py +353 -0
- paasta_tools/setup_prometheus_adapter_config.py +1028 -0
- paasta_tools/setup_tron_namespace.py +248 -0
- paasta_tools/slack.py +75 -0
- paasta_tools/smartstack_tools.py +676 -0
- paasta_tools/spark_tools.py +283 -0
- paasta_tools/synapse_srv_namespaces_fact.py +42 -0
- paasta_tools/tron/__init__.py +0 -0
- paasta_tools/tron/client.py +158 -0
- paasta_tools/tron/tron_command_context.py +194 -0
- paasta_tools/tron/tron_timeutils.py +101 -0
- paasta_tools/tron_tools.py +1448 -0
- paasta_tools/utils.py +4307 -0
- paasta_tools/yaml_tools.py +44 -0
- paasta_tools-1.21.3.data/scripts/apply_external_resources.py +79 -0
- paasta_tools-1.21.3.data/scripts/bounce_log_latency_parser.py +68 -0
- paasta_tools-1.21.3.data/scripts/check_autoscaler_max_instances.py +212 -0
- paasta_tools-1.21.3.data/scripts/check_cassandracluster_services_replication.py +35 -0
- paasta_tools-1.21.3.data/scripts/check_flink_services_health.py +203 -0
- paasta_tools-1.21.3.data/scripts/check_kubernetes_api.py +57 -0
- paasta_tools-1.21.3.data/scripts/check_kubernetes_services_replication.py +141 -0
- paasta_tools-1.21.3.data/scripts/check_manual_oapi_changes.sh +24 -0
- paasta_tools-1.21.3.data/scripts/check_oom_events.py +244 -0
- paasta_tools-1.21.3.data/scripts/check_orphans.py +306 -0
- paasta_tools-1.21.3.data/scripts/check_spark_jobs.py +234 -0
- paasta_tools-1.21.3.data/scripts/cleanup_kubernetes_cr.py +138 -0
- paasta_tools-1.21.3.data/scripts/cleanup_kubernetes_crd.py +145 -0
- paasta_tools-1.21.3.data/scripts/cleanup_kubernetes_jobs.py +344 -0
- paasta_tools-1.21.3.data/scripts/create_dynamodb_table.py +35 -0
- paasta_tools-1.21.3.data/scripts/create_paasta_playground.py +105 -0
- paasta_tools-1.21.3.data/scripts/delete_kubernetes_deployments.py +89 -0
- paasta_tools-1.21.3.data/scripts/emit_allocated_cpu_metrics.py +50 -0
- paasta_tools-1.21.3.data/scripts/generate_all_deployments +9 -0
- paasta_tools-1.21.3.data/scripts/generate_authenticating_services.py +94 -0
- paasta_tools-1.21.3.data/scripts/generate_deployments_for_service.py +255 -0
- paasta_tools-1.21.3.data/scripts/generate_services_file.py +114 -0
- paasta_tools-1.21.3.data/scripts/generate_services_yaml.py +30 -0
- paasta_tools-1.21.3.data/scripts/get_running_task_allocation.py +346 -0
- paasta_tools-1.21.3.data/scripts/habitat_fixer.py +86 -0
- paasta_tools-1.21.3.data/scripts/ide_helper.py +316 -0
- paasta_tools-1.21.3.data/scripts/is_pod_healthy_in_proxy.py +139 -0
- paasta_tools-1.21.3.data/scripts/is_pod_healthy_in_smartstack.py +50 -0
- paasta_tools-1.21.3.data/scripts/kill_bad_containers.py +109 -0
- paasta_tools-1.21.3.data/scripts/kubernetes_remove_evicted_pods.py +164 -0
- paasta_tools-1.21.3.data/scripts/mass-deploy-tag.sh +44 -0
- paasta_tools-1.21.3.data/scripts/mock_patch_checker.py +86 -0
- paasta_tools-1.21.3.data/scripts/paasta_cleanup_remote_run_resources.py +135 -0
- paasta_tools-1.21.3.data/scripts/paasta_cleanup_stale_nodes.py +181 -0
- paasta_tools-1.21.3.data/scripts/paasta_deploy_tron_jobs +3 -0
- paasta_tools-1.21.3.data/scripts/paasta_execute_docker_command.py +123 -0
- paasta_tools-1.21.3.data/scripts/paasta_secrets_sync.py +758 -0
- paasta_tools-1.21.3.data/scripts/paasta_tabcomplete.sh +23 -0
- paasta_tools-1.21.3.data/scripts/paasta_update_soa_memcpu.py +520 -0
- paasta_tools-1.21.3.data/scripts/render_template.py +129 -0
- paasta_tools-1.21.3.data/scripts/rightsizer_soaconfigs_update.py +348 -0
- paasta_tools-1.21.3.data/scripts/service_shard_remove.py +157 -0
- paasta_tools-1.21.3.data/scripts/service_shard_update.py +373 -0
- paasta_tools-1.21.3.data/scripts/setup_istio_mesh.py +353 -0
- paasta_tools-1.21.3.data/scripts/setup_kubernetes_cr.py +412 -0
- paasta_tools-1.21.3.data/scripts/setup_kubernetes_crd.py +138 -0
- paasta_tools-1.21.3.data/scripts/setup_kubernetes_internal_crd.py +154 -0
- paasta_tools-1.21.3.data/scripts/setup_kubernetes_job.py +353 -0
- paasta_tools-1.21.3.data/scripts/setup_prometheus_adapter_config.py +1028 -0
- paasta_tools-1.21.3.data/scripts/shared_ip_check.py +77 -0
- paasta_tools-1.21.3.data/scripts/synapse_srv_namespaces_fact.py +42 -0
- paasta_tools-1.21.3.data/scripts/timeouts_metrics_prom.py +64 -0
- paasta_tools-1.21.3.dist-info/LICENSE +201 -0
- paasta_tools-1.21.3.dist-info/METADATA +74 -0
- paasta_tools-1.21.3.dist-info/RECORD +348 -0
- paasta_tools-1.21.3.dist-info/WHEEL +5 -0
- paasta_tools-1.21.3.dist-info/entry_points.txt +20 -0
- paasta_tools-1.21.3.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# flake8: noqa: E402
|
|
3
|
+
""" Meant to be used by mesos-slave instead of the /usr/bin/docker executable
|
|
4
|
+
directly This will parse the CLI arguments intended for docker, extract
|
|
5
|
+
environment variable settings related to the actual node hostname and mesos
|
|
6
|
+
task ID, and use those as an additional --hostname argument when calling the
|
|
7
|
+
underlying docker command.
|
|
8
|
+
|
|
9
|
+
If the environment variables are unspecified, or if --hostname is already
|
|
10
|
+
specified, this does not change any arguments and just directly calls docker
|
|
11
|
+
as-is.
|
|
12
|
+
"""
|
|
13
|
+
import logging
|
|
14
|
+
import os
|
|
15
|
+
import re
|
|
16
|
+
import socket
|
|
17
|
+
import sys
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
if "PATH" not in os.environ:
    # This command is sometimes executed in a sanitized environment
    # which does not have the path, which causes the following imports
    # to fail.
    # To compensate, we set a minimal path to get off the ground.
    os.environ["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"


# Directory holding per-container MAC-address lock files (see add_firewall).
LOCK_DIRECTORY = "/var/lib/paasta/mac-address"
# Matches docker env flags: bundled short options containing "e" (e.g. -e,
# -it -e), --env, and --env-file; an inline "=value" is captured as group
# "arg" and the "-file" suffix as group "file".
ENV_MATCH_RE = re.compile(r"^(-\w*e\w*|--env(?P<file>-file)?)(=(?P<arg>\S.*))?$")
# Cap applied when generating container hostnames (see generate_hostname_task_id).
MAX_HOSTNAME_LENGTH = 60
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def parse_env_args(args):
    """Collect the docker environment settings out of a CLI argument list.

    Starts from a copy of the current process environment, then layers on
    any KEY=VALUE pairs passed via -e/--env (inline or as the following
    token) and the contents of any --env-file files.  Returns the merged
    mapping.
    """
    env = dict(os.environ)
    expecting_value = False  # previous token was -e/--env with no inline value
    value_is_file = False  # the pending value names an env-file, not KEY=VALUE
    for token in args:
        if not expecting_value:
            m = ENV_MATCH_RE.match(token)
            if m is None:
                continue
            token = m.group("arg") or ""
            value_is_file = bool(m.group("file"))
            if token == "":
                # Flag with no inline value; the value is the next token.
                expecting_value = True
                continue

        expecting_value = False

        if value_is_file:
            env.update(read_env_file(token))
            value_is_file = False
            continue

        key, sep, value = token.partition("=")
        if sep:
            env[key] = value

    return env
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def read_env_file(filename):
    """Parse *filename* as lines of KEY=VALUE and return them as a dict.

    Lines without an '=' are skipped.  Values have surrounding whitespace
    (including the trailing newline) stripped; keys are kept verbatim.
    """
    entries = {}
    with open(filename) as env_file:
        for line in env_file:
            key, sep, value = line.partition("=")
            if sep:
                entries[key] = value.strip()
    return entries
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def can_add_hostname(args):
    """Return True if it is safe to inject a --hostname argument.

    It is not safe when the container uses host networking (the injected
    hostname would be wrong) or when the caller already passed a hostname
    in any spelling: -h, --hostname[=...], or "h" bundled into a run of
    short options such as -th.
    """
    if is_network_host(args):
        return False

    # Idiom fix: the original used enumerate() but never used the index.
    for arg in args:
        # Check for --hostname and variants
        if arg == "-h":
            return False
        if arg.startswith("--hostname"):
            return False
        if len(arg) > 1 and arg[0] == "-" and arg[1] != "-":
            # Several bundled short options; only inspect the part before
            # any "=" value so "-e=h" is not a false positive.
            if "h" in arg.partition("=")[0]:
                return False

    return True
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def is_network_host(args):
    """Return True if the docker arguments request host networking.

    Recognizes both the inline form (--net=host / --network=host) and the
    two-token form (--net host / --network host).
    """
    for position, token in enumerate(args):
        # Check for --network=host and variants
        if token in ("--net=host", "--network=host"):
            return True
        if token in ("--net", "--network"):
            try:
                if args[position + 1] == "host":
                    return True
            except IndexError:
                # Flag is the last token; no value follows.
                pass

    return False
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def is_run(args):
    """Return True if the docker invocation contains a `run` command.

    Idiom fix: the original expressed membership as
    ``list(args).index("run")`` wrapped in try/except ValueError; the
    ``in`` operator is the direct, equivalent test.
    """
    return "run" in args
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def can_add_mac_address(args):
    """Return True if it is safe to inject a --mac-address argument.

    Only a `docker run` command that does not use host networking and does
    not already specify --mac-address may take one.
    """
    if is_network_host(args) or not is_run(args):
        return False

    # Idiom fix: the original looped with enumerate() but never used the
    # index; any() expresses the scan directly.
    return not any(arg.startswith("--mac-address") for arg in args)
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def generate_hostname_task_id(hostname, mesos_task_id):
    """Build a container hostname of the form <hostname>-<task suffix>.

    Only the final dot-separated component of the Mesos task ID is used.
    The result is reduced to alphanumerics and dashes, truncated to
    MAX_HOSTNAME_LENGTH characters, and stripped of trailing dashes (a
    hostname may not end with a dash per RFC 952).
    """
    task_suffix = mesos_task_id.rpartition(".")[2]
    candidate = f"{hostname}-{task_suffix}"

    # hostnames can only contain alphanumerics and dashes and must be no
    # more than MAX_HOSTNAME_LENGTH characters
    sanitized = re.sub("[^a-zA-Z0-9-]+", "-", candidate)[:MAX_HOSTNAME_LENGTH]

    return sanitized.rstrip("-")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def add_argument(args, argument):
    """Return a copy of *args* with *argument* inserted just after 'run'.

    If there is no 'run' token, the arguments are returned unchanged
    (apart from being copied into a new list).  The input sequence is
    never mutated.
    """
    new_args = list(args)
    if "run" in new_args:
        new_args.insert(new_args.index("run") + 1, argument)
    return new_args
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def arg_collision(new_args, current_args):
    """Return True if any of *new_args* already appears in *current_args*.

    Comparison is by argument name only, i.e. the portion of each current
    argument before any '='.
    """
    current_keys = {existing.split("=")[0] for existing in current_args}
    return not current_keys.isdisjoint(new_args)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def add_firewall(argv, service, instance):
    """Reserve a MAC address for the container and install firewall rules.

    On success the reserved address is injected into the returned argv as
    --mac-address.  Any failure is reported on stderr but never aborts the
    docker invocation: the (possibly unmodified) argv is always returned.
    """
    # Delayed import to improve performance when add_firewall is not used
    from paasta_tools.docker_wrapper_imports import DEFAULT_SYNAPSE_SERVICE_DIR
    from paasta_tools.docker_wrapper_imports import firewall_flock
    from paasta_tools.docker_wrapper_imports import prepare_new_container
    from paasta_tools.docker_wrapper_imports import reserve_unique_mac_address
    from paasta_tools.docker_wrapper_imports import DEFAULT_SOA_DIR

    error_message = ""
    try:
        mac_address, lockfile = reserve_unique_mac_address(LOCK_DIRECTORY)
    except Exception as e:
        error_message = f"Unable to add mac address: {e}"
    else:
        argv = add_argument(argv, f"--mac-address={mac_address}")
        try:
            # Hold the firewall lock while mutating the iptables rules.
            with firewall_flock():
                prepare_new_container(
                    DEFAULT_SOA_DIR,
                    DEFAULT_SYNAPSE_SERVICE_DIR,
                    service,
                    instance,
                    mac_address,
                )
        except Exception as e:
            error_message = f"Unable to add firewall rules: {e}"

    if error_message:
        print(error_message, file=sys.stderr)

    return argv
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def main(argv=None):
    """Entry point: augment the docker arguments, then exec real docker.

    When possible this injects a PAASTA_HOST env var and a --hostname
    (suffixed with the Mesos task ID when one is set), optionally adds a
    MAC address plus firewall rules when PAASTA_FIREWALL is requested, and
    finally replaces this process with the real docker client.
    """
    if argv is None:
        argv = sys.argv

    env_args = parse_env_args(argv)

    # Marathon sets MESOS_TASK_ID
    mesos_task_id = env_args.get("MESOS_TASK_ID")

    fqdn = socket.getfqdn()
    short_hostname = fqdn.partition(".")[0]
    if can_add_hostname(argv):
        # Insert -e first so the later --hostname insertion ends up before
        # it in argv (add_argument inserts immediately after 'run').
        argv = add_argument(argv, f"-e=PAASTA_HOST={fqdn}")
        if mesos_task_id:
            container_hostname = generate_hostname_task_id(
                short_hostname, mesos_task_id
            )
        else:
            container_hostname = short_hostname
        argv = add_argument(argv, f"--hostname={container_hostname}")

    service = env_args.get("PAASTA_SERVICE")
    instance = env_args.get("PAASTA_INSTANCE")
    if env_args.get("PAASTA_FIREWALL") and service and instance and can_add_mac_address(argv):
        try:
            argv = add_firewall(argv, service, instance)
        except Exception as e:
            # add_firewall is best-effort; never block the docker call.
            print(f"Unhandled exception in add_firewall: {e}", file=sys.stderr)

    # Replace this process with the real docker client, forwarding argv[1:].
    os.execlp("docker", "docker", *argv[1:])
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# flake8: noqa: E402
|
|
3
|
+
""" Delayed imports for paasta_tools.docker_wrapper, meant to improve the performance
|
|
4
|
+
of docker_wrapper for execution paths when we do not use add_firewall. It turns out
|
|
5
|
+
that the imports needed add a fair amount of overhead to "docker inspect" command
|
|
6
|
+
which does not use add_firewall, and Mesos executes this command quite a lot. We can
|
|
7
|
+
make it faster by moving all the paasta_tools imports to separate file, i.e. here
|
|
8
|
+
"""
|
|
9
|
+
from paasta_tools.firewall import DEFAULT_SYNAPSE_SERVICE_DIR
|
|
10
|
+
from paasta_tools.firewall import firewall_flock
|
|
11
|
+
from paasta_tools.firewall import prepare_new_container
|
|
12
|
+
from paasta_tools.mac_address import reserve_unique_mac_address
|
|
13
|
+
from paasta_tools.utils import DEFAULT_SOA_DIR
|
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
# Copyright 2015-2016 Yelp Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
import asyncio
|
|
15
|
+
import time
|
|
16
|
+
from typing import Any
|
|
17
|
+
from typing import Awaitable
|
|
18
|
+
from typing import Callable
|
|
19
|
+
from typing import Dict
|
|
20
|
+
from typing import List
|
|
21
|
+
from typing import Optional
|
|
22
|
+
from typing import Sequence
|
|
23
|
+
from typing import Set
|
|
24
|
+
from typing import Type
|
|
25
|
+
from typing import TypeVar
|
|
26
|
+
|
|
27
|
+
import aiohttp
|
|
28
|
+
from mypy_extensions import TypedDict
|
|
29
|
+
|
|
30
|
+
from paasta_tools.hacheck import get_spool
|
|
31
|
+
from paasta_tools.hacheck import post_spool
|
|
32
|
+
from paasta_tools.utils import get_user_agent
|
|
33
|
+
|
|
34
|
+
# Registry mapping drain-method name -> DrainMethod subclass; populated by
# the @register_drain_method decorator and read by get_drain_method /
# list_drain_methods.
_drain_methods: Dict[str, Type["DrainMethod"]] = {}


# Type variable used by register_drain_method so the decorator preserves
# the exact subclass type it decorates.
_RegisterDrainMethod_T = TypeVar("_RegisterDrainMethod_T", bound=Type["DrainMethod"])
T = TypeVar("T")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def register_drain_method(
    name: str,
) -> Callable[[_RegisterDrainMethod_T], _RegisterDrainMethod_T]:
    """Return a class decorator registering a DrainMethod subclass.

    The subclass is recorded under *name* so that get_drain_method and
    list_drain_methods can find it.  The decorated class is returned
    unchanged.
    """

    def decorator(method_cls: _RegisterDrainMethod_T) -> _RegisterDrainMethod_T:
        _drain_methods[name] = method_cls
        return method_cls

    return decorator
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def get_drain_method(
    name: str,
    service: str,
    instance: str,
    registrations: List[str],
    drain_method_params: Optional[Dict] = None,
) -> "DrainMethod":
    """Instantiate the drain method registered under *name*.

    Raises KeyError if no drain method was registered under that name;
    any drain_method_params are forwarded as keyword arguments to the
    method's constructor.
    """
    method_cls = _drain_methods[name]
    params = drain_method_params or {}
    return method_cls(service, instance, registrations, **params)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def list_drain_methods() -> List[str]:
    """Return the names of all registered drain methods, sorted."""
    return sorted(_drain_methods)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# Placeholder type for whatever task object a concrete drain method handles.
DrainTask = TypeVar("DrainTask", bound=Any)


class DrainMethod:
    """Base interface for draining traffic away from a task without killing it.

    A drain method stops new traffic to a task — for example by making its
    load-balancer healthchecks fail.  Concrete implementations must provide:

    - drain(task): begin draining traffic from a task; idempotent.
    - stop_draining(task): undo a previous drain; idempotent.
    - is_draining(task): whether the task has already been marked as downed.
      This state must live out of process, because a bounce may take
      multiple runs of setup_marathon_job to complete.
    - is_safe_to_kill(task): True once the task may be killed.

    Implementations should be decorated with @register_drain_method(name).
    """

    def __init__(
        self, service: str, instance: str, registrations: List[str], **kwargs: Dict
    ) -> None:
        self.service = service
        self.instance = instance
        self.registrations = registrations

    async def drain(self, task: DrainTask) -> None:
        """Make a task stop receiving new traffic."""
        raise NotImplementedError()

    async def stop_draining(self, task: DrainTask) -> None:
        """Make a task that has previously been downed start receiving traffic again."""
        raise NotImplementedError()

    async def is_draining(self, task: DrainTask) -> bool:
        """Return whether a task is being drained."""
        raise NotImplementedError()

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        """Return True if a task is drained and ready to be killed, or False if we should wait."""
        raise NotImplementedError()
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@register_drain_method("noop")
class NoopDrainMethod(DrainMethod):
    """Drain policy that never drains anything: tasks are always considered
    un-drained and always safe to kill."""

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        # Every task may be killed immediately under this policy.
        return True

    async def is_draining(self, task: DrainTask) -> bool:
        # No task is ever reported as draining.
        return False

    async def drain(self, task: DrainTask) -> None:
        pass  # intentionally a no-op

    async def stop_draining(self, task: DrainTask) -> None:
        pass  # intentionally a no-op
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
@register_drain_method("test")
class TestDrainMethod(DrainMethod):
    """This drain policy is meant for integration testing. Do not use."""

    # These are variables on the class for ease of use in testing: every
    # instance shares the same two sets, so tests can observe and manipulate
    # drain state via the class itself.
    downed_task_ids: Set[str] = set()
    safe_to_kill_task_ids: Set[str] = set()

    async def drain(self, task: DrainTask) -> None:
        """Mark a task as downed, unless it was already promoted to safe-to-kill."""
        if task.id not in self.safe_to_kill_task_ids:
            self.downed_task_ids.add(task.id)

    async def stop_draining(self, task: DrainTask) -> None:
        """Remove a task from both the downed and safe-to-kill sets.

        Note: ``self.attr -= {...}`` mutates the shared class-level set in
        place (set.__isub__), so the removal is visible to all instances.
        """
        self.downed_task_ids -= {task.id}
        self.safe_to_kill_task_ids -= {task.id}

    async def is_draining(self, task: DrainTask) -> bool:
        """A task counts as draining if it is downed or already safe to kill."""
        return task.id in (self.downed_task_ids | self.safe_to_kill_task_ids)

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        """Only tasks explicitly promoted via mark_arbitrary_task_as_safe_to_kill qualify."""
        return task.id in self.safe_to_kill_task_ids

    @classmethod
    def mark_arbitrary_task_as_safe_to_kill(cls) -> None:
        """Promote one (arbitrary) downed task to safe-to-kill.

        Note: set.pop() raises KeyError if no task is currently downed.
        """
        cls.safe_to_kill_task_ids.add(cls.downed_task_ids.pop())
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
@register_drain_method("crashy_drain")
class CrashyDrainDrainMethod(NoopDrainMethod):
    """Drain policy for exercising failure handling in tests: every operation
    raises an exception instead of doing anything."""

    # Shared so that all four operations fail with an identical message.
    _crash_message = "Intentionally crashing for testing purposes"

    async def drain(self, task: DrainTask) -> None:
        raise Exception(self._crash_message)

    async def stop_draining(self, task: DrainTask) -> None:
        raise Exception(self._crash_message)

    async def is_draining(self, task: DrainTask) -> bool:
        raise Exception(self._crash_message)

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        raise Exception(self._crash_message)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
@register_drain_method("hacheck")
class HacheckDrainMethod(DrainMethod):
    """This drain policy issues a POST to hacheck's /spool/{service}/{port}/status endpoint to cause healthchecks to
    fail. It considers tasks safe to kill if they've been down in hacheck for more than a specified delay."""

    def __init__(
        self,
        service: str,
        instance: str,
        registrations: List[str],
        delay: float = 240,  # seconds a task must be down before it is safe to kill
        hacheck_port: int = 6666,  # port hacheck listens on, on the task's host
        expiration: float = 0,  # seconds until the down-state expires; 0 -> delay * 10
        **kwargs: Dict,
    ) -> None:
        super().__init__(service, instance, registrations)
        # float() conversions suggest delay/expiration may arrive as strings
        # from config — NOTE(review): assumption, confirm against the caller.
        self.delay = float(delay)
        self.hacheck_port = hacheck_port
        # Falsy expiration (the 0 default) falls back to ten times the delay.
        self.expiration = float(expiration) or float(delay) * 10

    def spool_urls(self, task: DrainTask) -> List[str]:
        """Build one hacheck spool URL per registration, targeting the task's
        host and its first port."""
        return [
            "http://%(task_host)s:%(hacheck_port)d/spool/%(registration)s/%(task_port)d/status"
            % {
                "task_host": task.host,
                "task_port": task.ports[0],
                "hacheck_port": self.hacheck_port,
                "registration": registration,
            }
            for registration in self.registrations
        ]

    async def for_each_registration(
        self, task: DrainTask, func: Callable[..., Awaitable[T]]
    ) -> Sequence[T]:
        """Run ``func`` concurrently against every spool URL for this task and
        return the gathered results.

        Returns None (despite the Sequence annotation) when the task exposes
        no ports, since no spool URLs can be constructed then.
        """
        if task.ports == []:
            return None
        futures = [func(url) for url in self.spool_urls(task)]
        return await asyncio.gather(*futures)

    async def drain(self, task: DrainTask) -> None:
        """Mark every registration of the task as down in hacheck."""
        await self.for_each_registration(task, self.down)

    async def up(self, url: str) -> None:
        """POST an "up" status to a single spool URL."""
        await post_spool(url=url, status="up", data={"status": "up"})

    async def down(self, url: str) -> None:
        """POST a "down" status to a single spool URL, with an expiration so a
        forgotten drain eventually clears itself."""
        await post_spool(
            url=url,
            status="down",
            data={
                "status": "down",
                "expiration": str(time.time() + self.expiration),
                "reason": "Drained by Paasta",
            },
        )

    async def stop_draining(self, task: DrainTask) -> None:
        """Mark every registration of the task as up again in hacheck."""
        await self.for_each_registration(task, self.up)

    async def is_draining(self, task: DrainTask) -> bool:
        """The task is draining if any registration's spool state is not "up".

        NOTE(review): if for_each_registration returned None (portless task),
        iterating ``results`` here would raise TypeError — verify callers only
        reach this with ported tasks.
        """
        results = await self.for_each_registration(task, get_spool)
        return not all([res is None or res["state"] == "up" for res in results])

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        """Safe to kill once every registration has been down longer than
        ``self.delay`` seconds."""
        results = await self.for_each_registration(task, lambda url: get_spool(url))
        if all([res is None or res["state"] == "up" for res in results]):
            # Not draining at all (or no spool entries) -> not safe to kill.
            return False
        else:
            # NOTE(review): if results mixes None entries with down entries,
            # res.get on None would raise AttributeError here — confirm
            # get_spool's contract guarantees uniform results per task.
            return all(
                [res.get("since", 0) < (time.time() - self.delay) for res in results]
            )
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
class StatusCodeNotAcceptableError(Exception):
    """Raised when an HTTP drain endpoint responds with a status code outside
    the configured set of acceptable success codes."""
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
class UrlSpec(TypedDict, total=False):
    # All keys optional (total=False): callers may omit method/success_codes.
    url_format: str  # URL template with {host}, {port}, etc. placeholders
    method: str  # HTTP method to use, e.g. "GET" or "POST"
    success_codes: str  # acceptable status codes, e.g. "200-299,302"
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
@register_drain_method("http")
class HTTPDrainMethod(DrainMethod):
    """This drain policy issues arbitrary HTTP calls to arbitrary URLs specified by the parameters. The URLs are
    specified as format strings, and will have variables such as {host}, {port}, etc. filled in."""

    def __init__(
        self,
        service: str,
        instance: str,
        registrations: List[str],
        drain: UrlSpec,
        stop_draining: UrlSpec,
        is_draining: UrlSpec,
        is_safe_to_kill: UrlSpec,
    ) -> None:
        # Each UrlSpec configures the URL template, HTTP method, and the set of
        # status codes that count as success for the corresponding operation.
        super().__init__(service, instance, registrations)
        self.drain_url_spec = drain
        self.stop_draining_url_spec = stop_draining
        self.is_draining_url_spec = is_draining
        self.is_safe_to_kill_url_spec = is_safe_to_kill

    def get_format_params(self, task: DrainTask) -> List[Dict[str, Any]]:
        """Return one dict of URL-template substitutions per registration,
        using the task's host and first port."""
        return [
            {
                "host": task.host,
                "port": task.ports[0],
                "service": self.service,
                "instance": self.instance,
                "nerve_ns": nerve_ns,
            }
            for nerve_ns in self.registrations
        ]

    def format_url(self, url_format: str, format_params: Dict[str, Any]) -> str:
        """Fill a URL template's {placeholders} with the given params."""
        return url_format.format(**format_params)

    def parse_success_codes(self, success_codes_str: str) -> Set[int]:
        """Expand a string like 200-399,407-409,500 to a set containing all the integers in between."""
        acceptable_response_codes: Set[int] = set()
        # str() allows a bare int (e.g. 200 from YAML) as well as a string.
        for series_str in str(success_codes_str).split(","):
            if "-" in series_str:
                # Inclusive range, e.g. "200-299".
                start, end = series_str.split("-")
                acceptable_response_codes.update(range(int(start), int(end) + 1))
            else:
                acceptable_response_codes.add(int(series_str))
        return acceptable_response_codes

    def check_response_code(self, status_code: int, success_codes_str: str) -> bool:
        """Return True if status_code falls within the configured success codes."""
        acceptable_response_codes = self.parse_success_codes(success_codes_str)
        return status_code in acceptable_response_codes

    async def issue_request(self, url_spec: UrlSpec, task: DrainTask) -> None:
        """Issue a request to the URL specified by url_spec regarding the task given.

        One request is made per registration, concurrently. Raises
        StatusCodeNotAcceptableError if any response's status code is outside
        url_spec["success_codes"].
        """
        format_params = self.get_format_params(task)
        urls = [
            self.format_url(url_spec["url_format"], param) for param in format_params
        ]
        # Default to GET when the spec omits the method.
        method = url_spec.get("method", "GET").upper()

        async with aiohttp.ClientSession() as session:
            reqs = [
                session.request(
                    method=method,
                    url=url,
                    headers={"User-Agent": get_user_agent()},
                    # NOTE(review): aiohttp prefers a ClientTimeout object over
                    # a bare number for timeout — confirm supported version.
                    timeout=15,
                )
                for url in urls
            ]
            res = await asyncio.gather(*reqs)
            for response in res:
                if not self.check_response_code(
                    response.status, url_spec["success_codes"]
                ):
                    raise StatusCodeNotAcceptableError(
                        f"Unacceptable status code {response.status} not in {url_spec['success_codes']} when hitting {response.url}"
                    )

    async def drain(self, task: DrainTask) -> None:
        """Hit the configured drain URL(s) for this task."""
        return await self.issue_request(self.drain_url_spec, task)

    async def stop_draining(self, task: DrainTask) -> None:
        """Hit the configured stop_draining URL(s) for this task."""
        return await self.issue_request(self.stop_draining_url_spec, task)

    async def is_draining(self, task: DrainTask) -> bool:
        """A successful response from the is_draining URL(s) means draining."""
        try:
            await self.issue_request(self.is_draining_url_spec, task)
        except StatusCodeNotAcceptableError:
            return False
        else:
            return True

    async def is_safe_to_kill(self, task: DrainTask) -> bool:
        """A successful response from the is_safe_to_kill URL(s) means safe."""
        try:
            await self.issue_request(self.is_safe_to_kill_url_spec, task)
        except StatusCodeNotAcceptableError:
            return False
        else:
            return True
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# Copyright 2015-2019 Yelp Inc.
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
"""
|
|
16
|
+
Usage: ./paasta_dump_locally_running_services.py [options]
|
|
17
|
+
|
|
18
|
+
Outputs a JSON-encoded list of services that are running on this host along
|
|
19
|
+
with the host port that each service is listening on.
|
|
20
|
+
|
|
21
|
+
Command line options:
|
|
22
|
+
|
|
23
|
+
- -d <SOA_DIR>, --soa-dir <SOA_DIR>: Specify a SOA config dir to read from
|
|
24
|
+
"""
|
|
25
|
+
import argparse
|
|
26
|
+
import json
|
|
27
|
+
import sys
|
|
28
|
+
from typing import List
|
|
29
|
+
from typing import Optional
|
|
30
|
+
from typing import Sequence
|
|
31
|
+
from typing import Tuple
|
|
32
|
+
|
|
33
|
+
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here_for_nerve
|
|
34
|
+
from paasta_tools.long_running_service_tools import ServiceNamespaceConfig
|
|
35
|
+
from paasta_tools.puppet_service_tools import get_puppet_services_running_here_for_nerve
|
|
36
|
+
from paasta_tools.utils import DEFAULT_SOA_DIR
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def parse_args(argv: Optional[Sequence[str]]) -> argparse.Namespace:
    """Parse command-line arguments (``argv`` of None means sys.argv[1:])."""
    arg_parser = argparse.ArgumentParser(
        description="Dumps information about locally running services."
    )
    arg_parser.add_argument(
        "-d",
        "--soa-dir",
        default=DEFAULT_SOA_DIR,
        dest="soa_dir",
        help="define a different soa config directory",
        metavar="SOA_DIR",
    )
    return arg_parser.parse_args(argv)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Print a JSON list of (service, nerve namespace config) pairs for every
    service running on this host, then exit 0."""
    options = parse_args(argv)
    soa_dir = options.soa_dir

    # Combine puppet-managed and kubernetes-managed services in one dump.
    puppet_services = get_puppet_services_running_here_for_nerve(soa_dir=soa_dir)
    k8s_services = get_kubernetes_services_running_here_for_nerve(
        cluster=None, soa_dir=soa_dir
    )
    service_dump: List[Tuple[str, ServiceNamespaceConfig]] = (
        puppet_services + k8s_services
    )

    print(json.dumps(service_dump))
    sys.exit(0)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# Entry point when invoked directly as a script (see module docstring usage).
if __name__ == "__main__":
    main()
|