paasta-tools 1.26.1__py3-none-any.whl → 1.28.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. paasta_tools/__init__.py +1 -1
  2. paasta_tools/api/api_docs/swagger.json +16 -3
  3. paasta_tools/api/tweens/auth.py +2 -1
  4. paasta_tools/api/views/autoscaler.py +60 -23
  5. paasta_tools/cli/cmds/autoscale.py +38 -17
  6. paasta_tools/cli/cmds/local_run.py +2 -2
  7. paasta_tools/contrib/bounce_log_latency_parser.py +1 -1
  8. paasta_tools/contrib/ide_helper.py +14 -14
  9. paasta_tools/contrib/mock_patch_checker.py +1 -1
  10. paasta_tools/contrib/render_template.py +1 -1
  11. paasta_tools/contrib/shared_ip_check.py +1 -1
  12. paasta_tools/instance/kubernetes.py +10 -10
  13. paasta_tools/kubernetes/application/controller_wrappers.py +4 -1
  14. paasta_tools/kubernetes_tools.py +7 -3
  15. paasta_tools/mesos/master.py +1 -1
  16. paasta_tools/metrics/metastatus_lib.py +1 -1
  17. paasta_tools/paastaapi/model/autoscaling_override.py +7 -6
  18. paasta_tools/paastaapi/model/inline_response202.py +5 -2
  19. paasta_tools/remote_git.py +2 -2
  20. paasta_tools/run-paasta-api-in-dev-mode.py +2 -2
  21. paasta_tools/run-paasta-api-playground.py +2 -2
  22. paasta_tools/setup_kubernetes_job.py +10 -4
  23. paasta_tools/yaml_tools.py +1 -1
  24. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/ide_helper.py +14 -14
  25. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_kubernetes_job.py +10 -4
  26. paasta_tools-1.28.0.dist-info/METADATA +79 -0
  27. {paasta_tools-1.26.1.dist-info → paasta_tools-1.28.0.dist-info}/RECORD +81 -81
  28. {paasta_tools-1.26.1.dist-info → paasta_tools-1.28.0.dist-info}/WHEEL +1 -1
  29. paasta_tools-1.26.1.dist-info/METADATA +0 -75
  30. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/apply_external_resources.py +0 -0
  31. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/bounce_log_latency_parser.py +0 -0
  32. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_autoscaler_max_instances.py +0 -0
  33. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_cassandracluster_services_replication.py +0 -0
  34. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_flink_services_health.py +0 -0
  35. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_kubernetes_api.py +0 -0
  36. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_kubernetes_services_replication.py +0 -0
  37. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_manual_oapi_changes.sh +0 -0
  38. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_oom_events.py +0 -0
  39. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_orphans.py +0 -0
  40. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/check_spark_jobs.py +0 -0
  41. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/cleanup_kubernetes_cr.py +0 -0
  42. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/cleanup_kubernetes_crd.py +0 -0
  43. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/cleanup_kubernetes_jobs.py +0 -0
  44. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/create_dynamodb_table.py +0 -0
  45. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/create_paasta_playground.py +0 -0
  46. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/delete_kubernetes_deployments.py +0 -0
  47. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/emit_allocated_cpu_metrics.py +0 -0
  48. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/generate_all_deployments +0 -0
  49. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/generate_authenticating_services.py +0 -0
  50. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/generate_deployments_for_service.py +0 -0
  51. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/generate_services_file.py +0 -0
  52. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/generate_services_yaml.py +0 -0
  53. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/get_running_task_allocation.py +0 -0
  54. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/habitat_fixer.py +0 -0
  55. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/is_pod_healthy_in_proxy.py +0 -0
  56. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/is_pod_healthy_in_smartstack.py +0 -0
  57. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/kill_bad_containers.py +0 -0
  58. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/kubernetes_remove_evicted_pods.py +0 -0
  59. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/mass-deploy-tag.sh +0 -0
  60. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/mock_patch_checker.py +0 -0
  61. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_cleanup_remote_run_resources.py +0 -0
  62. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_cleanup_stale_nodes.py +0 -0
  63. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_deploy_tron_jobs +0 -0
  64. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_execute_docker_command.py +0 -0
  65. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_secrets_sync.py +0 -0
  66. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_tabcomplete.sh +0 -0
  67. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/paasta_update_soa_memcpu.py +0 -0
  68. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/render_template.py +0 -0
  69. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/rightsizer_soaconfigs_update.py +0 -0
  70. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/service_shard_remove.py +0 -0
  71. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/service_shard_update.py +0 -0
  72. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_istio_mesh.py +0 -0
  73. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_kubernetes_cr.py +0 -0
  74. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_kubernetes_crd.py +0 -0
  75. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_kubernetes_internal_crd.py +0 -0
  76. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/setup_prometheus_adapter_config.py +0 -0
  77. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/shared_ip_check.py +0 -0
  78. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/synapse_srv_namespaces_fact.py +0 -0
  79. {paasta_tools-1.26.1.data → paasta_tools-1.28.0.data}/scripts/timeouts_metrics_prom.py +0 -0
  80. {paasta_tools-1.26.1.dist-info → paasta_tools-1.28.0.dist-info}/entry_points.txt +0 -0
  81. {paasta_tools-1.26.1.dist-info → paasta_tools-1.28.0.dist-info/licenses}/LICENSE +0 -0
  82. {paasta_tools-1.26.1.dist-info → paasta_tools-1.28.0.dist-info}/top_level.txt +0 -0
paasta_tools/__init__.py CHANGED
@@ -17,4 +17,4 @@
17
17
  # setup phase, the dependencies may not exist on disk yet.
18
18
  #
19
19
  # Don't bump version manually. See `make release` docs in ./Makefile
20
- __version__ = "1.26.1"
20
+ __version__ = "1.28.0"
@@ -322,8 +322,14 @@
322
322
  },
323
323
  "min_instances": {
324
324
  "type": "integer",
325
+ "x-nullable": true,
325
326
  "description": "Minimum number of instances to run"
326
327
  },
328
+ "max_instances": {
329
+ "type": "integer",
330
+ "x-nullable": true,
331
+ "description": "Maximum number of instances to run"
332
+ },
327
333
  "expire_after": {
328
334
  "type": "number",
329
335
  "format": "float",
@@ -1724,17 +1730,24 @@
1724
1730
  "properties": {
1725
1731
  "min_instances": {
1726
1732
  "type": "integer",
1727
- "description": "Minimum number of instances to run",
1728
- "minimum": 1
1733
+ "x-nullable": true,
1734
+ "description": "Minimum number of instances to run"
1735
+ },
1736
+ "max_instances": {
1737
+ "type": "integer",
1738
+ "x-nullable": true,
1739
+ "description": "Maximum number of instances to run"
1729
1740
  },
1730
1741
  "expire_after": {
1731
1742
  "type": "number",
1732
1743
  "format": "float",
1733
- "description": "Unix timestamp when this override is no longer valid"
1744
+ "description": "Unix timestamp when this override is no longer valid",
1745
+ "minimum": 1
1734
1746
  }
1735
1747
  },
1736
1748
  "required": [
1737
1749
  "min_instances",
1750
+ "max_instances",
1738
1751
  "expire_after"
1739
1752
  ]
1740
1753
  },
@@ -77,7 +77,8 @@ class AuthTweenFactory:
77
77
  method: str,
78
78
  service: Optional[str],
79
79
  ) -> AuthorizationOutcome:
80
- """Check if API request is authorized
80
+ """
81
+ Check if API request is authorized
81
82
 
82
83
  :param str path: API path
83
84
  :param str token: authentication token
@@ -136,7 +136,7 @@ def set_autoscaling_override(request):
136
136
  Required parameters:
137
137
  - service: The service name
138
138
  - instance: The instance name
139
- - min_instances: The minimum number of instances to enforce
139
+ - min_instances AND/OR max_instances: The minimum and/or maximum number of instances to enforce
140
140
  - expires_after: unix timestamp after which the override is no longer valid
141
141
  """
142
142
  service = request.swagger_data.get("service")
@@ -156,24 +156,29 @@ def set_autoscaling_override(request):
156
156
 
157
157
  json_body = request.swagger_data.get("json_body", {})
158
158
  min_instances_override = json_body.get("min_instances")
159
- expire_after = json_body.get("expire_after")
159
+ max_instances_override = json_body.get("max_instances")
160
+ expire_after = json_body["expire_after"]
160
161
 
161
- if not isinstance(min_instances_override, int) or min_instances_override < 1:
162
- raise ApiFailure("min_instances must be a positive integer", 400)
162
+ # ideally we'd enforce this in our api spec - but since we're using both swagger and oapi, that's not quite so simple
163
+ if not min_instances_override and not max_instances_override:
164
+ raise ApiFailure(
165
+ "At least one of min_instances or max_instances must be provided", 400
166
+ )
163
167
 
164
- if not expire_after:
165
- raise ApiFailure("expire_after is required", 400)
168
+ # we can't currently enforce this in the API spec since the generator seems to have issues generating a validator
169
+ # for a nullable integer field with a minimum value as we get errors like:
170
+ # `TypeError: '<' not supported between instances of 'NoneType' and 'int'`
171
+ # that said, unless someone is manually crafting requests, this should never happen
172
+ # since the CLI includes a check for this as well
173
+ if min_instances_override is not None and min_instances_override < 1:
174
+ raise ApiFailure("min_instances must be a positive integer", 400)
175
+ if max_instances_override is not None and max_instances_override < 1:
176
+ raise ApiFailure("max_instances must be a positive integer", 400)
166
177
 
167
- max_instances = instance_config.get_max_instances()
168
- if max_instances is None:
178
+ # otherwise, we could have folks accidentally enable autoscaling (using cpu) for a non-autoscaled
179
+ if instance_config.is_autoscaling_enabled() is None:
169
180
  raise ApiFailure(f"Autoscaling is not enabled for {service}.{instance}", 400)
170
181
 
171
- if max_instances < min_instances_override:
172
- raise ApiFailure(
173
- f"min_instances ({min_instances_override}) cannot be greater than max_instances ({max_instances})",
174
- 400,
175
- )
176
-
177
182
  configmap, created = get_or_create_autoscaling_overrides_configmap()
178
183
  if created:
179
184
  log.info("Created new autoscaling overrides ConfigMap")
@@ -182,23 +187,54 @@ def set_autoscaling_override(request):
182
187
  if not configmap.data:
183
188
  configmap.data = {}
184
189
 
185
- override_data = {
186
- "min_instances": min_instances_override,
187
- "created_at": datetime.now(timezone.utc).isoformat(),
188
- # NOTE: we may want to also allow setting a max_instances override in the future, but if we do that
189
- # we'd probably want to force folks to either set one or both and share the same expiration time
190
- "expire_after": expire_after,
191
- }
192
-
193
190
  service_instance = f"{service}.{instance}"
194
191
  existing_overrides = (
195
192
  json.loads(configmap.data[service_instance])
196
193
  if service_instance in configmap.data
197
194
  else {}
198
195
  )
199
- merged_overrides = {**existing_overrides, **override_data}
196
+
197
+ # this is slightly funky as there's a hierarchy of what value to pick:
198
+ # 1. the override value provided in the request - as this should override any existing value
199
+ # 2. the existing override value in the configmap - this is used if we previously only set an override for min or
200
+ # max instances and now want to set a value for the other
201
+ # 3. the value from the instance config - this is used as a final fallback since this means no override is present
202
+ min_instances = (
203
+ min_instances_override
204
+ or existing_overrides.get("min_instances")
205
+ or instance_config.get_min_instances()
206
+ )
207
+ max_instances = (
208
+ max_instances_override
209
+ or existing_overrides.get("max_instances")
210
+ or instance_config.get_max_instances()
211
+ )
212
+
213
+ # NOTE: the max_instances check is unnecessary here, but type-checkers can't see that the is_autoscaling_enabled()
214
+ # check above ensures that max_instances is not None
215
+ if max_instances is None or max_instances < min_instances:
216
+ raise ApiFailure(
217
+ f"min_instances ({min_instances_override}) cannot be greater than max_instances ({max_instances})",
218
+ 400,
219
+ )
220
+
221
+ override_data = {
222
+ "min_instances": min_instances_override,
223
+ "max_instances": max_instances_override,
224
+ "created_at": datetime.now(timezone.utc).isoformat(),
225
+ "expire_after": expire_after,
226
+ }
227
+
228
+ # NOTE: we need to strip out null values from the incoming overrides since otherwise we'd remove existing overrides
229
+ # (since if only --set-min or --set-max is provided, the other will be sent as None)
230
+ merged_overrides = {
231
+ **existing_overrides,
232
+ **{k: v for k, v in override_data.items() if v is not None},
233
+ }
200
234
  serialized_overrides = json.dumps(merged_overrides)
201
235
 
236
+ # note: we can only append with patching - if we ever want to remove overrides outside of the cleanup cronjob,
237
+ # we would need to change this to use replace_namespaced_configmap
202
238
  patch_namespaced_configmap(
203
239
  name=AUTOSCALING_OVERRIDES_CONFIGMAP_NAME,
204
240
  namespace=AUTOSCALING_OVERRIDES_CONFIGMAP_NAMESPACE,
@@ -213,6 +249,7 @@ def set_autoscaling_override(request):
213
249
  "instance": instance,
214
250
  "cluster": cluster,
215
251
  "min_instances": min_instances_override,
252
+ "max_instances": max_instances_override,
216
253
  "expire_after": expire_after,
217
254
  "status": "SUCCESS",
218
255
  }
@@ -75,10 +75,18 @@ def add_subparser(subparsers):
75
75
  else autoscale_parser.error("Minimum instances must be >= 1"),
76
76
  default=None,
77
77
  )
78
+ override_group.add_argument(
79
+ "--set-max",
80
+ help="Set the maximum number of replicas (must be >= 1). Requires --for parameter.",
81
+ type=lambda x: int(x)
82
+ if int(x) >= 1
83
+ else autoscale_parser.error("Maximum instances must be >= 1"),
84
+ default=None,
85
+ )
78
86
  override_group.add_argument(
79
87
  "--for",
80
88
  dest="duration",
81
- help="Duration for the temporary override (e.g. '3h', '30m'). Required when using --set-min.",
89
+ help="Duration for the temporary override (e.g. '3h', '30m'). Required when using --set-min and/or --set-max.",
82
90
  default=None,
83
91
  )
84
92
 
@@ -113,22 +121,24 @@ def paasta_autoscale(args):
113
121
  log.setLevel(logging.DEBUG)
114
122
  service = figure_out_service_name(args)
115
123
 
116
- if args.set_min is not None and not args.duration:
124
+ if (args.set_min is not None or args.set_max is not None) and not args.duration:
117
125
  print(
118
126
  PaastaColors.yellow(
119
- "WARNING: --set-min requires --for parameter to specify duration - defaulting to 30m"
127
+ "WARNING: --set-min/--set-max usage requires --for parameter to specify duration - defaulting to 30m"
120
128
  )
121
129
  )
122
130
  args.duration = "30m"
123
131
 
124
- if args.duration is not None and args.set_min is None:
125
- print(PaastaColors.red("Error: --for requires --set-min parameter"))
132
+ if args.duration is not None and args.set_min is None and args.set_max is None:
133
+ print(
134
+ PaastaColors.red("Error: --for requires --set-min or --set-max parameter")
135
+ )
126
136
  return 1
127
137
 
128
- if args.set is not None and args.set_min is not None:
138
+ if args.set is not None and args.set_min is not None and args.set_max is not None:
129
139
  print(
130
140
  PaastaColors.red(
131
- "Error: Cannot use both --set and --set-min at the same time"
141
+ "Error: Cannot use both --set and --set-min or --set-max at the same time"
132
142
  )
133
143
  )
134
144
  return 1
@@ -159,9 +169,12 @@ def paasta_autoscale(args):
159
169
  print("Could not connect to paasta api. Maybe you misspelled the cluster?")
160
170
  return 1
161
171
 
172
+ # TODO: we should probably also make sure we set a couple other defaults - currently, it's possible for
173
+ # status/res/etc to be unbound in some code paths
174
+ err_reason = None
162
175
  try:
163
176
  # get current autoscaler count
164
- if args.set is None and args.set_min is None:
177
+ if args.set is None and args.set_min is None and args.set_max is None:
165
178
  log.debug("Getting the current autoscaler count...")
166
179
  res, status, _ = api.autoscaler.get_autoscaler_count(
167
180
  service=service, instance=args.instance, _return_http_data_only=False
@@ -187,7 +200,7 @@ def paasta_autoscale(args):
187
200
  )
188
201
 
189
202
  # set lower bound
190
- elif args.set_min is not None:
203
+ elif args.set_min is not None or args.set_max is not None:
191
204
  duration_seconds = parse_duration_to_seconds(args.duration)
192
205
  if not duration_seconds:
193
206
  print(
@@ -202,10 +215,11 @@ def paasta_autoscale(args):
202
215
  expiration_time = time.time() + duration_seconds
203
216
 
204
217
  log.debug(
205
- f"Setting minimum instances to {args.set_min} for duration {args.duration}."
218
+ f"Sending the following overrides for duration {args.duration}: min_instances: {args.set_min}, max_instances: {args.set_max}."
206
219
  )
207
220
  msg = paastamodels.AutoscalingOverride(
208
221
  min_instances=args.set_min,
222
+ max_instances=args.set_max,
209
223
  expire_after=expiration_time,
210
224
  )
211
225
 
@@ -224,28 +238,35 @@ def paasta_autoscale(args):
224
238
  )
225
239
  except api.api_error as exc:
226
240
  status = exc.status
241
+ err_reason = exc.body
227
242
 
228
243
  if not 200 <= status <= 299:
229
244
  print(
230
245
  PaastaColors.red(
231
- f"ERROR: '{args.instance}' is not configured to autoscale OR you set min_instances above the current max_instances, "
246
+ f"ERROR: '{args.instance}' is not configured to autoscale OR you set impossible {{min, max}}_instances, "
232
247
  f"and `paasta autoscale` could not update it. "
233
248
  f"If you want to be able to boost this service, please configure autoscaling for the service "
234
249
  f"in its config file by setting min and max instances appropriately. Example: \n"
235
250
  f"{args.instance}:\n"
236
251
  f" min_instances: 5\n"
237
- f" max_instances: 50"
252
+ f" max_instances: 50\n"
253
+ f"{err_reason}"
238
254
  )
239
255
  )
240
256
  return 0
241
257
 
242
258
  log.debug(f"Res: {res} Http: {status}")
243
- if not args.set_min:
259
+ if not args.set_min and not args.set_max:
244
260
  print(f"Desired instances: {res.desired_instances}")
245
- elif args.set_min:
246
- print(
247
- f"Temporary override set for {args.service}.{args.instance} with minimum instances: {args.set_min}"
248
- )
261
+ else:
262
+ if args.set_min:
263
+ print(
264
+ f"Temporary override set for {args.service}.{args.instance} with minimum instances: {args.set_min}"
265
+ )
266
+ if args.set_max:
267
+ print(
268
+ f"Temporary override set for {args.service}.{args.instance} with maximum instances: {args.set_max}"
269
+ )
249
270
  # folks using this might be in different timezones, so let's convert the expiration time to a few common ones
250
271
  # to make it extra clear when the override will expire
251
272
  epoch_time = datetime.fromtimestamp(res.expire_after)
@@ -977,12 +977,12 @@ def run_docker_container(
977
977
  # First try to write the file as a string
978
978
  # This is for text like config files
979
979
  with open(temp_secret_filename, "w") as f:
980
- f.write(secret_content)
980
+ f.write(secret_content) # type: ignore # TODO: make this type-safe rather than rely on exceptions
981
981
  except TypeError:
982
982
  # If that fails, try to write it as bytes
983
983
  # This is for binary files like TLS keys
984
984
  with open(temp_secret_filename, "wb") as fb:
985
- fb.write(secret_content)
985
+ fb.write(secret_content) # type: ignore # TODO: make this type-safe rather than rely on exceptions
986
986
 
987
987
  # Append this to the list of volumes passed to docker run
988
988
  volumes.append(f"{temp_secret_filename}:{container_mount_path}:ro")
@@ -1,4 +1,4 @@
1
- #!/usr/bin/env python3.8
1
+ #!/usr/bin/env python3.10
2
2
  import itertools
3
3
  import json
4
4
  import sys
@@ -53,12 +53,12 @@ def install_vscode_support() -> None:
53
53
  "python": "${workspaceFolder}/.paasta/bin/python",
54
54
  "program": "${workspaceFolder}/.paasta/bin/tox",
55
55
  "subProcess": True,
56
- "args": ["-e", "py38-linux,docs,mypy,tests"],
56
+ "args": ["-e", "py310-linux,docs,mypy,tests"],
57
57
  },
58
58
  {
59
59
  "name": "paasta cli",
60
60
  "cwd": "${workspaceFolder}",
61
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
61
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
62
62
  "type": "python",
63
63
  "request": "launch",
64
64
  "module": "paasta_tools.cli.cli",
@@ -66,7 +66,7 @@ def install_vscode_support() -> None:
66
66
  {
67
67
  "name": "paasta rollback",
68
68
  "cwd": "${workspaceFolder}",
69
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
69
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
70
70
  "type": "python",
71
71
  "request": "launch",
72
72
  "module": "paasta_tools.cli.cli",
@@ -83,7 +83,7 @@ def install_vscode_support() -> None:
83
83
  {
84
84
  "name": "paasta mark-for-deployment",
85
85
  "cwd": "${workspaceFolder}",
86
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
86
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
87
87
  "type": "python",
88
88
  "request": "launch",
89
89
  "module": "paasta_tools.cli.cli",
@@ -101,7 +101,7 @@ def install_vscode_support() -> None:
101
101
  {
102
102
  "name": "paasta status",
103
103
  "cwd": "${workspaceFolder}",
104
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
104
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
105
105
  "type": "python",
106
106
  "request": "launch",
107
107
  "module": "paasta_tools.cli.cli",
@@ -118,7 +118,7 @@ def install_vscode_support() -> None:
118
118
  {
119
119
  "name": "paasta playground",
120
120
  "cwd": "${workspaceFolder}",
121
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
121
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
122
122
  "type": "python",
123
123
  "request": "launch",
124
124
  "module": "paasta_tools.cli.cli",
@@ -138,7 +138,7 @@ def install_vscode_support() -> None:
138
138
  {
139
139
  "name": "paasta status playground",
140
140
  "cwd": "${workspaceFolder}",
141
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
141
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
142
142
  "type": "python",
143
143
  "request": "launch",
144
144
  "module": "paasta_tools.cli.cli",
@@ -157,7 +157,7 @@ def install_vscode_support() -> None:
157
157
  {
158
158
  "name": "paasta logs",
159
159
  "cwd": "${workspaceFolder}",
160
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
160
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
161
161
  "type": "python",
162
162
  "request": "launch",
163
163
  "module": "paasta_tools.cli.cli",
@@ -175,7 +175,7 @@ def install_vscode_support() -> None:
175
175
  "name": "paasta validate",
176
176
  # This command has to be ran from inside the service repo in yelpsoa-configs
177
177
  "cwd": "${userHome}/pg/yelpsoa-configs/",
178
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
178
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
179
179
  "type": "python",
180
180
  "request": "launch",
181
181
  "module": "paasta_tools.cli.cli",
@@ -184,10 +184,10 @@ def install_vscode_support() -> None:
184
184
  {
185
185
  # 1) Follow step 1 in "Running the PaaSTA HTTP API Locally" wiki
186
186
  # 2) Run this "paasta API" test to debug paasta API
187
- # 3) Run client command, e.g. PAASTA_SYSTEM_CONFIG_DIR=./etc_paasta_for_development/ .tox/py38-linux/bin/python paasta_tools/cli/cli.py status --clusters norcal-devc --service katamari_test_service
187
+ # 3) Run client command, e.g. PAASTA_SYSTEM_CONFIG_DIR=./etc_paasta_for_development/ .tox/py310-linux/bin/python paasta_tools/cli/cli.py status --clusters norcal-devc --service katamari_test_service
188
188
  "name": "paasta API",
189
189
  "cwd": "${workspaceFolder}",
190
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
190
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
191
191
  "type": "python",
192
192
  "request": "launch",
193
193
  "module": "paasta_tools.run-paasta-api-in-dev-mode",
@@ -203,7 +203,7 @@ def install_vscode_support() -> None:
203
203
  {
204
204
  "name": "paasta API playground",
205
205
  "cwd": "${workspaceFolder}",
206
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
206
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
207
207
  "type": "python",
208
208
  "request": "launch",
209
209
  "module": "paasta_tools.run-paasta-api-playground",
@@ -221,7 +221,7 @@ def install_vscode_support() -> None:
221
221
  {
222
222
  "name": "Run setup k8s job in playground",
223
223
  "cwd": "${workspaceFolder}",
224
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
224
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
225
225
  "type": "python",
226
226
  "request": "launch",
227
227
  "module": "paasta_tools.setup_kubernetes_job",
@@ -244,7 +244,7 @@ def install_vscode_support() -> None:
244
244
  {
245
245
  "name": "Generate deployments.json in playground",
246
246
  "cwd": "${workspaceFolder}",
247
- "python": "${workspaceFolder}/.tox/py38-linux/bin/python",
247
+ "python": "${workspaceFolder}/.tox/py310-linux/bin/python",
248
248
  "type": "python",
249
249
  "request": "launch",
250
250
  "module": "paasta_tools.generate_deployments_for_service",
@@ -1,4 +1,4 @@
1
- #!/usr/bin/env python3.8
1
+ #!/usr/bin/env python3.10
2
2
  import ast
3
3
  import sys
4
4
 
@@ -1,4 +1,4 @@
1
- #!/usr/bin/env python3.8
1
+ #!/usr/bin/env python3.10
2
2
  import argparse
3
3
  import os
4
4
  import re
@@ -1,4 +1,4 @@
1
- #!/usr/bin/env python3.8
1
+ #!/usr/bin/env python3.10
2
2
  import sys
3
3
  from collections import defaultdict
4
4
 
@@ -641,7 +641,7 @@ async def kubernetes_status_v2(
641
641
  kube_client, job_config, job_config.get_kubernetes_namespace()
642
642
  )
643
643
  )
644
- tasks.append(autoscaling_task)
644
+ tasks.append(autoscaling_task) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
645
645
  else:
646
646
  autoscaling_task = None
647
647
 
@@ -653,7 +653,7 @@ async def kubernetes_status_v2(
653
653
  namespaces=relevant_namespaces,
654
654
  )
655
655
  )
656
- tasks.append(pods_task)
656
+ tasks.append(pods_task) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
657
657
 
658
658
  service_namespace_config = kubernetes_tools.load_service_namespace_config(
659
659
  service=service,
@@ -674,9 +674,9 @@ async def kubernetes_status_v2(
674
674
  )
675
675
  )
676
676
  backends_task = asyncio.create_task(
677
- get_backends_from_mesh_status(mesh_status_task)
677
+ get_backends_from_mesh_status(mesh_status_task) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
678
678
  )
679
- tasks.extend([mesh_status_task, backends_task])
679
+ tasks.extend([mesh_status_task, backends_task]) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
680
680
  else:
681
681
  mesh_status_task = None
682
682
  backends_task = None
@@ -685,7 +685,7 @@ async def kubernetes_status_v2(
685
685
  pod_status_by_sha_and_readiness_task = asyncio.create_task(
686
686
  get_pod_status_tasks_by_sha_and_readiness(
687
687
  pods_task,
688
- backends_task,
688
+ backends_task, # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
689
689
  kube_client,
690
690
  verbose,
691
691
  )
@@ -696,15 +696,15 @@ async def kubernetes_status_v2(
696
696
  service=service,
697
697
  instance=instance,
698
698
  namespaces=relevant_namespaces,
699
- pod_status_by_sha_and_readiness_task=pod_status_by_sha_and_readiness_task,
699
+ pod_status_by_sha_and_readiness_task=pod_status_by_sha_and_readiness_task, # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
700
700
  )
701
701
  )
702
- tasks.extend([pod_status_by_sha_and_readiness_task, versions_task])
702
+ tasks.extend([pod_status_by_sha_and_readiness_task, versions_task]) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
703
703
  else:
704
704
  pod_status_by_replicaset_task = asyncio.create_task(
705
705
  get_pod_status_tasks_by_replicaset(
706
706
  pods_task,
707
- backends_task,
707
+ backends_task, # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
708
708
  kube_client,
709
709
  verbose,
710
710
  )
@@ -715,10 +715,10 @@ async def kubernetes_status_v2(
715
715
  service=service,
716
716
  instance=instance,
717
717
  namespaces=relevant_namespaces,
718
- pod_status_by_replicaset_task=pod_status_by_replicaset_task,
718
+ pod_status_by_replicaset_task=pod_status_by_replicaset_task, # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
719
719
  )
720
720
  )
721
- tasks.extend([pod_status_by_replicaset_task, versions_task])
721
+ tasks.extend([pod_status_by_replicaset_task, versions_task]) # type: ignore # PAASTA-18698; ignoring due to unexpected type mismatch
722
722
 
723
723
  await asyncio.gather(*tasks, return_exceptions=True)
724
724
 
@@ -301,7 +301,10 @@ class DeploymentWrapper(Application):
301
301
  kube_client=kube_client,
302
302
  namespace=self.item.metadata.namespace,
303
303
  min_instances_override=(
304
- self.hpa_override["min_instances"] if self.hpa_override else None
304
+ self.hpa_override.get("min_instances") if self.hpa_override else None
305
+ ),
306
+ max_instances_override=(
307
+ self.hpa_override.get("max_instances") if self.hpa_override else None
305
308
  ),
306
309
  )
307
310
 
@@ -325,6 +325,7 @@ class DatastoreCredentialsConfig(TypedDict, total=False):
325
325
 
326
326
  class HpaOverride(TypedDict):
327
327
  min_instances: int
328
+ max_instances: int
328
329
  expire_after: str
329
330
 
330
331
 
@@ -899,6 +900,7 @@ class KubernetesDeploymentConfig(LongRunningServiceConfig):
899
900
  kube_client: KubeClient,
900
901
  namespace: str,
901
902
  min_instances_override: Optional[int] = None,
903
+ max_instances_override: Optional[int] = None,
902
904
  ) -> Optional[V2HorizontalPodAutoscaler]:
903
905
  # Returns None if an HPA should not be attached based on the config,
904
906
  # or the config is invalid.
@@ -914,7 +916,7 @@ class KubernetesDeploymentConfig(LongRunningServiceConfig):
914
916
  return None
915
917
 
916
918
  min_replicas = min_instances_override or self.get_min_instances()
917
- max_replicas = self.get_max_instances()
919
+ max_replicas = max_instances_override or self.get_max_instances()
918
920
  if min_replicas == 0 or max_replicas == 0:
919
921
  log.error(
920
922
  f"Invalid value for min or max_instances on {name}: {min_replicas}, {max_replicas}"
@@ -2015,7 +2017,7 @@ class KubernetesDeploymentConfig(LongRunningServiceConfig):
2015
2017
  )
2016
2018
  return None
2017
2019
 
2018
- def get_min_instances(self) -> Optional[int]:
2020
+ def get_min_instances(self) -> int:
2019
2021
  return self.config_dict.get(
2020
2022
  "min_instances",
2021
2023
  1,
@@ -4690,7 +4692,9 @@ def get_namespaced_configmap(
4690
4692
 
4691
4693
  def patch_namespaced_configmap(
4692
4694
  name: str,
4693
- body: Dict[str, str],
4695
+ # NOTE: passing a dict *seems* to work - but we should likely switch to passing a ConfigMap since that seems to be
4696
+ # the most supported option
4697
+ body: Union[V1ConfigMap, Dict[str, Any]],
4694
4698
  *,
4695
4699
  namespace: str,
4696
4700
  kube_client: KubeClient,
@@ -147,7 +147,7 @@ class MesosMaster:
147
147
  def _file_resolver(self, cfg):
148
148
  return self.resolve(open(cfg[6:], "r+").read().strip())
149
149
 
150
- @retry(KazooTimeoutError, tries=5, delay=0.5, logger=logger)
150
+ @retry(KazooTimeoutError, tries=5, delay=0.5, logger=logger) # type: ignore
151
151
  def _zookeeper_resolver(self, cfg):
152
152
  hosts, path = cfg[5:].split("/", 1)
153
153
  path = "/" + path
@@ -614,7 +614,7 @@ def group_slaves_by_key_func(
614
614
  """
615
615
  sorted_slaves: Sequence[_GenericNodeT]
616
616
  if sort_func is None:
617
- sorted_slaves = sorted(slaves, key=key_func)
617
+ sorted_slaves = sorted(slaves, key=key_func) # type: ignore # this code is to be deleted
618
618
  else:
619
619
  sorted_slaves = sort_func(slaves)
620
620