octavia 15.0.0__py3-none-any.whl → 16.0.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
  2. octavia/amphorae/backends/agent/api_server/loadbalancer.py +6 -6
  3. octavia/amphorae/backends/agent/api_server/plug.py +1 -1
  4. octavia/amphorae/backends/agent/api_server/util.py +35 -2
  5. octavia/amphorae/backends/health_daemon/status_message.py +1 -2
  6. octavia/amphorae/drivers/haproxy/rest_api_driver.py +12 -7
  7. octavia/api/drivers/amphora_driver/flavor_schema.py +5 -0
  8. octavia/api/drivers/noop_driver/driver.py +2 -1
  9. octavia/api/drivers/utils.py +12 -0
  10. octavia/api/root_controller.py +8 -2
  11. octavia/api/v2/controllers/base.py +8 -4
  12. octavia/api/v2/controllers/listener.py +12 -2
  13. octavia/api/v2/controllers/load_balancer.py +33 -1
  14. octavia/api/v2/controllers/member.py +58 -4
  15. octavia/api/v2/types/load_balancer.py +7 -1
  16. octavia/api/v2/types/member.py +3 -0
  17. octavia/common/base_taskflow.py +19 -10
  18. octavia/common/clients.py +8 -2
  19. octavia/common/config.py +17 -2
  20. octavia/common/constants.py +6 -0
  21. octavia/common/data_models.py +32 -2
  22. octavia/common/exceptions.py +5 -0
  23. octavia/common/utils.py +4 -1
  24. octavia/common/validate.py +16 -0
  25. octavia/compute/drivers/noop_driver/driver.py +30 -1
  26. octavia/controller/healthmanager/health_manager.py +7 -0
  27. octavia/controller/worker/v2/flows/amphora_flows.py +3 -5
  28. octavia/controller/worker/v2/flows/listener_flows.py +2 -1
  29. octavia/controller/worker/v2/flows/load_balancer_flows.py +38 -0
  30. octavia/controller/worker/v2/taskflow_jobboard_driver.py +34 -6
  31. octavia/controller/worker/v2/tasks/compute_tasks.py +9 -5
  32. octavia/controller/worker/v2/tasks/database_tasks.py +26 -6
  33. octavia/controller/worker/v2/tasks/network_tasks.py +118 -70
  34. octavia/db/base_models.py +29 -5
  35. octavia/db/migration/alembic_migrations/versions/3097e55493ae_add_sg_id_to_vip_table.py +39 -0
  36. octavia/db/migration/alembic_migrations/versions/8db7a6443785_add_member_vnic_type.py +36 -0
  37. octavia/db/migration/alembic_migrations/versions/fabf4983846b_add_member_port_table.py +40 -0
  38. octavia/db/models.py +43 -1
  39. octavia/db/repositories.py +88 -9
  40. octavia/network/base.py +29 -12
  41. octavia/network/data_models.py +2 -1
  42. octavia/network/drivers/neutron/allowed_address_pairs.py +55 -46
  43. octavia/network/drivers/neutron/base.py +28 -16
  44. octavia/network/drivers/neutron/utils.py +2 -2
  45. octavia/network/drivers/noop_driver/driver.py +150 -29
  46. octavia/policies/__init__.py +4 -0
  47. octavia/policies/advanced_rbac.py +95 -0
  48. octavia/policies/base.py +5 -101
  49. octavia/policies/keystone_default_roles.py +81 -0
  50. octavia/policies/loadbalancer.py +13 -0
  51. octavia/tests/common/constants.py +2 -1
  52. octavia/tests/common/sample_data_models.py +27 -14
  53. octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +5 -4
  54. octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +2 -1
  55. octavia/tests/functional/api/v2/test_health_monitor.py +1 -1
  56. octavia/tests/functional/api/v2/test_l7policy.py +1 -1
  57. octavia/tests/functional/api/v2/test_listener.py +1 -1
  58. octavia/tests/functional/api/v2/test_load_balancer.py +150 -4
  59. octavia/tests/functional/api/v2/test_member.py +50 -0
  60. octavia/tests/functional/api/v2/test_pool.py +1 -1
  61. octavia/tests/functional/api/v2/test_quotas.py +5 -8
  62. octavia/tests/functional/db/base.py +6 -6
  63. octavia/tests/functional/db/test_models.py +124 -1
  64. octavia/tests/functional/db/test_repositories.py +237 -19
  65. octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
  66. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +10 -7
  67. octavia/tests/unit/api/drivers/test_utils.py +6 -1
  68. octavia/tests/unit/certificates/generator/test_local.py +1 -1
  69. octavia/tests/unit/common/test_base_taskflow.py +4 -3
  70. octavia/tests/unit/compute/drivers/noop_driver/test_driver.py +28 -2
  71. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +27 -1
  72. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
  73. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +100 -79
  74. octavia/tests/unit/controller/worker/v2/test_taskflow_jobboard_driver.py +8 -0
  75. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +62 -45
  76. octavia/tests/unit/network/drivers/neutron/test_base.py +7 -7
  77. octavia/tests/unit/network/drivers/noop_driver/test_driver.py +55 -42
  78. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -1
  79. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/AUTHORS +3 -0
  80. octavia-16.0.0.0rc1.dist-info/METADATA +156 -0
  81. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/RECORD +95 -90
  82. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/WHEEL +1 -1
  83. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/entry_points.txt +1 -1
  84. octavia-16.0.0.0rc1.dist-info/pbr.json +1 -0
  85. octavia-15.0.0.dist-info/METADATA +0 -156
  86. octavia-15.0.0.dist-info/pbr.json +0 -1
  87. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
  88. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/README.rst +0 -0
  89. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
  90. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
  91. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  92. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
  93. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  94. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  95. {octavia-15.0.0.data → octavia-16.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
  96. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/LICENSE +0 -0
  97. {octavia-15.0.0.dist-info → octavia-16.0.0.0rc1.dist-info}/top_level.txt +0 -0
octavia/common/base_taskflow.py CHANGED
@@ -156,7 +156,7 @@ class DynamicLoggingConductor(impl_blocking.BlockingConductor):
                  job.name)
 
 
-class RedisDynamicLoggingConductor(DynamicLoggingConductor):
+class ExtendExpiryDynamicLoggingConductor(DynamicLoggingConductor):
 
     def _listeners_from_job(self, job, engine):
         listeners = super()._listeners_from_job(job, engine)
@@ -206,20 +206,29 @@ class TaskFlowServiceController:
     def run_conductor(self, name):
         with self.driver.persistence_driver.get_persistence() as persistence:
             with self.driver.job_board(persistence) as board:
-                # Redis do not expire jobs by default, so jobs won't be resumed
-                # with restart of controller. Add expiry for board and use
-                # special listener.
-                if (CONF.task_flow.jobboard_backend_driver ==
-                        'redis_taskflow_driver'):
-                    conductor = RedisDynamicLoggingConductor(
+                # Redis and etcd do not expire jobs by default, so jobs won't
+                # be resumed with restart of controller. Add expiry for board
+                # and use special listener.
+                if (CONF.task_flow.jobboard_backend_driver in (
+                        'etcd_taskflow_driver',
+                        'redis_taskflow_driver')):
+                    conductor = ExtendExpiryDynamicLoggingConductor(
                         name, board, persistence=persistence,
                         engine=CONF.task_flow.engine,
                         engine_options={
                             'max_workers': CONF.task_flow.max_workers
                         })
-                    board.claim = functools.partial(
-                        board.claim,
-                        expiry=CONF.task_flow.jobboard_expiration_time)
+                    if (CONF.task_flow.jobboard_backend_driver ==
+                            'redis_taskflow_driver'):
+                        # Hack for redis only:
+                        # The TTL of the jobs of the Redis Jobboard driver can
+                        # be only overriden by using the 'expiry' parameter of
+                        # the 'claim' function
+                        # For the Etcd driver, the default TTL for all the
+                        # locks can be configured while creating the backend
+                        board.claim = functools.partial(
+                            board.claim,
+                            expiry=CONF.task_flow.jobboard_expiration_time)
                 else:
                     conductor = DynamicLoggingConductor(
                         name, board, persistence=persistence,
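
Note: the Redis-only expiry override above works by rebinding the jobboard's claim method with functools.partial. A minimal, self-contained sketch of that pattern (FakeBoard is illustrative, not an Octavia or taskflow class):

    import functools

    class FakeBoard:
        def claim(self, job, who, expiry=None):
            # A real Redis jobboard would store the claim with this TTL (seconds).
            return f"{who} claimed {job} (expiry={expiry}s)"

    board = FakeBoard()
    # After rebinding, every claim() call transparently carries the TTL.
    board.claim = functools.partial(board.claim,
                                    expiry=30)  # e.g. jobboard_expiration_time
    print(board.claim("job-1", "octavia-worker-1"))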
octavia/common/clients.py CHANGED
@@ -111,16 +111,22 @@ class NeutronAuth:
         client.
         """
         sess = keystone.KeystoneSession('neutron').get_session()
+        kwargs = {}
         neutron_endpoint = CONF.neutron.endpoint_override
         if neutron_endpoint is None:
             endpoint_data = sess.get_endpoint_data(
-                service_type='network',
+                service_type=(CONF.neutron.service_type or 'network'),
                 interface=CONF.neutron.valid_interfaces,
                 region_name=CONF.neutron.region_name)
             neutron_endpoint = endpoint_data.catalog_url
 
+        neutron_cafile = getattr(CONF.neutron, "cafile", None)
+        insecure = getattr(CONF.neutron, "insecure", False)
+        kwargs['verify'] = not insecure
+        if neutron_cafile is not None and not insecure:
+            kwargs['verify'] = neutron_cafile
         user_auth = token_endpoint.Token(neutron_endpoint, context.auth_token)
-        user_sess = session.Session(auth=user_auth)
+        user_sess = session.Session(auth=user_auth, **kwargs)
 
         conn = openstack.connection.Connection(
             session=user_sess, oslo_conf=CONF)
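
Note: the new kwargs wiring follows the usual requests/keystoneauth convention for the verify argument (False to skip TLS verification, True for system CAs, or a CA bundle path). A small sketch of the mapping implemented above, assuming the [neutron] cafile and insecure options:

    def neutron_session_verify(cafile=None, insecure=False):
        # insecure wins; otherwise a configured CA file is passed through.
        verify = not insecure
        if cafile is not None and not insecure:
            verify = cafile
        return verify

    assert neutron_session_verify() is True
    assert neutron_session_verify(insecure=True) is False
    assert neutron_session_verify(
        cafile='/etc/ssl/neutron-ca.pem') == '/etc/ssl/neutron-ca.pem'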
octavia/common/config.py CHANGED
@@ -555,8 +555,10 @@ task_flow_opts = [
               choices=[('redis_taskflow_driver',
                         'Driver that will use Redis to store job states.'),
                        ('zookeeper_taskflow_driver',
-                        'Driver that will use Zookeeper to store job states.')
-               ],
+                        'Driver that will use Zookeeper to store job '
+                        'states.'),
+                       ('etcd_taskflow_driver',
+                        'Driver that will user Etcd to store job states.')],
               help='Jobboard backend driver that will monitor job state.'),
    cfg.ListOpt('jobboard_backend_hosts', default=['127.0.0.1'],
                help='Jobboard backend server host(s).'),
@@ -569,6 +571,9 @@ task_flow_opts = [
    cfg.StrOpt('jobboard_backend_namespace', default='octavia_jobboard',
               help='Jobboard name that should be used to store taskflow '
                    'job id and claims for it.'),
+   cfg.IntOpt('jobboard_redis_backend_db',
+              default=0, min=0,
+              help='Database ID in redis server.'),
    cfg.StrOpt('jobboard_redis_sentinel', default=None,
               help='Sentinel name if it is used for Redis.'),
    cfg.StrOpt('jobboard_redis_sentinel_username',
@@ -596,6 +601,16 @@ task_flow_opts = [
                        'keyfile_password': None,
                        'certfile': None,
                        'verify_certs': True}),
+   cfg.DictOpt('jobboard_etcd_ssl_options',
+               help='Etcd jobboard backend ssl configuration options.',
+               default={'use_ssl': False,
+                        'ca_cert': None,
+                        'cert_key': None,
+                        'cert_cert': None}),
+   cfg.IntOpt('jobboard_etcd_timeout', default=None,
+              help='Timeout when communicating with the Etcd backend.'),
+   cfg.StrOpt('jobboard_etcd_api_path', default=None,
+              help='API Path of the Etcd server.'),
    cfg.IntOpt('jobboard_expiration_time', default=30,
               help='For backends like redis claiming jobs requiring setting '
                    'the expiry - how many seconds the claim should be '
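
Note: a hypothetical octavia.conf fragment exercising the new [task_flow] options above (hosts, ports and certificate paths are illustrative only; jobboard_etcd_ssl_options uses oslo.config's key:value,key:value DictOpt syntax):

    [task_flow]
    jobboard_backend_driver = etcd_taskflow_driver
    jobboard_backend_hosts = 192.0.2.10
    jobboard_backend_port = 2379
    jobboard_etcd_timeout = 30
    jobboard_etcd_ssl_options = use_ssl:True,ca_cert:/etc/pki/etcd-ca.pem,cert_cert:/etc/pki/etcd.crt,cert_key:/etc/pki/etcd.key
    # Redis deployments can now also pin the database index:
    # jobboard_redis_backend_db = 1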
octavia/common/constants.py CHANGED
@@ -423,11 +423,15 @@ REQ_CONN_TIMEOUT = 'req_conn_timeout'
 REQ_READ_TIMEOUT = 'req_read_timeout'
 REQUEST_ERRORS = 'request_errors'
 REQUEST_ID = 'request_id'
+REQUEST_SRIOV = 'request_sriov'
 ROLE = 'role'
+SECURITY_GROUP_IDS = 'security_group_ids'
 SECURITY_GROUPS = 'security_groups'
 SECURITY_GROUP_RULES = 'security_group_rules'
 SERVER_GROUP_ID = 'server_group_id'
 SERVER_PEM = 'server_pem'
+SG_IDS = 'sg_ids'
+SG_ID = 'sg_id'
 SNI_CONTAINER_DATA = 'sni_container_data'
 SNI_CONTAINERS = 'sni_containers'
 SOFT_ANTI_AFFINITY = 'soft-anti-affinity'
@@ -585,6 +589,7 @@ ATTACH_PORT = 'attach-port'
 CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta'
 CREATE_VIP_BASE_PORT = 'create-vip-base-port'
 DELETE_AMPHORA = 'delete-amphora'
+DELETE_AMPHORA_MEMBER_PORTS = 'delete-amphora-member-ports'
 DELETE_PORT = 'delete-port'
 DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring'
 GET_AMPHORA_FIREWALL_RULES = 'get-amphora-firewall-rules'
@@ -926,6 +931,7 @@ AMPHORA_SUPPORTED_ALPN_PROTOCOLS = [lib_consts.ALPN_PROTOCOL_HTTP_2,
                                     lib_consts.ALPN_PROTOCOL_HTTP_1_0]
 
 SRIOV_VIP = 'sriov_vip'
+ALLOW_MEMBER_SRIOV = 'allow_member_sriov'
 
 # Amphora interface fields
 IF_TYPE = 'if_type'
octavia/common/data_models.py CHANGED
@@ -378,7 +378,7 @@ class Member(BaseDataModel):
                  subnet_id=None, operating_status=None, pool=None,
                  created_at=None, updated_at=None, provisioning_status=None,
                  name=None, monitor_address=None, monitor_port=None,
-                 tags=None):
+                 tags=None, vnic_type=None):
         self.id = id
         self.project_id = project_id
         self.pool_id = pool_id
@@ -397,6 +397,7 @@ class Member(BaseDataModel):
         self.monitor_address = monitor_address
         self.monitor_port = monitor_port
         self.tags = tags
+        self.vnic_type = vnic_type
 
     def delete(self):
         for mem in self.pool.members:
@@ -558,7 +559,7 @@ class Vip(BaseDataModel):
     def __init__(self, load_balancer_id=None, ip_address=None,
                  subnet_id=None, network_id=None, port_id=None,
                  load_balancer=None, qos_policy_id=None, octavia_owned=None,
-                 vnic_type=None):
+                 vnic_type=None, sg_ids=None):
         self.load_balancer_id = load_balancer_id
         self.ip_address = ip_address
         self.subnet_id = subnet_id
@@ -568,6 +569,18 @@ class Vip(BaseDataModel):
         self.qos_policy_id = qos_policy_id
         self.octavia_owned = octavia_owned
         self.vnic_type = vnic_type
+        self.sg_ids = sg_ids or []
+
+    def to_dict(self, **kwargs):
+        ret = super().to_dict(**kwargs)
+        if kwargs.get('recurse') is not True:
+            # NOTE(gthiemonge) we need to return the associated SG IDs but as
+            # sg_ids is a list, they are only added with recurse=True, which
+            # does a full recursion on the Vip, adding the associated
+            # LoadBalancer, its Listeners, etc...
+            # Adding it directly here avoids unnecessary recursion
+            ret[constants.SG_IDS] = self.sg_ids
+        return ret
 
 
 class AdditionalVip(BaseDataModel):
@@ -886,3 +899,20 @@ class ListenerCidr(BaseDataModel):
     # object. Otherwise we recurse down the "ghost" listener object.
     def to_dict(self, **kwargs):
         return {'cidr': self.cidr, 'listener_id': self.listener_id}
+
+
+class VipSecurityGroup(BaseDataModel):
+    def __init__(self, load_balancer_id: str = None, sg_id: str = None):
+        self.load_balancer_id = load_balancer_id
+        self.sg_id = sg_id
+
+
+class AmphoraMemberPort(BaseDataModel):
+
+    def __init__(self, port_id=None, amphora_id=None, network_id=None,
+                 created_at=None, updated_at=None):
+        self.port_id = port_id
+        self.amphora_id = amphora_id
+        self.network_id = network_id
+        self.created_at = created_at
+        self.updated_at = updated_at
octavia/common/exceptions.py CHANGED
@@ -435,3 +435,8 @@ class AmphoraNetworkConfigException(OctaviaException):
 class ListenerNoChildren(APIException):
     msg = _('Protocol %(protocol)s listeners cannot have child objects.')
     code = 400
+
+
+class MemberSRIOVDisabled(APIException):
+    msg = _('The load balancer flavor does not allow SR-IOV member ports.')
+    code = 400
octavia/common/utils.py CHANGED
@@ -23,6 +23,7 @@ import hashlib
 import ipaddress
 import re
 import socket
+import typing
 
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -30,6 +31,8 @@ from oslo_utils import excutils
 from stevedore import driver as stevedore_driver
 
 from octavia.common import constants
+if typing.TYPE_CHECKING:
+    from octavia.network import base as network_base
 
 CONF = cfg.CONF
 
@@ -61,7 +64,7 @@ def get_amphora_driver():
     return amphora_driver
 
 
-def get_network_driver():
+def get_network_driver() -> 'network_base.AbstractNetworkDriver':
     CONF.import_group('controller_worker', 'octavia.common.config')
     network_driver = stevedore_driver.DriverManager(
         namespace='octavia.network.drivers',
octavia/common/validate.py CHANGED
@@ -21,6 +21,7 @@ Defined here so these can also be used at deeper levels than the API.
 
 import ipaddress
 import re
+import typing
 
 from oslo_config import cfg
 from rfc3986 import uri_reference
@@ -33,6 +34,9 @@ from octavia.common import exceptions
 from octavia.common import utils
 from octavia.i18n import _
 
+if typing.TYPE_CHECKING:
+    from octavia.common import context
+
 CONF = cfg.CONF
 _ListenerPUT = 'octavia.api.v2.types.listener.ListenerPUT'
 
@@ -382,6 +386,18 @@ def network_exists_optionally_contains_subnet(network_id, subnet_id=None,
     return network
 
 
+def security_group_exists(sg_id: str,
+                          context: 'context.RequestContext' = None):
+    """Raises an exception when a security group does not exist."""
+    network_driver = utils.get_network_driver()
+    try:
+        network_driver.get_security_group_by_id(sg_id,
+                                                context=context)
+    except Exception as e:
+        raise exceptions.InvalidSubresource(
+            resource='Security Group', id=sg_id) from e
+
+
 def network_allowed_by_config(network_id, valid_networks=None):
     if CONF.networking.valid_vip_networks and not valid_networks:
         valid_networks = CONF.networking.valid_vip_networks
octavia/compute/drivers/noop_driver/driver.py CHANGED
@@ -16,6 +16,12 @@ from collections import namedtuple
 
 from oslo_log import log as logging
 from oslo_utils import uuidutils
+from sqlalchemy import Column
+from sqlalchemy import create_engine
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import update
 
 from octavia.common import constants
 from octavia.common import data_models
@@ -33,6 +39,20 @@ class NoopManager:
         super().__init__()
         self.computeconfig = {}
 
+        # Get a DB engine for the network no-op DB
+        # Required to update the ports when a port is attached to a compute
+        self.engine = create_engine('sqlite:////tmp/octavia-network-noop.db',
+                                    isolation_level='SERIALIZABLE')
+        metadata_obj = MetaData()
+
+        self.interfaces_table = Table(
+            'interfaces',
+            metadata_obj,
+            Column('port_id', String(36)),
+            Column('network_id', String(36)),
+            Column('compute_id', String(36)),
+            Column('vnic_type', String(6)))
+
     def build(self, name="amphora_name", amphora_flavor=None,
               image_tag=None, image_owner=None, key_name=None, sec_groups=None,
               network_ids=None, config_drive_files=None, user_data=None,
@@ -97,12 +117,21 @@ class NoopManager:
         self.computeconfig[(compute_id, network_id, ip_address, port_id)] = (
             compute_id, network_id, ip_address, port_id,
             'attach_network_or_port')
+
+        # Update the port in the network no-op DB
+        with self.engine.connect() as connection:
+            connection.execute(update(self.interfaces_table).where(
+                self.interfaces_table.c.port_id == port_id).values(
+                    compute_id=compute_id))
+            connection.commit()
+
         return network_models.Interface(
             id=uuidutils.generate_uuid(),
             compute_id=compute_id,
             network_id=network_id,
             fixed_ips=[],
-            port_id=uuidutils.generate_uuid()
+            port_id=uuidutils.generate_uuid(),
+            vnic_type=constants.VNIC_TYPE_NORMAL
         )
 
     def detach_port(self, compute_id, port_id):
octavia/controller/healthmanager/health_manager.py CHANGED
@@ -77,6 +77,11 @@ class HealthManager:
         return False
 
     def health_check(self):
+        """Check for stale amphorae and process them
+
+        ... until either no more stale amphora were found or all executor
+        threads are busy.
+        """
         stats = {
             'failover_attempted': 0,
             'failover_failed': 0,
@@ -127,6 +132,7 @@ class HealthManager:
                    if lock_session:
                        lock_session.rollback()
 
+            # No more stale amps found
             if amp_health is None:
                 break
 
@@ -137,6 +143,7 @@ class HealthManager:
                    functools.partial(update_stats_on_done, stats)
                )
                futs.append(fut)
+               # All threads are/were busy
                if len(futs) == self.threads:
                    break
        if futs:
octavia/controller/worker/v2/flows/amphora_flows.py CHANGED
@@ -207,6 +207,9 @@ class AmphoraFlows:
            name=constants.DELETE_AMPHORA + '-' + amphora_id,
            inject={constants.AMPHORA: amphora,
                    constants.PASSIVE_FAILURE: True}))
+        delete_amphora_flow.add(network_tasks.DeleteAmphoraMemberPorts(
+            name=constants.DELETE_AMPHORA_MEMBER_PORTS + '-' + amphora_id,
+            inject={constants.AMPHORA_ID: amphora[constants.ID]}))
        delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
            name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora_id,
            inject={constants.AMPHORA: amphora}))
@@ -219,11 +222,6 @@ class AmphoraFlows:
                  str(amphora[constants.VRRP_PORT_ID])),
            inject={constants.PORT_ID: amphora[constants.VRRP_PORT_ID],
                    constants.PASSIVE_FAILURE: True}))
-        # TODO(johnsom) What about cleaning up any member ports?
-        # maybe we should get the list of attached ports prior to delete
-        # and call delete on them here. Fix this as part of
-        # https://storyboard.openstack.org/#!/story/2007077
-
        return delete_amphora_flow
 
    def get_vrrp_subflow(self, prefix, timeout_dict=None,
octavia/controller/worker/v2/flows/listener_flows.py CHANGED
@@ -176,7 +176,8 @@ class ListenerFlows:
 
        fw_rules_subflow.add(
            amphora_driver_tasks.AmphoraeGetConnectivityStatus(
-                name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS,
+                name=(sf_name + '-' +
+                      constants.AMPHORAE_GET_CONNECTIVITY_STATUS),
                requires=constants.AMPHORAE,
                inject={constants.TIMEOUT_DICT: timeout_dict,
                        constants.NEW_AMPHORA_ID: constants.NIL_UUID},
octavia/controller/worker/v2/flows/load_balancer_flows.py CHANGED
@@ -31,6 +31,7 @@ from octavia.controller.worker.v2.tasks import database_tasks
 from octavia.controller.worker.v2.tasks import lifecycle_tasks
 from octavia.controller.worker.v2.tasks import network_tasks
 from octavia.controller.worker.v2.tasks import notification_tasks
+from octavia.db import api as db_apis
 from octavia.db import repositories as repo
 
 CONF = cfg.CONF
@@ -41,6 +42,8 @@ class LoadBalancerFlows:
 
    def __init__(self):
        self.amp_flows = amphora_flows.AmphoraFlows()
+        self.amphora_repo = repo.AmphoraRepository()
+        self.amphora_member_port_repo = repo.AmphoraMemberPortRepository()
        self.listener_flows = listener_flows.ListenerFlows()
        self.pool_flows = pool_flows.PoolFlows()
        self.member_flows = member_flows.MemberFlows()
@@ -336,6 +339,9 @@ class LoadBalancerFlows:
        pools_delete = self._get_delete_pools_flow(pools)
        delete_LB_flow.add(pools_delete)
        delete_LB_flow.add(listeners_delete)
+        member_ports_delete = self.get_delete_member_ports_subflow(
+            lb[constants.LOADBALANCER_ID])
+        delete_LB_flow.add(member_ports_delete)
        delete_LB_flow.add(network_tasks.UnplugVIP(
            requires=constants.LOADBALANCER))
        delete_LB_flow.add(network_tasks.DeallocateVIP(
@@ -372,6 +378,11 @@ class LoadBalancerFlows:
        update_LB_flow = linear_flow.Flow(constants.UPDATE_LOADBALANCER_FLOW)
        update_LB_flow.add(lifecycle_tasks.LoadBalancerToErrorOnRevertTask(
            requires=constants.LOADBALANCER))
+        update_LB_flow.add(network_tasks.UpdateVIPSecurityGroup(
+            requires=constants.LOADBALANCER_ID,
+            provides=constants.VIP_SG_ID))
+        update_LB_flow.add(network_tasks.UpdateAmphoraSecurityGroup(
+            requires=constants.LOADBALANCER_ID))
        update_LB_flow.add(network_tasks.ApplyQos(
            requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
        update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
@@ -744,3 +755,30 @@ class LoadBalancerFlows:
            requires=constants.LOADBALANCER))
 
        return failover_LB_flow
+
+    def get_delete_member_ports_subflow(self, load_balancer_id):
+        """A subflow that will delete all of the member ports on an LB
+
+        :param load_balancer_id: A load balancer ID
+        :returns: A Taskflow flow
+        """
+        port_delete_flow = unordered_flow.Flow('delete_member_ports')
+
+        session = db_apis.get_session()
+        with session.begin():
+            amps = self.amphora_repo.get_amphorae_ids_on_lb(session,
+                                                            load_balancer_id)
+        for amp in amps:
+            with session.begin():
+                ports = self.amphora_member_port_repo.get_port_ids(session,
+                                                                    amp)
+            for port in ports:
+                port_delete_flow.add(
+                    network_tasks.DeletePort(
+                        name='delete_member_port' + '-' + port,
+                        inject={constants.PORT_ID: port}))
+                port_delete_flow.add(
+                    database_tasks.DeleteAmpMemberPortInDB(
+                        name='delete_member_port_in_db' + '-' + port,
+                        inject={constants.PORT_ID: port}))
+        return port_delete_flow
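
Note: the new subflow fans the per-port cleanup out through a taskflow unordered_flow, one independent task pair per port. A toy, self-contained sketch of that pattern (NotePortDelete stands in for Octavia's DeletePort/DeleteAmpMemberPortInDB tasks):

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import unordered_flow

    class NotePortDelete(task.Task):
        def execute(self, port_id):
            print('would delete member port', port_id)

    flow = unordered_flow.Flow('delete_member_ports')
    for port in ('port-a', 'port-b'):
        # Ordering between ports does not matter, so each gets its own task.
        flow.add(NotePortDelete(name='delete_member_port-' + port,
                                inject={'port_id': port}))
    engines.run(flow)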
octavia/controller/worker/v2/taskflow_jobboard_driver.py CHANGED
@@ -15,6 +15,7 @@ import contextlib
 
 from oslo_config import cfg
 from oslo_log import log
+from oslo_utils import netutils
 from oslo_utils import strutils
 from taskflow.jobs import backends as job_backends
 from taskflow.persistence import backends as persistence_backends
@@ -90,19 +91,16 @@ class RedisTaskFlowDriver(JobboardTaskFlowDriver):
 
    def job_board(self, persistence):
 
-        def _format_server(host, port):
-            if ':' in host:
-                return '[%s]:%d' % (host, port)
-            return '%s:%d' % (host, port)
-
        jobboard_backend_conf = {
            'board': 'redis',
            'host': CONF.task_flow.jobboard_backend_hosts[0],
            'port': CONF.task_flow.jobboard_backend_port,
+            'db': CONF.task_flow.jobboard_redis_backend_db,
            'namespace': CONF.task_flow.jobboard_backend_namespace,
            'sentinel': CONF.task_flow.jobboard_redis_sentinel,
            'sentinel_fallbacks': [
-                _format_server(host, CONF.task_flow.jobboard_backend_port)
+                '%s:%d' % (netutils.escape_ipv6(host),
+                           CONF.task_flow.jobboard_backend_port)
                for host in CONF.task_flow.jobboard_backend_hosts[1:]
            ]
        }
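
Note: the local _format_server helper is replaced by oslo.utils' netutils.escape_ipv6, which brackets IPv6 literals and leaves other hosts untouched; a quick sketch:

    from oslo_utils import netutils

    for host in ('192.0.2.7', 'redis.example.org', '2001:db8::7'):
        print('%s:%d' % (netutils.escape_ipv6(host), 26379))
    # 192.0.2.7:26379
    # redis.example.org:26379
    # [2001:db8::7]:26379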
@@ -131,3 +129,33 @@ class RedisTaskFlowDriver(JobboardTaskFlowDriver):
            CONF.task_flow.jobboard_backend_namespace,
            jobboard_backend_conf,
            persistence=persistence)
+
+
+class EtcdTaskFlowDriver(JobboardTaskFlowDriver):
+
+    def __init__(self, persistence_driver):
+        self.persistence_driver = persistence_driver
+
+    def job_board(self, persistence):
+        jobboard_backend_conf = {
+            'board': 'etcd',
+            'host': CONF.task_flow.jobboard_backend_hosts[0],
+            'port': CONF.task_flow.jobboard_backend_port,
+            'path': CONF.task_flow.jobboard_backend_namespace,
+            'ttl': CONF.task_flow.jobboard_expiration_time,
+        }
+        if CONF.task_flow.jobboard_etcd_ssl_options['use_ssl']:
+            jobboard_backend_conf.update(
+                CONF.task_flow.jobboard_etcd_ssl_options)
+            jobboard_backend_conf.pop('use_ssl')
+            jobboard_backend_conf['protocol'] = 'https'
+        if CONF.task_flow.jobboard_etcd_timeout is not None:
+            jobboard_backend_conf['timeout'] = (
+                CONF.task_flow.jobboard_etcd_timeout)
+        if CONF.task_flow.jobboard_etcd_api_path is not None:
+            jobboard_backend_conf['api_path'] = (
+                CONF.task_flow.jobboard_etcd_api_path)
+
+        return job_backends.backend(CONF.task_flow.jobboard_backend_namespace,
+                                    jobboard_backend_conf,
+                                    persistence=persistence)
octavia/controller/worker/v2/tasks/compute_tasks.py CHANGED
@@ -236,26 +236,30 @@ class ComputeDelete(BaseComputeTask):
        amphora_id = amphora.get(constants.ID)
        compute_id = amphora[constants.COMPUTE_ID]
 
-        if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
+        # tenacity 8.5.0 moves statistics from the retry object to the function
+        try:
+            retry_statistics = self.execute.statistics
+        except AttributeError:
+            retry_statistics = self.execute.retry.statistics
+        if retry_statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
            LOG.debug('Compute delete execute for amphora with ID %s and '
                      'compute ID: %s', amphora_id, compute_id)
        else:
            LOG.warning('Retrying compute delete of %s attempt %s of %s.',
                        compute_id,
-                        self.execute.retry.statistics[
-                            constants.ATTEMPT_NUMBER],
+                        retry_statistics[constants.ATTEMPT_NUMBER],
                        self.execute.retry.stop.max_attempt_number)
        # Let the Taskflow engine know we are working and alive
        # Don't use get with a default for 'attempt_number', we need to fail
        # if that number is missing.
        self.update_progress(
-            self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
+            retry_statistics[constants.ATTEMPT_NUMBER] /
            self.execute.retry.stop.max_attempt_number)
 
        try:
            self.compute.delete(compute_id)
        except Exception:
-            if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
+            if (retry_statistics[constants.ATTEMPT_NUMBER] !=
                    self.execute.retry.stop.max_attempt_number):
                LOG.warning('Compute delete for amphora id: %s failed. '
                            'Retrying.', amphora_id)
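
Note: the try/except above is a compatibility shim; tenacity 8.5.0 exposes retry statistics on the wrapped function, while older releases keep them on the retry object. A minimal version-tolerant accessor in the same spirit (helper name is illustrative):

    def get_retry_statistics(wrapped_func):
        try:
            # tenacity >= 8.5.0
            return wrapped_func.statistics
        except AttributeError:
            # older tenacity releases
            return wrapped_func.retry.statistics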
octavia/controller/worker/v2/tasks/database_tasks.py CHANGED
@@ -55,6 +55,7 @@ class BaseDatabaseTask(task.Task):
        self.l7policy_repo = repo.L7PolicyRepository()
        self.l7rule_repo = repo.L7RuleRepository()
        self.task_utils = task_utilities.TaskUtils()
+        self.amphora_member_port_repo = repo.AmphoraMemberPortRepository()
        super().__init__(**kwargs)
 
    def _delete_from_amp_health(self, session, amphora_id):
@@ -132,13 +133,17 @@ class CreateAmphoraInDB(BaseDatabaseTask):
        LOG.warning("Reverting create amphora in DB for amp id %s ", result)
 
        # Delete the amphora for now. May want to just update status later
-        try:
-            with db_apis.session().begin() as session:
+        with db_apis.session().begin() as session:
+            try:
                self.amphora_repo.delete(session, id=result)
-        except Exception as e:
-            LOG.error("Failed to delete amphora %(amp)s "
-                      "in the database due to: "
-                      "%(except)s", {'amp': result, 'except': str(e)})
+            except Exception as e:
+                LOG.error("Failed to delete amphora %(amp)s "
+                          "in the database due to: "
+                          "%(except)s", {'amp': result, 'except': str(e)})
+            try:
+                self.amp_health_repo.delete(session, amphora_id=result)
+            except Exception:
+                pass
 
 
 class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
@@ -416,6 +421,21 @@ class DeleteL7RuleInDB(BaseDatabaseTask):
                       'except': str(e)})
 
 
+class DeleteAmpMemberPortInDB(BaseDatabaseTask):
+    """Delete an amphora member port record in the DB."""
+
+    def execute(self, port_id):
+        """Delete the amphora member port in DB
+
+        :param port_id: The port_id to be deleted
+        :returns: None
+        """
+
+        LOG.debug("Delete in DB for amphora member port %s", port_id)
+        with db_apis.session().begin() as session:
+            self.amphora_member_port_repo.delete(session, port_id=port_id)
+
+
 class ReloadAmphora(BaseDatabaseTask):
    """Get an amphora object from the database."""