octavia 13.0.0__py3-none-any.whl → 13.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
  2. octavia/amphorae/backends/agent/api_server/osutils.py +1 -2
  3. octavia/amphorae/backends/agent/api_server/util.py +35 -2
  4. octavia/amphorae/backends/utils/interface.py +4 -5
  5. octavia/amphorae/drivers/driver_base.py +16 -0
  6. octavia/amphorae/drivers/haproxy/rest_api_driver.py +13 -8
  7. octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +0 -1
  8. octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template +0 -1
  9. octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
  10. octavia/amphorae/drivers/noop_driver/driver.py +3 -0
  11. octavia/api/common/pagination.py +1 -1
  12. octavia/api/v2/controllers/health_monitor.py +3 -2
  13. octavia/api/v2/controllers/l7policy.py +0 -1
  14. octavia/api/v2/controllers/l7rule.py +0 -1
  15. octavia/api/v2/controllers/listener.py +0 -1
  16. octavia/api/v2/controllers/load_balancer.py +13 -7
  17. octavia/api/v2/controllers/member.py +18 -5
  18. octavia/api/v2/controllers/pool.py +6 -7
  19. octavia/api/v2/types/pool.py +1 -1
  20. octavia/certificates/common/pkcs12.py +9 -9
  21. octavia/certificates/manager/barbican.py +24 -16
  22. octavia/certificates/manager/castellan_mgr.py +12 -7
  23. octavia/certificates/manager/noop.py +106 -0
  24. octavia/common/clients.py +22 -4
  25. octavia/common/config.py +21 -5
  26. octavia/common/constants.py +4 -0
  27. octavia/common/exceptions.py +6 -0
  28. octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
  29. octavia/common/keystone.py +7 -7
  30. octavia/common/tls_utils/cert_parser.py +23 -9
  31. octavia/controller/worker/task_utils.py +28 -6
  32. octavia/controller/worker/v2/controller_worker.py +2 -2
  33. octavia/controller/worker/v2/flows/amphora_flows.py +41 -10
  34. octavia/controller/worker/v2/flows/flow_utils.py +6 -4
  35. octavia/controller/worker/v2/flows/load_balancer_flows.py +17 -3
  36. octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +114 -23
  37. octavia/controller/worker/v2/tasks/database_tasks.py +36 -47
  38. octavia/controller/worker/v2/tasks/lifecycle_tasks.py +96 -40
  39. octavia/controller/worker/v2/tasks/network_tasks.py +12 -13
  40. octavia/db/base_models.py +16 -4
  41. octavia/db/repositories.py +34 -33
  42. octavia/network/drivers/neutron/allowed_address_pairs.py +10 -8
  43. octavia/network/drivers/noop_driver/driver.py +1 -2
  44. octavia/tests/common/sample_certs.py +115 -0
  45. octavia/tests/functional/api/v2/base.py +1 -1
  46. octavia/tests/functional/api/v2/test_health_monitor.py +18 -0
  47. octavia/tests/functional/api/v2/test_listener.py +45 -0
  48. octavia/tests/functional/api/v2/test_member.py +32 -0
  49. octavia/tests/functional/db/base.py +9 -0
  50. octavia/tests/functional/db/test_repositories.py +45 -98
  51. octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
  52. octavia/tests/unit/amphorae/backends/utils/test_interface.py +3 -1
  53. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +3 -3
  54. octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +0 -4
  55. octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
  56. octavia/tests/unit/api/common/test_pagination.py +78 -1
  57. octavia/tests/unit/api/v2/types/test_pool.py +71 -0
  58. octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
  59. octavia/tests/unit/certificates/manager/test_noop.py +53 -0
  60. octavia/tests/unit/cmd/test_prometheus_proxy.py +8 -1
  61. octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
  62. octavia/tests/unit/common/test_config.py +35 -0
  63. octavia/tests/unit/common/test_keystone.py +32 -0
  64. octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
  65. octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
  66. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +10 -5
  67. octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +234 -17
  68. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
  69. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
  70. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +57 -2
  71. octavia/tests/unit/controller/worker/v2/test_controller_worker.py +56 -1
  72. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +24 -1
  73. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/AUTHORS +8 -0
  74. octavia-13.0.1.dist-info/METADATA +155 -0
  75. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/RECORD +90 -88
  76. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/WHEEL +1 -1
  77. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/entry_points.txt +1 -1
  78. octavia-13.0.1.dist-info/pbr.json +1 -0
  79. octavia-13.0.0.dist-info/METADATA +0 -158
  80. octavia-13.0.0.dist-info/pbr.json +0 -1
  81. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/LICENSE +0 -0
  82. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/README.rst +0 -0
  83. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
  84. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
  85. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  86. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
  87. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  88. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
  89. {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  90. {octavia-13.0.0.data → octavia-13.0.1.data}/scripts/octavia-wsgi +0 -0
  91. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/LICENSE +0 -0
  92. {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/top_level.txt +0 -0
octavia/controller/worker/v2/tasks/lifecycle_tasks.py CHANGED
@@ -54,15 +54,22 @@ class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
         pass

     def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
-        self.task_utils.mark_health_mon_prov_status_error(
-            health_mon[constants.HEALTHMONITOR_ID])
-        self.task_utils.mark_pool_prov_status_active(
-            health_mon[constants.POOL_ID])
+        try:
+            self.task_utils.mark_health_mon_prov_status_error(
+                health_mon[constants.HEALTHMONITOR_ID])
+            self.task_utils.mark_pool_prov_status_active(
+                health_mon[constants.POOL_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])


 class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
@@ -72,12 +79,19 @@ class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
         pass

     def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs):
-        self.task_utils.mark_l7policy_prov_status_error(
-            l7policy[constants.L7POLICY_ID])
+        try:
+            self.task_utils.mark_l7policy_prov_status_error(
+                l7policy[constants.L7POLICY_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id)
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])


 class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
@@ -88,14 +102,21 @@ class L7RuleToErrorOnRevertTask(BaseLifecycleTask):

     def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args,
                **kwargs):
-        self.task_utils.mark_l7rule_prov_status_error(
-            l7rule[constants.L7RULE_ID])
-        self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
+        try:
+            self.task_utils.mark_l7rule_prov_status_error(
+                l7rule[constants.L7RULE_ID])
+            self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer_id)
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])


 class ListenerToErrorOnRevertTask(BaseLifecycleTask):
@@ -105,8 +126,15 @@ class ListenerToErrorOnRevertTask(BaseLifecycleTask):
         pass

     def revert(self, listener, *args, **kwargs):
-        self.task_utils.mark_listener_prov_status_error(
-            listener[constants.LISTENER_ID])
+        try:
+            self.task_utils.mark_listener_prov_status_error(
+                listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             listener[constants.LOADBALANCER_ID])

@@ -118,9 +146,16 @@ class ListenersToErrorOnRevertTask(BaseLifecycleTask):
         pass

     def revert(self, listeners, *args, **kwargs):
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_error(
-                listener[constants.LISTENER_ID])
+        try:
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_error(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             listeners[0][constants.LOADBALANCER_ID])

@@ -154,12 +189,19 @@ class MemberToErrorOnRevertTask(BaseLifecycleTask):

     def revert(self, member, listeners, loadbalancer, pool_id, *args,
                **kwargs):
-        self.task_utils.mark_member_prov_status_error(
-            member[constants.MEMBER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
-        self.task_utils.mark_pool_prov_status_active(pool_id)
+        try:
+            self.task_utils.mark_member_prov_status_error(
+                member[constants.MEMBER_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+            self.task_utils.mark_pool_prov_status_active(pool_id)
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])

@@ -172,13 +214,20 @@ class MembersToErrorOnRevertTask(BaseLifecycleTask):

     def revert(self, members, listeners, loadbalancer, pool_id, *args,
                **kwargs):
-        for m in members:
-            self.task_utils.mark_member_prov_status_error(
-                m[constants.MEMBER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
-        self.task_utils.mark_pool_prov_status_active(pool_id)
+        try:
+            for m in members:
+                self.task_utils.mark_member_prov_status_error(
+                    m[constants.MEMBER_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+            self.task_utils.mark_pool_prov_status_active(pool_id)
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])

@@ -190,9 +239,16 @@ class PoolToErrorOnRevertTask(BaseLifecycleTask):
         pass

     def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
-        self.task_utils.mark_pool_prov_status_error(pool_id)
+        try:
+            self.task_utils.mark_pool_prov_status_error(pool_id)
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
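Every revert above follows the same pattern: the per-object status updates are wrapped in a broad try/except so that the final mark_loadbalancer_prov_status_active call is always reached and the load balancer is unlocked even if an earlier database update fails. A minimal, runnable sketch of the idea (mark_child_error and mark_lb_active are hypothetical stand-ins for the real task_utils helpers):

def mark_child_error(child_id):
    # Stand-in for task_utils.mark_*_prov_status_error; may raise on DB errors.
    print(f"child {child_id} -> ERROR")


def mark_lb_active(lb_id):
    # Stand-in for task_utils.mark_loadbalancer_prov_status_active.
    print(f"load balancer {lb_id} -> ACTIVE")


def revert(child_id, lb_id):
    try:
        mark_child_error(child_id)
    except Exception:
        # Errors are already reported by the helper; swallowing them here
        # guarantees the unlock call below still runs.
        pass
    # Always reached, so the LB does not stay locked in PENDING_* forever.
    mark_lb_active(lb_id)


revert("hm-1", "lb-1")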
octavia/controller/worker/v2/tasks/network_tasks.py CHANGED
@@ -583,11 +583,10 @@ class PlugVIPAmphora(BaseNetworkTask):
         """Handle a failure to plumb a vip."""
         if isinstance(result, failure.Failure):
             return
+        lb_id = loadbalancer[constants.LOADBALANCER_ID]
         LOG.warning("Unable to plug VIP for amphora id %s "
                     "load balancer id %s",
-                    amphora.get(constants.ID),
-                    loadbalancer[constants.LOADBALANCER_ID])
-
+                    amphora.get(constants.ID), lb_id)
         try:
             session = db_apis.get_session()
             with session.begin():
@@ -597,15 +596,16 @@
                 db_amp.ha_port_id = result[constants.HA_PORT_ID]
                 db_subnet = self.network_driver.get_subnet(
                     subnet[constants.ID])
-                db_lb = self.loadbalancer_repo.get(
-                    session,
-                    id=loadbalancer[constants.LOADBALANCER_ID])
-
+                db_lb = self.loadbalancer_repo.get(session, id=lb_id)
                 self.network_driver.unplug_aap_port(db_lb.vip,
                                                     db_amp, db_subnet)
         except Exception as e:
-            LOG.error('Failed to unplug AAP port. Resources may still be in '
-                      'use for VIP: %s due to error: %s', db_lb.vip, str(e))
+            LOG.error(
+                'Failed to unplug AAP port for load balancer: %s. '
+                'Resources may still be in use for VRRP port: %s. '
+                'Due to error: %s',
+                lb_id, result[constants.VRRP_PORT_ID], str(e)
+            )


 class UnplugVIP(BaseNetworkTask):
@@ -1044,10 +1044,9 @@ class CreateVIPBasePort(BaseNetworkTask):
             return
         try:
             port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
-            for port in result:
-                self.network_driver.delete_port(port.id)
-                LOG.info('Deleted port %s with ID %s for amphora %s due to a '
-                         'revert.', port_name, port.id, amphora_id)
+            self.network_driver.delete_port(result[constants.ID])
+            LOG.info('Deleted port %s with ID %s for amphora %s due to a '
+                     'revert.', port_name, result[constants.ID], amphora_id)
         except Exception as e:
             LOG.error('Failed to delete port %s. Resources may still be in '
                       'use for a port intended for amphora %s due to error '
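In the CreateVIPBasePort hunk the revert no longer iterates over result as if it were a list of port objects; the v2 flows pass task results around as plain dicts, so a single port keyed by its ID is deleted. A rough, runnable sketch of the shape involved (FakeNetworkDriver is a hypothetical stub; the 'id' key mirrors constants.ID):

class FakeNetworkDriver:
    def delete_port(self, port_id):
        # Stand-in for the real network driver call.
        print(f"deleting port {port_id}")


# v2 tasks serialize results as dicts, not objects:
result = {"id": "1fb8e559-8b85-44b8-9fcd-07dce27a4499"}

driver = FakeNetworkDriver()
# New behaviour: one delete, using the dict key.
driver.delete_port(result["id"])
# Old behaviour (broken for a dict): for port in result: driver.delete_port(port.id)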
octavia/db/base_models.py CHANGED
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from wsme import types as wtypes
+
 from oslo_db.sqlalchemy import models
 from oslo_utils import strutils
 from oslo_utils import uuidutils
@@ -19,6 +21,8 @@ import sqlalchemy as sa
 from sqlalchemy.orm import collections
 from sqlalchemy.orm import declarative_base

+from octavia.common import constants
+

 class OctaviaBase(models.ModelBase):

@@ -112,12 +116,20 @@ class OctaviaBase(models.ModelBase):

     @staticmethod
     def apply_filter(query, model, filters):
+        # Convert boolean filters to proper type
+        for key in filters:
+            attr = getattr(model.__v2_wsme__, key, None)
+            if isinstance(attr, wtypes.wsattr) and attr.datatype == bool:
+                filters[key] = strutils.bool_from_string(filters[key])
+        # Special case for 'enabled', it's 'admin_state_up' in the WSME class
+        # definition and the attribute has already been renamed to 'enabled' by
+        # a previous pagination filter
+        if constants.ENABLED in filters:
+            filters[constants.ENABLED] = strutils.bool_from_string(
+                filters[constants.ENABLED])
+
         translated_filters = {}
         child_map = {}
-        # Convert enabled to proper type
-        if 'enabled' in filters:
-            filters['enabled'] = strutils.bool_from_string(
-                filters['enabled'])
         for attr, name_map in model.__v2_wsme__._child_map.items():
             for k, v in name_map.items():
                 if attr in filters and k in filters[attr]:
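apply_filter now converts every boolean query filter using the wsme attribute types declared on the v2 API objects, instead of special-casing only 'enabled'. A small illustration of the conversion step itself, with the __v2_wsme__ introspection replaced by a hard-coded set of boolean keys:

from oslo_utils import strutils

# Query-string values always arrive as strings.
filters = {"admin_state_up": "true", "backup": "0", "name": "web"}
boolean_keys = {"admin_state_up", "backup"}  # would come from the wsme type

for key in filters:
    if key in boolean_keys:
        filters[key] = strutils.bool_from_string(filters[key])

print(filters)  # {'admin_state_up': True, 'backup': False, 'name': 'web'}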
octavia/db/repositories.py CHANGED
@@ -40,6 +40,7 @@ from octavia.common import constants as consts
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import validate
+from octavia.db import api as db_api
 from octavia.db import models

 CONF = cfg.CONF
@@ -364,8 +365,7 @@ class Repositories(object):
                 provisioning_status=lb_prov_status)
         return success

-    def check_quota_met(self, session, lock_session, _class, project_id,
-                        count=1):
+    def check_quota_met(self, session: Session, _class, project_id, count=1):
         """Checks and updates object quotas.

         This method makes sure the project has available quota
@@ -373,7 +373,6 @@
         new ussage.

         :param session: Context database session
-        :param lock_session: Locking database session (autocommit=False)
         :param _class: Data model object requesting quota
         :param project_id: Project ID requesting quota
         :param count: Number of objects we're going to create (default=1)
@@ -390,19 +389,14 @@
         if not project_id:
             raise exceptions.MissingProjectID()

-        quotas = self.quotas.get(session, project_id=project_id)
-        if not quotas:
-            # Make sure we have a record to lock
-            self.quotas.update(
-                session,
-                project_id,
-                quota={})
+        self.quotas.ensure_project_exists(project_id)
+
         # Lock the project record in the database to block other quota checks
         #
         # Note: You cannot just use the current count as the in-use
         # value as we don't want to lock the whole resource table
         try:
-            quotas = (lock_session.query(models.Quotas)
+            quotas = (session.query(models.Quotas)
                       .filter_by(project_id=project_id)
                       .populate_existing()
                       .with_for_update()
@@ -1244,23 +1238,22 @@ class AmphoraRepository(BaseRepository):
         :param amphora_id: The amphora id to list the load balancers from
         :returns: [octavia.common.data_model]
         """
-        with session.begin():
-            db_lb = (
-                # Get LB records
-                session.query(models.LoadBalancer)
-                # Joined to amphora records
-                .filter(models.LoadBalancer.id ==
-                        models.Amphora.load_balancer_id)
-                # For just this amphora
-                .filter(models.Amphora.id == amphora_id)
-                # Where the amphora is not DELETED
-                .filter(models.Amphora.status != consts.DELETED)
-                # And the LB is also not DELETED
-                .filter(models.LoadBalancer.provisioning_status !=
-                        consts.DELETED)).first()
-            if db_lb:
-                return db_lb.to_data_model()
-            return None
+        db_lb = (
+            # Get LB records
+            session.query(models.LoadBalancer)
+            # Joined to amphora records
+            .filter(models.LoadBalancer.id ==
+                    models.Amphora.load_balancer_id)
+            # For just this amphora
+            .filter(models.Amphora.id == amphora_id)
+            # Where the amphora is not DELETED
+            .filter(models.Amphora.status != consts.DELETED)
+            # And the LB is also not DELETED
+            .filter(models.LoadBalancer.provisioning_status !=
+                    consts.DELETED)).first()
+        if db_lb:
+            return db_lb.to_data_model()
+        return None

     def get_cert_expiring_amphora(self, session):
         """Retrieves an amphora whose cert is close to expiring..
@@ -1884,11 +1877,6 @@ class L7PolicyRepository(BaseRepository):
 class QuotasRepository(BaseRepository):
     model_class = models.Quotas

-    # Since this is for the initial quota record creation it locks the table
-    # which can lead to recoverable deadlocks. Thus we use the deadlock
-    # retry wrapper here. This may not be appropriate for other sessions
-    # and or queries. Use with caution.
-    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
     def update(self, session, project_id, **model_kwargs):
         kwargs_quota = model_kwargs['quota']
         quotas = (
@@ -1905,6 +1893,19 @@
         session.flush()
         return self.get(session, project_id=project_id)

+    # Since this is for the initial quota record creation it locks the table
+    # which can lead to recoverable deadlocks. Thus we use the deadlock
+    # retry wrapper here. This may not be appropriate for other sessions
+    # and or queries. Use with caution.
+    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+    def ensure_project_exists(self, project_id):
+        with db_api.session().begin() as session:
+            quotas = self.get(session, project_id=project_id)
+            if not quotas:
+                # Make sure we have a record to lock
+                self.update(session, project_id, quota={})
+                session.commit()
+
     def delete(self, session, project_id):
         quotas = (
             session.query(self.model_class)
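check_quota_met now works against a single session; the separate lock_session is gone. QuotasRepository.ensure_project_exists creates the per-project quota row in its own short, deadlock-retried transaction, and the caller then serializes concurrent quota checks by locking that row with SELECT ... FOR UPDATE. A self-contained sketch of the row-locking pattern, using a minimal stand-in for the Quotas model (on SQLite the FOR UPDATE clause is silently ignored, but the query shape is the same):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Quotas(Base):
    # Minimal stand-in for octavia.db.models.Quotas.
    __tablename__ = "quotas"
    project_id = Column(String(36), primary_key=True)
    in_use_load_balancer = Column(Integer, nullable=True)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session, session.begin():
    session.add(Quotas(project_id="proj-1"))  # what ensure_project_exists guarantees

with Session(engine) as session, session.begin():
    # The pattern used by check_quota_met: lock the project's quota row so
    # concurrent quota checks for the same project queue up behind it.
    quota = (session.query(Quotas)
             .filter_by(project_id="proj-1")
             .populate_existing()
             .with_for_update()
             .first())
    quota.in_use_load_balancer = (quota.in_use_load_balancer or 0) + 1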
octavia/network/drivers/neutron/allowed_address_pairs.py CHANGED
@@ -195,12 +195,13 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             # Don't remove egress rules and don't confuse other protocols with
             # None ports with the egress rules. VRRP uses protocol 51 and 112
             if (rule.get('direction') == 'egress' or
-                    rule.get('protocol').upper() not in
+                    rule.get('protocol') is None or
+                    rule['protocol'].upper() not in
                     [constants.PROTOCOL_TCP, constants.PROTOCOL_UDP,
                      lib_consts.PROTOCOL_SCTP]):
                 continue
             old_ports.append((rule.get('port_range_max'),
-                              rule.get('protocol').lower(),
+                              rule['protocol'].lower(),
                               rule.get('remote_ip_prefix')))

         add_ports = set(updated_ports) - set(old_ports)
@@ -364,12 +365,13 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         """
         try:
             for amphora in vip.load_balancer.amphorae:
-                try:
-                    self.network_proxy.delete_port(amphora.vrrp_port_id)
-                except os_exceptions.ResourceNotFound:
-                    LOG.debug(
-                        'VIP instance port %s already deleted. Skipping.',
-                        amphora.vrrp_port_id)
+                if amphora.vrrp_port_id:
+                    try:
+                        self.network_proxy.delete_port(amphora.vrrp_port_id)
+                    except os_exceptions.ResourceNotFound:
+                        LOG.debug(
+                            'VIP instance port %s already deleted. Skipping.',
+                            amphora.vrrp_port_id)
         except AttributeError as ex:
             LOG.warning(f"Cannot delete port from amphorae. Object does not "
                         f"exist ({ex!r})")
octavia/network/drivers/noop_driver/driver.py CHANGED
@@ -204,8 +204,7 @@ class NoopManager(object):
                 return len(self.known_subnets) + 1

             def __iter__(self):
-                for subnet_id in self.known_subnets:
-                    yield subnet_id
+                yield from self.known_subnets
                 subnet = network_models.Subnet(id=uuidutils.generate_uuid(),
                                                network_id=self.network.id)
                 self.known_subnets[subnet.id] = subnet
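yield from is the idiomatic replacement for a loop whose body only re-yields each item; the generated sequence is unchanged. For example:

known_subnets = {"subnet-a": object(), "subnet-b": object()}


def ids_old():
    for subnet_id in known_subnets:
        yield subnet_id


def ids_new():
    yield from known_subnets  # same keys, one line


assert list(ids_old()) == list(ids_new())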
octavia/tests/common/sample_certs.py CHANGED
@@ -872,3 +872,118 @@ phYuPfZekoNbsOIPDTiPFniuP2saOF4TSRCW4KnpgblRkds6c8X+1ExdlSo5GjNa
 PftOKlYtE7T7Kw4CI9+O2H38IUOYjDt/c2twy954K4pKe4x9Ud8mImpS/oEzOsoz
 /Mn++bjO55LdaAUKQ3wa8LZ5WFB+Gs6b2kmBfzGarWEiX64=
 -----END X509 CRL-----"""
+
+# An invalid certificate due to no subject and no subjectAltName
+NOCN_NOSUBALT_CRT = b"""-----BEGIN CERTIFICATE-----
+MIIE4zCCAsugAwIBAgIUTo7POpWDLecy0B7fY2OAbLztmswwDQYJKoZIhvcNAQEL
+BQAwADAgFw0yMzExMjIyMjE4MzBaGA8yMTIzMTAyOTIyMTgzMFowADCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAPClqkTqRyjlp+LXE4oElYGvg7y710yZ
+pR96TNqgugXxNLmIgzx2A3wWJ77z6qn3XoTFEXNnT6f4WrVr1Eh5/Zd1ioyj1r0G
+hIuEWMkm42UsTv+bId6BkXrr4wTgXgU+ss82dmRsYArV1b+c+89oYlEjQorhQ6eT
+2aWnt1XJbtpgRYCy5DsBKg1Iq63QRXp5svEr4iX+jAiDCQnBBLhrkfMUf8zuMCev
+Ij5119OGY5ihLuopIZi6OurA0fyN9e2MFlnYmWcxSZu49+6yBnXGmhmev3qzWj1+
+9DA50Pqu+NS9rVpYBNhhKuBTBxaTeZPDAl67DC2Mc8TFI1OfpiOwb+w/ewRYznry
+ZceASFovPFsAlUddwu/94sxgUSCmSE81Op+VlXS0LRgg8o/OZHp/eFsG2NM0OGAH
+v2uJly4OTPTd/kT50zViX3wJlRYIH+4szSjpbNXE0aF+cqQ56PBrGEe6j+SaGZEV
+6k4N9WMHNipffkq10N2d6fkRQjAD9B7gHOB6AAQ1mxoZtgchCKL7E8FuA803Yx8B
+a7h9J65SJq9nbr0z4eTscFZPulW8wMZT/ZeooQJJWqvA+g2FZf0dExk46gqU3F2F
+IRMvfGzSbIQF7bp/Yj4fLMUwLVaYv6NNdzhI+/eC0wVDWwbQ2rZkkvcvysSteGT4
+IDuFKuIWt4UnAgMBAAGjUzBRMB0GA1UdDgQWBBSEDhho9+R5JhsAZlQ0wU4Rjbqn
+OjAfBgNVHSMEGDAWgBSEDhho9+R5JhsAZlQ0wU4RjbqnOjAPBgNVHRMBAf8EBTAD
+AQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAZ8E7l2H56z08yJiAa5DFmT8jmBHUCoJlM
+HiZSn04mtzZEfho/21Zdnb2Pa2SDrRkVXmrO+DebO5sK1Kn/EFC9P3SAOeZ3LB+m
+bJUX4WGEJ+7fv9uVRwSRfF21Lxo9QFgSVfQlQAhmXcKCE/8VtKB34oOZRhR8tAxH
+I4VvHUPyCT8ZwNhofP2TYHEjRi/4fsXueBH4kBHDy0/pyHMy1b5crWQAjlOhFXhW
++qauSXkbIXNXd+wX23UF2uQ8YH819V7cHAidx9ikwn6HC5hxXjzMjViDwI451V6Q
+eAgrVuKTgx6cdnd2mgra8k7Bd2S+uTxwcrzVVzNfF+D2Al43xgeFF02M8Wp6ZDsh
+3/mJ7NOJGTJbXLRP+u73PEh1mGGU8H2QoGvaRO7R599sbmU4LedWX/VJc2GXojzF
+ibPWaMkKtX31QiOeNiLTMSkUWiyDTvzFW2ErqyzARv/yYFcEixEFl1GV8Bqb+ujj
+cxO5/y9cK6aM+qPb/FrXivXQsNArrpE3T1C54RvhUWOi+kyCiV/mDIG+oOp7sfZ5
+tBPenwWB2/LGS4rS67jZdwyIC5UbVySaVxtqJrdQXTRNjGfj2m963CHbiaQLSoSF
+2Zh2e8W4ixo6k6mhih2YjZVtpHrXyzNEtHT9HpPHDeElVcWteIceZMI2Ah0C6Ggj
+uTbEBYW85Q==
+-----END CERTIFICATE-----"""
+
+# A certificate with no subject but with Subject Alternative Name
+NOCN_SUBALT_CRT = b"""-----BEGIN CERTIFICATE-----
+MIIFAjCCAuqgAwIBAgIUNjJqSdaJ9FsivfRHbXpdmcZgJR4wDQYJKoZIhvcNAQEL
+BQAwADAgFw0yMzExMzAyMTQyNTVaGA8yMTIzMTEwNjIxNDI1NVowADCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAKA8+0iJzx51kTufmIpxGCM/KUFWdJ0U
+MmOPN1NmySNaj6nGI/Ix6m13A5SaezhbRlJvEwN7Hqg+tl+fqu0RgtQOXfBDMiJm
++kAl0CQiOH7XU41P6fyk/QL8WF3VVGBtawTWn3x9Jw7Itd/zFr+aepQOj5LIwcx1
+ncHXreWdMLqDa7PpW1Ru6BW0FKVxX6WYQr2PI08nEIxu6DzLcaLHktRyNYg7r9X9
+a0tLZcp5MCBG3h3EtVgUkL9qw8q6acJpDGBF7ssRTNDf3QUSg0jrfzkD9WJCi631
+tefdAkDNIZXGZggbWsDGPseX4JG9p7WGzPx5QY2DkMqDJqi6FoS35tT+WNcY0n9V
+oBQXtXFV/AqOC070NwrhxsNA3cBbpRqEQYJsIDaXq0cmFR4aoDWk4OXqs7I+dpyi
+MFeRHEU7h4DpwzaOmOyaSmzsZqEMG2lsdJZmC+fIFkyKtP0BQv/movWY25oJSpF5
+4Q/PdwKn6PFO2bRVSLStlrhpuqXw2+CzlQT6YCAz+ajqDnn/w8NIrT6y+DiFd+kt
+WCed/o4ZBzsxOexRph+t0bdkTmR8PNpnHwcxzVN33gCSc6Q5DW1/M2V8VGYqnPd/
+taEaMlHm/wQ3y2/aH/tkyq85PM5tqCbUscD4TUZ7R6kb0k83Ak2iZOM5RHb4zc4p
+mreNKLPfgrQ7AgMBAAGjcjBwMB0GA1UdDgQWBBT6/yXwr+5BhORB3cUkrrSgnreq
+NTAfBgNVHSMEGDAWgBT6/yXwr+5BhORB3cUkrrSgnreqNTAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdEQEB/wQTMBGCD3d3dy5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsF
+AAOCAgEAjxrBZ3v6wK7oZWvrzFV+aCs+KkoUkK0Y61TM4SCbWIT8oinN68nweha5
+p48Jp+hSBHEsj9h0opHezihduKh5IVM7KtbcXn1GSeN2hmyAAPm/MbxyD+l+UEfB
+G/behQcsYdVXXog7nwD2NXINvra8KGPqA7n/BnQ7RsxBXHVa9+IHF2L4LpbcvG7G
+Ci/jmLSBk7Gi/75TsFphHAhfomovfnnNykfJ0u99ew14MxVmRWbZ+rbpMsUL/AhV
+h8VujkfUs1hFbdxePTVyHwplqH65yjzzQ18q8CX7kMGi9sz2k8xJS04Nz0x1l7xQ
+JDuhFMDDrcyb7vAqG7BHQ9zXWJ3IkTg9WrbfkOyTqQsJeInToWQybmr/7lY3PmC2
+e/X0zNABF+ypX29RrKzWL+KfpbslysZIEPLEW28qAh3KOyml1du+lbDSNtcHxQcT
+bnvz2rQlAYE70Ds3znLLuMXbq8GtS+h8EYH1jxcjZD9DAPhxi37v8QSY/ABIBGE2
+lfbhbzZ5OWQLMA0L1tbTg7bG5JGoi/GmPl4oA+Dbz3+8Yd/v8XJUzQgI221tx+T+
+isog5o96m62pW6hd1R+eZjVAOVMT/OxecJ9eIVva8EiZwu1Ja9arBkuhIBVK2htm
+PVi6J1iFUrPZG+QrK/ZePo4xE06Lm31dr8pxdZ7Y860owwIuHfA=
+-----END CERTIFICATE-----"""
+
+NOCN_SUBALT_KEY = b"""-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgPPtIic8edZE7
+n5iKcRgjPylBVnSdFDJjjzdTZskjWo+pxiPyMeptdwOUmns4W0ZSbxMDex6oPrZf
+n6rtEYLUDl3wQzIiZvpAJdAkIjh+11ONT+n8pP0C/Fhd1VRgbWsE1p98fScOyLXf
+8xa/mnqUDo+SyMHMdZ3B163lnTC6g2uz6VtUbugVtBSlcV+lmEK9jyNPJxCMbug8
+y3Gix5LUcjWIO6/V/WtLS2XKeTAgRt4dxLVYFJC/asPKumnCaQxgRe7LEUzQ390F
+EoNI6385A/ViQout9bXn3QJAzSGVxmYIG1rAxj7Hl+CRvae1hsz8eUGNg5DKgyao
+uhaEt+bU/ljXGNJ/VaAUF7VxVfwKjgtO9DcK4cbDQN3AW6UahEGCbCA2l6tHJhUe
+GqA1pODl6rOyPnacojBXkRxFO4eA6cM2jpjsmkps7GahDBtpbHSWZgvnyBZMirT9
+AUL/5qL1mNuaCUqReeEPz3cCp+jxTtm0VUi0rZa4abql8Nvgs5UE+mAgM/mo6g55
+/8PDSK0+svg4hXfpLVgnnf6OGQc7MTnsUaYfrdG3ZE5kfDzaZx8HMc1Td94AknOk
+OQ1tfzNlfFRmKpz3f7WhGjJR5v8EN8tv2h/7ZMqvOTzObagm1LHA+E1Ge0epG9JP
+NwJNomTjOUR2+M3OKZq3jSiz34K0OwIDAQABAoICABC+7r/g7w1O2hOyFR36vbwJ
+QMV8RImZ774p3G1R45lXQIZMl7sa7lXsRyqDjncQSuQYiZMmjcilbSfHJvTJjLOe
+oMCYNSgVPPfxO7RbAy52UFwHSvvFPk/OkWmU/tFo/fMuftJive80mJVD8U+q1D6e
+2vBLHL3CWO9GG/1QFSSY0Wum6o2DXavO+w1jMMy8gdUPnXALNBaJDKo11LVfR//9
+w4xuOG0To9/ljEjBq37kCRhxU0ZWN95ZSQbpvl273rg89rywHSgDDTUXfzLisZQC
+zuUq8TAH6q/FkBO3nFfruQQF39EfprXzMFvqxxkYclm8TlZ8tmgDlsmxUOMj2PKl
+H9kWDC5YkynfkxltKgiEJ9Kc3pZnfaScABnz0GySsZN71bUbr7fBqwH0LhbZiQqa
+b9pWcbyKuGFJ56gVsokVHcpKnKmKHedtmL33oJzI3iWYZls/mPejmkwIWt1i3F7c
+ZnhDJJp3gWgzZzSyV5OjZ05SIrM9er9r+WqS75ns7vKEzhgzpHdZuUR2jNNVu/EA
+rCnsebUtemr0tDYxhI5BcPgj3fzq02u7plJUFIwlPrpMxZ8VBJgoSwT7Di5qpHnt
+LmiGoqRM+vVXiWshops1I7q7zLCgvP+Difi4KNjap/lBsj7hiB7alZTrMVVAXiBr
+Ia++3L38ga5DJ+SHDzjBAoIBAQDNUG4URQD/j0E3pS4zn4wezSp0wOTKKIw2Z6oU
+02reZq9uFLIt+/74DVy3NZm3tBgeSakYUZeDB8zpog3mGpkPAHpwObB/fPbMYmst
+cCnXYDf9Uvb7k287a0GIbCOXwkHSrgRwznAZ4EQp6E0nZSoLbyZiC+uhYEVZgQQo
+JswsjKCSaL7o/4XXQOi6Mdsd4BX7aVVKjYrQZ8TkkCsMYFdQMSL1fB8DW4Q+Ixco
+6BGXPoaav/2XOb0HGBmrXX/yqllA8rw0U7RNLgsE7gZIlltGeTsQMeo/+w5+LJKt
+HOhhEUHITJkRZ7P/S8OdXXoVCNiUzCxGy/LrHW/AWu0t1WWbAoIBAQDHy9Allaod
+WDxdbe5G5ke03WFcPoVAxOWu0mloaFdbd7Ec39y4vr1hxRZz+SEUdouCie1nVB3P
+sj2lPJ44qKS8triqNCuEalpMHaTBdIyjItqh1l66fLA1/FYxAM7cxcz5rBVK2zvf
+KrT3LNmzVpbltl3nPQhvAKEV8zEdSVze6Z0K6QbZP8WfPtCiQYMAjeNu48AIp/+t
+pxJbkcmWLIYixfiJbHfe0LUu/P3rk0WDCHnheVzOTSE8XzGqnIxyv6w4rYOl9IeT
+SnYublICJHOTp6gKuiIieGD7TC14DB8vYbSc0+opIvYYItcS//laLOD+eLUgZx5K
+Wb4ubbosnyXhAoIBAFGzQsqgFuCbQemBupviTmDnZZCmPaTQc9Mmd0DoTGuJ0x9r
+7udrkq9kqdNh6fR3Hu3WhApgVXlXvkvuJ7e8N9IHb7F+02Q39wGn3FxteMjyyfTt
+ccj0h1vOt3oxBgzayVSr2KqHC4bQfm9quGEH2a5JIa38blx+MbqHI39SyQalQzRf
+qDCRldHtS27kbfw6cqTj6oPLRUTfNjN5xxeassP/eZjUNocggMQ1NH8bsfxMbkXg
+RmpKGJVdGsHdaA/Jh9DXhtsPv/zCaLIiga+a3WFy1nUAV+Xz4nWFCS0IBtSxiErL
+aFHLwY3CuWnCi9UY+w5jHO9jMxwqT5Ds3drSQycCggEBALoewFEy4d0iRGGYtb6w
+aJ4xGLBwwXt7sKcx9eXARZi8oG5QkHI9pXg9vFPfAZTpdb7uNAzszDSeS1TxakdH
+uubdpJtRrDRXSrTbbI6Wvyh9oIPgijBZVWGFJtnRceMyFGeFifRI1LZpN1mHG2o4
+QKvPPhzau0+Em4syGE+69tvlblkqiSm6gaN+RabRNnM+ul6jpVGrBsBDAhPxdIQE
+CBS+rW9/bw9PB2m1XemlML0HGVsUzoKUUWDHISJZYXDH42yNHzVq3R014XARby31
+vQEQzrbnfEL2NwoChdzuFeLytujddKZLnksPsaFOeYAqjJIh6kE8Lnh+r27a4vMM
+cqECggEAAx1DVI43AMBfSbAs5C41vjRdjMrZtxfKIpFjj1whGj/JzLKdMdqqH+Ai
++R6NI7IB88pGHlCOmdEpfbr4Cq1ZnizA3yLV9sluMz1bpHlIDsCIp+1VkQYKfsEv
+upZy82MtfGtG3BSLn+GCTzLJcTN6KINg98Xivp/WsRAEvwT/w1o4iJMgzKmTET2I
+UGJfZcF0WeSVo34FNArfXyfXPvPV7mi08Z6fQuUnFvH9tGZs5Y9mUUSgXXEDSjKY
+ZHliqmDNGub7rMy6/0wDOWiS4pi/w8FeCyBvbx23rj6i+FLO6GK+5B7TaCxjOVbk
+SYVTfCHpvJIgjRkRMP2yZCk3g6T4XA==
+-----END PRIVATE KEY-----"""
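The new fixtures cover certificates with an empty subject: NOCN_NOSUBALT_CRT has neither a CN nor a subjectAltName, while NOCN_SUBALT_CRT / NOCN_SUBALT_KEY carry only a Subject Alternative Name (www.example.com). A short sketch of how such certificates can be inspected with the cryptography package (pem_bytes is assumed to hold one of the fixtures above):

from cryptography import x509


def describe(pem_bytes):
    cert = x509.load_pem_x509_certificate(pem_bytes)
    subject_attrs = list(cert.subject)  # empty list -> no CN to fall back on
    try:
        san = cert.extensions.get_extension_for_class(
            x509.SubjectAlternativeName).value
        dns_names = san.get_values_for_type(x509.DNSName)
    except x509.ExtensionNotFound:
        dns_names = []
    return bool(subject_attrs), dns_names


# Expected, given the fixtures above:
#   describe(NOCN_NOSUBALT_CRT) -> (False, [])
#   describe(NOCN_SUBALT_CRT)   -> (False, ['www.example.com'])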
octavia/tests/functional/api/v2/base.py CHANGED
@@ -140,7 +140,7 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase):
         self.addCleanup(reset_pecan)

     def start_quota_mock(self, object_type):
-        def mock_quota(session, lock_session, _class, project_id, count=1):
+        def mock_quota(session, _class, project_id, count=1):
             return _class == object_type
         check_quota_met_true_mock = mock.patch(
             'octavia.db.repositories.Repositories.check_quota_met',
octavia/tests/functional/api/v2/test_health_monitor.py CHANGED
@@ -1782,6 +1782,24 @@ class TestHealthMonitor(base.BaseAPITest):
             pool_prov_status=constants.PENDING_UPDATE,
             hm_prov_status=constants.PENDING_UPDATE)

+    def test_update_udp_case_with_udp_hm(self):
+        api_hm = self.create_health_monitor(
+            self.udp_pool_with_listener_id,
+            constants.HEALTH_MONITOR_UDP_CONNECT, 3, 1, 1, 1).get(
+            self.root_tag)
+        self.set_lb_status(self.udp_lb_id)
+        new_hm = {'timeout': 2}
+        self.put(
+            self.HM_PATH.format(healthmonitor_id=api_hm.get('id')),
+            self._build_body(new_hm))
+        self.assert_correct_status(
+            lb_id=self.udp_lb_id, listener_id=self.udp_listener_id,
+            pool_id=self.udp_pool_with_listener_id, hm_id=api_hm.get('id'),
+            lb_prov_status=constants.PENDING_UPDATE,
+            listener_prov_status=constants.PENDING_UPDATE,
+            pool_prov_status=constants.PENDING_UPDATE,
+            hm_prov_status=constants.PENDING_UPDATE)
+
     def test_negative_update_udp_case(self):
         api_hm = self.create_health_monitor(
             self.udp_pool_with_listener_id,