octavia 13.0.0.0rc1__py3-none-any.whl → 13.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
  2. octavia/amphorae/backends/agent/api_server/osutils.py +1 -2
  3. octavia/amphorae/backends/agent/api_server/util.py +35 -2
  4. octavia/amphorae/backends/utils/interface.py +4 -5
  5. octavia/amphorae/drivers/driver_base.py +16 -0
  6. octavia/amphorae/drivers/haproxy/rest_api_driver.py +13 -8
  7. octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +0 -1
  8. octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template +0 -1
  9. octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
  10. octavia/amphorae/drivers/noop_driver/driver.py +3 -0
  11. octavia/api/common/pagination.py +1 -1
  12. octavia/api/v2/controllers/health_monitor.py +3 -2
  13. octavia/api/v2/controllers/l7policy.py +0 -1
  14. octavia/api/v2/controllers/l7rule.py +0 -1
  15. octavia/api/v2/controllers/listener.py +0 -1
  16. octavia/api/v2/controllers/load_balancer.py +13 -7
  17. octavia/api/v2/controllers/member.py +18 -5
  18. octavia/api/v2/controllers/pool.py +6 -7
  19. octavia/api/v2/types/pool.py +1 -1
  20. octavia/certificates/common/pkcs12.py +9 -9
  21. octavia/certificates/manager/barbican.py +24 -16
  22. octavia/certificates/manager/castellan_mgr.py +12 -7
  23. octavia/certificates/manager/noop.py +106 -0
  24. octavia/common/clients.py +22 -4
  25. octavia/common/config.py +21 -5
  26. octavia/common/constants.py +4 -0
  27. octavia/common/exceptions.py +6 -0
  28. octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
  29. octavia/common/keystone.py +7 -7
  30. octavia/common/tls_utils/cert_parser.py +23 -9
  31. octavia/controller/worker/task_utils.py +28 -6
  32. octavia/controller/worker/v2/controller_worker.py +2 -2
  33. octavia/controller/worker/v2/flows/amphora_flows.py +41 -10
  34. octavia/controller/worker/v2/flows/flow_utils.py +6 -4
  35. octavia/controller/worker/v2/flows/load_balancer_flows.py +17 -3
  36. octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +114 -23
  37. octavia/controller/worker/v2/tasks/database_tasks.py +36 -47
  38. octavia/controller/worker/v2/tasks/lifecycle_tasks.py +96 -40
  39. octavia/controller/worker/v2/tasks/network_tasks.py +12 -13
  40. octavia/db/base_models.py +16 -4
  41. octavia/db/repositories.py +34 -33
  42. octavia/network/drivers/neutron/allowed_address_pairs.py +10 -8
  43. octavia/network/drivers/noop_driver/driver.py +1 -2
  44. octavia/tests/common/sample_certs.py +115 -0
  45. octavia/tests/functional/api/v2/base.py +1 -1
  46. octavia/tests/functional/api/v2/test_health_monitor.py +18 -0
  47. octavia/tests/functional/api/v2/test_listener.py +45 -0
  48. octavia/tests/functional/api/v2/test_member.py +32 -0
  49. octavia/tests/functional/db/base.py +9 -0
  50. octavia/tests/functional/db/test_repositories.py +45 -98
  51. octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
  52. octavia/tests/unit/amphorae/backends/utils/test_interface.py +3 -1
  53. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +3 -3
  54. octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +0 -4
  55. octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
  56. octavia/tests/unit/api/common/test_pagination.py +78 -1
  57. octavia/tests/unit/api/v2/types/test_pool.py +71 -0
  58. octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
  59. octavia/tests/unit/certificates/manager/test_noop.py +53 -0
  60. octavia/tests/unit/cmd/test_prometheus_proxy.py +8 -1
  61. octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
  62. octavia/tests/unit/common/test_config.py +35 -0
  63. octavia/tests/unit/common/test_keystone.py +32 -0
  64. octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
  65. octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
  66. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +10 -5
  67. octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +234 -17
  68. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
  69. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
  70. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +57 -2
  71. octavia/tests/unit/controller/worker/v2/test_controller_worker.py +56 -1
  72. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +24 -1
  73. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/AUTHORS +8 -0
  74. octavia-13.0.1.dist-info/METADATA +155 -0
  75. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/RECORD +90 -88
  76. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/WHEEL +1 -1
  77. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/entry_points.txt +1 -1
  78. octavia-13.0.1.dist-info/pbr.json +1 -0
  79. octavia-13.0.0.0rc1.dist-info/METADATA +0 -158
  80. octavia-13.0.0.0rc1.dist-info/pbr.json +0 -1
  81. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/LICENSE +0 -0
  82. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/README.rst +0 -0
  83. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
  84. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
  85. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  86. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
  87. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  88. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
  89. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  90. {octavia-13.0.0.0rc1.data → octavia-13.0.1.data}/scripts/octavia-wsgi +0 -0
  91. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/LICENSE +0 -0
  92. {octavia-13.0.0.0rc1.dist-info → octavia-13.0.1.dist-info}/top_level.txt +0 -0
octavia/amphorae/backends/agent/api_server/keepalivedlvs.py
@@ -230,6 +230,15 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
                               .format(action, listener_id)),
                       'details': e.output}, status=500)
 
+        is_vrrp = (CONF.controller_worker.loadbalancer_topology ==
+                   consts.TOPOLOGY_ACTIVE_STANDBY)
+        # TODO(gthiemonge) remove RESTART from the list (same as previous todo
+        # in this function)
+        if not is_vrrp and action in [consts.AMP_ACTION_START,
+                                      consts.AMP_ACTION_RESTART,
+                                      consts.AMP_ACTION_RELOAD]:
+            util.send_vip_advertisements(listener_id=listener_id)
+
         return webob.Response(
             json={'message': 'OK',
                   'details': 'keepalivedlvs listener {listener_id} '
octavia/amphorae/backends/agent/api_server/osutils.py
@@ -38,8 +38,7 @@ class BaseOS(object):
     @classmethod
     def _get_subclasses(cls):
         for subclass in cls.__subclasses__():
-            for sc in subclass._get_subclasses():
-                yield sc
+            yield from subclass._get_subclasses()
             yield subclass
 
     @classmethod
octavia/amphorae/backends/agent/api_server/util.py
@@ -392,7 +392,37 @@ def get_haproxy_vip_addresses(lb_id):
     return vips
 
 
-def send_vip_advertisements(lb_id):
+def get_lvs_vip_addresses(listener_id: str) -> tp.List[str]:
+    """Get the VIP addresses for a LVS load balancer.
+
+    :param listener_id: The listener ID to get VIP addresses from.
+    :returns: List of VIP addresses (IPv4 and IPv6)
+    """
+    vips = []
+    # Extract the VIP addresses from keepalived configuration
+    # Format is
+    # virtual_server_group ipv<n>-group {
+    #     vip_address1 port1
+    #     vip_address2 port2
+    # }
+    # it can be repeated in case of dual-stack LBs
+    with open(keepalived_lvs_cfg_path(listener_id), encoding='utf-8') as file:
+        vsg_section = False
+        for line in file:
+            current_line = line.strip()
+            if vsg_section:
+                if current_line.startswith('}'):
+                    vsg_section = False
+                else:
+                    vip_address = current_line.split(' ')[0]
+                    vips.append(vip_address)
+            elif line.startswith('virtual_server_group '):
+                vsg_section = True
+    return vips
+
+
+def send_vip_advertisements(lb_id: tp.Optional[str] = None,
+                            listener_id: tp.Optional[str] = None):
     """Sends address advertisements for each load balancer VIP.
 
     This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
@@ -402,7 +432,10 @@ def send_vip_advertisements(lb_id):
     :returns: None
     """
    try:
-        vips = get_haproxy_vip_addresses(lb_id)
+        if lb_id:
+            vips = get_haproxy_vip_addresses(lb_id)
+        else:
+            vips = get_lvs_vip_addresses(listener_id)
 
         for vip in vips:
             interface = network_utils.get_interface_name(
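Note: to illustrate the configuration format that the new get_lvs_vip_addresses
helper above parses, here is a self-contained sketch running the same loop over
an inline dual-stack sample (the addresses are documentation examples, not
values from this release):

    # Stand-alone illustration of the virtual_server_group parsing above.
    sample_cfg_lines = [
        'virtual_server_group ipv4-group {',
        '    192.0.2.10 80',
        '}',
        'virtual_server_group ipv6-group {',
        '    2001:db8::10 80',
        '}',
    ]

    vips = []
    vsg_section = False
    for line in sample_cfg_lines:
        current_line = line.strip()
        if vsg_section:
            if current_line.startswith('}'):
                vsg_section = False
            else:
                vips.append(current_line.split(' ')[0])
        elif line.startswith('virtual_server_group '):
            vsg_section = True

    print(vips)  # ['192.0.2.10', '2001:db8::10']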
octavia/amphorae/backends/utils/interface.py
@@ -356,11 +356,10 @@ class InterfaceController(object):
                 **rule)
 
     def _scripts_up(self, interface, current_state):
-        if current_state == consts.IFACE_DOWN:
-            for script in interface.scripts[consts.IFACE_UP]:
-                LOG.debug("%s: Running command '%s'",
-                          interface.name, script[consts.COMMAND])
-                subprocess.check_output(script[consts.COMMAND].split())
+        for script in interface.scripts[consts.IFACE_UP]:
+            LOG.debug("%s: Running command '%s'",
+                      interface.name, script[consts.COMMAND])
+            subprocess.check_output(script[consts.COMMAND].split())
 
     def down(self, interface):
         LOG.info("Setting interface %s down", interface.name)
octavia/amphorae/drivers/driver_base.py
@@ -14,6 +14,9 @@
 # under the License.
 
 import abc
+from typing import Optional
+
+from octavia.db import models as db_models
 
 
 class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
@@ -236,6 +239,19 @@ class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
         :type timeout_dict: dict
         """
 
+    @abc.abstractmethod
+    def check(self, amphora: db_models.Amphora,
+              timeout_dict: Optional[dict] = None):
+        """Check connectivity to the amphora.
+
+        :param amphora: The amphora to query.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :raises TimeOutException: The amphora didn't reply
+        """
+
 
 class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
     """Abstract mixin class for VRRP support in loadbalancer amphorae
octavia/amphorae/drivers/haproxy/rest_api_driver.py
@@ -111,6 +111,11 @@ class HaproxyAmphoraLoadBalancerDriver(
 
         return api_version
 
+    def check(self, amphora: db_models.Amphora,
+              timeout_dict: Optional[dict] = None):
+        """Check connectivity to the amphora."""
+        self._populate_amphora_api_version(amphora, timeout_dict)
+
     def update_amphora_listeners(self, loadbalancer, amphora,
                                  timeout_dict=None):
         """Update the amphora with a new configuration.
@@ -579,15 +584,15 @@ class HaproxyAmphoraLoadBalancerDriver(
                              req_read_timeout, conn_max_retries,
                              conn_retry_interval
         :type timeout_dict: dict
-        :returns: None if not found, the interface name string if found.
+        :returns: the interface name string if found.
+        :raises octavia.amphorae.drivers.haproxy.exceptions.NotFound:
+                No interface found on the amphora
+        :raises TimeOutException: The amphora didn't reply
         """
-        try:
-            self._populate_amphora_api_version(amphora, timeout_dict)
-            response_json = self.clients[amphora.api_version].get_interface(
-                amphora, ip_address, timeout_dict, log_error=False)
-            return response_json.get('interface', None)
-        except (exc.NotFound, driver_except.TimeOutException):
-            return None
+        self._populate_amphora_api_version(amphora, timeout_dict)
+        response_json = self.clients[amphora.api_version].get_interface(
+            amphora, ip_address, timeout_dict, log_error=False)
+        return response_json.get('interface', None)
 
 
 # Check a custom hostname
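Note: because this lookup now propagates NotFound/TimeOutException instead of
returning None, callers that relied on the old None sentinel need explicit
exception handling. A minimal caller-side sketch; the helper name and the
assumption that this is the interface-from-IP lookup are illustrative, not
taken from the hunk above:

    # Hypothetical helper restoring the old None-sentinel behavior.
    from octavia.amphorae.driver_exceptions import exceptions as driver_except
    from octavia.amphorae.drivers.haproxy import exceptions as exc

    def get_interface_or_none(driver, amphora, ip_address):
        try:
            return driver.get_interface_from_ip(amphora, ip_address)
        except exc.NotFound:
            return None  # no interface on the amphora for this IP
        except driver_except.TimeOutException:
            return None  # the amphora did not reply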
octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py
@@ -123,7 +123,6 @@ class KeepalivedJinjaTemplater(object):
             peers_ips.append(amp.vrrp_ip)
         return self.get_template(self.keepalived_template).render(
             {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name,
-             'amp_role': amphora.role,
              'amp_intf': amphora.vrrp_interface,
              'amp_vrrp_id': amphora.vrrp_id,
              'amp_priority': amphora.vrrp_priority,
octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template
@@ -21,7 +21,6 @@ vrrp_script check_script {
 }
 
 vrrp_instance {{ vrrp_group_name }} {
-  state {{ amp_role }}
   interface {{ amp_intf }}
   virtual_router_id {{ amp_vrrp_id }}
   priority {{ amp_priority }}
octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py
@@ -88,7 +88,8 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):
 
         LOG.info("Start amphora %s VRRP Service.", amphora.id)
 
-        self._populate_amphora_api_version(amphora)
+        self._populate_amphora_api_version(amphora,
+                                           timeout_dict=timeout_dict)
         self.clients[amphora.api_version].start_vrrp(amphora,
                                                      timeout_dict=timeout_dict)
 
octavia/amphorae/drivers/noop_driver/driver.py
@@ -196,3 +196,6 @@ class NoopAmphoraLoadBalancerDriver(
 
     def reload_vrrp_service(self, loadbalancer):
         pass
+
+    def check(self, amphora, timeout_dict=None):
+        pass
octavia/api/common/pagination.py
@@ -169,7 +169,7 @@ class PaginationHelper(object):
         # TODO(rm_work) Do we need to know when there are more vs exact?
         # We safely know if we have a full page, but it might include the
         # last element or it might not, it is unclear
-        if len(model_list) >= self.limit:
+        if self.limit is None or len(model_list) >= self.limit:
             next_attr.append("marker={}".format(model_list[-1].get('id')))
         next_link = {
             "rel": "next",
octavia/api/v2/controllers/health_monitor.py
@@ -188,7 +188,9 @@ class HealthMonitorController(base.BaseController):
             request.type == consts.HEALTH_MONITOR_UDP_CONNECT)
         conf_min_delay = (
             CONF.api_settings.udp_connect_min_interval_health_monitor)
-        if hm_is_type_udp and request.delay < conf_min_delay:
+        if (hm_is_type_udp and
+                not isinstance(request.delay, wtypes.UnsetType) and
+                request.delay < conf_min_delay):
             raise exceptions.ValidationException(detail=_(
                 "The request delay value %(delay)s should be larger than "
                 "%(conf_min_delay)s for %(type)s health monitor type.") % {
@@ -238,7 +240,6 @@ class HealthMonitorController(base.BaseController):
         context.session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     context.session,
                     data_models.HealthMonitor,
                     health_monitor.project_id):
octavia/api/v2/controllers/l7policy.py
@@ -151,7 +151,6 @@ class L7PolicyController(base.BaseController):
         lock_session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     lock_session,
                     data_models.L7Policy,
                     l7policy.project_id):
octavia/api/v2/controllers/l7rule.py
@@ -154,7 +154,6 @@ class L7RuleController(base.BaseController):
         context.session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     context.session,
                     data_models.L7Rule,
                     l7rule.project_id):
octavia/api/v2/controllers/listener.py
@@ -372,7 +372,6 @@ class ListenersController(base.BaseController):
         context.session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     context.session,
                     data_models.Listener,
                     listener.project_id):
octavia/api/v2/controllers/load_balancer.py
@@ -465,7 +465,6 @@ class LoadBalancersController(base.BaseController):
         lock_session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     lock_session,
                     data_models.LoadBalancer,
                     load_balancer.project_id):
@@ -552,8 +551,15 @@ class LoadBalancersController(base.BaseController):
                     subnet_id=add_vip.subnet_id)
 
         if listeners or pools:
+            # expire_all is required here, it ensures that the loadbalancer
+            # will be re-fetched with its associated vip in _graph_create.
+            # without expire_all the vip attributes that have been updated
+            # just before this call may not be set correctly in the
+            # loadbalancer object.
+            lock_session.expire_all()
+
             db_pools, db_lists = self._graph_create(
-                context.session, lock_session, db_lb, listeners, pools)
+                lock_session, db_lb, listeners, pools)
 
             # Prepare the data for the driver data model
             driver_lb_dict = driver_utils.lb_dict_to_provider_dict(
@@ -583,7 +589,7 @@ class LoadBalancersController(base.BaseController):
             db_lb, lb_types.LoadBalancerFullResponse)
         return lb_types.LoadBalancerFullRootResponse(loadbalancer=result)
 
-    def _graph_create(self, session, lock_session, db_lb, listeners, pools):
+    def _graph_create(self, session, db_lb, listeners, pools):
         # Track which pools must have a full specification
         pools_required = set()
         # Look through listeners and find any extra pools, and move them to the
@@ -642,7 +648,7 @@ class LoadBalancersController(base.BaseController):
 
         # Check quotas for pools.
         if pools and self.repositories.check_quota_met(
-                session, lock_session, data_models.Pool, db_lb.project_id,
+                session, data_models.Pool, db_lb.project_id,
                 count=len(pools)):
             raise exceptions.QuotaException(resource=data_models.Pool._name())
 
@@ -661,13 +667,13 @@ class LoadBalancersController(base.BaseController):
             p['load_balancer_id'] = db_lb.id
             p['project_id'] = db_lb.project_id
             new_pool = (pool.PoolsController()._graph_create(
-                session, lock_session, p))
+                session, p))
             new_pools.append(new_pool)
             pool_name_ids[new_pool.name] = new_pool.id
 
         # Now check quotas for listeners
         if listeners and self.repositories.check_quota_met(
-                session, lock_session, data_models.Listener, db_lb.project_id,
+                session, data_models.Listener, db_lb.project_id,
                 count=len(listeners)):
             raise exceptions.QuotaException(
                 resource=data_models.Listener._name())
@@ -687,7 +693,7 @@ class LoadBalancersController(base.BaseController):
             li['load_balancer_id'] = db_lb.id
             li['project_id'] = db_lb.project_id
             new_lists.append(listener.ListenersController()._graph_create(
-                lock_session, li, pool_name_ids=pool_name_ids))
+                session, li, pool_name_ids=pool_name_ids))
 
         return new_pools, new_lists
 
octavia/api/v2/controllers/member.py
@@ -31,6 +31,7 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import prepare as db_prepare
+from octavia.i18n import _
 
 
 LOG = logging.getLogger(__name__)
@@ -165,7 +166,6 @@ class MemberController(base.BaseController):
         context.session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     context.session,
                     data_models.Member,
                     member.project_id):
@@ -336,7 +336,6 @@ class MembersController(MemberController):
 
         with context.session.begin():
             db_pool = self._get_db_pool(context.session, self.pool_id)
-            old_members = db_pool.members
 
             project_id, provider = self._get_lb_project_id_provider(
                 context.session, db_pool.load_balancer_id)
@@ -354,6 +353,11 @@ class MembersController(MemberController):
         with context.session.begin():
             self._test_lb_and_listener_and_pool_statuses(context.session)
 
+            # Reload the pool, the members may have been updated between the
+            # first query in this function and the lock of the loadbalancer
+            db_pool = self._get_db_pool(context.session, self.pool_id)
+            old_members = db_pool.members
+
             old_member_uniques = {
                 (m.ip_address, m.protocol_port): m.id for m in old_members}
             new_member_uniques = [
@@ -362,12 +366,21 @@ class MembersController(MemberController):
             # Find members that are brand new or updated
             new_members = []
             updated_members = []
+            updated_member_uniques = set()
             for m in members:
-                if (m.address, m.protocol_port) not in old_member_uniques:
+                key = (m.address, m.protocol_port)
+                if key not in old_member_uniques:
                     validate.ip_not_reserved(m.address)
                     new_members.append(m)
                 else:
-                    m.id = old_member_uniques[(m.address, m.protocol_port)]
+                    m.id = old_member_uniques[key]
+                    if key in updated_member_uniques:
+                        LOG.error("Member %s is updated multiple times in "
+                                  "the same batch request.", m.id)
+                        raise exceptions.ValidationException(
+                            detail=_("Member must be updated only once in "
+                                     "the same request."))
+                    updated_member_uniques.add(key)
                     updated_members.append(m)
 
             # Find members that are deleted
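Note: with the new updated_member_uniques check, a batch member request that
lists the same (address, protocol_port) pair twice is rejected instead of
being applied twice. A hypothetical request body that now fails validation
(the addresses and weights are illustrative):

    # Hypothetical batch update body; the duplicate pair triggers
    # ValidationException ("Member must be updated only once in the same
    # request.") and the API returns 400.
    batch_update_body = {
        "members": [
            {"address": "192.0.2.10", "protocol_port": 80, "weight": 1},
            {"address": "192.0.2.10", "protocol_port": 80, "weight": 10},
        ]
    }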
@@ -387,7 +400,7 @@ class MembersController(MemberController):
             else:
                 member_count_diff = len(new_members) - len(deleted_members)
                 if member_count_diff > 0 and self.repositories.check_quota_met(
-                        context.session, context.session, data_models.Member,
+                        context.session, data_models.Member,
                         db_pool.project_id, count=member_count_diff):
                     raise exceptions.QuotaException(
                         resource=data_models.Member._name())
octavia/api/v2/controllers/pool.py
@@ -260,7 +260,6 @@ class PoolsController(base.BaseController):
         context.session.begin()
         try:
             if self.repositories.check_quota_met(
-                    context.session,
                     context.session,
                     data_models.Pool,
                     pool.project_id):
@@ -304,18 +303,18 @@ class PoolsController(base.BaseController):
         result = self._convert_db_to_type(db_pool, pool_types.PoolResponse)
         return pool_types.PoolRootResponse(pool=result)
 
-    def _graph_create(self, session, lock_session, pool_dict):
+    def _graph_create(self, session, pool_dict):
         load_balancer_id = pool_dict['load_balancer_id']
         pool_dict = db_prepare.create_pool(
             pool_dict, load_balancer_id)
         members = pool_dict.pop('members', []) or []
         hm = pool_dict.pop('health_monitor', None)
         db_pool = self._validate_create_pool(
-            lock_session, pool_dict)
+            session, pool_dict)
 
         # Check quotas for healthmonitors
         if hm and self.repositories.check_quota_met(
-                session, lock_session, data_models.HealthMonitor,
+                session, data_models.HealthMonitor,
                 db_pool.project_id):
             raise exceptions.QuotaException(
                 resource=data_models.HealthMonitor._name())
@@ -325,7 +324,7 @@ class PoolsController(base.BaseController):
             hm[constants.POOL_ID] = db_pool.id
             hm[constants.PROJECT_ID] = db_pool.project_id
             new_hm = health_monitor.HealthMonitorController()._graph_create(
-                lock_session, hm)
+                session, hm)
             if db_pool.protocol in (constants.PROTOCOL_UDP,
                                     lib_consts.PROTOCOL_SCTP):
                 health_monitor.HealthMonitorController(
@@ -344,7 +343,7 @@ class PoolsController(base.BaseController):
 
         # Now check quotas for members
         if members and self.repositories.check_quota_met(
-                session, lock_session, data_models.Member,
+                session, data_models.Member,
                 db_pool.project_id, count=len(members)):
             raise exceptions.QuotaException(
                 resource=data_models.Member._name())
@@ -357,7 +356,7 @@ class PoolsController(base.BaseController):
                 m['project_id'] = db_pool.project_id
                 new_members.append(
                     member.MembersController(db_pool.id)._graph_create(
-                        lock_session, m))
+                        session, m))
             db_pool.members = new_members
         return db_pool
 
octavia/api/v2/types/pool.py
@@ -106,7 +106,7 @@ class PoolResponse(BasePoolType):
         if cls._full_response():
             del pool.loadbalancers
             member_model = member.MemberFullResponse
-            if pool.healthmonitor:
+            if data_model.health_monitor:
                 pool.healthmonitor = (
                     health_monitor.HealthMonitorFullResponse
                     .from_data_model(data_model.health_monitor))
octavia/certificates/common/pkcs12.py
@@ -18,7 +18,7 @@ Common classes for pkcs12 based certificate handling
 """
 
 from cryptography.hazmat.primitives import serialization
-from OpenSSL import crypto
+from cryptography.hazmat.primitives.serialization import pkcs12
 
 from octavia.certificates.common import cert
 from octavia.common import exceptions
@@ -28,21 +28,21 @@ class PKCS12Cert(cert.Cert):
     """Representation of a Cert for local storage."""
     def __init__(self, certbag):
         try:
-            p12 = crypto.load_pkcs12(certbag)
-        except crypto.Error as e:
+            p12 = pkcs12.load_pkcs12(certbag, None)
+        except (TypeError, ValueError) as e:
             raise exceptions.UnreadablePKCS12(error=str(e))
-        self.certificate = p12.get_certificate()
-        self.intermediates = p12.get_ca_certificates()
-        self.private_key = p12.get_privatekey()
+        self.certificate = p12.cert
+        self.intermediates = p12.additional_certs
+        self.private_key = p12.key
 
     def get_certificate(self):
-        return self.certificate.to_cryptography().public_bytes(
+        return self.certificate.certificate.public_bytes(
            encoding=serialization.Encoding.PEM).strip()
 
     def get_intermediates(self):
         if self.intermediates:
             int_data = [
-                ic.to_cryptography().public_bytes(
+                ic.certificate.public_bytes(
                     encoding=serialization.Encoding.PEM).strip()
                 for ic in self.intermediates
             ]
@@ -50,7 +50,7 @@ class PKCS12Cert(cert.Cert):
         return None
 
     def get_private_key(self):
-        return self.private_key.to_cryptography_key().private_bytes(
+        return self.private_key.private_bytes(
             encoding=serialization.Encoding.PEM,
             format=serialization.PrivateFormat.TraditionalOpenSSL,
             encryption_algorithm=serialization.NoEncryption()).strip()
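Note: the PKCS12Cert changes above move from pyOpenSSL to the pyca/cryptography
PKCS12 API (pkcs12.load_pkcs12 is available in cryptography >= 36). A minimal
stand-alone sketch of that API; 'bundle.p12' is an illustrative path, not a
file shipped with octavia:

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.serialization import pkcs12

    with open('bundle.p12', 'rb') as f:
        p12 = pkcs12.load_pkcs12(f.read(), None)  # None = unencrypted bundle

    # p12.cert wraps an x509.Certificate; p12.additional_certs holds the
    # intermediates and p12.key the private key, mirroring PKCS12Cert above.
    pem_cert = p12.cert.certificate.public_bytes(serialization.Encoding.PEM)
    pem_key = p12.key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption())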
octavia/certificates/manager/barbican.py
@@ -17,8 +17,9 @@
 """
 Cert manager implementation for Barbican using a single PKCS12 secret
 """
-from OpenSSL import crypto
-
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.serialization import pkcs12 as c_pkcs12
+from cryptography import x509
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import encodeutils
@@ -64,25 +65,29 @@ class BarbicanCertManager(cert_mgr.CertManager):
         connection = self.auth.get_barbican_client(context.project_id)
 
         LOG.info("Storing certificate secret '%s' in Barbican.", name)
-        p12 = crypto.PKCS12()
-        p12.set_friendlyname(encodeutils.to_utf8(name))
-        x509_cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
-        p12.set_certificate(x509_cert)
-        x509_pk = crypto.load_privatekey(crypto.FILETYPE_PEM, private_key)
-        p12.set_privatekey(x509_pk)
-        if intermediates:
-            cert_ints = list(cert_parser.get_intermediates_pems(intermediates))
-            x509_ints = [
-                crypto.load_certificate(crypto.FILETYPE_PEM, ci)
-                for ci in cert_ints]
-            p12.set_ca_certificates(x509_ints)
+
         if private_key_passphrase:
             raise exceptions.CertificateStorageException(
                 "Passphrase protected PKCS12 certificates are not supported.")
 
+        x509_cert = x509.load_pem_x509_certificate(certificate)
+        x509_pk = serialization.load_pem_private_key(private_key, None)
+        cas = None
+        if intermediates:
+            cert_ints = list(cert_parser.get_intermediates_pems(intermediates))
+            cas = [
+                x509.load_pem_x509_certificate(ci)
+                for ci in cert_ints]
+
         try:
             certificate_secret = connection.secrets.create(
-                payload=p12.export(),
+                payload=c_pkcs12.serialize_key_and_certificates(
+                    name=encodeutils.safe_encode(name),
+                    key=x509_pk,
+                    cert=x509_cert,
+                    cas=cas,
+                    encryption_algorithm=serialization.NoEncryption()
+                ),
                 expiration=expiration,
                 name=name
             )
@@ -115,7 +120,10 @@ class BarbicanCertManager(cert_mgr.CertManager):
             return pkcs12.PKCS12Cert(cert_secret.payload)
         except exceptions.UnreadablePKCS12:
             raise
-        except Exception:
+        except Exception as e:
+            LOG.warning('Failed to load PKCS12Cert for secret %s with %s',
+                        cert_ref, str(e))
+            LOG.warning('Falling back to the barbican_legacy implementation.')
             # If our get fails, try with the legacy driver.
             # TODO(rm_work): Remove this code when the deprecation cycle for
             # the legacy driver is complete.
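Note: serialize_key_and_certificates is the cryptography call that replaces
crypto.PKCS12().export() in both the Barbican manager above and the Castellan
manager below. A minimal stand-alone sketch with a self-generated key and
certificate (stand-ins for illustration, not octavia-managed objects):

    import datetime

    from cryptography import x509
    from cryptography.hazmat.primitives import hashes, serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.hazmat.primitives.serialization import pkcs12
    from cryptography.x509.oid import NameOID

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, 'example')])
    cert = (x509.CertificateBuilder()
            .subject_name(name).issuer_name(name)
            .public_key(key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(datetime.datetime.utcnow()
                             + datetime.timedelta(days=1))
            .sign(key, hashes.SHA256()))

    # An unencrypted, DER-encoded PKCS12 blob of the kind stored as a secret.
    p12_bytes = pkcs12.serialize_key_and_certificates(
        name=b'example', key=key, cert=cert, cas=None,
        encryption_algorithm=serialization.NoEncryption())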
octavia/certificates/manager/castellan_mgr.py
@@ -18,7 +18,8 @@ Cert manager implementation for Castellan
 """
 from castellan.common.objects import opaque_data
 from castellan import key_manager
-from OpenSSL import crypto
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.serialization import pkcs12 as c_pkcs12
 from oslo_config import cfg
 from oslo_log import log as logging
 
@@ -41,16 +42,20 @@ class CastellanCertManager(cert_mgr.CertManager):
     def store_cert(self, context, certificate, private_key, intermediates=None,
                    private_key_passphrase=None, expiration=None,
                    name="PKCS12 Certificate Bundle"):
-        p12 = crypto.PKCS12()
-        p12.set_certificate(certificate)
-        p12.set_privatekey(private_key)
-        if intermediates:
-            p12.set_ca_certificates(intermediates)
         if private_key_passphrase:
             raise exceptions.CertificateStorageException(
                 "Passphrases protected PKCS12 certificates are not supported.")
 
-        p12_data = opaque_data.OpaqueData(p12.export(), name=name)
+        p12_data = opaque_data.OpaqueData(
+            c_pkcs12.serialize_key_and_certificates(
+                name=None,
+                key=private_key,
+                cert=certificate,
+                cas=intermediates,
+                encryption_algorithm=serialization.NoEncryption()
+            ),
+            name=name
+        )
         self.manager.store(context, p12_data)
 
     def get_cert(self, context, cert_ref, resource_ref=None, check_only=False,