octavia 13.0.0__py3-none-any.whl → 13.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -2
- octavia/amphorae/backends/agent/api_server/util.py +35 -2
- octavia/amphorae/backends/utils/interface.py +4 -5
- octavia/amphorae/drivers/driver_base.py +16 -0
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +13 -8
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +0 -1
- octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template +0 -1
- octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
- octavia/amphorae/drivers/noop_driver/driver.py +3 -0
- octavia/api/common/pagination.py +1 -1
- octavia/api/v2/controllers/health_monitor.py +3 -2
- octavia/api/v2/controllers/l7policy.py +0 -1
- octavia/api/v2/controllers/l7rule.py +0 -1
- octavia/api/v2/controllers/listener.py +0 -1
- octavia/api/v2/controllers/load_balancer.py +13 -7
- octavia/api/v2/controllers/member.py +18 -5
- octavia/api/v2/controllers/pool.py +6 -7
- octavia/api/v2/types/pool.py +1 -1
- octavia/certificates/common/pkcs12.py +9 -9
- octavia/certificates/manager/barbican.py +24 -16
- octavia/certificates/manager/castellan_mgr.py +12 -7
- octavia/certificates/manager/noop.py +106 -0
- octavia/common/clients.py +22 -4
- octavia/common/config.py +21 -5
- octavia/common/constants.py +4 -0
- octavia/common/exceptions.py +6 -0
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
- octavia/common/keystone.py +7 -7
- octavia/common/tls_utils/cert_parser.py +23 -9
- octavia/controller/worker/task_utils.py +28 -6
- octavia/controller/worker/v2/controller_worker.py +2 -2
- octavia/controller/worker/v2/flows/amphora_flows.py +41 -10
- octavia/controller/worker/v2/flows/flow_utils.py +6 -4
- octavia/controller/worker/v2/flows/load_balancer_flows.py +17 -3
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +114 -23
- octavia/controller/worker/v2/tasks/database_tasks.py +36 -47
- octavia/controller/worker/v2/tasks/lifecycle_tasks.py +96 -40
- octavia/controller/worker/v2/tasks/network_tasks.py +12 -13
- octavia/db/base_models.py +16 -4
- octavia/db/repositories.py +34 -33
- octavia/network/drivers/neutron/allowed_address_pairs.py +10 -8
- octavia/network/drivers/noop_driver/driver.py +1 -2
- octavia/tests/common/sample_certs.py +115 -0
- octavia/tests/functional/api/v2/base.py +1 -1
- octavia/tests/functional/api/v2/test_health_monitor.py +18 -0
- octavia/tests/functional/api/v2/test_listener.py +45 -0
- octavia/tests/functional/api/v2/test_member.py +32 -0
- octavia/tests/functional/db/base.py +9 -0
- octavia/tests/functional/db/test_repositories.py +45 -98
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +3 -1
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +3 -3
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +0 -4
- octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
- octavia/tests/unit/api/common/test_pagination.py +78 -1
- octavia/tests/unit/api/v2/types/test_pool.py +71 -0
- octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
- octavia/tests/unit/certificates/manager/test_noop.py +53 -0
- octavia/tests/unit/cmd/test_prometheus_proxy.py +8 -1
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
- octavia/tests/unit/common/test_config.py +35 -0
- octavia/tests/unit/common/test_keystone.py +32 -0
- octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
- octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +10 -5
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +234 -17
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +57 -2
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +56 -1
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +24 -1
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/AUTHORS +8 -0
- octavia-13.0.1.dist-info/METADATA +155 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/RECORD +90 -88
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/WHEEL +1 -1
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/entry_points.txt +1 -1
- octavia-13.0.1.dist-info/pbr.json +1 -0
- octavia-13.0.0.dist-info/METADATA +0 -158
- octavia-13.0.0.dist-info/pbr.json +0 -1
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/README.rst +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/scripts/octavia-wsgi +0 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/LICENSE +0 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/top_level.txt +0 -0
octavia/certificates/manager/noop.py
ADDED
@@ -0,0 +1,106 @@
+# Copyright (c) 2023 Red Hat
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import uuid
+
+from oslo_log import log as logging
+
+from octavia.certificates.common import cert
+from octavia.certificates.common import local
+from octavia.certificates.manager import cert_mgr
+from octavia.common.tls_utils import cert_parser
+from octavia.tests.common import sample_certs
+
+LOG = logging.getLogger(__name__)
+
+
+class NoopCertManager(cert_mgr.CertManager):
+    """Cert manager implementation for no-op operations
+
+    """
+    def __init__(self):
+        super().__init__()
+        self._local_cert = None
+
+    @property
+    def local_cert(self):
+        if self._local_cert is None:
+            self._local_cert = self.store_cert(
+                None,
+                sample_certs.X509_CERT,
+                sample_certs.X509_CERT_KEY_ENCRYPTED,
+                sample_certs.X509_IMDS,
+                private_key_passphrase=sample_certs.X509_CERT_KEY_PASSPHRASE)
+        return self._local_cert
+
+    def store_cert(self, context, certificate, private_key, intermediates=None,
+                   private_key_passphrase=None, **kwargs) -> cert.Cert:
+        """Stores (i.e., registers) a cert with the cert manager.
+
+        This method stores the specified cert to the filesystem and returns
+        a UUID that can be used to retrieve it.
+
+        :param context: Ignored in this implementation
+        :param certificate: PEM encoded TLS certificate
+        :param private_key: private key for the supplied certificate
+        :param intermediates: ordered and concatenated intermediate certs
+        :param private_key_passphrase: optional passphrase for the supplied key
+
+        :returns: the UUID of the stored cert
+        :raises CertificateStorageException: if certificate storage fails
+        """
+        cert_ref = str(uuid.uuid4())
+        if isinstance(certificate, bytes):
+            certificate = certificate.decode('utf-8')
+        if isinstance(private_key, bytes):
+            private_key = private_key.decode('utf-8')
+
+        LOG.debug('Driver %s no-op, store_cert certificate %s, cert_ref %s',
+                  self.__class__.__name__, certificate, cert_ref)
+
+        cert_data = {'certificate': certificate, 'private_key': private_key}
+        if intermediates:
+            if isinstance(intermediates, bytes):
+                intermediates = intermediates.decode('utf-8')
+            cert_data['intermediates'] = list(
+                cert_parser.get_intermediates_pems(intermediates))
+        if private_key_passphrase:
+            if isinstance(private_key_passphrase, bytes):
+                private_key_passphrase = private_key_passphrase.decode('utf-8')
+            cert_data['private_key_passphrase'] = private_key_passphrase
+
+        return local.LocalCert(**cert_data)
+
+    def get_cert(self, context, cert_ref, check_only=True, **kwargs) -> (
+            cert.Cert):
+        LOG.debug('Driver %s no-op, get_cert with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+        return self.local_cert
+
+    def delete_cert(self, context, cert_ref, resource_ref, service_name=None):
+        LOG.debug('Driver %s no-op, delete_cert with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def set_acls(self, context, cert_ref):
+        LOG.debug('Driver %s no-op, set_acls with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def unset_acls(self, context, cert_ref):
+        LOG.debug('Driver %s no-op, unset_acls with cert_ref %s',
+                  self.__class__.__name__, cert_ref)
+
+    def get_secret(self, context, secret_ref) -> cert.Cert:
+        LOG.debug('Driver %s no-op, get_secret with secret_ref %s',
+                  self.__class__.__name__, secret_ref)
+        return self.local_cert
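As a quick orientation, a minimal usage sketch (hedged: it assumes a dev checkout where octavia.tests.common.sample_certs is importable, as the driver itself requires): every get_cert()/get_secret() call returns the same lazily built sample LocalCert, so no external secret store is ever contacted.

# Hypothetical smoke test; the manager and sample data come from the file above.
from octavia.certificates.manager import noop

cert_mgr = noop.NoopCertManager()
local_cert = cert_mgr.get_cert(None, 'any-ref')   # cert_ref is ignored
print(local_cert.get_certificate()[:27])          # typically -----BEGIN CERTIFICATE-----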
octavia/common/clients.py
CHANGED
@@ -79,11 +79,22 @@ class NeutronAuth(object):
         ksession = keystone.KeystoneSession('neutron')
         if not cls.neutron_client:
             sess = ksession.get_session()
-
-
+            kwargs = {'region_name': CONF.neutron.region_name}
+            # TODO(ricolin) `interface` option don't take list as option yet.
+            # We can move away from this when openstacksdk no longer depends
+            # on `interface`.
+            try:
+                interface = CONF.neutron.valid_interfaces[0]
+            except (TypeError, LookupError):
+                interface = CONF.neutron.valid_interfaces
+            if interface:
+                kwargs['interface'] = interface
             if CONF.neutron.endpoint_override:
                 kwargs['network_endpoint_override'] = (
                     CONF.neutron.endpoint_override)
+                if CONF.neutron.endpoint_override.startswith("https"):
+                    kwargs['insecure'] = CONF.neutron.insecure
+                    kwargs['cacert'] = CONF.neutron.cafile

             conn = openstack.connection.Connection(
                 session=sess, **kwargs)

@@ -100,15 +111,22 @@ class NeutronAuth(object):
        client.
        """
        sess = keystone.KeystoneSession('neutron').get_session()
+       kwargs = {}
        neutron_endpoint = CONF.neutron.endpoint_override
        if neutron_endpoint is None:
            endpoint_data = sess.get_endpoint_data(
-               service_type='network',
+               service_type='network',
+               interface=CONF.neutron.valid_interfaces,
                region_name=CONF.neutron.region_name)
            neutron_endpoint = endpoint_data.catalog_url

+       neutron_cafile = getattr(CONF.neutron, "cafile", None)
+       insecure = getattr(CONF.neutron, "insecure", False)
+       kwargs['verify'] = not insecure
+       if neutron_cafile is not None and not insecure:
+           kwargs['verify'] = neutron_cafile
        user_auth = token_endpoint.Token(neutron_endpoint, context.auth_token)
-       user_sess = session.Session(auth=user_auth)
+       user_sess = session.Session(auth=user_auth, **kwargs)

        conn = openstack.connection.Connection(
            session=user_sess, oslo_conf=CONF)
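The user-session hunk relies on keystoneauth1's requests-style `verify` argument: a boolean (False disables TLS verification) or a path to a CA bundle. A minimal sketch of that behaviour, with illustrative endpoint and CA-file values:

from keystoneauth1 import session
from keystoneauth1 import token_endpoint

# Illustrative values; in the fix they come from the [neutron] cafile/insecure options.
endpoint = 'https://neutron.example.com:9696'
cafile = '/etc/octavia/ca.pem'
insecure = False

kwargs = {'verify': not insecure}
if cafile is not None and not insecure:
    kwargs['verify'] = cafile          # verify against this CA bundle

auth = token_endpoint.Token(endpoint, 'user-token')
user_sess = session.Session(auth=auth, **kwargs)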
octavia/common/config.py
CHANGED
@@ -534,7 +534,18 @@ controller_worker_opts = [
     cfg.BoolOpt('event_notifications', default=True,
                 help=_('Enable octavia event notifications. See '
                        'oslo_messaging_notifications section for additional '
-                       'requirements.'))
+                       'requirements.')),
+    # 2000 attempts is around 2h45 with the default settings
+    cfg.IntOpt('db_commit_retry_attempts', default=2000,
+               help=_('The number of times the database action will be '
+                      'attempted.')),
+    cfg.IntOpt('db_commit_retry_initial_delay', default=1,
+               help=_('The initial delay before a retry attempt.')),
+    cfg.IntOpt('db_commit_retry_backoff', default=1,
+               help=_('The time to backoff retry attempts.')),
+    cfg.IntOpt('db_commit_retry_max', default=5,
+               help=_('The maximum amount of time to wait between retry '
+                      'attempts.')),
 ]

 task_flow_opts = [

@@ -924,24 +935,29 @@ def register_cli_opts():
 def handle_neutron_deprecations():
     # Apply neutron deprecated options to their new setting if needed

-    #
+    # Basically: if the new option is not set and the value of the deprecated
+    # option is not the default, it means that the deprecated setting is still
+    # used in the config file:
     # * convert it to a valid "new" value if needed
     # * set it as the default for the new option
     # Thus [neutron].<new_option> has an higher precedence than
     # [neutron].<deprecated_option>
     loc = cfg.CONF.get_location('endpoint', 'neutron')
-
+    new_loc = cfg.CONF.get_location('endpoint_override', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
         cfg.CONF.set_default('endpoint_override', cfg.CONF.neutron.endpoint,
                              'neutron')

     loc = cfg.CONF.get_location('endpoint_type', 'neutron')
-
+    new_loc = cfg.CONF.get_location('valid_interfaces', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
         endpoint_type = cfg.CONF.neutron.endpoint_type.replace('URL', '')
         cfg.CONF.set_default('valid_interfaces', [endpoint_type],
                              'neutron')

     loc = cfg.CONF.get_location('ca_certificates_file', 'neutron')
-
+    new_loc = cfg.CONF.get_location('cafile', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
         cfg.CONF.set_default('cafile', cfg.CONF.neutron.ca_certificates_file,
                              'neutron')
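The "2000 attempts is around 2h45" comment checks out: tenacity's wait_incrementing(start=1, increment=1, max=5), which these options feed in task_utils.py further down, waits 1, 2, 3, 4, then a capped 5 seconds between attempts. A quick back-of-the-envelope check:

# Sum of waits between 2000 attempts with start=1, increment=1, max=5.
waits = [min(1 + i, 5) for i in range(2000 - 1)]
total = sum(waits)                            # 9985
print(f'{total} s ~ {total / 3600:.2f} h')    # 9985 s ~ 2.77 h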
octavia/common/constants.py
CHANGED
@@ -313,6 +313,7 @@ AMPHORA_INDEX = 'amphora_index'
 AMPHORA_NETWORK_CONFIG = 'amphora_network_config'
 AMPHORAE = 'amphorae'
 AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config'
+AMPHORAE_STATUS = 'amphorae_status'
 AMPS_DATA = 'amps_data'
 ANTI_AFFINITY = 'anti-affinity'
 ATTEMPT_NUMBER = 'attempt_number'

@@ -387,6 +388,7 @@ MESSAGE = 'message'
 NAME = 'name'
 NETWORK = 'network'
 NETWORK_ID = 'network_id'
+NEW_AMPHORA_ID = 'new_amphora_id'
 NEXTHOP = 'nexthop'
 NICS = 'nics'
 OBJECT = 'object'

@@ -435,6 +437,7 @@ TLS_CERTIFICATE_ID = 'tls_certificate_id'
 TLS_CONTAINER_ID = 'tls_container_id'
 TOPOLOGY = 'topology'
 TOTAL_CONNECTIONS = 'total_connections'
+UNREACHABLE = 'unreachable'
 UPDATED_AT = 'updated_at'
 UPDATE_DICT = 'update_dict'
 UPDATED_PORTS = 'updated_ports'

@@ -562,6 +565,7 @@ ADMIN_DOWN_PORT = 'admin-down-port'
 AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug'
 AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener'
 AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert'
+AMPHORAE_GET_CONNECTIVITY_STATUS = 'amphorae-get-connectivity-status'
 AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug'
 ATTACH_PORT = 'attach-port'
 CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta'
octavia/common/exceptions.py
CHANGED
@@ -133,6 +133,12 @@ class UnreadablePKCS12(APIException):
     code = 400


+class MissingCertSubject(APIException):
+    msg = _('No CN or DNSName(s) found in certificate. The certificate is '
+            'invalid.')
+    code = 400
+
+
 class MisMatchedKey(OctaviaException):
     message = _("Key and x509 certificate do not match")
octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2
CHANGED
@@ -208,13 +208,18 @@ frontend {{ listener.id }}
         {% else %}
             {% set monitor_port_opt = "" %}
         {% endif %}
+        {% if pool.alpn_protocols is defined %}
+            {% set alpn_opt = " check-alpn %s"|format(pool.alpn_protocols) %}
+        {% else %}
+            {% set alpn_opt = "" %}
+        {% endif %}
         {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %}
             {% set monitor_ssl_opt = " check-ssl verify none" %}
         {% else %}
             {% set monitor_ssl_opt = "" %}
         {% endif %}
-        {% set hm_opt = " check%s inter %ds fall %d rise %d%s%s"|format(
-            monitor_ssl_opt, pool.health_monitor.delay,
+        {% set hm_opt = " check%s%s inter %ds fall %d rise %d%s%s"|format(
+            monitor_ssl_opt, alpn_opt, pool.health_monitor.delay,
             pool.health_monitor.fall_threshold,
             pool.health_monitor.rise_threshold, monitor_addr_opt,
             monitor_port_opt) %}

@@ -370,9 +375,6 @@ backend {{ pool.id }}:{{ listener.id }}
     option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }}
 {% endif %}
     http-check expect rstatus {{ pool.health_monitor.expected_codes }}
-{% endif %}
-{% if pool.health_monitor.type == constants.HEALTH_MONITOR_TLS_HELLO %}
-    option ssl-hello-chk
 {% endif %}
 {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %}
     option external-check
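To see what the new alpn_opt contributes, here is a self-contained Jinja2 sketch (a cut-down template, not the Octavia macro itself; the trailing addr/port options are omitted):

import jinja2

# Cut-down reproduction of the hm_opt assembly; field names mimic the macro.
tmpl = jinja2.Template(
    '{% if pool.alpn_protocols is defined %}'
    '{% set alpn_opt = " check-alpn %s"|format(pool.alpn_protocols) %}'
    '{% else %}{% set alpn_opt = "" %}{% endif %}'
    '{% set hm_opt = " check%s%s inter %ds fall %d rise %d"|format('
    'monitor_ssl_opt, alpn_opt, pool.delay, pool.fall, pool.rise) %}'
    'server member_1 192.0.2.10:443{{ hm_opt }}')

print(tmpl.render(pool={'alpn_protocols': 'h2,http/1.1',
                        'delay': 10, 'fall': 3, 'rise': 2},
                  monitor_ssl_opt=' check-ssl verify none'))
# server member_1 192.0.2.10:443 check check-ssl verify none check-alpn h2,http/1.1 inter 10s fall 3 rise 2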
octavia/common/keystone.py
CHANGED
@@ -80,14 +80,14 @@ class KeystoneSession(object):

         config = getattr(cfg.CONF, self.section)
         for opt in config:
-            # For each option in the [
-            # location, if the location is 'opt_default'
-            #
-            #
-            #
+            # For each option in the [section] section, get its setting
+            # location, if the location is 'opt_default', it means that
+            # the option is not configured in the config file.
+            # if the option is also defined in [service_auth], the
+            # option of the [section] can be replaced by the one from
+            # [service_auth]
             loc = cfg.CONF.get_location(opt, self.section)
-            if not loc or loc.location in (cfg.Locations.opt_default,
-                                           cfg.Locations.set_default):
+            if not loc or loc.location == cfg.Locations.opt_default:
                 if hasattr(cfg.CONF.service_auth, opt):
                     cur_value = getattr(config, opt)
                     value = getattr(cfg.CONF.service_auth, opt)
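The rewritten comment describes oslo.config's location tracking. A hedged, self-contained sketch of the same fallback rule, with illustrative option names:

from oslo_config import cfg

CONF = cfg.ConfigOpts()
for group in ('neutron', 'service_auth'):
    CONF.register_opts([cfg.StrOpt('auth_url')], group=group)
CONF.set_override('auth_url', 'http://keystone:5000', group='service_auth')

# Mirrors the fixed check: an option left at its default in the service
# section is replaced by the [service_auth] value.
loc = CONF.get_location('auth_url', 'neutron')
if not loc or loc.location == cfg.Locations.opt_default:
    CONF.set_default('auth_url', CONF.service_auth.auth_url, 'neutron')
print(CONF.neutron.auth_url)   # http://keystone:5000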
octavia/common/tls_utils/cert_parser.py
CHANGED
@@ -164,16 +164,14 @@ def _parse_pkcs7_bundle(pkcs7):
     if PKCS7_BEG in pkcs7:
         try:
             for substrate in _read_pem_blocks(pkcs7):
-
-                yield cert
+                yield from _get_certs_from_pkcs7_substrate(substrate)
         except Exception as e:
             LOG.exception('Unreadable Certificate.')
             raise exceptions.UnreadableCert from e

     # If no PEM encoding, assume this is DER encoded and try to decode
     else:
-
-        yield cert
+        yield from _get_certs_from_pkcs7_substrate(pkcs7)


 def _read_pem_blocks(data):

@@ -256,14 +254,16 @@ def get_host_names(certificate):
     """
     if isinstance(certificate, str):
         certificate = certificate.encode('utf-8')
+    host_names = {'cn': None, 'dns_names': []}
     try:
         cert = x509.load_pem_x509_certificate(certificate,
                                               backends.default_backend())
-
-
-        'cn'
-
-
+        try:
+            cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0]
+            host_names['cn'] = cn.value.lower()
+        except Exception as e:
+            LOG.debug(f'Unable to get CN from certificate due to: {e}. '
+                      f'Assuming subject alternative names are present.')
         try:
             ext = cert.extensions.get_extension_for_oid(
                 x509.OID_SUBJECT_ALTERNATIVE_NAME

@@ -274,7 +274,17 @@ def get_host_names(certificate):
         LOG.debug("%s extension not found",
                   x509.OID_SUBJECT_ALTERNATIVE_NAME)

+        # Certs with no subject are valid as long as a subject alternative
+        # name is present. If both are missing, it is an invalid cert per
+        # the x.509 standard.
+        if not host_names['cn'] and not host_names['dns_names']:
+            LOG.warning('No CN or DNSName(s) found in certificate. The '
+                        'certificate is invalid.')
+            raise exceptions.MissingCertSubject()
+
         return host_names
+    except exceptions.MissingCertSubject:
+        raise
     except Exception as e:
         LOG.exception('Unreadable Certificate.')
         raise exceptions.UnreadableCert from e

@@ -359,6 +369,10 @@ def load_certificates_data(cert_mngr, obj, context=None):
                     cert_mngr.get_cert(context,
                                        obj.tls_certificate_id,
                                        check_only=True))
+        except exceptions.MissingCertSubject:
+            # This was logged below, so raise as is to provide a clear
+            # user error
+            raise
         except Exception as e:
             LOG.warning('Unable to retrieve certificate: %s due to %s.',
                         obj.tls_certificate_id, str(e))
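The new rule follows RFC 5280 semantics: a certificate without a subject CN is still usable when a SubjectAlternativeName is present, and only a cert lacking both is rejected with the new MissingCertSubject (HTTP 400). A hedged sketch using the cryptography package to build a throwaway self-signed, SAN-only cert:

import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec

key = ec.generate_private_key(ec.SECP256R1())
empty = x509.Name([])                       # no subject, hence no CN
cert = (x509.CertificateBuilder()
        .subject_name(empty).issuer_name(empty)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime(2023, 1, 1))
        .not_valid_after(datetime.datetime(2033, 1, 1))
        .add_extension(x509.SubjectAlternativeName(
            [x509.DNSName('www.example.com')]), critical=False)
        .sign(key, hashes.SHA256()))

print(cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME))   # []
san = cert.extensions.get_extension_for_oid(
    x509.OID_SUBJECT_ALTERNATIVE_NAME)
print(san.value.get_values_for_type(x509.DNSName))   # ['www.example.com']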
octavia/controller/worker/task_utils.py
CHANGED
@@ -14,18 +14,32 @@

 """ Methods common to the controller work tasks."""

+from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_utils import excutils
+import tenacity

 from octavia.common import constants
 from octavia.db import api as db_apis
 from octavia.db import repositories as repo

+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)


 class TaskUtils(object):
     """Class of helper/utility methods used by tasks."""

+    status_update_retry = tenacity.retry(
+        retry=tenacity.retry_if_exception_type(Exception),
+        wait=tenacity.wait_incrementing(
+            CONF.controller_worker.db_commit_retry_initial_delay,
+            CONF.controller_worker.db_commit_retry_backoff,
+            CONF.controller_worker.db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.controller_worker.db_commit_retry_attempts),
+        after=tenacity.after_log(LOG, logging.DEBUG))
+
     def __init__(self, **kwargs):
         self.amphora_repo = repo.AmphoraRepository()
         self.health_mon_repo = repo.HealthMonitorRepository()

@@ -160,6 +174,7 @@ class TaskUtils(object):
                       "provisioning status to ERROR due to: "
                       "%(except)s", {'list': listener_id, 'except': str(e)})

+    @status_update_retry
     def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
         """Sets a load balancer provisioning status to ERROR.

@@ -175,9 +190,12 @@ class TaskUtils(object):
                 id=loadbalancer_id,
                 provisioning_status=constants.ERROR)
         except Exception as e:
-
-
-
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ERROR due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})

     def mark_listener_prov_status_active(self, listener_id):
         """Sets a listener provisioning status to ACTIVE.

@@ -214,6 +232,7 @@ class TaskUtils(object):
                       "to ACTIVE due to: %(except)s", {'pool': pool_id,
                                                        'except': str(e)})

+    @status_update_retry
     def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
         """Sets a load balancer provisioning status to ACTIVE.

@@ -229,9 +248,12 @@ class TaskUtils(object):
                 id=loadbalancer_id,
                 provisioning_status=constants.ACTIVE)
         except Exception as e:
-
-
-
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ACTIVE due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})

     def mark_member_prov_status_error(self, member_id):
         """Sets a member provisioning status to ERROR.
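The retry wiring can be reproduced in isolation (a hedged sketch with shrunken timings, not the Octavia code): tenacity re-invokes the decorated function, while excutils.save_and_reraise_exception() logs each failure and re-raises so tenacity sees it; once the stop condition is hit, tenacity raises RetryError.

import logging
import tenacity
from oslo_utils import excutils

LOG = logging.getLogger(__name__)

status_update_retry = tenacity.retry(
    retry=tenacity.retry_if_exception_type(Exception),
    wait=tenacity.wait_incrementing(0.1, 0.1, 0.3),
    stop=tenacity.stop_after_attempt(3),
    after=tenacity.after_log(LOG, logging.DEBUG))

attempts = []

@status_update_retry
def mark_status(lb_id):
    attempts.append(lb_id)
    try:
        raise RuntimeError('db not ready')
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error('Failed to update load balancer %s', lb_id)

try:
    mark_status('lb-1')
except tenacity.RetryError:
    print(len(attempts), 'attempts')   # 3 attempts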
octavia/controller/worker/v2/controller_worker.py
CHANGED
@@ -394,8 +394,8 @@ class ControllerWorker(object):
             constants.SERVER_GROUP_ID: db_lb.server_group_id,
             constants.PROJECT_ID: db_lb.project_id}
         if cascade:
-            listeners = flow_utils.get_listeners_on_lb(db_lb)
-            pools = flow_utils.get_pools_on_lb(db_lb)
+            listeners = flow_utils.get_listeners_on_lb(db_lb, True)
+            pools = flow_utils.get_pools_on_lb(db_lb, True)

         self.run_flow(
             flow_utils.get_cascade_delete_load_balancer_flow,
octavia/controller/worker/v2/flows/amphora_flows.py
CHANGED
@@ -226,7 +226,8 @@ class AmphoraFlows(object):
         return delete_amphora_flow

     def get_vrrp_subflow(self, prefix, timeout_dict=None,
-                         create_vrrp_group=True):
+                         create_vrrp_group=True,
+                         get_amphorae_status=True):
         sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
         vrrp_subflow = linear_flow.Flow(sf_name)

@@ -242,6 +243,17 @@ class AmphoraFlows(object):
             requires=constants.LOADBALANCER_ID,
             provides=constants.AMPHORAE_NETWORK_CONFIG))

+        if get_amphorae_status:
+            # Get the amphorae_status dict in case the caller hasn't fetched
+            # it yet.
+            vrrp_subflow.add(
+            amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                    name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS,
+                    requires=constants.AMPHORAE,
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    inject={constants.TIMEOUT_DICT: timeout_dict},
+                    provides=constants.AMPHORAE_STATUS))
+
         # VRRP update needs to be run on all amphora to update
         # their peer configurations. So parallelize this with an
         # unordered subflow.

@@ -252,7 +264,8 @@ class AmphoraFlows(object):

         amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
             name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
-            requires=constants.AMPHORAE,
+            requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 0,
                     constants.TIMEOUT_DICT: timeout_dict},
             provides=constants.AMP_VRRP_INT))

@@ -261,13 +274,15 @@ class AmphoraFlows(object):
             name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
             requires=(constants.LOADBALANCER_ID,
                       constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
-                      constants.AMP_VRRP_INT),
+                      constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 0,
                     constants.TIMEOUT_DICT: timeout_dict}))

         amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
             name=sf_name + '-0-' + constants.AMP_VRRP_START,
-            requires=constants.AMPHORAE,
+            requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 0,
                     constants.TIMEOUT_DICT: timeout_dict}))

@@ -275,7 +290,8 @@ class AmphoraFlows(object):

         amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
             name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
-            requires=constants.AMPHORAE,
+            requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 1,
                     constants.TIMEOUT_DICT: timeout_dict},
             provides=constants.AMP_VRRP_INT))

@@ -284,12 +300,14 @@ class AmphoraFlows(object):
             name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
             requires=(constants.LOADBALANCER_ID,
                       constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
-                      constants.AMP_VRRP_INT),
+                      constants.AMPHORAE_STATUS, constants.AMP_VRRP_INT),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 1,
                     constants.TIMEOUT_DICT: timeout_dict}))
         amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
             name=sf_name + '-1-' + constants.AMP_VRRP_START,
-            requires=constants.AMPHORAE,
+            requires=(constants.AMPHORAE, constants.AMPHORAE_STATUS),
+            rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
             inject={constants.AMPHORA_INDEX: 1,
                     constants.TIMEOUT_DICT: timeout_dict}))

@@ -538,6 +556,14 @@ class AmphoraFlows(object):
             constants.CONN_RETRY_INTERVAL:
                 CONF.haproxy_amphora.active_connection_retry_interval}

+        failover_amp_flow.add(
+            amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS,
+                requires=constants.AMPHORAE,
+                rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                inject={constants.TIMEOUT_DICT: timeout_dict},
+                provides=constants.AMPHORAE_STATUS))
+
         # Listeners update needs to be run on all amphora to update
         # their peer configurations. So parallelize this with an
         # unordered subflow.

@@ -548,7 +574,9 @@ class AmphoraFlows(object):
             update_amps_subflow.add(
                 amphora_driver_tasks.AmphoraIndexListenerUpdate(
                     name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: amp_index,
                             constants.TIMEOUT_DICT: timeout_dict}))

@@ -558,7 +586,8 @@ class AmphoraFlows(object):
         if lb_amp_count == 2:
             failover_amp_flow.add(
                 self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
-                                      timeout_dict, create_vrrp_group=False))
+                                      timeout_dict, create_vrrp_group=False,
+                                      get_amphorae_status=False))

         # Reload the listener. This needs to be done here because
         # it will create the required haproxy check scripts for

@@ -574,7 +603,9 @@ class AmphoraFlows(object):
                 amphora_driver_tasks.AmphoraIndexListenersReload(
                     name=(str(amp_index) + '-' +
                           constants.AMPHORA_RELOAD_LISTENER),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: amp_index,
                             constants.TIMEOUT_DICT: timeout_dict}))
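All of these hunks use the same taskflow wiring: `requires` names flow inputs, `inject` supplies constants, and `rebind` maps the task's new_amphora_id argument onto the flow's amphora_id value. A toy sketch of that pattern (the task name and the status payload shape are illustrative, not the Octavia tasks):

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class ShowStatus(task.Task):
    def execute(self, amphorae, amphorae_status, new_amphora_id, timeout_dict):
        # Tasks can skip amphorae whose status dict marks them unreachable.
        print(new_amphora_id, amphorae_status.get(new_amphora_id), timeout_dict)

flow = linear_flow.Flow('demo')
flow.add(ShowStatus(requires=('amphorae', 'amphorae_status'),
                    rebind={'new_amphora_id': 'amphora_id'},
                    inject={'timeout_dict': {'req_conn_timeout': 1}}))

engines.run(flow, store={'amphorae': [],
                         'amphorae_status': {'amp-1': {'unreachable': False}},
                         'amphora_id': 'amp-1'})
# amp-1 {'unreachable': False} {'req_conn_timeout': 1}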
octavia/controller/worker/v2/flows/flow_utils.py
CHANGED
@@ -41,29 +41,31 @@ def get_delete_load_balancer_flow(lb):
     return LB_FLOWS.get_delete_load_balancer_flow(lb)


-def get_listeners_on_lb(db_lb):
+def get_listeners_on_lb(db_lb, for_delete=False):
     """Get a list of the listeners on a load balancer.

     :param db_lb: A load balancer database model object.
+    :param for_delete: Skip errors on tls certs loading.
     :returns: A list of provider dict format listeners.
     """
     listener_dicts = []
     for listener in db_lb.listeners:
         prov_listener = provider_utils.db_listener_to_provider_listener(
-            listener)
+            listener, for_delete)
         listener_dicts.append(prov_listener.to_dict())
     return listener_dicts


-def get_pools_on_lb(db_lb):
+def get_pools_on_lb(db_lb, for_delete=False):
     """Get a list of the pools on a load balancer.

     :param db_lb: A load balancer database model object.
+    :param for_delete: Skip errors on tls certs loading.
     :returns: A list of provider dict format pools.
     """
     pool_dicts = []
     for pool in db_lb.pools:
-        prov_pool = provider_utils.db_pool_to_provider_pool(pool)
+        prov_pool = provider_utils.db_pool_to_provider_pool(pool, for_delete)
         pool_dicts.append(prov_pool.to_dict())
     return pool_dicts