octavia 13.0.0__py3-none-any.whl → 14.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/lvs_listener_base.py +1 -1
- octavia/amphorae/backends/agent/api_server/osutils.py +5 -5
- octavia/amphorae/backends/agent/api_server/plug.py +3 -2
- octavia/amphorae/backends/agent/api_server/rules_schema.py +52 -0
- octavia/amphorae/backends/agent/api_server/server.py +28 -1
- octavia/amphorae/backends/utils/interface.py +45 -6
- octavia/amphorae/backends/utils/interface_file.py +9 -6
- octavia/amphorae/backends/utils/nftable_utils.py +125 -0
- octavia/amphorae/drivers/driver_base.py +27 -0
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +42 -10
- octavia/amphorae/drivers/health/heartbeat_udp.py +2 -2
- octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
- octavia/amphorae/drivers/noop_driver/driver.py +25 -0
- octavia/api/app.py +3 -0
- octavia/api/common/pagination.py +2 -2
- octavia/api/drivers/amphora_driver/flavor_schema.py +6 -1
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/health_monitor.py +0 -1
- octavia/api/v2/controllers/l7policy.py +0 -1
- octavia/api/v2/controllers/l7rule.py +0 -1
- octavia/api/v2/controllers/listener.py +0 -1
- octavia/api/v2/controllers/load_balancer.py +13 -7
- octavia/api/v2/controllers/member.py +6 -3
- octavia/api/v2/controllers/pool.py +6 -7
- octavia/api/v2/types/load_balancer.py +5 -1
- octavia/api/v2/types/pool.py +1 -1
- octavia/certificates/common/pkcs12.py +9 -9
- octavia/certificates/manager/barbican.py +24 -16
- octavia/certificates/manager/castellan_mgr.py +12 -7
- octavia/certificates/manager/local.py +4 -4
- octavia/certificates/manager/noop.py +106 -0
- octavia/cmd/driver_agent.py +1 -1
- octavia/cmd/health_checker.py +0 -4
- octavia/cmd/health_manager.py +1 -5
- octavia/cmd/house_keeping.py +1 -1
- octavia/cmd/interface.py +0 -4
- octavia/cmd/octavia_worker.py +0 -4
- octavia/cmd/prometheus_proxy.py +0 -5
- octavia/cmd/status.py +0 -6
- octavia/common/base_taskflow.py +1 -1
- octavia/common/clients.py +15 -3
- octavia/common/config.py +24 -6
- octavia/common/constants.py +34 -0
- octavia/common/data_models.py +3 -1
- octavia/common/exceptions.py +11 -0
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
- octavia/common/keystone.py +7 -7
- octavia/common/tls_utils/cert_parser.py +24 -10
- octavia/common/utils.py +6 -0
- octavia/common/validate.py +2 -2
- octavia/compute/drivers/nova_driver.py +23 -5
- octavia/controller/worker/task_utils.py +28 -6
- octavia/controller/worker/v2/controller_worker.py +49 -15
- octavia/controller/worker/v2/flows/amphora_flows.py +120 -21
- octavia/controller/worker/v2/flows/flow_utils.py +15 -13
- octavia/controller/worker/v2/flows/listener_flows.py +95 -5
- octavia/controller/worker/v2/flows/load_balancer_flows.py +74 -30
- octavia/controller/worker/v2/taskflow_jobboard_driver.py +17 -1
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +145 -24
- octavia/controller/worker/v2/tasks/compute_tasks.py +1 -1
- octavia/controller/worker/v2/tasks/database_tasks.py +72 -41
- octavia/controller/worker/v2/tasks/lifecycle_tasks.py +97 -41
- octavia/controller/worker/v2/tasks/network_tasks.py +57 -60
- octavia/controller/worker/v2/tasks/shim_tasks.py +28 -0
- octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py +1 -1
- octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py +1 -1
- octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py +1 -1
- octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py +36 -0
- octavia/db/models.py +1 -0
- octavia/db/prepare.py +1 -1
- octavia/db/repositories.py +53 -34
- octavia/distributor/drivers/driver_base.py +1 -1
- octavia/network/base.py +3 -16
- octavia/network/data_models.py +4 -1
- octavia/network/drivers/neutron/allowed_address_pairs.py +27 -26
- octavia/network/drivers/noop_driver/driver.py +10 -23
- octavia/tests/common/sample_certs.py +115 -0
- octavia/tests/common/sample_haproxy_prometheus +1 -1
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +37 -0
- octavia/tests/functional/api/test_healthcheck.py +2 -2
- octavia/tests/functional/api/v2/base.py +1 -1
- octavia/tests/functional/api/v2/test_listener.py +45 -0
- octavia/tests/functional/api/v2/test_load_balancer.py +17 -0
- octavia/tests/functional/db/base.py +9 -0
- octavia/tests/functional/db/test_models.py +2 -1
- octavia/tests/functional/db/test_repositories.py +55 -99
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +4 -2
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +201 -1
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +1 -1
- octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py +194 -0
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +27 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +15 -2
- octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +2 -1
- octavia/tests/unit/api/v2/types/test_pool.py +71 -0
- octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
- octavia/tests/unit/certificates/manager/test_noop.py +53 -0
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +5 -3
- octavia/tests/unit/common/test_config.py +35 -0
- octavia/tests/unit/common/test_keystone.py +32 -0
- octavia/tests/unit/common/test_utils.py +39 -0
- octavia/tests/unit/compute/drivers/test_nova_driver.py +22 -0
- octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
- octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
- octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py +64 -16
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +49 -9
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +265 -17
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +101 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +105 -42
- octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py +33 -0
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +85 -42
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +48 -51
- octavia/tests/unit/network/drivers/neutron/test_utils.py +2 -0
- octavia/tests/unit/network/drivers/noop_driver/test_driver.py +0 -7
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/README.rst +6 -1
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/diskimage-create.sh +10 -4
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/requirements.txt +0 -2
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/tox.ini +30 -13
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/AUTHORS +5 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/METADATA +6 -6
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/RECORD +134 -126
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/entry_points.txt +1 -1
- octavia-14.0.0.dist-info/pbr.json +1 -0
- octavia-13.0.0.dist-info/pbr.json +0 -1
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/LICENSE +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/README.rst +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/scripts/octavia-wsgi +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/LICENSE +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/WHEEL +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/top_level.txt +0 -0
octavia/cmd/status.py
CHANGED
@@ -12,8 +12,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import sys
-
 from oslo_config import cfg
 from oslo_upgradecheck import common_checks
 from oslo_upgradecheck import upgradecheck
@@ -120,7 +118,3 @@ class Checks(upgradecheck.UpgradeCommands):
 def main():
     return upgradecheck.main(
         CONF, project='octavia', upgrade_command=Checks())
-
-
-if __name__ == '__main__':
-    sys.exit(main())
octavia/common/base_taskflow.py
CHANGED
@@ -71,7 +71,7 @@ def _details_filter(obj):
 
 class FilteredJob(Job):
     def __str__(self):
-        # Override the
+        # Override the default __str__ method from taskflow.job.base.Job,
         # filter out private information from details
         cls_name = type(self).__name__
         details = _details_filter(self.details)
octavia/common/clients.py
CHANGED
@@ -79,11 +79,22 @@ class NeutronAuth(object):
         ksession = keystone.KeystoneSession('neutron')
         if not cls.neutron_client:
             sess = ksession.get_session()
-
-
+            kwargs = {'region_name': CONF.neutron.region_name}
+            # TODO(ricolin) `interface` option don't take list as option yet.
+            # We can move away from this when openstacksdk no longer depends
+            # on `interface`.
+            try:
+                interface = CONF.neutron.valid_interfaces[0]
+            except (TypeError, LookupError):
+                interface = CONF.neutron.valid_interfaces
+            if interface:
+                kwargs['interface'] = interface
             if CONF.neutron.endpoint_override:
                 kwargs['network_endpoint_override'] = (
                     CONF.neutron.endpoint_override)
+                if CONF.neutron.endpoint_override.startswith("https"):
+                    kwargs['insecure'] = CONF.neutron.insecure
+                    kwargs['cacert'] = CONF.neutron.cafile
 
             conn = openstack.connection.Connection(
                 session=sess, **kwargs)
@@ -103,7 +114,8 @@ class NeutronAuth(object):
         neutron_endpoint = CONF.neutron.endpoint_override
         if neutron_endpoint is None:
             endpoint_data = sess.get_endpoint_data(
-                service_type='network',
+                service_type='network',
+                interface=CONF.neutron.valid_interfaces,
                 region_name=CONF.neutron.region_name)
             neutron_endpoint = endpoint_data.catalog_url
 
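Note: the TODO in the new NeutronAuth code exists because openstacksdk's `interface` keyword does not accept a list, while `[neutron] valid_interfaces` is a list option that may also be unset. Below is a minimal standalone sketch of that fallback; the helper name `pick_interface` is made up for illustration and is not part of Octavia.

def pick_interface(valid_interfaces):
    # Mirrors the try/except above: take the first entry when a list is
    # configured, otherwise pass the value (e.g. None) through unchanged.
    try:
        return valid_interfaces[0]
    except (TypeError, LookupError):
        return valid_interfaces


assert pick_interface(['internal', 'public']) == 'internal'
assert pick_interface(None) is None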
octavia/common/config.py
CHANGED
@@ -534,7 +534,18 @@ controller_worker_opts = [
     cfg.BoolOpt('event_notifications', default=True,
                 help=_('Enable octavia event notifications. See '
                        'oslo_messaging_notifications section for additional '
-                       'requirements.'))
+                       'requirements.')),
+    # 2000 attempts is around 2h45 with the default settings
+    cfg.IntOpt('db_commit_retry_attempts', default=2000,
+               help=_('The number of times the database action will be '
+                      'attempted.')),
+    cfg.IntOpt('db_commit_retry_initial_delay', default=1,
+               help=_('The initial delay before a retry attempt.')),
+    cfg.IntOpt('db_commit_retry_backoff', default=1,
+               help=_('The time to backoff retry attempts.')),
+    cfg.IntOpt('db_commit_retry_max', default=5,
+               help=_('The maximum amount of time to wait between retry '
+                      'attempts.')),
 ]
 
 task_flow_opts = [
@@ -568,7 +579,9 @@ task_flow_opts = [
                 help='Jobboard backend server host(s).'),
     cfg.PortOpt('jobboard_backend_port', default=6379,
                 help='Jobboard backend server port'),
-    cfg.StrOpt('
+    cfg.StrOpt('jobboard_backend_username',
+               help='Jobboard backend server user name'),
+    cfg.StrOpt('jobboard_backend_password', secret=True,
                help='Jobboard backend server password'),
     cfg.StrOpt('jobboard_backend_namespace', default='octavia_jobboard',
                help='Jobboard name that should be used to store taskflow '
@@ -924,24 +937,29 @@ def register_cli_opts():
 def handle_neutron_deprecations():
     # Apply neutron deprecated options to their new setting if needed
 
-    #
+    # Basically: if the new option is not set and the value of the deprecated
+    # option is not the default, it means that the deprecated setting is still
+    # used in the config file:
     # * convert it to a valid "new" value if needed
     # * set it as the default for the new option
     # Thus [neutron].<new_option> has an higher precedence than
     # [neutron].<deprecated_option>
     loc = cfg.CONF.get_location('endpoint', 'neutron')
-
+    new_loc = cfg.CONF.get_location('endpoint_override', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
         cfg.CONF.set_default('endpoint_override', cfg.CONF.neutron.endpoint,
                              'neutron')
 
     loc = cfg.CONF.get_location('endpoint_type', 'neutron')
-
+    new_loc = cfg.CONF.get_location('valid_interfaces', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
         endpoint_type = cfg.CONF.neutron.endpoint_type.replace('URL', '')
         cfg.CONF.set_default('valid_interfaces', [endpoint_type],
                              'neutron')
 
     loc = cfg.CONF.get_location('ca_certificates_file', 'neutron')
-
+    new_loc = cfg.CONF.get_location('cafile', 'neutron')
+    if not new_loc and loc and loc.location != cfg.Locations.opt_default:
        cfg.CONF.set_default('cafile', cfg.CONF.neutron.ca_certificates_file,
                             'neutron')
 
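Note: the "2000 attempts is around 2h45" comment can be sanity-checked against the retry decorator that task_utils.py builds from these options (tenacity's wait_incrementing starts at db_commit_retry_initial_delay, grows by db_commit_retry_backoff, and is capped at db_commit_retry_max). A quick back-of-the-envelope script, assuming one wait between each pair of attempts:

attempts = 2000          # db_commit_retry_attempts
initial_delay = 1        # db_commit_retry_initial_delay
backoff = 1              # db_commit_retry_backoff
maximum = 5              # db_commit_retry_max

# Waits are 1, 2, 3, 4 seconds, then capped at 5 seconds for the rest.
total_wait = sum(min(initial_delay + i * backoff, maximum)
                 for i in range(attempts - 1))
print(f'{total_wait} seconds (~{total_wait / 3600:.2f} hours)')
# 9985 seconds (~2.77 hours), i.e. roughly 2h45 of waiting between attempts,
# ignoring the time spent in the database calls themselves.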
octavia/common/constants.py
CHANGED
@@ -308,15 +308,19 @@ AMP_DATA = 'amp_data'
 AMP_VRRP_INT = 'amp_vrrp_int'
 AMPHORA = 'amphora'
 AMPHORA_DICT = 'amphora_dict'
+AMPHORA_FIREWALL_RULES = 'amphora_firewall_rules'
 AMPHORA_ID = 'amphora_id'
 AMPHORA_INDEX = 'amphora_index'
 AMPHORA_NETWORK_CONFIG = 'amphora_network_config'
 AMPHORAE = 'amphorae'
 AMPHORAE_NETWORK_CONFIG = 'amphorae_network_config'
+AMPHORAE_STATUS = 'amphorae_status'
 AMPS_DATA = 'amps_data'
 ANTI_AFFINITY = 'anti-affinity'
 ATTEMPT_NUMBER = 'attempt_number'
 BASE_PORT = 'base_port'
+BINDING_VNIC_TYPE = 'binding_vnic_type'
+BUILD_AMP_DATA = 'build_amp_data'
 BYTES_IN = 'bytes_in'
 BYTES_OUT = 'bytes_out'
 CACHED_ZONE = 'cached_zone'
@@ -363,6 +367,7 @@ ID = 'id'
 IMAGE_ID = 'image_id'
 IP_ADDRESS = 'ip_address'
 IPV6_ICMP = 'ipv6-icmp'
+IS_SRIOV = 'is_sriov'
 LB_NETWORK_IP = 'lb_network_ip'
 L7POLICY = 'l7policy'
 L7POLICY_ID = 'l7policy_id'
@@ -387,6 +392,8 @@ MESSAGE = 'message'
 NAME = 'name'
 NETWORK = 'network'
 NETWORK_ID = 'network_id'
+NEW_AMPHORAE = 'new_amphorae'
+NEW_AMPHORA_ID = 'new_amphora_id'
 NEXTHOP = 'nexthop'
 NICS = 'nics'
 OBJECT = 'object'
@@ -404,6 +411,7 @@ POOL_CHILD_COUNT = 'pool_child_count'
 POOL_ID = 'pool_id'
 POOL_UPDATES = 'pool_updates'
 PORT = 'port'
+PORT_DATA = 'port_data'
 PORT_ID = 'port_id'
 PORTS = 'ports'
 PROJECT_ID = 'project_id'
@@ -435,6 +443,7 @@ TLS_CERTIFICATE_ID = 'tls_certificate_id'
 TLS_CONTAINER_ID = 'tls_container_id'
 TOPOLOGY = 'topology'
 TOTAL_CONNECTIONS = 'total_connections'
+UNREACHABLE = 'unreachable'
 UPDATED_AT = 'updated_at'
 UPDATE_DICT = 'update_dict'
 UPDATED_PORTS = 'updated_ports'
@@ -448,6 +457,11 @@ VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
 VIP_SG_ID = 'vip_sg_id'
 VIP_SUBNET = 'vip_subnet'
 VIP_SUBNET_ID = 'vip_subnet_id'
+VIP_VNIC_TYPE = 'vip_vnic_type'
+VNIC_TYPE = 'vnic_type'
+VNIC_TYPE_DIRECT = 'direct'
+VNIC_TYPE_NORMAL = 'normal'
+VRRP = 'vrrp'
 VRRP_ID = 'vrrp_id'
 VRRP_IP = 'vrrp_ip'
 VRRP_GROUP = 'vrrp_group'
@@ -456,6 +470,7 @@ VRRP_PORT_ID = 'vrrp_port_id'
 VRRP_PRIORITY = 'vrrp_priority'
 
 # Taskflow flow and task names
+AMP_UPDATE_FW_SUBFLOW = 'amphora-update-firewall-subflow'
 CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow'
 CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow'
 CREATE_AMPHORA_RETRY_SUBFLOW = 'octavia-create-amphora-retry-subflow'
@@ -484,6 +499,7 @@ DELETE_L7RULE_FLOW = 'octavia-delete-l7policy-flow'
 FAILOVER_AMPHORA_FLOW = 'octavia-failover-amphora-flow'
 FAILOVER_LOADBALANCER_FLOW = 'octavia-failover-loadbalancer-flow'
 FINALIZE_AMPHORA_FLOW = 'octavia-finalize-amphora-flow'
+FIREWALL_RULES_SUBFLOW = 'firewall-rules-subflow'
 LOADBALANCER_NETWORKING_SUBFLOW = 'octavia-new-loadbalancer-net-subflow'
 UPDATE_HEALTH_MONITOR_FLOW = 'octavia-update-health-monitor-flow'
 UPDATE_LISTENER_FLOW = 'octavia-update-listener-flow'
@@ -561,7 +577,9 @@ DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
 ADMIN_DOWN_PORT = 'admin-down-port'
 AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug'
 AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener'
+AMPHORA_TO_AMPHORAE_VRRP_IP = 'amphora-to-amphorae-vrrp-ip'
 AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert'
+AMPHORAE_GET_CONNECTIVITY_STATUS = 'amphorae-get-connectivity-status'
 AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug'
 ATTACH_PORT = 'attach-port'
 CALCULATE_AMPHORA_DELTA = 'calculate-amphora-delta'
@@ -569,8 +587,10 @@ CREATE_VIP_BASE_PORT = 'create-vip-base-port'
 DELETE_AMPHORA = 'delete-amphora'
 DELETE_PORT = 'delete-port'
 DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring'
+GET_AMPHORA_FIREWALL_RULES = 'get-amphora-firewall-rules'
 GET_AMPHORA_NETWORK_CONFIGS_BY_ID = 'get-amphora-network-configs-by-id'
 GET_AMPHORAE_FROM_LB = 'get-amphorae-from-lb'
+GET_SUBNET_FROM_VIP = 'get-subnet-from-vip'
 HANDLE_NETWORK_DELTA = 'handle-network-delta'
 MARK_AMPHORA_DELETED = 'mark-amphora-deleted'
 MARK_AMPHORA_PENDING_DELETE = 'mark-amphora-pending-delete'
@@ -580,6 +600,7 @@ RELOAD_LB_AFTER_AMP_ASSOC = 'reload-lb-after-amp-assoc'
 RELOAD_LB_AFTER_AMP_ASSOC_FULL_GRAPH = 'reload-lb-after-amp-assoc-full-graph'
 RELOAD_LB_AFTER_PLUG_VIP = 'reload-lb-after-plug-vip'
 RELOAD_LB_BEFOR_ALLOCATE_VIP = 'reload-lb-before-allocate-vip'
+SET_AMPHORA_FIREWALL_RULES = 'set-amphora-firewall-rules'
 UPDATE_AMP_FAILOVER_DETAILS = 'update-amp-failover-details'
 
 
@@ -896,6 +917,7 @@ VIP_SECURITY_GROUP_PREFIX = 'lb-'
 
 AMP_BASE_PORT_PREFIX = 'octavia-lb-vrrp-'
 OCTAVIA_OWNED = 'octavia_owned'
+OCTAVIA_OWNER = 'Octavia'
 
 # Sadly in the LBaaS v2 API, header insertions are on the listener objects
 # but they should be on the pool. Dealing with it until v3.
@@ -910,6 +932,8 @@ AMPHORA_SUPPORTED_ALPN_PROTOCOLS = [lib_consts.ALPN_PROTOCOL_HTTP_2,
                                     lib_consts.ALPN_PROTOCOL_HTTP_1_1,
                                     lib_consts.ALPN_PROTOCOL_HTTP_1_0]
 
+SRIOV_VIP = 'sriov_vip'
+
 # Amphora interface fields
 IF_TYPE = 'if_type'
 BACKEND = 'backend'
@@ -950,3 +974,13 @@ IFLA_IFNAME = 'IFLA_IFNAME'
 
 # Amphora network directory
 AMP_NET_DIR_TEMPLATE = '/etc/octavia/interfaces/'
+
+# Amphora nftables constants
+NFT_ADD = 'add'
+NFT_CMD = '/usr/sbin/nft'
+NFT_FAMILY = 'inet'
+NFT_VIP_RULES_FILE = '/var/lib/octavia/nftables-vip.rules'
+NFT_VIP_TABLE = 'amphora_vip'
+NFT_VIP_CHAIN = 'amphora_vip_chain'
+NFT_SRIOV_PRIORITY = '-310'
+PROTOCOL = 'protocol'
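Note: the new NFT_* constants back the nftables-based VIP filtering added alongside SR-IOV VIP support (see the new nftable_utils.py and rules_schema.py in the file list above). The sketch below only illustrates how such constants could compose nft invocations; it is not Octavia's nftable_utils implementation, and the command sequence is an assumption based on the constant names.

import subprocess

NFT_CMD = '/usr/sbin/nft'
NFT_ADD = 'add'
NFT_FAMILY = 'inet'
NFT_VIP_TABLE = 'amphora_vip'
NFT_VIP_CHAIN = 'amphora_vip_chain'
NFT_VIP_RULES_FILE = '/var/lib/octavia/nftables-vip.rules'


def ensure_vip_table_and_chain():
    # 'nft add table' / 'nft add chain' create the objects if missing.
    subprocess.run([NFT_CMD, NFT_ADD, 'table', NFT_FAMILY, NFT_VIP_TABLE],
                   check=True)
    subprocess.run([NFT_CMD, NFT_ADD, 'chain', NFT_FAMILY, NFT_VIP_TABLE,
                    NFT_VIP_CHAIN], check=True)


def load_vip_rules():
    # Presumably the agent renders allowed protocol/port rules into
    # NFT_VIP_RULES_FILE and loads the file with 'nft -f'.
    subprocess.run([NFT_CMD, '-f', NFT_VIP_RULES_FILE], check=True)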
octavia/common/data_models.py
CHANGED
@@ -557,7 +557,8 @@ class Vip(BaseDataModel):
 
     def __init__(self, load_balancer_id=None, ip_address=None,
                  subnet_id=None, network_id=None, port_id=None,
-                 load_balancer=None, qos_policy_id=None, octavia_owned=None
+                 load_balancer=None, qos_policy_id=None, octavia_owned=None,
+                 vnic_type=None):
         self.load_balancer_id = load_balancer_id
         self.ip_address = ip_address
         self.subnet_id = subnet_id
@@ -566,6 +567,7 @@ class Vip(BaseDataModel):
         self.load_balancer = load_balancer
         self.qos_policy_id = qos_policy_id
         self.octavia_owned = octavia_owned
+        self.vnic_type = vnic_type
 
 
 class AdditionalVip(BaseDataModel):
octavia/common/exceptions.py
CHANGED
@@ -133,6 +133,12 @@ class UnreadablePKCS12(APIException):
     code = 400
 
 
+class MissingCertSubject(APIException):
+    msg = _('No CN or DNSName(s) found in certificate. The certificate is '
+            'invalid.')
+    code = 400
+
+
 class MisMatchedKey(OctaviaException):
     message = _("Key and x509 certificate do not match")
 
@@ -248,6 +254,11 @@ class ComputePortInUseException(OctaviaException):
     message = _('Compute driver reports port %(port)s is already in use.')
 
 
+class ComputeNoResourcesException(OctaviaException):
+    message = _('The compute service does not have the resources available to '
+                'fulfill the request')
+
+
 class ComputeUnknownException(OctaviaException):
     message = _('Unknown exception from the compute driver: %(exc)s.')
 
octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2
CHANGED
@@ -208,13 +208,18 @@ frontend {{ listener.id }}
     {% else %}
         {% set monitor_port_opt = "" %}
     {% endif %}
+    {% if pool.alpn_protocols is defined %}
+        {% set alpn_opt = " check-alpn %s"|format(pool.alpn_protocols) %}
+    {% else %}
+        {% set alpn_opt = "" %}
+    {% endif %}
     {% if pool.health_monitor.type == constants.HEALTH_MONITOR_HTTPS %}
         {% set monitor_ssl_opt = " check-ssl verify none" %}
     {% else %}
         {% set monitor_ssl_opt = "" %}
     {% endif %}
-    {% set hm_opt = " check%s inter %ds fall %d rise %d%s%s"|format(
-        monitor_ssl_opt, pool.health_monitor.delay,
+    {% set hm_opt = " check%s%s inter %ds fall %d rise %d%s%s"|format(
+        monitor_ssl_opt, alpn_opt, pool.health_monitor.delay,
         pool.health_monitor.fall_threshold,
         pool.health_monitor.rise_threshold, monitor_addr_opt,
         monitor_port_opt) %}
@@ -370,9 +375,6 @@ backend {{ pool.id }}:{{ listener.id }}
     option httpchk {{ pool.health_monitor.http_method }} {{ pool.health_monitor.url_path }}
 {% endif %}
     http-check expect rstatus {{ pool.health_monitor.expected_codes }}
-{% endif %}
-{% if pool.health_monitor.type == constants.HEALTH_MONITOR_TLS_HELLO %}
-    option ssl-hello-chk
 {% endif %}
 {% if pool.health_monitor.type == constants.HEALTH_MONITOR_PING %}
     option external-check
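Note: the template change threads a new `alpn_opt` into the existing health-monitor option string, so backend health checks can negotiate ALPN via haproxy's `check-alpn`. The snippet below reproduces the template's format call in Python with made-up values to show what the rendered server option looks like:

monitor_ssl_opt = " check-ssl verify none"   # HTTPS health monitor
alpn_opt = " check-alpn h2,http/1.1"         # pool.alpn_protocols (example)
delay, fall, rise = 10, 3, 2                 # health monitor settings (example)
monitor_addr_opt = monitor_port_opt = ""

hm_opt = " check%s%s inter %ds fall %d rise %d%s%s" % (
    monitor_ssl_opt, alpn_opt, delay, fall, rise,
    monitor_addr_opt, monitor_port_opt)
print(hm_opt)
# " check check-ssl verify none check-alpn h2,http/1.1 inter 10s fall 3 rise 2"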
octavia/common/keystone.py
CHANGED
@@ -80,14 +80,14 @@ class KeystoneSession(object):
 
         config = getattr(cfg.CONF, self.section)
         for opt in config:
-            # For each option in the [
-            # location, if the location is 'opt_default'
-            #
-            #
-            #
+            # For each option in the [section] section, get its setting
+            # location, if the location is 'opt_default', it means that
+            # the option is not configured in the config file.
+            # if the option is also defined in [service_auth], the
+            # option of the [section] can be replaced by the one from
+            # [service_auth]
             loc = cfg.CONF.get_location(opt, self.section)
-            if not loc or loc.location
-                cfg.Locations.set_default):
+            if not loc or loc.location == cfg.Locations.opt_default:
                 if hasattr(cfg.CONF.service_auth, opt):
                     cur_value = getattr(config, opt)
                     value = getattr(cfg.CONF.service_auth, opt)
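Note: the corrected comparison against cfg.Locations.opt_default is what lets an option that is still at its default in the service section fall back to [service_auth]. A self-contained oslo.config sketch of that mechanism follows; the option name and default value are illustrative, not Octavia's configuration.

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([cfg.StrOpt('cafile')], group='neutron')
CONF.register_opts([cfg.StrOpt('cafile', default='/etc/ssl/service-auth-ca.pem')],
                   group='service_auth')
CONF([])  # no config file, so [neutron] cafile stays at its opt_default

loc = CONF.get_location('cafile', 'neutron')
if not loc or loc.location == cfg.Locations.opt_default:
    # Not explicitly configured for [neutron]: inherit from [service_auth].
    CONF.set_override('cafile', CONF.service_auth.cafile, group='neutron')

print(CONF.neutron.cafile)  # /etc/ssl/service-auth-ca.pem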
octavia/common/tls_utils/cert_parser.py
CHANGED
@@ -132,7 +132,7 @@ def _prepare_x509_cert(cert=None):
 def _split_x509s(xstr):
     """Split the input string into individual x509 text blocks
 
-    :param xstr: A large multi x509 certificate
+    :param xstr: A large multi x509 certificate block
     :returns: A list of strings where each string represents an
               X509 pem block surrounded by BEGIN CERTIFICATE,
               END CERTIFICATE block tags
@@ -164,16 +164,14 @@ def _parse_pkcs7_bundle(pkcs7):
     if PKCS7_BEG in pkcs7:
         try:
             for substrate in _read_pem_blocks(pkcs7):
-
-                    yield cert
+                yield from _get_certs_from_pkcs7_substrate(substrate)
         except Exception as e:
             LOG.exception('Unreadable Certificate.')
             raise exceptions.UnreadableCert from e
 
     # If no PEM encoding, assume this is DER encoded and try to decode
     else:
-
-            yield cert
+        yield from _get_certs_from_pkcs7_substrate(pkcs7)
 
 
 def _read_pem_blocks(data):
@@ -256,14 +254,16 @@ def get_host_names(certificate):
     """
     if isinstance(certificate, str):
         certificate = certificate.encode('utf-8')
+    host_names = {'cn': None, 'dns_names': []}
     try:
         cert = x509.load_pem_x509_certificate(certificate,
                                               backends.default_backend())
-
-
-        'cn'
-
-
+        try:
+            cn = cert.subject.get_attributes_for_oid(x509.OID_COMMON_NAME)[0]
+            host_names['cn'] = cn.value.lower()
+        except Exception as e:
+            LOG.debug(f'Unable to get CN from certificate due to: {e}. '
+                      f'Assuming subject alternative names are present.')
         try:
             ext = cert.extensions.get_extension_for_oid(
                 x509.OID_SUBJECT_ALTERNATIVE_NAME
@@ -274,7 +274,17 @@ def get_host_names(certificate):
             LOG.debug("%s extension not found",
                       x509.OID_SUBJECT_ALTERNATIVE_NAME)
 
+        # Certs with no subject are valid as long as a subject alternative
+        # name is present. If both are missing, it is an invalid cert per
+        # the x.509 standard.
+        if not host_names['cn'] and not host_names['dns_names']:
+            LOG.warning('No CN or DNSName(s) found in certificate. The '
+                        'certificate is invalid.')
+            raise exceptions.MissingCertSubject()
+
         return host_names
+    except exceptions.MissingCertSubject:
+        raise
     except Exception as e:
         LOG.exception('Unreadable Certificate.')
         raise exceptions.UnreadableCert from e
@@ -359,6 +369,10 @@ def load_certificates_data(cert_mngr, obj, context=None):
                 cert_mngr.get_cert(context,
                                    obj.tls_certificate_id,
                                    check_only=True))
+        except exceptions.MissingCertSubject:
+            # This was logged below, so raise as is to provide a clear
+            # user error
+            raise
        except Exception as e:
            LOG.warning('Unable to retrieve certificate: %s due to %s.',
                        obj.tls_certificate_id, str(e))
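Note: the CN/SAN lookups that get_host_names() now guards can be exercised standalone with the cryptography library. This sketch is not Octavia's helper; it uses a throwaway self-signed certificate to show the same two lookups and why a cert with neither CN nor SAN yields nothing usable.

import datetime

from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import ExtensionOID, NameOID


def get_host_names(cert):
    names = {'cn': None, 'dns_names': []}
    try:
        cn = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0]
        names['cn'] = cn.value.lower()
    except IndexError:
        pass  # no CN in the subject, rely on the SAN extension
    try:
        san = cert.extensions.get_extension_for_oid(
            ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
        names['dns_names'] = san.value.get_values_for_type(x509.DNSName)
    except x509.ExtensionNotFound:
        pass
    return names


# Build a throwaway self-signed certificate with both a CN and a SAN.
key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, 'www.example.com')])
now = datetime.datetime.utcnow()
cert = (x509.CertificateBuilder()
        .subject_name(name).issuer_name(name)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=1))
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName('www.example.com')]),
            critical=False)
        .sign(key, hashes.SHA256()))

print(get_host_names(cert))
# {'cn': 'www.example.com', 'dns_names': ['www.example.com']}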
octavia/common/utils.py
CHANGED
@@ -191,3 +191,9 @@ class exception_logger(object):
             self.logger(e)
             return None
         return call
+
+
+def map_protocol_to_nftable_protocol(rule_dict):
+    rule_dict[constants.PROTOCOL] = (
+        constants.L4_PROTOCOL_MAP[rule_dict[constants.PROTOCOL]])
+    return rule_dict
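Note: the new map_protocol_to_nftable_protocol() helper rewrites a rule's protocol in place using constants.L4_PROTOCOL_MAP before the rule is shipped to the amphora firewall code. The standalone sketch below shows the idea; the mapping dict here is an assumption for illustration, not the real L4_PROTOCOL_MAP.

PROTOCOL = 'protocol'
L4_PROTOCOL_MAP = {      # assumed shape: listener protocol -> nftables keyword
    'TCP': 'tcp',
    'UDP': 'udp',
    'SCTP': 'sctp',
}


def map_protocol_to_nftable_protocol(rule_dict):
    rule_dict[PROTOCOL] = L4_PROTOCOL_MAP[rule_dict[PROTOCOL]]
    return rule_dict


print(map_protocol_to_nftable_protocol({PROTOCOL: 'TCP', 'port': 443}))
# {'protocol': 'tcp', 'port': 443}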
octavia/common/validate.py
CHANGED
@@ -229,7 +229,7 @@ def validate_l7rule_ssl_types(l7rule):
             # log or raise key and value must be specified.
             msg = 'L7rule type {0} needs to specify a key and a value.'.format(
                 rule_type)
-        # log or raise the key must be
+        # log or raise the key must be split by '-'
         elif not dn_regex.match(req_key):
             msg = 'Invalid L7rule distinguished name field.'
 
@@ -246,7 +246,7 @@ def sanitize_l7policy_api_args(l7policy, create=False):
     redirect_pool_id exists in the database, but will raise an
     error if a redirect_url doesn't look like a URL.
 
-    :param l7policy: The L7 Policy dictionary we are
+    :param l7policy: The L7 Policy dictionary we are sanitizing / validating
     """
     if 'action' in l7policy.keys():
         if l7policy['action'] == constants.L7POLICY_ACTION_REJECT:
octavia/compute/drivers/nova_driver.py
CHANGED
@@ -348,15 +348,33 @@ class VirtualMachineManager(compute_base.ComputeBase):
             if 'Port' in str(e):
                 raise exceptions.NotFound(resource='Port', id=port_id)
             raise exceptions.NotFound(resource=str(e), id=compute_id)
+        except nova_exceptions.BadRequest as e:
+            if 'Failed to claim PCI device' in str(e):
+                message = ('Nova failed to claim PCI devices during '
+                           f'interface attach for port {port_id} on '
+                           f'instance {compute_id}')
+                LOG.error(message)
+                raise exceptions.ComputeNoResourcesException(message,
+                                                             exc=str(e))
+            raise
+        except nova_exceptions.ClientException as e:
+            if 'PortBindingFailed' in str(e):
+                message = ('Nova failed to bind the port during '
+                           f'interface attach for port {port_id} on '
+                           f'instance {compute_id}')
+                LOG.error(message)
+                raise exceptions.ComputeNoResourcesException(message,
+                                                             exc=str(e))
+            raise
         except Exception as e:
             LOG.error('Error attaching network %(network_id)s with ip '
-                      '%(ip_address)s and port %(
+                      '%(ip_address)s and port %(port_id)s to amphora '
                       '(compute_id: %(compute_id)s) ',
                       {
-
-
-
-
+                          constants.COMPUTE_ID: compute_id,
+                          constants.NETWORK_ID: network_id,
+                          constants.IP_ADDRESS: ip_address,
+                          constants.PORT_ID: port_id
                       })
             raise exceptions.ComputeUnknownException(exc=str(e))
         return interface
octavia/controller/worker/task_utils.py
CHANGED
@@ -14,18 +14,32 @@
 
 """ Methods common to the controller work tasks."""
 
+from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_utils import excutils
+import tenacity
 
 from octavia.common import constants
 from octavia.db import api as db_apis
 from octavia.db import repositories as repo
 
+CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 
 class TaskUtils(object):
     """Class of helper/utility methods used by tasks."""
 
+    status_update_retry = tenacity.retry(
+        retry=tenacity.retry_if_exception_type(Exception),
+        wait=tenacity.wait_incrementing(
+            CONF.controller_worker.db_commit_retry_initial_delay,
+            CONF.controller_worker.db_commit_retry_backoff,
+            CONF.controller_worker.db_commit_retry_max),
+        stop=tenacity.stop_after_attempt(
+            CONF.controller_worker.db_commit_retry_attempts),
+        after=tenacity.after_log(LOG, logging.DEBUG))
+
     def __init__(self, **kwargs):
         self.amphora_repo = repo.AmphoraRepository()
         self.health_mon_repo = repo.HealthMonitorRepository()
@@ -160,6 +174,7 @@ class TaskUtils(object):
                       "provisioning status to ERROR due to: "
                       "%(except)s", {'list': listener_id, 'except': str(e)})
 
+    @status_update_retry
     def mark_loadbalancer_prov_status_error(self, loadbalancer_id):
         """Sets a load balancer provisioning status to ERROR.
 
@@ -175,9 +190,12 @@ class TaskUtils(object):
                 id=loadbalancer_id,
                 provisioning_status=constants.ERROR)
         except Exception as e:
-
-
-
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ERROR due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})
 
     def mark_listener_prov_status_active(self, listener_id):
         """Sets a listener provisioning status to ACTIVE.
@@ -214,6 +232,7 @@ class TaskUtils(object):
                       "to ACTIVE due to: %(except)s", {'pool': pool_id,
                                                        'except': str(e)})
 
+    @status_update_retry
     def mark_loadbalancer_prov_status_active(self, loadbalancer_id):
         """Sets a load balancer provisioning status to ACTIVE.
 
@@ -229,9 +248,12 @@ class TaskUtils(object):
                 id=loadbalancer_id,
                 provisioning_status=constants.ACTIVE)
         except Exception as e:
-
-
-
+            # Reraise for tenacity
+            with excutils.save_and_reraise_exception():
+                LOG.error("Failed to update load balancer %(lb)s "
+                          "provisioning status to ACTIVE due to: "
+                          "%(except)s", {'lb': loadbalancer_id,
+                                         'except': str(e)})
 
     def mark_member_prov_status_error(self, member_id):
         """Sets a member provisioning status to ERROR.