octavia 15.0.0.0rc1__py3-none-any.whl → 16.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
- octavia/amphorae/backends/agent/api_server/loadbalancer.py +6 -6
- octavia/amphorae/backends/agent/api_server/plug.py +1 -1
- octavia/amphorae/backends/agent/api_server/util.py +35 -2
- octavia/amphorae/backends/health_daemon/status_message.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +12 -7
- octavia/api/drivers/amphora_driver/flavor_schema.py +5 -0
- octavia/api/drivers/noop_driver/driver.py +2 -1
- octavia/api/drivers/utils.py +12 -0
- octavia/api/root_controller.py +8 -2
- octavia/api/v2/controllers/base.py +8 -4
- octavia/api/v2/controllers/listener.py +12 -2
- octavia/api/v2/controllers/load_balancer.py +33 -1
- octavia/api/v2/controllers/member.py +58 -4
- octavia/api/v2/types/load_balancer.py +7 -1
- octavia/api/v2/types/member.py +3 -0
- octavia/common/base_taskflow.py +19 -10
- octavia/common/clients.py +8 -2
- octavia/common/config.py +17 -2
- octavia/common/constants.py +6 -0
- octavia/common/data_models.py +32 -2
- octavia/common/exceptions.py +5 -0
- octavia/common/utils.py +4 -1
- octavia/common/validate.py +16 -0
- octavia/compute/drivers/noop_driver/driver.py +30 -1
- octavia/controller/healthmanager/health_manager.py +7 -0
- octavia/controller/worker/v2/flows/amphora_flows.py +3 -5
- octavia/controller/worker/v2/flows/listener_flows.py +2 -1
- octavia/controller/worker/v2/flows/load_balancer_flows.py +38 -0
- octavia/controller/worker/v2/taskflow_jobboard_driver.py +34 -6
- octavia/controller/worker/v2/tasks/compute_tasks.py +9 -5
- octavia/controller/worker/v2/tasks/database_tasks.py +26 -6
- octavia/controller/worker/v2/tasks/network_tasks.py +118 -70
- octavia/db/base_models.py +29 -5
- octavia/db/migration/alembic_migrations/versions/3097e55493ae_add_sg_id_to_vip_table.py +39 -0
- octavia/db/migration/alembic_migrations/versions/8db7a6443785_add_member_vnic_type.py +36 -0
- octavia/db/migration/alembic_migrations/versions/fabf4983846b_add_member_port_table.py +40 -0
- octavia/db/models.py +43 -1
- octavia/db/repositories.py +88 -9
- octavia/network/base.py +29 -12
- octavia/network/data_models.py +2 -1
- octavia/network/drivers/neutron/allowed_address_pairs.py +55 -46
- octavia/network/drivers/neutron/base.py +28 -16
- octavia/network/drivers/neutron/utils.py +2 -2
- octavia/network/drivers/noop_driver/driver.py +150 -29
- octavia/policies/__init__.py +4 -0
- octavia/policies/advanced_rbac.py +95 -0
- octavia/policies/base.py +5 -101
- octavia/policies/keystone_default_roles.py +81 -0
- octavia/policies/loadbalancer.py +13 -0
- octavia/tests/common/constants.py +2 -1
- octavia/tests/common/sample_data_models.py +27 -14
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +5 -4
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +2 -1
- octavia/tests/functional/api/v2/test_health_monitor.py +1 -1
- octavia/tests/functional/api/v2/test_l7policy.py +1 -1
- octavia/tests/functional/api/v2/test_listener.py +1 -1
- octavia/tests/functional/api/v2/test_load_balancer.py +150 -4
- octavia/tests/functional/api/v2/test_member.py +50 -0
- octavia/tests/functional/api/v2/test_pool.py +1 -1
- octavia/tests/functional/api/v2/test_quotas.py +5 -8
- octavia/tests/functional/db/base.py +6 -6
- octavia/tests/functional/db/test_models.py +124 -1
- octavia/tests/functional/db/test_repositories.py +237 -19
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +10 -7
- octavia/tests/unit/api/drivers/test_utils.py +6 -1
- octavia/tests/unit/certificates/generator/test_local.py +1 -1
- octavia/tests/unit/common/test_base_taskflow.py +4 -3
- octavia/tests/unit/compute/drivers/noop_driver/test_driver.py +28 -2
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +27 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +100 -79
- octavia/tests/unit/controller/worker/v2/test_taskflow_jobboard_driver.py +8 -0
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +62 -45
- octavia/tests/unit/network/drivers/neutron/test_base.py +7 -7
- octavia/tests/unit/network/drivers/noop_driver/test_driver.py +55 -42
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/tox.ini +0 -1
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/AUTHORS +3 -0
- octavia-16.0.0.dist-info/METADATA +156 -0
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/RECORD +95 -90
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/WHEEL +1 -1
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/entry_points.txt +1 -1
- octavia-16.0.0.dist-info/pbr.json +1 -0
- octavia-15.0.0.0rc1.dist-info/METADATA +0 -156
- octavia-15.0.0.0rc1.dist-info/pbr.json +0 -1
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/LICENSE +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/README.rst +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-15.0.0.0rc1.data → octavia-16.0.0.data}/scripts/octavia-wsgi +0 -0
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/LICENSE +0 -0
- {octavia-15.0.0.0rc1.dist-info → octavia-16.0.0.dist-info}/top_level.txt +0 -0
octavia/tests/functional/db/test_models.py:

```diff
@@ -289,7 +289,8 @@ class MemberModelTest(base.OctaviaDBTestBase, ModelTestMixin):
             f"project_id={member.project_id!r}, "
             f"provisioning_status='ACTIVE', "
             f"ip_address='10.0.0.1', protocol_port=80, "
-            f"operating_status='ONLINE', weight=None)",
+            f"operating_status='ONLINE', weight=None, "
+            f"vnic_type=None)",
             str(member))

         self.assertIsNotNone(member.created_at)
```
```diff
@@ -1049,6 +1050,7 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
         # Generate equivalent graphs starting arbitrarily from different
         # nodes within it; Make sure the resulting graphs all contain the
         # same number of nodes.
+        # check the default value for recursion_depth=None
         lb_dm = self.session.query(models.LoadBalancer).filter_by(
             id=self.lb.id).first().to_data_model()
         lb_graph_count = self.count_graph_nodes(lb_dm)
```
```diff
@@ -1063,6 +1065,82 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
         self.assertEqual(lb_graph_count, p_graph_count)
         self.assertEqual(lb_graph_count, mem_graph_count)

+    def _get_dms_for_recursion_depth(self, recursion_depth):
+        lb_dm = self.session.query(models.LoadBalancer).filter_by(
+            id=self.lb.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        p_dm = self.session.query(models.Pool).filter_by(
+            id=self.pool.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        mem_dm = self.session.query(models.Member).filter_by(
+            id=self.member.id).first().to_data_model(
+            recursion_depth=recursion_depth)
+        return lb_dm, p_dm, mem_dm
+
+    def _get_nodes_count_for_dms(self, lb_dm, p_dm, mem_dm):
+        return (
+            self.count_graph_nodes(lb_dm),
+            self.count_graph_nodes(p_dm),
+            self.count_graph_nodes(mem_dm),
+        )
+
+    def test_graph_completeness_with_recursion_depth_equal_zero(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=0
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        # recursion_depth equal to 0 means, that only current node will be
+        # handled. there is no recursion
+        self.assertEqual(1, lb_graph_count)
+        self.assertEqual(1, p_graph_count)
+        self.assertEqual(1, mem_graph_count)
+
+    def test_graph_completeness_with_recursion_depth_equal_one(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=1
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        self.assertNotEqual(1, lb_graph_count)
+        self.assertNotEqual(1, p_graph_count)
+        self.assertNotEqual(1, mem_graph_count)
+        # the nodes count is different for each node type
+        # due to different number of related nodes
+        self.assertEqual(5, lb_graph_count)
+        self.assertEqual(7, p_graph_count)
+        self.assertEqual(2, mem_graph_count)
+
+    def test_graph_completeness_with_recursion_depth_huge(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=10
+        )
+        lb_graph_count, p_graph_count, mem_graph_count = (
+            self._get_nodes_count_for_dms(lb_dm, p_dm, mem_dm)
+        )
+        # recursion_depth=None is default value, so it's equal to run without
+        # limit on recursion
+        lb_dm_none, p_dm_none, mem_dm_none = self._get_dms_for_recursion_depth(
+            recursion_depth=None
+        )
+        lb_graph_count_none, p_graph_count_none, mem_graph_count_none = (
+            self._get_nodes_count_for_dms(lb_dm_none, p_dm_none, mem_dm_none)
+        )
+        self.assertNotEqual(0, lb_graph_count)
+        self.assertNotEqual(1, lb_graph_count)
+        self.assertEqual(lb_graph_count, p_graph_count)
+        self.assertEqual(lb_graph_count, mem_graph_count)
+
+        # huge recursion_depth is enough to iterate through all nodes in graph
+        self.assertEqual(
+            (lb_graph_count, p_graph_count, mem_graph_count),
+            (lb_graph_count_none, p_graph_count_none, mem_graph_count_none)
+        )
+
     def test_data_model_graph_traversal(self):
         lb_dm = self.session.query(models.LoadBalancer).filter_by(
             id=self.lb.id).first().to_data_model()
```
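These tests pin down the contract of the new `recursion_depth` argument to `to_data_model()`: `0` converts only the node itself, `1` adds its direct neighbours, and `None` (the default) walks the entire graph. A minimal sketch of that contract on a toy node graph, with illustrative names rather than the real `octavia.db.models` classes:

```python
# Hedged sketch of depth-limited graph conversion; Node is illustrative,
# not the actual SQLAlchemy model implementation.
class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

    def to_data_model(self, recursion_depth=None):
        # recursion_depth=None means "no limit", matching the tests above.
        if recursion_depth is not None and recursion_depth <= 0:
            # Depth exhausted: emit this node only, no relationships.
            return {'name': self.name, 'children': []}
        next_depth = None if recursion_depth is None else recursion_depth - 1
        return {'name': self.name,
                'children': [c.to_data_model(recursion_depth=next_depth)
                             for c in self.children]}

lb = Node('lb', [Node('pool', [Node('member')]), Node('listener')])
assert lb.to_data_model(recursion_depth=0)['children'] == []
assert len(lb.to_data_model(recursion_depth=1)['children']) == 2
```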
```diff
@@ -1085,6 +1163,51 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
             load_balancer.pools[0].members[0].pool.load_balancer.id)
         self.assertEqual(lb_dm.id, m_lb_id)

+    def test_data_model_graph_traversal_with_recursion_depth_zero(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=0
+        )
+        # Traverse is not possible, because resources are not handled
+        # It happens, because there is no recursion
+        self.assertEqual([], lb_dm.listeners)
+        self.assertEqual([], lb_dm.pools)
+        self.assertIsNone(lb_dm.vip)
+        self.assertEqual([], lb_dm.amphorae)
+        # not inner objects for Pool
+        self.assertEqual([], p_dm.listeners)
+        self.assertIsNone(p_dm.load_balancer)
+        self.assertIsNone(p_dm.session_persistence)
+        self.assertIsNone(p_dm.health_monitor)
+        self.assertEqual([], p_dm.members)
+        self.assertEqual([], p_dm.l7policies)
+        # not inner objects for Member
+        self.assertIsNone(mem_dm.pool)
+
+    def test_data_model_graph_traversal_with_recursion_depth_one(self):
+        lb_dm, p_dm, mem_dm = self._get_dms_for_recursion_depth(
+            recursion_depth=1
+        )
+        # one hop resources are available for LB
+        self.assertEqual(1, len(lb_dm.listeners))
+        self.assertEqual(1, len(lb_dm.pools))
+        self.assertIsNotNone(lb_dm.vip)
+        self.assertEqual(1, len(lb_dm.amphorae))
+        # second hop resources are not available for LB
+        self.assertEqual([], lb_dm.pools[0].listeners)
+        # one hop resources are available for Pool
+        self.assertEqual(1, len(p_dm.listeners))
+        self.assertIsNotNone(p_dm.load_balancer)
+        self.assertIsNotNone(p_dm.session_persistence)
+        self.assertIsNotNone(p_dm.health_monitor)
+        self.assertEqual(1, len(p_dm.members))
+        self.assertEqual(1, len(p_dm.l7policies))
+        # second hop resources are not available for Pool
+        self.assertEqual([], p_dm.load_balancer.listeners)
+        # one hop resources are available for Member
+        self.assertIsNotNone(mem_dm.pool)
+        # second hop resources are not available for Member
+        self.assertEqual([], mem_dm.pool.listeners)
+
     def test_update_data_model_listener_default_pool_id(self):
         lb_dm = self.create_load_balancer(
             self.session, id=uuidutils.generate_uuid()).to_data_model()
```
octavia/tests/functional/db/test_repositories.py:

```diff
@@ -67,6 +67,16 @@ class BaseRepositoryTest(base.OctaviaDBTestBase):
         self.flavor_repo = repo.FlavorRepository()
         self.flavor_profile_repo = repo.FlavorProfileRepository()

+    def create_loadbalancer(self, lb_id):
+        lb = self.lb_repo.create(self.session, id=lb_id,
+                                 project_id=self.FAKE_UUID_2, name="lb_name",
+                                 description="lb_description",
+                                 provisioning_status=constants.ACTIVE,
+                                 operating_status=constants.ONLINE,
+                                 enabled=True)
+        self.session.commit()
+        return lb
+
     def test_get_all_return_value(self):
         pool_list, _ = self.pool_repo.get_all(self.session,
                                               project_id=self.FAKE_UUID_2)
```
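Note that this `create_loadbalancer()` helper added to `BaseRepositoryTest` is the same code removed from `TestListenerRepositoryTest` further down; hoisting it into the shared base class lets the new pool and member `limited_graph` tests reuse it.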
```diff
@@ -125,7 +135,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
                            'amp_build_slots', 'amp_build_req', 'quotas',
                            'flavor', 'flavor_profile', 'listener_cidr',
                            'availability_zone', 'availability_zone_profile',
-                           'additional_vip')
+                           'additional_vip', 'amphora_member_port')
         for repo_attr in repo_attr_names:
             single_repo = getattr(self.repos, repo_attr, None)
             message = (f"Class Repositories should have {repo_attr} instance "
```
```diff
@@ -163,7 +173,7 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
                'subnet_id': uuidutils.generate_uuid(),
                'network_id': uuidutils.generate_uuid(),
                'qos_policy_id': None, 'octavia_owned': True,
-               'vnic_type': None}
+               'vnic_type': None, 'sgs': []}
         additional_vips = [{'subnet_id': uuidutils.generate_uuid(),
                             'ip_address': '192.0.2.2'}]
         lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip,
```
```diff
@@ -182,8 +192,108 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
         vip_dm_dict = lb_dm.vip.to_dict()
         vip_dm_dict['load_balancer_id'] = lb_dm.id
         del vip_dm_dict['load_balancer']
+        vip['sg_ids'] = []
         self.assertEqual(vip, vip_dm_dict)

+        ret = self.repos.load_balancer.get(self.session, id=lb_dm.id)
+        print(ret.vip.port_id)
+        ret = self.repos.vip.get(self.session, load_balancer_id=lb_dm.id)
+        print(ret.port_id)
+
+    def test_create_load_balancer_and_update_vip(self):
+        lb = {'name': 'test1', 'description': 'desc1', 'enabled': True,
+              'provisioning_status': constants.PENDING_UPDATE,
+              'operating_status': constants.OFFLINE,
+              'topology': constants.TOPOLOGY_ACTIVE_STANDBY,
+              'vrrp_group': None,
+              'provider': 'amphora',
+              'server_group_id': uuidutils.generate_uuid(),
+              'project_id': uuidutils.generate_uuid(),
+              'id': uuidutils.generate_uuid(), 'flavor_id': None,
+              'tags': ['test_tag']}
+        vip = {'ip_address': '192.0.2.1',
+               'port_id': uuidutils.generate_uuid(),
+               'subnet_id': uuidutils.generate_uuid(),
+               'network_id': uuidutils.generate_uuid(),
+               'qos_policy_id': None, 'octavia_owned': True,
+               'vnic_type': None, 'sgs': []}
+        additional_vips = [{'subnet_id': uuidutils.generate_uuid(),
+                            'ip_address': '192.0.2.2'}]
+        lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip,
+                                                        additional_vips)
+        self.session.commit()
+
+        vip_dm_dict = lb_dm.vip.to_dict()
+        self.assertEqual(0, len(vip_dm_dict["sg_ids"]))
+
+        vip_update = {
+            'port_id': uuidutils.generate_uuid(),
+        }
+        self.repos.vip.update(self.session, lb_dm.id, **vip_update)
+        self.session.expire_all()
+        self.session.flush()
+        self.session.commit()
+
+        updated_vip_dm = self.repos.vip.get(self.session,
+                                            load_balancer_id=lb_dm.id)
+        self.assertEqual(vip_update['port_id'], updated_vip_dm.port_id)
+
+    def test_create_load_balancer_and_update_vip_sg_ids(self):
+        lb = {'name': 'test1', 'description': 'desc1', 'enabled': True,
+              'provisioning_status': constants.PENDING_UPDATE,
+              'operating_status': constants.OFFLINE,
+              'topology': constants.TOPOLOGY_ACTIVE_STANDBY,
+              'vrrp_group': None,
+              'provider': 'amphora',
+              'server_group_id': uuidutils.generate_uuid(),
+              'project_id': uuidutils.generate_uuid(),
+              'id': uuidutils.generate_uuid(), 'flavor_id': None,
+              'tags': ['test_tag']}
+        vip = {'ip_address': '192.0.2.1',
+               'port_id': uuidutils.generate_uuid(),
+               'subnet_id': uuidutils.generate_uuid(),
+               'network_id': uuidutils.generate_uuid(),
+               'qos_policy_id': None, 'octavia_owned': True,
+               'vnic_type': None, 'sgs': []}
+        additional_vips = [{'subnet_id': uuidutils.generate_uuid(),
+                            'ip_address': '192.0.2.2'}]
+        lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip,
+                                                        additional_vips)
+        self.session.commit()
+
+        vip_dm_dict = lb_dm.vip.to_dict()
+        self.assertEqual(0, len(vip_dm_dict["sg_ids"]))
+
+        vip_update = {
+            'sg_ids': [uuidutils.generate_uuid(),
+                       uuidutils.generate_uuid()],
+        }
+        self.repos.vip.update(self.session, lb_dm.id, **vip_update)
+        self.session.commit()
+
+        updated_vip_dm = self.repos.vip.get(self.session,
+                                            load_balancer_id=lb_dm.id)
+        self.assertEqual(2, len(vip_update['sg_ids']))
+        self.assertIn(vip_update['sg_ids'][0], updated_vip_dm.sg_ids)
+        self.assertIn(vip_update['sg_ids'][1], updated_vip_dm.sg_ids)
+
+        vip_update['sg_ids'] = [uuidutils.generate_uuid()]
+        self.repos.vip.update(self.session, lb_dm.id, **vip_update)
+        self.session.commit()
+
+        updated_vip_dm = self.repos.vip.get(self.session,
+                                            load_balancer_id=lb_dm.id)
+        self.assertEqual(1, len(vip_update['sg_ids']))
+        self.assertIn(vip_update['sg_ids'][0], updated_vip_dm.sg_ids)
+
+        vip_update['sg_ids'] = []
+        self.repos.vip.update(self.session, lb_dm.id, **vip_update)
+        self.session.commit()
+
+        updated_vip_dm = self.repos.vip.get(self.session,
+                                            load_balancer_id=lb_dm.id)
+        self.assertEqual(0, len(vip_update['sg_ids']))
+
     def test_create_pool_on_listener_without_sp(self):
         pool = {'protocol': constants.PROTOCOL_HTTP, 'name': 'pool1',
                 'description': 'desc1',
```
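Both new tests exercise replace-the-whole-set semantics for `sg_ids`: every `vip.update()` call overwrites the previous list, and `sg_ids=[]` clears it. A hedged sketch of that delete-then-insert pattern in SQLAlchemy; the `vip_security_group` table name follows the `3097e55493ae_add_sg_id_to_vip_table` migration in the file list above, but the model and helper here are illustrative, not the real `VipRepository` code:

```python
# Hedged sketch of "replace the collection" semantics for VIP sg_ids; the
# class and helper are illustrative, not the actual octavia.db schema.
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class VipSecurityGroup(Base):
    __tablename__ = 'vip_security_group'
    load_balancer_id = Column(String(36), primary_key=True)
    sg_id = Column(String(36), primary_key=True)

def update_vip_sg_ids(session, load_balancer_id, sg_ids):
    # Delete existing associations, then re-create from the new list, so
    # each update replaces the whole set and sg_ids=[] clears it, matching
    # the assertions in the tests above.
    session.query(VipSecurityGroup).filter_by(
        load_balancer_id=load_balancer_id).delete()
    session.add_all(VipSecurityGroup(load_balancer_id=load_balancer_id,
                                     sg_id=sg_id) for sg_id in sg_ids)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    update_vip_sg_ids(session, 'lb-1', ['sg-1', 'sg-2'])
    update_vip_sg_ids(session, 'lb-1', ['sg-1'])  # sg-2 association is gone
    session.commit()
```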
```diff
@@ -2086,6 +2196,31 @@ class PoolRepositoryTest(BaseRepositoryTest):
         self.assertEqual(pool.id, new_pool.id)
         self.assertEqual(pool.project_id, new_pool.project_id)

+    def test_get_limited_graph(self):
+        def check_pool_attrs(pool, new_pool, lb, limited_graph):
+            self.assertIsInstance(new_pool, data_models.Pool)
+            self.assertEqual(pool.id, new_pool.id)
+            self.assertEqual(pool.project_id, new_pool.project_id)
+            if limited_graph:
+                self.assertIsNone(new_pool.load_balancer)
+            else:
+                self.assertEqual(lb.id, new_pool.load_balancer.id)
+
+        pool = self.create_pool(pool_id=self.FAKE_UUID_1,
+                                project_id=self.FAKE_UUID_2)
+        # Create LB and attach pool to it.
+        # It means, that in graph pool node get new relationship to LB
+        lb = self.create_loadbalancer(self.FAKE_UUID_5)
+        self.pool_repo.update(self.session, id=pool.id, load_balancer_id=lb.id)
+        self.pool_repo.update(self.session, id=pool.id, load_balancer_id=lb.id)
+
+        new_pool = self.pool_repo.get(self.session, id=pool.id)
+        check_pool_attrs(pool, new_pool, lb, limited_graph=False)
+
+        new_pool2 = self.pool_repo.get(self.session, id=pool.id,
+                                       limited_graph=True)
+        check_pool_attrs(pool, new_pool2, lb, limited_graph=True)
+
     def test_get_all(self):
         pool_one = self.create_pool(pool_id=self.FAKE_UUID_1,
                                     project_id=self.FAKE_UUID_2)
```
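`limited_graph=True` is a new keyword accepted by the repository `get()` (and, below, `get_all()`) calls. Read together with the `recursion_depth` tests in test_models.py above, a plausible reading (a sketch, not the confirmed implementation) is that it caps the depth of the data-model conversion:

```python
# Sketch of how a repository get() could honor limited_graph, assuming the
# to_data_model(recursion_depth=...) API shown in test_models.py above.
def repo_get(session, model_class, limited_graph=False, **filters):
    row = session.query(model_class).filter_by(**filters).first()
    if row is None:
        return None
    # recursion_depth=0 matches what this test asserts for get(): even
    # one-hop neighbours (pool.load_balancer, member.pool) come back empty.
    return row.to_data_model(recursion_depth=0 if limited_graph else None)
```

The `get_all()` test further down implies one extra hop is kept there (`member.pool` survives while `pool.load_balancer` does not), so the cap evidently differs per call site.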
```diff
@@ -2316,13 +2451,33 @@ class MemberRepositoryTest(BaseRepositoryTest):
         self.assertEqual(member.pool_id, new_member.pool_id)
         self.assertEqual(member.ip_address, new_member.ip_address)

-    def test_get_all(self):
-        member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
-                                        self.pool.id, "192.0.2.1")
-        member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
-                                        self.pool.id, "192.0.2.2")
-        member_list, _ = self.member_repo.get_all(self.session,
-                                                  project_id=self.FAKE_UUID_2)
+    def test_get_limited_graph(self):
+        def check_member_attrs(member, new_member, lb, limited_graph):
+            self.assertIsInstance(new_member, data_models.Member)
+            self.assertEqual(member.id, new_member.id)
+            self.assertEqual(member.pool_id, new_member.pool_id)
+            self.assertEqual(member.ip_address, new_member.ip_address)
+            if limited_graph:
+                self.assertIsNone(new_member.pool)
+            else:
+                self.assertEqual(lb.id, new_member.pool.load_balancer.id)
+
+        member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                    self.pool.id, "192.0.2.1")
+        # Create LB and attach pool to it.
+        # It means, that in graph pool node get new relationship to LB
+        lb = self.create_loadbalancer(self.FAKE_UUID_5)
+        self.pool_repo.update(self.session, id=self.pool.id,
+                              load_balancer_id=lb.id)
+
+        new_member = self.member_repo.get(self.session, id=member.id)
+        check_member_attrs(member, new_member, lb, limited_graph=False)
+
+        new_member2 = self.member_repo.get(self.session, id=member.id,
+                                           limited_graph=True)
+        check_member_attrs(member, new_member2, lb, limited_graph=True)
+
+    def _validate_members_response(self, member_one, member_two, member_list):
         self.assertIsInstance(member_list, list)
         self.assertEqual(2, len(member_list))
         self.assertEqual(member_one.id, member_list[0].id)
```
```diff
@@ -2332,6 +2487,54 @@ class MemberRepositoryTest(BaseRepositoryTest):
         self.assertEqual(member_two.pool_id, member_list[1].pool_id)
         self.assertEqual(member_two.ip_address, member_list[1].ip_address)

+    def test_get_all(self):
+        member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.1")
+        member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.2")
+        member_list, _ = self.member_repo.get_all(self.session,
+                                                  project_id=self.FAKE_UUID_2)
+        self._validate_members_response(member_one, member_two, member_list)
+
+    def test_get_all_with_loadbalancer(self):
+        member_one = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.1")
+        member_two = self.create_member(self.FAKE_UUID_3, self.FAKE_UUID_2,
+                                        self.pool.id, "192.0.2.2")
+        # Create LB and attach pool to it.
+        # It means, that in graph pool node get new relationship to LB
+        lb = self.create_loadbalancer(self.FAKE_UUID_5)
+        self.pool_repo.update(self.session, id=self.pool.id,
+                              load_balancer_id=lb.id)
+
+        member_list, _ = self.member_repo.get_all(self.session,
+                                                  project_id=self.FAKE_UUID_2)
+        self._validate_members_response(member_one, member_two, member_list)
+        # Without limit on recursion all nodes will be processed.
+        # As result load_balancer node will be available in response
+        self.assertEqual(self.pool.id, member_list[0].pool.id)
+        self.assertEqual(self.pool.id, member_list[1].pool.id)
+        self.assertEqual(lb.id, member_list[0].pool.load_balancer.id)
+        self.assertEqual(lb.id, member_list[1].pool.load_balancer.id)
+
+        # get the same list of members with enabled limit graph recursion
+        member_list_limit, _ = self.member_repo.get_all(
+            self.session,
+            project_id=self.FAKE_UUID_2,
+            limited_graph=True
+        )
+        self._validate_members_response(
+            member_one,
+            member_two,
+            member_list_limit
+        )
+        # With limit on recursion load_balancer node will not be processed.
+        # As result load_balancer node will not be available in response
+        self.assertEqual(self.pool.id, member_list_limit[0].pool.id)
+        self.assertEqual(self.pool.id, member_list_limit[1].pool.id)
+        self.assertIsNone(member_list_limit[0].pool.load_balancer)
+        self.assertIsNone(member_list_limit[1].pool.load_balancer)
+
     def test_create(self):
         member = self.create_member(self.FAKE_UUID_1, self.FAKE_UUID_2,
                                     self.pool.id, ip_address="192.0.2.1")
```
```diff
@@ -2466,16 +2669,6 @@ class TestListenerRepositoryTest(BaseRepositoryTest):
         self.session.commit()
         return amphora

-    def create_loadbalancer(self, lb_id):
-        lb = self.lb_repo.create(self.session, id=lb_id,
-                                 project_id=self.FAKE_UUID_2, name="lb_name",
-                                 description="lb_description",
-                                 provisioning_status=constants.ACTIVE,
-                                 operating_status=constants.ONLINE,
-                                 enabled=True)
-        self.session.commit()
-        return lb
-
     def test_get(self):
         listener = self.create_listener(self.FAKE_UUID_1, 80)
         new_listener = self.listener_repo.get(self.session, id=listener.id)
```
```diff
@@ -3491,6 +3684,31 @@ class VipRepositoryTest(BaseRepositoryTest):
                                     load_balancer_id=vip.load_balancer_id)
         self.assertEqual(address_change, new_vip.ip_address)

+    def test_update_sg_ids(self):
+        sg1_id = uuidutils.generate_uuid()
+        sg2_id = uuidutils.generate_uuid()
+        vip = self.create_vip(self.lb.id)
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[sg1_id, sg2_id])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertIn(sg1_id, new_vip.sg_ids)
+        self.assertIn(sg2_id, new_vip.sg_ids)
+
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[sg1_id])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertIn(sg1_id, new_vip.sg_ids)
+        self.assertNotIn(sg2_id, new_vip.sg_ids)
+
+        self.vip_repo.update(self.session, vip.load_balancer_id,
+                             sg_ids=[])
+        new_vip = self.vip_repo.get(self.session,
+                                    load_balancer_id=vip.load_balancer_id)
+        self.assertNotIn(sg1_id, new_vip.sg_ids)
+        self.assertNotIn(sg2_id, new_vip.sg_ids)
+
     def test_delete(self):
         vip = self.create_vip(self.lb.id)
         self.vip_repo.delete(self.session,
```
octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py:

```diff
@@ -356,13 +356,57 @@ class TestUtil(base.TestCase):
         self.assertEqual([], util.get_haproxy_vip_addresses(LB_ID1))
         mock_cfg_path.assert_called_once_with(LB_ID1)

+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'keepalived_lvs_cfg_path')
+    def test_get_lvs_vip_addresses(self, mock_cfg_path):
+        FAKE_PATH = 'fake_path'
+        mock_cfg_path.return_value = FAKE_PATH
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, 'no match')).mock_open()
+
+        # Test with no matching lines in the config file
+        self.assertEqual([], util.get_lvs_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+        # Test with 2 matching lines
+        mock_cfg_path.reset_mock()
+        test_data = ('virtual_server_group ipv4-group {\n'
+                     '    203.0.113.43 1\n'
+                     '    203.0.113.44 1\n'
+                     '}\n')
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open()
+        expected_result = ['203.0.113.43', '203.0.113.44']
+        self.assertEqual(expected_result,
+                         util.get_lvs_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
+        # Test with 2 groups
+        mock_cfg_path.reset_mock()
+        test_data = ('virtual_server_group ipv4-group {\n'
+                     '    203.0.113.43 1\n'
+                     '}\n'
+                     'virtual_server_group ipv6-group {\n'
+                     '    2d01:27::1 2\n'
+                     '    2d01:27::2 2\n'
+                     '}\n')
+        self.useFixture(
+            test_utils.OpenFixture(FAKE_PATH, test_data)).mock_open()
+        expected_result = ['203.0.113.43', '2d01:27::1', '2d01:27::2']
+        self.assertEqual(expected_result,
+                         util.get_lvs_vip_addresses(LB_ID1))
+        mock_cfg_path.assert_called_once_with(LB_ID1)
+
     @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
                 'send_ip_advertisement')
     @mock.patch('octavia.amphorae.backends.utils.network_utils.'
                 'get_interface_name')
     @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
                 'get_haproxy_vip_addresses')
-    def test_send_vip_advertisements(self, mock_get_vip_addrs,
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_lvs_vip_addresses')
+    def test_send_vip_advertisements(self, mock_get_lvs_vip_addrs,
+                                     mock_get_vip_addrs,
                                      mock_get_int_name, mock_send_advert):
         mock_get_vip_addrs.side_effect = [[], ['203.0.113.46'],
                                           Exception('boom')]
```
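These cases define the parsing contract for the new `util.get_lvs_vip_addresses()`: collect the leading address token from every line inside any `virtual_server_group` block, across both IPv4 and IPv6 groups. A small self-contained sketch of that contract (not the actual octavia implementation):

```python
# Hedged sketch of the parsing these tests imply; the real
# octavia.amphorae.backends.agent.api_server.util code may differ.
import ipaddress

def parse_lvs_vip_addresses(cfg_text):
    addresses = []
    in_group = False
    for line in cfg_text.splitlines():
        stripped = line.strip()
        if stripped.startswith('virtual_server_group'):
            in_group = True        # entering a "virtual_server_group ... {"
            continue
        if stripped == '}':
            in_group = False       # leaving the group block
            continue
        if in_group and stripped:
            candidate = stripped.split()[0]
            try:
                ipaddress.ip_address(candidate)  # accepts IPv4 and IPv6
            except ValueError:
                continue
            addresses.append(candidate)
    return addresses

cfg = ('virtual_server_group ipv4-group {\n'
       '    203.0.113.43 1\n'
       '}\n'
       'virtual_server_group ipv6-group {\n'
       '    2d01:27::1 2\n'
       '}\n')
assert parse_lvs_vip_addresses(cfg) == ['203.0.113.43', '2d01:27::1']
```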
```diff
@@ -371,6 +415,7 @@ class TestUtil(base.TestCase):
         # Test no VIPs
         util.send_vip_advertisements(LB_ID1)
         mock_get_vip_addrs.assert_called_once_with(LB_ID1)
+        mock_get_lvs_vip_addrs.assert_not_called()
         mock_get_int_name.assert_not_called()
         mock_send_advert.assert_not_called()

```
```diff
@@ -380,6 +425,7 @@ class TestUtil(base.TestCase):
         mock_send_advert.reset_mock()
         util.send_vip_advertisements(LB_ID1)
         mock_get_vip_addrs.assert_called_once_with(LB_ID1)
+        mock_get_lvs_vip_addrs.assert_not_called()
         mock_get_int_name.assert_called_once_with(
             '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
         mock_send_advert.assert_called_once_with(
```
```diff
@@ -393,6 +439,48 @@ class TestUtil(base.TestCase):
         mock_get_int_name.assert_not_called()
         mock_send_advert.assert_not_called()

+    @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
+                'send_ip_advertisement')
+    @mock.patch('octavia.amphorae.backends.utils.network_utils.'
+                'get_interface_name')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_haproxy_vip_addresses')
+    @mock.patch('octavia.amphorae.backends.agent.api_server.util.'
+                'get_lvs_vip_addresses')
+    def test_send_vip_advertisements_udp(self, mock_get_lvs_vip_addrs,
+                                         mock_get_vip_addrs,
+                                         mock_get_int_name, mock_send_advert):
+        mock_get_lvs_vip_addrs.side_effect = [[], ['203.0.113.46'],
+                                              Exception('boom')]
+        mock_get_int_name.return_value = 'fake0'
+
+        # Test no VIPs
+        util.send_vip_advertisements(listener_id=LISTENER_ID1)
+        mock_get_lvs_vip_addrs.assert_called_once_with(LISTENER_ID1)
+        mock_get_vip_addrs.assert_not_called()
+        mock_get_int_name.assert_not_called()
+        mock_send_advert.assert_not_called()
+
+        # Test with a VIP
+        mock_get_lvs_vip_addrs.reset_mock()
+        mock_get_int_name.reset_mock()
+        mock_send_advert.reset_mock()
+        util.send_vip_advertisements(listener_id=LISTENER_ID1)
+        mock_get_lvs_vip_addrs.assert_called_once_with(LISTENER_ID1)
+        mock_get_vip_addrs.assert_not_called()
+        mock_get_int_name.assert_called_once_with(
+            '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
+        mock_send_advert.assert_called_once_with(
+            'fake0', '203.0.113.46', net_ns=consts.AMPHORA_NAMESPACE)
+
+        # Test with an exception (should not raise)
+        mock_get_lvs_vip_addrs.reset_mock()
+        mock_get_int_name.reset_mock()
+        mock_send_advert.reset_mock()
+        util.send_vip_advertisements(listener_id=LISTENER_ID1)
+        mock_get_int_name.assert_not_called()
+        mock_send_advert.assert_not_called()
+
     @mock.patch('octavia.amphorae.backends.utils.ip_advertisement.'
                 'send_ip_advertisement')
     @mock.patch('octavia.amphorae.backends.utils.network_utils.'
```
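The UDP variant asserts a dispatch rule for `util.send_vip_advertisements()`: a `listener_id` keyword routes the lookup to the keepalived/LVS config, a bare load balancer ID keeps the haproxy path, the alternate lookup is never touched, and exceptions are swallowed rather than raised. A hedged sketch of that control flow, with the mocked helpers stubbed out:

```python
import logging

LOG = logging.getLogger(__name__)
AMPHORA_NAMESPACE = 'amphora-haproxy'  # stands in for consts.AMPHORA_NAMESPACE

# Stubs standing in for the helpers the tests above mock out.
def get_haproxy_vip_addresses(lb_id): return ['203.0.113.46']
def get_lvs_vip_addresses(listener_id): return []
def get_interface_name(ip, net_ns): return 'fake0'
def send_ip_advertisement(interface, ip, net_ns): pass

def send_vip_advertisements(lb_id=None, listener_id=None):
    # Sketch of the dispatch the tests assert: listener_id selects the LVS
    # (UDP/SCTP) config, otherwise the haproxy config is used.
    try:
        if listener_id:
            vips = get_lvs_vip_addresses(listener_id)
        else:
            vips = get_haproxy_vip_addresses(lb_id)
        for vip in vips:
            interface = get_interface_name(vip, net_ns=AMPHORA_NAMESPACE)
            send_ip_advertisement(interface, vip, net_ns=AMPHORA_NAMESPACE)
    except Exception as e:
        # The Exception('boom') side effect must not propagate,
        # so failures are logged and swallowed.
        LOG.debug('Send VIP advertisement failed: %s', str(e))
```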
octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py:

```diff
@@ -17,7 +17,6 @@ from unittest import mock

 from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
-from oslo_utils.secretutils import md5
 from oslo_utils import uuidutils
 import requests
 import requests_mock
```
```diff
@@ -354,7 +353,8 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         mock_oslo.return_value = fake_context
         self.driver.cert_manager.get_secret.reset_mock()
         self.driver.cert_manager.get_secret.return_value = fake_secret
-        ref_md5 = md5(fake_secret, usedforsecurity=False).hexdigest()  # nosec
+        ref_md5 = hashlib.md5(
+            fake_secret, usedforsecurity=False).hexdigest()  # nosec
         ref_id = hashlib.sha1(fake_secret).hexdigest()  # nosec
         ref_name = f'{ref_id}.pem'

```
```diff
@@ -418,7 +418,8 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         mock_load_certs.return_value = pool_data
         fake_pem = b'fake pem'
         mock_build_pem.return_value = fake_pem
-        ref_md5 = md5(fake_pem, usedforsecurity=False).hexdigest()  # nosec
+        ref_md5 = hashlib.md5(
+            fake_pem, usedforsecurity=False).hexdigest()  # nosec
         ref_name = f'{pool_cert.id}.pem'
         ref_path = (f'{fake_cert_dir}/{sample_listener.load_balancer.id}/'
                     f'{ref_name}')
```
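The dropped `oslo_utils.secretutils.md5` wrapper existed to backfill the `usedforsecurity` keyword on interpreters that predate it; on Python 3.9+ `hashlib.md5` accepts the flag directly, which is what these tests now call:

```python
import hashlib

# usedforsecurity=False (Python 3.9+) marks the digest as a fingerprint
# rather than a security primitive, keeping FIPS-enabled builds happy.
fingerprint = hashlib.md5(b'fake pem', usedforsecurity=False).hexdigest()  # nosec
print(fingerprint)  # 32-char hex digest, used above to compare certificates
```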
```diff
@@ -779,7 +780,8 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.clients[API_VERSION].plug_network.assert_called_once_with(
             self.amp, dict(mac_address=FAKE_MAC_ADDRESS,
                            fixed_ips=[],
-                           mtu=FAKE_MTU))
+                           mtu=FAKE_MTU,
+                           is_sriov=False))

         self.driver.clients[API_VERSION].plug_network.reset_mock()
```
```diff
@@ -791,7 +793,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
                                subnet_cidr='198.51.100.0/24',
                                host_routes=[],
                                gateway=FAKE_GATEWAY)],
-                           mtu=FAKE_MTU))
+                           mtu=FAKE_MTU, is_sriov=False))

         self.driver.clients[API_VERSION].plug_network.reset_mock()
```
```diff
@@ -818,7 +820,7 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
                 additional_vips=[],
                 mtu=FAKE_MTU,
                 is_sriov=False
-            )))
+            ), is_sriov=False))

     def test_post_network_plug_with_host_routes(self):
         SUBNET_ID = 'SUBNET_ID'
```
```diff
@@ -858,7 +860,8 @@ class TestHaproxyAmphoraLoadBalancerDriverTest(base.TestCase):
         self.driver.clients[API_VERSION].plug_network.assert_called_once_with(
             self.amp, dict(mac_address=FAKE_MAC_ADDRESS,
                            fixed_ips=expected_fixed_ips,
-                           mtu=FAKE_MTU))
+                           mtu=FAKE_MTU,
+                           is_sriov=False))

     def test_get_haproxy_versions(self):
         ref_haproxy_versions = ['1', '6']
```