octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (193)
  1. octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
  2. octavia/amphorae/backends/agent/api_server/plug.py +21 -7
  3. octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
  4. octavia/amphorae/backends/agent/api_server/util.py +21 -0
  5. octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
  6. octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
  7. octavia/amphorae/backends/utils/interface.py +14 -6
  8. octavia/amphorae/backends/utils/interface_file.py +6 -3
  9. octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
  10. octavia/amphorae/drivers/driver_base.py +1 -2
  11. octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
  12. octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
  13. octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
  14. octavia/amphorae/drivers/noop_driver/driver.py +3 -5
  15. octavia/api/common/pagination.py +4 -4
  16. octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
  17. octavia/api/drivers/driver_agent/driver_get.py +22 -14
  18. octavia/api/drivers/driver_agent/driver_updater.py +8 -4
  19. octavia/api/drivers/utils.py +4 -2
  20. octavia/api/healthcheck/healthcheck_plugins.py +4 -2
  21. octavia/api/root_controller.py +4 -1
  22. octavia/api/v2/controllers/amphora.py +35 -38
  23. octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
  24. octavia/api/v2/controllers/availability_zones.py +22 -18
  25. octavia/api/v2/controllers/flavor_profiles.py +37 -28
  26. octavia/api/v2/controllers/flavors.py +19 -15
  27. octavia/api/v2/controllers/health_monitor.py +44 -33
  28. octavia/api/v2/controllers/l7policy.py +52 -40
  29. octavia/api/v2/controllers/l7rule.py +68 -55
  30. octavia/api/v2/controllers/listener.py +88 -61
  31. octavia/api/v2/controllers/load_balancer.py +52 -34
  32. octavia/api/v2/controllers/member.py +63 -52
  33. octavia/api/v2/controllers/pool.py +55 -42
  34. octavia/api/v2/controllers/quotas.py +5 -3
  35. octavia/api/v2/types/listener.py +15 -0
  36. octavia/cmd/octavia_worker.py +0 -3
  37. octavia/cmd/status.py +1 -4
  38. octavia/common/clients.py +25 -45
  39. octavia/common/config.py +64 -22
  40. octavia/common/constants.py +3 -2
  41. octavia/common/data_models.py +7 -1
  42. octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
  43. octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
  44. octavia/common/jinja/lvs/jinja_cfg.py +4 -2
  45. octavia/common/keystone.py +58 -5
  46. octavia/common/validate.py +35 -0
  47. octavia/compute/drivers/noop_driver/driver.py +6 -0
  48. octavia/controller/healthmanager/health_manager.py +3 -6
  49. octavia/controller/housekeeping/house_keeping.py +36 -37
  50. octavia/controller/worker/amphora_rate_limit.py +5 -4
  51. octavia/controller/worker/task_utils.py +57 -41
  52. octavia/controller/worker/v2/controller_worker.py +160 -103
  53. octavia/controller/worker/v2/flows/listener_flows.py +3 -0
  54. octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
  55. octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
  56. octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
  57. octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
  58. octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
  59. octavia/db/api.py +26 -23
  60. octavia/db/base_models.py +2 -2
  61. octavia/db/healthcheck.py +2 -1
  62. octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
  63. octavia/db/models.py +12 -2
  64. octavia/db/prepare.py +2 -0
  65. octavia/db/repositories.py +462 -482
  66. octavia/hacking/checks.py +1 -1
  67. octavia/network/base.py +0 -14
  68. octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
  69. octavia/network/drivers/neutron/base.py +65 -77
  70. octavia/network/drivers/neutron/utils.py +69 -85
  71. octavia/network/drivers/noop_driver/driver.py +0 -7
  72. octavia/statistics/drivers/update_db.py +10 -10
  73. octavia/tests/common/constants.py +91 -84
  74. octavia/tests/common/sample_data_models.py +13 -1
  75. octavia/tests/fixtures.py +32 -0
  76. octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
  77. octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
  78. octavia/tests/functional/api/test_root_controller.py +3 -28
  79. octavia/tests/functional/api/v2/base.py +5 -3
  80. octavia/tests/functional/api/v2/test_amphora.py +18 -5
  81. octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
  82. octavia/tests/functional/api/v2/test_listener.py +51 -19
  83. octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
  84. octavia/tests/functional/db/base.py +31 -16
  85. octavia/tests/functional/db/test_models.py +27 -28
  86. octavia/tests/functional/db/test_repositories.py +407 -50
  87. octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
  88. octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
  89. octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
  90. octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
  91. octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
  92. octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
  93. octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
  94. octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
  95. octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
  96. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
  97. octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
  98. octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
  99. octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
  100. octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
  101. octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
  102. octavia/tests/unit/base.py +6 -0
  103. octavia/tests/unit/cmd/test_interface.py +2 -2
  104. octavia/tests/unit/cmd/test_status.py +2 -2
  105. octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
  106. octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
  107. octavia/tests/unit/common/test_clients.py +0 -39
  108. octavia/tests/unit/common/test_keystone.py +54 -0
  109. octavia/tests/unit/common/test_validate.py +67 -0
  110. octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
  111. octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
  112. octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
  113. octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
  114. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
  115. octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
  116. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
  117. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
  118. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
  119. octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
  120. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
  121. octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
  122. octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
  123. octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
  124. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
  125. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
  126. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
  127. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
  128. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
  129. octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
  130. octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
  131. octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
  132. octavia/controller/queue/v1/__init__.py +0 -11
  133. octavia/controller/queue/v1/consumer.py +0 -64
  134. octavia/controller/queue/v1/endpoints.py +0 -160
  135. octavia/controller/worker/v1/__init__.py +0 -11
  136. octavia/controller/worker/v1/controller_worker.py +0 -1157
  137. octavia/controller/worker/v1/flows/__init__.py +0 -11
  138. octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
  139. octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
  140. octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
  141. octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
  142. octavia/controller/worker/v1/flows/listener_flows.py +0 -128
  143. octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
  144. octavia/controller/worker/v1/flows/member_flows.py +0 -230
  145. octavia/controller/worker/v1/flows/pool_flows.py +0 -127
  146. octavia/controller/worker/v1/tasks/__init__.py +0 -11
  147. octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
  148. octavia/controller/worker/v1/tasks/cert_task.py +0 -51
  149. octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
  150. octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
  151. octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
  152. octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
  153. octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
  154. octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
  155. octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
  156. octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
  157. octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
  158. octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
  159. octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
  160. octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
  161. octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
  162. octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
  163. octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
  164. octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
  165. octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
  166. octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
  167. octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
  168. octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
  169. octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
  170. octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
  171. octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
  172. octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
  173. octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
  174. octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
  175. octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
  176. octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
  177. octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
  178. octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
  179. octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
  180. octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
  181. octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
  182. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
  183. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
  184. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
  185. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  186. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
  187. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  188. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
  189. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  190. {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
  191. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
  192. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
  193. {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
octavia/controller/worker/v1/tasks/network_tasks.py
@@ -1,970 +0,0 @@
- # Copyright 2015 Hewlett-Packard Development Company, L.P.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"); you may
- # not use this file except in compliance with the License. You may obtain
- # a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- # License for the specific language governing permissions and limitations
- # under the License.
- #
- import time
-
- from oslo_config import cfg
- from oslo_log import log as logging
- from oslo_utils import excutils
- from taskflow import task
- from taskflow.types import failure
- import tenacity
-
- from octavia.common import constants
- from octavia.common import utils
- from octavia.controller.worker import task_utils
- from octavia.db import api as db_apis
- from octavia.db import repositories
- from octavia.network import base
- from octavia.network import data_models as n_data_models
-
- LOG = logging.getLogger(__name__)
- CONF = cfg.CONF
-
-
- class BaseNetworkTask(task.Task):
-     """Base task to load drivers common to the tasks."""
-
-     def __init__(self, **kwargs):
-         super().__init__(**kwargs)
-         self._network_driver = None
-         self.task_utils = task_utils.TaskUtils()
-         self.lb_repo = repositories.LoadBalancerRepository()
-
-     @property
-     def network_driver(self):
-         if self._network_driver is None:
-             self._network_driver = utils.get_network_driver()
-         return self._network_driver
-
-
- class CalculateAmphoraDelta(BaseNetworkTask):
-
-     default_provides = constants.DELTA
-
-     def execute(self, loadbalancer, amphora, availability_zone):
-         LOG.debug("Calculating network delta for amphora id: %s", amphora.id)
-
-         vip_subnet_to_net_map = {
-             loadbalancer.vip.subnet_id:
-                 loadbalancer.vip.network_id,
-         }
-
-         # Figure out what networks we want
-         # seed with lb network(s)
-         if (availability_zone and
-                 availability_zone.get(constants.MANAGEMENT_NETWORK)):
-             management_nets = [
-                 availability_zone.get(constants.MANAGEMENT_NETWORK)]
-         else:
-             management_nets = CONF.controller_worker.amp_boot_network_list
-
-         desired_subnet_to_net_map = {}
-         for mgmt_net_id in management_nets:
-             for subnet_id in self.network_driver.get_network(
-                     mgmt_net_id).subnets:
-                 desired_subnet_to_net_map[subnet_id] = mgmt_net_id
-         desired_subnet_to_net_map.update(vip_subnet_to_net_map)
-
-         for pool in loadbalancer.pools:
-             for member in pool.members:
-                 if (member.subnet_id and
-                         member.provisioning_status !=
-                         constants.PENDING_DELETE):
-                     member_network = self.network_driver.get_subnet(
-                         member.subnet_id).network_id
-                     desired_subnet_to_net_map[member.subnet_id] = (
-                         member_network)
-
-         desired_network_ids = set(desired_subnet_to_net_map.values())
-         desired_subnet_ids = set(desired_subnet_to_net_map)
-
-         # Calculate Network deltas
-         nics = self.network_driver.get_plugged_networks(
-             amphora.compute_id)
-         # we don't have two nics in the same network
-         network_to_nic_map = {nic.network_id: nic for nic in nics}
-
-         plugged_network_ids = set(network_to_nic_map)
-
-         del_ids = plugged_network_ids - desired_network_ids
-         delete_nics = [n_data_models.Interface(
-             network_id=net_id,
-             port_id=network_to_nic_map[net_id].port_id)
-             for net_id in del_ids]
-
-         add_ids = desired_network_ids - plugged_network_ids
-         add_nics = [n_data_models.Interface(
-             network_id=add_net_id,
-             fixed_ips=[
-                 n_data_models.FixedIP(
-                     subnet_id=subnet_id)
-                 for subnet_id, net_id in desired_subnet_to_net_map.items()
-                 if net_id == add_net_id])
-             for add_net_id in add_ids]
-
-         # Calculate member Subnet deltas
-         plugged_subnets = {}
-         for nic in network_to_nic_map.values():
-             for fixed_ip in nic.fixed_ips or []:
-                 plugged_subnets[fixed_ip.subnet_id] = nic.network_id
-
-         plugged_subnet_ids = set(plugged_subnets)
-         del_subnet_ids = plugged_subnet_ids - desired_subnet_ids
-         add_subnet_ids = desired_subnet_ids - plugged_subnet_ids
-
-         def _subnet_updates(subnet_ids, subnets):
-             updates = []
-             for s in subnet_ids:
-                 network_id = subnets[s]
-                 nic = network_to_nic_map.get(network_id)
-                 port_id = nic.port_id if nic else None
-                 updates.append({
-                     constants.SUBNET_ID: s,
-                     constants.NETWORK_ID: network_id,
-                     constants.PORT_ID: port_id
-                 })
-             return updates
-
-         add_subnets = _subnet_updates(add_subnet_ids,
-                                       desired_subnet_to_net_map)
-         del_subnets = _subnet_updates(del_subnet_ids,
-                                       plugged_subnets)
-
-         delta = n_data_models.Delta(
-             amphora_id=amphora.id,
-             compute_id=amphora.compute_id,
-             add_nics=add_nics, delete_nics=delete_nics,
-             add_subnets=add_subnets,
-             delete_subnets=del_subnets)
-         return delta
-
-
- class CalculateDelta(BaseNetworkTask):
-     """Task to calculate the delta between
-
-     the nics on the amphora and the ones
-     we need. Returns a list for
-     plumbing them.
-     """
-
-     default_provides = constants.DELTAS
-
-     def execute(self, loadbalancer, availability_zone):
-         """Compute which NICs need to be plugged
-
-         for the amphora to become operational.
-
-         :param loadbalancer: the loadbalancer to calculate deltas for all
-                              amphorae
-         :param availability_zone: availability zone metadata dict
-
-         :returns: dict of octavia.network.data_models.Delta keyed off amphora
-                   id
-         """
-
-         calculate_amp = CalculateAmphoraDelta()
-         deltas = {}
-         for amphora in filter(
-                 lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
-                 loadbalancer.amphorae):
-
-             delta = calculate_amp.execute(loadbalancer, amphora,
-                                           availability_zone)
-             deltas[amphora.id] = delta
-         return deltas
-
-
- class GetPlumbedNetworks(BaseNetworkTask):
-     """Task to figure out the NICS on an amphora.
-
-     This will likely move into the amphora driver
-     :returns: Array of networks
-     """
-
-     default_provides = constants.NICS
-
-     def execute(self, amphora):
-         """Get plumbed networks for the amphora."""
-
-         LOG.debug("Getting plumbed networks for amphora id: %s", amphora.id)
-
-         return self.network_driver.get_plugged_networks(amphora.compute_id)
-
-
- class PlugNetworks(BaseNetworkTask):
-     """Task to plug the networks.
-
-     This uses the delta to add all missing networks/nics
-     """
-
-     def execute(self, amphora, delta):
-         """Update the amphora networks for the delta."""
-
-         LOG.debug("Plug or unplug networks for amphora id: %s", amphora.id)
-
-         if not delta:
-             LOG.debug("No network deltas for amphora id: %s", amphora.id)
-             return
-
-         # add nics
-         for nic in delta.add_nics:
-             self.network_driver.plug_network(amphora.compute_id,
-                                              nic.network_id)
-
-     def revert(self, amphora, delta, *args, **kwargs):
-         """Handle a failed network plug by removing all nics added."""
-
-         LOG.warning("Unable to plug networks for amp id %s", amphora.id)
-         if not delta:
-             return
-
-         for nic in delta.add_nics:
-             try:
-                 self.network_driver.unplug_network(amphora.compute_id,
-                                                    nic.network_id)
-             except base.NetworkNotFound:
-                 pass
-
-
- class UnPlugNetworks(BaseNetworkTask):
-     """Task to unplug the networks
-
-     Loop over all nics and unplug them
-     based on delta
-     """
-
-     def execute(self, amphora, delta):
-         """Unplug the networks."""
-
-         LOG.debug("Unplug network for amphora")
-         if not delta:
-             LOG.debug("No network deltas for amphora id: %s", amphora.id)
-             return
-
-         for nic in delta.delete_nics:
-             try:
-                 self.network_driver.unplug_network(amphora.compute_id,
-                                                    nic.network_id)
-             except base.NetworkNotFound:
-                 LOG.debug("Network %d not found", nic.network_id)
-             except Exception:
-                 LOG.exception("Unable to unplug network")
-                 # TODO(xgerman) follow up if that makes sense
-
-
- class GetMemberPorts(BaseNetworkTask):
-
-     def execute(self, loadbalancer, amphora):
-         vip_port = self.network_driver.get_port(loadbalancer.vip.port_id)
-         member_ports = []
-         interfaces = self.network_driver.get_plugged_networks(
-             amphora.compute_id)
-         for interface in interfaces:
-             port = self.network_driver.get_port(interface.port_id)
-             if vip_port.network_id == port.network_id:
-                 continue
-             port.network = self.network_driver.get_network(port.network_id)
-             for fixed_ip in port.fixed_ips:
-                 if amphora.lb_network_ip == fixed_ip.ip_address:
-                     break
-                 fixed_ip.subnet = self.network_driver.get_subnet(
-                     fixed_ip.subnet_id)
-             # Only add the port to the list if the IP wasn't the mgmt IP
-             else:
-                 member_ports.append(port)
-         return member_ports
-
-
- class HandleNetworkDelta(BaseNetworkTask):
-     """Task to plug and unplug networks
-
-     Plug or unplug networks based on delta
-     """
-
-     def _fill_port_info(self, port):
-         port.network = self.network_driver.get_network(port.network_id)
-         for fixed_ip in port.fixed_ips:
-             fixed_ip.subnet = self.network_driver.get_subnet(
-                 fixed_ip.subnet_id)
-
-     def execute(self, amphora, delta):
-         """Handle network plugging based off deltas."""
-         updated_ports = {}
-         for nic in delta.add_nics:
-             subnet_id = nic.fixed_ips[0].subnet_id
-             interface = self.network_driver.plug_network(
-                 amphora.compute_id, nic.network_id)
-             port = self.network_driver.get_port(interface.port_id)
-             # nova may plugged undesired subnets (it plugs one of the subnets
-             # of the network), we can safely unplug the subnets we don't need,
-             # the desired subnet will be added in the 'ADD_SUBNETS' loop.
-             extra_subnets = [
-                 fixed_ip.subnet_id
-                 for fixed_ip in port.fixed_ips
-                 if fixed_ip.subnet_id != subnet_id]
-             for subnet_id in extra_subnets:
-                 port = self.network_driver.unplug_fixed_ip(
-                     port_id=interface.port_id, subnet_id=subnet_id)
-             self._fill_port_info(port)
-             updated_ports[port.network_id] = port
-
-         for update in delta.add_subnets:
-             network_id = update[constants.NETWORK_ID]
-             # Get already existing port from Deltas or
-             # newly created port from updated_ports dict
-             port_id = (update[constants.PORT_ID] or
-                        updated_ports[network_id].id)
-             subnet_id = update[constants.SUBNET_ID]
-             # Avoid duplicated subnets
-             has_subnet = False
-             if network_id in updated_ports:
-                 has_subnet = any(
-                     fixed_ip.subnet_id == subnet_id
-                     for fixed_ip in updated_ports[network_id].fixed_ips)
-             if not has_subnet:
-                 port = self.network_driver.plug_fixed_ip(
-                     port_id=port_id, subnet_id=subnet_id)
-                 self._fill_port_info(port)
-                 updated_ports[network_id] = port
-
-         for update in delta.delete_subnets:
-             network_id = update[constants.NETWORK_ID]
-             port_id = update[constants.PORT_ID]
-             subnet_id = update[constants.SUBNET_ID]
-             port = self.network_driver.unplug_fixed_ip(
-                 port_id=port_id, subnet_id=subnet_id)
-             self._fill_port_info(port)
-             # In neutron, when removing an ipv6 subnet (with slaac) from a
-             # port, it just ignores it.
-             # https://bugs.launchpad.net/neutron/+bug/1945156
-             # When it happens, don't add the port to the updated_ports dict
-             has_subnet = any(
-                 fixed_ip.subnet_id == subnet_id
-                 for fixed_ip in port.fixed_ips)
-             if not has_subnet:
-                 updated_ports[network_id] = port
-
-         for nic in delta.delete_nics:
-             network_id = nic.network_id
-             try:
-                 self.network_driver.unplug_network(
-                     amphora.compute_id, network_id)
-             except base.NetworkNotFound:
-                 LOG.debug("Network %s not found", network_id)
-             except Exception:
-                 LOG.exception("Unable to unplug network")
-
-             port_id = nic.port_id
-             try:
-                 self.network_driver.delete_port(port_id)
-             except Exception:
-                 LOG.exception("Unable to delete the port")
-
-             updated_ports.pop(network_id, None)
-         return {amphora.id: list(updated_ports.values())}
-
-     def revert(self, result, amphora, delta, *args, **kwargs):
-         """Handle a network plug or unplug failures."""
-
-         if isinstance(result, failure.Failure):
-             return
-
-         if not delta:
-             return
-
-         LOG.warning("Unable to plug networks for amp id %s",
-                     delta.amphora_id)
-
-         for nic in delta.add_nics:
-             try:
-                 self.network_driver.unplug_network(delta.compute_id,
-                                                    nic.network_id)
-             except Exception:
-                 LOG.exception("Unable to unplug network %s",
-                               nic.network_id)
-
-             port_id = nic.port_id
-             try:
-                 self.network_driver.delete_port(port_id)
-             except Exception:
-                 LOG.exception("Unable to delete port %s", port_id)
-
-
- class HandleNetworkDeltas(BaseNetworkTask):
-     """Task to plug and unplug networks
-
-     Loop through the deltas and plug or unplug
-     networks based on delta
-     """
-
-     def execute(self, deltas, loadbalancer):
-         """Handle network plugging based off deltas."""
-         amphorae = {amp.id: amp for amp in loadbalancer.amphorae}
-
-         updated_ports = {}
-         handle_delta = HandleNetworkDelta()
-
-         for amp_id, delta in deltas.items():
-             ret = handle_delta.execute(amphorae[amp_id], delta)
-             updated_ports.update(ret)
-
-         return updated_ports
-
-     def revert(self, result, deltas, *args, **kwargs):
-         """Handle a network plug or unplug failures."""
-
-         if isinstance(result, failure.Failure):
-             return
-
-         if not deltas:
-             return
-
-         for amp_id, delta in deltas.items():
-             LOG.warning("Unable to plug networks for amp id %s",
-                         delta.amphora_id)
-             if not delta:
-                 return
-
-             for nic in delta.add_nics:
-                 try:
-                     self.network_driver.unplug_network(delta.compute_id,
-                                                        nic.network_id)
-                 except Exception:
-                     LOG.exception("Unable to unplug network %s",
-                                   nic.network_id)
-
-                 port_id = nic.port_id
-                 try:
-                     self.network_driver.delete_port(port_id)
-                 except Exception:
-                     LOG.exception("Unable to delete port %s", port_id)
-
-
- class PlugVIP(BaseNetworkTask):
-     """Task to plumb a VIP."""
-
-     def execute(self, loadbalancer):
-         """Plumb a vip to an amphora."""
-
-         LOG.debug("Plumbing VIP for loadbalancer id: %s", loadbalancer.id)
-
-         amps_data = self.network_driver.plug_vip(loadbalancer,
-                                                  loadbalancer.vip)
-         return amps_data
-
-     def revert(self, result, loadbalancer, *args, **kwargs):
-         """Handle a failure to plumb a vip."""
-
-         if isinstance(result, failure.Failure):
-             return
-         LOG.warning("Unable to plug VIP for loadbalancer id %s",
-                     loadbalancer.id)
-
-         try:
-             # Make sure we have the current port IDs for cleanup
-             for amp_data in result:
-                 for amphora in filter(
-                         # pylint: disable=cell-var-from-loop
-                         lambda amp: amp.id == amp_data.id,
-                         loadbalancer.amphorae):
-                     amphora.vrrp_port_id = amp_data.vrrp_port_id
-                     amphora.ha_port_id = amp_data.ha_port_id
-
-             self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
-         except Exception as e:
-             LOG.error("Failed to unplug VIP. Resources may still "
-                       "be in use from vip: %(vip)s due to error: %(except)s",
-                       {'vip': loadbalancer.vip.ip_address, 'except': str(e)})
-
-
- class UpdateVIPSecurityGroup(BaseNetworkTask):
-     """Task to setup SG for LB."""
-
-     def execute(self, loadbalancer_id):
-         """Task to setup SG for LB.
-
-         Task is idempotent and safe to retry.
-         """
-
-         LOG.debug("Setting up VIP SG for load balancer id: %s",
-                   loadbalancer_id)
-
-         loadbalancer = self.lb_repo.get(db_apis.get_session(),
-                                         id=loadbalancer_id)
-
-         sg_id = self.network_driver.update_vip_sg(loadbalancer,
-                                                   loadbalancer.vip)
-         LOG.info("Set up VIP SG %s for load balancer %s complete",
-                  sg_id if sg_id else "None", loadbalancer_id)
-         return sg_id
-
-
- class GetSubnetFromVIP(BaseNetworkTask):
-     """Task to plumb a VIP."""
-
-     def execute(self, loadbalancer):
-         """Plumb a vip to an amphora."""
-
-         LOG.debug("Getting subnet for LB: %s", loadbalancer.id)
-
-         subnet = self.network_driver.get_subnet(loadbalancer.vip.subnet_id)
-         LOG.info("Got subnet %s for load balancer %s",
-                  loadbalancer.vip.subnet_id if subnet else "None",
-                  loadbalancer.id)
-         return subnet
-
-
- class PlugVIPAmpphora(BaseNetworkTask):
-     """Task to plumb a VIP."""
-
-     def execute(self, loadbalancer, amphora, subnet):
-         """Plumb a vip to an amphora."""
-
-         LOG.debug("Plumbing VIP for amphora id: %s", amphora.id)
-
-         amp_data = self.network_driver.plug_aap_port(
-             loadbalancer, loadbalancer.vip, amphora, subnet)
-         return amp_data
-
-     def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
-         """Handle a failure to plumb a vip."""
-
-         if isinstance(result, failure.Failure):
-             return
-         LOG.warning("Unable to plug VIP for amphora id %s "
-                     "load balancer id %s",
-                     amphora.id, loadbalancer.id)
-
-         try:
-             amphora.vrrp_port_id = result.vrrp_port_id
-             amphora.ha_port_id = result.ha_port_id
-
-             self.network_driver.unplug_aap_port(loadbalancer.vip,
-                                                 amphora, subnet)
-         except Exception as e:
-             LOG.error('Failed to unplug AAP port. Resources may still be in '
-                       'use for VIP: %s due to error: %s', loadbalancer.vip,
-                       str(e))
-
-
- class UnplugVIP(BaseNetworkTask):
-     """Task to unplug the vip."""
-
-     def execute(self, loadbalancer):
-         """Unplug the vip."""
-
-         LOG.debug("Unplug vip on amphora")
-         try:
-             self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
-         except Exception:
-             LOG.exception("Unable to unplug vip from load balancer %s",
-                           loadbalancer.id)
-
-
- class AllocateVIP(BaseNetworkTask):
-     """Task to allocate a VIP."""
-
-     def execute(self, loadbalancer):
-         """Allocate a vip to the loadbalancer."""
-
-         LOG.debug("Allocating vip port id %s, subnet id %s, ip address %s for "
-                   "load balancer %s",
-                   loadbalancer.vip.port_id,
-                   loadbalancer.vip.subnet_id,
-                   loadbalancer.vip.ip_address,
-                   loadbalancer.id)
-         # allocated_vips returns (vip, add_vips), skipping the 2nd element as
-         # amphorav1 doesn't support add_vips
-         vip = self.network_driver.allocate_vip(loadbalancer)[0]
-         LOG.info("Allocated vip with port id %s, subnet id %s, ip address %s "
-                  "for load balancer %s",
-                  loadbalancer.vip.port_id,
-                  loadbalancer.vip.subnet_id,
-                  loadbalancer.vip.ip_address,
-                  loadbalancer.id)
-         return vip
-
-     def revert(self, result, loadbalancer, *args, **kwargs):
-         """Handle a failure to allocate vip."""
-
-         if isinstance(result, failure.Failure):
-             LOG.exception("Unable to allocate VIP")
-             return
-         vip = result
-         LOG.warning("Deallocating vip %s", vip.ip_address)
-         try:
-             self.network_driver.deallocate_vip(vip)
-         except Exception as e:
-             LOG.error("Failed to deallocate VIP. Resources may still "
-                       "be in use from vip: %(vip)s due to error: %(except)s",
-                       {'vip': vip.ip_address, 'except': str(e)})
-
-
- class AllocateVIPforFailover(AllocateVIP):
-     """Task to allocate/validate the VIP for a failover flow."""
-
-     def revert(self, result, loadbalancer, *args, **kwargs):
-         """Handle a failure to allocate vip."""
-
-         if isinstance(result, failure.Failure):
-             LOG.exception("Unable to allocate VIP")
-             return
-         vip = result
-         LOG.info("Failover revert is not deallocating vip %s because this is "
-                  "a failover.", vip.ip_address)
-
-
- class DeallocateVIP(BaseNetworkTask):
-     """Task to deallocate a VIP."""
-
-     def execute(self, loadbalancer):
-         """Deallocate a VIP."""
-
-         LOG.debug("Deallocating a VIP %s", loadbalancer.vip.ip_address)
-
-         # NOTE(blogan): this is kind of ugly but sufficient for now. Drivers
-         # will need access to the load balancer that the vip is/was attached
-         # to. However the data model serialization for the vip does not give a
-         # backref to the loadbalancer if accessed through the loadbalancer.
-         vip = loadbalancer.vip
-         vip.load_balancer = loadbalancer
-         self.network_driver.deallocate_vip(vip)
-
-
- class UpdateVIP(BaseNetworkTask):
-     """Task to update a VIP."""
-
-     def execute(self, loadbalancer):
-         LOG.debug("Updating VIP of load_balancer %s.", loadbalancer.id)
-
-         self.network_driver.update_vip(loadbalancer)
-
-
- class UpdateVIPForDelete(BaseNetworkTask):
-     """Task to update a VIP for listener delete flows."""
-
-     def execute(self, loadbalancer):
-         LOG.debug("Updating VIP for listener delete on load_balancer %s.",
-                   loadbalancer.id)
-
-         self.network_driver.update_vip(loadbalancer, for_delete=True)
-
-
- class GetAmphoraNetworkConfigs(BaseNetworkTask):
-     """Task to retrieve amphora network details."""
-
-     def execute(self, loadbalancer, amphora=None):
-         LOG.debug("Retrieving vip network details.")
-         return self.network_driver.get_network_configs(loadbalancer,
-                                                        amphora=amphora)
-
-
- class GetAmphoraNetworkConfigsByID(BaseNetworkTask):
-     """Task to retrieve amphora network details."""
-
-     def execute(self, loadbalancer_id, amphora_id=None):
-         LOG.debug("Retrieving vip network details.")
-         amp_repo = repositories.AmphoraRepository()
-         loadbalancer = self.lb_repo.get(db_apis.get_session(),
-                                         id=loadbalancer_id)
-         amphora = amp_repo.get(db_apis.get_session(), id=amphora_id)
-         return self.network_driver.get_network_configs(loadbalancer,
-                                                        amphora=amphora)
-
-
- class GetAmphoraeNetworkConfigs(BaseNetworkTask):
-     """Task to retrieve amphorae network details."""
-
-     def execute(self, loadbalancer_id):
-         LOG.debug("Retrieving vip network details.")
-         loadbalancer = self.lb_repo.get(db_apis.get_session(),
-                                         id=loadbalancer_id)
-         return self.network_driver.get_network_configs(loadbalancer)
-
-
- class FailoverPreparationForAmphora(BaseNetworkTask):
-     """Task to prepare an amphora for failover."""
-
-     def execute(self, amphora):
-         LOG.debug("Prepare amphora %s for failover.", amphora.id)
-
-         self.network_driver.failover_preparation(amphora)
-
-
- class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
-     """Task retrieving all the port ids on an amphora, except lb network."""
-
-     def execute(self, amphora):
-         LOG.debug("Retrieve all but the lb network port id on amphora %s.",
-                   amphora.id)
-
-         interfaces = self.network_driver.get_plugged_networks(
-             compute_id=amphora.compute_id)
-
-         ports = []
-         for interface_ in interfaces:
-             if interface_.port_id not in ports:
-                 port = self.network_driver.get_port(port_id=interface_.port_id)
-                 ips = port.fixed_ips
-                 lb_network = False
-                 for ip in ips:
-                     if ip.ip_address == amphora.lb_network_ip:
-                         lb_network = True
-                 if not lb_network:
-                     ports.append(port)
-
-         return ports
-
-
- class PlugPorts(BaseNetworkTask):
-     """Task to plug neutron ports into a compute instance."""
-
-     def execute(self, amphora, ports):
-         for port in ports:
-             LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
-                       '%(compute_id)s.',
-                       {'port_id': port.id, 'compute_id': amphora.compute_id})
-             self.network_driver.plug_port(amphora, port)
-
-
- class ApplyQos(BaseNetworkTask):
-     """Apply Quality of Services to the VIP"""
-
-     def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
-                                  is_revert=False, request_qos_id=None):
-         """Call network driver to apply QoS Policy on the vrrp ports."""
-         if not amps_data:
-             amps_data = loadbalancer.amphorae
-
-         amps_data = [amp
-                      for amp in amps_data
-                      if amp.status == constants.AMPHORA_ALLOCATED]
-
-         apply_qos = ApplyQosAmphora()
-         for amp_data in amps_data:
-             apply_qos._apply_qos_on_vrrp_port(loadbalancer, amp_data,
-                                               qos_policy_id)
-
-     def execute(self, loadbalancer, amps_data=None, update_dict=None):
-         """Apply qos policy on the vrrp ports which are related with vip."""
-         qos_policy_id = loadbalancer.vip.qos_policy_id
-         if not qos_policy_id and (
-             not update_dict or (
-                 'vip' not in update_dict or
-                 'qos_policy_id' not in update_dict['vip'])):
-             return
-         self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
-
-     def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
-                *args, **kwargs):
-         """Handle a failure to apply QoS to VIP"""
-         request_qos_id = loadbalancer.vip.qos_policy_id
-         orig_lb = self.task_utils.get_current_loadbalancer_from_db(
-             loadbalancer.id)
-         orig_qos_id = orig_lb.vip.qos_policy_id
-         if request_qos_id != orig_qos_id:
-             self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
-                                           is_revert=True,
-                                           request_qos_id=request_qos_id)
-
-
- class ApplyQosAmphora(BaseNetworkTask):
-     """Apply Quality of Services to the VIP"""
-
-     def _apply_qos_on_vrrp_port(self, loadbalancer, amp_data, qos_policy_id,
-                                 is_revert=False, request_qos_id=None):
-         """Call network driver to apply QoS Policy on the vrrp ports."""
-         try:
-             self.network_driver.apply_qos_on_port(qos_policy_id,
-                                                   amp_data.vrrp_port_id)
-         except Exception:
-             if not is_revert:
-                 raise
-             LOG.warning('Failed to undo qos policy %(qos_id)s '
-                         'on vrrp port: %(port)s from '
-                         'amphorae: %(amp)s',
-                         {'qos_id': request_qos_id,
-                          'port': amp_data.vrrp_port_id,
-                          'amp': [amp.id for amp in amp_data]})
-
-     def execute(self, loadbalancer, amp_data=None, update_dict=None):
-         """Apply qos policy on the vrrp ports which are related with vip."""
-         qos_policy_id = loadbalancer.vip.qos_policy_id
-         if not qos_policy_id and (
-             update_dict and (
-                 'vip' not in update_dict or
-                 'qos_policy_id' not in update_dict['vip'])):
-             return
-         self._apply_qos_on_vrrp_port(loadbalancer, amp_data, qos_policy_id)
-
-     def revert(self, result, loadbalancer, amp_data=None, update_dict=None,
-                *args, **kwargs):
-         """Handle a failure to apply QoS to VIP"""
-         try:
-             request_qos_id = loadbalancer.vip.qos_policy_id
-             orig_lb = self.task_utils.get_current_loadbalancer_from_db(
-                 loadbalancer.id)
-             orig_qos_id = orig_lb.vip.qos_policy_id
-             if request_qos_id != orig_qos_id:
-                 self._apply_qos_on_vrrp_port(loadbalancer, amp_data,
-                                              orig_qos_id, is_revert=True,
-                                              request_qos_id=request_qos_id)
-         except Exception as e:
-             LOG.error('Failed to remove QoS policy: %s from port: %s due '
-                       'to error: %s', orig_qos_id, amp_data.vrrp_port_id,
-                       str(e))
-
-
- class DeletePort(BaseNetworkTask):
-     """Task to delete a network port."""
-
-     @tenacity.retry(retry=tenacity.retry_if_exception_type(),
-                     stop=tenacity.stop_after_attempt(
-                         CONF.networking.max_retries),
-                     wait=tenacity.wait_exponential(
-                         multiplier=CONF.networking.retry_backoff,
-                         min=CONF.networking.retry_interval,
-                         max=CONF.networking.retry_max), reraise=True)
-     def execute(self, port_id, passive_failure=False):
-         """Delete the network port."""
-         if port_id is None:
-             return
-         if self.execute.retry.statistics.get(constants.ATTEMPT_NUMBER, 1) == 1:
-             LOG.debug("Deleting network port %s", port_id)
-         else:
-             LOG.warning('Retrying network port %s delete attempt %s of %s.',
-                         port_id,
-                         self.execute.retry.statistics[
-                             constants.ATTEMPT_NUMBER],
-                         self.execute.retry.stop.max_attempt_number)
-         # Let the Taskflow engine know we are working and alive
-         # Don't use get with a default for 'attempt_number', we need to fail
-         # if that number is missing.
-         self.update_progress(
-             self.execute.retry.statistics[constants.ATTEMPT_NUMBER] /
-             self.execute.retry.stop.max_attempt_number)
-         try:
-             self.network_driver.delete_port(port_id)
-         except Exception:
-             if (self.execute.retry.statistics[constants.ATTEMPT_NUMBER] !=
-                     self.execute.retry.stop.max_attempt_number):
-                 LOG.warning('Network port delete for port id: %s failed. '
-                             'Retrying.', port_id)
-                 raise
-             if passive_failure:
-                 LOG.exception('Network port delete for port ID: %s failed. '
-                               'This resource will be abandoned and should '
-                               'manually be cleaned up once the '
-                               'network service is functional.', port_id)
-                 # Let's at least attempt to disable it so if the instance
-                 # comes back from the dead it doesn't conflict with anything.
-                 try:
-                     self.network_driver.admin_down_port(port_id)
-                     LOG.info('Successfully disabled (admin down) network port '
-                              '%s that failed to delete.', port_id)
-                 except Exception:
-                     LOG.warning('Attempt to disable (admin down) network port '
-                                 '%s failed. The network service has failed. '
-                                 'Continuing.', port_id)
-             else:
-                 LOG.exception('Network port delete for port ID: %s failed. '
-                               'The network service has failed. '
-                               'Aborting and reverting.', port_id)
-                 raise
-
-
- class CreateVIPBasePort(BaseNetworkTask):
-     """Task to create the VIP base port for an amphora."""
-
-     @tenacity.retry(retry=tenacity.retry_if_exception_type(),
-                     stop=tenacity.stop_after_attempt(
-                         CONF.networking.max_retries),
-                     wait=tenacity.wait_exponential(
-                         multiplier=CONF.networking.retry_backoff,
-                         min=CONF.networking.retry_interval,
-                         max=CONF.networking.retry_max), reraise=True)
-     def execute(self, vip, vip_sg_id, amphora_id):
-         port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
-         fixed_ips = [{constants.SUBNET_ID: vip.subnet_id}]
-         sg_id = []
-         if vip_sg_id:
-             sg_id = [vip_sg_id]
-         port = self.network_driver.create_port(
-             vip.network_id, name=port_name, fixed_ips=fixed_ips,
-             secondary_ips=[vip.ip_address], security_group_ids=sg_id,
-             qos_policy_id=vip.qos_policy_id)
-         LOG.info('Created port %s with ID %s for amphora %s',
-                  port_name, port.id, amphora_id)
-         return port
-
-     def revert(self, result, vip, vip_sg_id, amphora_id, *args, **kwargs):
-         if isinstance(result, failure.Failure):
-             return
-         try:
-             port_name = constants.AMP_BASE_PORT_PREFIX + amphora_id
-             for port in result:
-                 self.network_driver.delete_port(port.id)
-                 LOG.info('Deleted port %s with ID %s for amphora %s due to a '
-                          'revert.', port_name, port.id, amphora_id)
-         except Exception as e:
-             LOG.error('Failed to delete port %s. Resources may still be in '
-                       'use for a port intended for amphora %s due to error '
-                       '%s. Search for a port named %s',
-                       result, amphora_id, str(e), port_name)
-
-
- class AdminDownPort(BaseNetworkTask):
-
-     def execute(self, port_id):
-         try:
-             self.network_driver.set_port_admin_state_up(port_id, False)
-         except base.PortNotFound:
-             return
-         for i in range(CONF.networking.max_retries):
-             port = self.network_driver.get_port(port_id)
-             if port.status == constants.DOWN:
-                 LOG.debug('Disabled port: %s', port_id)
-                 return
-             LOG.debug('Port %s is %s instead of DOWN, waiting.',
-                       port_id, port.status)
-             time.sleep(CONF.networking.retry_interval)
-         LOG.error('Port %s failed to go DOWN. Port status is still %s. '
-                   'Ignoring and continuing.', port_id, port.status)
-
-     def revert(self, result, port_id, *args, **kwargs):
-         if isinstance(result, failure.Failure):
-             return
-         try:
-             self.network_driver.set_port_admin_state_up(port_id, True)
-         except Exception as e:
-             LOG.error('Failed to bring port %s admin up on revert due to: %s.',
-                       port_id, str(e))
-
-
- class GetVIPSecurityGroupID(BaseNetworkTask):
-
-     def execute(self, loadbalancer_id):
-         sg_name = utils.get_vip_security_group_name(loadbalancer_id)
-         try:
-             security_group = self.network_driver.get_security_group(sg_name)
-             if security_group:
-                 return security_group.id
-         except base.SecurityGroupNotFound:
-             with excutils.save_and_reraise_exception() as ctxt:
-                 if self.network_driver.sec_grp_enabled:
-                     LOG.error('VIP security group %s was not found.', sg_name)
-                 else:
-                     ctxt.reraise = False
-         return None