nautobot 2.4.5__py3-none-any.whl → 2.4.6__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- nautobot/core/api/mixins.py +10 -0
- nautobot/core/celery/encoders.py +2 -2
- nautobot/core/forms/fields.py +21 -5
- nautobot/core/forms/utils.py +1 -0
- nautobot/core/jobs/bulk_actions.py +1 -1
- nautobot/core/management/commands/generate_test_data.py +1 -1
- nautobot/core/models/name_color_content_types.py +9 -0
- nautobot/core/models/validators.py +7 -0
- nautobot/core/settings.py +0 -14
- nautobot/core/settings.yaml +0 -28
- nautobot/core/tables.py +6 -1
- nautobot/core/templates/generic/object_retrieve.html +1 -1
- nautobot/core/testing/api.py +18 -0
- nautobot/core/tests/nautobot_config.py +0 -2
- nautobot/core/tests/runner.py +17 -140
- nautobot/core/tests/test_api.py +4 -4
- nautobot/core/tests/test_authentication.py +83 -4
- nautobot/core/tests/test_forms.py +11 -8
- nautobot/core/tests/test_graphql.py +9 -0
- nautobot/core/tests/test_jobs.py +7 -0
- nautobot/core/ui/object_detail.py +31 -0
- nautobot/dcim/factory.py +2 -0
- nautobot/dcim/filters/__init__.py +5 -0
- nautobot/dcim/forms.py +17 -1
- nautobot/dcim/migrations/0068_alter_softwareimagefile_download_url.py +19 -0
- nautobot/dcim/migrations/0069_softwareimagefile_external_integration.py +25 -0
- nautobot/dcim/models/devices.py +9 -2
- nautobot/dcim/tables/devices.py +1 -0
- nautobot/dcim/templates/dcim/softwareimagefile_retrieve.html +4 -0
- nautobot/dcim/tests/test_api.py +74 -31
- nautobot/dcim/tests/test_filters.py +2 -0
- nautobot/dcim/tests/test_models.py +65 -0
- nautobot/dcim/tests/test_views.py +3 -0
- nautobot/extras/forms/forms.py +7 -3
- nautobot/extras/plugins/marketplace_manifest.yml +18 -0
- nautobot/extras/tables.py +4 -5
- nautobot/extras/templates/extras/inc/panel_changelog.html +1 -1
- nautobot/extras/templates/extras/inc/panel_jobhistory.html +1 -1
- nautobot/extras/templates/extras/status.html +1 -37
- nautobot/extras/tests/integration/test_notes.py +1 -1
- nautobot/extras/tests/test_api.py +22 -7
- nautobot/extras/tests/test_changelog.py +4 -4
- nautobot/extras/tests/test_customfields.py +3 -0
- nautobot/extras/tests/test_plugins.py +19 -13
- nautobot/extras/tests/test_relationships.py +9 -0
- nautobot/extras/tests/test_tags.py +2 -2
- nautobot/extras/tests/test_views.py +15 -6
- nautobot/extras/urls.py +1 -30
- nautobot/extras/views.py +10 -54
- nautobot/ipam/tables.py +6 -2
- nautobot/ipam/templates/ipam/namespace_retrieve.html +0 -41
- nautobot/ipam/templates/ipam/service.html +2 -46
- nautobot/ipam/templates/ipam/service_edit.html +1 -17
- nautobot/ipam/templates/ipam/service_retrieve.html +7 -0
- nautobot/ipam/tests/migration/__init__.py +0 -0
- nautobot/ipam/tests/migration/test_migrations.py +510 -0
- nautobot/ipam/tests/test_api.py +66 -36
- nautobot/ipam/tests/test_filters.py +0 -10
- nautobot/ipam/tests/test_views.py +44 -2
- nautobot/ipam/urls.py +2 -47
- nautobot/ipam/utils/migrations.py +185 -152
- nautobot/ipam/utils/testing.py +177 -0
- nautobot/ipam/views.py +95 -157
- nautobot/project-static/docs/code-reference/nautobot/apps/models.html +47 -0
- nautobot/project-static/docs/code-reference/nautobot/apps/tables.html +18 -0
- nautobot/project-static/docs/code-reference/nautobot/apps/ui.html +63 -0
- nautobot/project-static/docs/development/apps/api/testing.html +0 -87
- nautobot/project-static/docs/development/apps/migration/dependency-updates.html +1 -1
- nautobot/project-static/docs/development/core/best-practices.html +3 -3
- nautobot/project-static/docs/development/core/getting-started.html +78 -107
- nautobot/project-static/docs/development/core/release-checklist.html +1 -1
- nautobot/project-static/docs/development/core/style-guide.html +1 -1
- nautobot/project-static/docs/development/core/testing.html +24 -198
- nautobot/project-static/docs/media/user-guide/administration/getting-started/nautobot-cloud.png +0 -0
- nautobot/project-static/docs/objects.inv +0 -0
- nautobot/project-static/docs/overview/application_stack.html +1 -1
- nautobot/project-static/docs/release-notes/version-2.4.html +226 -1
- nautobot/project-static/docs/search/search_index.json +1 -1
- nautobot/project-static/docs/sitemap.xml +290 -290
- nautobot/project-static/docs/sitemap.xml.gz +0 -0
- nautobot/project-static/docs/user-guide/administration/configuration/settings.html +2 -48
- nautobot/project-static/docs/user-guide/administration/guides/permissions.html +71 -0
- nautobot/project-static/docs/user-guide/administration/installation/http-server.html +3 -1
- nautobot/project-static/docs/user-guide/administration/installation/index.html +257 -16
- nautobot/project-static/docs/user-guide/administration/tools/nautobot-server.html +1 -1
- nautobot/project-static/docs/user-guide/administration/upgrading/upgrading.html +2 -2
- nautobot/project-static/docs/user-guide/core-data-model/dcim/softwareimagefile.html +4 -0
- nautobot/project-static/docs/user-guide/feature-guides/contacts-and-teams.html +11 -11
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/creating-devices.html +8 -8
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/creating-location-types-and-locations.html +1 -0
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/interfaces.html +40 -25
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/ipam.html +4 -4
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/platforms.html +1 -1
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/search-bar.html +77 -5
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/tenants.html +1 -1
- nautobot/project-static/docs/user-guide/feature-guides/getting-started/vlans-and-vlan-groups.html +0 -1
- nautobot/project-static/docs/user-guide/feature-guides/git-data-source.html +1 -1
- nautobot/project-static/docs/user-guide/index.html +89 -2
- nautobot/project-static/docs/user-guide/platform-functionality/webhook.html +207 -122
- nautobot/virtualization/forms.py +20 -0
- nautobot/virtualization/templates/virtualization/clustergroup.html +1 -39
- nautobot/virtualization/templates/virtualization/clustertype.html +1 -0
- nautobot/virtualization/tests/test_api.py +14 -3
- nautobot/virtualization/tests/test_views.py +10 -2
- nautobot/virtualization/urls.py +10 -93
- nautobot/virtualization/views.py +33 -72
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/METADATA +6 -5
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/RECORD +113 -108
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/WHEEL +1 -1
- nautobot/core/tests/performance_baselines.yml +0 -8900
- nautobot/ipam/tests/test_migrations.py +0 -462
- /nautobot/ipam/templates/ipam/{namespace_ipaddresses.html → namespace_ip_addresses.html} +0 -0
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/LICENSE.txt +0 -0
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/NOTICE +0 -0
- {nautobot-2.4.5.dist-info → nautobot-2.4.6.dist-info}/entry_points.txt +0 -0
nautobot/ipam/utils/migrations.py

@@ -1,46 +1,84 @@
 import collections
 import sys
+from time import monotonic
 
 from django.core.exceptions import ValidationError
 from django.db import models
 import netaddr
 
+from nautobot.ipam.constants import IPV4_BYTE_LENGTH, IPV6_BYTE_LENGTH
+
 BASE_NAME = "Cleanup Namespace"
 DESCRIPTION = "Created by Nautobot 2.0 IPAM data migrations."
 GLOBAL_NS = "Global"
 
 
+class TimerContextManager:
+    def __init__(self, message, indent=""):
+        self.message = message
+        self.indent = indent
+
+    def __enter__(self):
+        self.start_time = monotonic()
+        print(f"{self.indent}>>> {self.message}...")
+        return self
+
+    def __exit__(self, *args, **kwargs):
+        self.elapsed_time = monotonic() - self.start_time
+        print(f"{self.indent} ... completed (elapsed time: {self.elapsed_time:.1f} seconds)")
+
+
+def is_prefix(obj):
+    return obj.__class__.__name__ == "Prefix"
+
+
+def is_ipaddress(obj):
+    return obj.__class__.__name__ == "IPAddress"
+
+
 def process_namespaces(apps, schema_editor):
+    """
+    Migration entry point for 1.x to 2.x IPAM data migration.
+    """
     print("\n", end="")
 
-
-
+    with TimerContextManager("Checking whether any Interface or VMInterface has IPs with differing VRFs"):
+        check_interface_vrfs(apps)
 
     # Prefix Broadcast is a derived field, so we should update it before we start
-
+    with TimerContextManager("Verifying all Prefix.broadcast values"):
+        ensure_correct_prefix_broadcast(apps)
 
     # Cleanup Prefixes and IPAddresses version fields
-
+    with TimerContextManager("Setting Prefix.version and IPAddress.version values"):
+        add_prefix_and_ip_address_version(apps)
 
     # VRFs
-
+    with TimerContextManager("Processing VRFs"):
+        process_vrfs(apps)
 
     # IPAddresses
-
+    with TimerContextManager("Processing IPAddresses"):
+        process_ip_addresses(apps)
 
     # Prefixes
-
-
+    with TimerContextManager("Processing duplicate Prefixes"):
+        process_prefix_duplicates(apps)
+    with TimerContextManager("Reparenting Prefixes"):
+        reparent_prefixes(apps)
 
     # Make another pass across all VRFs to duplicate it if it has prefixes
     # in another namespace (non-unique VRFs with duplicate Prefixes)
-
+    with TimerContextManager("Copying VRFs to cleanup Namespaces as needed"):
+        copy_vrfs_to_cleanup_namespaces(apps)
 
     # [VM]Interfaces
-
+    with TimerContextManager("Processing Interfaces and VM Interfaces"):
+        process_interfaces(apps)
 
     # VRF-Prefix M2M
-
+    with TimerContextManager("Processing VRF to Prefix many-to-many"):
+        process_vrfs_prefixes_m2m(apps)
 
 
 def check_interface_vrfs(apps):

@@ -105,36 +143,37 @@ def process_vrfs(apps):
     Returns:
         None
     """
+    Namespace = apps.get_model("ipam", "Namespace")
     VRF = apps.get_model("ipam", "VRF")
+
+    global_ns = Namespace.objects.get(name=GLOBAL_NS)
     vrfs = VRF.objects.all().order_by("name", "rd")
     unique_non_empty_vrfs = vrfs.filter(enforce_unique=True).exclude(ip_addresses__isnull=True, prefixes__isnull=True)
     # At the point in the migration where we iterate through vrfs in global_ns_vrfs, every vrf that
     # has already been processed has been moved to a new namespace. Anything left in the global
     # namespace has yet to be processed which is why we're iterating through this on the second
     # loop.
-    global_ns_vrfs = vrfs.filter(
+    global_ns_vrfs = vrfs.filter(namespace=global_ns)
 
     # Case 0: VRFs with enforce_unique move to their own Namespace.
-    for vrf in unique_non_empty_vrfs:
+    for vrf in unique_non_empty_vrfs.iterator():
         if "test" not in sys.argv:
             print(f">>> Processing migration for VRF {vrf.name!r}, Namespace {vrf.namespace.name!r}")
         vrf.namespace = create_vrf_namespace(apps, vrf)
         vrf.save()
         vrf.prefixes.update(namespace=vrf.namespace)
-
-        print(f" VRF {vrf.name!r} migrated to Namespace {vrf.namespace.name!r}")
+        print(f" VRF {vrf.name!r} migrated to Namespace {vrf.namespace.name!r}")
 
     # Case 00: VRFs with duplicate names or prefixes move to a Cleanup Namespace.
     # Case 1 is not included here because it is a no-op.
-    for vrf in global_ns_vrfs.annotate(prefix_count=models.Count("prefixes")).order_by("-prefix_count"):
+    for vrf in global_ns_vrfs.annotate(prefix_count=models.Count("prefixes")).order_by("-prefix_count").iterator():
         if "test" not in sys.argv:
-            print(f">>> Processing migration for VRF {vrf.name!r}, Namespace {
-
-        vrf.namespace
-
-        if vrf.namespace != original_namespace:
+            print(f">>> Processing migration for VRF {vrf.name!r}, Namespace {global_ns.name!r}")
+        vrf.namespace = get_next_vrf_cleanup_namespace(apps, vrf, global_ns=global_ns)
+        if vrf.namespace != global_ns:
+            vrf.save()
             vrf.prefixes.update(namespace=vrf.namespace)
-            print(f" VRF {vrf.name!r} migrated from Namespace {
+            print(f" VRF {vrf.name!r} migrated from Namespace {global_ns.name!r} to {vrf.namespace.name!r}")
 
 
 def add_prefix_and_ip_address_version(apps):

@@ -152,17 +191,21 @@ def add_prefix_and_ip_address_version(apps):
 
     if "test" not in sys.argv:
         print(">>> Populating Prefix.ip_version field")
-
-
-
-
+    Prefix.objects.annotate(address_len=models.functions.Length(models.F("network"))).filter(
+        address_len=IPV4_BYTE_LENGTH
+    ).update(ip_version=4)
+    Prefix.objects.annotate(address_len=models.functions.Length(models.F("network"))).filter(
+        address_len=IPV6_BYTE_LENGTH
+    ).update(ip_version=6)
 
     if "test" not in sys.argv:
         print(">>> Populating IPAddress.ip_version field")
-
-
-
-
+    IPAddress.objects.annotate(address_len=models.functions.Length(models.F("host"))).filter(
+        address_len=IPV4_BYTE_LENGTH
+    ).update(ip_version=4)
+    IPAddress.objects.annotate(address_len=models.functions.Length(models.F("host"))).filter(
+        address_len=IPV6_BYTE_LENGTH
+    ).update(ip_version=6)
 
 
 def process_ip_addresses(apps):

@@ -186,53 +229,52 @@ def process_ip_addresses(apps):
     Namespace = apps.get_model("ipam", "Namespace")
     Prefix = apps.get_model("ipam", "Prefix")
 
-
-
-
-    for
-
-
+    with TimerContextManager("Reparenting individual IPAddresses to a close-enough parent Prefix", indent=" "):
+        # For IPs that don't have an exact obvious parent prefix, find close-enough matches.
+        # Explicitly set the parent for those that were found and save them.
+        for ip in IPAddress.objects.filter(parent__isnull=True).order_by("-vrf", "-tenant").iterator():
+            potential_parent = get_closest_parent(ip, Prefix.objects.all())
+            if potential_parent is not None:
+                ip.parent = potential_parent
                 ip.save()
-                break
-
-    # For IPs with no discovered parent, create one and assign it to the IP.
-    global_ns = Namespace.objects.get(name=GLOBAL_NS)
-    for orphaned_ip in IPAddress.objects.filter(parent__isnull=True):
-        ip_repr = str(validate_cidr(apps, orphaned_ip))
-        if "test" not in sys.argv:
-            print(f">>> Processing Parent migration for orphaned IPAddress {ip_repr!r}")
 
-
-
-
-
-
-
-
-        new_parent = potential_parents.first()
+    with TimerContextManager("Reparenting orphaned IPAddresses by creating new Prefixes as needed", indent=" "):
+        # For IPs with no discovered parent, create one and assign it to the IP.
+        global_ns = Namespace.objects.get(name=GLOBAL_NS)
+        for orphaned_ip in IPAddress.objects.filter(parent__isnull=True).select_related("tenant", "vrf").iterator():
+            ip_repr = str(validate_cidr(orphaned_ip))
+            if "test" not in sys.argv:
+                print(f">>> Processing Parent migration for orphaned IPAddress {ip_repr!r}")
 
-
-
-
-
-
-            ip_version=orphaned_ip.ip_version,
-            network=network,
-            broadcast=broadcast,
-            tenant=orphaned_ip.tenant,
-            vrf=orphaned_ip.vrf,
-            prefix_length=prefix_length,
-            namespace=orphaned_ip.vrf.namespace if orphaned_ip.vrf else global_ns,
-            description=DESCRIPTION,
+            new_parent_cidr = generate_parent_prefix(apps, orphaned_ip)
+            network = new_parent_cidr.network
+            prefix_length = new_parent_cidr.prefixlen
+            potential_parents = Prefix.objects.filter(network=network, prefix_length=prefix_length).exclude(
+                ip_addresses__host=orphaned_ip.host
            )
-
-
+            new_parent = potential_parents.first()
+            if new_parent is None:
+                broadcast = new_parent_cidr[-1]
+                # This can result in duplicate Prefixes being created in the global_ns but that will be
+                # cleaned up subsequently in `process_prefix_duplicates`.
+                new_parent = Prefix.objects.create(
+                    ip_version=orphaned_ip.ip_version,
+                    network=network,
+                    broadcast=broadcast,
+                    tenant=orphaned_ip.tenant,
+                    vrf=orphaned_ip.vrf,
+                    prefix_length=prefix_length,
+                    namespace=orphaned_ip.vrf.namespace if orphaned_ip.vrf else global_ns,
+                    description=DESCRIPTION,
+                )
+            orphaned_ip.parent = new_parent
+            orphaned_ip.save()
 
-
-
-
-
-
+            parent_repr = str(validate_cidr(new_parent))
+            if "test" not in sys.argv:
+                print(
+                    f" IPAddress {ip_repr!r} migrated to Parent Prefix {parent_repr!r} in Namespace {new_parent.namespace.name!r}"
+                )
 
     # By this point we should arrive at NO orphaned IPAddress objects.
     if IPAddress.objects.filter(parent__isnull=True).exists():

@@ -274,7 +316,9 @@ def process_prefix_duplicates(apps):
         if "test" not in sys.argv:
             print(f">>> Processing Namespace migration for duplicate Prefix {dupe!r}")
         network, prefix_length = dupe.split("/")
-        objects = Prefix.objects.filter(network=network, prefix_length=prefix_length, namespace=ns)
+        objects = Prefix.objects.filter(network=network, prefix_length=prefix_length, namespace=ns).select_related(
+            "tenant"
+        )
         # Leave the last instance of the Prefix in the original Namespace
         last_prefix = objects.filter(tenant_id=tenant_ids_sorted.last()).last()
 

@@ -308,18 +352,14 @@ def reparent_prefixes(apps):
 
     if "test" not in sys.argv:
         print("\n>>> Processing Prefix parents, please standby...")
-    for pfx in Prefix.objects.all().order_by("-prefix_length", "tenant"):
-
-
-            if pfx.namespace != parent.namespace:
-                raise ValidationError("Prefix and parent are in different Namespaces")
+    for pfx in Prefix.objects.all().order_by("-prefix_length", "tenant").select_related("namespace").iterator():
+        parent = get_closest_parent(pfx, pfx.namespace.prefixes.all())
+        if parent is not None:
             # TODO: useful but potentially very noisy. Do migrations have a verbosity option?
             # if "test" not in sys.argv:
             #     print(f">>> {pfx.network}/{pfx.prefix_length} parent: {parent.network}/{parent.prefix_length}")
             pfx.parent = parent
             pfx.save()
-        except Prefix.DoesNotExist:
-            continue
 
 
 def copy_vrfs_to_cleanup_namespaces(apps):

@@ -339,14 +379,11 @@ def copy_vrfs_to_cleanup_namespaces(apps):
     VRF = apps.get_model("ipam", "VRF")
     Namespace = apps.get_model("ipam", "Namespace")
 
-    for vrf in VRF.objects.
-        if not vrf.prefixes.exclude(namespace=vrf.namespace).exists():
-            continue
-
+    for vrf in VRF.objects.select_related("namespace", "tenant").iterator():
         namespaces = (
             vrf.prefixes.exclude(namespace=vrf.namespace).order_by().values_list("namespace", flat=True).distinct()
         )
-        for namespace_pk in namespaces:
+        for namespace_pk in namespaces.iterator():
             namespace = Namespace.objects.get(pk=namespace_pk)
             if "test" not in sys.argv:
                 print(f">>> Copying VRF {vrf.name!r} to namespace {namespace.name!r}")

@@ -386,11 +423,11 @@ def process_interfaces(apps):
     # Case 2: Interface has one or more IP address assigned to it with no more than 1 distinct associated VRF (none is excluded)
    # The interface's VRF foreign key should be set to the VRF of any related IP Address with a non-null VRF.
    # The interface's parent device or virtual machine should adopt an assocation to the VRF (VRFDeviceAssignment) as well.
-    for ifc in ip_interfaces:
+    for ifc in ip_interfaces.select_related("device").iterator():
         if "test" not in sys.argv:
             print(f">>> Processing VRF migration for numbered Interface {ifc.name!r}")
         # Set the Interface VRF to that of the first assigned IPAddress.
-        first_ip = ifc.ip_addresses.filter(vrf__isnull=False).first()
+        first_ip = ifc.ip_addresses.filter(vrf__isnull=False).select_related("vrf").first()
 
         ifc_vrf = first_ip.vrf
         ifc.vrf = ifc_vrf

@@ -403,11 +440,11 @@ def process_interfaces(apps):
             print(f" VRF {ifc_vrf.name!r} migrated from IPAddress {first_ip.host!r} to Interface {ifc.name!r}")
 
     # VirtualMachine should adopt an association to the VRF (VRFDeviceAssignment) as well.
-    for ifc in ip_vminterfaces:
+    for ifc in ip_vminterfaces.select_related("virtual_machine").iterator():
         if "test" not in sys.argv:
             print(f">>> Processing VRF migration for numbered VMInterface {ifc.name!r}")
         # Set the VMInterface VRF to that of the first assigned IPAddress.
-        first_ip = ifc.ip_addresses.filter(vrf__isnull=False).first()
+        first_ip = ifc.ip_addresses.filter(vrf__isnull=False).select_related("vrf").first()
 
         ifc_vrf = first_ip.vrf
         ifc.vrf = ifc_vrf

@@ -437,7 +474,7 @@ def process_vrfs_prefixes_m2m(apps):
 
     vrfs_with_prefixes = VRF.objects.filter(prefixes__isnull=False).order_by().distinct()
 
-    for vrf in vrfs_with_prefixes:
+    for vrf in vrfs_with_prefixes.iterator():
         if "test" not in sys.argv:
             print(f" Converting Prefix relationships to VRF {vrf.name} to M2M.")
         vrf.prefixes_m2m.set(vrf.prefixes.all())

@@ -448,28 +485,30 @@ def get_prefixes(qs):
     Given a queryset, return the prefixes as 2-tuples of (network, prefix_length).
 
     Args:
-        qs (QuerySet): QuerySet of Prefix objects
+        qs (QuerySet, set): QuerySet of Prefix objects, or set of values already processed by this function
 
     Returns:
-
+        set
     """
-
+    if isinstance(qs, set):
+        return qs
+    return set(qs.values_list("network", "prefix_length"))
 
 
 def compare_prefix_querysets(a, b):
     """
-    Compare two QuerySets of Prefix objects and return the set intersection
+    Compare two QuerySets of Prefix objects and return whether the set intersection has any common networks.
 
     Args:
-        a (QuerySet): Left-side QuerySet
-        b (QuerySet): Right-side QuerySet
+        a (QuerySet, set): Left-side QuerySet, or set of values derived from a queryset by get_prefixes()
+        b (QuerySet, set): Right-side QuerySet, or set of values derived from a queryset by get_prefixes()
 
     Returns:
-
+        bool
     """
-    set_a =
-    set_b =
-    return set_a.intersection(set_b)
+    set_a = get_prefixes(a)
+    set_b = get_prefixes(b)
+    return bool(set_a.intersection(set_b))
 
 
 def create_vrf_namespace(apps, vrf):

@@ -528,11 +567,11 @@ def generate_parent_prefix(apps, address):
     Returns:
         netaddr.IPNetwork
     """
-    cidr = validate_cidr(
+    cidr = validate_cidr(address)
     return cidr.cidr
 
 
-def get_closest_parent(
+def get_closest_parent(obj, qs):
     """
     This is forklifted from `Prefix.objects.get_closest_parent()` so that it can safely be used in
     migrations.

@@ -540,19 +579,16 @@ def get_closest_parent(apps, obj, qs):
     Return the closest matching parent Prefix for a `cidr` even if it doesn't exist in the database.
 
     Args:
-        obj: Prefix/IPAddress instance
+        obj (IPAddress, Prefix): Prefix/IPAddress instance
         qs (QuerySet): QuerySet of Prefix objects
 
     Returns:
-        Prefix or
+        Prefix or None
     """
     # Validate that it's a real CIDR
-    cidr = validate_cidr(
+    cidr = validate_cidr(obj)
     broadcast = str(cidr.broadcast or cidr.ip)
 
-    Prefix = apps.get_model("ipam", "Prefix")
-    IPAddress = apps.get_model("ipam", "IPAddress")
-
     # Prepare the queryset filter
     lookup_kwargs = {
         "ip_version": cidr.version,

@@ -560,9 +596,8 @@ def get_closest_parent(apps, obj, qs):
         "broadcast__gte": broadcast,
     }
 
-    if
+    if is_prefix(obj):
         lookup_kwargs["prefix_length__lt"] = cidr.prefixlen
-        qs = qs.exclude(id=obj.id)
     else:
         lookup_kwargs["prefix_length__lte"] = cidr.prefixlen
 

@@ -572,10 +607,10 @@ def get_closest_parent(apps, obj, qs):
         qs.filter(**lookup_kwargs)
         .annotate(
             custom_sort_order=models.Case(
-                models.When(
-                models.When(tenant__isnull=True,
-                models.When(
-                models.When(
+                models.When(tenant_id=obj.tenant_id, vrf_id=obj.vrf_id, then=models.Value(1)),
+                models.When(tenant__isnull=True, vrf_id=obj.vrf_id, then=models.Value(2)),
+                models.When(tenant_id=obj.tenant_id, vrf__isnull=True, then=models.Value(3)),
+                models.When(vrf_id=obj.vrf_id, then=models.Value(4)),
                 models.When(tenant__isnull=True, vrf__isnull=True, then=models.Value(5)),
                 models.When(vrf__isnull=True, then=models.Value(6)),
                 default=models.Value(7),

@@ -584,21 +619,19 @@ def get_closest_parent(apps, obj, qs):
         .order_by("-prefix_length", "custom_sort_order")
     )
 
-    if
+    if is_ipaddress(obj):
         # IP should not fall back to less specific prefixes
-
-
-
-
+        first_ancestor = possible_ancestors.only("prefix_length").first()
+        if not first_ancestor:
+            return None
+        prefix_length = first_ancestor.prefix_length
+        possible_ancestors = possible_ancestors.filter(prefix_length=prefix_length).exclude(ip_addresses__host=obj.host)
 
     # If we've got any matches, the first one is our closest parent.
-
-        return possible_ancestors[0]
-    except IndexError:
-        raise Prefix.DoesNotExist(f"Could not determine parent Prefix for {cidr}")
+    return possible_ancestors.first()
 
 
-def get_next_vrf_cleanup_namespace(apps, vrf):
+def get_next_vrf_cleanup_namespace(apps, vrf, global_ns):
     """
     Try to get the next available Cleanup Namespace based on `vrf` found in the "Global" Namespace.
 

@@ -610,6 +643,7 @@ def get_next_vrf_cleanup_namespace(apps, vrf):
     Args:
         apps: Django apps module
         vrf (VRF): VRF instance
+        global_ns (Namespace): Global Namespace.
 
     Returns:
         Namespace

@@ -618,21 +652,22 @@ def get_next_vrf_cleanup_namespace(apps, vrf):
     VRF = apps.get_model("ipam", "VRF")
 
     counter = 1
-    vrf_prefixes = vrf.prefixes.all()
+    vrf_prefixes = get_prefixes(vrf.prefixes.all())
 
-    global_ns = Namespace.objects.get(name=GLOBAL_NS)
-    global_ns_prefixes = global_ns.prefixes.exclude(vrf=vrf)
-    global_dupe_prefixes = compare_prefix_querysets(vrf_prefixes, global_ns_prefixes)
     global_dupe_vrfs = VRF.objects.filter(namespace=global_ns, name=vrf.name).exclude(pk=vrf.pk).exists()
 
-    if
-
-
-
-
-
-
-
+    if global_dupe_vrfs:
+        if "test" not in sys.argv:
+            print(f" VRF {vrf.name} has duplicate VRF name with NS {global_ns.name}")
+    else:
+        global_ns_prefixes = global_ns.prefixes.exclude(vrf=vrf)
+        global_dupe_prefixes = compare_prefix_querysets(vrf_prefixes, global_ns_prefixes)
+        if global_dupe_prefixes:
+            if "test" not in sys.argv:
+                print(f" VRF {vrf.name} has duplicate prefixes with NS {global_ns.name}")
+        else:
+            # No duplicate VRF or duplicate prefixes - just stay in global Namespace
+            return global_ns
 
     # Iterate non-enforce_unique VRFS
     # - Compare duplicate prefixes for each VRF

@@ -646,17 +681,18 @@ def get_next_vrf_cleanup_namespace(apps, vrf):
         if created:
             return namespace
 
-        ns_prefixes = namespace.prefixes.exclude(vrf=vrf)
-        dupe_prefixes = compare_prefix_querysets(vrf_prefixes, ns_prefixes)
         dupe_vrfs = VRF.objects.filter(namespace=namespace, name=vrf.name).exclude(pk=vrf.pk).exists()
+        if dupe_vrfs:
+            if "test" not in sys.argv:
+                print(f" VRF {vrf.name} has duplicate VRF name with NS {namespace.name}")
+            counter += 1
+            continue
 
-
-
-
-
-
-
-        if any([dupe_prefixes, dupe_vrfs]):
+        ns_prefixes = namespace.prefixes.exclude(vrf=vrf)
+        dupe_prefixes = compare_prefix_querysets(vrf_prefixes, ns_prefixes)
+        if dupe_prefixes:
+            if "test" not in sys.argv:
+                print(f" VRF {vrf.name} has duplicate prefixes with NS {namespace.name}")
             counter += 1
             continue
 

@@ -704,7 +740,7 @@ def get_next_prefix_cleanup_namespace(apps, prefix, base_name=BASE_NAME):
     return namespace
 
 
-def validate_cidr(
+def validate_cidr(value):
     """
     Validate whether `value` is a valid IPv4/IPv6 CIDR.
 

@@ -714,12 +750,9 @@ def validate_cidr(apps, value):
     Returns:
         netaddr.IPNetwork
     """
-
-    Prefix = apps.get_model("ipam", "Prefix")
-
-    if isinstance(value, IPAddress):
+    if is_ipaddress(value):
         value = f"{value.host}/{value.prefix_length}"
-    elif
+    elif is_prefix(value):
         value = f"{value.network}/{value.prefix_length}"
     else:
         value = str(value)

@@ -742,7 +775,7 @@ def ensure_correct_prefix_broadcast(apps):
     """
     Prefix = apps.get_model("ipam", "Prefix")
 
-    for prefix in Prefix.objects.all():
+    for prefix in Prefix.objects.all().iterator():
         true_broadcast = str(netaddr.IPNetwork(f"{prefix.network}/{prefix.prefix_length}")[-1])
         if prefix.broadcast != true_broadcast:
             if "test" not in sys.argv: