gns3-server 3.0.0b3__py3-none-any.whl → 3.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gns3-server might be problematic.

Files changed (93)
  1. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/METADATA +32 -31
  2. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/RECORD +93 -89
  3. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/WHEEL +1 -1
  4. gns3server/api/routes/compute/cloud_nodes.py +1 -1
  5. gns3server/api/routes/compute/docker_nodes.py +3 -0
  6. gns3server/api/routes/compute/nat_nodes.py +1 -1
  7. gns3server/api/routes/compute/vmware_nodes.py +1 -1
  8. gns3server/api/routes/compute/vpcs_nodes.py +10 -4
  9. gns3server/api/routes/controller/projects.py +13 -1
  10. gns3server/api/routes/controller/users.py +2 -2
  11. gns3server/appliances/almalinux.gns3a +6 -6
  12. gns3server/appliances/aruba-arubaoscx.gns3a +39 -0
  13. gns3server/appliances/cisco-csr1000v.gns3a +28 -2
  14. gns3server/appliances/cisco-iou-l2.gns3a +2 -2
  15. gns3server/appliances/cisco-iou-l3.gns3a +2 -2
  16. gns3server/appliances/cisco-vWLC.gns3a +29 -1
  17. gns3server/appliances/debian.gns3a +28 -0
  18. gns3server/appliances/fortiadc.gns3a +46 -4
  19. gns3server/appliances/fortianalyzer.gns3a +42 -0
  20. gns3server/appliances/fortiauthenticator.gns3a +58 -2
  21. gns3server/appliances/fortigate.gns3a +42 -0
  22. gns3server/appliances/fortimanager.gns3a +42 -0
  23. gns3server/appliances/fortiweb.gns3a +56 -0
  24. gns3server/appliances/hbcd-pe.gns3a +62 -0
  25. gns3server/appliances/juniper-junos-space.gns3a +3 -2
  26. gns3server/appliances/juniper-vmx-legacy.gns3a +1 -1
  27. gns3server/appliances/juniper-vmx-vcp.gns3a +1 -1
  28. gns3server/appliances/juniper-vmx-vfp.gns3a +2 -1
  29. gns3server/appliances/juniper-vqfx-pfe.gns3a +1 -1
  30. gns3server/appliances/juniper-vqfx-re.gns3a +2 -1
  31. gns3server/appliances/juniper-vrr.gns3a +1 -1
  32. gns3server/appliances/juniper-vsrx.gns3a +2 -1
  33. gns3server/appliances/mikrotik-chr.gns3a +30 -99
  34. gns3server/appliances/nixos.gns3a +52 -0
  35. gns3server/appliances/opnsense.gns3a +13 -0
  36. gns3server/appliances/pan-vm-fw.gns3a +26 -0
  37. gns3server/appliances/reactos.gns3a +10 -10
  38. gns3server/appliances/security-onion.gns3a +27 -3
  39. gns3server/appliances/truenas.gns3a +104 -0
  40. gns3server/appliances/ubuntu-cloud.gns3a +35 -20
  41. gns3server/appliances/ubuntu-docker.gns3a +1 -1
  42. gns3server/appliances/ubuntu-gui.gns3a +13 -0
  43. gns3server/appliances/viptela-edge-genericx86-64.gns3a +28 -2
  44. gns3server/appliances/viptela-smart-genericx86-64.gns3a +27 -1
  45. gns3server/appliances/viptela-vmanage-genericx86-64.gns3a +32 -4
  46. gns3server/appliances/vyos.gns3a +95 -98
  47. gns3server/compute/base_node.py +1 -0
  48. gns3server/compute/docker/__init__.py +8 -2
  49. gns3server/compute/docker/docker_vm.py +56 -2
  50. gns3server/compute/docker/resources/init.sh +5 -2
  51. gns3server/compute/dynamips/__init__.py +0 -4
  52. gns3server/compute/dynamips/nodes/router.py +20 -0
  53. gns3server/compute/notification_manager.py +2 -2
  54. gns3server/compute/qemu/qemu_vm.py +26 -15
  55. gns3server/config_samples/gns3_server.conf +13 -3
  56. gns3server/configs/iou_l2_base_startup-config.txt +1 -1
  57. gns3server/configs/iou_l3_base_startup-config.txt +1 -1
  58. gns3server/controller/__init__.py +27 -13
  59. gns3server/controller/appliance_manager.py +9 -6
  60. gns3server/controller/export_project.py +27 -23
  61. gns3server/controller/import_project.py +24 -3
  62. gns3server/controller/node.py +8 -2
  63. gns3server/controller/notification.py +4 -4
  64. gns3server/controller/project.py +96 -8
  65. gns3server/controller/snapshot.py +3 -8
  66. gns3server/controller/symbols.py +1 -1
  67. gns3server/controller/topology.py +31 -2
  68. gns3server/crash_report.py +1 -1
  69. gns3server/db/models/templates.py +1 -0
  70. gns3server/db/tasks.py +1 -1
  71. gns3server/db_migrations/versions/9a5292aa4389_add_mac_address_field_in_docker_.py +27 -0
  72. gns3server/schemas/compute/docker_nodes.py +1 -0
  73. gns3server/schemas/compute/ethernet_switch_nodes.py +1 -1
  74. gns3server/schemas/config.py +3 -0
  75. gns3server/schemas/controller/templates/cloud_templates.py +2 -2
  76. gns3server/schemas/controller/templates/docker_templates.py +4 -3
  77. gns3server/schemas/controller/templates/dynamips_templates.py +5 -5
  78. gns3server/schemas/controller/templates/ethernet_hub_templates.py +1 -1
  79. gns3server/schemas/controller/templates/ethernet_switch_templates.py +2 -2
  80. gns3server/schemas/controller/templates/iou_templates.py +2 -2
  81. gns3server/schemas/controller/templates/qemu_templates.py +12 -12
  82. gns3server/schemas/controller/templates/virtualbox_templates.py +4 -5
  83. gns3server/schemas/controller/templates/vmware_templates.py +4 -4
  84. gns3server/schemas/controller/templates/vpcs_templates.py +2 -2
  85. gns3server/static/web-ui/index.html +3 -3
  86. gns3server/static/web-ui/{main.f3840f9b1c0240e6.js → main.ed82697b58d803e7.js} +1 -1
  87. gns3server/utils/__init__.py +32 -0
  88. gns3server/utils/asyncio/aiozipstream.py +15 -11
  89. gns3server/utils/hostname.py +53 -0
  90. gns3server/version.py +1 -1
  91. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/LICENSE +0 -0
  92. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/entry_points.txt +0 -0
  93. {gns3_server-3.0.0b3.dist-info → gns3_server-3.0.0rc2.dist-info}/top_level.txt +0 -0

gns3server/controller/__init__.py

@@ -30,7 +30,7 @@ except ImportError:
 
 
 from ..config import Config
-from ..utils import parse_version
+from ..utils import parse_version, md5sum
 from ..utils.images import default_images_directory
 
 from .project import Project
@@ -91,7 +91,7 @@ class Controller:
         if server_config.enable_ssl:
             self._ssl_context = self._create_ssl_context(server_config)
 
-        protocol = server_config.protocol
+        protocol = server_config.protocol.value
         if self._ssl_context and protocol != "https":
             log.warning(f"Protocol changed to 'https' for local compute because SSL is enabled")
             protocol = "https"
@@ -270,13 +270,18 @@
                 log.error(f"Cannot read IOU license file '{iourc_path}': {e}")
         self._iou_license_settings["license_check"] = iou_config.license_check
 
-        previous_version = controller_vars.get("version")
-        log.info("Comparing controller version {} with config version {}".format(__version__, previous_version))
-        if not previous_version or \
-                parse_version(__version__.split("+")[0]) > parse_version(previous_version.split("+")[0]):
-            self._appliance_manager.install_builtin_appliances()
-        elif not os.listdir(self._appliance_manager.builtin_appliances_path()):
-            self._appliance_manager.install_builtin_appliances()
+        # install the built-in appliances if needed
+        if Config.instance().settings.Server.install_builtin_appliances:
+            previous_version = controller_vars.get("version")
+            log.info("Comparing controller version {} with config version {}".format(__version__, previous_version))
+            builtin_appliances_path = self._appliance_manager.builtin_appliances_path()
+            if not previous_version or \
+                    parse_version(__version__.split("+")[0]) > parse_version(previous_version.split("+")[0]):
+                self._appliance_manager.install_builtin_appliances()
+            elif not os.listdir(builtin_appliances_path):
+                self._appliance_manager.install_builtin_appliances()
+            else:
+                log.info(f"Built-in appliances are installed in '{builtin_appliances_path}'")
 
         self._appliance_manager.appliances_etag = controller_vars.get("appliances_etag")
         self._appliance_manager.load_appliances()
@@ -303,12 +308,21 @@
         except OSError as e:
             log.error(str(e))
 
+
     @staticmethod
-    def install_resource_files(dst_path, resource_name):
+    def install_resource_files(dst_path, resource_name, upgrade_resources=True):
         """
         Install files from resources to user's file system
         """
 
+        def should_copy(src, dst, upgrade_resources):
+            if not os.path.exists(dst):
+                return True
+            if upgrade_resources is False:
+                return False
+            # copy the resource if it is different
+            return md5sum(src) != md5sum(dst)
+
         if hasattr(sys, "frozen") and sys.platform.startswith("win"):
             resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), resource_name))
             for filename in os.listdir(resource_path):
@@ -317,7 +331,7 @@
         else:
             for entry in importlib_resources.files('gns3server').joinpath(resource_name).iterdir():
                 full_path = os.path.join(dst_path, entry.name)
-                if entry.is_file() and not os.path.exists(full_path):
+                if entry.is_file() and should_copy(str(entry), full_path, upgrade_resources):
                     log.debug(f'Installing {resource_name} resource file "{entry.name}" to "{full_path}"')
                     shutil.copy(str(entry), os.path.join(dst_path, entry.name))
                 elif entry.is_dir():
@@ -333,7 +347,7 @@
         dst_path = self.configs_path()
         log.info(f"Installing base configs in '{dst_path}'")
         try:
-            Controller.install_resource_files(dst_path, "configs")
+            Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
         except OSError as e:
             log.error(f"Could not install base config files to {dst_path}: {e}")
 
@@ -346,7 +360,7 @@
         dst_path = self.disks_path()
         log.info(f"Installing built-in disks in '{dst_path}'")
        try:
-            Controller.install_resource_files(dst_path, "disks")
+            Controller.install_resource_files(dst_path, "disks", upgrade_resources=False)
         except OSError as e:
             log.error(f"Could not install disk files to {dst_path}: {e}")
 
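
The md5sum helper imported above is what lets install_resource_files() decide whether a previously installed resource should be overwritten. A minimal standalone sketch of that decision, using hashlib directly rather than the project's own md5sum() helper (whose exact signature is not shown in this diff):

    import hashlib
    import os

    def file_md5(path, chunk_size=1024 * 1024):
        # hash the file in chunks so large disk images do not have to fit in memory
        digest = hashlib.md5()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    def should_copy(src, dst, upgrade_resources=True):
        if not os.path.exists(dst):
            return True    # resource has never been installed
        if not upgrade_resources:
            return False   # configs and disks are never overwritten (see the calls above)
        # overwrite only when the shipped resource differs from the installed copy
        return file_md5(src) != file_md5(dst)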

gns3server/controller/appliance_manager.py

@@ -95,15 +95,18 @@ class ApplianceManager:
         os.makedirs(appliances_path, exist_ok=True)
         return appliances_path
 
-    def builtin_appliances_path(self, delete_first=False):
+    def builtin_appliances_path(self):
         """
         Get the built-in appliance storage directory
         """
 
-        appname = vendor = "GNS3"
-        appliances_dir = os.path.join(platformdirs.user_data_dir(appname, vendor, roaming=True), "appliances")
-        if delete_first:
-            shutil.rmtree(appliances_dir, ignore_errors=True)
+        resources_path = Config.instance().settings.Server.resources_path
+        if not resources_path:
+            appname = vendor = "GNS3"
+            resources_path = platformdirs.user_data_dir(appname, vendor, roaming=True)
+        else:
+            resources_path = os.path.expanduser(resources_path)
+        appliances_dir = os.path.join(resources_path, "appliances")
         os.makedirs(appliances_dir, exist_ok=True)
         return appliances_dir
 
@@ -112,7 +115,7 @@
         At startup we copy the built-in appliances files.
         """
 
-        dst_path = self.builtin_appliances_path(delete_first=True)
+        dst_path = self.builtin_appliances_path()
         log.info(f"Installing built-in appliances in '{dst_path}'")
         from . import Controller
         try:
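
builtin_appliances_path() now honours a Server.resources_path setting (the config schema and sample gns3_server.conf also change in this release) instead of always using the per-user data directory and wiping it first. A sketch of the lookup, mirroring the hunk above as a standalone function:

    import os
    import platformdirs

    def builtin_appliances_dir(resources_path=None):
        # resources_path would come from Config.instance().settings.Server.resources_path
        if not resources_path:
            # default: the per-user data dir, e.g. ~/.local/share/GNS3 on Linux
            resources_path = platformdirs.user_data_dir("GNS3", "GNS3", roaming=True)
        else:
            resources_path = os.path.expanduser(resources_path)
        appliances_dir = os.path.join(resources_path, "appliances")
        os.makedirs(appliances_dir, exist_ok=True)
        return appliances_dir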

gns3server/controller/export_project.py

@@ -16,6 +16,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import os
+import sys
 import json
 import asyncio
 import aiofiles
@@ -39,7 +40,7 @@ async def export_project(
     temporary_dir,
     include_images=False,
     include_snapshots=False,
-    keep_compute_id=False,
+    keep_compute_ids=False,
     allow_all_nodes=False,
     reset_mac_addresses=False,
 ):
@@ -54,9 +55,9 @@
     :param temporary_dir: A temporary dir where to store intermediate data
     :param include_images: save OS images to the zip file
     :param include_snapshots: save snapshots to the zip file
-    :param keep_compute_id: If false replace all compute id by local (standard behavior for .gns3project to make it portable)
-    :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable
-    :param reset_mac_addresses: Reset MAC addresses for every nodes.
+    :param keep_compute_ids: If false replace all compute IDs by local (standard behavior for .gns3project to make it portable)
+    :param allow_all_nodes: Allow all nodes type to be included in the zip even if not portable
+    :param reset_mac_addresses: Reset MAC addresses for each node.
     """
 
     # To avoid issue with data not saved we disallow the export of a running project
@@ -77,7 +78,7 @@
                 os.path.join(project._path, file),
                 zstream,
                 include_images,
-                keep_compute_id,
+                keep_compute_ids,
                 allow_all_nodes,
                 temporary_dir,
                 reset_mac_addresses,
@@ -89,14 +90,15 @@
         files = [f for f in files if _is_exportable(os.path.join(root, f), include_snapshots)]
         for file in files:
             path = os.path.join(root, file)
-            # check if we can export the file
-            try:
-                open(path).close()
-            except OSError as e:
-                msg = f"Could not export file {path}: {e}"
-                log.warning(msg)
-                project.emit_notification("log.warning", {"message": msg})
-                continue
+            if not os.path.islink(path):
+                try:
+                    # check if we can export the file
+                    open(path).close()
+                except OSError as e:
+                    msg = f"Could not export file {path}: {e}"
+                    log.warning(msg)
+                    project.emit_notification("log.warning", {"message": msg})
+                    continue
             # ignore the .gns3 file
             if file.endswith(".gns3"):
                 continue
@@ -150,7 +152,10 @@ def _patch_mtime(path):
     :param path: file path
     """
 
-    st = os.stat(path)
+    if sys.platform.startswith("win"):
+        # only UNIX type platforms
+        return
+    st = os.stat(path, follow_symlinks=False)
     file_date = datetime.fromtimestamp(st.st_mtime)
     if file_date.year < 1980:
         new_mtime = file_date.replace(year=1980).timestamp()
@@ -166,10 +171,6 @@ def _is_exportable(path, include_snapshots=False):
     if include_snapshots is False and path.endswith("snapshots"):
         return False
 
-    # do not export symlinks
-    if os.path.islink(path):
-        return False
-
     # do not export directories of snapshots
     if include_snapshots is False and "{sep}snapshots{sep}".format(sep=os.path.sep) in path:
         return False
@@ -193,7 +194,7 @@
 
 
 async def _patch_project_file(
-    project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir, reset_mac_addresses
+    project, path, zstream, include_images, keep_compute_ids, allow_all_nodes, temporary_dir, reset_mac_addresses
 ):
     """
     Patch a project file (.gns3) to export a project.
@@ -225,16 +226,19 @@
                 if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware"]:
                     raise ControllerError("Projects with a {} node cannot be exported".format(node["node_type"]))
 
-                if not keep_compute_id:
+                if not keep_compute_ids:
                     node["compute_id"] = "local" # To make project portable all node by default run on local
 
-                if "properties" in node and node["node_type"] != "docker":
+                if "properties" in node:
                     for prop, value in node["properties"].items():
 
                         # reset the MAC address
                         if reset_mac_addresses and prop in ("mac_addr", "mac_address"):
                             node["properties"][prop] = None
 
+                        if node["node_type"] == "docker":
+                            continue
+
                         if node["node_type"] == "iou":
                             if not prop == "path":
                                 continue
@@ -243,13 +247,13 @@
                         if value is None or value.strip() == "":
                             continue
 
-                        if not keep_compute_id: # If we keep the original compute we can keep the image path
+                        if not keep_compute_ids: # If we keep the original compute we can keep the image path
                             node["properties"][prop] = os.path.basename(value)
 
                         if include_images is True:
                             images.append({"compute_id": compute_id, "image": value, "image_type": node["node_type"]})
 
-    if not keep_compute_id:
+    if not keep_compute_ids:
         topology["topology"][
             "computes"
         ] = [] # Strip compute information because could contain secret info like password
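
Symlinks inside a project are no longer skipped at export time; writing them into the archive is handled by the updated aiozipstream (changed in this release but not shown here). A minimal sketch of how a symbolic link is usually stored in a zip with the standard zipfile module, which is the layout the import side below expects to find:

    import os
    import stat
    import zipfile

    def write_symlink(zf: zipfile.ZipFile, link_path: str, arcname: str):
        info = zipfile.ZipInfo(arcname)
        # the Unix mode lives in the upper 16 bits of external_attr; mark the member as a symlink
        info.external_attr = (stat.S_IFLNK | 0o777) << 16
        # the member's payload is simply the link target
        zf.writestr(info, os.readlink(link_path))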

gns3server/controller/import_project.py

@@ -17,6 +17,7 @@
 
 import os
 import sys
+import stat
 import json
 import uuid
 import shutil
@@ -40,7 +41,7 @@ Handle the import of project from a .gns3project
 """
 
 
-async def import_project(controller, project_id, stream, location=None, name=None, keep_compute_id=False,
+async def import_project(controller, project_id, stream, location=None, name=None, keep_compute_ids=False,
                          auto_start=False, auto_open=False, auto_close=True):
     """
     Import a project contain in a zip file
@@ -52,7 +53,7 @@ async def import_project(controller, project_id, stream, location=None, name=Non
     :param stream: A io.BytesIO of the zipfile
     :param location: Directory for the project if None put in the default directory
     :param name: Wanted project name, generate one from the .gns3 if None
-    :param keep_compute_id: If true do not touch the compute id
+    :param keep_compute_ids: keep compute IDs unchanged
 
     :returns: Project
     """
@@ -95,6 +96,7 @@ async def import_project(controller, project_id, stream, location=None, name=Non
     try:
         with zipfile_zstd.ZipFile(stream) as zip_file:
             await wait_run_in_executor(zip_file.extractall, path)
+            _create_symbolic_links(zip_file, path)
     except zipfile_zstd.BadZipFile:
         raise ControllerError("Cannot extract files from GNS3 project (invalid zip)")
 
@@ -126,7 +128,7 @@ async def import_project(controller, project_id, stream, location=None, name=Non
         drawing["drawing_id"] = str(uuid.uuid4())
 
     # Modify the compute id of the node depending of compute capacity
-    if not keep_compute_id:
+    if not keep_compute_ids:
         # For some VM type we move them to the GNS3 VM if possible
         # unless it's a linux host without GNS3 VM
         if not sys.platform.startswith("linux") or controller.has_compute("vm"):
@@ -184,6 +186,24 @@ async def import_project(controller, project_id, stream, location=None, name=Non
     project = await controller.load_project(dot_gns3_path, load=False)
     return project
 
+def _create_symbolic_links(zip_file, path):
+    """
+    Manually create symbolic links (if any) because ZipFile does not support it.
+
+    :param zip_file: ZipFile instance
+    :param path: project location
+    """
+
+    for zip_info in zip_file.infolist():
+        if stat.S_ISLNK(zip_info.external_attr >> 16):
+            symlink_target = zip_file.read(zip_info.filename).decode()
+            symlink_path = os.path.join(path, zip_info.filename)
+            try:
+                # remove the regular file and replace it by a symbolic link
+                os.remove(symlink_path)
+                os.symlink(symlink_target, symlink_path)
+            except OSError as e:
+                raise ControllerError(f"Cannot create symbolic link: {e}")
 
 def _move_node_file(path, old_id, new_id):
     """
@@ -269,6 +289,7 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
                 with open(snapshot_path, "rb") as f:
                     with zipfile_zstd.ZipFile(f) as zip_file:
                         await wait_run_in_executor(zip_file.extractall, tmpdir)
+                        _create_symbolic_links(zip_file, tmpdir)
             except OSError as e:
                 raise ControllerError(f"Cannot open snapshot '{os.path.basename(snapshot)}': {e}")
             except zipfile_zstd.BadZipFile:
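
_create_symbolic_links() relies on the Unix st_mode being kept in the upper 16 bits of each member's external_attr. A small sketch of the same detection applied to an existing archive (the file name is illustrative):

    import stat
    import zipfile

    with zipfile.ZipFile("project.gns3project") as zf:
        for info in zf.infolist():
            mode = info.external_attr >> 16   # upper 16 bits hold the Unix mode
            if stat.S_ISLNK(mode):
                # the member content is the link target
                print(info.filename, "->", zf.read(info.filename).decode())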

gns3server/controller/node.py

@@ -29,8 +29,8 @@ from .controller_error import (
 )
 from .ports.port_factory import PortFactory, StandardPortFactory, DynamipsPortFactory
 from ..utils.images import images_directories
+from ..utils import macaddress_to_int, int_to_macaddress
 from ..config import Config
-from ..utils.qt import qt_font_to_style
 
 
 import logging
@@ -758,7 +758,13 @@ class Node:
                         break
                 port_name = f"eth{adapter_number}"
                 port_name = custom_adapter_settings.get("port_name", port_name)
-                self._ports.append(PortFactory(port_name, 0, adapter_number, 0, "ethernet", short_name=port_name))
+                mac_address = custom_adapter_settings.get("mac_address")
+                if not mac_address and "mac_address" in self._properties:
+                    mac_address = int_to_macaddress(macaddress_to_int(self._properties["mac_address"]) + adapter_number)
+
+                port = PortFactory(port_name, 0, adapter_number, 0, "ethernet", short_name=port_name)
+                port.mac_address = mac_address
+                self._ports.append(port)
         elif self._node_type in ("ethernet_switch", "ethernet_hub"):
             # Basic node we don't want to have adapter number
             port_number = 0
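
Each adapter now gets a MAC address derived from the node's base address plus the adapter number. The macaddress_to_int()/int_to_macaddress() helpers are added to gns3server/utils/__init__.py (+32 lines, not shown in this diff); a hedged sketch of what such helpers typically do:

    def macaddress_to_int(mac: str) -> int:
        # parse "02:42:ac:00:00:00" as an integer
        return int(mac.replace(":", ""), 16)

    def int_to_macaddress(value: int) -> str:
        raw = format(value, "012x")
        return ":".join(raw[i:i + 2] for i in range(0, 12, 2))

    base = "02:42:ac:00:00:00"
    # adapter 2 gets base + 2, as computed in the hunk above
    print(int_to_macaddress(macaddress_to_int(base) + 2))   # 02:42:ac:00:00:02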

gns3server/controller/notification.py

@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import os
+import asyncio
 from contextlib import contextmanager
 
 from gns3server.utils.notification_queue import NotificationQueue
@@ -73,7 +73,7 @@ class Notification:
         """
 
         for controller_listener in self._controller_listeners:
-            controller_listener.put_nowait((action, event, {}))
+            asyncio.get_running_loop().call_soon_threadsafe(controller_listener.put_nowait, (action, event, {}))
 
     def project_has_listeners(self, project_id):
         """
@@ -134,7 +134,7 @@ class Notification:
         except KeyError:
             return
         for listener in project_listeners:
-            listener.put_nowait((action, event, {}))
+            asyncio.get_running_loop().call_soon_threadsafe(listener.put_nowait, (action, event, {}))
 
     def _send_event_to_all_projects(self, action, event):
         """
@@ -146,4 +146,4 @@
         """
         for project_listeners in self._project_listeners.values():
             for listener in project_listeners:
-                listener.put_nowait((action, event, {}))
+                asyncio.get_running_loop().call_soon_threadsafe(listener.put_nowait, (action, event, {}))
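
The listener queues are asyncio queues consumed by the event loop; the diff does not state the exact motivation, but call_soon_threadsafe() is the documented way to feed an asyncio.Queue when the producer is not running on the loop's thread, and it also wakes the consumer reliably. A minimal sketch of that pattern (the loop reference is captured while the loop is running):

    import asyncio
    import threading

    async def main():
        queue = asyncio.Queue()
        loop = asyncio.get_running_loop()

        def emit_from_thread():
            # hand the put_nowait() call over to the event-loop thread
            loop.call_soon_threadsafe(queue.put_nowait, ("node.updated", {"node_id": "n1"}, {}))

        threading.Thread(target=emit_from_thread).start()
        print(await queue.get())

    asyncio.run(main())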

gns3server/controller/project.py

@@ -15,6 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import sys
 import re
 import os
 import json
@@ -26,6 +27,7 @@ import asyncio
 import aiofiles
 import tempfile
 import zipfile
+import pathlib
 
 from uuid import UUID, uuid4
 
@@ -41,8 +43,9 @@ from ..utils.application_id import get_next_application_id
 from ..utils.asyncio.pool import Pool
 from ..utils.asyncio import locking
 from ..utils.asyncio import aiozipstream
+from ..utils.asyncio import wait_run_in_executor
 from .export_project import export_project
-from .import_project import import_project
+from .import_project import import_project, _move_node_file
 from .controller_error import ControllerError, ControllerForbiddenError, ControllerNotFoundError
 
 import logging
@@ -210,7 +213,11 @@ class Project:
         if os.path.exists(snapshot_dir):
             for snap in os.listdir(snapshot_dir):
                 if snap.endswith(".gns3project"):
-                    snapshot = Snapshot(self, filename=snap)
+                    try:
+                        snapshot = Snapshot(self, filename=snap)
+                    except ValueError:
+                        log.error("Invalid snapshot file: {}".format(snap))
+                        continue
                     self._snapshots[snapshot.id] = snapshot
 
         # Create the project on demand on the compute node
@@ -491,7 +498,7 @@ class Project:
 
         if base_name is None:
             return None
-        base_name = re.sub(r"[ ]", "", base_name)
+        base_name = re.sub(r"[ ]", "", base_name) # remove spaces in node name
         if base_name in self._allocated_node_names:
             base_name = re.sub(r"[0-9]+$", "{0}", base_name)
 
@@ -1058,13 +1065,15 @@ class Project:
         """
         Duplicate a project
 
-        It's the save as feature of the 1.X. It's implemented on top of the
-        export / import features. It will generate a gns3p and reimport it.
-        It's a little slower but we have only one implementation to maintain.
+        Implemented on top of the export / import features. It will generate a gns3p and reimport it.
+
+        NEW: fast duplication is used if possible (when there are no remote computes).
+        If not, the project is exported and reimported as explained above.
 
         :param name: Name of the new project. A new one will be generated in case of conflicts
         :param reset_mac_addresses: Reset MAC addresses for the new project
         """
+
         # If the project was not open we open it temporary
         previous_status = self._status
         if self._status == "closed":
@@ -1072,6 +1081,18 @@ class Project:
 
         self.dump()
         assert self._status != "closed"
+
+        try:
+            proj = await self._fast_duplication(name, reset_mac_addresses)
+            if proj:
+                if previous_status == "closed":
+                    await self.close()
+                return proj
+            else:
+                log.info("Fast duplication failed, fallback to normal duplication")
+        except Exception as e:
+            raise ControllerError(f"Cannot duplicate project: {str(e)}")
+
         try:
             begin = time.time()
@@ -1087,7 +1108,7 @@ class Project:
                         zstream,
                         self,
                         tmpdir,
-                        keep_compute_id=True,
+                        keep_compute_ids=True,
                         allow_all_nodes=True,
                         reset_mac_addresses=reset_mac_addresses,
                     )
@@ -1106,7 +1127,7 @@ class Project:
                     str(uuid.uuid4()),
                    f,
                     name=name,
-                    keep_compute_id=True
+                    keep_compute_ids=True
                 )
 
         log.info(f"Project '{project.name}' duplicated in {time.time() - begin:.4f} seconds")
@@ -1310,3 +1331,70 @@ class Project:
 
     def __repr__(self):
         return f"<gns3server.controller.Project {self._name} {self._id}>"
+
+    async def _fast_duplication(self, name=None, reset_mac_addresses=True):
+        """
+        Fast duplication of a project.
+
+        Copy the project files directly rather than in an import-export fashion.
+
+        :param name: Name of the new project. A new one will be generated in case of conflicts
+        :param location: Parent directory of the new project
+        :param reset_mac_addresses: Reset MAC addresses for the new project
+        """
+
+        # remote replication is not supported with remote computes
+        for compute in self.computes:
+            if compute.id != "local":
+                log.warning("Fast duplication is not supported with remote compute: '{}'".format(compute.id))
+                return None
+        # work dir
+        p_work = pathlib.Path(self.path).parent.absolute()
+        t0 = time.time()
+        new_project_id = str(uuid.uuid4())
+        new_project_path = p_work.joinpath(new_project_id)
+        # copy dir
+        await wait_run_in_executor(shutil.copytree, self.path, new_project_path.as_posix(), symlinks=True, ignore_dangling_symlinks=True)
+        log.info("Project content copied from '{}' to '{}' in {}s".format(self.path, new_project_path, time.time() - t0))
+        topology = json.loads(new_project_path.joinpath('{}.gns3'.format(self.name)).read_bytes())
+        project_name = name or topology["name"]
+        # If the project name is already used we generate a new one
+        project_name = self.controller.get_free_project_name(project_name)
+        topology["name"] = project_name
+        # To avoid unexpected behavior (project start without manual operations just after import)
+        topology["auto_start"] = False
+        topology["auto_open"] = False
+        topology["auto_close"] = False
+        # change node ID
+        node_old_to_new = {}
+        for node in topology["topology"]["nodes"]:
+            new_node_id = str(uuid.uuid4())
+            if "node_id" in node:
+                node_old_to_new[node["node_id"]] = new_node_id
+                _move_node_file(new_project_path, node["node_id"], new_node_id)
+                node["node_id"] = new_node_id
+            if reset_mac_addresses:
+                if "properties" in node:
+                    for prop, value in node["properties"].items():
+                        # reset the MAC address
+                        if prop in ("mac_addr", "mac_address"):
+                            node["properties"][prop] = None
+        # change link ID
+        for link in topology["topology"]["links"]:
+            link["link_id"] = str(uuid.uuid4())
+            for node in link["nodes"]:
+                node["node_id"] = node_old_to_new[node["node_id"]]
+        # Generate new drawings id
+        for drawing in topology["topology"]["drawings"]:
+            drawing["drawing_id"] = str(uuid.uuid4())
+
+        # And we dump the updated.gns3
+        dot_gns3_path = new_project_path.joinpath('{}.gns3'.format(project_name))
+        topology["project_id"] = new_project_id
+        with open(dot_gns3_path, "w+") as f:
+            json.dump(topology, f, indent=4)
+
+        os.remove(new_project_path.joinpath('{}.gns3'.format(self.name)))
+        project = await self.controller.load_project(dot_gns3_path, load=False)
+        log.info("Project '{}' fast duplicated in {:.4f} seconds".format(project.name, time.time() - t0))
+        return project
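
_fast_duplication() avoids the export/import round trip by copying the project directory off the event loop and then rewriting the IDs in the copied .gns3 file. The copy keeps symbolic links as links and tolerates dangling ones; roughly what the wait_run_in_executor() call above amounts to (a sketch, not the project's helper itself):

    import asyncio
    import shutil

    async def copy_project_dir(src: str, dst: str):
        loop = asyncio.get_running_loop()
        # run the blocking copy in the default executor so the event loop stays responsive
        await loop.run_in_executor(
            None,
            lambda: shutil.copytree(src, dst, symlinks=True, ignore_dangling_symlinks=True),
        )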

gns3server/controller/snapshot.py

@@ -59,14 +59,9 @@ class Snapshot:
                 + ".gns3project"
             )
         else:
-            self._name = filename.split("_")[0]
+            self._name = filename.rsplit("_", 2)[0]
             datestring = filename.replace(self._name + "_", "").split(".")[0]
-            try:
-                self._created_at = (
-                    datetime.strptime(datestring, "%d%m%y_%H%M%S").replace(tzinfo=timezone.utc).timestamp()
-                )
-            except ValueError:
-                self._created_at = datetime.now(timezone.utc)
+            self._created_at = (datetime.strptime(datestring, "%d%m%y_%H%M%S").replace(tzinfo=timezone.utc).timestamp())
         self._path = os.path.join(project.path, "snapshots", filename)
 
     @property
@@ -104,7 +99,7 @@ class Snapshot:
         with tempfile.TemporaryDirectory(dir=snapshot_directory) as tmpdir:
             # Do not compress the snapshots
             with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
-                await export_project(zstream, self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
+                await export_project(zstream, self._project, tmpdir, keep_compute_ids=True, allow_all_nodes=True)
             async with aiofiles.open(self.path, "wb") as f:
                 async for chunk in zstream:
                     await f.write(chunk)
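
Snapshot files are named "<name>_<ddmmyy>_<HHMMSS>.gns3project". The old split("_")[0] truncated any snapshot name that itself contains underscores, which then corrupted the date string; that parse failure was previously silenced by the removed try/except and is now surfaced as the ValueError caught in project.py above. A quick worked example:

    filename = "my_test_snapshot_011223_120000.gns3project"

    filename.split("_")[0]                                # "my" -> wrong name, garbage datestring
    name = filename.rsplit("_", 2)[0]                     # "my_test_snapshot"
    datestring = filename.replace(name + "_", "").split(".")[0]   # "011223_120000"
    # datetime.strptime(datestring, "%d%m%y_%H%M%S") -> 2023-12-01 12:00:00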

gns3server/controller/symbols.py

@@ -45,7 +45,7 @@ class Symbols:
         self._symbol_size_cache = {}
 
         self._server_config = Config.instance().settings.Server
-        self._current_theme = self._server_config.default_symbol_theme
+        self._current_theme = self._server_config.default_symbol_theme.value
         self._themes = BUILTIN_SYMBOL_THEMES
 
     @property

gns3server/controller/topology.py

@@ -35,6 +35,7 @@ from .drawing import Drawing
 from .node import Node
 from .link import Link
 
+from gns3server.utils.hostname import is_ios_hostname_valid, is_rfc1123_hostname_valid, to_rfc1123_hostname, to_ios_hostname
 from gns3server.schemas.controller.topology import Topology
 from gns3server.schemas.compute.dynamips_nodes import DynamipsCreate
 
@@ -43,7 +44,7 @@ import logging
 log = logging.getLogger(__name__)
 
 
-GNS3_FILE_FORMAT_REVISION = 9
+GNS3_FILE_FORMAT_REVISION = 10
 
 
 class DynamipsNodeValidation(DynamipsCreate):
@@ -186,10 +187,14 @@ def load_topology(path):
     if variables:
         topo["variables"] = [var for var in variables if var.get("name")]
 
+    # Version before GNS3 3.0
+    if topo["revision"] < 10:
+        topo = _convert_2_2_0(topo, path)
+
     try:
         _check_topology_schema(topo, path)
     except ControllerError as e:
-        log.error("Can't load the topology %s", path)
+        log.error("Can't load the topology %s, please check using the debug mode...", path)
         raise e
 
     if changed:
@@ -201,6 +206,30 @@
     return topo
 
 
+def _convert_2_2_0(topo, topo_path):
+    """
+    Convert topologies from GNS3 2.2.x to 3.0
+
+    Changes:
+     * Convert Qemu and Docker node names to be a valid RFC1123 hostnames.
+     * Convert Dynamips and IOU node names to be a valid IOS hostnames.
+    """
+
+    topo["revision"] = 10
+
+    for node in topo.get("topology", {}).get("nodes", []):
+        if "properties" in node:
+            if node["node_type"] in ("qemu", "docker") and not is_rfc1123_hostname_valid(node["name"]):
+                new_name = to_rfc1123_hostname(node["name"])
+                log.info(f"Convert node name {node['name']} to {new_name} (RFC1123)")
+                node["name"] = new_name
+            if node["node_type"] in ("dynamips", "iou") and not is_ios_hostname_valid(node["name"] ):
+                new_name = to_ios_hostname(node["name"])
+                log.info(f"Convert node name {node['name']} to {new_name} (IOS)")
+                node["name"] = new_name
+    return topo
+
+
 def _convert_2_1_0(topo, topo_path):
     """
     Convert topologies from GNS3 2.1.x to 2.2
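
Topology revision 10 renames nodes so that Qemu/Docker names are valid RFC 1123 hostnames and Dynamips/IOU names are valid IOS hostnames. The new helpers live in gns3server/utils/hostname.py (+53 lines, not shown in this diff); a hedged approximation of the RFC 1123 check they are likely built around, not the shipped implementation:

    import re

    # one DNS label: 1-63 chars, letters/digits/hyphens, no leading or trailing hyphen
    _LABEL_RE = re.compile(r"(?!-)[A-Za-z0-9-]{1,63}(?<!-)$")

    def is_rfc1123_hostname_valid(name: str) -> bool:
        if not name or len(name) > 255:
            return False
        return all(_LABEL_RE.match(label) for label in name.rstrip(".").split("."))

    def to_rfc1123_hostname(name: str) -> str:
        # replace disallowed characters and trim separators at the edges (illustrative only)
        name = re.sub(r"[^A-Za-z0-9.-]", "-", name)
        return name.strip("-.")[:63] or "node"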