topolib 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- topolib/__init__.py +4 -0
- topolib/analysis/__init__.py +4 -0
- topolib/analysis/metrics.py +80 -0
- topolib/analysis/traffic_matrix.py +344 -0
- topolib/assets/AMRES.json +1265 -0
- topolib/assets/Abilene.json +285 -0
- topolib/assets/Bell_canada.json +925 -0
- topolib/assets/Brazil.json +699 -0
- topolib/assets/CESNET.json +657 -0
- topolib/assets/CORONET.json +1957 -0
- topolib/assets/China.json +1135 -0
- topolib/assets/DT-14.json +470 -0
- topolib/assets/DT-17.json +525 -0
- topolib/assets/DT-50.json +1515 -0
- topolib/assets/ES-30.json +967 -0
- topolib/assets/EURO-16.json +455 -0
- topolib/assets/FR-43.json +1277 -0
- topolib/assets/FUNET.json +317 -0
- topolib/assets/GCN-BG.json +855 -0
- topolib/assets/GRNET.json +1717 -0
- topolib/assets/HyperOne.json +255 -0
- topolib/assets/IT-21.json +649 -0
- topolib/assets/India.json +517 -0
- topolib/assets/JPN-12.json +331 -0
- topolib/assets/KOREN.json +287 -0
- topolib/assets/NORDUNet.json +783 -0
- topolib/assets/NSFNet.json +399 -0
- topolib/assets/PANEURO.json +757 -0
- topolib/assets/PAVLOV.json +465 -0
- topolib/assets/PLN-12.json +343 -0
- topolib/assets/SANReN.json +161 -0
- topolib/assets/SERBIA-MONTENEGRO.json +139 -0
- topolib/assets/Telefonica-21.json +637 -0
- topolib/assets/Turk_Telekom.json +551 -0
- topolib/assets/UKNet.json +685 -0
- topolib/assets/Vega_Telecom.json +819 -0
- topolib/elements/__init__.py +5 -0
- topolib/elements/link.py +121 -0
- topolib/elements/node.py +230 -0
- topolib/topology/__init__.py +4 -0
- topolib/topology/path.py +84 -0
- topolib/topology/topology.py +469 -0
- topolib/visualization/__init__.py +1 -0
- topolib/visualization/_qt_screenshot.py +103 -0
- topolib/visualization/_qt_window.py +78 -0
- topolib/visualization/_templates.py +101 -0
- topolib/visualization/mapview.py +316 -0
- topolib-0.8.0.dist-info/METADATA +148 -0
- topolib-0.8.0.dist-info/RECORD +51 -0
- topolib-0.8.0.dist-info/WHEEL +4 -0
- topolib-0.8.0.dist-info/licenses/LICENSE +22 -0
topolib/analysis/metrics.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Metrics module for network topology analysis.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import List, Dict, Optional
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
from topolib.topology import Topology
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Metrics:
    """
    Static helpers for computing metrics on network topologies.

    Every method is a ``@staticmethod`` that receives a Topology instance
    and performs no mutation of it.

    Methods
    -------
    node_degree(topology)
        Calculates the degree of each node.
    link_length_stats(topology)
        Calculates statistics (min, max, avg) of link lengths.
    connection_matrix(topology)
        Builds the adjacency matrix.
    """

    @staticmethod
    def node_degree(topology: "Topology") -> Dict[int, int]:
        """
        Calculates the degree of each node in the topology.

        :param topology: Instance of topolib.topology.topology.Topology.
        :type topology: topolib.topology.topology.Topology
        :return: Dictionary {node_id: degree}
        :rtype: dict[int, int]
        """
        # Start every node at zero so isolated nodes are reported too.
        counts = dict.fromkeys((node.id for node in topology.nodes), 0)
        for link in topology.links:
            # Each link contributes one unit of degree to both endpoints.
            counts[link.source.id] += 1
            counts[link.target.id] += 1
        return counts

    @staticmethod
    def link_length_stats(topology: "Topology") -> Dict[str, Optional[float]]:
        """
        Calculates the minimum, maximum, and average link lengths.

        :param topology: Instance of topolib.topology.topology.Topology.
        :type topology: topolib.topology.topology.Topology
        :return: Dictionary with keys 'min', 'max', 'avg'; every value is
            None when the topology has no links.
        :rtype: dict[str, float | None]
        """
        lengths = [link.length for link in topology.links]
        if lengths:
            return {
                "min": min(lengths),
                "max": max(lengths),
                "avg": sum(lengths) / len(lengths),
            }
        # No links: the statistics are undefined.
        return {"min": None, "max": None, "avg": None}

    @staticmethod
    def connection_matrix(topology: "Topology") -> List[List[int]]:
        """
        Builds the adjacency matrix of the topology.

        :param topology: Instance of topolib.topology.topology.Topology.
        :type topology: topolib.topology.topology.Topology
        :return: Adjacency matrix (1 if connected, 0 otherwise).
        :rtype: list[list[int]]
        """
        # Map node ids to row/column positions in enumeration order.
        position = {node.id: idx for idx, node in enumerate(topology.nodes)}
        size = len(topology.nodes)
        matrix = [[0 for _ in range(size)] for _ in range(size)]
        for link in topology.links:
            row = position[link.source.id]
            col = position[link.target.id]
            # Links are undirected: mark both orientations.
            matrix[row][col] = 1
            matrix[col][row] = 1
        return matrix
|
|
@@ -0,0 +1,344 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Traffic matrix generation module.
|
|
3
|
+
|
|
4
|
+
This module provides methods to generate traffic demand matrices using different models:
|
|
5
|
+
- Gravitational model (population-based)
|
|
6
|
+
- DC/IXP model (datacenter and internet exchange point-based)
|
|
7
|
+
- Distribution probability model (resource-based traffic distribution)
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from typing import Dict, Tuple, List, Any, TYPE_CHECKING
|
|
11
|
+
import itertools
|
|
12
|
+
import math
|
|
13
|
+
import numpy as np
|
|
14
|
+
from numpy.typing import NDArray
|
|
15
|
+
|
|
16
|
+
if TYPE_CHECKING:
|
|
17
|
+
from ..topology.topology import Topology
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class TrafficMatrix:
    """
    Traffic matrix generator for network topologies.

    Generates traffic demand matrices between nodes using different models.
    All matrices are returned as numpy arrays where matrix[i][j] represents
    traffic from node i to node j (in Gbps).

    All methods are static and receive a Topology instance as parameter.
    """

    @staticmethod
    def _calculate_node_degrees(topology: "Topology") -> Dict[int, int]:
        """
        Calculate the degree (number of connections) for each node.

        Parameters
        ----------
        topology : Topology
            The network topology.

        Returns
        -------
        dict
            Dictionary mapping node_id to degree count.
        """
        degree_counts = {node.id: 0 for node in topology.nodes}
        processed_links: set[Tuple[int, int]] = set()

        for link in topology.links:
            # Treat links as bidirectional for degree counting: a reverse
            # duplicate of an already-seen pair must not add degree twice.
            link_tuple: Tuple[int, int] = tuple(sorted((link.source.id, link.target.id)))  # type: ignore
            if link_tuple not in processed_links:
                degree_counts[link.source.id] += 1
                degree_counts[link.target.id] += 1
                processed_links.add(link_tuple)

        return degree_counts

    @staticmethod
    def _get_gravitational_k(topology: "Topology", rate: float = 0.015) -> float:
        """
        Calculate the K constant for the gravitational model.

        K scales the pairwise population products so total generated traffic
        matches ``rate * total_population``.

        Parameters
        ----------
        topology : Topology
            The network topology.
        rate : float
            Traffic rate per population unit (default: 0.015 Gbps per capita)

        Returns
        -------
        float
            The K constant for the gravitational formula; 0.0 when the
            topology has fewer than two nodes or all pair products are zero.
        """
        nodes = list(topology.nodes)
        if len(nodes) < 2:
            return 0.0

        total_pop = sum(node.pop for node in nodes)
        sum_pop_pairs = sum(
            n1.pop * n2.pop for n1, n2 in itertools.combinations(nodes, 2)
        )

        if sum_pop_pairs == 0:
            # All pairwise products are zero; avoid division by zero.
            return 0.0

        return (rate * total_pop) / sum_pop_pairs

    @staticmethod
    def _pre_calculate_metrics(
        topology: "Topology",
        w_pop: float = 0.015,
        w_dc: float = 400.0,
        w_ixp: float = 2857.0,
    ) -> Tuple[Dict[int, Dict[str, float]], float]:
        """
        Pre-calculate metrics for distribution probability model.

        Parameters
        ----------
        topology : Topology
            The network topology.
        w_pop : float
            Weight for population (Gbps per capita)
        w_dc : float
            Weight for datacenter capacity (Gbps per DC)
        w_ixp : float
            Weight for IXP capacity (Gbps per IXP)

        Returns
        -------
        tuple
            (stats_lookup, total_size_N) where stats_lookup contains
            share_pct and traffic_leaving for each node.
        """
        # First pass: weighted traffic and normalized size per node.
        temp_data: List[Dict[str, Any]] = []
        total_size_N = 0.0

        for node in topology.nodes:
            # Weighted sum (Traffic n_i)
            traffic_ni = (node.pop * w_pop) + (node.dc * w_dc) + (node.ixp * w_ixp)
            # Normalized size (Size n_i), expressed in IXP-weight units
            size_ni = traffic_ni / w_ixp
            total_size_N += size_ni

            temp_data.append(
                {
                    "id": node.id,
                    "name": node.name,
                    "traffic_ni": traffic_ni,
                    "size_ni": size_ni,
                }
            )

        # Second pass: each node's share of total size and the traffic that
        # leaves it (the fraction not consumed locally).
        stats: Dict[int, Dict[str, float]] = {}
        for item in temp_data:
            share_pct: float = (
                item["size_ni"] / total_size_N if total_size_N > 0 else 0.0
            )
            traffic_leaving: float = item["traffic_ni"] * (1.0 - share_pct)

            stats[item["id"]] = {
                "share_pct": share_pct,
                "traffic_leaving": traffic_leaving,
                "name": item["name"],
            }

        return stats, total_size_N

    @staticmethod
    def gravitational(topology: "Topology", rate: float = 0.015) -> NDArray[np.float64]:
        """
        Generate traffic matrix using the gravitational model.

        Traffic between nodes i and j is proportional to their populations:
        T(i,j) = K * Pop_i * Pop_j

        Parameters
        ----------
        topology : Topology
            The network topology.
        rate : float
            Traffic rate per population unit (default: 0.015 Gbps per capita)

        Returns
        -------
        numpy.ndarray
            Traffic matrix where matrix[i][j] is traffic from node i to node j (Gbps).
            Shape: (n_nodes, n_nodes)
        """
        nodes = list(topology.nodes)
        n = len(nodes)

        if n == 0:
            # Fixed: return an empty 2-D matrix (0, 0) instead of a 1-D
            # array so the documented (n_nodes, n_nodes) shape always holds.
            return np.zeros((0, 0), dtype=np.float64)

        K = TrafficMatrix._get_gravitational_k(topology, rate)
        if K == 0:
            return np.zeros((n, n), dtype=np.float64)

        matrix = np.zeros((n, n), dtype=np.float64)

        for i, node_i in enumerate(nodes):
            for j, node_j in enumerate(nodes):
                if i != j:
                    matrix[i, j] = K * node_i.pop * node_j.pop

        return matrix

    @staticmethod
    def dc_ixp(topology: "Topology") -> NDArray[np.float64]:
        """
        Generate traffic matrix using the DC/IXP model.

        Traffic depends on node degrees and the difference between DC and IXP resources:
        - If combined degree > 2*avg_degree: T(i,j) = 2*C(N,2)*delta_i*delta_j
        - Otherwise: T(i,j) = N*delta_i*delta_j

        where N = degree_i + degree_j, delta_i = |DC_i - IXP_i|

        Parameters
        ----------
        topology : Topology
            The network topology.

        Returns
        -------
        numpy.ndarray
            Traffic matrix where matrix[i][j] is traffic from node i to node j (Gbps).
            Shape: (n_nodes, n_nodes)
        """
        nodes = list(topology.nodes)
        n = len(nodes)

        if n == 0:
            # Fixed: empty 2-D matrix, matching the documented shape.
            return np.zeros((0, 0), dtype=np.float64)

        degrees = TrafficMatrix._calculate_node_degrees(topology)
        avg_degree = sum(degrees.values()) / n

        matrix = np.zeros((n, n), dtype=np.float64)

        for i, node_i in enumerate(nodes):
            for j, node_j in enumerate(nodes):
                if i != j:
                    N = degrees[node_i.id] + degrees[node_j.id]
                    delta_i = abs(node_i.dc - node_i.ixp)
                    delta_j = abs(node_j.dc - node_j.ixp)

                    if N > 2 * avg_degree:
                        # math.comb(N, 2) is already 0 for N < 2, so the
                        # degenerate case needs no separate guard.
                        traffic = 2 * math.comb(N, 2) * delta_i * delta_j
                    else:
                        traffic = N * delta_i * delta_j

                    matrix[i, j] = traffic

        return matrix

    @staticmethod
    def distribution_probability(
        topology: "Topology",
        w_pop: float = 0.015,
        w_dc: float = 400.0,
        w_ixp: float = 2857.0,
    ) -> NDArray[np.float64]:
        """
        Generate traffic matrix using the distribution probability model.

        Traffic from node i to j is distributed proportionally based on
        resource shares and leaving traffic:
        T(i,j) = traffic_leaving_i * (share_j / (1 - share_i))

        Parameters
        ----------
        topology : Topology
            The network topology.
        w_pop : float
            Weight for population (Gbps per capita, default: 0.015)
        w_dc : float
            Weight for datacenter capacity (Gbps per DC, default: 400)
        w_ixp : float
            Weight for IXP capacity (Gbps per IXP, default: 2857)

        Returns
        -------
        numpy.ndarray
            Traffic matrix where matrix[i][j] is traffic from node i to node j (Gbps).
            Shape: (n_nodes, n_nodes)
        """
        nodes = list(topology.nodes)
        n = len(nodes)

        if n == 0:
            # Fixed: empty 2-D matrix, matching the documented shape.
            return np.zeros((0, 0), dtype=np.float64)

        stats_lookup, total_size_N = TrafficMatrix._pre_calculate_metrics(
            topology, w_pop, w_dc, w_ixp
        )

        if total_size_N == 0:
            return np.zeros((n, n), dtype=np.float64)

        matrix = np.zeros((n, n), dtype=np.float64)

        for i, node_i in enumerate(nodes):
            stats_i = stats_lookup[node_i.id]

            for j, node_j in enumerate(nodes):
                if i != j:
                    stats_j = stats_lookup[node_j.id]

                    traffic_leaving_i = stats_i["traffic_leaving"]
                    share_i = stats_i["share_pct"]
                    share_j = stats_j["share_pct"]

                    # Avoid division by zero when a node holds 100% share.
                    if share_i >= 1.0:
                        traffic = 0.0
                    else:
                        distribution_factor = share_j / (1.0 - share_i)
                        traffic = traffic_leaving_i * distribution_factor

                    matrix[i, j] = traffic

        return matrix

    @staticmethod
    def to_csv(
        matrix: NDArray[np.float64], topology: "Topology", filename: str
    ) -> None:
        """
        Export traffic matrix to CSV file.

        Parameters
        ----------
        matrix : numpy.ndarray
            Traffic matrix as numpy array
        topology : Topology
            The topology (needed to get node IDs for labels)
        filename : str
            Output CSV filename
        """
        if matrix.size == 0:
            # Nothing to export for an empty matrix.
            return

        node_ids = [node.id for node in topology.nodes]
        n = len(node_ids)

        # Fixed: newline="" and an explicit encoding keep the CSV
        # byte-identical across platforms (no \r\n translation on Windows).
        with open(filename, "w", newline="", encoding="utf-8") as f:
            # Header
            f.write("src/dst," + ",".join(map(str, node_ids)) + "\n")

            # Rows, values rendered with two decimal places
            for i in range(n):
                row = [str(node_ids[i])]
                for j in range(n):
                    row.append(f"{matrix[i, j]:.2f}")
                f.write(",".join(row) + "\n")