modularitypruning 1.3.6-py3-none-any.whl → 1.4.1-py3-none-any.whl

@@ -1,14 +1,16 @@
- from .partition_utilities import all_degrees, in_degrees, out_degrees, membership_to_communities, \
-     membership_to_layered_communities
+ import warnings
  from collections import defaultdict
- import numpy as np
- from numpy.random import choice
  from math import floor
  from multiprocessing import Pool, cpu_count
- from scipy.spatial import HalfspaceIntersection
+
+ import numpy as np
+ from numpy.random import choice
  from scipy.linalg import LinAlgWarning
  from scipy.optimize import linprog, OptimizeWarning
- import warnings
+ from scipy.spatial import HalfspaceIntersection
+
+ from .partition_utilities import all_degrees, in_degrees, out_degrees, membership_to_communities, \
+     membership_to_layered_communities


  def get_interior_point(halfspaces, initial_num_sampled=50, full_retry_limit=10):
@@ -1,12 +1,10 @@
- from .progress import Progress
  import functools
+ from multiprocessing import Pool, cpu_count
+
  import igraph as ig
  import leidenalg
- from math import ceil
- from multiprocessing import Pool, cpu_count
  import numpy as np
- import psutil
- import warnings
+ from tqdm import tqdm

  LOW_MEMORY_THRESHOLD = 1e9 # 1 GB

@@ -50,6 +48,11 @@ def singlelayer_leiden(G, gamma, return_partition=False):
      return tuple(partition.membership)


+ def _wrapped_singlelayer_leiden(args):
+     """Wrapped singlelayer_leiden() for use in multiprocessing.Pool.imap_unordered."""
+     return singlelayer_leiden(*args)
+
+
  def leiden_part(G):
      return leidenalg.RBConfigurationVertexPartition(G)

@@ -68,8 +71,6 @@ def split_intralayer_leiden_graph(G_intralayer, layer_membership):

      This is needed since leidenalg lacks support for faster multilayer optimization.

-     WARNING: Optimization can be EXTREMELY slow! Leidenalg does not properly implement multilayer optimization.
-
      :param G_intralayer: intralayer graph of interest
      :type G_intralayer: igraph.Graph
      :param layer_vec: list of each vertex's layer membership
@@ -77,9 +78,6 @@ def split_intralayer_leiden_graph(G_intralayer, layer_membership):
      :return: list of intralayer networks
      :rtype: list[igraph.Graph]
      """
-     warnings.warn("You are using Leiden multilayer modularity optimization. THIS CAN BE EXTREMELY SLOW! "
-                   "leidenalg's implementation is inefficient, especially when there are many layers.")
-
      # internally use hashable objects for memoization
      return _split_leiden_graph_layers_cached(n=G_intralayer.vcount(), G_es=tuple(G_intralayer.es),
                                               is_directed=G_intralayer.is_directed(),
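Aside (context, not part of the diff): the `_cached` helper above receives the edge list and layer memberships as tuples because its memoization (presumably `functools.lru_cache` or similar, given the retained `functools` import) requires hashable arguments, and lists and igraph objects are not hashable. A minimal sketch of that constraint, using a hypothetical `expensive_split` function:

```python
import functools


@functools.lru_cache(maxsize=None)
def expensive_split(n, edges, layer_membership):
    """Toy stand-in for an expensive, repeatedly-called layer-splitting routine."""
    # n, edges, and layer_membership together form the cache key, so all must be hashable
    return [e for e in edges if layer_membership[e[0]] == layer_membership[e[1]]]


edges = [(0, 1), (1, 2), (2, 3)]
layers = [0, 0, 1, 1]
# passing the lists directly would raise TypeError: unhashable type: 'list'
intralayer_edges = expensive_split(4, tuple(edges), tuple(layers))  # [(0, 1), (2, 3)]
```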
@@ -108,7 +106,8 @@ def _split_leiden_graph_layers_cached(n, G_es, is_directed, layer_membership):
  def multilayer_leiden(G_intralayer, G_interlayer, layer_vec, gamma, omega, optimiser=None, return_partition=False):
      r"""Run the Leiden modularity maximization algorithm at a single (:math:`\gamma, \omega`) value.

-     WARNING: Optimization can be EXTREMELY slow! Leidenalg does not properly implement multilayer optimization.
+     WARNING: Optimization can be EXTREMELY slow for large numbers of layers! Leidenalg does not properly implement
+     multilayer optimization.

      :param G_intralayer: intralayer graph of interest
      :type G_intralayer: igraph.Graph
@@ -150,6 +149,11 @@ def multilayer_leiden(G_intralayer, G_interlayer, layer_vec, gamma, omega, optim
      return tuple(intralayer_parts[0].membership)


+ def _wrapped_multilayer_leiden(args):
+     """Wrapped multilayer_leiden() for use in multiprocessing.Pool.imap_unordered."""
+     return multilayer_leiden(*args)
+
+
  def multilayer_leiden_part(G_intralayer, G_interlayer, layer_membership):
      if 'weight' not in G_intralayer.es:
          G_intralayer.es['weight'] = [1.0] * G_intralayer.ecount()
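Context (not part of the diff): `Pool.imap_unordered` hands each worker a single object per task, so the two module-level `_wrapped_*` helpers added in this release unpack an argument tuple themselves, where the old code relied on `Pool.starmap` to do the unpacking. The wrappers live at module level so they can be pickled by `multiprocessing`. A minimal sketch of the same pattern, using a hypothetical `work(a, b)` function:

```python
from multiprocessing import Pool


def work(a, b):
    return a + b


def _wrapped_work(args):
    """imap_unordered passes one object per task, so unpack the (a, b) tuple here."""
    return work(*args)


if __name__ == "__main__":
    tasks = [(i, 2 * i) for i in range(10)]
    with Pool() as pool:
        # results arrive as each worker finishes, in arbitrary order
        results = list(pool.imap_unordered(_wrapped_work, tasks, chunksize=2))
    print(sorted(results))  # [0, 3, 6, ..., 27]
```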
@@ -178,51 +182,29 @@ def repeated_leiden_from_gammas(G, gammas):
      return {sorted_tuple(singlelayer_leiden(G, gamma)) for gamma in gammas}


- def repeated_parallel_leiden_from_gammas(G, gammas, show_progress=True, chunk_dispatch=True):
+ def repeated_parallel_leiden_from_gammas(G, gammas, show_progress=True):
      r"""Runs the Leiden modularity maximization algorithm at each provided :math:`\gamma` value, using all CPU cores.

      :param G: graph of interest
      :type G: igraph.Graph
      :param gammas: list of gammas (resolution parameters) to run Leiden at
      :type gammas: list[float]
-     :param show_progress: if True, render a progress bar. This will only work if ``chunk_dispatch`` is also True
+     :param show_progress: if True, render a progress bar
      :type show_progress: bool
-     :param chunk_dispatch: if True, dispatch parallel work in chunks. Setting this to False may increase performance,
-         but can lead to out-of-memory issues
-     :type chunk_dispatch: bool
      :return: a set of all unique partitions returned by the Leiden algorithm
      :rtype: set of tuple[int]
      """
-
-     pool = Pool(processes=cpu_count())
      total = set()
-
-     chunk_size = len(gammas) // 99
-     if chunk_size > 0 and chunk_dispatch:
-         chunk_params = ([(G, g) for g in gammas[i:i + chunk_size]] for i in range(0, len(gammas), chunk_size))
-     else:
-         chunk_params = [[(G, g) for g in gammas]]
-         chunk_size = len(gammas)
-
-     if show_progress:
-         progress = Progress(ceil(len(gammas) / chunk_size))
-
-     for chunk in chunk_params:
-         for partition in pool.starmap(singlelayer_leiden, chunk):
-             total.add(sorted_tuple(partition))
-
+     pool_chunk_size = max(1, len(gammas) // (cpu_count() * 100))
+     with Pool(processes=cpu_count()) as pool:
+         pool_iterator = pool.imap_unordered(_wrapped_singlelayer_leiden, [(G, g) for g in gammas],
+                                             chunksize=pool_chunk_size)
          if show_progress:
-             progress.increment()
-
-         if psutil.virtual_memory().available < LOW_MEMORY_THRESHOLD:
-             # Reinitialize pool to get around an apparent memory leak in multiprocessing
-             pool.close()
-             pool = Pool(processes=cpu_count())
+             pool_iterator = tqdm(pool_iterator, total=len(gammas))

-     if show_progress:
-         progress.done()
+         for partition in pool_iterator:
+             total.add(sorted_tuple(partition))

-     pool.close()
      return total


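For illustration (not part of the diff): the refactor replaces manual chunking, the psutil-based pool recycling, and the custom `Progress` class with `Pool.imap_unordered` plus `tqdm`, and the `with Pool(...)` block now guarantees worker cleanup. A hedged usage sketch of the public function above; the graph and gamma range are illustrative only:

```python
import igraph as ig
import numpy as np

from modularitypruning.leiden_utilities import repeated_parallel_leiden_from_gammas

if __name__ == "__main__":  # guard is required on platforms that spawn worker processes
    G = ig.Graph.Erdos_Renyi(n=200, p=0.05)
    gammas = list(np.linspace(0.5, 2.0, 1000))  # 1000 resolution parameters
    partitions = repeated_parallel_leiden_from_gammas(G, gammas, show_progress=True)
    print(f"{len(partitions)} unique partitions found")
```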
@@ -232,10 +214,13 @@ def repeated_leiden_from_gammas_omegas(G_intralayer, G_interlayer, layer_vec, ga


  def repeated_parallel_leiden_from_gammas_omegas(G_intralayer, G_interlayer, layer_vec, gammas, omegas,
-                                                 show_progress=True, chunk_dispatch=True):
+                                                 show_progress=True):
      """
      Runs leidenalg at each gamma and omega in ``gammas`` and ``omegas``, using all CPU cores available.

+     WARNING: Optimization can be EXTREMELY slow for large numbers of layers! Leidenalg does not properly implement
+     multilayer optimization.
+
      :param G_intralayer: intralayer graph of interest
      :type G_intralayer: igraph.Graph
      :param G_interlayer: interlayer graph of interest
@@ -248,44 +233,23 @@ def repeated_parallel_leiden_from_gammas_omegas(G_intralayer, G_interlayer, laye
      :type omegas: list[float]
      :param show_progress: if True, render a progress bar
      :type show_progress: bool
-     :param chunk_dispatch: if True, dispatch parallel work in chunks. Setting this to False may increase performance,
-         but can lead to out-of-memory issues
-     :type chunk_dispatch: bool
      :return: a set of all unique partitions encountered
      :rtype: set of tuple[int]
      """
      resolution_parameter_points = [(gamma, omega) for gamma in gammas for omega in omegas]

-     pool = Pool(processes=cpu_count())
      total = set()
-
-     chunk_size = len(resolution_parameter_points) // 99
-     if chunk_size > 0 and chunk_dispatch:
-         chunk_params = ([(G_intralayer, G_interlayer, layer_vec, gamma, omega)
-                          for gamma, omega in resolution_parameter_points[i:i + chunk_size]]
-                         for i in range(0, len(resolution_parameter_points), chunk_size))
-     else:
-         chunk_params = [[(G_intralayer, G_interlayer, layer_vec, gamma, omega)
-                          for gamma, omega in resolution_parameter_points]]
-         chunk_size = len(gammas)
-
-     if show_progress:
-         progress = Progress(ceil(len(resolution_parameter_points) / chunk_size))
-
-     for chunk in chunk_params:
-         for partition in pool.starmap(multilayer_leiden, chunk):
-             total.add(sorted_tuple(partition))
-
+     pool_chunk_size = max(1, len(resolution_parameter_points) // (cpu_count() * 100))
+     with Pool(processes=cpu_count()) as pool:
+         pool_iterator = pool.imap_unordered(
+             _wrapped_multilayer_leiden,
+             [(G_intralayer, G_interlayer, layer_vec, gamma, omega) for gamma, omega in resolution_parameter_points],
+             chunksize=pool_chunk_size
+         )
          if show_progress:
-             progress.increment()
-
-         if psutil.virtual_memory().available < LOW_MEMORY_THRESHOLD:
-             # Reinitialize pool to get around an apparent memory leak in multiprocessing
-             pool.close()
-             pool = Pool(processes=cpu_count())
+             pool_iterator = tqdm(pool_iterator, total=len(resolution_parameter_points))

-     if show_progress:
-         progress.done()
+         for partition in pool_iterator:
+             total.add(sorted_tuple(partition))

-     pool.close()
      return total
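A small worked example of the chunking heuristic in the new code (not part of the diff; the core count is illustrative): tasks are dispatched in batches sized so that each core processes roughly 100 batches over the whole run, with at least one task per batch.

```python
from multiprocessing import cpu_count

import numpy as np

gammas = np.linspace(0.5, 1.5, 100)
omegas = np.linspace(0.0, 2.0, 100)
points = [(g, o) for g in gammas for o in omegas]  # 10,000 (gamma, omega) pairs

# mirrors the heuristic above: ~100 chunks per core, never fewer than 1 task per chunk
chunk_size = max(1, len(points) // (cpu_count() * 100))
print(len(points), chunk_size)  # 10000 and 12 on an 8-core machine (10000 // 800)
```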
@@ -6,14 +6,16 @@ module ``modularitypruning.louvain_utilities`` now shims single-layer functions
  in ``modularitypruning.leiden_utilities`` (though it still contains the legacy multi-layer functions since they can be
  faster in general -- leidenalg does not efficiently implement multilayer optimization).
  """
- from . import leiden_utilities
- from .leiden_utilities import sorted_tuple, LOW_MEMORY_THRESHOLD
- from .progress import Progress
+ import warnings
  from math import ceil
  from multiprocessing import Pool, cpu_count
+
  import numpy as np
  import psutil
- import warnings
+
+ from . import leiden_utilities
+ from .leiden_utilities import sorted_tuple, LOW_MEMORY_THRESHOLD
+ from .progress import Progress

  try:
      import louvain # import louvain if possible
@@ -1,8 +1,9 @@
+ import leidenalg
+
  from .leiden_utilities import singlelayer_leiden, multilayer_leiden
  from .parameter_estimation_utilities import leiden_part_with_membership, estimate_singlelayer_SBM_parameters, \
      gamma_estimate_from_parameters, omega_function_from_model, estimate_multilayer_SBM_parameters
  from .partition_utilities import in_degrees
- import leidenalg


  def iterative_monolayer_resolution_parameter_estimation(G, gamma=1.0, tol=1e-2, max_iter=25, verbose=False,
@@ -1,12 +1,14 @@
- from .leiden_utilities import leiden_part_with_membership, sorted_tuple
- from .champ_utilities import CHAMP_2D, CHAMP_3D
- from .partition_utilities import num_communities
+ import warnings
+ from math import log
+
  import igraph as ig
  import leidenalg
- from math import log
  import numpy as np
  from scipy.optimize import fsolve
- import warnings
+
+ from .champ_utilities import CHAMP_2D, CHAMP_3D
+ from .leiden_utilities import leiden_part_with_membership, sorted_tuple
+ from .partition_utilities import num_communities


  def estimate_singlelayer_SBM_parameters(G, partition, m=None):
@@ -534,12 +536,25 @@ def prune_to_multilayer_stable_partitions(G_intralayer, G_interlayer, layer_vec,
      parameter estimates are within the provided ``gamma_start``, ``gamma_end``, ``omega_start``, and ``omega_end``
      bounds.

+     There are three network layer topology models available, all from Pamfil et al.
+
+     * **"temporal"**: Interlayer edges always connect copies of a node from one layer to the next, often representing
+       interactions that change over time.
+     * **"multilevel"**: Interlayer edges connect a hierarchy of monolayer networks from one layer to the next. This is
+       more general than temporal networks, as nodes can connect arbitrarily to nodes in the next layer. These often
+       represent inclusion relationships, such as cities to counties, counties to states, and states to countries.
+     * **"multiplex"**: Each layer represents a type of interaction, making the entire multilayer network akin to an
+       edge-colored multigraph (each type of edge has its own layer). This model is unique in that there is no natural
+       ordering of layers, and the resulting theory requires some analytical simplifications, making the resulting
+       parameter estimation the least robust of the three models.
+
      See https://doi.org/10.1038/s41598-022-20142-6 for more details.

-     NOTE: This method truncates omega estimates to ``omega_end - 1e-3`` in order to properly identify stable partitions
-     with infinite interlayer coupling estimates (e.g. when all membership labels persist across layers). If
-     ``omega_end`` is set too low, such partitions may be incorrectly identified as stable. As such, you should be
-     somewhat wary of the returned partitions with zero community structure differences across layers.
+     NOTE: This method will truncate omega estimates to ``omega_end - 1e-3`` (and raise a warning) if needed to properly
+     identify stable partitions with very large or infinite interlayer coupling estimates (e.g., when all membership
+     labels persist across layers). If ``omega_end`` is set too low, these partitions may be incorrectly identified as
+     stable. Conversely, some partitions with large omega estimates might be misclassified as not stable. Therefore, be
+     cautious of returned partitions with little or no community structure differences across layers.

      :param G_intralayer: intralayer graph of interest
      :type G_intralayer: igraph.Graph
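Aside (not part of the diff): for the "temporal" model described in the new docstring text above, the interlayer graph couples each node's copy in layer l to its copy in layer l + 1. A hedged sketch of how such a `G_interlayer` and `layer_vec` might be built with python-igraph, assuming the common indexing convention that node v in layer l has vertex id l * n + v; whether the interlayer edges should be directed depends on your workflow:

```python
import igraph as ig

n, T = 4, 3  # 4 nodes per layer, 3 layers
layer_vec = [layer for layer in range(T) for _ in range(n)]  # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]

# temporal coupling: copy of node v in layer l -> copy of node v in layer l + 1
interlayer_edges = [(layer * n + v, (layer + 1) * n + v) for layer in range(T - 1) for v in range(n)]
G_interlayer = ig.Graph(n=n * T, edges=interlayer_edges, directed=True)
```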
@@ -599,6 +614,11 @@ def prune_to_multilayer_stable_partitions(G_intralayer, G_interlayer, layer_vec,
                         omega_start, omega_end)
      domains_with_estimates = domains_to_gamma_omega_estimates(G_intralayer, G_interlayer, layer_vec, domains, model)

+     if any(o_est >= omega_end for _, _, g_est, o_est in domains_with_estimates if g_est is not None):
+         warnings.warn(f"We are truncating some omega estimates to your choice of omega_end={omega_end}. You should "
+                       f"check that this accurately captures the high-omega behavior of the partition domains. "
+                       f"Be cautious of partitions with little or no community structure differences across layers!")
+
      # Truncate infinite omega solutions to our maximum omega
      domains_with_estimates = [(polyverts, membership, g_est, min(o_est, omega_end - 1e-3))
                                for polyverts, membership, g_est, o_est in domains_with_estimates
@@ -1,4 +1,5 @@
  from collections import defaultdict
+
  from sklearn.metrics import adjusted_mutual_info_score, normalized_mutual_info_score


@@ -1,12 +1,14 @@
- from .partition_utilities import num_communities, ami
  from collections import defaultdict
  from random import sample, shuffle
- import numpy as np
+
  import matplotlib
- from matplotlib.patches import Polygon
- from matplotlib.collections import PatchCollection
  import matplotlib.pyplot as plt
+ import numpy as np
  import seaborn as sbn
+ from matplotlib.collections import PatchCollection
+ from matplotlib.patches import Polygon
+
+ from .partition_utilities import num_communities, ami


  def plot_adjacency(adj):
@@ -69,7 +71,7 @@ def plot_estimates(gamma_estimates):
      # length_includes_head=True, alpha=0.5, zorder=2, **{"overhang": 0.5})


- def plot_2d_domains(domains, xlim, ylim, flip_axes=False, use_current_axes=False):
+ def plot_2d_domains(domains, xlim, ylim, flip_axes=True, use_current_axes=False):
      """Plot partition dominance ranges in the (gamma, omega) plane, using the domains from CHAMP_3D.

      Limits output to xlim and ylim dimensions. Note that the plotting here has x=gamma and y=omega.
@@ -91,7 +93,7 @@ def plot_2d_domains(domains, xlim, ylim, flip_axes=False, use_current_axes=False
          patches.append(polygon)

      cnorm = matplotlib.colors.Normalize(vmin=0, vmax=len(domains))
-     cmap = matplotlib.cm.get_cmap("Set1")
+     cmap = plt.get_cmap("Set1")
      available_colors = {cmap(cnorm(i)) for i in range(len(domains))}

      if len(available_colors) == len(domains):
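Context for the `get_cmap` change (not part of the diff): `matplotlib.cm.get_cmap` was deprecated in Matplotlib 3.7 and removed in later releases, while `plt.get_cmap` remains supported. A minimal sketch of the replacement pattern used above:

```python
import matplotlib
import matplotlib.pyplot as plt

cmap = plt.get_cmap("Set1")  # preferred over the deprecated matplotlib.cm.get_cmap
cnorm = matplotlib.colors.Normalize(vmin=0, vmax=8)
colors = [cmap(cnorm(i)) for i in range(9)]  # nine RGBA tuples from the Set1 palette
```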
@@ -207,7 +209,7 @@ def plot_2d_domains_with_num_communities(domains_with_estimates, xlim, ylim, fli
      plt.ylim(ylim)


- def plot_2d_domains_with_ami(domains_with_estimates, ground_truth, xlim, ylim, flip_axes=False):
+ def plot_2d_domains_with_ami(domains_with_estimates, ground_truth, xlim, ylim, flip_axes=True):
      """Plot partition dominance ranges in the (gamma, omega) plane, using the domains from CHAMP_3D and coloring by the
      AMI between the partitions and ground truth.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: modularitypruning
- Version: 1.3.6
+ Version: 1.4.1
  Summary: Pruning tool to identify small subsets of network partitions that are significant from the perspective of stochastic block model inference.
  Home-page: https://github.com/ragibson/ModularityPruning
  Author: Ryan Gibson
@@ -26,6 +26,7 @@ Requires-Dist: igraph
  Requires-Dist: scikit-learn
  Requires-Dist: scipy >=1.7
  Requires-Dist: seaborn
+ Requires-Dist: tqdm

  # ModularityPruning

@@ -46,6 +47,7 @@ https://static-content.springer.com/esm/art%3A10.1038%2Fs41598-022-20142-6/Media
  ).

  ## Installation
+
  This project is on [PyPI](https://pypi.org/project/modularitypruning/) and can
  be installed with

@@ -60,6 +62,7 @@ Alternatively, you can install it from this repository directly:
  python3 setup.py install

  <a name = "Basic Usage"></a>
+
  ## Basic Usage

  This package interfaces directly with python-igraph. A simple example of its
@@ -0,0 +1,14 @@
+ modularitypruning/__init__.py,sha256=U1iz51AVVzHw0aBZeJicxVg_L6TWq5pmv8Ep_bYyySU,238
+ modularitypruning/champ_utilities.py,sha256=yRLng9KciNxJst3Ybp24qlRnYvIEIe5Y-ZVsfSijkqc,16350
+ modularitypruning/leiden_utilities.py,sha256=kHHYFj30Ezl_YUhgmwEm-vPZ4K7658MqTGegra_4V_8,10360
+ modularitypruning/louvain_utilities.py,sha256=duGAK0PlIJvtpo689dNuRNixNXvLIWAPf-5ELGhEHZ4,8233
+ modularitypruning/parameter_estimation.py,sha256=n0_VPXa6QvFZqVEKhHSBUrnqQPTLRNGgNLDTsVymYT4,10480
+ modularitypruning/parameter_estimation_utilities.py,sha256=teJvq_w1gSjmA2weV9PDKTLN2xgCWgsIQhiY6ZlJ_xU,28152
+ modularitypruning/partition_utilities.py,sha256=0hPEftbV6xcYWdusiE65UZG99kqIrALH7Nwo_44KO-Q,957
+ modularitypruning/plotting.py,sha256=48yRl0--q1adi415pps1MbTtKtBMQxKVOXoRjPOxu-w,10570
+ modularitypruning/progress.py,sha256=XxkEVx8L9BoFnWtvUPg-kWtxUmE1RHqs5p5HPiTExUQ,971
+ modularitypruning-1.4.1.dist-info/LICENSE,sha256=eWz3HIQQxg7p1iSpUOUDKdDhGcuMPuVDDlcXf9F12D8,1068
+ modularitypruning-1.4.1.dist-info/METADATA,sha256=AYXmtCswKmz9EU7rjS3WugRrMGZt3sA_AoDNq6ASB3I,3422
+ modularitypruning-1.4.1.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+ modularitypruning-1.4.1.dist-info/top_level.txt,sha256=ZPOx3a-ek0Ge0ZMq-uvbySSaAL9MZ-t23-JkuHZXo9E,18
+ modularitypruning-1.4.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.41.3)
+ Generator: setuptools (75.2.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,14 +0,0 @@
- modularitypruning/__init__.py,sha256=U1iz51AVVzHw0aBZeJicxVg_L6TWq5pmv8Ep_bYyySU,238
- modularitypruning/champ_utilities.py,sha256=VveP8N9CvMODk3dPtVMRfNLji1pktaolA6iNoW6Fy-A,16348
- modularitypruning/leiden_utilities.py,sha256=er_Sjp5b6fUacO-Bfygbk6CiX6J8uj-gKFw-HZWffNA,11820
- modularitypruning/louvain_utilities.py,sha256=Zt58Wl4hgu6-zejdl-N_NW04UC4rbYmSHgpfoGDC2Ws,8231
- modularitypruning/parameter_estimation.py,sha256=EPU5BDDauToPbAdG1lZc9p5Rl_oDqiC7bltfnjs5tg8,10479
- modularitypruning/parameter_estimation_utilities.py,sha256=dRml1ZIB3ctVC7lk4QQRQwwLBldhG92rNIT6kHs0yfQ,26563
- modularitypruning/partition_utilities.py,sha256=Fizqd0JuODL8W4BP2h8iV0WhZMK6HoKjH_QFNVDZkaI,956
- modularitypruning/plotting.py,sha256=Bxu62ueWZRKKoeNVVHy0aXvocKAUgKuj0dZZsnLmL78,10580
- modularitypruning/progress.py,sha256=XxkEVx8L9BoFnWtvUPg-kWtxUmE1RHqs5p5HPiTExUQ,971
- modularitypruning-1.3.6.dist-info/LICENSE,sha256=eWz3HIQQxg7p1iSpUOUDKdDhGcuMPuVDDlcXf9F12D8,1068
- modularitypruning-1.3.6.dist-info/METADATA,sha256=mcEUa7NhV6DfDHPHGHY6J7jv99McahL_R0Py_-jU0fQ,3400
- modularitypruning-1.3.6.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
- modularitypruning-1.3.6.dist-info/top_level.txt,sha256=ZPOx3a-ek0Ge0ZMq-uvbySSaAL9MZ-t23-JkuHZXo9E,18
- modularitypruning-1.3.6.dist-info/RECORD,,