pyg-nightly 2.7.0.dev20250603__py3-none-any.whl → 2.7.0.dev20250604__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pyg-nightly
3
- Version: 2.7.0.dev20250603
3
+ Version: 2.7.0.dev20250604
4
4
  Summary: Graph Neural Network Library for PyTorch
5
5
  Keywords: deep-learning,pytorch,geometric-deep-learning,graph-neural-networks,graph-convolutional-networks
6
6
  Author-email: Matthias Fey <matthias@pyg.org>
@@ -1,4 +1,4 @@
1
- torch_geometric/__init__.py,sha256=a6RvMmtC3cM-AdkArSXekU2E4-8E99MWvpJfWEZ_1io,2255
1
+ torch_geometric/__init__.py,sha256=di4kZd7rQFDpBlATSXKDIp6hVI7yFbo5CFhhBq2M5zs,2255
2
2
  torch_geometric/_compile.py,sha256=f-WQeH4VLi5Hn9lrgztFUCSrN_FImjhQa6BxFzcYC38,1338
3
3
  torch_geometric/_onnx.py,sha256=V9ffrIKSqhDw6xUZ12lkuSfNs48cQp2EeJ6Z19GfnVw,349
4
4
  torch_geometric/backend.py,sha256=lVaf7aLoVaB3M-UcByUJ1G4T4FOK6LXAg0CF4W3E8jo,1575
@@ -71,7 +71,7 @@ torch_geometric/datasets/ba_shapes.py,sha256=sJEQiK3CGlYTdbQBgKeLhO6mY-HRv3nS9Ya
71
71
  torch_geometric/datasets/bitcoin_otc.py,sha256=olrsq_Z306-oo17iEQoVif3-CgVIOyVc8twgIMXE0iI,4399
72
72
  torch_geometric/datasets/brca_tgca.py,sha256=2lX9oY6T7aPut8NbXFMWS1c2-_FHqCB4hqUzP4_zFsk,3962
73
73
  torch_geometric/datasets/citation_full.py,sha256=5WT6_iZ1GWuShuYZJErQ3bWNV4bHwZsYYBYztoTxMzs,4458
74
- torch_geometric/datasets/city.py,sha256=9EFbPDFlEweVYvZL9V4jmuY_wioKTcax0YxeisZbis4,5138
74
+ torch_geometric/datasets/city.py,sha256=RgOokL8CD1dU4hN_VFzCdUmom57FPwmZjUoulY5X8tM,5190
75
75
  torch_geometric/datasets/coauthor.py,sha256=Nma9aLapDE1S7lCC40WazQZbBJ8nMQV3JJZRci-F3XQ,3138
76
76
  torch_geometric/datasets/coma.py,sha256=4URaPuXdUJdtZbzWojR-BqxlTyykjtvmXptk3G2Uy9k,4734
77
77
  torch_geometric/datasets/cornell.py,sha256=i6wUr2m1U3HCaqMzi-0AZ3Nthdne6_t0ja8qCKYESzE,5311
@@ -337,7 +337,7 @@ torch_geometric/nn/attention/__init__.py,sha256=wLKTmlfP7qL9sZHy4cmDFHEtdwa-MEKE
337
337
  torch_geometric/nn/attention/performer.py,sha256=2PCDn4_-oNTao2-DkXIaoi18anP01OxRELF2pvp-jk8,7357
338
338
  torch_geometric/nn/attention/qformer.py,sha256=7J-pWm_vpumK38IC-iCBz4oqL-BEIofEIxJ0wfjWq9A,2338
339
339
  torch_geometric/nn/attention/sgformer.py,sha256=OBC5HQxbY289bPDtwN8UbPH46To2GRTeVN-najogD-o,3747
340
- torch_geometric/nn/conv/__init__.py,sha256=37zTdt0gfSAUPMtwXjZg5mWx_itojJVFNODYR1h1ch0,3515
340
+ torch_geometric/nn/conv/__init__.py,sha256=8CK-DFG2PEo2ZaFyg-IUlQH8ecQoDDi556uv3ugeQyc,3572
341
341
  torch_geometric/nn/conv/agnn_conv.py,sha256=5nEPLx_BBHcDaO6HWzLuHfXc0Yd_reKynAOH0Iq09lU,3077
342
342
  torch_geometric/nn/conv/antisymmetric_conv.py,sha256=dhA6sCETy1jlXReYJZBSyToOcL_mZ1wL10fMIb8Ppuw,4387
343
343
  torch_geometric/nn/conv/appnp.py,sha256=5hleE5c51Xq0nSP_PyRbr-ukM-3KRROdLrSNhc4AOX0,5983
@@ -374,6 +374,7 @@ torch_geometric/nn/conv/hgt_conv.py,sha256=lUhTWUMovMtn9yR_b2-kLNLqHChGOUl2OtXBY
374
374
  torch_geometric/nn/conv/hypergraph_conv.py,sha256=4BosbbqJyprlI6QjPqIfMxCqnARU_0mUn1zcAQhbw90,8691
375
375
  torch_geometric/nn/conv/le_conv.py,sha256=DonmmYZOKk5wIlTZzzIfNKqBY6MO0MRxYhyr0YtNz-Q,3494
376
376
  torch_geometric/nn/conv/lg_conv.py,sha256=8jMa79iPsOUbXEfBIc3wmbvAD8T3d1j37LeIFTX3Yag,2369
377
+ torch_geometric/nn/conv/meshcnn_conv.py,sha256=hQpqpOl_pHQA-48tX9E_EDipWcHW2tupI7uiFJfxyHU,22209
377
378
  torch_geometric/nn/conv/message_passing.py,sha256=ynTp5MlvHB4SFYnuetK4wWi_1Bj_FhDGAJbf6ZmhEqY,44360
378
379
  torch_geometric/nn/conv/mf_conv.py,sha256=SkOGMN1tFT9dcqy8xYowsB2ozw6QfkoArgR1BksZZaU,4340
379
380
  torch_geometric/nn/conv/mixhop_conv.py,sha256=qVDPWeWcnO7_eHM0ZnpKtr8SISjb4jp0xjgpoDrwjlk,4555
@@ -586,7 +587,7 @@ torch_geometric/transforms/to_superpixels.py,sha256=g8ysBv-ezcHn2gHucKuBtnbe-kBD
586
587
  torch_geometric/transforms/to_undirected.py,sha256=oklgrNzev7HjvVaBHwPQFo0RxcQpmcIebNbcv6vNCtY,2972
587
588
  torch_geometric/transforms/two_hop.py,sha256=XxZl3eztTjE00ZlyAIqYu36rjaRddQT-1v4AFF9VUBc,1313
588
589
  torch_geometric/transforms/virtual_node.py,sha256=FMGT6LZBH-SU2zmp76GKNqJBZ8PyS1_6Em2BbVhv8Tw,2932
589
- torch_geometric/utils/__init__.py,sha256=aVet2bjRvr3URikJ6LpjLATz447YuBS6FuSu5l3JwLY,4982
590
+ torch_geometric/utils/__init__.py,sha256=VkLBKLR_ArzMj2PeZuUXPzE6xoa13lUKNd5LH3RXWgU,5044
590
591
  torch_geometric/utils/_assortativity.py,sha256=pe2Hv5xLWhTW7dgqVWNiwDgDVMxMbliTdLeQf5Y65Ug,2347
591
592
  torch_geometric/utils/_coalesce.py,sha256=m4s_maBhib0jByQi6Cd8dazzhFVshZXLfB9aykCZT2g,6769
592
593
  torch_geometric/utils/_degree.py,sha256=FcsGx5cQdrBmoCQ4qQ2csjsTiDICP1as4x1HD9y5XVk,1017
@@ -619,6 +620,7 @@ torch_geometric/utils/embedding.py,sha256=b-CQ-aapEgahxSS7fuL4aNQX6GJROboV0xclZ_
619
620
  torch_geometric/utils/functions.py,sha256=orQdS_6EpzWSmBHSok3WhxCzLy9neB-cin1aTnlXY-8,703
620
621
  torch_geometric/utils/geodesic.py,sha256=-xsqE3FZU7Y9gMbucIlGJ4FM-3nk8o0AQBxIdN-QfEw,4770
621
622
  torch_geometric/utils/hetero.py,sha256=ok4uAAOyMiaeEPmvyS4DNoDwdKnLS2gmgs5WVVklxOo,5539
623
+ torch_geometric/utils/influence.py,sha256=b2v4G2jA-FKWWSohIc4iNkyFgUvS6j_g97qWT6xfrzI,9920
622
624
  torch_geometric/utils/isolated.py,sha256=nUxCfMY3q9IIFjelr4eyAJH4sYG9W3lGdpWidnp3dm4,3588
623
625
  torch_geometric/utils/laplacian.py,sha256=ludDil4yS1A27PEuYOjZtCtE3o-t0lnucJKfiqENhvM,3695
624
626
  torch_geometric/utils/loop.py,sha256=MUWUS7a5GxuxLKlCtRq95U1hc3MndybAhqKD5IAe2RY,23051
@@ -638,7 +640,7 @@ torch_geometric/utils/undirected.py,sha256=H_nfpI0_WluOG6VfjPyldvcjL4w5USAKWu2x5
638
640
  torch_geometric/visualization/__init__.py,sha256=b-HnVesXjyJ_L1N-DnjiRiRVf7lhwKaBQF_2i5YMVSU,208
639
641
  torch_geometric/visualization/graph.py,sha256=mfZHXYfiU-CWMtfawYc80IxVwVmtK9hbIkSKhM_j7oI,14311
640
642
  torch_geometric/visualization/influence.py,sha256=CWMvuNA_Nf1sfbJmQgn58yS4OFpeKXeZPe7kEuvkUBw,477
641
- pyg_nightly-2.7.0.dev20250603.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
642
- pyg_nightly-2.7.0.dev20250603.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
643
- pyg_nightly-2.7.0.dev20250603.dist-info/METADATA,sha256=k8073M3Yw3Yrrcqz1VGFnEmGZu-clDXcl5GIdqvh2B8,62967
644
- pyg_nightly-2.7.0.dev20250603.dist-info/RECORD,,
643
+ pyg_nightly-2.7.0.dev20250604.dist-info/licenses/LICENSE,sha256=ic-27cMJc1kWoMEYncz3Ya3Ur2Bi3bNLWib2DT763-o,1067
644
+ pyg_nightly-2.7.0.dev20250604.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
645
+ pyg_nightly-2.7.0.dev20250604.dist-info/METADATA,sha256=pOkfyi_H6pwQ7MLQrQ2nbMuuzDu7C1CLo_GNz0l-NsM,62967
646
+ pyg_nightly-2.7.0.dev20250604.dist-info/RECORD,,
@@ -31,7 +31,7 @@ from .lazy_loader import LazyLoader
31
31
  contrib = LazyLoader('contrib', globals(), 'torch_geometric.contrib')
32
32
  graphgym = LazyLoader('graphgym', globals(), 'torch_geometric.graphgym')
33
33
 
34
- __version__ = '2.7.0.dev20250603'
34
+ __version__ = '2.7.0.dev20250604'
35
35
 
36
36
  __all__ = [
37
37
  'Index',
@@ -16,11 +16,12 @@ class CityNetwork(InMemoryDataset):
16
16
  a Large Graph Dataset and a Measurement"
17
17
  <https://arxiv.org/abs/2503.09008>`_ paper.
18
18
  The dataset contains four city networks: `paris`, `shanghai`, `la`,
19
- and 'london', where nodes represent junctions and edges represent
20
- directed road segments. The task is to predict each node's eccentricity
21
- score, which is approximated based on its 16-hop neighborhood. The score
22
- indicates how accessible one node is in the network, and is mapped to
23
- 10 quantiles for transductive classification. See the original
19
+ and `london`, where nodes represent junctions and edges represent
20
+ undirected road segments. The task is to predict each node's eccentricity
21
+ score, which is approximated based on its 16-hop neighborhood and naturally
22
+ requires long-range information. The score indicates how accessible one
23
+ node is in the network, and is mapped to 10 quantiles for transductive
24
+ classification. See the original
24
25
  `source code <https://github.com/LeonResearch/City-Networks>`_ for more
25
26
  details on the individual networks.
26
27
 
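For reference, a minimal usage sketch of the updated CityNetwork dataset follows; the (root, name) constructor and the torch_geometric.datasets export are assumed here, and the root path is illustrative only:

    # Hypothetical usage sketch; assumes CityNetwork takes the usual
    # (root, name) arguments and is re-exported from torch_geometric.datasets.
    from torch_geometric.datasets import CityNetwork

    dataset = CityNetwork(root='/tmp/CityNetworks', name='paris')
    data = dataset[0]                      # a single transductive graph
    print(data.num_nodes, data.num_edges)  # junctions / undirected road segments
    print(int(data.y.max()) + 1)           # 10 quantile classes of eccentricity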
@@ -61,6 +61,7 @@ from .gps_conv import GPSConv
61
61
  from .antisymmetric_conv import AntiSymmetricConv
62
62
  from .dir_gnn_conv import DirGNNConv
63
63
  from .mixhop_conv import MixHopConv
64
+ from .meshcnn_conv import MeshCNNConv
64
65
 
65
66
  import torch_geometric.nn.conv.utils # noqa
66
67
 
@@ -131,6 +132,7 @@ __all__ = [
131
132
  'AntiSymmetricConv',
132
133
  'DirGNNConv',
133
134
  'MixHopConv',
135
+ 'MeshCNNConv',
134
136
  ]
135
137
 
136
138
  classes = __all__
@@ -0,0 +1,491 @@
1
+ # The directive below suppresses the pyright warning raised on MeshCNNConv.update
2
+ # pyright: reportIncompatibleMethodOverride=false
3
+ from typing import Optional
4
+ from warnings import warn
5
+
6
+ import torch
7
+ from torch.nn import Linear, Module, ModuleList
8
+
9
+ from torch_geometric.nn.conv import MessagePassing
10
+ from torch_geometric.typing import Tensor
11
+
12
+
13
+ class MeshCNNConv(MessagePassing):
14
+ r"""The convolutional layer introduced by the paper
15
+ `"MeshCNN: A Network With An Edge" <https://arxiv.org/abs/1809.05910>`_.
16
+
17
+ Recall that, given a set of categories :math:`C`,
18
+ MeshCNN is a function that takes as its input
19
+ a triangular mesh
20
+ :math:`\mathcal{m} = (V, F) \in \mathbb{R}^{|V| \times 3} \times
21
+ \{0,...,|V|-1\}^{3 \times |F|}`, and returns as its output
22
+ a :math:`|C|`-dimensional vector, whose :math:`i` th component denotes
23
+ the probability of the input mesh belonging to category :math:`c_i \in C`.
24
+
25
+ Let :math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
26
+ denote the output value of the prior (e.g. :math:`k` th )
27
+ layer of our neural network. The :math:`i` th row of :math:`X^{(k)}` is a
28
+ :math:`\text{Dim-Out}(k)`-dimensional vector that represents the features
29
+ computed by the :math:`k` th layer for edge :math:`e_i` of the input mesh
30
+ :math:`\mathcal{m}`. Let :math:`A \in \{0, ..., |E|-1\}^{2 \times 4*|E|}`
31
+ denote the *edge adjacency* matrix of our input mesh :math:`\mathcal{m}`.
32
+ The :math:`j` th column of :math:`A` returns a pair of indices
33
+ :math:`k,l \in \{0,...,|E|-1\}`, which means that edge
34
+ :math:`e_k` is adjacent to edge :math:`e_l`
35
+ in our input mesh :math:`\mathcal{m}`.
36
+ The definition of edge adjacency in a triangular
37
+ mesh is illustrated in Figure 1.
38
+ In a triangular
39
+ mesh, each edge :math:`e_i` is expected to be adjacent to exactly :math:`4`
40
+ neighboring edges, hence the number of columns of :math:`A`: :math:`4*|E|`.
41
+ We write *the neighborhood* of edge :math:`e_i` as
42
+ :math:`\mathcal{N}(i) = (a(i), b(i), c(i), d(i))` where
43
+
44
+ 1. :math:`a(i)` denotes the index of the *first* counter-clockwise
45
+ edge of the face *above* :math:`e_i`.
46
+
47
+ 2. :math:`b(i)` denotes the index of the *second* counter-clockwise
48
+ edge of the face *above* :math:`e_i`.
49
+
50
+ 3. :math:`c(i)` denotes the index of the *first* counter-clockwise edge
51
+ of the face *below* :math:`e_i`.
52
+
53
+ 4. :math:`d(i)` denotes the index of the *second*
54
+ counter-clockwise edge of the face *below* :math:`e_i`.
55
+
56
+ .. figure:: ../_figures/meshcnn_edge_adjacency.svg
57
+ :align: center
58
+ :width: 80%
59
+
60
+ **Figure 1:** The neighbors of edge :math:`\mathbf{e_1}`
61
+ are :math:`\mathbf{e_2}, \mathbf{e_3}, \mathbf{e_4}` and
62
+ :math:`\mathbf{e_5}`, respectively.
63
+ We write this as
64
+ :math:`\mathcal{N}(1) = (a(1), b(1), c(1), d(1)) = (2, 3, 4, 5)`
65
+
66
+
67
+ Because of this ordering constraint, :obj:`MeshCNNConv` **requires
68
+ that the columns of** :math:`A`
69
+ **be ordered in the following way**:
70
+
71
+ .. math::
72
+ &A[:,0] = (0, \text{The index of the "a" edge for edge } 0) \\
73
+ &A[:,1] = (0, \text{The index of the "b" edge for edge } 0) \\
74
+ &A[:,2] = (0, \text{The index of the "c" edge for edge } 0) \\
75
+ &A[:,3] = (0, \text{The index of the "d" edge for edge } 0) \\
76
+ \vdots \\
77
+ &A[:,4*|E|-4] =
78
+ \bigl(|E|-1,
79
+ a\bigl(|E|-1\bigr)\bigr) \\
80
+ &A[:,4*|E|-3] =
81
+ \bigl(|E|-1,
82
+ b\bigl(|E|-1\bigr)\bigr) \\
83
+ &A[:,4*|E|-2] =
84
+ \bigl(|E|-1,
85
+ c\bigl(|E|-1\bigr)\bigr) \\
86
+ &A[:,4*|E|-1] =
87
+ \bigl(|E|-1,
88
+ d\bigl(|E|-1\bigr)\bigr)
89
+
90
+
91
+ Stated a bit more compactly, for every edge :math:`e_i` in the input mesh,
92
+ :math:`A` should have the following entries:
93
+
94
+ .. math::
95
+ A[:, 4*i] &= (i, a(i)) \\
96
+ A[:, 4*i + 1] &= (i, b(i)) \\
97
+ A[:, 4*i + 2] &= (i, c(i)) \\
98
+ A[:, 4*i + 3] &= (i, d(i))
99
+
100
+ To summarize so far, we have defined 3 things:
101
+
102
+ 1. The activation of the prior (e.g. :math:`k` th) layer,
103
+ :math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
104
+
105
+ 2. The edge adjacency matrix and the definition of edge adjacency.
106
+ :math:`A \in \{0,...,|E|-1\}^{2 \times 4*|E|}`
107
+
108
+ 3. The ways the columns of :math:`A` must be ordered.
109
+
110
+
111
+
112
+ We are now finally able to define the :obj:`MeshCNNConv` class/layer.
113
+ In the following definition
114
+ we assume :obj:`MeshCNNConv` is at the :math:`k+1` th layer of our
115
+ neural network.
116
+
117
+ The :obj:`MeshCNNConv` layer is a function,
118
+
119
+ .. math::
120
+ \text{MeshCNNConv}^{(k+1)}(X^{(k)}, A) = X^{(k+1)},
121
+
122
+ that, given the prior layer's output
123
+ :math:`X^{(k)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k)}`
124
+ and the edge adjacency matrix :math:`A`
125
+ of the input mesh (graph) :math:`\mathcal{m}` ,
126
+ returns a new edge feature tensor
127
+ :math:`X^{(k+1)} \in \mathbb{R}^{|E| \times \text{Dim-Out}(k+1)}`,
128
+ where the :math:`i` th row of :math:`X^{(k+1)}`, denoted by
129
+ :math:`x^{(k+1)}_i`,
130
+ represents the :math:`\text{Dim-Out}(k+1)`-dimensional feature vector
131
+ of edge :math:`e_i`, **and is defined as follows**:
132
+
133
+ .. math::
134
+ x^{(k+1)}_i &= W^{(k+1)}_0 x^{(k)}_i \\
135
+ &+ W^{(k+1)}_1 \bigl| x^{(k)}_{a(i)} - x^{(k)}_{c(i)} \bigr| \\
136
+ &+ W^{(k+1)}_2 \bigl( x^{(k)}_{a(i)} + x^{(k)}_{c(i)} \bigr) \\
137
+ &+ W^{(k+1)}_3 \bigl| x^{(k)}_{b(i)} - x^{(k)}_{d(i)} \bigr| \\
138
+ &+ W^{(k+1)}_4 \bigl( x^{(k)}_{b(i)} + x^{(k)}_{d(i)} \bigr).
139
+
140
+ :math:`W_0^{(k+1)},W_1^{(k+1)},W_2^{(k+1)},W_3^{(k+1)}, W_4^{(k+1)}
141
+ \in \mathbb{R}^{\text{Dim-Out}(k+1) \times \text{Dim-Out}(k)}`
142
+ are trainable linear functions (i.e. "the weights" of this layer).
143
+ :math:`x^{(k)}_i` is the :math:`\text{Dim-Out}(k)`-dimensional feature
144
+ vector of edge :math:`e_i` computed by the prior (i.e. :math:`k` th) layer.
145
+ :math:`x^{(k)}_{a(i)}, x^{(k)}_{b(i)}, x^{(k)}_{c(i)}`, and
146
+ :math:`x^{(k)}_{d(i)}` are the :math:`\text{Dim-Out}(k)`-feature vectors,
147
+ computed in the :math:`k` th layer, that are associated with the :math:`4`
148
+ neighboring edges of :math:`e_i`.
149
+
150
+
151
+ Args:
152
+ in_channels (int): Corresponds to :math:`\text{Dim-Out}(k)`
153
+ in the above overview. This
154
+ represents the output dimension of the prior layer. For the given
155
+ input mesh :math:`\mathcal{m} = (V, F)`, the prior layer is
156
+ expected to output a
157
+ :math:`X \in \mathbb{R}^{|E| \times \textit{in_channels}}`
158
+ feature matrix.
159
+ Assuming the instance of this class
160
+ is situated at layer :math:`k+1`, we write that
161
+ :math:`X^{(k)} \in \mathbb{R}^{|E| \times \textit{in_channels}}`.
162
+ out_channels (int): Corresponds to :math:`\text{Dim-Out}(k+1)` in the
163
+ above overview. This represents the output dimension of this layer.
164
+ Assuming the instance of this class
165
+ is situated at layer :math:`k+1`, we write that
166
+ :math:`X^{(k+1)}
167
+ \in \mathbb{R}^{|E| \times \textit{out_channels}}`.
168
+ kernels (torch.nn.ModuleList, optional): A list of 5
169
+ :class:`torch.nn.Module` instances (i.e. neural networks),
170
+ each of which MUST take as input a vector
171
+ of dimension :obj:`in_channels` and return a vector of dimension
172
+ :obj:`out_channels`. In particular,
173
+ :obj:`kernels[0]` is :math:`W^{(k+1)}_0` in the above overview
174
+ (see :obj:`MeshCNNConv`), :obj:`kernels[1]` is :math:`W^{(k+1)}_1`,
175
+ :obj:`kernels[2]` is :math:`W^{(k+1)}_2`,
176
+ :obj:`kernels[3]` is :math:`W^{(k+1)}_3`, and
177
+ :obj:`kernels[4]` is :math:`W^{(k+1)}_4`.
178
+ This argument is optional, in which case
179
+ each of the 5 kernels defaults to a linear
180
+ layer :class:`torch.nn.Linear`,
181
+ correctly configured to take as input
182
+ :attr:`in_channels`-dimensional vectors and return
183
+ a vector of dimension :attr:`out_channels`.
185
+
186
+ Discussion:
187
+ The key difference that separates :obj:`MeshCNNConv` from a traditional
188
+ message passing graph neural network is that :obj:`MeshCNNConv`
189
+ requires the set of neighbors for a node
190
+ :math:`\mathcal{N}(u) = (v_1, v_2, ...)`
191
+ to *be an ordered set* (i.e. a tuple). In
192
+ fact, :obj:`MeshCNNConv` goes further, requiring
193
+ that :math:`\mathcal{N}(u)` always return a set of size :math:`4`.
194
+ This is different from most message passing graph neural networks,
195
+ which assume that :math:`\mathcal{N}(u) = \{v_1, v_2, ...\}` returns an
196
+ unordered set. This lends :obj:`MeshCNNConv` more expressive power,
197
+ at the cost of no longer being permutation invariant to
198
+ :math:`\mathbb{S}_4`. Put more plainly, in traditional message passing
199
+ GNNs, the network is *unable* to distinguish one neighboring node
200
+ from another.
201
+ In contrast, in :obj:`MeshCNNConv`, each of the 4 neighbors has a
202
+ "role", either the "a", "b", "c", or "d" neighbor. We encode this fact
203
+ by requiring that :math:`\mathcal{N}` return the 4-tuple,
204
+ where the first component is the "a" neighbor, and so on.
205
+
206
+ To summarize this comparison, it may help to re-define
207
+ :obj:`MeshCNNConv` in terms of :math:`\text{UPDATE}` and
208
+ :math:`\text{AGGREGATE}`
209
+ functions, which is a general way to define a traditional GNN layer.
210
+ If we let :math:`x_i^{(k+1)}`
211
+ denote the output of a GNN layer for node :math:`i` at
212
+ layer :math:`k+1`, and let
213
+ :math:`\mathcal{N}(i)` denote the set of nodes adjacent
214
+ to node :math:`i`,
215
+ then we can describe the :math:`k+1` th layer of a traditional GNN
216
+ as
217
+
218
+ .. math::
219
+ x_i^{(k+1)} = \text{UPDATE}^{(k+1)}\bigl(x^{(k)}_i,
220
+ \text{AGGREGATE}^{(k+1)}\bigl(\mathcal{N}(i)\bigr)\bigr).
221
+
222
+ Here, :math:`\text{UPDATE}^{(k+1)}` is a function of :math:`2`
223
+ :math:`\text{Dim-Out}(k)`-dimensional vectors, and returns a
224
+ :math:`\text{Dim-Out}(k+1)`-dimensional vector.
225
+ The :math:`\text{AGGREGATE}^{(k+1)}` function
226
+ is a function of an *unordered set*
227
+ of nodes that are neighbors of node :math:`i`, as defined by
228
+ :math:`\mathcal{N}(i)`. Usually the size of this set varies across
229
+ different nodes :math:`i`, and one of the most basic examples
230
+ of such a function is the "sum aggregation", defined as
231
+ :math:`\text{AGGREGATE}^{(k+1)}(\mathcal{N}(i)) =
232
+ \sum_{j \in \mathcal{N}(i)} x^{(k)}_j`.
233
+ See
234
+ :class:`SumAggregation <torch_geometric.nn.aggr.basic.SumAggregation>`
235
+ for more.
236
+
237
+ In contrast, while :obj:`MeshCNNConv` 's :math:`\text{UPDATE}`
238
+ function follows
239
+ that of a traditional GNN, its :math:`\text{AGGREGATE}` is a function of
240
+ a tuple (i.e. an ordered set) of neighbors
241
+ rather than an unordered set of neighbors.
242
+ In particular, while the :math:`\text{UPDATE}`
243
+ function of :obj:`MeshCNNConv` for :math:`e_i` is
244
+
245
+ .. math::
246
+ x_i^{(k+1)} = \text{UPDATE}^{(k+1)}(x_i^{(k)}, s_i^{(k+1)})
247
+ = W_0^{(k+1)}x_i^{(k)} + s_i^{(k+1)},
248
+
249
+ its :math:`\text{AGGREGATE}` function is
250
+
251
+ .. math::
252
+ s_i^{(k+1)} = \text{AGGREGATE}^{(k+1)}(A, B, C, D)
253
+ &= W_1^{(k+1)}\bigl|A - C \bigr| \\
254
+ &+ W_2^{(k+1)}\bigl(A + C \bigr) \\
255
+ &+ W_3^{(k+1)}\bigl|B - D \bigr| \\
256
+ &+ W_4^{(k+1)}\bigl(B + D \bigr),
257
+
258
+ where :math:`A=x_{a(i)}^{(k)}, B=x_{b(i)}^{(k)}, C=x_{c(i)}^{(k)},`
259
+ and :math:`D=x_{d(i)}^{(k)}`.
260
+
261
+ ..
262
+
263
+ The :math:`i` th row of
264
+ :math:`V \in \mathbb{R}^{|V| \times 3}`
265
+ holds the cartesian :math:`xyz`
266
+ coordinates for node :math:`v_i` in the mesh, and the :math:`j` th
267
+ column in :math:`F \in \{0,...,|V|-1\}^{3 \times |F|}`
268
+ holds the :math:`3` indices
269
+ :math:`(k,l,m)` that correspond to the :math:`3` nodes
270
+ :math:`(v_k, v_l, v_m)` that construct face :math:`j` of the mesh.
271
+ """
272
+ def __init__(self, in_channels: int, out_channels: int,
273
+ kernels: Optional[ModuleList] = None):
274
+ super().__init__(aggr='add')
275
+ self.in_channels = in_channels
276
+ self.out_channels = out_channels
277
+
278
+ if kernels is None:
279
+ self.kernels = ModuleList(
280
+ [Linear(in_channels, out_channels) for _ in range(5)])
281
+
282
+ else:
283
+ # ensures kernels is properly formed, otherwise throws
284
+ # the appropriate error.
285
+ self._assert_kernels(kernels)
286
+ self.kernels = kernels
287
+
288
+ def forward(self, x: Tensor, edge_index: Tensor) -> Tensor:
289
+ r"""Forward pass.
290
+
291
+ Args:
292
+ x(torch.Tensor): :math:`X^{(k)} \in
293
+ \mathbb{R}^{|E| \times \textit{in_channels}}`.
294
+ The edge feature tensor returned by the prior layer
295
+ (e.g. :math:`k`). The tensor is of shape
296
+ :math:`|E| \times \text{Dim-Out}(k)`, or equivalently,
297
+ :obj:`(|E|, self.in_channels)`.
298
+
299
+ edge_index(torch.Tensor):
300
+ :math:`A \in \{0,...,|E|-1\}^{2 \times 4*|E|}`.
301
+ The edge adjacency tensor of the network's input mesh
302
+ :math:`\mathcal{m} = (V, F)`. The edge adjacency tensor
303
+ **MUST** have the following form:
304
+
305
+ .. math::
306
+ &A[:,0] = (0,
307
+ \text{The index of the "a" edge for edge } 0) \\
308
+ &A[:,1] = (0,
309
+ \text{The index of the "b" edge for edge } 0) \\
310
+ &A[:,2] = (0,
311
+ \text{The index of the "c" edge for edge } 0) \\
312
+ &A[:,3] = (0,
313
+ \text{The index of the "d" edge for edge } 0) \\
314
+ \vdots \\
315
+ &A[:,4*|E|-4] =
316
+ \bigl(|E|-1,
317
+ a\bigl(|E|-1\bigr)\bigr) \\
318
+ &A[:,4*|E|-3] =
319
+ \bigl(|E|-1,
320
+ b\bigl(|E|-1\bigr)\bigr) \\
321
+ &A[:,4*|E|-2] =
322
+ \bigl(|E|-1,
323
+ c\bigl(|E|-1\bigr)\bigr) \\
324
+ &A[:,4*|E|-1] =
325
+ \bigl(|E|-1,
326
+ d\bigl(|E|-1\bigr)\bigr)
327
+
328
+ See :obj:`MeshCNNConv` for what
329
+ "index of the 'a'(b,c,d) edge for edge i" means, and also
330
+ for the general definition of edge adjacency in MeshCNN.
331
+ These definitions are also provided in the
332
+ `paper <https://arxiv.org/abs/1809.05910>`_ itself.
333
+
334
+ Returns:
335
+ torch.Tensor:
336
+ :math:`X^{(k+1)} \in \mathbb{R}^{|E| \times \textit{out_channels}}`.
337
+ The edge feature tensor for this (e.g. the :math:`k+1` th) layer.
338
+ The :math:`i` th row of :math:`X^{(k+1)}` is computed according
339
+ to the formula
340
+
341
+ .. math::
342
+ x^{(k+1)}_i &= W^{(k+1)}_0 x^{(k)}_i \\
343
+ &+ W^{(k+1)}_1 \bigl| x^{(k)}_{a(i)} - x^{(k)}_{c(i)} \bigr| \\
344
+ &+ W^{(k+1)}_2 \bigl( x^{(k)}_{a(i)} + x^{(k)}_{c(i)} \bigr) \\
345
+ &+ W^{(k+1)}_3 \bigl| x^{(k)}_{b(i)} - x^{(k)}_{d(i)} \bigr| \\
346
+ &+ W^{(k+1)}_4 \bigl( x^{(k)}_{b(i)} + x^{(k)}_{d(i)} \bigr),
347
+
348
+ where :math:`W_0^{(k+1)},W_1^{(k+1)},
349
+ W_2^{(k+1)},W_3^{(k+1)}, W_4^{(k+1)}
350
+ \in \mathbb{R}^{\text{Dim-Out}(k+1) \times \text{Dim-Out}(k)}`
351
+ are the trainable linear functions (i.e. the trainable
352
+ "weights") of this layer, and
353
+ :math:`x^{(k)}_{a(i)}, x^{(k)}_{b(i)}, x^{(k)}_{c(i)}`,
354
+ :math:`x^{(k)}_{d(i)}` are the
355
+ :math:`\text{Dim-Out}(k)`-dimensional edge feature vectors
356
+ computed by the prior (:math:`k` th) layer,
357
+ that are associated with the :math:`4`
358
+ neighboring edges of :math:`e_i`.
359
+
360
+ """
361
+ return self.propagate(edge_index, x=x)
362
+
363
+ def message(self, x_j: Tensor) -> Tensor:
364
+ r"""The messaging passing step of :obj:`MeshCNNConv`.
365
+
366
+
367
+ Args:
368
+ x_j: A :obj:`[4*|E|, in_channels]` tensor. Its i-th row holds
369
+ the features, computed by the previous layer, of the source edge
370
+ of the i-th column of :obj:`edge_index`.
371
+
372
+ Returns:
373
+ A :obj:`[4*|E|, out_channels]` tensor,
374
+ whose i-th row is the message
375
+ that the target edge of the i-th column of :obj:`edge_index` will receive.
376
+ """
377
+ # The following variable names are taken from the paper.
378
+ # MeshCNN computes the features associated with edge
379
+ # e by (|a - c|, a + c, |b - d|, b + d), where a, b, c, d are the
380
+ # neighboring edges of e, a being the first edge of the upper face,
381
+ # b being the second edge of the upper face, c being the first edge
382
+ # of the lower face,
383
+ # and d being the second edge of the lower face of the input Mesh
384
+
385
+ # TODO: It is unclear if view is faster. If it is not,
386
+ # then we should prefer the strided method commented out below
387
+
388
+ E4, in_channels = x_j.size() # E4 = 4|E|, i.e. num edges in line graph
389
+ # Option 1
390
+ n_a = x_j[0::4] # shape: |E| x in_channels
391
+ n_b = x_j[1::4] # shape: |E| x in_channels
392
+ n_c = x_j[2::4] # shape: |E| x in_channels
393
+ n_d = x_j[3::4] # shape: |E| x in_channels
394
+ m = torch.empty(E4, self.out_channels, device=x_j.device, dtype=x_j.dtype)
395
+ m[0::4] = self.kernels[1].forward(torch.abs(n_a - n_c))
396
+ m[1::4] = self.kernels[2].forward(n_a + n_c)
397
+ m[2::4] = self.kernels[3].forward(torch.abs(n_b - n_d))
398
+ m[3::4] = self.kernels[4].forward(n_b + n_d)
399
+ return m
400
+
401
+ # Option 2
402
+ # E4, in_channels = x_j.size()
403
+ # E = E4 // 4
404
+ # x_j = x_j.view(E, 4, in_channels) # shape: (|E| x 4 x in_channels)
405
+ # n_a, n_b, n_c, n_d = x_j.unbind(
406
+ # dim=1) # shape: (4 x |E| x in_channels)
407
+ # m = torch.stack(
408
+ # [
409
+ # (n_a - n_c).abs(), # shape: |E| x in_channels
410
+ # n_a + n_c,
411
+ # (n_b - n_d).abs(),
412
+ # n_b + n_d,
413
+ # ],
414
+ # dim=1) # shape: (|E| x 4 x in_channels)
415
+ # m.view(E4, in_channels) # shape 4*|E| x in_channels
416
+ # return m
417
+
418
+ def update(self, inputs: Tensor, x: Tensor) -> Tensor:
419
+ r"""The UPDATE step, in reference to the UPDATE and AGGREGATE
420
+ formulation of message passing convolution.
421
+
422
+ Args:
423
+ inputs (torch.Tensor): The :obj:`(|E|, out_channels)`-shaped tensor
424
+ returned by the aggregation step.
425
+ x(torch.Tensor): :math:`X^{(k)}`. The original inputs to this layer.
426
+
427
+ Returns:
428
+ torch.Tensor: :math:`X^{(k+1)}`. The output of this layer, which
429
+ has shape :obj:`(|E|, out_channels)`.
430
+ """
431
+ return self.kernels[0].forward(x) + inputs
432
+
433
+ def _assert_kernels(self, kernels: ModuleList):
434
+ r"""Ensures that :obj:`kernels` is a list of 5 :obj:`torch.nn.Module`
435
+ modules (i.e. networks). In addition, it also ensures that each network
436
+ takes in input of dimension :attr:`in_channels`, and returns output
437
+ of dimension :attr:`out_channels`.
438
+ This method throws an error otherwise.
439
+
440
+ .. warning::
441
+ This method throws an error if :obj:`kernels` is
442
+ not valid. (Otherwise this method returns nothing)
443
+
444
+ """
445
+ assert isinstance(kernels, ModuleList), \
446
+ f"Parameter 'kernels' must be a \
447
+ torch.nn.ModuleList with 5 members, but we got \
448
+ {type(kernels)}."
449
+
450
+ assert len(kernels) == 5, "Parameter 'kernels' must be a \
451
+ torch.nn.ModuleList with exactly 5 members"
452
+
453
+ for i, network in enumerate(kernels):
454
+ assert isinstance(network, Module), \
455
+ f"kernels[{i}] must be torch.nn.Module, got \
456
+ {type(network)}"
457
+ if not hasattr(network, "in_channels") and \
458
+ not hasattr(network, "in_features"):
459
+ warn(f"kernel[{i}] does not have attribute \
460
+ 'in_channels' nor 'in_features'. The \
461
+ network must take as input a \
462
+ {self.in_channels}-dimensional tensor. \
463
+ Still, assuming user configured \
464
+ correctly. Continuing..")
465
+ else:
466
+ input_dimension = getattr(network, "in_channels",
467
+ network.in_features)
468
+ assert input_dimension == self.in_channels, f"The input \
469
+ dimension of the neural network in kernel[{i}] must \
470
+ be \
471
+ equal to 'in_channels', but input_dimension = \
472
+ {input_dimension}, and \
473
+ self.in_channels={self.in_channels}."
474
+
475
+ if not hasattr(network, "out_channels") and \
476
+ not hasattr(network, "out_features"):
477
+ warn(f"kernel[{i}] does not have attribute \
478
+ 'out_channels' nor 'out_features'. The \
479
+ network must return a \
480
+ {self.out_channels}-dimensional tensor. \
481
+ Still, assuming user configured \
482
+ correctly. Continuing..")
483
+ else:
484
+ output_dimension = getattr(network, "out_channels",
485
+ network.out_features)
486
+ assert output_dimension == self.out_channels, f"The output \
487
+ dimension of the neural network in kernel[{i}] must \
488
+ be \
489
+ equal to 'out_channels', but out_dimension = \
490
+ {output_dimension}, and \
491
+ self.out_channels={self.out_channels}."
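For reference, a minimal shape-level sketch of the new layer follows; the neighbor indices below are arbitrary placeholders rather than a real mesh, and only illustrate the (i, a(i)), (i, b(i)), (i, c(i)), (i, d(i)) column ordering documented above:

    # Minimal sketch, not a real mesh: neighbor indices are placeholders chosen
    # only to produce a well-formed [2, 4*|E|] edge adjacency tensor.
    import torch
    from torch_geometric.nn.conv import MeshCNNConv

    num_edges, in_channels, out_channels = 6, 5, 8
    x = torch.randn(num_edges, in_channels)  # one feature row per mesh edge

    cols = []
    for i in range(num_edges):
        a, b, c, d = ((i + k) % num_edges for k in range(1, 5))  # placeholder neighbors
        cols += [[i, a], [i, b], [i, c], [i, d]]
    edge_index = torch.tensor(cols, dtype=torch.long).t()  # shape [2, 4*|E|]

    conv = MeshCNNConv(in_channels, out_channels)
    out = conv(x, edge_index)
    print(out.shape)  # torch.Size([6, 8])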
@@ -57,6 +57,7 @@ from .embedding import get_embeddings, get_embeddings_hetero
57
57
  from ._trim_to_layer import trim_to_layer
58
58
  from .ppr import get_ppr
59
59
  from ._train_test_split_edges import train_test_split_edges
60
+ from .influence import total_influence
60
61
 
61
62
  __all__ = [
62
63
  'scatter',
@@ -149,6 +150,7 @@ __all__ = [
149
150
  'trim_to_layer',
150
151
  'get_ppr',
151
152
  'train_test_split_edges',
153
+ 'total_influence',
152
154
  ]
153
155
 
154
156
  # `structured_negative_sampling_feasible` is a long name and thus destroys the
@@ -0,0 +1,271 @@
1
+ from typing import List, Tuple, Union, cast
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch.autograd.functional import jacobian
6
+ from tqdm.auto import tqdm
7
+
8
+ from torch_geometric.data import Data
9
+ from torch_geometric.utils import k_hop_subgraph
10
+
11
+
12
+ def k_hop_subsets_rough(
13
+ node_idx: int,
14
+ num_hops: int,
15
+ edge_index: Tensor,
16
+ num_nodes: int,
17
+ ) -> List[Tensor]:
18
+ r"""Return *rough* (possibly overlapping) *k*-hop node subsets.
19
+
20
+ This is a thin wrapper around
21
+ :pyfunc:`torch_geometric.utils.k_hop_subgraph` that *additionally* returns
22
+ **all** intermediate hop subsets rather than the full union only.
23
+
24
+ Parameters
25
+ ----------
26
+ node_idx: int
27
+ Index or indices of the central node(s).
28
+ num_hops: int
29
+ Number of hops *k*.
30
+ edge_index: Tensor
31
+ Edge index in COO format with shape :math:`[2, \text{num_edges}]`.
32
+ num_nodes: int
33
+ Total number of nodes in the graph. Required to allocate the masks.
34
+
35
+ Returns:
36
+ -------
37
+ List[Tensor]
38
+ A list ``[H₀, H₁, …, H_k]`` where ``H₀`` contains the seed node(s) and
39
+ ``H_i`` (for *i*>0) contains **all** nodes that are exactly *i* hops
40
+ away in the *expanded* neighbourhood (i.e. overlaps are *not*
41
+ removed).
42
+ """
43
+ col, row = edge_index
44
+
45
+ node_mask = row.new_empty(num_nodes, dtype=torch.bool)
46
+ edge_mask = row.new_empty(row.size(0), dtype=torch.bool)
47
+
48
+ node_idx_ = torch.tensor([node_idx], device=row.device)
49
+
50
+ subsets = [node_idx_]
51
+ for _ in range(num_hops):
52
+ node_mask.zero_()
53
+ node_mask[subsets[-1]] = True
54
+ torch.index_select(node_mask, 0, row, out=edge_mask)
55
+ subsets.append(col[edge_mask])
56
+
57
+ return subsets
58
+
59
+
60
+ def k_hop_subsets_exact(
61
+ node_idx: int,
62
+ num_hops: int,
63
+ edge_index: Tensor,
64
+ num_nodes: int,
65
+ device: Union[torch.device, str],
66
+ ) -> List[Tensor]:
67
+ """Return **disjoint** *k*-hop subsets.
68
+
69
+ This function refines :pyfunc:`k_hop_subsets_rough` by removing nodes that
70
+ have already appeared in previous hops, ensuring that each subset contains
71
+ nodes *exactly* *i* hops away from the seed.
72
+ """
73
+ rough_subsets = k_hop_subsets_rough(node_idx, num_hops, edge_index,
74
+ num_nodes)
75
+
76
+ exact_subsets: List[List[int]] = [rough_subsets[0].tolist()]
77
+ visited: set[int] = set(exact_subsets[0])
78
+
79
+ for hop_subset in rough_subsets[1:]:
80
+ fresh = set(hop_subset.tolist()) - visited
81
+ visited |= fresh
82
+ exact_subsets.append(list(fresh))
83
+
84
+ return [
85
+ torch.tensor(s, device=device, dtype=edge_index.dtype)
86
+ for s in exact_subsets
87
+ ]
88
+
89
+
90
+ def jacobian_l1(
91
+ model: torch.nn.Module,
92
+ data: Data,
93
+ max_hops: int,
94
+ node_idx: int,
95
+ device: Union[torch.device, str],
96
+ *,
97
+ vectorize: bool = True,
98
+ ) -> Tensor:
99
+ """Compute the **L1 norm** of the Jacobian for a given node.
100
+
101
+ The Jacobian is evaluated w.r.t. the node features of the *k*-hop induced
102
+ sub‑graph centred at ``node_idx``. The result is *folded back* onto the
103
+ **original** node index space so that the returned tensor has length
104
+ ``data.num_nodes``, where the influence score will be zero for nodes
105
+ outside the *k*-hop subgraph.
106
+
107
+ Notes:
108
+ -----
109
+ * The function assumes that the model *and* ``data.x`` share the same
110
+ floating‑point precision (e.g. both ``float32`` or both ``float16``).
111
+
112
+ """
113
+ # Build the induced *k*-hop sub‑graph (with node re‑labelling).
114
+ edge_index = cast(Tensor, data.edge_index)
115
+ x = cast(Tensor, data.x)
116
+ k_hop_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(
117
+ node_idx, max_hops, edge_index, relabel_nodes=True)
118
+ # get the location of the *center* node inside the sub‑graph
119
+ root_pos = cast(int, mapping[0])
120
+
121
+ # Move tensors & model to the correct device
122
+ device = torch.device(device)
123
+ sub_x = x[k_hop_nodes].to(device)
124
+ sub_edge_index = sub_edge_index.to(device)
125
+ model = model.to(device)
126
+
127
+ # Jacobian evaluation
128
+ def _forward(x: Tensor) -> Tensor:
129
+ return model(x, sub_edge_index)[root_pos]
130
+
131
+ jac = jacobian(_forward, sub_x, vectorize=vectorize)
132
+ influence_sub = jac.abs().sum(dim=(0, 2)) # Sum of L1 norm
133
+ num_nodes = cast(int, data.num_nodes)
134
+ # Scatter the influence scores back to the *global* node space
135
+ influence_full = torch.zeros(num_nodes, dtype=influence_sub.dtype,
136
+ device=device)
137
+ influence_full[k_hop_nodes] = influence_sub
138
+
139
+ return influence_full
140
+
141
+
142
+ def jacobian_l1_agg_per_hop(
143
+ model: torch.nn.Module,
144
+ data: Data,
145
+ max_hops: int,
146
+ node_idx: int,
147
+ device: Union[torch.device, str],
148
+ vectorize: bool = True,
149
+ ) -> Tensor:
150
+ """Aggregate Jacobian L1 norms **per hop** for node_idx.
151
+
152
+ Returns a vector ``[I_0, I_1, …, I_k]`` where ``I_i`` is the *total*
153
+ influence exerted by nodes that are exactly *i* hops away from
154
+ ``node_idx``.
155
+ """
156
+ num_nodes = cast(int, data.num_nodes)
157
+ edge_index = cast(Tensor, data.edge_index)
158
+ influence = jacobian_l1(model, data, max_hops, node_idx, device,
159
+ vectorize=vectorize)
160
+ hop_subsets = k_hop_subsets_exact(node_idx, max_hops, edge_index,
161
+ num_nodes, influence.device)
162
+ sigle_node_influence_per_hop = [influence[s].sum() for s in hop_subsets]
163
+ return torch.tensor(sigle_node_influence_per_hop, device=influence.device)
164
+
165
+
166
+ def avg_total_influence(
167
+ influence_all_nodes: Tensor,
168
+ normalize: bool = True,
169
+ ) -> Tensor:
170
+ """Compute the *influence‑weighted receptive field* ``R``."""
171
+ avg_total_influences = torch.mean(influence_all_nodes, dim=0)
172
+ if normalize:  # normalize by hop 0 (Jacobian of the center node feature)
173
+ avg_total_influences = avg_total_influences / avg_total_influences[0]
174
+ return avg_total_influences
175
+
176
+
177
+ def influence_weighted_receptive_field(T: Tensor) -> float:
178
+ """Compute the *influence‑weighted receptive field* ``R``.
179
+
180
+ Given an influence matrix ``T`` of shape ``[N, k+1]`` (i‑th row contains
181
+ the per‑hop influences of node *i*), the receptive field breadth *R* is
182
+ defined as the expected hop distance when weighting by influence.
183
+
184
+ A larger *R* indicates that, on average, influence comes from **farther**
185
+ hops.
186
+ """
187
+ normalised = T / torch.sum(T, dim=1, keepdim=True)
188
+ hops = torch.arange(T.shape[1]).float() # 0 … k
189
+ breadth = normalised @ hops # shape (N,)
190
+ return breadth.mean().item()
191
+
192
+
193
+ def total_influence(
194
+ model: torch.nn.Module,
195
+ data: Data,
196
+ max_hops: int,
197
+ num_samples: Union[int, None] = None,
198
+ normalize: bool = True,
199
+ average: bool = True,
200
+ device: Union[torch.device, str] = "cpu",
201
+ vectorize: bool = True,
202
+ ) -> Tuple[Tensor, float]:
203
+ r"""Compute Jacobian‑based influence aggregates for *multiple* seed nodes.
204
+
205
+ For every sampled node :math:`v`, this method
206
+
207
+ 1. evaluates the **L1‑norm** of the Jacobian of the model output at
208
+ :math:`v` w.r.t. the node features of its *k*-hop induced sub‑graph;
209
+ 2. sums these scores **per hop** to obtain the influence vector
210
+ :math:`(I_{0}, I_{1}, \dots, I_{k})`;
211
+ 3. optionally averages those vectors over all sampled nodes and
212
+ (optionally) normalises them by :math:`I_{0}`.
213
+
214
+ Args:
215
+ model (torch.nn.Module): A PyTorch Geometric‑compatible model with
216
+ forward signature ``model(x, edge_index) -> Tensor``.
217
+ data (torch_geometric.data.Data): Graph data object providing at least
218
+ :obj:`x` (node features) and :obj:`edge_index` (connectivity).
219
+ max_hops (int): Maximum hop distance :math:`k`.
220
+ num_samples (int, optional): Number of seed nodes to evaluate.
221
+ If :obj:`None`, all nodes are used. (default: :obj:`None`)
222
+ normalize (bool, optional): If :obj:`True`, divide each hop‑wise
223
+ average by the influence of hop 0. (default: :obj:`True`)
224
+ average (bool, optional): If :obj:`True`, return the hop‑wise **mean**
225
+ over all seed nodes (shape ``[k+1]``).
226
+ If :obj:`False`, return the full influence matrix of shape
227
+ ``[N, k+1]``. (default: :obj:`True`)
228
+ device (torch.device or str, optional): Device on which to perform the
229
+ computation. (default: :obj:`"cpu"`)
230
+ vectorize (bool, optional): Forwarded to
231
+ :func:`torch.autograd.functional.jacobian`. Keeping this
232
+ :obj:`True` is often faster but increases memory usage.
233
+ (default: :obj:`True`)
234
+
235
+ Returns:
236
+ Tuple[Tensor, float]:
237
+ * **avg_influence** (*Tensor*):
238
+ • shape ``[k+1]`` if :obj:`average=True`;
239
+ • shape ``[N, k+1]`` otherwise.
240
+ * **R** (*float*): Influence‑weighted receptive‑field breadth
241
+ returned by :func:`influence_weighted_receptive_field`.
242
+
243
+ Example::
244
+ >>> avg_I, R = total_influence(model, data, max_hops=3,
245
+ ... num_samples=1000)
246
+ >>> avg_I
247
+ tensor([1.0000, 0.1273, 0.0142, 0.0019])
248
+ >>> R
249
+ 0.216
250
+ """
251
+ num_samples = data.num_nodes if num_samples is None else num_samples
252
+ num_nodes = cast(int, data.num_nodes)
253
+ nodes = torch.randperm(num_nodes)[:num_samples].tolist()
254
+
255
+ influence_all_nodes: List[Tensor] = [
256
+ jacobian_l1_agg_per_hop(model, data, max_hops, n, device,
257
+ vectorize=vectorize)
258
+ for n in tqdm(nodes, desc="Influence")
259
+ ]
260
+ allnodes = torch.vstack(influence_all_nodes).detach().cpu()
261
+
262
+ # Average total influence at each hop
263
+ if average:
264
+ avg_influence = avg_total_influence(allnodes, normalize=normalize)
265
+ else:
266
+ avg_influence = allnodes
267
+
268
+ # Influence‑weighted receptive field
269
+ R = influence_weighted_receptive_field(allnodes)
270
+
271
+ return avg_influence, R
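Finally, a small end-to-end sketch of the new influence utility; the two-layer GCN below is only an illustration of the required model(x, edge_index) forward signature, and the toy graph is arbitrary:

    # Illustrative only: any model with forward(x, edge_index) -> Tensor works.
    import torch
    from torch_geometric.data import Data
    from torch_geometric.nn import GCNConv
    from torch_geometric.utils import total_influence

    class GCN(torch.nn.Module):
        def __init__(self, in_channels, hidden_channels, out_channels):
            super().__init__()
            self.conv1 = GCNConv(in_channels, hidden_channels)
            self.conv2 = GCNConv(hidden_channels, out_channels)

        def forward(self, x, edge_index):
            return self.conv2(self.conv1(x, edge_index).relu(), edge_index)

    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                               [1, 0, 2, 1, 3, 2]])  # a 4-node path graph
    data = Data(x=torch.randn(4, 8), edge_index=edge_index)

    avg_I, R = total_influence(GCN(8, 16, 2), data, max_hops=2, num_samples=4)
    print(avg_I)  # per-hop influence, normalized by hop 0
    print(R)      # influence-weighted receptive-field breadth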