pyafv: 0.3.3-cp312-cp312-win_arm64.whl → 0.3.6-cp312-cp312-win_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyafv/__init__.py +9 -6
- pyafv/_version.py +2 -0
- pyafv/backend.py +9 -5
- pyafv/cell_geom.cp312-win_arm64.pyd +0 -0
- pyafv/cell_geom_fallback.py +249 -0
- pyafv/{finite_voronoi_fast.py → finite_voronoi.py} +236 -53
- pyafv/physical_params.py +58 -18
- {pyafv-0.3.3.dist-info → pyafv-0.3.6.dist-info}/METADATA +13 -5
- pyafv-0.3.6.dist-info/RECORD +12 -0
- pyafv/finite_voronoi_fallback.py +0 -989
- pyafv/simulator.py +0 -37
- pyafv-0.3.3.dist-info/RECORD +0 -12
- {pyafv-0.3.3.dist-info → pyafv-0.3.6.dist-info}/WHEEL +0 -0
- {pyafv-0.3.3.dist-info → pyafv-0.3.6.dist-info}/licenses/LICENSE +0 -0
- {pyafv-0.3.3.dist-info → pyafv-0.3.6.dist-info}/top_level.txt +0 -0
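The list above shows the typical compiled-extension-plus-fallback layout: the platform wheel ships a compiled cell_geom.cp312-win_arm64.pyd next to a pure-Python cell_geom_fallback.py, with backend.py choosing between them. This diff does not show backend.py itself, so the snippet below is only a sketch of how such a switch is commonly wired inside a package; the names HAS_COMPILED_BACKEND and use_fallback are illustrative, not pyafv's actual API.

# Hypothetical module-level backend switch (illustrative; not copied from pyafv/backend.py).
try:
    from . import cell_geom as _cell_geom            # compiled kernel shipped in the wheel
    HAS_COMPILED_BACKEND = True
except ImportError:
    from . import cell_geom_fallback as _cell_geom   # pure-Python fallback
    HAS_COMPILED_BACKEND = False


def use_fallback() -> bool:
    """Return True when the pure-Python geometry kernel is in use (illustrative helper)."""
    return not HAS_COMPILED_BACKEND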
pyafv/finite_voronoi_fallback.py
DELETED
@@ -1,989 +0,0 @@
"""
Finite Voronoi Model Simulator in 2D
====================================
Created by Wei Wang, 2025.

Key public entry points:
------------------------
- FiniteVoronoiSimulator: configure parameters.
- build(): build the finite Voronoi diagram and compute forces.
- plot_2d(): plot the finite Voronoi diagram with matplotlib.
- update_points(): update cell center positions.
- update_params(): update physical parameters.
"""

from typing import Dict, List, Tuple, Optional
import numpy as np
from scipy.spatial import Voronoi
from collections import defaultdict
from matplotlib import pyplot as plt
from matplotlib.axes import Axes

from .physical_params import PhysicalParams


# ---- tiny helpers to avoid tiny allocations in hot loops ----
def _row_dot(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Row-wise dot product for 2D arrays with shape (N,2)."""
    return np.einsum("ij,ij->i", a, b)


class FiniteVoronoiSimulator:
    def __init__(self, pts: np.ndarray, phys: PhysicalParams):
        self.pts = pts.copy()   # (N,2) array of initial points
        self.N = pts.shape[0]   # Number of points
        self.phys = phys

    # --------------------- Voronoi construction & extension ---------------------
    def _build_voronoi_with_extensions(self) -> Tuple[Voronoi, np.ndarray, List[List[int]], int, Dict[Tuple[int, int], int], Dict[int, List[int]]]:
        """
        Build SciPy Voronoi for current points. For N<=2, emulate regions.
        For N>=3, extend infinite ridges by adding long rays and update
        regions accordingly. Return augmented structures.
        """
        r = self.phys.r
        pts = self.pts
        N = self.N

        # Special handling: N == 1, 2
        if N == 1:
            vor = Voronoi(np.random.rand(3, 2))
            vor.points[:N] = pts
            vor.vertices = np.array([]).reshape(-1, 2)

            vor.regions = [[]]
            vor.ridge_vertices = np.array([]).reshape(-1, 2)
            vor.point_region = [0]
            vor.ridge_points = np.array([]).reshape(-1, 2)

            vertices_all = vor.vertices
            ridge_vertices_all = vor.ridge_vertices
            num_vertices = len(vor.vertices)

        if N == 2:
            vor = Voronoi(np.random.rand(3, 2))
            vor.points[:N] = pts

            p1, p2 = pts
            center = (p1 + p2) / 2.0

            t = p1 - p2
            t_norm = np.linalg.norm(t)
            t /= t_norm

            n = np.array([-t[1], t[0]])  # Perpendicular vector of t

            if t_norm >= 2 * r:
                vor.vertices = np.array([]).reshape(-1, 2)
                vor.regions = [[], []]
                vor.ridge_vertices = np.array([]).reshape(-1, 2)
                vor.ridge_points = np.array([]).reshape(-1, 2)
            else:
                v1 = center + 2 * r * n
                v2 = center - 2 * r * n
                v3 = center + 3 * r * (p1 - center) / np.linalg.norm(p2 - center)
                v4 = center + 3 * r * (p2 - center) / np.linalg.norm(p2 - center)
                vor.vertices = np.array([v1, v2, v3, v4]).reshape(-1, 2)
                vor.ridge_vertices = [[0, 1], [1, 2], [0, 2], [0, 3], [1, 3]]
                vor.regions = [[0, 1, 2], [0, 1, 3]]
                vor.ridge_points = np.array([[0, 1], [-1, 0], [-1, 0], [-1, 1], [-1, 1]])

            vor.point_region = [0, 1]

            vertices_all = vor.vertices
            ridge_vertices_all = vor.ridge_vertices
            num_vertices = len(vor.vertices)

        # N >= 3 (vectorized main path)
        if N >= 3:
            vor = Voronoi(pts)
            """
            Basic info from Voronoi object:
            -------------------------------
            vor.vertices        # (K,2) Voronoi vertices (finite)
            vor.ridge_points    # (R,2) pairs of input point indices sharing a Voronoi ridge
            vor.ridge_vertices  # list of vertex-index lists (may contain -1 for infinity)
            vor.point_region    # for each input point, the region index
            vor.regions         # list of regions; each is a list of vertex indices (may include -1)
            """
            center = np.mean(pts, axis=0)

            span_x = np.ptp(vor.vertices[:, 0])  # span in x
            span_y = np.ptp(vor.vertices[:, 1])  # span in y
            pts_span_x = np.ptp(pts[:, 0])  # span in x
            pts_span_y = np.ptp(pts[:, 1])  # span in y
            span = max(span_x, span_y, pts_span_x, pts_span_y, 10. * r)  # overall span

            # Base copies
            vertices_base = vor.vertices  # (K,2)
            rv_arr = np.asarray(vor.ridge_vertices, dtype=int)  # (R,2) may contain -1
            rp_arr = np.asarray(vor.ridge_points, dtype=int)  # (R,2)

            # Remove -1 from regions (we will append extension ids later)
            vor.regions = [[vid for vid in region if vid >= 0] for region in vor.regions]

            # Identify ridges with an infinite endpoint
            inf_mask = (rv_arr == -1).any(axis=1)  # (R,)
            num_inf = int(inf_mask.sum())

            if num_inf > 0:
                rv_inf = rv_arr[inf_mask]  # (M,2)
                rp_inf = rp_arr[inf_mask]  # (M,2)

                # finite endpoint index per infinite ridge
                v_idx_finite = np.where(rv_inf[:, 0] != -1, rv_inf[:, 0], rv_inf[:, 1])  # (M,)

                # geometry for normals
                p1 = pts[rp_inf[:, 0]]  # (M,2)
                p2 = pts[rp_inf[:, 1]]  # (M,2)
                mid = (p1 + p2) / 2.0  # (M,2)

                t = p1 - p2  # (M,2)
                t_norm = np.linalg.norm(t, axis=1, keepdims=True)  # (M,1)
                t_unit = t / t_norm

                # (M,2), perpendicular
                n = np.column_stack([-t_unit[:, 1], t_unit[:, 0]])

                # Ensure "outward" normal
                sign = np.einsum("ij,ij->i", (mid - center), n)  # (M,)
                n[sign < 0] *= -1.0

                # Build extension points
                ext = vertices_base[v_idx_finite] + (100.0 * span) * n  # (M,2), long rays (extension must be long enough!)

                # Concatenate once
                K = vertices_base.shape[0]
                vertices_all = np.vstack([vertices_base, ext])  # (K+M,2)

                # New vertex ids for extensions
                ext_ids = np.arange(K, K + num_inf, dtype=int)  # (M,)

                # Replace -1 with ext_ids in a vectorized way
                rv_new = rv_arr.copy()  # (R,2)
                rv_sub = rv_new[inf_mask]  # view (M,2)

                pos0 = (rv_sub[:, 0] == -1)  # (M,)
                rv_sub[pos0, 0] = ext_ids[pos0]
                rv_sub[~pos0, 1] = ext_ids[~pos0]
                rv_new[inf_mask] = rv_sub

                ridge_vertices_all = rv_new.tolist()

                # Append extension id to both adjacent regions (list-of-lists => tiny loop)
                for m in range(num_inf):
                    i1, i2 = rp_inf[m]
                    e = ext_ids[m]
                    region_id = vor.point_region[i1]
                    vor.regions[region_id].append(e)
                    region_id = vor.point_region[i2]
                    vor.regions[region_id].append(e)
            else:
                vertices_all = vertices_base.copy()
                ridge_vertices_all = rv_arr.tolist()

            # number of native (finite) vertices
            num_vertices = len(vor.vertices)

        # Build ridge incidence per vertex, and a lookup for (v1,v2) -> ridge id
        vertex_incident_ridges = defaultdict(list)
        # (2) cheaper key: store both directions as tuple -> int
        vertexpair2ridge: Dict[Tuple[int, int], int] = {}

        rv_full = np.asarray(ridge_vertices_all, dtype=int)
        R = rv_full.shape[0]
        for k in range(R):
            v1, v2 = int(rv_full[k, 0]), int(rv_full[k, 1])
            vertex_incident_ridges[v1].append(k)
            vertex_incident_ridges[v2].append(k)
            vertexpair2ridge[(v1, v2)] = k
            vertexpair2ridge[(v2, v1)] = k

        # For each finite vertex, record which input points (cells) meet there
        vertex_points = {}
        if N > 2:
            for v_id in range(num_vertices):
                s = set()
                for ridge_id in vertex_incident_ridges[v_id]:
                    i, j = vor.ridge_points[ridge_id]
                    s.add(i), s.add(j)
                vertex_points[v_id] = list(s)

        return vor, vertices_all, ridge_vertices_all, num_vertices, vertexpair2ridge, vertex_points

    # --------------------- Geometry & energy contributions per cell ---------------------
    def _per_cell_geometry(self, vor: Voronoi, vertices_all: np.ndarray, ridge_vertices_all: np.ndarray, num_vertices: int, vertexpair2ridge: Dict[Tuple[int, int], int]) -> Dict:
        """
        Iterate cells to:
        - sort polygon/arc vertices around each cell
        - classify edges (1 = straight Voronoi edge; 0 = circular arc)
        - compute area/perimeter for each cell
        - accumulate derivatives w.r.t. vertices (dA_poly/dh, dP_poly/dh)
        - register 'outer' vertices created at arc intersections and track their point pairs
        """
        N = self.N
        r = self.phys.r
        A0 = self.phys.A0
        P0 = self.phys.P0
        pts = self.pts

        num_vertices_ext = len(vertices_all)  # number of vertices with infinite extension points

        rv = np.asarray(ridge_vertices_all, dtype=int)  # (R,2)
        rp = np.asarray(vor.ridge_points, dtype=int)  # (R,2)
        num_ridges = rp.shape[0]

        # init outer-vertex arrays (same shapes you used)
        vertices_out = np.zeros((2 * num_ridges, 2), dtype=float)
        vertex_out_points = np.zeros((2 * num_ridges, 2), dtype=int)

        # unpack ridge endpoints and vertex indices
        p1 = rp[:, 0].copy()
        p2 = rp[:, 1].copy()
        v1 = rv[:, 0].copy()
        v2 = rv[:, 1].copy()

        valid_pts = (p1 >= 0) & (p2 >= 0)

        # coordinates
        P1 = np.zeros((num_ridges, 2), dtype=float); P2 = np.zeros((num_ridges, 2), dtype=float)
        P1[valid_pts] = self.pts[p1[valid_pts]]
        P2[valid_pts] = self.pts[p2[valid_pts]]
        V1 = vertices_all[v1]
        V2 = vertices_all[v2]

        # enforce V1->V2 clockwise around p1 (swap only p1<->p2, not v1/v2)
        P12 = P2 - P1
        V12 = V2 - V1
        swap = (P12[:, 0] * V12[:, 1] - P12[:, 1] * V12[:, 0]) > 0
        p1_sw = p1.copy(); p2_sw = p2.copy()
        p1_sw[swap] = p2[swap]; p2_sw[swap] = p1[swap]
        p1, p2 = p1_sw, p2_sw
        P1_sw = P1.copy(); P2_sw = P2.copy()
        P1[swap] = P2_sw[swap]; P2[swap] = P1_sw[swap]

        # "inner" tests relative to p1 (keep your exact choices)
        r = self.phys.r
        d1 = np.linalg.norm(V1 - P1, axis=1)
        d2 = np.linalg.norm(V2 - P1, axis=1)
        inner1 = (d1 <= r) & valid_pts
        inner2 = (d2 <= r) & valid_pts

        # segment/intersection
        dV = V2 - V1
        segL = np.linalg.norm(dV, axis=1)
        denom = np.where(segL > 0.0, segL, 1.0)
        dx, dy = dV[:, 0], dV[:, 1]
        x, y = P1[:, 0], P1[:, 1]
        x1, y1 = V1[:, 0], V1[:, 1]
        t = ((x - x1) * dx + (y - y1) * dy) / denom

        # mid-point C = (p1+p2)/2 (your exact choice)
        C = 0.5 * (P1 + P2)
        cx, cy = C[:, 0], C[:, 1]
        t1 = -t
        t2 = t1 + denom

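        # Geometric note: for a genuine Voronoi ridge the segment V1->V2 lies on the
        # perpendicular bisector of P1P2, so C (the midpoint of P1 and P2) is the foot
        # of the perpendicular from P1 onto that line and d = |C - P1| below is the
        # point-to-line distance. When d < r, the circle of radius r centred at P1
        # crosses the line at signed offsets -tr and +tr from C with tr = sqrt(r^2 - d^2);
        # t1 and t2 are the signed offsets of V1 and V2, so cond1/cond2 below just test
        # whether each circle/line intersection actually lands inside the segment.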
        d = np.linalg.norm(C - P1, axis=1)
        has_int = (d < r) & valid_pts
        tr = np.full_like(d, np.nan)
        tr[has_int] = np.sqrt(r * r - d[has_int] * d[has_int])

        cond1 = (-tr < t2) & (-tr > t1) & valid_pts
        cond2 = ( tr < t2) & ( tr > t1) & valid_pts

        invL = np.where(denom > 0.0, 1.0 / denom, 0.0)
        xr1 = cx - tr * dx * invL
        yr1 = cy - tr * dy * invL
        xr2 = cx + tr * dx * invL
        yr2 = cy + tr * dy * invL

        # fill outer-vertex arrays only where intersections happen
        idx1 = np.where(cond1)[0]
        if idx1.size:
            vertices_out[2 * idx1 + 0, 0] = xr1[idx1]
            vertices_out[2 * idx1 + 0, 1] = yr1[idx1]
            pairs1 = np.sort(np.stack([p1[idx1], p2[idx1]], axis=1), axis=1)
            vertex_out_points[2 * idx1 + 0] = pairs1

        idx2 = np.where(cond2)[0]
        if idx2.size:
            vertices_out[2 * idx2 + 1, 0] = xr2[idx2]
            vertices_out[2 * idx2 + 1, 1] = yr2[idx2]
            pairs2 = np.sort(np.stack([p1[idx2], p2[idx2]], axis=1), axis=1)
            vertex_out_points[2 * idx2 + 1] = pairs2

        # sets (same semantics as before)
        vertex_out_id = set((num_vertices_ext + np.where((np.arange(2 * num_ridges) % 2 == 0) & cond1.repeat(2))[0]).tolist())
        vertex_out_id.update((num_vertices_ext + np.where((np.arange(2 * num_ridges) % 2 == 1) & cond2.repeat(2))[0]).tolist())

        vertex_in_id = set(v1[inner1].astype(int).tolist())
        vertex_in_id.update(v2[inner2].astype(int).tolist())

        # -------- NEW: packed arrays instead of ridge_info dict --------
        # each endpoint has up to 3 entries; absent slots are -1
        p1_edges_pack = np.full((num_ridges, 3), -1, dtype=int)
        p1_verts_pack = np.full((num_ridges, 3), -1, dtype=int)
        p2_edges_pack = np.full((num_ridges, 3), -1, dtype=int)
        p2_verts_pack = np.full((num_ridges, 3), -1, dtype=int)

        out_id1 = num_vertices_ext + (2 * np.arange(num_ridges) + 0)
        out_id2 = out_id1 + 1

        # p1 order: [inner1 -> (1,v1)], [cond1 -> (1,out1)], [cond2 -> (0,out2)]
        p1_edges_pack[inner1, 0] = 1
        p1_verts_pack[inner1, 0] = v1[inner1]

        p1_edges_pack[cond1, 1] = 1
        p1_verts_pack[cond1, 1] = out_id1[cond1]

        p1_edges_pack[cond2, 2] = 0
        p1_verts_pack[cond2, 2] = out_id2[cond2]

        # p2 was "append then reverse", which yields final order: [inner2, cond2, cond1]
        p2_edges_pack[inner2, 0] = 1
        p2_verts_pack[inner2, 0] = v2[inner2]

        p2_edges_pack[cond2, 1] = 1
        p2_verts_pack[cond2, 1] = out_id2[cond2]

        p2_edges_pack[cond1, 2] = 0
        p2_verts_pack[cond1, 2] = out_id1[cond1]

        # append outer-vertex slots (unused rows stay zero like before)
        vertices_all = np.vstack([vertices_all, vertices_out])


        point_edges_type = []
        point_vertices_f_idx = []

        # --- fast vectorized per-cell processing (no inner edge loop) ---
        for idx in range(N):
            region_id = vor.point_region[idx]
            v_ids = np.asarray(vor.regions[region_id], dtype=int)

            if v_ids.size == 0:
                point_edges_type.append([])
                point_vertices_f_idx.append([])
                continue

            # sort vertices clockwise around cell center
            rel = vertices_all[v_ids] - pts[idx]
            angles = np.arctan2(rel[:, 1], rel[:, 0])
            order = np.argsort(angles)[::-1]
            v_ids = v_ids[order]

            # consecutive pairs (wrap) -> candidate edges around this cell
            v1_ids = v_ids
            v2_ids = np.roll(v_ids, -1)

            # skip ray-ray (both >= num_vertices)
            valid = ~((v1_ids >= num_vertices) & (v2_ids >= num_vertices))
            if not np.any(valid):
                point_edges_type.append([])
                point_vertices_f_idx.append([])
                continue

            v1_ids = v1_ids[valid]
            v2_ids = v2_ids[valid]

            # ---- vectorized ridge id lookup for all edges of this cell ----
            # use the dict you already built with both orientations:
            # vertexpair2ridge[(v1, v2)] = ridge_id
            # convert all edge pairs in one go via list comprehension (still fast, no Python loop per-edge body)
            # NB: we keep it simple & reliable; if needed, switch to a sorted-structured-array map later.
            keys = list(zip(v1_ids.tolist(), v2_ids.tolist()))
            ridge_ids = np.fromiter((vertexpair2ridge[k] for k in keys), dtype=int, count=len(keys))

            # decide which endpoint pack to use (p1 vs p2) for each edge
            use_p1 = (p1[ridge_ids] == idx)
            use_p2 = ~use_p1

            # gather packs (shape (E,3)), then mask out the -1 slots
            pack_e = np.empty((len(ridge_ids), 3), dtype=int)
            pack_v = np.empty((len(ridge_ids), 3), dtype=int)

            if np.any(use_p1):
                pack_e[use_p1] = p1_edges_pack[ridge_ids[use_p1]]
                pack_v[use_p1] = p1_verts_pack[ridge_ids[use_p1]]
            if np.any(use_p2):
                pack_e[use_p2] = p2_edges_pack[ridge_ids[use_p2]]
                pack_v[use_p2] = p2_verts_pack[ridge_ids[use_p2]]

            # flatten valid entries in pack order (keeps your exact edge ordering)
            mask = (pack_e >= 0)
            edges_type = pack_e[mask].tolist()
            vertices_f_idx = pack_v[mask].tolist()

            if len(vertices_f_idx) != len(edges_type):
                raise ValueError("Vertex and edge number not equal!")

            point_edges_type.append(edges_type)
            point_vertices_f_idx.append(vertices_f_idx)


        # --- helpers ---
        def _row_cross(a, b):
            # z-component of 2D cross, row-wise
            return a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0]

        def _perp(u):
            # rotate 90° CW: (ux,uy) -> (uy,-ux)
            return np.column_stack((u[:, 1], -u[:, 0]))

        vertex_out_da_dtheta = np.zeros((2 * num_ridges, 2))
        vertex_out_dl_dtheta = np.zeros((2 * num_ridges, 2))

        dA_poly_dh = np.zeros((num_vertices_ext + 2 * num_ridges, 2))
        dP_poly_dh = np.zeros((num_vertices_ext + 2 * num_ridges, 2))

        area_list = np.zeros(N)
        perimeter_list = np.zeros(N)

        for idx in range(N):
            edges_type = np.asarray(point_edges_type[idx], dtype=int)
            vertices_f_idx = np.asarray(point_vertices_f_idx[idx], dtype=int)
            E = edges_type.size

            if E < 2:
                area_list[idx] = np.pi * (r ** 2)
                perimeter_list[idx] = 2.0 * np.pi * r
                continue

            # ring indices
            v1_idx = vertices_f_idx
            v2_idx = np.roll(vertices_f_idx, -1)
            v0_idx = np.roll(vertices_f_idx, 1)

            ri = pts[idx]
            V1 = vertices_all[v1_idx]
            V2 = vertices_all[v2_idx]
            V0 = vertices_all[v0_idx]
            V1mR = V1 - ri
            V2mR = V2 - ri
            V0mR = V0 - ri

            mask_str = (edges_type == 1)
            mask_arc = ~mask_str

            # ----- perimeter & area -----
            seg12 = V1 - V2
            l12 = np.linalg.norm(seg12, axis=1)
            Pi_straight = l12[mask_str].sum()
            Ai_straight = (-0.5 * _row_cross(V1mR[mask_str], V2mR[mask_str])).sum()

            if np.any(mask_arc):
                a1_full = np.arctan2(V1mR[:, 1], V1mR[:, 0])
                a2_full = np.arctan2(V2mR[:, 1], V2mR[:, 0])
                dangle_full = (a1_full - a2_full) % (2.0 * np.pi)
                dangle_arc = dangle_full[mask_arc]
                Pi_arc = (r * dangle_arc).sum()
                Ai_arc = (0.5 * (r ** 2) * dangle_arc).sum()
            else:
                Pi_arc = 0.0
                Ai_arc = 0.0

            Pi = Pi_straight + Pi_arc
            Ai = Ai_straight + Ai_arc
            perimeter_list[idx] = Pi
            area_list[idx] = Ai

            # ----- dA_poly/dh, dP_poly/dh for v1 -----
            dAi_v1 = -0.5 * _perp(V2mR) + 0.5 * _perp(V0mR)  # (E,2)

            dPi_v1 = np.zeros((E, 2))
            if np.any(mask_str):
                dPi_v1[mask_str] += seg12[mask_str] / l12[mask_str][:, None]

            mask_prev_str = np.roll(mask_str, 1)
            seg10 = V1 - V0
            l10 = np.linalg.norm(seg10, axis=1)
            if np.any(mask_prev_str):
                dPi_v1[mask_prev_str] += seg10[mask_prev_str] / l10[mask_prev_str][:, None]

            np.add.at(dA_poly_dh, v1_idx, (Ai - A0) * dAi_v1)
            np.add.at(dP_poly_dh, v1_idx, (Pi - P0) * dPi_v1)

            # ----- arc endpoint sensitivities at outer vertices -----
            if np.any(mask_arc):
                # endpoint rows in vertex_out_* are (outer_id - num_vertices_ext)
                v1_arc_idx = v1_idx[mask_arc]
                v2_arc_idx = v2_idx[mask_arc]
                k1 = v1_arc_idx - num_vertices_ext
                k2 = v2_arc_idx - num_vertices_ext
                valid1 = (k1 >= 0)
                valid2 = (k2 >= 0)

                if np.any(valid1) or np.any(valid2):
                    # da/dtheta for endpoints (sector - triangle)
                    da1_full = 0.5 * (r ** 2) * (1.0 - np.cos(dangle_full))  # v1 endpoint
                    da2_full = -da1_full  # v2 endpoint
                    da1_arc = da1_full[mask_arc]
                    da2_arc = da2_full[mask_arc]

                    # dl/dtheta is ±r
                    dl1 = r
                    dl2 = -r

                    vop = vertex_out_points  # rows are sorted [i,j]; column 1 is max(i,j)
                    if np.any(valid1):
                        k1v = k1[valid1]
                        # CORRECT which_point: 0 if max(i,j) > idx else 1
                        which1 = (vop[k1v, 1] <= idx).astype(int)
                        vertex_out_da_dtheta[k1v, which1] = da1_arc[valid1]
                        vertex_out_dl_dtheta[k1v, which1] = dl1

                    if np.any(valid2):
                        k2v = k2[valid2]
                        which2 = (vop[k2v, 1] <= idx).astype(int)
                        vertex_out_da_dtheta[k2v, which2] = da2_arc[valid2]
                        vertex_out_dl_dtheta[k2v, which2] = dl2


        diagnostics = dict(
            vertex_in_id=set(vertex_in_id),
            vertex_out_id=set(vertex_out_id),
            vertices_out=vertices_out,
            vertex_out_points=vertex_out_points,
            vertex_out_da_dtheta=vertex_out_da_dtheta,
            vertex_out_dl_dtheta=vertex_out_dl_dtheta,
            dA_poly_dh=dA_poly_dh,
            dP_poly_dh=dP_poly_dh,
            area_list=area_list,
            perimeter_list=perimeter_list,
            point_edges_type=point_edges_type,
            point_vertices_f_idx=point_vertices_f_idx,
            num_vertices_ext=num_vertices_ext,
        )
        return diagnostics, vertices_all

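    # Note on what is being differentiated: dA_poly_dh already carries a factor of
    # (A_i - A0) and dP_poly_dh a factor of (P_i - P0); _assemble_forces below scales
    # them by 2*KA and 2*KP and adds Lambda times the arc-length sensitivities. That
    # derivative structure is consistent with a per-cell energy of the form
    #     E_i = KA * (A_i - A0)**2 + KP * (P_i - P0)**2 + Lambda * (arc length of cell i)
    # with forces F = -dE/dr. This reading is inferred from the code, not taken from
    # package documentation.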
    # --------------------- Force assembly ---------------------
    def _assemble_forces(self, vertices_all: np.ndarray, num_vertices_ext: int,
                         vertex_points: Dict[int, List[int]], vertex_in_id: List[int], vertex_out_id: List[int],
                         vertex_out_points: List[List[int]], vertex_out_da_dtheta: np.ndarray,
                         vertex_out_dl_dtheta: np.ndarray, dA_poly_dh: np.ndarray, dP_poly_dh: np.ndarray,
                         area_list: np.ndarray, perimeter_list: np.ndarray) -> np.ndarray:
        """
        Assemble forces on cell centers from polygon and arc contributions.
        """
        N = self.N
        r = self.phys.r
        A0 = self.phys.A0
        P0 = self.phys.P0
        KA = self.phys.KA
        KP = self.phys.KP
        Lambda = self.phys.lambda_tension
        pts = self.pts

        dE_poly_dh = 2.0 * (KA * dA_poly_dh + KP * dP_poly_dh)

        fx = np.zeros(N)
        fy = np.zeros(N)

        # ===============================================================
        # (1) Inner vertices contributions — vectorized + bincount scatter
        # ===============================================================
        if len(vertex_in_id) > 0:
            H = np.asarray(list(vertex_in_id), dtype=int)  # (H,)
            # unpack triples (i,j,k) for each inner vertex
            I = np.empty(len(H), dtype=int)
            J = np.empty(len(H), dtype=int)
            K = np.empty(len(H), dtype=int)
            for t, h in enumerate(H):
                I[t], J[t], K[t] = vertex_points[h]

            ri = pts[I]  # (H,2)
            rj = pts[J]
            rk = pts[K]

            rj_minus_rk = rj - rk
            ri_minus_rj = ri - rj
            ri_minus_rk = ri - rk
            rj_minus_ri = -ri_minus_rj
            rk_minus_ri = -ri_minus_rk
            rk_minus_rj = rk - rj

            D0 = _row_dot(ri - rj, np.column_stack((rj_minus_rk[:,1], -rj_minus_rk[:,0])))  # cross2(ri-rj, rj-rk)
            # rewrite cross robustly:
            D0 = (ri[:,0]-rj[:,0])*(rj_minus_rk[:,1]) - (ri[:,1]-rj[:,1])*(rj_minus_rk[:,0])
            D = 2.0 * (D0 ** 2)

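            # Geometric note: the inner Voronoi vertex h is the circumcenter of the
            # triangle (ri, rj, rk), written in barycentric form
            #     h = alpha_i * ri + alpha_j * rj + alpha_k * rk.
            # The weights computed below are the standard circumcenter barycentric
            # coordinates a^2 * (b^2 + c^2 - a^2) / (16 * Area^2) (and cyclic): each dot
            # product supplies (b^2 + c^2 - a^2)/2 and D = 2 * cross(ri-rj, rj-rk)^2
            # equals 8 * Area^2, so the three weights sum to 1.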
            # alphas
            alpha_i = _row_dot(rj_minus_rk, rj_minus_rk) * _row_dot(ri_minus_rj, ri_minus_rk) / D
            alpha_j = _row_dot(ri_minus_rk, ri_minus_rk) * _row_dot(rj_minus_ri, rj_minus_rk) / D
            alpha_k = _row_dot(ri_minus_rj, ri_minus_rj) * _row_dot(rk_minus_ri, rk_minus_rj) / D

            # d_alpha_j / d_ri and d_alpha_k / d_ri
            cross_z = np.column_stack((rj_minus_rk[:, 1], -rj_minus_rk[:, 0]))  # (H,2)
            term_j_ri = (rk_minus_rj / _row_dot(rj_minus_ri, rj_minus_rk)[:, None]) + 2.0 * (ri_minus_rk / _row_dot(ri_minus_rk, ri_minus_rk)[:, None]) - 2.0 * (cross_z / D0[:, None])
            term_k_ri = (rj_minus_rk / _row_dot(rk_minus_ri, rk_minus_rj)[:, None]) + 2.0 * (ri_minus_rj / _row_dot(ri_minus_rj, ri_minus_rj)[:, None]) - 2.0 * (cross_z / D0[:, None])
            d_alpha_j_d_ri = (alpha_j[:, None] * term_j_ri)  # (H,2)
            d_alpha_k_d_ri = (alpha_k[:, None] * term_k_ri)

            d_h_in_d_xi = alpha_i[:, None] * np.array([1.0, 0.0]) + d_alpha_j_d_ri[:, [0]] * (rj - ri) + d_alpha_k_d_ri[:, [0]] * (rk - ri)
            d_h_in_d_yi = alpha_i[:, None] * np.array([0.0, 1.0]) + d_alpha_j_d_ri[:, [1]] * (rj - ri) + d_alpha_k_d_ri[:, [1]] * (rk - ri)

            # d_alpha_i / d_rj and d_alpha_k / d_rj
            cross_z = np.column_stack((-(ri_minus_rk)[:, 1], (ri_minus_rk)[:, 0]))
            term_i_rj = (rk_minus_ri / _row_dot(ri_minus_rj, ri_minus_rk)[:, None]) + 2.0 * (rj_minus_rk / _row_dot(rj_minus_rk, rj_minus_rk)[:, None]) - 2.0 * (cross_z / D0[:, None])
            term_k_rj = (ri_minus_rk / _row_dot(rk_minus_rj, rk_minus_ri)[:, None]) + 2.0 * (rj_minus_ri / _row_dot(rj_minus_ri, rj_minus_ri)[:, None]) - 2.0 * (cross_z / D0[:, None])
            d_alpha_i_d_rj = (alpha_i[:, None] * term_i_rj)
            d_alpha_k_d_rj = (alpha_k[:, None] * term_k_rj)

            d_h_in_d_xj = d_alpha_i_d_rj[:, [0]] * (ri - rj) + alpha_j[:, None] * np.array([1.0, 0.0]) + d_alpha_k_d_rj[:, [0]] * (rk - rj)
            d_h_in_d_yj = d_alpha_i_d_rj[:, [1]] * (ri - rj) + alpha_j[:, None] * np.array([0.0, 1.0]) + d_alpha_k_d_rj[:, [1]] * (rk - rj)

            # d_alpha_i / d_rk and d_alpha_j / d_rk
            cross_z = np.column_stack(((ri_minus_rj)[:, 1], -(ri_minus_rj)[:, 0]))
            term_i_rk = (rj_minus_ri / _row_dot(ri_minus_rk, ri_minus_rj)[:, None]) + 2.0 * (rk_minus_rj / _row_dot(rk_minus_rj, rk_minus_rj)[:, None]) - 2.0 * (cross_z / D0[:, None])
            term_j_rk = (ri_minus_rj / _row_dot(rj_minus_rk, rj_minus_ri)[:, None]) + 2.0 * (rk_minus_ri / _row_dot(rk_minus_ri, rk_minus_ri)[:, None]) - 2.0 * (cross_z / D0[:, None])
            d_alpha_i_d_rk = (alpha_i[:, None] * term_i_rk)
            d_alpha_j_d_rk = (alpha_j[:, None] * term_j_rk)

            d_h_in_d_xk = d_alpha_i_d_rk[:, [0]] * (ri - rk) + d_alpha_j_d_rk[:, [0]] * (rj - rk) + alpha_k[:, None] * np.array([1.0, 0.0])
            d_h_in_d_yk = d_alpha_i_d_rk[:, [1]] * (ri - rk) + d_alpha_j_d_rk[:, [1]] * (rj - rk) + alpha_k[:, None] * np.array([0.0, 1.0])

            deh = dE_poly_dh[H]  # (H,2)
            contrib_x_i = _row_dot(deh, d_h_in_d_xi)
            contrib_x_j = _row_dot(deh, d_h_in_d_xj)
            contrib_x_k = _row_dot(deh, d_h_in_d_xk)
            contrib_y_i = _row_dot(deh, d_h_in_d_yi)
            contrib_y_j = _row_dot(deh, d_h_in_d_yj)
            contrib_y_k = _row_dot(deh, d_h_in_d_yk)

            # bincount-based scatter (faster than repeated np.add.at)
            fx += (
                np.bincount(I, weights=contrib_x_i, minlength=N)
                + np.bincount(J, weights=contrib_x_j, minlength=N)
                + np.bincount(K, weights=contrib_x_k, minlength=N)
            )
            fy += (
                np.bincount(I, weights=contrib_y_i, minlength=N)
                + np.bincount(J, weights=contrib_y_j, minlength=N)
                + np.bincount(K, weights=contrib_y_k, minlength=N)
            )

        # ===============================================================
        # (2) Outer vertices contributions — vectorized; bincount scatter
        # ===============================================================
        dA_arc_dr = np.zeros((N, 2))
        dP_arc_dr = np.zeros((N,2))
        dL_dr = np.zeros((N,2))

        if len(vertex_out_id) > 0:
            Vsel = np.asarray(vertex_out_id, dtype=int)  # absolute vertex IDs in vertices_all
            h_idx = Vsel - num_vertices_ext  # rows into vertex_out_* arrays
            # guard against any accidental negatives / out-of-range
            valid_mask = (h_idx >= 0) & (h_idx < len(vertex_out_points))
            if np.any(valid_mask):
                Vsel = Vsel[valid_mask]
                h_idx = h_idx[valid_mask]

                # geometry slices
                h_out = vertices_all[Vsel]  # (M,2)
                IJ = np.asarray(vertex_out_points, dtype=int)[h_idx]  # (M,2)
                I = IJ[:, 0]
                J = IJ[:, 1]

                ri = pts[I]  # (M,2)
                rj = pts[J]  # (M,2)
                rij_vec = ri - rj
                rij = np.linalg.norm(rij_vec, axis=1)  # (M,)
                root = np.sqrt(4.0 * (r ** 2) - (rij ** 2))

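                # Geometric note: each "outer" vertex h_out is an intersection of the
                # two circles of radius r centred at ri and rj,
                #     h_out = (ri + rj)/2 +/- (root / (2 * rij)) * perp(rij),
                # with root = sqrt(4 r^2 - rij^2). The derivatives below come from
                # differentiating this closed form: the x_unit/2 and y_unit/2 terms are
                # the midpoint part, and the sign-dependent dx_terms/dy_terms are the
                # +/- offset along the perpendicular of rij.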
                # sign based on orientation: sign(cross(h_out-rj, ri-rj))
                sign = np.sign((h_out[:, 0] - rj[:, 0]) * (ri[:, 1] - rj[:, 1])
                               - (h_out[:, 1] - rj[:, 1]) * (ri[:, 0] - rj[:, 0]))

                x_unit = np.array([1.0, 0.0])[None, :]
                y_unit = np.array([0.0, 1.0])[None, :]

                cross_z = rij_vec[:, [1]] * x_unit - rij_vec[:, [0]] * y_unit  # (M,2)
                denom = (np.maximum(root[:, None], self.phys.delta) * (rij ** 3)[:, None])  # small offset to avoid singularities

                dx_terms = - (2.0 * (r ** 2) * rij_vec[:, [0]] * cross_z / denom) \
                           - (root / (2.0 * rij))[:, None] * y_unit
                dy_terms = - (2.0 * (r ** 2) * rij_vec[:, [1]] * cross_z / denom) \
                           + (root / (2.0 * rij))[:, None] * x_unit

                d_h_out_d_xi = (x_unit / 2.0) + sign[:, None] * dx_terms
                d_h_out_d_yi = (y_unit / 2.0) + sign[:, None] * dy_terms
                d_h_out_d_xj = (x_unit / 2.0) - sign[:, None] * dx_terms
                d_h_out_d_yj = (y_unit / 2.0) - sign[:, None] * dy_terms

                # polygon part on these selected outer vertices
                deh_out = dE_poly_dh[Vsel]  # (M,2)

                fx += (
                    np.bincount(I, weights=_row_dot(deh_out, d_h_out_d_xi), minlength=N)
                    + np.bincount(J, weights=_row_dot(deh_out, d_h_out_d_xj), minlength=N)
                )
                fy += (
                    np.bincount(I, weights=_row_dot(deh_out, d_h_out_d_yi), minlength=N)
                    + np.bincount(J, weights=_row_dot(deh_out, d_h_out_d_yj), minlength=N)
                )

                # ---- arc angle sensitivities (per-cell accumulators) ----
                u_i = h_out - ri
                u_j = h_out - rj
                inv_ui2 = 1.0 / _row_dot(u_i, u_i)
                inv_uj2 = 1.0 / _row_dot(u_j, u_j)
                u_perp_i = np.column_stack((-u_i[:, 1], u_i[:, 0])) * inv_ui2[:, None]
                u_perp_j = np.column_stack((-u_j[:, 1], u_j[:, 0])) * inv_uj2[:, None]

                d_theta_i_d_xj = _row_dot(d_h_out_d_xj, u_perp_i)
                d_theta_i_d_yj = _row_dot(d_h_out_d_yj, u_perp_i)
                d_theta_i_d_xi = -d_theta_i_d_xj
                d_theta_i_d_yi = -d_theta_i_d_yj

                d_theta_j_d_xi = _row_dot(d_h_out_d_xi, u_perp_j)
                d_theta_j_d_yi = _row_dot(d_h_out_d_yi, u_perp_j)
                d_theta_j_d_xj = -d_theta_j_d_xi
                d_theta_j_d_yj = -d_theta_j_d_yi

                # weights (only for the selected outer vertices h_idx)
                v_da = vertex_out_da_dtheta[h_idx]  # (M,2)
                v_dl = vertex_out_dl_dtheta[h_idx]  # (M,2)

                Ai_w_i = (area_list[I] - A0) * v_da[:, 0]
                Aj_w_j = (area_list[J] - A0) * v_da[:, 1]
                Pi_w_i = (perimeter_list[I] - P0) * v_dl[:, 0]
                Pj_w_j = (perimeter_list[J] - P0) * v_dl[:, 1]

                # accumulate with bincount
                dA_arc_dr[:, 0] += np.bincount(I, Ai_w_i * d_theta_i_d_xi + Aj_w_j * d_theta_j_d_xi, minlength=N)
                dA_arc_dr[:, 0] += np.bincount(J, Ai_w_i * d_theta_i_d_xj + Aj_w_j * d_theta_j_d_xj, minlength=N)
                dA_arc_dr[:, 1] += np.bincount(I, Ai_w_i * d_theta_i_d_yi + Aj_w_j * d_theta_j_d_yi, minlength=N)
                dA_arc_dr[:, 1] += np.bincount(J, Ai_w_i * d_theta_i_d_yj + Aj_w_j * d_theta_j_d_yj, minlength=N)

                dP_arc_dr[:, 0] += np.bincount(I, Pi_w_i * d_theta_i_d_xi + Pj_w_j * d_theta_j_d_xi, minlength=N)
                dP_arc_dr[:, 0] += np.bincount(J, Pi_w_i * d_theta_i_d_xj + Pj_w_j * d_theta_j_d_xj, minlength=N)
                dP_arc_dr[:, 1] += np.bincount(I, Pi_w_i * d_theta_i_d_yi + Pj_w_j * d_theta_j_d_yi, minlength=N)
                dP_arc_dr[:, 1] += np.bincount(J, Pi_w_i * d_theta_i_d_yj + Pj_w_j * d_theta_j_d_yj, minlength=N)

                # line-tension contributions for Lambda
                dL_arc_x = v_dl[:, 0] * d_theta_i_d_xi + v_dl[:, 1] * d_theta_j_d_xi
                dL_arc_y = v_dl[:, 0] * d_theta_i_d_yi + v_dl[:, 1] * d_theta_j_d_yi
                dL_arc_xJ = v_dl[:, 0] * d_theta_i_d_xj + v_dl[:, 1] * d_theta_j_d_xj
                dL_arc_yJ = v_dl[:, 0] * d_theta_i_d_yj + v_dl[:, 1] * d_theta_j_d_yj

                dL_dr[:, 0] += np.bincount(I, dL_arc_x, minlength=N)
                dL_dr[:, 1] += np.bincount(I, dL_arc_y, minlength=N)
                dL_dr[:, 0] += np.bincount(J, dL_arc_xJ, minlength=N)
                dL_dr[:, 1] += np.bincount(J, dL_arc_yJ, minlength=N)

        # combine arc terms
        dE_arc_dr = 2.0 * (KA * dA_arc_dr + KP * dP_arc_dr) + Lambda * dL_dr

        fx = -(fx + dE_arc_dr[:, 0])
        fy = -(fy + dE_arc_dr[:, 1])

        F = np.zeros((N, 2), dtype=float)
        F[:, 0] = fx
        F[:, 1] = fy
        return F

    # --------------------- One integration step ---------------------
    def build(self) -> Dict:
        """
        Do the following:
        - Build Voronoi (+ extensions)
        - Get cell connectivity
        - Compute per-cell quantities and derivatives
        - Assemble forces
        Returns a dictionary of diagnostics.
        """
        (vor, vertices_all, ridge_vertices_all, num_vertices,
         vertexpair2ridge, vertex_points) = self._build_voronoi_with_extensions()

        connections = self._get_connections(vor.ridge_points, vertices_all, ridge_vertices_all)

        geom, vertices_all = self._per_cell_geometry(vor, vertices_all, ridge_vertices_all, num_vertices, vertexpair2ridge)

        F = self._assemble_forces(
            vertices_all=vertices_all,
            num_vertices_ext=geom["num_vertices_ext"],
            vertex_points=vertex_points,
            vertex_in_id=list(geom["vertex_in_id"]),
            vertex_out_id=list(geom["vertex_out_id"]),
            vertex_out_points=geom["vertex_out_points"],
            vertex_out_da_dtheta=geom["vertex_out_da_dtheta"],
            vertex_out_dl_dtheta=geom["vertex_out_dl_dtheta"],
            dA_poly_dh=geom["dA_poly_dh"],
            dP_poly_dh=geom["dP_poly_dh"],
            area_list=geom["area_list"],
            perimeter_list=geom["perimeter_list"],
        )

        return dict(
            forces=F,
            areas=geom["area_list"],
            perimeters=geom["perimeter_list"],
            vertices=vertices_all,
            edges_type=geom["point_edges_type"],
            regions=geom["point_vertices_f_idx"],
            connections=connections,
        )

    # --------------------- 2D plotting utilities ---------------------
    def plot_2d(self, ax: Optional[Axes] = None, show: bool = False) -> Axes:
        """
        Build the Voronoi(+extensions) and render a 2D snapshot.

        Parameters
        ----------
        ax : matplotlib.axes.Axes or None
            If provided, draw into this axes; otherwise get the current axes.
        show : bool
            Whether to call plt.show() at the end.

        Returns
        -------
        ax : matplotlib.axes.Axes
        """
        (vor, vertices_all, ridge_vertices_all, num_vertices,
         vertexpair2ridge, vertex_points) = self._build_voronoi_with_extensions()

        geom, vertices_all = self._per_cell_geometry(vor, vertices_all, ridge_vertices_all, num_vertices, vertexpair2ridge)

        if ax is None:
            ax = plt.gca()

        self._plot_routine(ax, vor, vertices_all, ridge_vertices_all,
                           geom["point_edges_type"], geom["point_vertices_f_idx"])

        if show:
            plt.show()
        return ax

    # --------------------- Paradigm of plotting ---------------------
    def _plot_routine(self, ax: Axes, vor: Voronoi, vertices_all: np.ndarray, ridge_vertices_all: List[List[int]],
                      point_edges_type: List[List[int]], point_vertices_f_idx: List[List[int]]) -> None:
        """
        Low-level plot routine. Draws:
        - All Voronoi edges (solid for finite, dashed for formerly-infinite)
        - Cell centers
        - Each cell boundary (poly edges and circular arcs)
        """
        pts = self.pts
        r = self.phys.r
        N = self.N

        center = np.mean(pts, axis=0)
        if N > 1:
            span_x = np.ptp(pts[:, 0])  # pts span in x
            span_y = np.ptp(pts[:, 1])  # pts span in y
            L = max(span_x, span_y) + 3.0 * r
            L *= 0.8
        else:
            L = 5.0 * r

        # Draw Voronoi ridge segments
        for idx in range(len(vor.ridge_vertices)):
            x1, y1 = vertices_all[ridge_vertices_all[idx][0]]
            x2, y2 = vertices_all[ridge_vertices_all[idx][1]]
            if -1 not in vor.ridge_vertices[idx]:
                ax.plot([x1, x2], [y1, y2], 'k-', lw=0.5)
            else:
                ax.plot([x1, x2], [y1, y2], 'k--', lw=0.5)

        # Draw cell centers
        ax.plot(pts[:, 0], pts[:, 1], 'o', color='C0', markersize=2)

        # Draw each cell boundary
        for idx in range(N):
            edges_type = point_edges_type[idx]
            vertices_f_idx = point_vertices_f_idx[idx]

            x, y = pts[idx]
            if len(edges_type) < 2:
                angle = np.linspace(0, 2*np.pi, 100)
                ax.plot(x + r * np.cos(angle), y + r * np.sin(angle), color="C6", zorder=2)
                continue

            for idx_f, edge_type in enumerate(edges_type):
                v1_idx = vertices_f_idx[idx_f]
                x1, y1 = vertices_all[v1_idx]
                idx2 = idx_f + 1 if idx_f < len(edges_type)-1 else 0
                v2_idx = vertices_f_idx[idx2]
                x2, y2 = vertices_all[v2_idx]

                if edge_type == 1:
                    ax.plot([x1, x2], [y1, y2], 'b-', zorder=1)
                else:
                    angle1 = np.arctan2(y1-y, x1-x)
                    angle2 = np.arctan2(y2-y, x2-x)
                    dangle = np.linspace(0, (angle1 - angle2) % (2*np.pi), 100)

                    ax.plot(x + r * np.cos(angle2+dangle), y + r * np.sin(angle2+dangle), color="C6", zorder=2)

        ax.set_aspect("equal")
        ax.set_xlim(center[0]-L, center[0]+L)
        ax.set_ylim(center[1]-L, center[1]+L)

    # --------------------- Connections between cells ---------------------
    def _get_connections(self, ridge_points: List[List[int]], vertices_all: np.ndarray, ridge_vertices_all: List[List[int]]) -> np.ndarray:
        """
        Determine which pairs of cells are connected, i.e.,
        the distance from the cell center to its corresponding Voronoi ridge
        segment is < self.phys.r.
        """
        ridge_points_arr = np.asarray(ridge_points, dtype=int).reshape(-1, 2)  # (R, 2)
        ridge_vertices_arr = np.asarray(ridge_vertices_all, dtype=int).reshape(-1, 2)  # (R, 2)

        # take p2 for each ridge, avoid -1 points (representing space)
        p1_idx = ridge_points_arr[:, 0]  # (R,)
        p2_idx = ridge_points_arr[:, 1]  # (R,)
        p2 = self.pts[p2_idx]  # (R, 2)

        v1 = vertices_all[ridge_vertices_arr[:, 0]]  # (R, 2)
        v2 = vertices_all[ridge_vertices_arr[:, 1]]  # (R, 2)

        # vectorized point-to-segment distance
        AB = v2 - v1  # (R, 2)
        AP = p2 - v1  # (R, 2)
        denom = np.einsum("ij,ij->i", AB, AB)  # (R,)

        t = np.einsum("ij,ij->i", AP, AB) / denom
        t = np.clip(t, 0.0, 1.0)[:, None]  # (R,1)

        C = v1 + t * AB  # closest point on segment, (R,2)
        dists = np.linalg.norm(p2 - C, axis=1)  # (R,)

        mask = dists < self.phys.r

        connect = np.stack([p1_idx[mask], p2_idx[mask]], axis=1)
        if connect.size > 0:
            connect = np.sort(connect, axis=1)
        else:
            connect = np.empty((0, 2), dtype=int)
        return connect

    # --------------------- Update positions ---------------------
    def update_positions(self, pts: np.ndarray) -> None:
        """
        Update cell center positions.
        """
        self.N, dim = pts.shape
        if dim != 2:
            raise ValueError("Positions must have shape (N,2)")

        self.pts = pts

    # --------------------- Update physical parameters ---------------------
    def update_params(self, phys: PhysicalParams) -> None:
        """
        Update physical parameters.
        """
        self.phys = phys