bspy-4.2-py3-none-any.whl → bspy-4.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bspy/__init__.py +1 -1
- bspy/_spline_domain.py +74 -92
- bspy/_spline_evaluation.py +3 -3
- bspy/_spline_fitting.py +33 -12
- bspy/_spline_intersection.py +294 -279
- bspy/_spline_milling.py +233 -0
- bspy/_spline_operations.py +98 -81
- bspy/hyperplane.py +7 -3
- bspy/manifold.py +8 -3
- bspy/solid.py +8 -4
- bspy/spline.py +124 -21
- bspy/splineOpenGLFrame.py +346 -303
- bspy/spline_block.py +155 -38
- bspy/viewer.py +20 -11
- {bspy-4.2.dist-info → bspy-4.4.dist-info}/METADATA +14 -11
- bspy-4.4.dist-info/RECORD +19 -0
- {bspy-4.2.dist-info → bspy-4.4.dist-info}/WHEEL +1 -1
- bspy-4.2.dist-info/RECORD +0 -18
- {bspy-4.2.dist-info → bspy-4.4.dist-info/licenses}/LICENSE +0 -0
- {bspy-4.2.dist-info → bspy-4.4.dist-info}/top_level.txt +0 -0
bspy/_spline_intersection.py
CHANGED
@@ -126,104 +126,83 @@ def zeros_using_interval_newton(self):
return mySolution
return refine(spline, 1.0, 1.0)

- def _convex_hull_2D(xData, yData, yBounds, yOtherBounds
+ def _convex_hull_2D(xData, yData, yBounds, yOtherBounds):
# Allow xData to be repeated for longer yData, but only if yData is a multiple.
if not(yData.shape[0] % xData.shape[0] == 0): raise ValueError("Size of xData does not divide evenly in size of yData")
-
- # Assign (x0, y0) to the lowest point.
- yMinIndex = np.argmin(yData)
- x0 = xData[yMinIndex % xData.shape[0]]
- y0 = yOtherBounds[0] + yData[yMinIndex]
+ yData = np.reshape(yData, (yData.shape[0] // xData.shape[0], xData.shape[0]))

# Calculate y adjustment as needed for values close to zero
- (22 removed lines not shown in this view)
- for
- (28 removed lines not shown in this view)
- for point in hullPoints:
- # Check for intersection with x axis.
- if previousPoint[1] * point[1] <= epsilon:
- determinant = point[1] - previousPoint[1]
- if abs(determinant) > epsilon:
- # Crosses x axis, determine intersection.
- x = previousPoint[0] - previousPoint[1] * (point[0] - previousPoint[0]) / determinant
- xMin = min(xMin, x)
- xMax = max(xMax, x)
- elif abs(point[1]) < epsilon:
- # Touches at endpoint. (Previous point is checked earlier.)
- xMin = min(xMin, point[0])
- xMax = max(xMax, point[0])
- previousPoint = point
-
- if xMin - epsilon > xInterval[1] or xMax + epsilon < xInterval[0]:
+ yMinAdjustment = -yBounds[0] if yBounds[0] > 0.0 else 0.0
+ yMaxAdjustment = -yBounds[1] if yBounds[1] < 0.0 else 0.0
+ yMinAdjustment += yOtherBounds[0]
+ yMaxAdjustment += yOtherBounds[1]
+
+ # Calculate the yMin and yMax arrays corresponding to xData
+ yMin = np.min(yData, axis = 0) + yMinAdjustment
+ yMax = np.max(yData, axis = 0) + yMaxAdjustment
+
+ # Initialize lower and upper hulls
+ lowerHull = [[xData[0], yMin[0]], [xData[1], yMin[1]]]
+ upperHull = [[xData[0], yMax[0]], [xData[1], yMax[1]]]
+
+ # Add additional lower points one at a time, throwing out intermediates if necessary
+ for xNext, yNext in zip(xData[2:], yMin[2:]):
+ lowerHull.append([xNext, yNext])
+ while len(lowerHull) > 2 and \
+ (lowerHull[-2][0] - lowerHull[-3][0]) * (lowerHull[-1][1] - lowerHull[-2][1]) <= \
+ (lowerHull[-1][0] - lowerHull[-2][0]) * (lowerHull[-2][1] - lowerHull[-3][1]):
+ del lowerHull[-2]
+
+ # Do the same for the upper points
+ for xNext, yNext in zip(xData[2:], yMax[2:]):
+ upperHull.append([xNext, yNext])
+ while len(upperHull) > 2 and \
+ (upperHull[-2][0] - upperHull[-3][0]) * (upperHull[-1][1] - upperHull[-2][1]) >= \
+ (upperHull[-1][0] - upperHull[-2][0]) * (upperHull[-2][1] - upperHull[-3][1]):
+ del upperHull[-2]
+
+ # Return the two hulls
+ return lowerHull, upperHull
+
+ def _intersect_convex_hull_with_x_interval(lowerHull, upperHull, epsilon, xInterval):
+ xMin = xInterval[0]
+ xMax = xInterval[1]
+ sign = -1.0
+ for hull in [lowerHull, upperHull]:
+ sign = -sign
+ p0 = hull[0]
+ for p1 in hull[1:]:
+ yDelta = p0[1] - p1[1]
+ if p0[1] * p1[1] <= 0.0 and yDelta != 0.0:
+ yDelta = p0[1] - p1[1]
+ alpha = p0[1] / yDelta
+ xNew = p0[0] * (1.0 - alpha) + p1[0] * alpha
+ if sign * yDelta > 0.0:
+ xMin = max(xMin, xNew - epsilon)
+ else:
+ xMax = min(xMax, xNew + epsilon)
+ p0 = p1
+ if xMin > xMax:
return None
else:
- return
+ return [xMin, xMax]

- Interval = namedtuple('Interval', ('block', '
+ Interval = namedtuple('Interval', ('block', 'active', 'split', 'scale', 'bounds', 'xLeft', 'xRight', 'epsilon', 'atMachineEpsilon'))

- def _create_interval(
+ def _create_interval(block, active, split, scale, xLeft, xRight, epsilon):
nDep = 0
-
+ nInd = len(scale)
+ bounds = np.zeros((nInd, 2), scale.dtype)
newScale = np.empty_like(scale)
newBlock = []
for row in block:
newRow = []
- (3 removed lines not shown in this view)
- for spline in row:
- spline = spline.trim(domain[nInd:nInd + spline.nInd]).reparametrize(((0.0, 1.0),) * spline.nInd)
+ # Reparametrize splines and sum bounds
+ for map, spline in row:
+ spline = spline.reparametrize(((0.0, 1.0),) * spline.nInd)
bounds[nDep:nDep + spline.nDep] += spline.range_bounds()
- (2 removed lines not shown in this view)
+ newRow.append((map, spline))
+ newBlock.append(newRow)

# Check row bounds for potential roots.
for dep in range(spline.nDep):
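The rewritten `_convex_hull_2D` builds separate lower and upper hulls with a monotone-chain style update: each new point is appended, and the middle of the last three points is dropped while a slope test shows it cannot lie on the hull. A minimal standalone sketch of the lower-hull update (plain NumPy, not bspy's own code; the sample data is invented):

```python
import numpy as np

def lower_hull(xData, yMin):
    # Seed the hull with the first two points (x values assumed increasing).
    hull = [[xData[0], yMin[0]], [xData[1], yMin[1]]]
    for x, y in zip(xData[2:], yMin[2:]):
        hull.append([x, y])
        # Drop hull[-2] while it lies on or above the chord from hull[-3] to hull[-1],
        # i.e. while the cross-product test says it cannot be on the lower hull.
        while len(hull) > 2 and \
            (hull[-2][0] - hull[-3][0]) * (hull[-1][1] - hull[-2][1]) <= \
            (hull[-1][0] - hull[-2][0]) * (hull[-2][1] - hull[-3][1]):
            del hull[-2]
    return hull

x = np.linspace(0.0, 1.0, 5)
y = np.array([0.3, -0.1, 0.2, -0.2, 0.4])
print(lower_hull(x, y))  # only the points on the lower convex boundary survive
```

The upper hull in the diff uses the same loop with the comparison reversed (`>=`).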
@@ -232,30 +211,21 @@ def _create_interval(domain, block, unknowns, scale, slope, intercept, epsilon):
if coefsMax < -epsilon or coefsMin > epsilon:
# No roots in this interval.
return None
- (15 removed lines not shown in this view)
- if keepDep:
- # Remove dependent variables that are zero over the domain
- for spline in newRow:
- spline.nDep = len(keepDep)
- spline.coefs = spline.coefs[keepDep]
-
- newBlock.append(newRow)
-
- return Interval(newBlock, unknowns, newScale[:nDep], bounds, slope, intercept, epsilon, np.dot(slope, slope) < np.finfo(slope.dtype).eps)
+ newScale[nDep] = max(-coefsMin, coefsMax)
+ # Rescale spline coefficients to max 1.0.
+ rescale = 1.0 / max(-bounds[nDep, 0], bounds[nDep, 1])
+ for map, spline in newRow:
+ spline.coefs[dep] *= rescale
+ bounds[nDep] *= rescale
+ nDep += 1
+
+ for iInd in range(nInd):
+ newSplit = (split + iInd + 1) % nInd
+ if active[newSplit]:
+ return Interval(newBlock, active, newSplit, newScale, bounds, xLeft, xRight, epsilon, np.dot(xRight - xLeft, xRight - xLeft) < np.finfo(xLeft.dtype).eps)
+
+ # No active variables left
+ return None

# We use multiprocessing.Pool to call this function in parallel, so it cannot be nested and must take a single argument.
def _refine_projected_polyhedron(interval):
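`_create_interval` now rescales each dependent variable of the block so its range bounds have magnitude at most 1.0 (the `rescale = 1.0 / max(-bounds[nDep, 0], bounds[nDep, 1])` step above). A rough illustration of that per-variable rescaling on a plain coefficient array; the array and values are invented, and bspy itself works on Spline objects and their `range_bounds()`:

```python
import numpy as np

coefs = np.array([[-0.002, 0.010, 0.004],    # dependent variable 0: tiny values
                  [ 3.000, -5.000, 2.000]])  # dependent variable 1: large values
bounds = np.stack([coefs.min(axis=1), coefs.max(axis=1)], axis=1)

for dep in range(coefs.shape[0]):
    # The largest magnitude of this variable's bounds becomes the scale factor.
    rescale = 1.0 / max(-bounds[dep, 0], bounds[dep, 1])
    coefs[dep] *= rescale
    bounds[dep] *= rescale

print(bounds)  # every row now spans a subset of [-1, 1]
```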
@@ -264,135 +234,100 @@ def _refine_projected_polyhedron(interval):
roots = []
intervals = []

- #
- (3 removed lines not shown in this view)
- for
- (8 removed lines not shown in this view)
- order = spline.order[nInd - rowInd]
- nCoef = spline.nCoef[nInd - rowInd]
- knots = spline.knots[nInd - rowInd]
- # Move independent variable to the last (fastest) axis, adding 1 to account for the dependent variables.
- coefs = np.moveaxis(spline.coefs, nInd - rowInd + 1, -1)
- break
- rowInd += spline.nInd
+ # Explore given independent variable to determine a tighter domain around roots.
+ xInterval = [0.0, 1.0]
+ iInd = interval.split
+ nDep = 0
+ for row in interval.block:
+ order = 0
+ for map, spline in row:
+ if iInd in map:
+ ind = map.index(iInd)
+ order = spline.order[ind]
+ # Move independent variable to the last (fastest) axis, adding 1 to account for the dependent variables.
+ coefs = np.moveaxis(spline.coefs, ind + 1, -1)
+ break

- (3 removed lines not shown in this view)
+ # Skip this row if it doesn't contain this independent variable.
+ if order < 1:
+ nDep += spline.nDep # Assumes there is at least one spline per block row
+ continue

- (6 removed lines not shown in this view)
+ # Compute the coefficients for f(x) = x for the independent variable and its knots.
+ xData = spline.greville(ind)
+
+ # Loop through each dependent variable in this row to refine the interval containing the root for this independent variable.
+ for yData, ySplineBounds, yBounds in zip(coefs, spline.range_bounds(),
+ interval.bounds[nDep:nDep + spline.nDep]):
+ # Compute the 2D convex hull of the knot coefficients and the spline's coefficients
+ lowerHull, upperHull = _convex_hull_2D(xData, yData.ravel(), yBounds, yBounds - ySplineBounds)
+ if lowerHull is None or upperHull is None:
+ return roots, intervals

- #
- (3 removed lines not shown in this view)
- if hull is None:
- return roots, intervals
-
- # Intersect the convex hull with the xInterval along the x axis (the knot coefficients axis).
- xInterval = _intersect_convex_hull_with_x_interval(hull, epsilon, xInterval)
- if xInterval is None:
- return roots, intervals
+ # Intersect the convex hull with the xInterval along the x axis (the knot coefficients axis).
+ xInterval = _intersect_convex_hull_with_x_interval(lowerHull, upperHull, epsilon, xInterval)
+ if xInterval is None:
+ return roots, intervals

- (2 removed lines not shown in this view)
- domain.append(xInterval)
+ nDep += spline.nDep

- # Compute new
- (7 removed lines not shown in this view)
+ # Compute new interval bounds.
+
+ xNewLeft = interval.xLeft.copy()
+ xNewRight = interval.xRight.copy()
+ xNewLeft[iInd] = (1.0 - xInterval[0]) * interval.xLeft[iInd] + xInterval[0] * interval.xRight[iInd]
+ xNewRight[iInd] = (1.0 - xInterval[1]) * interval.xLeft[iInd] + xInterval[1] * interval.xRight[iInd]
+ newActive = interval.active.copy()
+ newActive[iInd] = (xNewRight[iInd] - xNewLeft[iInd] >= epsilon)
nInd = 0
- for
-
- newIntercept[i] = d[0] * interval.slope[i] + interval.intercept[i]
- if newSlope[i] < epsilon:
- uvw.append(0.5 * (d[0] + d[1]))
- newDomain = np.delete(newDomain, nInd, axis=1)
- else:
- newUnknowns.append(i)
- uvw.append(None)
+ for active in newActive:
+ if active:
nInd += 1

- # Iteration is complete if the interval actual width
+ # Iteration is complete if the interval actual width is either
# one iteration past being less than sqrt(machineEpsilon) or there are no remaining independent variables.
if interval.atMachineEpsilon or nInd == 0:
# Return the interval center and radius.
- roots.append((
+ roots.append((0.5 * (xNewLeft + xNewRight), epsilon))
return roots, intervals

- #
- (9 removed lines not shown in this view)
- spline = interval.block[0][0]
- i = newUnknowns[0]
- for root in zeros_using_interval_newton(spline):
- if not isinstance(root, tuple):
- root = (root, root)
- w = root[1] - root[0]
- slope = newSlope.copy()
- intercept = newIntercept.copy()
- slope[i] = w * interval.slope[i]
- intercept[i] = root[0] * interval.slope[i] + interval.intercept[i]
- # Return the interval center and radius.
- roots.append((intercept + 0.5 * slope, epsilon))
-
- return roots, intervals
+ # Split domain if not sufficient decrease in width
+ width = xInterval[1] - xInterval[0]
+ domains = [xInterval]
+ if width > Crit:
+ # Didn't get the required decrease in width, so split the domain.
+ leftDomain = xInterval
+ rightDomain = xInterval.copy()
+ leftDomain[1] = 0.5 * (leftDomain[0] + leftDomain[1])
+ rightDomain[0] = leftDomain[1]
+ domains = [leftDomain, rightDomain]

- # Split domain in dimensions that aren't decreasing in width sufficiently.
- width = newDomain[1] - newDomain[0]
- domains = [newDomain]
- for nInd, w in enumerate(width):
- if w > Crit:
- # Didn't get the required decrease in width, so split the domain.
- domainCount = len(domains) # Cache the domain list size, since we're increasing it mid loop
- w *= 0.5 # Halve the domain width for this independent variable
- for i in range(domainCount):
- leftDomain = domains[i]
- rightDomain = leftDomain.copy()
- leftDomain[1][nInd] -= w # Alters domain in domains list
- rightDomain[0][nInd] += w
- domains.append(rightDomain)
-
# Add new intervals to interval stack.
for domain in domains:
- (7 removed lines not shown in this view)
+ xSplitLeft = xNewLeft.copy()
+ xSplitRight = xNewRight.copy()
+ xSplitLeft[iInd] = (1.0 - domain[0]) * interval.xLeft[iInd] + domain[0] * interval.xRight[iInd]
+ xSplitRight[iInd] = (1.0 - domain[1]) * interval.xLeft[iInd] + domain[1] * interval.xRight[iInd]
+ newBlock = []
+ for row in interval.block:
+ newRow = []
+ # Trim splines
+ for map, spline in row:
+ trimRegion = [(0.0, 1.0) for i in range(spline.nInd)]
+ if iInd in map:
+ ind = map.index(iInd)
+ trimRegion[ind] = domain
+ spline = spline.trim(trimRegion)
+ newRow.append((map, spline))
+ newBlock.append(newRow)
+ newInterval = _create_interval(newBlock, newActive, iInd,
+ interval.scale, xSplitLeft, xSplitRight, epsilon)
if newInterval:
if newInterval.block:
intervals.append(newInterval)
else:
- roots.append((
+ roots.append((0.5 * (newInterval.xLeft + newInterval.xRight),
+ 0.5 * np.linalg.norm(newInterval.xRight - newInterval.xLeft)))

return roots, intervals
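When the hull clip does not shrink the split variable's interval enough, the refinement above falls back to bisection (the `if width > Crit:` branch). A small sketch of that fallback; `Crit` here is an assumed threshold for illustration, not bspy's actual constant:

```python
Crit = 0.85  # assumed width threshold; bspy defines its own value

def split_if_needed(xInterval):
    width = xInterval[1] - xInterval[0]
    domains = [xInterval]
    if width > Crit:
        # Not enough decrease in width: bisect the interval for this variable.
        left = list(xInterval)
        right = list(xInterval)
        left[1] = 0.5 * (left[0] + left[1])
        right[0] = left[1]
        domains = [left, right]
    return domains

print(split_if_needed([0.0, 0.95]))  # -> [[0.0, 0.475], [0.475, 0.95]]
print(split_if_needed([0.2, 0.6]))   # shrank enough already, kept whole
```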
@@ -417,27 +352,22 @@ def zeros_using_projected_polyhedron(self, epsilon=None, initialScale=None):
# Set initial interval.
domain = self.domain().T
initialScale = np.full(self.nDep, 1.0, self.coefsDtype) if initialScale is None else np.array(initialScale, self.coefsDtype)
- newInterval = _create_interval(
+ newInterval = _create_interval(self.block, self.nInd * [True], -1, initialScale,
+ domain[0], domain[1], epsilon)
if newInterval:
if newInterval.block:
intervals.append(newInterval)
else:
- roots.append(
-
- #pool = Pool() # Pool size matches CPU count
+ roots.append(0.5 * (newInterval.xLeft + newInterval.xRight),
+ 0.5 * np.linalg.norm(newInterval.xRight - newInterval.xLeft))

# Refine all the intervals, collecting roots as we go.
while intervals:
- (5 removed lines not shown in this view)
- else:
- for (newRoots, newIntervals) in map(_refine_projected_polyhedron, intervals):
- roots += newRoots
- nextIntervals += newIntervals
- intervals = nextIntervals
+ interval = intervals.pop()
+ newRoots, newIntervals = _refine_projected_polyhedron(interval)
+ roots += newRoots
+ newIntervals.reverse()
+ intervals += newIntervals

# Combine overlapping roots into regions.
regions = []
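The driver loop above replaces the earlier pool/map based sweep with an explicit stack, so intervals are refined depth-first: pop one, refine it, and push its children back reversed so the leftmost child is handled next. A generic sketch of that control flow with a stand-in `refine` function (both functions here are illustrative, not bspy code):

```python
def process_depth_first(initial, refine):
    roots, stack = [], [initial]
    while stack:
        interval = stack.pop()
        newRoots, newIntervals = refine(interval)
        roots += newRoots
        newIntervals.reverse()   # keep left-to-right order despite LIFO processing
        stack += newIntervals
    return roots

def bisect_until_narrow(interval, tol=0.1):
    a, b = interval
    if b - a < tol:
        return [0.5 * (a + b)], []       # narrow enough: report the midpoint as a "root"
    mid = 0.5 * (a + b)
    return [], [(a, mid), (mid, b)]      # otherwise split into two child intervals

print(process_depth_first((0.0, 1.0), bisect_until_narrow))
```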
@@ -446,17 +376,33 @@ def zeros_using_projected_polyhedron(self, epsilon=None, initialScale=None):
rootCenter = root[0]
rootRadius = root[1]

- #
+ # Take one Newton step on each root
value = self.evaluate(rootCenter)
-
+ residualNorm = np.linalg.norm(value)
+ try:
+ update = np.linalg.solve(self.jacobian(rootCenter), value)
+ if np.linalg.norm(update) < rootRadius:
+ rootCenter -= update
+ except:
+ pass
+
+ # Project back onto spline domain
+ selfDomain = self.domain()
+ rootCenter = np.maximum(np.minimum(rootCenter, selfDomain.T[1]), selfDomain.T[0])
+ value = self.evaluate(rootCenter)
+ newResidualNorm = np.linalg.norm(value)
+ rootRadius *= newResidualNorm / residualNorm
+ residualNorm = newResidualNorm
+
+ # Ensure we have a real root (not a boundary special case).
+ if residualNorm >= evaluationEpsilon:
continue

# Expand the radius of the root based on the approximate distance from the center needed
# to raise the value of the spline above evaluationEpsilon.
- (3 removed lines not shown in this view)
- rootRadius = max(rootRadius, evaluationEpsilon / minEigenvalue)
+ minSingularValue = np.linalg.svd(self.jacobian(rootCenter), False, False)[-1]
+ if minSingularValue > epsilon:
+ rootRadius = max(rootRadius, evaluationEpsilon / minSingularValue)

# Intersect this root with the existing regions, expanding and combining them as appropriate.
firstRegion = None
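The new post-processing above takes one guarded Newton step on each reported root, projects the result back onto the spline's domain, and rescales the root's radius by the change in residual. A hedged, self-contained sketch of those steps for an ordinary vector function; the function, Jacobian, and domain box are invented for the example, and the tiny-residual guard is an extra safeguard not present in the diff:

```python
import numpy as np

def polish_root(f, jacobian, center, radius, domain):
    value = f(center)
    residual = np.linalg.norm(value)
    try:
        # One Newton step, accepted only if it stays within the root's radius.
        update = np.linalg.solve(jacobian(center), value)
        if np.linalg.norm(update) < radius:
            center = center - update
    except np.linalg.LinAlgError:
        pass  # singular Jacobian: keep the unpolished center
    # Clamp back into the domain box and rescale the radius by the residual drop.
    center = np.maximum(np.minimum(center, domain[:, 1]), domain[:, 0])
    newResidual = np.linalg.norm(f(center))
    radius *= newResidual / max(residual, np.finfo(float).tiny)
    return center, radius

f = lambda x: np.array([x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]])
J = lambda x: np.array([[2.0 * x[0], 2.0 * x[1]], [1.0, -1.0]])
box = np.array([[0.0, 1.0], [0.0, 1.0]])
print(polish_root(f, J, np.array([0.7, 0.7]), 0.1, box))  # moves toward (1/sqrt(2), 1/sqrt(2))
```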
@@ -490,6 +436,34 @@ def zeros_using_projected_polyhedron(self, epsilon=None, initialScale=None):

return roots

+ def _turning_point_determinant(self, uvw, cosTheta, sinTheta):
+ sign = -1 if hasattr(self, "metadata") and self.metadata.get("flipNormal", False) else 1
+ tangentSpace = self.jacobian(uvw).T
+ return cosTheta * sign * np.linalg.det(tangentSpace[[j for j in range(self.nInd) if j != 0]]) - \
+ sinTheta * sign * np.linalg.det(tangentSpace[[j for j in range(self.nInd) if j != 1]])
+
+ def _turning_point_determinant_gradient(self, uvw, cosTheta, sinTheta):
+ dtype = self.coefs.dtype if hasattr(self, "coefs") else self.coefsDtype
+ gradient = np.zeros(self.nInd, dtype)
+
+ sign = -1 if hasattr(self, "metadata") and self.metadata.get("flipNormal", False) else 1
+ tangentSpace = self.jacobian(uvw).T
+ dTangentSpace = tangentSpace.copy()
+
+ wrt = [0] * self.nInd
+ for i in range(self.nInd):
+ wrt[i] = 1
+ for j in range(self.nInd):
+ wrt[j] = 1 if i != j else 2
+ dTangentSpace[j, :] = self.derivative(wrt, uvw) # tangentSpace and dTangentSpace are the transpose of the jacobian
+ gradient[i] += cosTheta * sign * np.linalg.det(dTangentSpace[[k for k in range(self.nInd) if k != 0]]) - \
+ sinTheta * sign * np.linalg.det(dTangentSpace[[k for k in range(self.nInd) if k != 1]])
+ dTangentSpace[j, :] = tangentSpace[j, :] # tangentSpace and dTangentSpace are the transpose of the jacobian
+ wrt[j] = 0 if i != j else 1
+ wrt[i] = 0
+
+ return gradient
+
def _contours_of_C1_spline_block(self, epsilon, evaluationEpsilon):
Point = namedtuple('Point', ('d', 'det', 'onUVBoundary', 'turningPoint', 'uvw'))
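The added `_turning_point_determinant` evaluates two determinants of the transposed Jacobian, one with the u row removed and one with the v row removed, and combines them with cos(theta) and sin(theta). A toy illustration for the shape of block this applies to (one more independent variable than dependent variables, so the reduced matrices are square); the matrix values are made up:

```python
import numpy as np

def turning_point_determinant(jacobian, cosTheta, sinTheta, sign=1):
    tangentSpace = jacobian.T                      # rows correspond to d/du, d/dv, ...
    nInd = tangentSpace.shape[0]
    without_u = [j for j in range(nInd) if j != 0]
    without_v = [j for j in range(nInd) if j != 1]
    return cosTheta * sign * np.linalg.det(tangentSpace[without_u]) - \
           sinTheta * sign * np.linalg.det(tangentSpace[without_v])

J = np.array([[1.0, 0.5, 0.0],
              [0.0, 2.0, 1.0]])                    # nDep = 2, nInd = 3
theta = np.pi / 6.0
print(turning_point_determinant(J, np.cos(theta), np.sin(theta)))
```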
@@ -505,18 +479,14 @@ def _contours_of_C1_spline_block(self, epsilon, evaluationEpsilon):
self = self.reparametrize(((0.0, 1.0),) * self.nInd)

# Rescale self in all dimensions.
- nDep = 0
initialScale = np.max(np.abs(bounds), axis=1)
rescale = np.reciprocal(initialScale)
+ nDep = 0
for row in self.block:
- (2 removed lines not shown in this view)
- for coefs, scale in zip(spline.coefs, rescale):
+ for map, spline in row:
+ for coefs, scale in zip(spline.coefs, rescale[nDep:nDep + spline.nDep]):
coefs *= scale
nDep += spline.nDep
-
- # Construct self's normal.
- normal = self.normal_spline((0, 1)) # We only need the first two indices

# Try arbitrary values for theta between [0, pi/2] that are unlikely to be a stationary points.
for theta in (1.0 / np.sqrt(2), np.pi / 6.0, 1.0/ np.e):
@@ -525,9 +495,6 @@ def _contours_of_C1_spline_block(self, epsilon, evaluationEpsilon):
sinTheta = np.sin(theta)
abort = False

- # Construct the turning point determinant.
- turningPointDeterminant = normal.dot((cosTheta, sinTheta))
-
# Find intersections with u and v boundaries.
def uvIntersections(nInd, boundary):
zeros = self.contract([None] * nInd + [boundary] + [None] * (self.nInd - nInd - 1)).zeros(epsilon, initialScale)
@@ -538,8 +505,8 @@ def _contours_of_C1_spline_block(self, epsilon, evaluationEpsilon):
break
uvw = np.insert(np.array(zero), nInd, boundary)
d = uvw[0] * cosTheta + uvw[1] * sinTheta
- n = normal(uvw)
- tpd =
+ n = self.normal(uvw, False, (0, 1))
+ tpd = _turning_point_determinant(self, uvw, cosTheta, sinTheta)
det = (0.5 - boundary) * n[nInd] * tpd
if abs(det) < epsilon:
abort = True
@@ -603,21 +570,79 @@ def _contours_of_C1_spline_block(self, epsilon, evaluationEpsilon):
continue # Try a different theta

# Find turning points by combining self and turningPointDeterminant into a system and processing its zeros.
+
+ # First, add the null space constraint to the system: dot(self's gradient, (r * sinTheta, -r * cosTheta, c, d, ...) = 0.
+ # This introduces self.nInd - 1 new independent variables: r, c, d, ...
turningPointBlock = self.block.copy()
- (2 removed lines not shown in this view)
+ if self.nInd > 2:
+ rSpline = bspy.Spline(1, 1, (2,), (2,), ((0.0, 0.0, 1.0, 1.0),), ((0.0, 1.0),))
+ else:
+ rSpline = bspy.Spline.point([1.0])
+ otherSpline = bspy.Spline(1, 1, (2,), (2,), ((-1.0, -1.0, 1.0, 1.0),), ((-1.0, 1.0),))
+ # Track indices of other independent variables (c, d, ...).
+ otherNInd = self.nInd + 1 # Add one since r is always the first new variable (index for r is self.nInd)
+ otherDictionary = {}
+ # Go through each row building the null space constraint.
+ for row in self.block:
+ newRow = []
+ for map, spline in row:
+ newSpline = None # The spline's portion of the null space constraint starts with None
+ newMap = map.copy() # The map for spline's contribution to the null space constraint starts with its existing map
+ # Create addition indMap with existing independent variables for use in summing the dot product.
+ indMapForAdd = [(index, index) for index in range(spline.nInd)]
+ rIndex = None # Index of r in newSpline, which we need to track since rSpline may be added twice
+
+ # Add each term of spline's contribution to dot(self's gradient, (r * sinTheta, -r * cosTheta, c, d, ...).
+ for i in range(spline.nInd):
+ dSpline = spline.differentiate(i)
+ nInd = map[i]
+ if nInd < 2:
+ factor = sinTheta if nInd == 0 else -cosTheta
+ term = dSpline.multiply(factor * rSpline)
+ if rIndex is None:
+ # Adding rSpline for the first time, so add r to newMap and track its index.
+ newMap.append(self.nInd)
+ newSpline = term if newSpline is None else newSpline.add(term, indMapForAdd)
+ rIndex = newSpline.nInd - 1
+ else:
+ # The same rSpline is being added again, so enhance the indMapForAdd to associate the two rSplines.
+ newSpline = newSpline.add(term, indMapForAdd + [(rIndex, term.nInd - 1)])
+ else:
+ if nInd not in otherDictionary:
+ otherDictionary[nInd] = otherNInd
+ otherNInd += 1
+ newMap.append(otherDictionary[nInd])
+ term = dSpline.multiply(otherSpline)
+ newSpline = term if newSpline is None else newSpline.add(term, indMapForAdd)
+
+ newMap = newMap[:newSpline.nInd]
+ newRow.append((newMap, newSpline))
+ turningPointBlock.append(newRow)
+
+ # Second, add unit vector constrain to the system.
+ # r^2 + c^2 + d^2 + ... = 1
+ rSquaredMinus1 = bspy.Spline(1, 1, (3,), (3,), ((0.0, 0.0, 0.0, 1.0, 1.0, 1.0),), ((-1.0, -1.0, 0.0),))
+ otherSquared = bspy.Spline(1, 1, (3,), (3,), ((-1.0, -1.0, -1.0, 1.0, 1.0, 1.0),), ((1.0, -1.0, 1.0),))
+ newRow = [((self.nInd,), rSquaredMinus1)]
+ assert otherNInd == 2 * self.nInd - 1
+ for nInd in range(self.nInd + 1, otherNInd):
+ newRow.append(((nInd,), otherSquared))
+ if self.nInd > 2:
+ turningPointBlock.append(newRow)
+ if self.nDep > 1:
+ turningPointInitialScale = np.append(initialScale, (1.0,) * (self.nDep + 1))
+ else:
+ turningPointInitialScale = np.append(initialScale, (1.0,))
+
+ # Finally, find the zeros of the system (only the first self.nInd values are of interest).
+ zeros = bspy.spline_block.SplineBlock(turningPointBlock).zeros(epsilon, turningPointInitialScale)
for uvw in zeros:
if isinstance(uvw, tuple):
abort = True
break
+ uvw = uvw[:self.nInd] # Remove any new independent variables added by the turning point system
d = uvw[0] * cosTheta + uvw[1] * sinTheta
-
- wrt = [0] * self.nInd
- det = 0.0
- for nInd in range(self.nInd):
- wrt[nInd] = 1
- det += turningPointDeterminant.derivative(wrt, uvw) * n[nInd]
- wrt[nInd] = 0
+ det = np.dot(self.normal(uvw, False), _turning_point_determinant_gradient(self, uvw, cosTheta, sinTheta))
if abs(det) < epsilon:
abort = True
break
@@ -883,22 +908,9 @@ def contours(self):
evaluationEpsilon = max(np.sqrt(epsilon), np.finfo(self.coefsDtype).eps ** 0.25)

# Split the splines in the block to ensure C1 continuity within each block
- blocks =
- for i, row in enumerate(self.block):
- for j, spline in enumerate(row):
- splines = spline.split(minContinuity = 1)
- if splines.size == 1 and self.size == 1:
- break # Special case of a block with one C1 spline
- newBlocks = []
- for spline in splines.ravel():
- for block in blocks:
- newBlock = block.block.copy()
- newRow = newBlock[i].copy()
- newBlock[i] = newRow
- newRow[j] = spline
- newBlocks.append(bspy.spline_block.SplineBlock(newBlock))
- blocks = newBlocks
+ blocks = self.split(minContinuity=1).ravel()

+ # For each block, find its contours and join them to the contours from previous blocks.
contours = []
for block in blocks:
splineContours = _contours_of_C1_spline_block(block, epsilon, evaluationEpsilon)
@@ -946,6 +958,8 @@ def intersect(self, other):
zeros = spline.zeros()
# Convert each intersection point into a Manifold.Crossing and each intersection interval into a Manifold.Coincidence.
for zero in zeros:
+ if isinstance(zero, tuple) and zero[1] - zero[0] < Manifold.minSeparation:
+ zero = 0.5 * (zero[0] + zero[1])
if isinstance(zero, tuple):
# Intersection is an interval, so create a Manifold.Coincidence.
planeBounds = (projection @ (self((zero[0],)) - other._point), projection @ (self((zero[1],)) - other._point))
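The added pre-check collapses an interval zero narrower than `Manifold.minSeparation` to its midpoint, so it is treated as a point crossing rather than a coincidence. A trivial sketch of that normalization; the `1.0e-12` threshold below is a stand-in, not bspy's actual `minSeparation` value:

```python
minSeparation = 1.0e-12  # stand-in for Manifold.minSeparation

def normalize_zero(zero):
    # Collapse a degenerate interval to a single point.
    if isinstance(zero, tuple) and zero[1] - zero[0] < minSeparation:
        zero = 0.5 * (zero[0] + zero[1])
    return zero

print(normalize_zero((0.25, 0.25 + 1.0e-15)))  # collapses to ~0.25
print(normalize_zero((0.1, 0.4)))              # genuine interval, kept as-is
```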
@@ -1007,6 +1021,8 @@ def intersect(self, other):
zeros = block.zeros()
# Convert each intersection point into a Manifold.Crossing and each intersection interval into a Manifold.Coincidence.
for zero in zeros:
+ if isinstance(zero, tuple) and zero[1] - zero[0] < Manifold.minSeparation:
+ zero = 0.5 * (zero[0] + zero[1])
if isinstance(zero, tuple):
# Intersection is an interval, so create a Manifold.Coincidence.
@@ -1123,14 +1139,14 @@ def complete_slice(self, slice, solid):
newBoundary.touched = False

# Define function for adding slice points to full domain boundaries.
- def process_domain_point(boundary, domainPoint):
+ def process_domain_point(boundary, domainPoint, adjustment):
point = boundary.manifold.evaluate(domainPoint)
# See if and where point touches full domain.
for newBoundary in fullDomain.boundaries:
vector = point - newBoundary.manifold._point
if abs(np.dot(newBoundary.manifold._normal, vector)) < Manifold.minSeparation:
- # Add the point onto the new boundary.
- normal = np.sign(newBoundary.manifold._tangentSpace.T @ boundary.manifold.normal(domainPoint))
+ # Add the point onto the new boundary (adjust normal evaluation point to move away from boundary).
+ normal = np.sign(newBoundary.manifold._tangentSpace.T @ boundary.manifold.normal(domainPoint + adjustment))
newBoundary.domain.add_boundary(Boundary(Hyperplane(normal, newBoundary.manifold._tangentSpace.T @ vector, 0.0), Solid(0, True)))
newBoundary.touched = True
break
@@ -1139,9 +1155,9 @@ def complete_slice(self, slice, solid):
for boundary in slice.boundaries:
domainBoundaries = boundary.domain.boundaries
domainBoundaries.sort(key=lambda boundary: (boundary.manifold.evaluate(0.0), boundary.manifold.normal(0.0)))
- process_domain_point(boundary, domainBoundaries[0].manifold._point)
+ process_domain_point(boundary, domainBoundaries[0].manifold._point, Manifold.minSeparation)
if len(domainBoundaries) > 1:
- process_domain_point(boundary, domainBoundaries[-1].manifold._point)
+ process_domain_point(boundary, domainBoundaries[-1].manifold._point, -Manifold.minSeparation)

# For touched boundaries, remove domain bounds that aren't needed and then add boundary to slice.
boundaryWasTouched = False
@@ -1149,7 +1165,6 @@ def complete_slice(self, slice, solid):
if newBoundary.touched:
boundaryWasTouched = True
domainBoundaries = newBoundary.domain.boundaries
- assert len(domainBoundaries) > 2
domainBoundaries.sort(key=lambda boundary: (boundary.manifold.evaluate(0.0), boundary.manifold.normal(0.0)))
# Ensure domain endpoints don't overlap and their normals are consistent.
if abs(domainBoundaries[0].manifold._point - domainBoundaries[1].manifold._point) < Manifold.minSeparation or \
|