bspy 1.5.0__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bspy/_spline_domain.py +88 -159
- bspy/_spline_evaluation.py +15 -0
- bspy/_spline_fitting.py +203 -163
- bspy/_spline_operations.py +40 -13
- bspy/spline.py +238 -96
- {bspy-1.5.0.dist-info → bspy-2.1.0.dist-info}/METADATA +4 -3
- bspy-2.1.0.dist-info/RECORD +15 -0
- {bspy-1.5.0.dist-info → bspy-2.1.0.dist-info}/WHEEL +1 -1
- bspy-1.5.0.dist-info/RECORD +0 -15
- {bspy-1.5.0.dist-info → bspy-2.1.0.dist-info}/LICENSE +0 -0
- {bspy-1.5.0.dist-info → bspy-2.1.0.dist-info}/top_level.txt +0 -0
bspy/_spline_fitting.py
CHANGED
@@ -2,170 +2,56 @@ import numpy as np
 import bspy.spline
 import math
 
-    [old lines 5-48, the start of the previous least_squares implementation, are truncated in the source diff view]
-            w = np.empty((nCf,), float)
-            for i in range(nCf):
-                d += inc
-                high = int(d + 0.5 + 1) # Paper's algorithm sets high to d + 0.5, but only references high + 1
-                w[i] = np.mean(knots[low:high])
-                low = high
-            for i in range(1, nCf - degree):
-                newKnots.append(np.mean(w[i:i + degree]))
-            newKnots += [knots[-1]] * ord
-            newKnotList.append(np.array(newKnots, knots.dtype))
-        knotList = newKnotList
-    else:
-        if not(len(knotList) == nInd): raise ValueError("len(knots) != nInd") # The documented interface uses the argument 'knots' instead of 'knotList'
-        nCoef = [len(knotList[i]) - order[i] for i in range(nInd)]
-        totalCoef = 1
-        newKnotList = []
-        for knots, ord, nCf in zip(knotList, order, nCoef):
-            for i in range(nCf):
-                if not(knots[i] <= knots[i + 1] and knots[i] < knots[i + ord]): raise ValueError("Improperly ordered knot sequence")
-            totalCoef *= nCf
-            newKnotList.append(np.array(knots))
-        if not(totalCoef <= totalDataPoints): raise ValueError(f"Insufficient number of data points. You need at least {totalCoef}.")
-        knotList = newKnotList
-
-    # Initialize A and b from the likely overdetermined equation, A x = b, where A contains the bspline values at the independent variables,
-    # b contains point values for the dependent variables, and the x contains the desired coefficients.
-    A = np.zeros((totalDataPoints, totalCoef), type(dataPoints[0][0]))
-    b = np.empty((totalDataPoints, nDep), A.dtype)
-
-    # Fill in the bspline values in A and the dependent point values in b at row at a time.
-    # Note that if a data point also specifies first derivatives, it fills out nInd + 1 rows (the point and its derivatives).
-    row = 0
-    for point in dataPoints:
-        hasDerivatives = len(point) == nInd + nDep * (nInd + 1)
-
-        # Compute the bspline values (and their first derivatives as needed).
-        bValueData = []
-        for knots, ord, nCf, u in zip(knotList, order, nCoef, point[:nInd]):
-            ix = np.searchsorted(knots, u, 'right')
-            ix = min(ix, nCf)
-            bValueData.append((ix, bspy.Spline.bspline_values(ix, knots, ord, u), \
-                bspy.Spline.bspline_values(ix, knots, ord, u, 1) if hasDerivatives else None))
-
-        # Compute the values for the A array.
-        # It's a little tricky because we have to multiply nInd different bspline arrays of different sizes
-        # and index into flattened A array. The solution is to loop through the total number of entries
-        # being changed (totalOrder), and compute the array indices via mods and multiplies.
-        indices = [0] * nInd
-        for i in range(totalOrder):
-            column = 0
-            bValues = np.ones((nInd + 1,), A.dtype)
-            for j, ord, nCf, index, (ix, values, dValues) in zip(range(1, nInd + 1), order, nCoef, indices, bValueData):
-                column = column * nCf + ix - ord + index
-                # Compute the bspline value for this specific element of A.
-                bValues[0] *= values[index]
-                if hasDerivatives:
-                    # Compute the first derivative values for each independent variable.
-                    for k in range(1, nInd + 1):
-                        bValues[k] *= dValues[index] if k == j else values[index]
-
-            # Assign all the values and derivatives.
-            A[row, column] = bValues[0]
-            if hasDerivatives:
-                for k in range(1, nInd + 1):
-                    A[row + k, column] = bValues[k]
-
-            # Increment the bspline indices.
-            for j in range(nInd - 1, -1, -1):
-                indices[j] = (indices[j] + 1) % order[j]
-                if indices[j] > 0:
-                    break
-
-        # Assign values for the b array.
-        b[row, :] = point[nInd:nInd + nDep]
-        if hasDerivatives:
-            for k in range(1, nInd + 1):
-                b[row + k, :] = point[nInd + nDep * k:nInd + nDep * (k + 1)]
-
-        # Increment the row before filling in the next data point
-        row += nInd + 1 if hasDerivatives else 1
-
-    # Yay, the A and b arrays are ready to solve.
-    # Now, we call numpy's least squares solver.
-    coefs, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
-
-    # Reshape the coefs array to match nCoef (un-flatten) and move the dependent variables to the front.
-    coefs = np.moveaxis(coefs.reshape((*nCoef, nDep)), -1, 0)
-
-    # Return the resulting spline, computing the accuracy based on system epsilon and the norm of the residuals.
-    maxError = np.finfo(coefs.dtype).eps
-    if residuals.size > 0:
-        maxError = max(maxError, residuals.sum())
-    return bspy.Spline(nInd, nDep, order, nCoef, knotList, coefs, np.sqrt(maxError), metadata)
-
-# From Lowan, Arnold N., Norman Davids, and Arthur Levenson. "Table of the zeros of the Legendre polynomials of
-# order 1-16 and the weight coefficients for Gauss' mechanical quadrature formula." (1942): 739-743.
-_legendre_polynomial_zeros = [
-    [0.000000000000000],
-    [0.577350269189626],
-    [0.000000000000000,0.774596669241483],
-    [0.339981043584856,0.861136311594053],
-    [0.000000000000000,0.538469310105683,0.906179845938664],
-    [0.238619186083197,0.661209386466265,0.932469514203152],
-    [0.000000000000000,0.405845151377397,0.741531185599394,0.949107912342759],
-    [0.183434642495650,0.525532409916329,0.796666477413627,0.960289856497536],
-    [0.000000000000000,0.324253423403809,0.613371432700590,0.836031107326636,0.968160239507626],
-    [0.148874338981631,0.433395394129247,0.679409568299024,0.865063366688985,0.973906528517172],
-    [0.000000000000000,0.269543155952345,0.519096129110681,0.730152005574049,0.887062599768095,0.978228658146057],
-    [0.125333408511469,0.367831498918180,0.587317954286617,0.769902674194305,0.904117256370475,0.981560634246719],
-    [0.000000000000000,0.230458315955135,0.448492751036447,0.642349339440340,0.801578090733310,0.917598399222978,0.984183054718588],
-    [0.108054948707344,0.319112368927890,0.515248636358154,0.687292904811685,0.827201315069765,0.928434883663574,0.986283808696812],
-    [0.000000000000000,0.201194093997435,0.394151347077563,0.570972172608539,0.724417731360170,0.848206583410427,0.937273392400706,0.987992518020485],
-    [0.095012509837637,0.281603550779259,0.458016777657227,0.617876244402644,0.755404408355003,0.865631202387832,0.944575023073233,0.989400934991650],
-]
+def circular_arc(radius, angle, tolerance = None):
+    if tolerance is None:
+        tolerance = np.finfo(float).eps
+    if radius < 0.0 or angle < 0.0 or tolerance < 0.0: raise ValueError("The radius, angle, and tolerance must be positive.")
+
+    samples = int(max(np.ceil(((1.1536e-5 * radius / tolerance)**(1/8)) * angle / 90), 2.0)) + 1
+    return bspy.Spline.section([(radius * np.cos(u * angle * np.pi / 180), radius * np.sin(u * angle * np.pi / 180), 90 + u * angle, 1.0 / radius) for u in np.linspace(0.0, 1.0, samples)])
+
+# Courtesy of Michael Epton - Translated from his F77 code lgnzro
+def _legendre_polynomial_zeros(degree):
+    def legendre(degree, x):
+        p = [1.0, x]
+        pd = [0.0, 1.0]
+        for n in range(2, degree + 1):
+            alfa = (2 * n - 1) / n
+            beta = (n - 1) / n
+            pd.append(alfa * (p[-1] + x * pd[-1]) - beta * pd[-2])
+            p.append(alfa * x * p[-1] - beta * p[-2])
+        return p, pd
+    zval = 1.0
+    z = []
+    for iRoot in range(degree // 2):
+        done = False
+        while True:
+            p, pd = legendre(degree, zval)
+            sum = 0.0
+            for zRoot in z:
+                sum += 1.0 / (zval - zRoot)
+            dz = p[-1] / (pd[-1] - sum * p[-1])
+            zval -= dz
+            if done:
+                break
+            if dz < 1.0e-10:
+                done = True
+        z.append(zval)
+        zval -= 0.001
+    if degree % 2 == 1:
+        z.append(0.0)
+    z.reverse()
+    w = []
+    for zval in z:
+        p, pd = legendre(degree, zval)
+        w.append(2.0 / ((1.0 - zval ** 2) * pd[-1] ** 2))
+    return z, w
 
 def contour(F, knownXValues, dF = None, epsilon = None, metadata = {}):
     # Set up parameters for initial guess of x(t) and validate arguments.
     order = 4
     degree = order - 1
-    rhos = _legendre_polynomial_zeros
+    rhos, gaussWeights = _legendre_polynomial_zeros(degree - 1)
     if not(len(knownXValues) >= 2): raise ValueError("There must be at least 2 known x values.")
     m = len(knownXValues) - 1
     nCoef = m * (degree - 1) + 2
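In 2.1.0 the hard-coded Lowan/Davids/Levenson table of Legendre zeros is replaced by the _legendre_polynomial_zeros(degree) generator above, which evaluates the Legendre recurrence and Newton-iterates with deflation, returning the nonnegative zeros together with their Gauss quadrature weights. A quick sanity check against NumPy (a sketch, not part of the package; it assumes the private helper remains importable from bspy._spline_fitting):

    import numpy as np
    from bspy._spline_fitting import _legendre_polynomial_zeros

    degree = 8
    z, w = _legendre_polynomial_zeros(degree)                  # nonnegative zeros of P_8, ascending
    nodes, weights = np.polynomial.legendre.leggauss(degree)   # full symmetric Gauss-Legendre rule
    positive = nodes > 0.0
    assert np.allclose(z, nodes[positive]) and np.allclose(w, weights[positive])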
@@ -390,6 +276,156 @@ def contour(F, knownXValues, dF = None, epsilon = None, metadata = {}):
     spline = spline.confine(F.domain())
     return spline
 
+def least_squares(nInd, nDep, order, dataPoints, knotList = None, compression = 0, metadata = {}):
+    if not(nInd >= 0): raise ValueError("nInd < 0")
+    if not(nDep >= 0): raise ValueError("nDep < 0")
+    if not(len(order) == nInd): raise ValueError("len(order) != nInd")
+    if not(0 <= compression < 100): raise ValueError("compression not between 0 and 99")
+    totalOrder = 1
+    for ord in order:
+        totalOrder *= ord
+
+    totalDataPoints = len(dataPoints)
+    for point in dataPoints:
+        if not(len(point) == nInd + nDep or len(point) == nInd + nDep * (nInd + 1)): raise ValueError(f"Data points do not have {nInd + nDep} values")
+        if len(point) == nInd + nDep * (nInd + 1):
+            totalDataPoints += nInd
+
+    if knotList is None:
+        # Compute the target number of coefficients and the actual number of samples in each independent variable.
+        targetTotalCoef = len(dataPoints) * (100 - compression) / 100.0
+        totalCoef = 1
+        knotSamples = np.array([point[:nInd] for point in dataPoints], type(dataPoints[0][0])).T
+        knotList = []
+        for knotSample in knotSamples:
+            knots = np.unique(knotSample)
+            knotList.append(knots)
+            totalCoef *= len(knots)
+
+        # Scale the number of coefficients for each independent variable so that the total closely matches the target.
+        scaling = min((targetTotalCoef / totalCoef) ** (1.0 / nInd), 1.0)
+        nCoef = []
+        totalCoef = 1
+        for knots in knotList:
+            nCf = int(math.ceil(len(knots) * scaling))
+            nCoef.append(nCf)
+            totalCoef *= nCf
+
+        # Compute "ideal" knots for each independent variable, based on the number of coefficients and the sample values.
+        # Piegl, Les A., and Wayne Tiller. "Surface approximation to scanned data." The visual computer 16 (2000): 386-395.
+        newKnotList = []
+        for iInd, ord, nCf, knots in zip(range(nInd), order, nCoef, knotList):
+            degree = ord - 1
+            newKnots = [knots[0]] * ord
+            inc = len(knots)/nCf
+            low = 0
+            d = -1
+            w = np.empty((nCf,), float)
+            for i in range(nCf):
+                d += inc
+                high = int(d + 0.5 + 1) # Paper's algorithm sets high to d + 0.5, but only references high + 1
+                w[i] = np.mean(knots[low:high])
+                low = high
+            for i in range(1, nCf - degree):
+                newKnots.append(np.mean(w[i:i + degree]))
+            newKnots += [knots[-1]] * ord
+            newKnotList.append(np.array(newKnots, knots.dtype))
+        knotList = newKnotList
+    else:
+        if not(len(knotList) == nInd): raise ValueError("len(knots) != nInd") # The documented interface uses the argument 'knots' instead of 'knotList'
+        nCoef = [len(knotList[i]) - order[i] for i in range(nInd)]
+        totalCoef = 1
+        newKnotList = []
+        for knots, ord, nCf in zip(knotList, order, nCoef):
+            for i in range(nCf):
+                if not(knots[i] <= knots[i + 1] and knots[i] < knots[i + ord]): raise ValueError("Improperly ordered knot sequence")
+            totalCoef *= nCf
+            newKnotList.append(np.array(knots))
+        if not(totalCoef <= totalDataPoints): raise ValueError(f"Insufficient number of data points. You need at least {totalCoef}.")
+        knotList = newKnotList
+
+    # Initialize A and b from the likely overdetermined equation, A x = b, where A contains the bspline values at the independent variables,
+    # b contains point values for the dependent variables, and the x contains the desired coefficients.
+    A = np.zeros((totalDataPoints, totalCoef), type(dataPoints[0][0]))
+    b = np.empty((totalDataPoints, nDep), A.dtype)
+
+    # Fill in the bspline values in A and the dependent point values in b at row at a time.
+    # Note that if a data point also specifies first derivatives, it fills out nInd + 1 rows (the point and its derivatives).
+    row = 0
+    for point in dataPoints:
+        hasDerivatives = len(point) == nInd + nDep * (nInd + 1)
+
+        # Compute the bspline values (and their first derivatives as needed).
+        bValueData = []
+        for knots, ord, nCf, u in zip(knotList, order, nCoef, point[:nInd]):
+            ix = np.searchsorted(knots, u, 'right')
+            ix = min(ix, nCf)
+            bValueData.append((ix, bspy.Spline.bspline_values(ix, knots, ord, u), \
+                bspy.Spline.bspline_values(ix, knots, ord, u, 1) if hasDerivatives else None))
+
+        # Compute the values for the A array.
+        # It's a little tricky because we have to multiply nInd different bspline arrays of different sizes
+        # and index into flattened A array. The solution is to loop through the total number of entries
+        # being changed (totalOrder), and compute the array indices via mods and multiplies.
+        indices = [0] * nInd
+        for i in range(totalOrder):
+            column = 0
+            bValues = np.ones((nInd + 1,), A.dtype)
+            for j, ord, nCf, index, (ix, values, dValues) in zip(range(1, nInd + 1), order, nCoef, indices, bValueData):
+                column = column * nCf + ix - ord + index
+                # Compute the bspline value for this specific element of A.
+                bValues[0] *= values[index]
+                if hasDerivatives:
+                    # Compute the first derivative values for each independent variable.
+                    for k in range(1, nInd + 1):
+                        bValues[k] *= dValues[index] if k == j else values[index]
+
+            # Assign all the values and derivatives.
+            A[row, column] = bValues[0]
+            if hasDerivatives:
+                for k in range(1, nInd + 1):
+                    A[row + k, column] = bValues[k]
+
+            # Increment the bspline indices.
+            for j in range(nInd - 1, -1, -1):
+                indices[j] = (indices[j] + 1) % order[j]
+                if indices[j] > 0:
+                    break
+
+        # Assign values for the b array.
+        b[row, :] = point[nInd:nInd + nDep]
+        if hasDerivatives:
+            for k in range(1, nInd + 1):
+                b[row + k, :] = point[nInd + nDep * k:nInd + nDep * (k + 1)]
+
+        # Increment the row before filling in the next data point
+        row += nInd + 1 if hasDerivatives else 1
+
+    # Yay, the A and b arrays are ready to solve.
+    # Now, we call numpy's least squares solver.
+    coefs, residuals, rank, s = np.linalg.lstsq(A, b, rcond=None)
+
+    # Reshape the coefs array to match nCoef (un-flatten) and move the dependent variables to the front.
+    coefs = np.moveaxis(coefs.reshape((*nCoef, nDep)), -1, 0)
+
+    # Return the resulting spline, computing the accuracy based on system epsilon and the norm of the residuals.
+    maxError = np.finfo(coefs.dtype).eps
+    if residuals.size > 0:
+        maxError = max(maxError, residuals.sum())
+    return bspy.Spline(nInd, nDep, order, nCoef, knotList, coefs, np.sqrt(maxError), metadata)
+
+def revolve(self, angle):
+    if self.nDep != 2: raise ValueError("Spline must have 2 dependent variables")
+
+    maxRadius = max(abs(self.coefs[0].min()), self.coefs[0].max())
+    arc = ((1.0 / maxRadius, 0.0),
+        (0.0, 1.0 / maxRadius),
+        (0.0, 0.0)) @ bspy.Spline.circular_arc(maxRadius, angle) + (0.0, 0.0, 1.0)
+    radiusHeight = ((1.0, 0.0),
+        (1.0, 0.0),
+        (0.0, 1.0)) @ self
+    return arc.multiply(radiusHeight)
+
 def ruled_surface(curve1, curve2):
     # Ensure that the splines are compatible
     if curve1.nInd != curve2.nInd: raise ValueError("Splines must have the same number of independent variables")
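The least_squares fitter reappears here (moved below contour) with a new compression argument that trims the coefficient count relative to the number of data points when no knot vectors are supplied. A minimal usage sketch, calling the module-level function directly (the released wheel presumably also exposes it through bspy.Spline via spline.py, which is outside this hunk):

    import numpy as np
    from bspy._spline_fitting import least_squares

    # Fit a cubic planar curve (nInd=1, nDep=2) through 50 samples of a quarter circle,
    # letting the fitter choose knots and keep roughly half as many coefficients (compression=50).
    samples = [(t, np.cos(t), np.sin(t)) for t in np.linspace(0.0, np.pi / 2, 50)]
    curve = least_squares(1, 2, (4,), samples, compression=50)
    print(curve.nCoef, curve.accuracy)   # about 25 coefficients and the residual-based accuracy estimate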
@@ -419,6 +455,10 @@ def section(xytk):
     dotTangents = startTangent @ endTangent
     theta = math.atan2(crossTangents, dotTangents)
 
+    # Make sure angle is less than 180 degrees
+    if theta * startKappa < 0.0 or theta * endKappa < 0.0 or abs(theta) == math.pi:
+        raise ValueError("Angle >= 180 degrees for two point section")
+
     # Check data consistency
     crossCheck = startTangent[0] * pointDiff[1] - startTangent[1] * pointDiff[0]
     if crossCheck * startKappa < 0.0 or crossCheck * endKappa < 0.0: raise ValueError("Inconsistent start angle")
@@ -428,7 +468,7 @@ def section(xytk):
 
     # Compute intersection point of tangent directions
     tangentDistances = np.linalg.solve(np.array([startTangent, endTangent]).T, pointDiff)
-
+    frustum = startPoint + tangentDistances[0] * startTangent
 
     # Compute critical values for section algorithm
     onePlusCosTheta = 1.0 + math.cos(theta)
@@ -453,10 +493,10 @@ def section(xytk):
 
     # Generate the quartic section which interpolates the data
     pt0 = startPoint
-    pt1 = (1.0 - rho) * startPoint + rho *
-    pt3 = (1.0 - rho) * endPoint + rho *
+    pt1 = (1.0 - rho) * startPoint + rho * frustum
+    pt3 = (1.0 - rho) * endPoint + rho * frustum
     pt4 = endPoint
-    pt2 = alpha0 * pt1 + alpha1 * pt3 + (1.0 - alpha0 - alpha1) *
+    pt2 = alpha0 * pt1 + alpha1 * pt3 + (1.0 - alpha0 - alpha1) * frustum
     return bspy.Spline(1, 2, (5,), (5,), ((0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0),), (pt0, pt1, pt2, pt3, pt4))
 
     # Check that the input data is the right size and shape
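The section() changes above name the tangent-intersection point frustum and reject configurations that span 180 degrees or more. Together with the new circular_arc() from the first hunk, a two-line smoke test looks like this (a sketch; it assumes the usual bspy.Spline domain() and evaluate() methods on the returned spline):

    import numpy as np
    from bspy._spline_fitting import circular_arc

    arc = circular_arc(1.0, 90.0)           # unit-radius arc spanning 90 degrees, starting at (1, 0)
    (uMin, uMax), = arc.domain()             # parameter range chosen by section()
    print(arc.evaluate([uMin]), arc.evaluate([uMax]))   # expect roughly (1, 0) and (0, 1)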
bspy/_spline_operations.py
CHANGED
@@ -198,7 +198,7 @@ def contract(self, uvw):
 
 def cross(self, vector):
     if isinstance(vector, bspy.Spline):
-        return self.multiply(vector,
+        return self.multiply(vector, [(ix, ix) for ix in range(min(self.nInd, vector.nInd))], 'C')
     elif self.nDep == 3:
         if not(len(vector) == self.nDep): raise ValueError("Invalid vector")
 
@@ -241,7 +241,7 @@ def differentiate(self, with_respect_to = 0):
 
 def dot(self, vector):
     if isinstance(vector, bspy.Spline):
-        return self.multiply(vector,
+        return self.multiply(vector, [(ix, ix) for ix in range(min(self.nInd, vector.nInd))], 'D')
     else:
         if not(len(vector) == self.nDep): raise ValueError("Invalid vector")
 
@@ -252,6 +252,20 @@ def dot(self, vector):
         coefs = coefs.reshape(1, *coefs.shape)
     return type(self)(self.nInd, 1, self.order, self.nCoef, self.knots, coefs, self.accuracy, self.metadata)
 
+def graph(self):
+    splineDomain = self.domain()
+    uvwSplines = [bspy.Spline(1, 1, [2], [2], [[uLow, uLow, uHigh, uHigh]],
+        [[uLow, uHigh]]) for uLow, uHigh in splineDomain]
+    graphSpline = uvwSplines[0]
+    for nextSpline in uvwSplines[1:]:
+        graphMat = list(np.block([[np.identity(graphSpline.nInd)], [0.0]]))
+        nextMat = list(np.block([[np.zeros((graphSpline.nInd, 1))], [1.0]]))
+        graphSpline = (graphMat @ graphSpline).add(nextMat @ nextSpline)
+    graphMat = list(np.block([[np.identity(graphSpline.nInd)], [np.zeros((self.nDep, graphSpline.nInd))]]))
+    selfMat = list(np.block([[np.zeros((graphSpline.nInd, self.nDep))], [np.identity(self.nDep)]]))
+    finalGraph = graphMat @ graphSpline + selfMat @ self
+    return finalGraph
+
 def integrate(self, with_respect_to = 0):
     if not(0 <= with_respect_to < self.nInd): raise ValueError("Invalid with_respect_to")
 
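graph() is new in 2.1.0: it lifts a spline f(u) into its graph, a spline with the same parameterization whose dependent variables are (u, f(u)), so nDep grows by nInd. A small sketch (it assumes graph() is bound as a bspy.Spline method by spline.py):

    import bspy

    # A scalar linear spline y(u) = u on [0, 1], built with the same constructor pattern graph() itself uses.
    curve = bspy.Spline(1, 1, (2,), (2,), ((0.0, 0.0, 1.0, 1.0),), ((0.0, 1.0),))
    g = curve.graph()
    print(g.nInd, g.nDep)   # expect 1 2: the graph is the planar curve (u, y(u))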
@@ -285,7 +299,7 @@ def multiplyAndConvolve(self, other, indMap = None, productType = 'S'):
 
     if not(productType != 'D' or self.nDep == other.nDep): raise ValueError("Mismatched dimensions")
     if not(productType != 'C' or (self.nDep == other.nDep and 2 <= self.nDep <= 3)): raise ValueError("Mismatched dimensions")
-    if not(productType != 'S' or self.nDep == 1 or other.nDep == 1): raise ValueError("Mismatched dimensions")
+    if not(productType != 'S' or self.nDep == 1 or other.nDep == 1 or self.nDep == other.nDep): raise ValueError("Mismatched dimensions")
 
     # Ensure scalar spline (if any) comes first (simplifies array processing).
     if other.nDep == 1 and self.nDep > 1:
@@ -321,8 +335,11 @@ def multiplyAndConvolve(self, other, indMap = None, productType = 'S'):
             coefs += outer[i,i]
         coefs = np.expand_dims(coefs, axis=0)
         nDep = 1
-    else: # Scalar product
-        coefs = outer
+    else: # Scalar product
+        coefs = outer
+        for i in range(1, self.nDep):
+            coefs[0,i] = coefs[i,i]
+        coefs = coefs[0]
 
     if indMap is not None:
         indMap = indMap.copy() # Make a copy, since we change the list as we combine independent variables
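The two multiplyAndConvolve hunks above relax the 'S' (scalar) product so that two splines with the same nDep are also accepted, and the diagonal of the outer product is then kept, giving a component-wise product. A hedged sketch (it assumes scale() is exposed as a bspy.Spline method and accepts a spline argument, as the scale hunk below wires up):

    import bspy

    a = bspy.Spline(1, 2, (2,), (2,), ((0.0, 0.0, 1.0, 1.0),), ((0.0, 1.0), (1.0, 1.0)))   # (u, 1)
    b = bspy.Spline(1, 2, (2,), (2,), ((0.0, 0.0, 1.0, 1.0),), ((2.0, 2.0), (0.0, 1.0)))   # (2, u)
    c = a.scale(b)          # component-wise product over the shared parameter
    print(c.nDep)           # expect 2, with components (2u, u)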
@@ -702,29 +719,39 @@ def normal_spline(self, indices=None):
 
 def scale(self, multiplier):
     if isinstance(multiplier, bspy.Spline):
-        return self.multiply(multiplier,
+        return self.multiply(multiplier, [(ix, ix) for ix in range(min(self.nInd, multiplier.nInd))], 'S')
     else:
-        if not(np.isscalar(multiplier) or len(multiplier) == self.nDep): raise ValueError("Invalid multiplier")
-
         if np.isscalar(multiplier):
             accuracy = abs(multiplier) * self.accuracy
+            nDep = self.nDep
             coefs = multiplier * self.coefs
-
+        elif len(multiplier) == self.nDep:
             accuracy = np.linalg.norm(multiplier) * self.accuracy
+            nDep = self.nDep
             coefs = np.array(self.coefs)
-            for i in range(
+            for i in range(nDep):
                 coefs[i] *= multiplier[i]
-
+        elif self.nDep == 1:
+            accuracy = np.linalg.norm(multiplier) * self.accuracy
+            nDep = len(multiplier)
+            coefs = np.empty((nDep, *self.coefs.shape[1:]), self.coefs.dtype)
+            for i in range(nDep):
+                coefs[i] = multiplier[i] * self.coefs[0]
+        else:
+            raise ValueError("Invalid multiplier")
+        return type(self)(self.nInd, nDep, self.order, self.nCoef, self.knots, coefs, accuracy, self.metadata)
 
 def transform(self, matrix, maxSingularValue=None):
     if not(matrix.ndim == 2 and matrix.shape[1] == self.nDep): raise ValueError("Invalid matrix")
 
     if maxSingularValue is None:
         maxSingularValue = np.linalg.svd(matrix, compute_uv=False)[0]
-
-
+    swapped = np.swapaxes(self.coefs, 0, -2)
+    newCoefs = np.swapaxes(matrix @ swapped, 0, -2)
+    return type(self)(self.nInd, matrix.shape[0], self.order, self.nCoef, self.knots, newCoefs, maxSingularValue * self.accuracy, self.metadata)
 
 def translate(self, translationVector):
+    translationVector = np.atleast_1d(translationVector)
     if not(len(translationVector) == self.nDep): raise ValueError("Invalid translationVector")
 
     coefs = np.array(self.coefs)
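Two small quality-of-life changes close out the file: scale() on a scalar spline now accepts a vector multiplier (producing one dependent variable per component), and translate() runs its argument through np.atleast_1d so a bare scalar works for 1-dependent-variable splines. A sketch, again assuming the functions are bound as bspy.Spline methods:

    import bspy

    ramp = bspy.Spline(1, 1, (2,), (2,), ((0.0, 0.0, 1.0, 1.0),), ((0.0, 1.0),))   # s(u) = u
    v = ramp.scale((1.0, 2.0, 3.0))
    print(v.nDep)             # expect 3, components (u, 2u, 3u)
    shifted = ramp.translate(0.5)   # previously this required a 1-vector like [0.5]
    print(shifted.nDep)       # still 1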