spiro 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: 84d28bbf80c2fe0eb5bcbe8fe82a60109bfa2425
4
+ data.tar.gz: 1d989e78b0aa54e1738e8ea321cd6d66e0734e95
5
+ SHA512:
6
+ metadata.gz: 33f6c9af30e379a5390f2ecd549189a5b9f3f2e19ed8e84f806e91a9495b32fc208fb3735d1cdd69137b2383dc6c817c872395682fe392e87a21a4fc14a28972
7
+ data.tar.gz: 3cb174eac9af018d16b5f520c5efb068b3ab765b5bf0f6c66845c16f0c5119b79bfb0682275f468eb85e025c67d6b0812fcb6f09e0fb053c0190536cecfcfa90
@@ -0,0 +1,15 @@
1
+ /.bundle/
2
+ /.yardoc
3
+ /Gemfile.lock
4
+ /_yardoc/
5
+ /coverage/
6
+ /doc/
7
+ /pkg/
8
+ /spec/reports/
9
+ /tmp/
10
+ *.bundle
11
+ *.so
12
+ *.o
13
+ *.a
14
+ mkmf.log
15
+ spiro-*.gem
data/Gemfile ADDED
@@ -0,0 +1,4 @@
1
+ source 'https://rubygems.org'
2
+
3
+ # Specify your gem's dependencies in spiro.gemspec
4
+ gemspec
@@ -0,0 +1,22 @@
1
+ Copyright (c) 2015 Simon George
2
+
3
+ MIT License
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining
6
+ a copy of this software and associated documentation files (the
7
+ "Software"), to deal in the Software without restriction, including
8
+ without limitation the rights to use, copy, modify, merge, publish,
9
+ distribute, sublicense, and/or sell copies of the Software, and to
10
+ permit persons to whom the Software is furnished to do so, subject to
11
+ the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be
14
+ included in all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,31 @@
1
+ # Spiro
2
+
3
+ TODO: Write a gem description
4
+
5
+ ## Installation
6
+
7
+ Add this line to your application's Gemfile:
8
+
9
+ ```ruby
10
+ gem 'spiro'
11
+ ```
12
+
13
+ And then execute:
14
+
15
+ $ bundle
16
+
17
+ Or install it yourself as:
18
+
19
+ $ gem install spiro
20
+
21
+ ## Usage
22
+
23
+ TODO: Write usage instructions here
24
+
25
+ ## Contributing
26
+
27
+ 1. Fork it ( https://github.com/[my-github-username]/spiro/fork )
28
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
29
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
30
+ 4. Push to the branch (`git push origin my-new-feature`)
31
+ 5. Create a new Pull Request
@@ -0,0 +1,9 @@
1
+ require 'bundler/gem_tasks'
2
+ require 'rake/testtask'
3
+
4
+ Rake::TestTask.new do |t|
5
+ t.libs << 'test'
6
+ end
7
+
8
+ desc 'Run tests'
9
+ task :default => :test
@@ -0,0 +1,995 @@
1
+ //#ifdef HAVE_FINITE
2
+ /*#define IS_FINITE(x) finite(x)*/
3
+ //#else
4
+ #define IS_FINITE(x) isfinite(x)
5
+ //#endif
6
+
7
+ /*
8
+ ppedit - A pattern plate editor for Spiro splines.
9
+ Copyright (C) 2007 Raph Levien
10
+
11
+ This program is free software; you can redistribute it and/or
12
+ modify it under the terms of the GNU General Public License
13
+ as published by the Free Software Foundation; either version 3
14
+ of the License, or (at your option) any later version.
15
+
16
+ This program is distributed in the hope that it will be useful,
17
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
18
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19
+ GNU General Public License for more details.
20
+
21
+ You should have received a copy of the GNU General Public License
22
+ along with this program; if not, write to the Free Software
23
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
24
+ 02110-1301, USA.
25
+
26
+ */
27
+ /* C implementation of third-order polynomial spirals. */
28
+
29
+ #include <math.h>
30
+ #include <stdlib.h>
31
+ #include <string.h>
32
+
33
+ #include "bezctx_intf.h"
34
+ #include "_spiro.h"
35
+
36
+ //#include "spiro-config.h"
37
+ #ifdef VERBOSE
38
+ #include <stdio.h>
39
+ #endif
40
+
41
+ typedef struct {
42
+ double a[11]; /* band-diagonal matrix */
43
+ double al[5]; /* lower part of band-diagonal decomposition */
44
+ } bandmat;
45
+
46
+ #ifndef M_PI
47
+ #define M_PI 3.14159265358979323846 /* pi */
48
+ #endif
49
+
50
+ #ifndef N_IS
51
+ /* int n = 4; */
52
+ #define N_IS 4
53
+ #endif
54
+
55
+ #ifndef ORDER
56
+ #define ORDER 12
57
+ #endif
58
+
59
+ /* Integrate polynomial spiral curve over range -.5 .. .5. */
60
+ static void
61
+ integrate_spiro(const double ks[4], double xy[2], int n)
62
+ {
63
+ #if 0
64
+ int n = 1024;
65
+ #endif
66
+ double th1 = ks[0];
67
+ double th2 = .5 * ks[1];
68
+ double th3 = (1./6) * ks[2];
69
+ double th4 = (1./24) * ks[3];
70
+ double x, y;
71
+ double ds = 1. / n;
72
+ double ds2 = ds * ds;
73
+ double ds3 = ds2 * ds;
74
+ double k0 = ks[0] * ds;
75
+ double k1 = ks[1] * ds;
76
+ double k2 = ks[2] * ds;
77
+ double k3 = ks[3] * ds;
78
+ int i;
79
+ double s = .5 * ds - .5;
80
+
81
+ x = 0;
82
+ y = 0;
83
+
84
+ for (i = 0; i < n; i++) {
85
+
86
+ #if ORDER > 2
87
+ double u, v;
88
+ double km0, km1, km2, km3;
89
+
90
+ if (n == 1) {
91
+ km0 = k0;
92
+ km1 = k1 * ds;
93
+ km2 = k2 * ds2;
94
+ } else {
95
+ km0 = (((1./6) * k3 * s + .5 * k2) * s + k1) * s + k0;
96
+ km1 = ((.5 * k3 * s + k2) * s + k1) * ds;
97
+ km2 = (k3 * s + k2) * ds2;
98
+ }
99
+ km3 = k3 * ds3;
100
+ #endif
101
+
102
+ {
103
+
104
+ #if ORDER == 4
105
+ double km0_2 = km0 * km0;
106
+ u = 24 - km0_2;
107
+ v = km1;
108
+ #endif
109
+
110
+ #if ORDER == 6
111
+ double km0_2 = km0 * km0;
112
+ double km0_4 = km0_2 * km0_2;
113
+ u = 24 - km0_2 + (km0_4 - 4 * km0 * km2 - 3 * km1 * km1) * (1./80);
114
+ v = km1 + (km3 - 6 * km0_2 * km1) * (1./80);
115
+ #endif
116
+
117
+ #if ORDER == 8
118
+ double t1_1 = km0;
119
+ double t1_2 = .5 * km1;
120
+ double t1_3 = (1./6) * km2;
121
+ double t1_4 = (1./24) * km3;
122
+ double t2_2 = t1_1 * t1_1;
123
+ double t2_3 = 2 * (t1_1 * t1_2);
124
+ double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
125
+ double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
126
+ double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
127
+ double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
128
+ double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
129
+ double t4_4 = t2_2 * t2_2;
130
+ double t4_5 = 2 * (t2_2 * t2_3);
131
+ double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
132
+ double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
133
+ double t6_6 = t4_4 * t2_2;
134
+ u = 1;
135
+ v = 0;
136
+ v += (1./12) * t1_2 + (1./80) * t1_4;
137
+ u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6;
138
+ v -= (1./480) * t3_4 + (1./2688) * t3_6;
139
+ u += (1./1920) * t4_4 + (1./10752) * t4_6;
140
+ v += (1./53760) * t5_6;
141
+ u -= (1./322560) * t6_6;
142
+ #endif
143
+
144
+ #if ORDER == 10
145
+ double t1_1 = km0;
146
+ double t1_2 = .5 * km1;
147
+ double t1_3 = (1./6) * km2;
148
+ double t1_4 = (1./24) * km3;
149
+ double t2_2 = t1_1 * t1_1;
150
+ double t2_3 = 2 * (t1_1 * t1_2);
151
+ double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
152
+ double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
153
+ double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
154
+ double t2_7 = 2 * (t1_3 * t1_4);
155
+ double t2_8 = t1_4 * t1_4;
156
+ double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
157
+ double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
158
+ double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
159
+ double t4_4 = t2_2 * t2_2;
160
+ double t4_5 = 2 * (t2_2 * t2_3);
161
+ double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
162
+ double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
163
+ double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
164
+ double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
165
+ double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
166
+ double t6_6 = t4_4 * t2_2;
167
+ double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
168
+ double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
169
+ double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
170
+ double t8_8 = t6_6 * t2_2;
171
+ u = 1;
172
+ v = 0;
173
+ v += (1./12) * t1_2 + (1./80) * t1_4;
174
+ u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
175
+ v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8;
176
+ u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8;
177
+ v += (1./53760) * t5_6 + (1./276480) * t5_8;
178
+ u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8;
179
+ v -= (1./1.16122e+07) * t7_8;
180
+ u += (1./9.28973e+07) * t8_8;
181
+ #endif
182
+
183
+ #if ORDER == 12
184
+ double t1_1 = km0;
185
+ double t1_2 = .5 * km1;
186
+ double t1_3 = (1./6) * km2;
187
+ double t1_4 = (1./24) * km3;
188
+ double t2_2 = t1_1 * t1_1;
189
+ double t2_3 = 2 * (t1_1 * t1_2);
190
+ double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
191
+ double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
192
+ double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
193
+ double t2_7 = 2 * (t1_3 * t1_4);
194
+ double t2_8 = t1_4 * t1_4;
195
+ double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
196
+ double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
197
+ double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
198
+ double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
199
+ double t4_4 = t2_2 * t2_2;
200
+ double t4_5 = 2 * (t2_2 * t2_3);
201
+ double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
202
+ double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
203
+ double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
204
+ double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
205
+ double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
206
+ double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
207
+ double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
208
+ double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
209
+ double t6_6 = t4_4 * t2_2;
210
+ double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
211
+ double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
212
+ double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
213
+ double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
214
+ double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
215
+ double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
216
+ double t8_8 = t6_6 * t2_2;
217
+ double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
218
+ double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
219
+ double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
220
+ double t10_10 = t8_8 * t2_2;
221
+ u = 1;
222
+ v = 0;
223
+ v += (1./12) * t1_2 + (1./80) * t1_4;
224
+ u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
225
+ v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8 + (1./67584) * t3_10;
226
+ u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8 + (1./270336) * t4_10;
227
+ v += (1./53760) * t5_6 + (1./276480) * t5_8 + (1./1.35168e+06) * t5_10;
228
+ u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8 + (1./8.11008e+06) * t6_10;
229
+ v -= (1./1.16122e+07) * t7_8 + (1./5.67706e+07) * t7_10;
230
+ u += (1./9.28973e+07) * t8_8 + (1./4.54164e+08) * t8_10;
231
+ v += (1./4.08748e+09) * t9_10;
232
+ u -= (1./4.08748e+10) * t10_10;
233
+ #endif
234
+
235
+ #if ORDER == 14
236
+ double t1_1 = km0;
237
+ double t1_2 = .5 * km1;
238
+ double t1_3 = (1./6) * km2;
239
+ double t1_4 = (1./24) * km3;
240
+ double t2_2 = t1_1 * t1_1;
241
+ double t2_3 = 2 * (t1_1 * t1_2);
242
+ double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
243
+ double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
244
+ double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
245
+ double t2_7 = 2 * (t1_3 * t1_4);
246
+ double t2_8 = t1_4 * t1_4;
247
+ double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
248
+ double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
249
+ double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
250
+ double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
251
+ double t3_12 = t2_8 * t1_4;
252
+ double t4_4 = t2_2 * t2_2;
253
+ double t4_5 = 2 * (t2_2 * t2_3);
254
+ double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
255
+ double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
256
+ double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
257
+ double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
258
+ double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
259
+ double t4_11 = 2 * (t2_3 * t2_8 + t2_4 * t2_7 + t2_5 * t2_6);
260
+ double t4_12 = 2 * (t2_4 * t2_8 + t2_5 * t2_7) + t2_6 * t2_6;
261
+ double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
262
+ double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
263
+ double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
264
+ double t5_12 = t4_8 * t1_4 + t4_9 * t1_3 + t4_10 * t1_2 + t4_11 * t1_1;
265
+ double t6_6 = t4_4 * t2_2;
266
+ double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
267
+ double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
268
+ double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
269
+ double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
270
+ double t6_11 = t4_4 * t2_7 + t4_5 * t2_6 + t4_6 * t2_5 + t4_7 * t2_4 + t4_8 * t2_3 + t4_9 * t2_2;
271
+ double t6_12 = t4_4 * t2_8 + t4_5 * t2_7 + t4_6 * t2_6 + t4_7 * t2_5 + t4_8 * t2_4 + t4_9 * t2_3 + t4_10 * t2_2;
272
+ double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
273
+ double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
274
+ double t7_12 = t6_8 * t1_4 + t6_9 * t1_3 + t6_10 * t1_2 + t6_11 * t1_1;
275
+ double t8_8 = t6_6 * t2_2;
276
+ double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
277
+ double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
278
+ double t8_11 = t6_6 * t2_5 + t6_7 * t2_4 + t6_8 * t2_3 + t6_9 * t2_2;
279
+ double t8_12 = t6_6 * t2_6 + t6_7 * t2_5 + t6_8 * t2_4 + t6_9 * t2_3 + t6_10 * t2_2;
280
+ double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
281
+ double t9_12 = t8_8 * t1_4 + t8_9 * t1_3 + t8_10 * t1_2 + t8_11 * t1_1;
282
+ double t10_10 = t8_8 * t2_2;
283
+ double t10_11 = t8_8 * t2_3 + t8_9 * t2_2;
284
+ double t10_12 = t8_8 * t2_4 + t8_9 * t2_3 + t8_10 * t2_2;
285
+ double t11_12 = t10_10 * t1_2 + t10_11 * t1_1;
286
+ double t12_12 = t10_10 * t2_2;
287
+ u = 1;
288
+ v = 0;
289
+ v += (1./12) * t1_2 + (1./80) * t1_4;
290
+ u -= (1./24) * t2_2 + (1./160) * t2_4 + (1./896) * t2_6 + (1./4608) * t2_8;
291
+ v -= (1./480) * t3_4 + (1./2688) * t3_6 + (1./13824) * t3_8 + (1./67584) * t3_10 + (1./319488) * t3_12;
292
+ u += (1./1920) * t4_4 + (1./10752) * t4_6 + (1./55296) * t4_8 + (1./270336) * t4_10 + (1./1.27795e+06) * t4_12;
293
+ v += (1./53760) * t5_6 + (1./276480) * t5_8 + (1./1.35168e+06) * t5_10 + (1./6.38976e+06) * t5_12;
294
+ u -= (1./322560) * t6_6 + (1./1.65888e+06) * t6_8 + (1./8.11008e+06) * t6_10 + (1./3.83386e+07) * t6_12;
295
+ v -= (1./1.16122e+07) * t7_8 + (1./5.67706e+07) * t7_10 + (1./2.6837e+08) * t7_12;
296
+ u += (1./9.28973e+07) * t8_8 + (1./4.54164e+08) * t8_10 + (1./2.14696e+09) * t8_12;
297
+ v += (1./4.08748e+09) * t9_10 + (1./1.93226e+10) * t9_12;
298
+ u -= (1./4.08748e+10) * t10_10 + (1./1.93226e+11) * t10_12;
299
+ v -= (1./2.12549e+12) * t11_12;
300
+ u += (1./2.55059e+13) * t12_12;
301
+ #endif
302
+
303
+ #if ORDER == 16
304
+ double t1_1 = km0;
305
+ double t1_2 = .5 * km1;
306
+ double t1_3 = (1./6) * km2;
307
+ double t1_4 = (1./24) * km3;
308
+ double t2_2 = t1_1 * t1_1;
309
+ double t2_3 = 2 * (t1_1 * t1_2);
310
+ double t2_4 = 2 * (t1_1 * t1_3) + t1_2 * t1_2;
311
+ double t2_5 = 2 * (t1_1 * t1_4 + t1_2 * t1_3);
312
+ double t2_6 = 2 * (t1_2 * t1_4) + t1_3 * t1_3;
313
+ double t2_7 = 2 * (t1_3 * t1_4);
314
+ double t2_8 = t1_4 * t1_4;
315
+ double t3_4 = t2_2 * t1_2 + t2_3 * t1_1;
316
+ double t3_6 = t2_2 * t1_4 + t2_3 * t1_3 + t2_4 * t1_2 + t2_5 * t1_1;
317
+ double t3_8 = t2_4 * t1_4 + t2_5 * t1_3 + t2_6 * t1_2 + t2_7 * t1_1;
318
+ double t3_10 = t2_6 * t1_4 + t2_7 * t1_3 + t2_8 * t1_2;
319
+ double t3_12 = t2_8 * t1_4;
320
+ double t4_4 = t2_2 * t2_2;
321
+ double t4_5 = 2 * (t2_2 * t2_3);
322
+ double t4_6 = 2 * (t2_2 * t2_4) + t2_3 * t2_3;
323
+ double t4_7 = 2 * (t2_2 * t2_5 + t2_3 * t2_4);
324
+ double t4_8 = 2 * (t2_2 * t2_6 + t2_3 * t2_5) + t2_4 * t2_4;
325
+ double t4_9 = 2 * (t2_2 * t2_7 + t2_3 * t2_6 + t2_4 * t2_5);
326
+ double t4_10 = 2 * (t2_2 * t2_8 + t2_3 * t2_7 + t2_4 * t2_6) + t2_5 * t2_5;
327
+ double t4_11 = 2 * (t2_3 * t2_8 + t2_4 * t2_7 + t2_5 * t2_6);
328
+ double t4_12 = 2 * (t2_4 * t2_8 + t2_5 * t2_7) + t2_6 * t2_6;
329
+ double t4_13 = 2 * (t2_5 * t2_8 + t2_6 * t2_7);
330
+ double t4_14 = 2 * (t2_6 * t2_8) + t2_7 * t2_7;
331
+ double t5_6 = t4_4 * t1_2 + t4_5 * t1_1;
332
+ double t5_8 = t4_4 * t1_4 + t4_5 * t1_3 + t4_6 * t1_2 + t4_7 * t1_1;
333
+ double t5_10 = t4_6 * t1_4 + t4_7 * t1_3 + t4_8 * t1_2 + t4_9 * t1_1;
334
+ double t5_12 = t4_8 * t1_4 + t4_9 * t1_3 + t4_10 * t1_2 + t4_11 * t1_1;
335
+ double t5_14 = t4_10 * t1_4 + t4_11 * t1_3 + t4_12 * t1_2 + t4_13 * t1_1;
336
+ double t6_6 = t4_4 * t2_2;
337
+ double t6_7 = t4_4 * t2_3 + t4_5 * t2_2;
338
+ double t6_8 = t4_4 * t2_4 + t4_5 * t2_3 + t4_6 * t2_2;
339
+ double t6_9 = t4_4 * t2_5 + t4_5 * t2_4 + t4_6 * t2_3 + t4_7 * t2_2;
340
+ double t6_10 = t4_4 * t2_6 + t4_5 * t2_5 + t4_6 * t2_4 + t4_7 * t2_3 + t4_8 * t2_2;
341
+ double t6_11 = t4_4 * t2_7 + t4_5 * t2_6 + t4_6 * t2_5 + t4_7 * t2_4 + t4_8 * t2_3 + t4_9 * t2_2;
342
+ double t6_12 = t4_4 * t2_8 + t4_5 * t2_7 + t4_6 * t2_6 + t4_7 * t2_5 + t4_8 * t2_4 + t4_9 * t2_3 + t4_10 * t2_2;
343
+ double t6_13 = t4_5 * t2_8 + t4_6 * t2_7 + t4_7 * t2_6 + t4_8 * t2_5 + t4_9 * t2_4 + t4_10 * t2_3 + t4_11 * t2_2;
344
+ double t6_14 = t4_6 * t2_8 + t4_7 * t2_7 + t4_8 * t2_6 + t4_9 * t2_5 + t4_10 * t2_4 + t4_11 * t2_3 + t4_12 * t2_2;
345
+ double t7_8 = t6_6 * t1_2 + t6_7 * t1_1;
346
+ double t7_10 = t6_6 * t1_4 + t6_7 * t1_3 + t6_8 * t1_2 + t6_9 * t1_1;
347
+ double t7_12 = t6_8 * t1_4 + t6_9 * t1_3 + t6_10 * t1_2 + t6_11 * t1_1;
348
+ double t7_14 = t6_10 * t1_4 + t6_11 * t1_3 + t6_12 * t1_2 + t6_13 * t1_1;
349
+ double t8_8 = t6_6 * t2_2;
350
+ double t8_9 = t6_6 * t2_3 + t6_7 * t2_2;
351
+ double t8_10 = t6_6 * t2_4 + t6_7 * t2_3 + t6_8 * t2_2;
352
+ double t8_11 = t6_6 * t2_5 + t6_7 * t2_4 + t6_8 * t2_3 + t6_9 * t2_2;
353
+ double t8_12 = t6_6 * t2_6 + t6_7 * t2_5 + t6_8 * t2_4 + t6_9 * t2_3 + t6_10 * t2_2;
354
+ double t8_13 = t6_6 * t2_7 + t6_7 * t2_6 + t6_8 * t2_5 + t6_9 * t2_4 + t6_10 * t2_3 + t6_11 * t2_2;
355
+ double t8_14 = t6_6 * t2_8 + t6_7 * t2_7 + t6_8 * t2_6 + t6_9 * t2_5 + t6_10 * t2_4 + t6_11 * t2_3 + t6_12 * t2_2;
356
+ double t9_10 = t8_8 * t1_2 + t8_9 * t1_1;
357
+ double t9_12 = t8_8 * t1_4 + t8_9 * t1_3 + t8_10 * t1_2 + t8_11 * t1_1;
358
+ double t9_14 = t8_10 * t1_4 + t8_11 * t1_3 + t8_12 * t1_2 + t8_13 * t1_1;
359
+ double t10_10 = t8_8 * t2_2;
360
+ double t10_11 = t8_8 * t2_3 + t8_9 * t2_2;
361
+ double t10_12 = t8_8 * t2_4 + t8_9 * t2_3 + t8_10 * t2_2;
362
+ double t10_13 = t8_8 * t2_5 + t8_9 * t2_4 + t8_10 * t2_3 + t8_11 * t2_2;
363
+ double t10_14 = t8_8 * t2_6 + t8_9 * t2_5 + t8_10 * t2_4 + t8_11 * t2_3 + t8_12 * t2_2;
364
+ double t11_12 = t10_10 * t1_2 + t10_11 * t1_1;
365
+ double t11_14 = t10_10 * t1_4 + t10_11 * t1_3 + t10_12 * t1_2 + t10_13 * t1_1;
366
+ double t12_12 = t10_10 * t2_2;
367
+ double t12_13 = t10_10 * t2_3 + t10_11 * t2_2;
368
+ double t12_14 = t10_10 * t2_4 + t10_11 * t2_3 + t10_12 * t2_2;
369
+ double t13_14 = t12_12 * t1_2 + t12_13 * t1_1;
370
+ double t14_14 = t12_12 * t2_2;
371
+ u = 1;
372
+ u -= 1./24 * t2_2 + 1./160 * t2_4 + 1./896 * t2_6 + 1./4608 * t2_8;
373
+ u += 1./1920 * t4_4 + 1./10752 * t4_6 + 1./55296 * t4_8 + 1./270336 * t4_10 + 1./1277952 * t4_12 + 1./5898240 * t4_14;
374
+ u -= 1./322560 * t6_6 + 1./1658880 * t6_8 + 1./8110080 * t6_10 + 1./38338560 * t6_12 + 1./176947200 * t6_14;
375
+ u += 1./92897280 * t8_8 + 1./454164480 * t8_10 + 4.6577500191e-10 * t8_12 + 1.0091791708e-10 * t8_14;
376
+ u -= 2.4464949595e-11 * t10_10 + 5.1752777990e-12 * t10_12 + 1.1213101898e-12 * t10_14;
377
+ u += 3.9206649992e-14 * t12_12 + 8.4947741650e-15 * t12_14;
378
+ u -= 4.6674583324e-17 * t14_14;
379
+ v = 0;
380
+ v += 1./12 * t1_2 + 1./80 * t1_4;
381
+ v -= 1./480 * t3_4 + 1./2688 * t3_6 + 1./13824 * t3_8 + 1./67584 * t3_10 + 1./319488 * t3_12;
382
+ v += 1./53760 * t5_6 + 1./276480 * t5_8 + 1./1351680 * t5_10 + 1./6389760 * t5_12 + 1./29491200 * t5_14;
383
+ v -= 1./11612160 * t7_8 + 1./56770560 * t7_10 + 1./268369920 * t7_12 + 8.0734333664e-10 * t7_14;
384
+ v += 2.4464949595e-10 * t9_10 + 5.1752777990e-11 * t9_12 + 1.1213101898e-11 * t9_14;
385
+ v -= 4.7047979991e-13 * t11_12 + 1.0193728998e-13 * t11_14;
386
+ v += 6.5344416654e-16 * t13_14;
387
+ #endif
388
+
389
+ }
390
+
391
+ if (n == 1) {
392
+ #if ORDER == 2
393
+ x = 1;
394
+ y = 0;
395
+ #else
396
+ x = u;
397
+ y = v;
398
+ #endif
399
+ } else {
400
+ double th = (((th4 * s + th3) * s + th2) * s + th1) * s;
401
+ double cth = cos(th);
402
+ double sth = sin(th);
403
+
404
+ #if ORDER == 2
405
+ x += cth;
406
+ y += sth;
407
+ #else
408
+ x += cth * u - sth * v;
409
+ y += cth * v + sth * u;
410
+ #endif
411
+ s += ds;
412
+ }
413
+ }
414
+
415
+ #if ORDER == 4 || ORDER == 6
416
+ xy[0] = x * (1./24 * ds);
417
+ xy[1] = y * (1./24 * ds);
418
+ #else
419
+ xy[0] = x * ds;
420
+ xy[1] = y * ds;
421
+ #endif
422
+ }
423
+
424
+ static double
425
+ compute_ends(const double ks[4], double ends[2][4], double seg_ch)
426
+ {
427
+ double xy[2];
428
+ double ch, th;
429
+ double l, l2, l3;
430
+ double th_even, th_odd;
431
+ double k0_even, k0_odd;
432
+ double k1_even, k1_odd;
433
+ double k2_even, k2_odd;
434
+
435
+ integrate_spiro(ks, xy, N_IS);
436
+ ch = hypot(xy[0], xy[1]);
437
+ th = atan2(xy[1], xy[0]);
438
+ l = ch / seg_ch;
439
+
440
+ th_even = .5 * ks[0] + (1./48) * ks[2];
441
+ th_odd = .125 * ks[1] + (1./384) * ks[3] - th;
442
+ ends[0][0] = th_even - th_odd;
443
+ ends[1][0] = th_even + th_odd;
444
+ k0_even = l * (ks[0] + .125 * ks[2]);
445
+ k0_odd = l * (.5 * ks[1] + (1./48) * ks[3]);
446
+ ends[0][1] = k0_even - k0_odd;
447
+ ends[1][1] = k0_even + k0_odd;
448
+ l2 = l * l;
449
+ k1_even = l2 * (ks[1] + .125 * ks[3]);
450
+ k1_odd = l2 * .5 * ks[2];
451
+ ends[0][2] = k1_even - k1_odd;
452
+ ends[1][2] = k1_even + k1_odd;
453
+ l3 = l2 * l;
454
+ k2_even = l3 * ks[2];
455
+ k2_odd = l3 * .5 * ks[3];
456
+ ends[0][3] = k2_even - k2_odd;
457
+ ends[1][3] = k2_even + k2_odd;
458
+
459
+ return l;
460
+ }
461
+
462
+ static void
463
+ compute_pderivs(const spiro_seg *s, double ends[2][4], double derivs[4][2][4],
464
+ int jinc)
465
+ {
466
+ double recip_d = 2e6;
467
+ double delta = 1./ recip_d;
468
+ double try_ks[4];
469
+ double try_ends[2][4];
470
+ int i, j, k;
471
+
472
+ compute_ends(s->ks, ends, s->seg_ch);
473
+ for (i = 0; i < jinc; i++) {
474
+ for (j = 0; j < 4; j++)
475
+ try_ks[j] = s->ks[j];
476
+ try_ks[i] += delta;
477
+ compute_ends(try_ks, try_ends, s->seg_ch);
478
+ for (k = 0; k < 2; k++)
479
+ for (j = 0; j < 4; j++)
480
+ derivs[j][k][i] = recip_d * (try_ends[k][j] - ends[k][j]);
481
+ }
482
+ }
483
+
484
+ static double
485
+ mod_2pi(double th)
486
+ {
487
+ double u = th / (2 * M_PI);
488
+ return 2 * M_PI * (u - floor(u + 0.5));
489
+ }
490
+
491
+ static spiro_seg *
492
+ setup_path(const spiro_cp *src, int n)
493
+ {
494
+ int i, ilast, n_seg;
495
+ spiro_seg *r;
496
+
497
+ n_seg = src[0].ty == '{' ? n - 1 : n;
498
+ r = (spiro_seg *)malloc((n_seg + 1) * sizeof(spiro_seg));
499
+ if ( r==NULL ) return 0;
500
+
501
+ for (i = 0; i < n_seg; i++) {
502
+ r[i].x = src[i].x;
503
+ r[i].y = src[i].y;
504
+ r[i].ty = src[i].ty;
505
+ r[i].ks[0] = 0.;
506
+ r[i].ks[1] = 0.;
507
+ r[i].ks[2] = 0.;
508
+ r[i].ks[3] = 0.;
509
+ }
510
+ r[n_seg].x = src[n_seg % n].x;
511
+ r[n_seg].y = src[n_seg % n].y;
512
+ r[n_seg].ty = src[n_seg % n].ty;
513
+
514
+ #ifdef CHECK_INPUT_FINITENESS
515
+ /* Verify that input values are within realistic limits */
516
+ for (i = 0; i < n; i++) {
517
+ if (IS_FINITE(r[i].x)==0 || IS_FINITE(r[i].y)==0) {
518
+ #ifdef VERBOSE
519
+ fprintf(stderr, "ERROR: LibSpiro: #%d={'%c',%g,%g} is not finite.\n", \
520
+ i, src[i].ty, r[i].x, r[i].y);
521
+ #endif
522
+ free(r);
523
+ return 0;
524
+ }
525
+ }
526
+ #endif
527
+
528
+ for (i = 0; i < n_seg; i++) {
529
+ double dx = r[i + 1].x - r[i].x;
530
+ double dy = r[i + 1].y - r[i].y;
531
+ #ifndef CHECK_INPUT_FINITENESS
532
+ r[i].seg_ch = hypot(dx, dy);
533
+ #else
534
+ if (IS_FINITE(dx)==0 || IS_FINITE(dy)==0 || \
535
+ IS_FINITE((r[i].seg_ch = hypot(dx, dy)))==0) {
536
+ #ifdef VERBOSE
537
+ fprintf(stderr, "ERROR: LibSpiro: #%d={'%c',%g,%g} hypot error.\n", \
538
+ i, src[i].ty, r[i].x, r[i].y);
539
+ #endif
540
+ free(r);
541
+ return 0;
542
+ }
543
+ #endif
544
+ r[i].seg_th = atan2(dy, dx);
545
+ }
546
+
547
+ ilast = n_seg - 1;
548
+ for (i = 0; i < n_seg; i++) {
549
+ if (r[i].ty == '{' || r[i].ty == '}' || r[i].ty == 'v')
550
+ r[i].bend_th = 0.;
551
+ else
552
+ r[i].bend_th = mod_2pi(r[i].seg_th - r[ilast].seg_th);
553
+ ilast = i;
554
+ #ifdef VERBOSE
555
+ printf("input #%d={'%c',%g,%g}, hypot=%g, atan2=%g, bend_th=%g\n", \
556
+ i, src[i].ty, r[i].x, r[i].y, r[i]. seg_th, r[i].seg_th, r[i].bend_th);
557
+ #endif
558
+ }
559
+ #ifdef VERBOSE
560
+ if (n_seg < n)
561
+ printf("input #%d={'%c',%g,%g}\n", i, src[i].ty, r[i].x, r[i].y);
562
+ #endif
563
+ return r;
564
+ }
565
+
566
/*
 * In-place LU decomposition of the 11-wide band matrix m[0..n-1] with
 * partial pivoting.  perm[k] records the row swapped into position k;
 * the elimination multipliers are stored in m[k].al for banbks11.
 */
static void
bandec11(bandmat *m, int *perm, int n)
{
    int i, j, k;
    int l;

    /* pack top triangle to the left. */
    for (i = 0; i < 5; i++) {
        for (j = 0; j < i + 6; j++)
            m[i].a[j] = m[i].a[j + 5 - i];
        for (; j < 11; j++)
            m[i].a[j] = 0.;
    }
    l = 5;
    for (k = 0; k < n; k++) {
        int pivot = k;
        double pivot_val = m[k].a[0];
        double pivot_scale;

        /* l tracks the last row the band can reach from row k */
        l = l < n ? l + 1 : n;

        /* choose the largest-magnitude pivot within the band */
        for (j = k + 1; j < l; j++)
            if (fabs(m[j].a[0]) > fabs(pivot_val)) {
                pivot_val = m[j].a[0];
                pivot = j;
            }

        perm[k] = pivot;
        if (pivot != k) {
            for (j = 0; j < 11; j++) {
                double tmp = m[k].a[j];
                m[k].a[j] = m[pivot].a[j];
                m[pivot].a[j] = tmp;
            }
        }

        /* clamp a (near-)singular pivot rather than dividing by ~0 */
        if (fabs(pivot_val) < 1e-12) pivot_val = 1e-12;
        pivot_scale = 1. / pivot_val;
        for (i = k + 1; i < l; i++) {
            double x = m[i].a[0] * pivot_scale;
            m[k].al[i - k - 1] = x;   /* remember the multiplier */
            for (j = 1; j < 11; j++)
                m[i].a[j - 1] = m[i].a[j] - x * m[k].a[j];
            m[i].a[10] = 0.;
        }
    }
}
613
+
614
/*
 * Companion to bandec11: solve the decomposed band system for the
 * right-hand side v, which is overwritten with the solution.
 */
static void
banbks11(const bandmat *m, const int *perm, double *v, int n)
{
    int i, k, l;

    /* forward substitution */
    l = 5;
    for (k = 0; k < n; k++) {
        /* replay the row permutation recorded during decomposition */
        i = perm[k];
        if (i != k) {
            double tmp = v[k];
            v[k] = v[i];
            v[i] = tmp;
        }
        if (l < n) l++;
        for (i = k + 1; i < l; i++)
            v[i] -= m[k].al[i - k - 1] * v[k];
    }

    /* back substitution */
    l = 1;
    for (i = n - 1; i >= 0; i--) {
        double x = v[i];
        for (k = 1; k < l; k++)
            x -= m[i].a[k] * v[k + i];
        v[i] = x / m[i].a[0];
        if (l < 11) l++;
    }
}
643
+
644
/*
 * Number of free spiral coefficients contributed by the segment whose
 * endpoint types are ty0 (left) and ty1 (right): 4, 2, 1 or 0
 * depending on how much continuity the endpoint types demand.
 */
static int compute_jinc(char ty0, char ty1)
{
    /* Smooth 'o' points (or the open side of '['/']') need all four. */
    if (ty0 == 'o' || ty1 == 'o' || ty0 == ']' || ty1 == '[')
        return 4;
    /* Two curve-to-curve points share curvature: two unknowns. */
    if (ty0 == 'c' && ty1 == 'c')
        return 2;
    /* A curve point against a corner or an open end: one unknown. */
    if ((ty1 == 'c' && (ty0 == '{' || ty0 == 'v' || ty0 == '[')) ||
        (ty0 == 'c' && (ty1 == '}' || ty1 == 'v' || ty1 == ']')))
        return 1;
    return 0;
}
657
+
658
+ static int count_vec(const spiro_seg *s, int nseg)
659
+ {
660
+ int i;
661
+ int n = 0;
662
+
663
+ for (i = 0; i < nseg; i++)
664
+ n += compute_jinc(s[i].ty, s[i + 1].ty);
665
+ return n;
666
+ }
667
+
668
/*
 * Accumulate one constraint row of the Newton system: add residual x
 * to v[jj] and the y-scaled derivative row into band row jj of m,
 * starting at the column offset implied by segment column j.
 * jj < 0 means "this constraint is absent" and is a no-op.
 */
static void
add_mat_line(bandmat *m, double *v,
	     double derivs[4], double x, double y, int j, int jj, int jinc,
	     int nmat)
{
    int k;

    if (jj >= 0) {
        /* Map the global column j to an offset in the 11-wide band of
           row jj; very small systems need special-cased offsets. */
        int joff = (j + 5 - jj + nmat) % nmat;
        if (nmat < 6) {
            joff = j + 5 - jj;
        } else if (nmat == 6) {
            joff = 2 + (j + 3 - jj + nmat) % nmat;
        }
#ifdef VERBOSE
        printf("add_mat_line j=%d jj=%d jinc=%d nmat=%d joff=%d\n", j, jj, jinc, nmat, joff);
#endif
        v[jj] += x;
        for (k = 0; k < jinc; k++)
            m[jj].a[joff + k] += y * derivs[k];
    }
}
690
+
691
/*
 * One Newton iteration of the spiro solver: assemble the banded
 * linearized constraint system for the n segments, solve it, and add
 * the correction to each segment's spiral coefficients.  Returns the
 * squared norm of the correction (convergence measure).
 * m, perm, v must have room for 3*nmat entries when the path is
 * cyclic (see the memcpy tripling below).
 */
static double
spiro_iter(spiro_seg *s, bandmat *m, int *perm, double *v, int n, int nmat)
{
    /* A path not starting with '{' or 'v' closes on itself. */
    int cyclic = s[0].ty != '{' && s[0].ty != 'v';
    int i, j, jj;
    double norm;
    int n_invert;

    /* Clear the matrix, multipliers and right-hand side. */
    for (i = 0; i < nmat; i++) {
        v[i] = 0.;
        for (j = 0; j < 11; j++)
            m[i].a[j] = 0.;
        for (j = 0; j < 5; j++)
            m[i].al[j] = 0.;
    }

    /* j: column of the first unknown; jj: row of the next constraint.
       Cyclic 'o'/'c' starts wrap the first constraints to the end. */
    j = 0;
    if (s[0].ty == 'o')
        jj = nmat - 2;
    else if (s[0].ty == 'c')
        jj = nmat - 1;
    else
        jj = 0;
    for (i = 0; i < n; i++) {
        char ty0 = s[i].ty;
        char ty1 = s[i + 1].ty;
        int jinc = compute_jinc(ty0, ty1);
        double th = s[i].bend_th;
        double ends[2][4];
        double derivs[4][2][4];
        /* Constraint row indices; -1 = constraint not present. */
        int jthl = -1, jk0l = -1, jk1l = -1, jk2l = -1;
        int jthr = -1, jk0r = -1, jk1r = -1, jk2r = -1;

        compute_pderivs(&s[i], ends, derivs, jinc);

        /* constraints crossing left */
        if (ty0 == 'o' || ty0 == 'c' || ty0 == '[' || ty0 == ']') {
            jthl = jj++;
            jj %= nmat;
            jk0l = jj++;
        }
        if (ty0 == 'o') {
            jj %= nmat;
            jk1l = jj++;
            jk2l = jj++;
        }

        /* constraints on left */
        if ((ty0 == '[' || ty0 == 'v' || ty0 == '{' || ty0 == 'c') &&
            jinc == 4) {
            if (ty0 != 'c')
                jk1l = jj++;
            jk2l = jj++;
        }

        /* constraints on right */
        if ((ty1 == ']' || ty1 == 'v' || ty1 == '}' || ty1 == 'c') &&
            jinc == 4) {
            if (ty1 != 'c')
                jk1r = jj++;
            jk2r = jj++;
        }

        /* constraints crossing right */
        if (ty1 == 'o' || ty1 == 'c' || ty1 == '[' || ty1 == ']') {
            jthr = jj;
            jk0r = (jj + 1) % nmat;
        }
        if (ty1 == 'o') {
            jk1r = (jj + 2) % nmat;
            jk2r = (jj + 3) % nmat;
        }

        /* Residuals: bend angle vs theta, and curvature matching. */
        add_mat_line(m, v, derivs[0][0], th - ends[0][0], 1, j, jthl, jinc, nmat);
        add_mat_line(m, v, derivs[1][0], ends[0][1], -1, j, jk0l, jinc, nmat);
        add_mat_line(m, v, derivs[2][0], ends[0][2], -1, j, jk1l, jinc, nmat);
        add_mat_line(m, v, derivs[3][0], ends[0][3], -1, j, jk2l, jinc, nmat);
        add_mat_line(m, v, derivs[0][1], -ends[1][0], 1, j, jthr, jinc, nmat);
        add_mat_line(m, v, derivs[1][1], -ends[1][1], 1, j, jk0r, jinc, nmat);
        add_mat_line(m, v, derivs[2][1], -ends[1][2], 1, j, jk1r, jinc, nmat);
        add_mat_line(m, v, derivs[3][1], -ends[1][3], 1, j, jk2r, jinc, nmat);
        /* Keep angle residuals on the principal branch. */
        if (jthl >= 0)
            v[jthl] = mod_2pi(v[jthl]);
        if (jthr >= 0)
            v[jthr] = mod_2pi(v[jthr]);
        j += jinc;
    }
    if (cyclic) {
        /* Triplicate the system so the band solver can handle the
           wrap-around coupling; read the middle copy's solution. */
        memcpy(m + nmat, m, sizeof(bandmat) * nmat);
        memcpy(m + 2 * nmat, m, sizeof(bandmat) * nmat);
        memcpy(v + nmat, v, sizeof(double) * nmat);
        memcpy(v + 2 * nmat, v, sizeof(double) * nmat);
        n_invert = 3 * nmat;
        j = nmat;
    } else {
        n_invert = nmat;
        j = 0;
    }

#ifdef VERBOSE
    for (i = 0; i < n; i++) {
        int k;
        for (k = 0; k < 11; k++)
            printf(" %2.4f", m[i].a[k]);
        printf(": %2.4f\n", v[i]);
    }
    printf("---\n");
#endif
    bandec11(m, perm, n_invert);
    banbks11(m, perm, v, n_invert);
    norm = 0.;
    for (i = 0; i < n; i++) {
        int jinc = compute_jinc(s[i].ty, s[i + 1].ty);
        int k;

        /* Apply the correction and accumulate its squared norm. */
        for (k = 0; k < jinc; k++) {
            double dk = v[j++];

#ifdef VERBOSE
            printf("s[%d].ks[%d] += %f\n", i, k, dk);
#endif
            s[i].ks[k] += dk;
            norm += dk * dk;
        }
        /* Keep the total turning ks[0] within (-2pi, 2pi]. */
        s[i].ks[0] = 2.0*mod_2pi(s[i].ks[0]/2.0);
    }
    return norm;
}
819
+
820
+ static int
821
+ check_finiteness(spiro_seg * segs, int num_segs)
822
+ {
823
+ /* Check if all values are "finite", return true=0, else return fail=-1 */
824
+ int i, j;
825
+ for (i = 0; i < num_segs; ++i)
826
+ for (j = 0; j < 4; ++j)
827
+ if ( IS_FINITE( segs[i].ks[j])==0 ) return -1;
828
+ return 0;
829
+ }
830
+
831
+ static int
832
+ solve_spiro(spiro_seg *s, int nseg)
833
+ {
834
+ int i, converged;
835
+ bandmat *m;
836
+ double *v;
837
+ int *perm;
838
+ int nmat = count_vec(s, nseg);
839
+ int n_alloc = nmat;
840
+ double norm;
841
+
842
+ if (nmat == 0)
843
+ return 1; /* just means no convergence problems */
844
+ if (s[0].ty != '{' && s[0].ty != 'v')
845
+ n_alloc *= 3;
846
+ if (n_alloc < 5)
847
+ n_alloc = 5;
848
+ m = (bandmat *)malloc(sizeof(bandmat) * n_alloc);
849
+ v = (double *)malloc(sizeof(double) * n_alloc);
850
+ perm = (int *)malloc(sizeof(int) * n_alloc);
851
+
852
+ i = converged = 0; /* not solved (yet) */
853
+ if ( m!=NULL && v!=NULL && perm!=NULL ) {
854
+ while (i++ < 60) {
855
+ norm = spiro_iter(s, m, perm, v, nseg, nmat);
856
+ #ifdef VERBOSE
857
+ printf("iteration #%d, %% norm = %g\n", i, norm);
858
+ #endif
859
+ if (check_finiteness(s, nseg)) break;
860
+ if (norm < 1e-12) { converged = 1; break; }
861
+ }
862
+ #ifdef VERBOSE
863
+ if (converged==0)
864
+ fprintf(stderr, "ERROR: LibSpiro: failed to converge after %d attempts to converge.\n", i);
865
+ } else {
866
+ fprintf(stderr, "ERROR: LibSpiro: failed to alloc memory.\n");
867
+ #endif
868
+ }
869
+
870
+ free(m);
871
+ free(v);
872
+ free(perm);
873
+ return converged;
874
+ }
875
+
876
static void
spiro_seg_to_bpath(const double ks[4],
		   double x0, double y0, double x1, double y1,
		   bezctx *bc, int depth)
{
    /* Emit one spiro segment into the bezier context bc: as a lineto
     * when nearly straight, as a single cubic when gently bent, or by
     * recursive midpoint subdivision otherwise.
     * ks[0..3]  curvature polynomial coefficients of the segment
     * (x0,y0)-(x1,y1)  segment endpoints in output coordinates
     * depth  recursion depth, capped at 5 subdivisions below */

    /* weighted magnitude of the curvature coefficients; ~0 means the
     * segment is essentially a straight line */
    double bend = fabs(ks[0]) + fabs(.5 * ks[1]) + fabs(.125 * ks[2]) +
	fabs((1./48) * ks[3]);

    if (bend <= 1e-8) {
	bezctx_lineto(bc, x1, y1);
    } else {
	double seg_ch = hypot(x1 - x0, y1 - y0);	/* chord length */
	double seg_th = atan2(y1 - y0, x1 - x0);	/* chord angle */
	double xy[2];
	double ch, th;
	double scale, rot;
	double th_even, th_odd;
	double ul, vl;
	double ur, vr;

	/* integrate the spiral in canonical coordinates, then derive the
	 * scale and rotation mapping it onto the actual chord */
	integrate_spiro(ks, xy, N_IS);
	ch = hypot(xy[0], xy[1]);
	th = atan2(xy[1], xy[0]);
	scale = seg_ch / ch;
	rot = seg_th - th;
	if (depth > 5 || bend < 1.) {
	    /* gently bent, or recursion limit reached: approximate with
	     * one cubic whose control arms follow the end tangent angles.
	     * NOTE(review): the fractional coefficients appear to be a
	     * series expansion of the end tangents from ks -- confirm
	     * against the derivation before altering them. */
	    th_even = (1./384) * ks[3] + (1./8) * ks[1] + rot;
	    th_odd = (1./48) * ks[2] + .5 * ks[0];
	    ul = (scale * (1./3)) * cos(th_even - th_odd);
	    vl = (scale * (1./3)) * sin(th_even - th_odd);
	    ur = (scale * (1./3)) * cos(th_even + th_odd);
	    vr = (scale * (1./3)) * sin(th_even + th_odd);
	    bezctx_curveto(bc, x0 + ul, y0 + vl, x1 - ur, y1 - vr, x1, y1);
	} else {
	    /* subdivide */
	    double ksub[4];
	    double thsub;
	    double xysub[2];
	    double xmid, ymid;
	    double cth, sth;

	    /* reparameterize ks onto the first half of the segment */
	    ksub[0] = .5 * ks[0] - .125 * ks[1] + (1./64) * ks[2] - (1./768) * ks[3];
	    ksub[1] = .25 * ks[1] - (1./16) * ks[2] + (1./128) * ks[3];
	    ksub[2] = .125 * ks[2] - (1./32) * ks[3];
	    ksub[3] = (1./16) * ks[3];
	    /* tangent angle at the segment midpoint */
	    thsub = rot - .25 * ks[0] + (1./32) * ks[1] - (1./384) * ks[2] + (1./6144) * ks[3];
	    cth = .5 * scale * cos(thsub);
	    sth = .5 * scale * sin(thsub);
	    /* locate the midpoint by integrating the first half-segment */
	    integrate_spiro(ksub, xysub, N_IS);
	    xmid = x0 + cth * xysub[0] - sth * xysub[1];
	    ymid = y0 + cth * xysub[1] + sth * xysub[0];
	    spiro_seg_to_bpath(ksub, x0, y0, xmid, ymid, bc, depth + 1);
	    /* shift ksub to describe the second half, then recurse */
	    ksub[0] += .25 * ks[1] + (1./384) * ks[3];
	    ksub[1] += .125 * ks[2];
	    ksub[2] += (1./16) * ks[3];
	    spiro_seg_to_bpath(ksub, xmid, ymid, x1, y1, bc, depth + 1);
	}
    }
}
935
+
936
+ spiro_seg *
937
+ run_spiro(const spiro_cp *src, int n)
938
+ {
939
+ int converged, nseg;
940
+ spiro_seg *s;
941
+
942
+ if (src==NULL || n <= 0) return 0;
943
+
944
+ s = setup_path(src, n);
945
+ if (s) {
946
+ nseg = src[0].ty == '{' ? n - 1 : n;
947
+ converged = 1 ; /* this value is for when nseg == 1; else actual value determined below */
948
+ if (nseg > 1) converged = solve_spiro(s, nseg);
949
+ if (converged) return s;
950
+ free(s);
951
+ }
952
+ return 0;
953
+ }
954
+
955
+ void
956
+ free_spiro(spiro_seg *s)
957
+ {
958
+ if (s) free(s);
959
+ }
960
+
961
+ void
962
+ spiro_to_bpath(const spiro_seg *s, int n, bezctx *bc)
963
+ {
964
+ int i, nsegs;
965
+
966
+ if (s==NULL || n <= 0 || bc==NULL) return;
967
+
968
+ nsegs = s[n - 1].ty == '}' ? n - 1 : n;
969
+
970
+ for (i = 0; i < nsegs; i++) {
971
+ double x0 = s[i].x;
972
+ double y0 = s[i].y;
973
+ double x1 = s[i + 1].x;
974
+ double y1 = s[i + 1].y;
975
+
976
+ if (i == 0)
977
+ bezctx_moveto(bc, x0, y0, s[0].ty == '{');
978
+ bezctx_mark_knot(bc, i);
979
+ spiro_seg_to_bpath(s[i].ks, x0, y0, x1, y1, bc, 0);
980
+ }
981
+ }
982
+
983
+ double
984
+ get_knot_th(const spiro_seg *s, int i)
985
+ {
986
+ double ends[2][4];
987
+
988
+ if (i == 0) {
989
+ compute_ends(s[i].ks, ends, s[i].seg_ch);
990
+ return s[i].seg_th - ends[0][0];
991
+ } else {
992
+ compute_ends(s[i - 1].ks, ends, s[i - 1].seg_ch);
993
+ return s[i - 1].seg_th + ends[1][0];
994
+ }
995
+ }