chaine-3.13.1-cp312-cp312-musllinux_1_2_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chaine might be problematic.

Files changed (70)
  1. chaine/__init__.py +2 -0
  2. chaine/_core/crf.cpp +19854 -0
  3. chaine/_core/crf.cpython-312-x86_64-linux-musl.so +0 -0
  4. chaine/_core/crf.pyx +271 -0
  5. chaine/_core/crfsuite/COPYING +27 -0
  6. chaine/_core/crfsuite/README +183 -0
  7. chaine/_core/crfsuite/include/crfsuite.h +1077 -0
  8. chaine/_core/crfsuite/include/crfsuite.hpp +649 -0
  9. chaine/_core/crfsuite/include/crfsuite_api.hpp +406 -0
  10. chaine/_core/crfsuite/include/os.h +65 -0
  11. chaine/_core/crfsuite/lib/cqdb/COPYING +28 -0
  12. chaine/_core/crfsuite/lib/cqdb/include/cqdb.h +518 -0
  13. chaine/_core/crfsuite/lib/cqdb/src/cqdb.c +639 -0
  14. chaine/_core/crfsuite/lib/cqdb/src/lookup3.c +1271 -0
  15. chaine/_core/crfsuite/lib/cqdb/src/main.c +184 -0
  16. chaine/_core/crfsuite/lib/crf/src/crf1d.h +354 -0
  17. chaine/_core/crfsuite/lib/crf/src/crf1d_context.c +788 -0
  18. chaine/_core/crfsuite/lib/crf/src/crf1d_encode.c +1020 -0
  19. chaine/_core/crfsuite/lib/crf/src/crf1d_feature.c +382 -0
  20. chaine/_core/crfsuite/lib/crf/src/crf1d_model.c +1085 -0
  21. chaine/_core/crfsuite/lib/crf/src/crf1d_tag.c +582 -0
  22. chaine/_core/crfsuite/lib/crf/src/crfsuite.c +500 -0
  23. chaine/_core/crfsuite/lib/crf/src/crfsuite_internal.h +233 -0
  24. chaine/_core/crfsuite/lib/crf/src/crfsuite_train.c +302 -0
  25. chaine/_core/crfsuite/lib/crf/src/dataset.c +115 -0
  26. chaine/_core/crfsuite/lib/crf/src/dictionary.c +127 -0
  27. chaine/_core/crfsuite/lib/crf/src/holdout.c +83 -0
  28. chaine/_core/crfsuite/lib/crf/src/json.c +1497 -0
  29. chaine/_core/crfsuite/lib/crf/src/json.h +120 -0
  30. chaine/_core/crfsuite/lib/crf/src/logging.c +85 -0
  31. chaine/_core/crfsuite/lib/crf/src/logging.h +49 -0
  32. chaine/_core/crfsuite/lib/crf/src/params.c +370 -0
  33. chaine/_core/crfsuite/lib/crf/src/params.h +84 -0
  34. chaine/_core/crfsuite/lib/crf/src/quark.c +180 -0
  35. chaine/_core/crfsuite/lib/crf/src/quark.h +46 -0
  36. chaine/_core/crfsuite/lib/crf/src/rumavl.c +1178 -0
  37. chaine/_core/crfsuite/lib/crf/src/rumavl.h +144 -0
  38. chaine/_core/crfsuite/lib/crf/src/train_arow.c +409 -0
  39. chaine/_core/crfsuite/lib/crf/src/train_averaged_perceptron.c +237 -0
  40. chaine/_core/crfsuite/lib/crf/src/train_l2sgd.c +491 -0
  41. chaine/_core/crfsuite/lib/crf/src/train_lbfgs.c +323 -0
  42. chaine/_core/crfsuite/lib/crf/src/train_passive_aggressive.c +442 -0
  43. chaine/_core/crfsuite/lib/crf/src/vecmath.h +360 -0
  44. chaine/_core/crfsuite/swig/crfsuite.cpp +1 -0
  45. chaine/_core/crfsuite_api.pxd +67 -0
  46. chaine/_core/liblbfgs/COPYING +22 -0
  47. chaine/_core/liblbfgs/README +71 -0
  48. chaine/_core/liblbfgs/include/lbfgs.h +745 -0
  49. chaine/_core/liblbfgs/lib/arithmetic_ansi.h +142 -0
  50. chaine/_core/liblbfgs/lib/arithmetic_sse_double.h +303 -0
  51. chaine/_core/liblbfgs/lib/arithmetic_sse_float.h +312 -0
  52. chaine/_core/liblbfgs/lib/lbfgs.c +1531 -0
  53. chaine/_core/tagger_wrapper.hpp +58 -0
  54. chaine/_core/trainer_wrapper.cpp +32 -0
  55. chaine/_core/trainer_wrapper.hpp +26 -0
  56. chaine/crf.py +505 -0
  57. chaine/logging.py +214 -0
  58. chaine/optimization/__init__.py +10 -0
  59. chaine/optimization/metrics.py +129 -0
  60. chaine/optimization/spaces.py +394 -0
  61. chaine/optimization/trial.py +103 -0
  62. chaine/optimization/utils.py +119 -0
  63. chaine/training.py +184 -0
  64. chaine/typing.py +18 -0
  65. chaine/validation.py +43 -0
  66. chaine-3.13.1.dist-info/METADATA +348 -0
  67. chaine-3.13.1.dist-info/RECORD +70 -0
  68. chaine-3.13.1.dist-info/WHEEL +4 -0
  69. chaine.libs/libgcc_s-a0b57c20.so.1 +0 -0
  70. chaine.libs/libstdc++-0d31ccbe.so.6.0.32 +0 -0
chaine/_core/crfsuite/lib/crf/src/train_passive_aggressive.c
@@ -0,0 +1,442 @@
+ /*
+  * Online training with Passive Aggressive.
+  *
+  * Copyright (c) 2007-2010, Naoaki Okazaki
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions are met:
+  *     * Redistributions of source code must retain the above copyright
+  *       notice, this list of conditions and the following disclaimer.
+  *     * Redistributions in binary form must reproduce the above copyright
+  *       notice, this list of conditions and the following disclaimer in the
+  *       documentation and/or other materials provided with the distribution.
+  *     * Neither the names of the authors nor the names of its contributors
+  *       may be used to endorse or promote products derived from this
+  *       software without specific prior written permission.
+  *
+  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+  * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  */
+
+ /* $Id$ */
+
+ #ifdef HAVE_CONFIG_H
+ #include <config.h>
+ #endif /*HAVE_CONFIG_H*/
+
+ #include <os.h>
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <time.h>
+
+ #include <crfsuite.h>
+ #include "crfsuite_internal.h"
+ #include "logging.h"
+ #include "params.h"
+ #include "vecmath.h"
+
+ #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+ /**
+  * Training parameters (configurable with crfsuite_params_t interface).
+  */
+ typedef struct
+ {
+     int type;
+     floatval_t c;
+     int error_sensitive;
+     int averaging;
+     int max_iterations;
+     floatval_t epsilon;
+ } training_option_t;
+
+ /**
+  * Internal data structure for computing the sparse vector F(x, y) - F(x, y').
+  */
+ typedef struct
+ {
+     /* An array of feature indices relevant to the instance. */
+     int *actives;
+     int num_actives;
+     int cap_actives;
+     char *used;
+
+     /* Coefficient for collecting feature weights. */
+     floatval_t c;
+     /* The difference vector [K]. */
+     floatval_t *delta;
+     /* The number of features. */
+     int K;
+ } delta_t;
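+
+ /*
+  * Lifecycle of the difference vector: delta_reset() clears the entries
+  * touched by the previous instance, delta_collect() accumulates
+  * c * value for every feature reported by the encoder, delta_finalize()
+  * deduplicates the active index list, and delta_norm2() / delta_add()
+  * consume the result.
+  */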
+
+ static int delta_init(delta_t *dc, const int K)
+ {
+     memset(dc, 0, sizeof(*dc));
+     dc->used = (char *)calloc(K, sizeof(char));
+     dc->delta = (floatval_t *)calloc(K, sizeof(floatval_t));
+     dc->K = K;
+     if (dc->delta == NULL || dc->used == NULL)
+     {
+         return 1;
+     }
+     return 0;
+ }
+
+ static void delta_finish(delta_t *dc)
+ {
+     free(dc->actives);
+     free(dc->used);
+     free(dc->delta);
+     memset(dc, 0, sizeof(*dc));
+ }
+
+ static void delta_reset(delta_t *dc)
+ {
+     int i;
+     for (i = 0; i < dc->num_actives; ++i)
+     {
+         int k = dc->actives[i];
+         dc->delta[k] = 0;
+     }
+     dc->num_actives = 0;
+ }
+
+ static void delta_collect(void *instance, int fid, floatval_t value)
+ {
+     delta_t *dc = (delta_t *)instance;
+
+     /* Expand the active feature list if necessary. */
+     if (dc->cap_actives <= dc->num_actives)
+     {
+         ++dc->cap_actives;
+         dc->cap_actives *= 2;
+         dc->actives = (int *)realloc(dc->actives, sizeof(int) * dc->cap_actives);
+     }
+
+     dc->actives[dc->num_actives++] = fid;
+     dc->delta[fid] += dc->c * value;
+ }
+
+ static void delta_finalize(delta_t *dc)
+ {
+     int i, j = 0, k;
+
+     /* Collapse the duplicated indices. */
+     for (i = 0; i < dc->num_actives; ++i)
+     {
+         k = dc->actives[i];
+         if (!dc->used[k])
+         {
+             dc->actives[j++] = k;
+             dc->used[k] = 1;
+         }
+     }
+     dc->num_actives = j; /* This is the distinct number of indices. */
+
+     /* Reset the used flag. */
+     for (i = 0; i < dc->num_actives; ++i)
+     {
+         k = dc->actives[i];
+         dc->used[k] = 0;
+     }
+ }
+
+ static floatval_t delta_norm2(delta_t *dc)
+ {
+     int i;
+     floatval_t norm2 = 0.;
+
+     for (i = 0; i < dc->num_actives; ++i)
+     {
+         int k = dc->actives[i];
+         norm2 += dc->delta[k] * dc->delta[k];
+     }
+     return norm2;
+ }
+
+ static void delta_add(delta_t *dc, floatval_t *w, floatval_t *ws, const floatval_t tau, const floatval_t u)
+ {
+     int i;
+     const floatval_t tauu = tau * u;
+
+     for (i = 0; i < dc->num_actives; ++i)
+     {
+         int k = dc->actives[i];
+         w[k] += tau * dc->delta[k];
+         ws[k] += tauu * dc->delta[k];
+     }
+ }
+
+ static int diff(int *x, int *y, int n)
+ {
+     int i, d = 0;
+     for (i = 0; i < n; ++i)
+     {
+         if (x[i] != y[i])
+         {
+             ++d;
+         }
+     }
+     return d;
+ }
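+
+ /*
+  * Cost of a misprediction: the margin violation err (the score of the
+  * Viterbi path minus the score of the reference path) plus either a
+  * constant 1, or, in the error-sensitive case, the square root of the
+  * Hamming distance d (the number of mislabeled items).
+  */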
+
+ static floatval_t cost_insensitive(floatval_t err, floatval_t d)
+ {
+     return err + 1.;
+ }
+
+ static floatval_t cost_sensitive(floatval_t err, floatval_t d)
+ {
+     return err + sqrt(d);
+ }
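+
+ /*
+  * Step sizes for the three variants of the passive-aggressive update
+  * (Crammer et al., "Online Passive-Aggressive Algorithms", JMLR 2006):
+  *     PA:    tau = cost / ||delta||^2
+  *     PA-I:  tau = min(C, cost / ||delta||^2)
+  *     PA-II: tau = cost / (||delta||^2 + 1 / (2C))
+  */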
+
+ static floatval_t tau0(floatval_t cost, floatval_t norm, floatval_t c)
+ {
+     return cost / norm;
+ }
+
+ static floatval_t tau1(floatval_t cost, floatval_t norm, floatval_t c)
+ {
+     return MIN(c, cost / norm);
+ }
+
+ static floatval_t tau2(floatval_t cost, floatval_t norm, floatval_t c)
+ {
+     return cost / (norm + 0.5 / c);
+ }
+
+ static int exchange_options(crfsuite_params_t *params, training_option_t *opt, int mode)
+ {
+     BEGIN_PARAM_MAP(params, mode)
+     DDX_PARAM_INT(
+         "type", opt->type, 1,
+         "The strategy for updating feature weights: {\n"
+         "    0: PA without slack variables,\n"
+         "    1: PA type I,\n"
+         "    2: PA type II\n"
+         "}.\n")
+     DDX_PARAM_FLOAT(
+         "c", opt->c, 1.,
+         "The aggressiveness parameter.")
+     DDX_PARAM_INT(
+         "error_sensitive", opt->error_sensitive, 1,
+         "Consider the number of incorrect labels to the cost function.")
+     DDX_PARAM_INT(
+         "averaging", opt->averaging, 1,
+         "Compute the average of feature weights (similarly to Averaged Perceptron).")
+     DDX_PARAM_INT(
+         "max_iterations", opt->max_iterations, 100,
+         "The maximum number of iterations.")
+     DDX_PARAM_FLOAT(
+         "epsilon", opt->epsilon, 0.,
+         "The stopping criterion (the mean loss).")
+     END_PARAM_MAP()
+
+     return 0;
+ }
+
+ void crfsuite_train_passive_aggressive_init(crfsuite_params_t *params)
+ {
+     exchange_options(params, NULL, 0);
+ }
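+
+ /*
+  * Main training routine: for each epoch, tag every training instance
+  * with the current weights w; whenever the Viterbi path differs from
+  * the reference labels, move w by tau along the sparse difference
+  * vector delta = F(x, y) - F(x, y'). The running sum ws makes the
+  * averaged weights recoverable at any point as wa = w - ws / u, as in
+  * the Averaged Perceptron.
+  */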
+
+ int crfsuite_train_passive_aggressive(
+     encoder_t *gm,
+     dataset_t *trainset,
+     dataset_t *testset,
+     crfsuite_params_t *params,
+     logging_t *lg,
+     floatval_t **ptr_w)
+ {
+     int n, i, u, ret = 0;
+     int *viterbi = NULL;
+     floatval_t *w = NULL, *ws = NULL, *wa = NULL;
+     const int N = trainset->num_instances;
+     const int K = gm->num_features;
+     const int T = gm->cap_items;
+     training_option_t opt;
+     delta_t dc;
+     clock_t begin = clock();
+     floatval_t (*cost_function)(floatval_t err, floatval_t d) = NULL;
+     floatval_t (*tau_function)(floatval_t cost, floatval_t norm, floatval_t c) = NULL;
+
+     /* Initialize the variable. */
+     if (delta_init(&dc, K) != 0)
+     {
+         ret = CRFSUITEERR_OUTOFMEMORY;
+         goto error_exit;
+     }
+
+     /* Obtain parameter values. */
+     exchange_options(params, &opt, -1);
+
+     /* Allocate arrays. */
+     w = (floatval_t *)calloc(sizeof(floatval_t), K);
+     ws = (floatval_t *)calloc(sizeof(floatval_t), K);
+     wa = (floatval_t *)calloc(sizeof(floatval_t), K);
+     viterbi = (int *)calloc(sizeof(int), T);
+     if (w == NULL || ws == NULL || wa == NULL || viterbi == NULL)
+     {
+         ret = CRFSUITEERR_OUTOFMEMORY;
+         goto error_exit;
+     }
+
+     /* Set the cost function for instances. */
+     if (opt.error_sensitive)
+     {
+         cost_function = cost_sensitive;
+     }
+     else
+     {
+         cost_function = cost_insensitive;
+     }
+
+     /* Set the routine for computing tau (i.e., PA, PA-I, PA-II). */
+     if (opt.type == 1)
+     {
+         tau_function = tau1;
+     }
+     else if (opt.type == 2)
+     {
+         tau_function = tau2;
+     }
+     else
+     {
+         tau_function = tau0;
+     }
+
+     /* Show the parameters. */
+     logging(lg, "Start training with PA");
+
+     u = 1;
+
+     /* Loop for epoch. */
+     for (i = 0; i < opt.max_iterations; ++i)
+     {
+         floatval_t norm = 0., sum_loss = 0.;
+         clock_t iteration_begin = clock();
+
+         /* Shuffle the instances. */
+         dataset_shuffle(trainset);
+
+         /* Loop for each instance. */
+         for (n = 0; n < N; ++n)
+         {
+             int d = 0;
+             floatval_t sv;
+             const crfsuite_instance_t *inst = dataset_get(trainset, n);
+
+             /* Set the feature weights to the encoder. */
+             gm->set_weights(gm, w, 1.);
+             gm->set_instance(gm, inst);
+
+             /* Tag the sequence with the current model. */
+             gm->viterbi(gm, viterbi, &sv);
+
+             /* Compute the number of different labels. */
+             d = diff(inst->labels, viterbi, inst->num_items);
+             if (0 < d)
+             {
+                 floatval_t sc, norm2;
+                 floatval_t tau, cost;
+
+                 /*
+                     Compute the cost of this instance.
+                 */
+                 gm->score(gm, inst->labels, &sc);
+                 cost = cost_function(sv - sc, (double)d);
+
+                 /* Initialize delta[k] = 0. */
+                 delta_reset(&dc);
+
+                 /*
+                     For every feature k on the correct path:
+                         delta[k] += 1;
+                 */
+                 dc.c = 1;
+                 gm->features_on_path(gm, inst, inst->labels, delta_collect, &dc);
+
+                 /*
+                     For every feature k on the Viterbi path:
+                         delta[k] -= 1;
+                 */
+                 dc.c = -1;
+                 gm->features_on_path(gm, inst, viterbi, delta_collect, &dc);
+
+                 delta_finalize(&dc);
+
+                 /*
+                     Compute tau (depending on PA, PA-I, and PA-II).
+                 */
+                 norm2 = delta_norm2(&dc);
+                 tau = tau_function(cost, norm2, opt.c);
+
+                 /*
+                     Update the feature weights:
+                         w[k] += tau * delta[k]
+                         ws[k] += tau * u * delta[k]
+                 */
+                 delta_add(&dc, w, ws, tau * inst->weight, u);
+
+                 sum_loss += cost * inst->weight;
+             }
+             ++u;
+         }
+
+         if (opt.averaging)
+         {
+             /* Perform averaging to wa. */
+             veccopy(wa, w, K);
+             vecasub(wa, 1. / u, ws, K);
+         }
+         else
+         {
+             /* Simply copy the weights to wa. */
+             veccopy(wa, w, K);
+         }
+
+         /* Output the progress. */
+         logging(lg, "Iteration %d, training loss: %f", i + 1, sum_loss);
+
+         /* Holdout evaluation if necessary. */
+         if (testset != NULL)
+         {
+             holdout_evaluation(gm, testset, wa, lg);
+         }
+
+         /* Convergence test. */
+         if (sum_loss / N < opt.epsilon)
+         {
+             logging(lg, "Loss has converged, terminating training");
+             break;
+         }
+     }
+
+     free(viterbi);
+     free(ws);
+     free(w);
+     *ptr_w = wa;
+     delta_finish(&dc);
+     return ret;
+
+ error_exit:
+     free(viterbi);
+     free(wa);
+     free(ws);
+     free(w);
+     *ptr_w = NULL;
+     delta_finish(&dc);
+
+     return ret;
+ }