@elaraai/east-py-datascience 0.0.2-beta.1 → 0.0.2-beta.11

This diff shows the published contents of the two package versions as they appear in their public registries, and is provided for informational purposes only.
@@ -10,44 +10,58 @@
  *
  * @packageDocumentation
  */
- import { StructType, VariantType, OptionType, IntegerType, FloatType, BlobType, ArrayType } from "@elaraai/east";
+ import { StructType, VariantType, OptionType, IntegerType, FloatType, BlobType, ArrayType, NullType } from "@elaraai/east";
  export { VectorType, MatrixType } from "../types.js";
  /**
- * Activation function type for neural networks.
+ * Activation function type for hidden layers.
  */
  export declare const TorchActivationType: VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
  }>;
  /**
  * Loss function type for training.
  */
  export declare const TorchLossType: VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>;
  /**
  * Optimizer type for training.
  */
  export declare const TorchOptimizerType: VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
+ }>;
+ /**
+ * Output activation function type for the final layer.
+ * Applied only to the output layer, not hidden layers.
+ */
+ export declare const TorchOutputActivationType: VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>;
  /**
  * Configuration for MLP architecture.
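With the payload change from `StructType<{}>` to `NullType`, these unit variants carry a `null` payload instead of an empty struct. A minimal sketch of constructing values of the updated types, assuming a `variant` helper is importable from `@elaraai/east` as used in the @example blocks later in this diff:

```ts
import { variant } from "@elaraai/east"; // assumed export, mirroring the @example blocks below

// Hidden-layer activation: the payload is now null rather than {}
const activation = variant('leaky_relu', null);

// New in beta.11: KL Divergence loss, meant to be paired with a softmax output
const loss = variant('kl_div', null);

// New in beta.11: output activation, applied only to the final layer
const output_activation = variant('softmax', null);
```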
@@ -55,16 +69,25 @@ export declare const TorchOptimizerType: VariantType<{
  export declare const TorchMLPConfigType: StructType<{
  /** Hidden layer sizes, e.g., [64, 32] */
  hidden_layers: ArrayType<IntegerType>;
- /** Activation function (default relu) */
+ /** Activation function for hidden layers (default relu) */
  activation: OptionType<VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>>;
  /** Dropout rate (default 0.0) */
  dropout: OptionType<FloatType>;
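A hedged sketch of a `TorchMLPConfigType` value exercising the new `output_activation` field, following the `$.let`/`variant` conventions of the @example blocks later in this diff (the `output_dim` field and the exact literal forms are taken from those examples, otherwise assumptions):

```ts
// MLP config including the new output_activation field; softmax makes each
// output row a probability distribution (pairs naturally with the kl_div loss)
const mlp_config = $.let({
  hidden_layers: [64n, 32n],
  activation: variant('some', variant('relu', null)),
  output_activation: variant('some', variant('softmax', null)),
  dropout: variant('none', null),
  output_dim: variant('some', 10n), // 10-way distribution per sample
});
```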
@@ -84,22 +107,24 @@ export declare const TorchTrainConfigType: StructType<{
  /** Loss function (default mse) */
  loss: OptionType<VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>>;
  /** Optimizer (default adam) */
  optimizer: OptionType<VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>>;
  /** Early stopping patience, 0 = disabled */
  early_stopping: OptionType<IntegerType>;
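Every field of the train config is option-typed, so a value spells out `some`/`none` for each field. A hedged sketch of a complete `TorchTrainConfigType` value pairing the new `kl_div` loss with a softmax output, using the same assumed encodings as above:

```ts
// Sketch of a TorchTrainConfigType value; the 'some'/'none' encoding and
// bigint literals mirror this file's @example blocks and are assumptions
const train_config = $.let({
  epochs: variant('some', 200n),
  batch_size: variant('none', null),              // default 32
  learning_rate: variant('some', 0.001),
  loss: variant('some', variant('kl_div', null)), // pair with softmax output
  optimizer: variant('some', variant('adamw', null)),
  early_stopping: variant('some', 10n),           // 0 = disabled
  validation_split: variant('none', null),        // default 0.2
  random_state: variant('some', 42n),             // reproducible runs
});
```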
@@ -170,16 +195,25 @@ export declare const TorchModelBlobType: VariantType<{
  export declare const torch_mlp_train: import("@elaraai/east").PlatformDefinition<[ArrayType<ArrayType<FloatType>>, ArrayType<FloatType>, StructType<{
  /** Hidden layer sizes, e.g., [64, 32] */
  hidden_layers: ArrayType<IntegerType>;
- /** Activation function (default relu) */
+ /** Activation function for hidden layers (default relu) */
  activation: OptionType<VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>>;
  /** Dropout rate (default 0.0) */
  dropout: OptionType<FloatType>;
@@ -195,22 +229,24 @@ export declare const torch_mlp_train: import("@elaraai/east").PlatformDefinition
  /** Loss function (default mse) */
  loss: OptionType<VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>>;
  /** Optimizer (default adam) */
  optimizer: OptionType<VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>>;
  /** Early stopping patience, 0 = disabled */
  early_stopping: OptionType<IntegerType>;
@@ -258,6 +294,213 @@ export declare const torch_mlp_predict: import("@elaraai/east").PlatformDefiniti
  output_dim: IntegerType;
  }>;
  }>, ArrayType<ArrayType<FloatType>>], ArrayType<FloatType>>;
+ /**
+ * Train a PyTorch MLP model with multi-output support.
+ *
+ * Supports multi-output regression (predicting multiple values per sample)
+ * and autoencoders (where input equals target for reconstruction learning).
+ * Output dimension is inferred from y.shape[1] unless overridden in config.
+ *
+ * @param X - Feature matrix (n_samples x n_features)
+ * @param y - Target matrix (n_samples x n_outputs)
+ * @param mlp_config - MLP architecture configuration
+ * @param train_config - Training configuration
+ * @returns Model blob and training result
+ */
+ export declare const torch_mlp_train_multi: import("@elaraai/east").PlatformDefinition<[ArrayType<ArrayType<FloatType>>, ArrayType<ArrayType<FloatType>>, StructType<{
+ /** Hidden layer sizes, e.g., [64, 32] */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Activation function for hidden layers (default relu) */
+ activation: OptionType<VariantType<{
+ /** Rectified Linear Unit */
+ relu: NullType;
+ /** Hyperbolic tangent */
+ tanh: NullType;
+ /** Sigmoid function */
+ sigmoid: NullType;
+ /** Leaky ReLU */
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
+ }>>;
+ /** Dropout rate (default 0.0) */
+ dropout: OptionType<FloatType>;
+ /** Output dimension (default 1) */
+ output_dim: OptionType<IntegerType>;
+ }>, StructType<{
+ /** Number of epochs (default 100) */
+ epochs: OptionType<IntegerType>;
+ /** Batch size (default 32) */
+ batch_size: OptionType<IntegerType>;
+ /** Learning rate (default 0.001) */
+ learning_rate: OptionType<FloatType>;
+ /** Loss function (default mse) */
+ loss: OptionType<VariantType<{
+ /** Mean Squared Error (regression) */
+ mse: NullType;
+ /** Mean Absolute Error (regression) */
+ mae: NullType;
+ /** Cross Entropy (classification) */
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
+ }>>;
+ /** Optimizer (default adam) */
+ optimizer: OptionType<VariantType<{
+ /** Adam optimizer */
+ adam: NullType;
+ /** Stochastic Gradient Descent */
+ sgd: NullType;
+ /** AdamW with weight decay */
+ adamw: NullType;
+ /** RMSprop optimizer */
+ rmsprop: NullType;
+ }>>;
+ /** Early stopping patience, 0 = disabled */
+ early_stopping: OptionType<IntegerType>;
+ /** Validation split fraction (default 0.2) */
+ validation_split: OptionType<FloatType>;
+ /** Random seed for reproducibility */
+ random_state: OptionType<IntegerType>;
+ }>], StructType<{
+ /** Trained model blob */
+ model: VariantType<{
+ torch_mlp: StructType<{
+ data: BlobType;
+ n_features: IntegerType;
+ hidden_layers: ArrayType<IntegerType>;
+ output_dim: IntegerType;
+ }>;
+ }>;
+ /** Training result with losses */
+ result: StructType<{
+ /** Training loss per epoch */
+ train_losses: ArrayType<FloatType>;
+ /** Validation loss per epoch */
+ val_losses: ArrayType<FloatType>;
+ /** Best epoch (for early stopping) */
+ best_epoch: IntegerType;
+ }>;
+ }>>;
+ /**
+ * Make predictions with a trained PyTorch MLP (multi-output).
+ *
+ * Returns a matrix where each row contains the predicted outputs for a sample.
+ *
+ * @param model - Trained MLP model blob
+ * @param X - Feature matrix (n_samples x n_features)
+ * @returns Predicted matrix (n_samples x n_outputs)
+ */
+ export declare const torch_mlp_predict_multi: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>], ArrayType<ArrayType<FloatType>>>;
+ /**
+ * Extract intermediate layer activations (embeddings) from a trained MLP.
+ *
+ * For autoencoders, this allows extracting the bottleneck representation.
+ * The layer_index specifies which hidden layer's output to return (0-indexed).
+ *
+ * For an autoencoder with architecture [input -> 8 -> 2 -> 8 -> output]
+ * (hidden_layers: [8, 2, 8]):
+ * - layer_index=0: output after first hidden layer (8 features)
+ * - layer_index=1: output after second hidden layer (2 features) <- bottleneck
+ * - layer_index=2: output after third hidden layer (8 features)
+ *
+ * @param model - Trained MLP model blob
+ * @param X - Feature matrix (n_samples x n_features)
+ * @param layer_index - Which hidden layer's output to return (0-indexed)
+ * @returns Embedding matrix (n_samples x hidden_dim at that layer)
+ *
+ * @example
+ * ```ts
+ * // Train autoencoder: 4 features -> 8 -> 2 (bottleneck) -> 8 -> 4 features
+ * const mlp_config = $.let({
+ * hidden_layers: [8n, 2n, 8n],
+ * activation: variant('some', variant('relu', null)),
+ * output_activation: variant('none', null),
+ * dropout: variant('none', null),
+ * output_dim: variant('none', null),
+ * });
+ * const output = $.let(Torch.mlpTrainMulti(X, X, mlp_config, train_config));
+ *
+ * // Extract bottleneck embeddings (layer_index=1 for the 2-dim bottleneck)
+ * const embeddings = $.let(Torch.mlpEncode(output.model, X, 1n));
+ * // embeddings is now (n_samples x 2)
+ * ```
+ */
+ export declare const torch_mlp_encode: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>, IntegerType], ArrayType<ArrayType<FloatType>>>;
+ /**
+ * Decode embeddings back through the decoder portion of an MLP.
+ *
+ * For autoencoders, this takes bottleneck activations and runs them through
+ * the decoder to reconstruct the output. This is the complement to mlpEncode.
+ *
+ * For an autoencoder with architecture [input -> 8 -> 2 -> 8 -> output]
+ * (hidden_layers: [8, 2, 8]):
+ * - layer_index=1: Start from the 2-dim bottleneck, run through layers 2+ to output
+ * - layer_index=0: Start from the 8-dim first layer, run through layers 1+ to output
+ *
+ * Use case: Compute weighted average of origin embeddings, then decode to
+ * get the reconstructed blend weight distribution.
+ *
+ * @param model - Trained MLP model blob
+ * @param embeddings - Embedding matrix (n_samples x hidden_dim at layer_index)
+ * @param layer_index - Which hidden layer the embeddings come from (0-indexed)
+ * @returns Decoded output matrix (n_samples x output_dim)
+ *
+ * @example
+ * ```ts
+ * // After training autoencoder and extracting embeddings...
+ * const origin_embeddings = $.let(Torch.mlpEncode(output.model, X_onehot, 1n));
+ *
+ * // Compute weighted blend embedding (e.g., 50% origin A + 50% origin B)
+ * const blend_embedding = $.let(...); // weighted average of origin embeddings
+ *
+ * // Decode back to weight distribution
+ * const reconstructed = $.let(Torch.mlpDecode(output.model, blend_embedding, 1n));
+ * ```
+ */
+ export declare const torch_mlp_decode: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>, IntegerType], ArrayType<ArrayType<FloatType>>>;
  /**
  * Type definitions for PyTorch functions.
  */
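`torch_mlp_predict_multi` has no @example of its own; a hedged sketch of the multi-output train/predict round trip, reusing the `$.let`/`Torch` conventions from the mlpEncode example above (`X`, `y`, `X_new`, and the config values are placeholders):

```ts
// Train on (n_samples x n_outputs) targets; output_dim is inferred from y
const output = $.let(Torch.mlpTrainMulti(X, y, mlp_config, train_config));

// Predict a full output row per sample: (n_samples x n_outputs)
const predictions = $.let(Torch.mlpPredictMulti(output.model, X_new));
```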
@@ -266,51 +509,71 @@ export declare const TorchTypes: {
  readonly VectorType: ArrayType<FloatType>;
  /** Matrix type (2D array of floats) */
  readonly MatrixType: ArrayType<ArrayType<FloatType>>;
- /** Activation function type */
+ /** Activation function type for hidden layers */
  readonly TorchActivationType: VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>;
+ /** Output activation function type */
+ readonly TorchOutputActivationType: VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>;
  /** Loss function type */
  readonly TorchLossType: VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>;
  /** Optimizer type */
  readonly TorchOptimizerType: VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>;
  /** MLP configuration type */
  readonly TorchMLPConfigType: StructType<{
  /** Hidden layer sizes, e.g., [64, 32] */
  hidden_layers: ArrayType<IntegerType>;
- /** Activation function (default relu) */
+ /** Activation function for hidden layers (default relu) */
  activation: OptionType<VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>>;
  /** Dropout rate (default 0.0) */
  dropout: OptionType<FloatType>;
@@ -328,22 +591,24 @@ export declare const TorchTypes: {
  /** Loss function (default mse) */
  loss: OptionType<VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>>;
  /** Optimizer (default adam) */
  optimizer: OptionType<VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>>;
  /** Early stopping patience, 0 = disabled */
  early_stopping: OptionType<IntegerType>;
@@ -431,20 +696,29 @@ export declare const TorchTypes: {
  * ```
  */
  export declare const Torch: {
- /** Train MLP model */
+ /** Train MLP model (single output) */
  readonly mlpTrain: import("@elaraai/east").PlatformDefinition<[ArrayType<ArrayType<FloatType>>, ArrayType<FloatType>, StructType<{
  /** Hidden layer sizes, e.g., [64, 32] */
  hidden_layers: ArrayType<IntegerType>;
- /** Activation function (default relu) */
+ /** Activation function for hidden layers (default relu) */
  activation: OptionType<VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>>;
  /** Dropout rate (default 0.0) */
  dropout: OptionType<FloatType>;
@@ -460,22 +734,24 @@ export declare const Torch: {
  /** Loss function (default mse) */
  loss: OptionType<VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>>;
  /** Optimizer (default adam) */
  optimizer: OptionType<VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>>;
  /** Early stopping patience, 0 = disabled */
  early_stopping: OptionType<IntegerType>;
@@ -503,7 +779,7 @@ export declare const Torch: {
  best_epoch: IntegerType;
  }>;
  }>>;
- /** Make predictions with MLP */
+ /** Make predictions with MLP (single output) */
  readonly mlpPredict: import("@elaraai/east").PlatformDefinition<[VariantType<{
  /** PyTorch MLP model */
  torch_mlp: StructType<{
@@ -517,57 +793,202 @@ export declare const Torch: {
  output_dim: IntegerType;
  }>;
  }>, ArrayType<ArrayType<FloatType>>], ArrayType<FloatType>>;
+ /** Train MLP model (multi-output) */
+ readonly mlpTrainMulti: import("@elaraai/east").PlatformDefinition<[ArrayType<ArrayType<FloatType>>, ArrayType<ArrayType<FloatType>>, StructType<{
+ /** Hidden layer sizes, e.g., [64, 32] */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Activation function for hidden layers (default relu) */
+ activation: OptionType<VariantType<{
+ /** Rectified Linear Unit */
+ relu: NullType;
+ /** Hyperbolic tangent */
+ tanh: NullType;
+ /** Sigmoid function */
+ sigmoid: NullType;
+ /** Leaky ReLU */
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
+ }>>;
+ /** Dropout rate (default 0.0) */
+ dropout: OptionType<FloatType>;
+ /** Output dimension (default 1) */
+ output_dim: OptionType<IntegerType>;
+ }>, StructType<{
+ /** Number of epochs (default 100) */
+ epochs: OptionType<IntegerType>;
+ /** Batch size (default 32) */
+ batch_size: OptionType<IntegerType>;
+ /** Learning rate (default 0.001) */
+ learning_rate: OptionType<FloatType>;
+ /** Loss function (default mse) */
+ loss: OptionType<VariantType<{
+ /** Mean Squared Error (regression) */
+ mse: NullType;
+ /** Mean Absolute Error (regression) */
+ mae: NullType;
+ /** Cross Entropy (classification) */
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
+ }>>;
+ /** Optimizer (default adam) */
+ optimizer: OptionType<VariantType<{
+ /** Adam optimizer */
+ adam: NullType;
+ /** Stochastic Gradient Descent */
+ sgd: NullType;
+ /** AdamW with weight decay */
+ adamw: NullType;
+ /** RMSprop optimizer */
+ rmsprop: NullType;
+ }>>;
+ /** Early stopping patience, 0 = disabled */
+ early_stopping: OptionType<IntegerType>;
+ /** Validation split fraction (default 0.2) */
+ validation_split: OptionType<FloatType>;
+ /** Random seed for reproducibility */
+ random_state: OptionType<IntegerType>;
+ }>], StructType<{
+ /** Trained model blob */
+ model: VariantType<{
+ torch_mlp: StructType<{
+ data: BlobType;
+ n_features: IntegerType;
+ hidden_layers: ArrayType<IntegerType>;
+ output_dim: IntegerType;
+ }>;
+ }>;
+ /** Training result with losses */
+ result: StructType<{
+ /** Training loss per epoch */
+ train_losses: ArrayType<FloatType>;
+ /** Validation loss per epoch */
+ val_losses: ArrayType<FloatType>;
+ /** Best epoch (for early stopping) */
+ best_epoch: IntegerType;
+ }>;
+ }>>;
+ /** Make predictions with MLP (multi-output) */
+ readonly mlpPredictMulti: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>], ArrayType<ArrayType<FloatType>>>;
+ /** Extract intermediate layer activations (embeddings) from MLP */
+ readonly mlpEncode: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>, IntegerType], ArrayType<ArrayType<FloatType>>>;
+ /** Decode embeddings back through decoder portion of MLP */
+ readonly mlpDecode: import("@elaraai/east").PlatformDefinition<[VariantType<{
+ /** PyTorch MLP model */
+ torch_mlp: StructType<{
+ /** Cloudpickle serialized model */
+ data: BlobType;
+ /** Number of input features */
+ n_features: IntegerType;
+ /** Hidden layer sizes */
+ hidden_layers: ArrayType<IntegerType>;
+ /** Output dimension */
+ output_dim: IntegerType;
+ }>;
+ }>, ArrayType<ArrayType<FloatType>>, IntegerType], ArrayType<ArrayType<FloatType>>>;
  /** Type definitions */
  readonly Types: {
  /** Vector type (array of floats) */
  readonly VectorType: ArrayType<FloatType>;
  /** Matrix type (2D array of floats) */
  readonly MatrixType: ArrayType<ArrayType<FloatType>>;
- /** Activation function type */
+ /** Activation function type for hidden layers */
  readonly TorchActivationType: VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>;
+ /** Output activation function type */
+ readonly TorchOutputActivationType: VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>;
  /** Loss function type */
  readonly TorchLossType: VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>;
  /** Optimizer type */
  readonly TorchOptimizerType: VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>;
  /** MLP configuration type */
  readonly TorchMLPConfigType: StructType<{
  /** Hidden layer sizes, e.g., [64, 32] */
  hidden_layers: ArrayType<IntegerType>;
- /** Activation function (default relu) */
+ /** Activation function for hidden layers (default relu) */
  activation: OptionType<VariantType<{
  /** Rectified Linear Unit */
- relu: StructType<{}>;
+ relu: NullType;
  /** Hyperbolic tangent */
- tanh: StructType<{}>;
+ tanh: NullType;
  /** Sigmoid function */
- sigmoid: StructType<{}>;
+ sigmoid: NullType;
  /** Leaky ReLU */
- leaky_relu: StructType<{}>;
+ leaky_relu: NullType;
+ }>>;
+ /** Output activation function (default none/linear) */
+ output_activation: OptionType<VariantType<{
+ /** No activation (linear output) - default */
+ none: NullType;
+ /** Softmax (outputs sum to 1, for probability distributions) */
+ softmax: NullType;
+ /** Sigmoid (each output independently in [0,1]) */
+ sigmoid: NullType;
  }>>;
  /** Dropout rate (default 0.0) */
  dropout: OptionType<FloatType>;
@@ -585,22 +1006,24 @@ export declare const Torch: {
  /** Loss function (default mse) */
  loss: OptionType<VariantType<{
  /** Mean Squared Error (regression) */
- mse: StructType<{}>;
+ mse: NullType;
  /** Mean Absolute Error (regression) */
- mae: StructType<{}>;
+ mae: NullType;
  /** Cross Entropy (classification) */
- cross_entropy: StructType<{}>;
+ cross_entropy: NullType;
+ /** KL Divergence (distribution matching, use with softmax output) */
+ kl_div: NullType;
  }>>;
  /** Optimizer (default adam) */
  optimizer: OptionType<VariantType<{
  /** Adam optimizer */
- adam: StructType<{}>;
+ adam: NullType;
  /** Stochastic Gradient Descent */
- sgd: StructType<{}>;
+ sgd: NullType;
  /** AdamW with weight decay */
- adamw: StructType<{}>;
+ adamw: NullType;
  /** RMSprop optimizer */
- rmsprop: StructType<{}>;
+ rmsprop: NullType;
  }>>;
  /** Early stopping patience, 0 = disabled */
  early_stopping: OptionType<IntegerType>;
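Since mlpDecode is documented as the complement of mlpEncode, encoding to a layer and then decoding from that same layer should reproduce a direct forward pass. A hedged sanity-check sketch under that reading (names reuse the earlier examples; equality holds only up to float tolerance):

```ts
// Encode to the bottleneck (layer_index=1), then decode from the same layer
const z = $.let(Torch.mlpEncode(output.model, X, 1n));
const roundtrip = $.let(Torch.mlpDecode(output.model, z, 1n));

// Should match a direct multi-output prediction on X
const direct = $.let(Torch.mlpPredictMulti(output.model, X));
```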