EvoScientist 0.0.1.dev4__py3-none-any.whl → 0.1.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. EvoScientist/EvoScientist.py +25 -61
  2. EvoScientist/__init__.py +0 -19
  3. EvoScientist/backends.py +0 -26
  4. EvoScientist/cli.py +1365 -480
  5. EvoScientist/middleware.py +7 -56
  6. EvoScientist/skills/clip/SKILL.md +253 -0
  7. EvoScientist/skills/clip/references/applications.md +207 -0
  8. EvoScientist/skills/langgraph-docs/SKILL.md +36 -0
  9. EvoScientist/skills/tensorboard/SKILL.md +629 -0
  10. EvoScientist/skills/tensorboard/references/integrations.md +638 -0
  11. EvoScientist/skills/tensorboard/references/profiling.md +545 -0
  12. EvoScientist/skills/tensorboard/references/visualization.md +620 -0
  13. EvoScientist/skills/vllm/SKILL.md +364 -0
  14. EvoScientist/skills/vllm/references/optimization.md +226 -0
  15. EvoScientist/skills/vllm/references/quantization.md +284 -0
  16. EvoScientist/skills/vllm/references/server-deployment.md +255 -0
  17. EvoScientist/skills/vllm/references/troubleshooting.md +447 -0
  18. EvoScientist/stream/__init__.py +0 -25
  19. EvoScientist/stream/utils.py +16 -23
  20. EvoScientist/tools.py +2 -75
  21. {evoscientist-0.0.1.dev4.dist-info → evoscientist-0.1.0rc2.dist-info}/METADATA +8 -153
  22. {evoscientist-0.0.1.dev4.dist-info → evoscientist-0.1.0rc2.dist-info}/RECORD +26 -24
  23. evoscientist-0.1.0rc2.dist-info/entry_points.txt +2 -0
  24. EvoScientist/config.py +0 -274
  25. EvoScientist/llm/__init__.py +0 -21
  26. EvoScientist/llm/models.py +0 -99
  27. EvoScientist/memory.py +0 -715
  28. EvoScientist/onboard.py +0 -725
  29. EvoScientist/paths.py +0 -44
  30. EvoScientist/skills_manager.py +0 -391
  31. EvoScientist/stream/display.py +0 -604
  32. EvoScientist/stream/events.py +0 -415
  33. EvoScientist/stream/state.py +0 -343
  34. evoscientist-0.0.1.dev4.dist-info/entry_points.txt +0 -5
  35. {evoscientist-0.0.1.dev4.dist-info → evoscientist-0.1.0rc2.dist-info}/WHEEL +0 -0
  36. {evoscientist-0.0.1.dev4.dist-info → evoscientist-0.1.0rc2.dist-info}/licenses/LICENSE +0 -0
  37. {evoscientist-0.0.1.dev4.dist-info → evoscientist-0.1.0rc2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,620 @@
+ # Comprehensive Visualization Guide
+
+ Complete guide to visualizing ML experiments with TensorBoard.
+
+ ## Table of Contents
+ - Scalars
+ - Images
+ - Histograms & Distributions
+ - Graphs
+ - Embeddings
+ - Text
+ - PR Curves
+ - Custom Visualizations
+
+ ## Scalars
+
+ ### Basic Scalar Logging
+
+ ```python
+ from torch.utils.tensorboard import SummaryWriter
+
+ writer = SummaryWriter('runs/scalars_demo')
+
+ # Log single metric
+ for step in range(100):
+     loss = compute_loss()
+     writer.add_scalar('Loss', loss, step)
+
+ writer.close()
+ ```
+
+ ### Multiple Scalars
+
+ ```python
+ # Group related metrics
+ writer.add_scalars('Loss', {
+     'train': train_loss,
+     'validation': val_loss,
+     'test': test_loss
+ }, epoch)
+
+ writer.add_scalars('Metrics/Classification', {
+     'accuracy': accuracy,
+     'precision': precision,
+     'recall': recall,
+     'f1_score': f1
+ }, epoch)
+ ```
+
+ ### Time-Series Metrics
+
+ ```python
+ # Track metrics over training
+ for epoch in range(100):
+     # Training metrics
+     train_loss = 0.0
+     for batch in train_loader:
+         loss = train_batch(batch)
+         train_loss += loss
+
+     train_loss /= len(train_loader)
+
+     # Validation metrics
+     val_loss, val_acc = validate()
+
+     # Log
+     writer.add_scalar('Loss/train', train_loss, epoch)
+     writer.add_scalar('Loss/val', val_loss, epoch)
+     writer.add_scalar('Accuracy/val', val_acc, epoch)
+
+     # Log learning rate
+     current_lr = optimizer.param_groups[0]['lr']
+     writer.add_scalar('Learning_rate', current_lr, epoch)
+ ```
+
+ ### Custom Smoothing
+
+ TensorBoard UI allows smoothing scalars:
+ - Slider from 0 (no smoothing) to 1 (maximum smoothing)
+ - Exponential moving average
+ - Useful for noisy metrics
+
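+ The smoothing slider applies a running exponential moving average to the raw points. For comparable smoothing offline, a rough sketch of the same idea (the helper name and default weight are illustrative, not part of TensorBoard's API):
+
+ ```python
+ def ema_smooth(values, weight=0.6):
+     """Exponential moving average, similar in spirit to the TensorBoard smoothing slider."""
+     smoothed, last = [], values[0]
+     for value in values:
+         last = weight * last + (1 - weight) * value  # weight plays the role of the slider
+         smoothed.append(last)
+     return smoothed
+ ```
+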
+ ## Images
+
+ ### Single Image
+
+ ```python
+ import torch
+ from torch.utils.tensorboard import SummaryWriter
+
+ writer = SummaryWriter('runs/images_demo')
+
+ # Log single image (C, H, W)
+ img = torch.rand(3, 224, 224)
+ writer.add_image('Sample_image', img, 0)
+ ```
+
+ ### Image Grid
+
+ ```python
+ from torchvision.utils import make_grid
+
+ # Create grid from batch
+ images = torch.rand(64, 3, 224, 224)  # Batch of 64 images
+ img_grid = make_grid(images, nrow=8)  # 8 images per row
+
+ writer.add_image('Image_grid', img_grid, epoch)
+ ```
+
+ ### Training Visualizations
+
+ ```python
+ # Visualize inputs, predictions, and ground truth
+ for epoch in range(10):
+     # Get batch
+     images, labels = next(iter(val_loader))
+
+     # Predict
+     with torch.no_grad():
+         predictions = model(images)
+
+     # Visualize inputs
+     input_grid = make_grid(images[:16], nrow=4)
+     writer.add_image('Inputs', input_grid, epoch)
+
+     # Visualize predictions (if images)
+     if isinstance(predictions, torch.Tensor) and predictions.dim() == 4:
+         pred_grid = make_grid(predictions[:16], nrow=4)
+         writer.add_image('Predictions', pred_grid, epoch)
+ ```
+
+ ### Attention Maps
+
+ ```python
+ # Visualize attention weights
+ attention_maps = model.get_attention(images)  # (B, H, W)
+
+ # Normalize to [0, 1]
+ attention_maps = (attention_maps - attention_maps.min()) / (attention_maps.max() - attention_maps.min())
+
+ # Add channel dimension
+ attention_maps = attention_maps.unsqueeze(1)  # (B, 1, H, W)
+
+ # Create grid
+ attention_grid = make_grid(attention_maps[:16], nrow=4)
+ writer.add_image('Attention_maps', attention_grid, epoch)
+ ```
+
+ ### TensorFlow Images
+
+ ```python
+ import tensorflow as tf
+
+ file_writer = tf.summary.create_file_writer('logs/images')
+
+ with file_writer.as_default():
+     # Log image batch
+     tf.summary.image('Training_samples', images, step=epoch, max_outputs=25)
+
+     # Log single image
+     tf.summary.image('Sample', img[tf.newaxis, ...], step=epoch)
+ ```
+
+ ## Histograms & Distributions
+
+ ### Weight Histograms
+
+ ```python
+ # PyTorch: Track weight distributions over time
+ for epoch in range(100):
+     train_epoch()
+
+     # Log all model parameters
+     for name, param in model.named_parameters():
+         writer.add_histogram(f'Weights/{name}', param, epoch)
+
+     # Log gradients
+     for name, param in model.named_parameters():
+         if param.grad is not None:
+             writer.add_histogram(f'Gradients/{name}', param.grad, epoch)
+ ```
+
+ ### Activation Histograms
+
+ ```python
+ # Hook to capture activations
+ activations = {}
+
+ def get_activation(name):
+     def hook(model, input, output):
+         activations[name] = output.detach()
+     return hook
+
+ # Register hooks
+ model.conv1.register_forward_hook(get_activation('conv1'))
+ model.conv2.register_forward_hook(get_activation('conv2'))
+ model.fc.register_forward_hook(get_activation('fc'))
+
+ # Forward pass
+ output = model(input)
+
+ # Log activations
+ for name, activation in activations.items():
+     writer.add_histogram(f'Activations/{name}', activation, epoch)
+ ```
+
+ ### Custom Distributions
+
+ ```python
+ # Log prediction distributions
+ predictions = model(test_data)
+ writer.add_histogram('Predictions', predictions, epoch)
+
+ # Log loss distributions across batches
+ losses = []
+ for batch in val_loader:
+     loss = compute_loss(batch)
+     losses.append(loss)
+
+ losses = torch.tensor(losses)
+ writer.add_histogram('Loss_distribution', losses, epoch)
+ ```
+
+ ### TensorFlow Histograms
+
+ ```python
+ import tensorflow as tf
+
+ file_writer = tf.summary.create_file_writer('logs/histograms')
+
+ with file_writer.as_default():
+     # Log weight distributions
+     for layer in model.layers:
+         for weight in layer.weights:
+             tf.summary.histogram(weight.name, weight, step=epoch)
+ ```
+
+ ## Graphs
+
+ ### Model Architecture
+
+ ```python
+ import torch
+ from torch.utils.tensorboard import SummaryWriter
+ from torchvision.models import resnet50
+
+ # PyTorch model (torchvision's ResNet-50 as an example)
+ model = resnet50(num_classes=1000)
+
+ # Create dummy input (same shape as real input)
+ dummy_input = torch.randn(1, 3, 224, 224)
+
+ # Log graph
+ writer = SummaryWriter('runs/graph_demo')
+ writer.add_graph(model, dummy_input)
+ writer.close()
+
+ # View in TensorBoard "Graphs" tab
+ ```
+
+ ### TensorFlow Graph
+
+ ```python
+ import tensorflow as tf
+
+ # TensorFlow logs the graph automatically with the Keras TensorBoard callback
+ tensorboard_callback = tf.keras.callbacks.TensorBoard(
+     log_dir='logs',
+     write_graph=True  # Enable graph logging
+ )
+
+ model.fit(x, y, callbacks=[tensorboard_callback])
+ ```
+
+ ## Embeddings
+
+ ### Projecting Embeddings
+
+ ```python
+ import torch
+ from torch.utils.tensorboard import SummaryWriter
+
+ writer = SummaryWriter('runs/embeddings_demo')
+
+ # Get embeddings (e.g., word embeddings, image features)
+ # Shape: (num_samples, embedding_dim)
+ embeddings = model.get_embeddings(data)
+
+ # Metadata (labels for each embedding)
+ metadata = ['cat', 'dog', 'bird', 'cat', 'dog', ...]
+
+ # Optional: Images for each embedding
+ label_img = torch.stack([img1, img2, img3, ...])  # (num_samples, C, H, W)
+
+ # Log embeddings
+ writer.add_embedding(
+     embeddings,
+     metadata=metadata,
+     label_img=label_img,
+     global_step=epoch,
+     tag='Word_embeddings'
+ )
+
+ writer.close()
+ ```
+
+ **In TensorBoard Projector:**
+ - Choose PCA, t-SNE, or UMAP
+ - Color by metadata labels
+ - Search and filter points
+ - Explore nearest neighbors
+
+ ### Image Embeddings
+
+ ```python
+ # Extract features from CNN
+ features = []
+ labels = []
+ images = []
+
+ model.eval()
+ with torch.no_grad():
+     for data, target in test_loader:
+         # Get features from penultimate layer
+         feature = model.get_features(data)  # (B, feature_dim)
+         features.append(feature)
+         labels.extend(target.cpu().numpy())
+         images.append(data)
+
+ # Concatenate
+ features = torch.cat(features)
+ images = torch.cat(images)
+
+ # Metadata (class names)
+ class_names = ['airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
+ metadata = [class_names[label] for label in labels]
+
+ # Log to TensorBoard
+ writer.add_embedding(
+     features,
+     metadata=metadata,
+     label_img=images,
+     tag='CIFAR10_features'
+ )
+ ```
+
+ ### Text Embeddings
+
+ ```python
+ # Word2Vec or BERT embeddings
+ word_embeddings = model.word_embeddings.weight.data  # (vocab_size, embedding_dim)
+ vocabulary = ['the', 'cat', 'dog', 'run', 'jump', ...]
+
+ writer.add_embedding(
+     word_embeddings,
+     metadata=vocabulary,
+     tag='Word2Vec_embeddings'
+ )
+ ```
+
+ ## Text
+
+ ### Basic Text Logging
+
+ ```python
+ from torch.utils.tensorboard import SummaryWriter
+
+ writer = SummaryWriter('runs/text_demo')
+
+ # Log plain text
+ writer.add_text('Config', str(config), 0)
+ writer.add_text('Hyperparameters', f'lr={lr}, batch_size={batch_size}', 0)
+
+ # Log predictions
+ predictions_text = f"Epoch {epoch}:\n"
+ for i, pred in enumerate(predictions[:5]):
+     predictions_text += f"Sample {i}: {pred}\n"
+
+ writer.add_text('Predictions', predictions_text, epoch)
+ ```
+
+ ### Markdown Tables
+
+ ```python
+ # Log results as markdown table
+ results = f"""
+ | Metric | Train | Validation | Test |
+ |--------|-------|------------|------|
+ | Accuracy | {train_acc:.4f} | {val_acc:.4f} | {test_acc:.4f} |
+ | Loss | {train_loss:.4f} | {val_loss:.4f} | {test_loss:.4f} |
+ | F1 Score | {train_f1:.4f} | {val_f1:.4f} | {test_f1:.4f} |
+ """
+
+ writer.add_text('Results/Summary', results, epoch)
+ ```
+
+ ### Model Summaries
+
+ ```python
+ # Log model architecture as text
+ from torchinfo import summary
+
+ model_summary = str(summary(model, input_size=(1, 3, 224, 224), verbose=0))
+ writer.add_text('Model/Architecture', f'```\n{model_summary}\n```', 0)
+ ```
+
+ ## PR Curves
+
+ ### Precision-Recall Curves
+
+ ```python
+ import numpy as np
+ import torch
+ from torch.utils.tensorboard import SummaryWriter
+ from sklearn.metrics import precision_recall_curve
+
+ writer = SummaryWriter('runs/pr_curves')
+
+ # Get predictions and ground truth
+ y_true = []
+ y_scores = []
+
+ model.eval()
+ with torch.no_grad():
+     for data, target in test_loader:
+         output = model(data)
+         probs = torch.softmax(output, dim=1)
+
+         y_true.extend(target.cpu().numpy())
+         y_scores.extend(probs.cpu().numpy())
+
+ y_true = np.array(y_true)
+ y_scores = np.array(y_scores)
+
+ # Log PR curve for each class
+ num_classes = y_scores.shape[1]
+ for class_idx in range(num_classes):
+     # Binary classification: class vs rest
+     labels = (y_true == class_idx).astype(int)
+     scores = y_scores[:, class_idx]
+
+     # Add PR curve
+     writer.add_pr_curve(
+         f'PR_curve/class_{class_idx}',
+         labels,
+         scores,
+         global_step=epoch
+     )
+
+ writer.close()
+ ```
+
+ ### ROC Curves
+
+ ```python
+ # TensorBoard doesn't have built-in ROC, but we can log it as an image
+ from sklearn.metrics import roc_curve, auc
+ import matplotlib.pyplot as plt
+
+ fig, ax = plt.subplots()
+
+ for class_idx in range(num_classes):
+     labels = (y_true == class_idx).astype(int)
+     scores = y_scores[:, class_idx]
+
+     fpr, tpr, _ = roc_curve(labels, scores)
+     roc_auc = auc(fpr, tpr)
+
+     ax.plot(fpr, tpr, label=f'Class {class_idx} (AUC = {roc_auc:.2f})')
+
+ ax.plot([0, 1], [0, 1], 'k--')
+ ax.set_xlabel('False Positive Rate')
+ ax.set_ylabel('True Positive Rate')
+ ax.set_title('ROC Curves')
+ ax.legend()
+
+ # Convert to tensor and log
+ fig.canvas.draw()
+ img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ img = torch.from_numpy(img).permute(2, 0, 1)
+
+ writer.add_image('ROC_curves', img, epoch)
+ plt.close(fig)
+ ```
+
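+ The canvas-to-tensor conversion above is the manual route; `SummaryWriter.add_figure` logs a Matplotlib figure directly to the Images tab and skips that step. A minimal sketch logging the same `fig` (call it before closing the figure yourself, since `add_figure` closes it by default):
+
+ ```python
+ # Render and log the Matplotlib figure in one call (close=True by default)
+ writer.add_figure('ROC_curves', fig, global_step=epoch)
+ ```
+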
+ ## Custom Visualizations
+
+ ### Confusion Matrix
+
+ ```python
+ import numpy as np
+ import torch
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ from sklearn.metrics import confusion_matrix
+
+ # Compute confusion matrix
+ # (hard predictions, e.g. derived from the class probabilities logged above)
+ y_pred = y_scores.argmax(axis=1)
+ cm = confusion_matrix(y_true, y_pred)
+
+ # Plot
+ fig, ax = plt.subplots(figsize=(10, 10))
+ sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=ax)
+ ax.set_xlabel('Predicted')
+ ax.set_ylabel('True')
+ ax.set_title('Confusion Matrix')
+
+ # Convert to tensor and log
+ fig.canvas.draw()
+ img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ img = torch.from_numpy(img).permute(2, 0, 1)
+
+ writer.add_image('Confusion_matrix', img, epoch)
+ plt.close(fig)
+ ```
+
+ ### Loss Landscape
+
+ ```python
+ # Visualize loss surface around current parameters
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import matplotlib.pyplot as plt
+
+ def compute_loss_landscape(model, data, target, param1, param2):
+     """Compute loss on a grid of perturbations of two named parameters."""
+     # Save original values of the two parameters we perturb
+     state = model.state_dict()
+     original = {param1: state[param1].clone(), param2: state[param2].clone()}
+
+     # Grid
+     param1_range = np.linspace(-1, 1, 50)
+     param2_range = np.linspace(-1, 1, 50)
+     losses = np.zeros((50, 50))
+
+     for i, p1 in enumerate(param1_range):
+         for j, p2 in enumerate(param2_range):
+             # Perturb parameters (offset from the originals, not cumulatively)
+             state[param1].copy_(original[param1] + p1)
+             state[param2].copy_(original[param2] + p2)
+
+             # Compute loss
+             with torch.no_grad():
+                 output = model(data)
+                 loss = F.cross_entropy(output, target)
+                 losses[i, j] = loss.item()
+
+     # Restore parameters
+     state[param1].copy_(original[param1])
+     state[param2].copy_(original[param2])
+
+     return losses
+
+ # Compute the landscape for two chosen parameter names (e.g. a layer's weight and bias)
+ losses = compute_loss_landscape(model, data, target, param1, param2)
+
+ # Plot
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ X, Y = np.meshgrid(np.linspace(-1, 1, 50), np.linspace(-1, 1, 50))
+ ax.plot_surface(X, Y, losses, cmap='viridis')
+ ax.set_title('Loss Landscape')
+
+ # Log
+ fig.canvas.draw()
+ img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ img = torch.from_numpy(img).permute(2, 0, 1)
+ writer.add_image('Loss_landscape', img, epoch)
+ plt.close(fig)
+ ```
+
+ ## Best Practices
+
+ ### 1. Use Hierarchical Tags
+
+ ```python
+ # ✅ Good: Organized with hierarchy
+ writer.add_scalar('Loss/train', train_loss, step)
+ writer.add_scalar('Loss/val', val_loss, step)
+ writer.add_scalar('Metrics/accuracy', accuracy, step)
+ writer.add_scalar('Metrics/f1_score', f1, step)
+
+ # ❌ Bad: Flat namespace
+ writer.add_scalar('train_loss', train_loss, step)
+ writer.add_scalar('val_loss', val_loss, step)
+ ```
+
+ ### 2. Log Regularly but Not Excessively
+
+ ```python
+ # ✅ Good: Epoch-level + periodic batch-level
+ for epoch in range(100):
+     for batch_idx, batch in enumerate(train_loader):
+         loss = train_step(batch)
+
+         # Log every 100 batches
+         if batch_idx % 100 == 0:
+             global_step = epoch * len(train_loader) + batch_idx
+             writer.add_scalar('Loss/train_batch', loss, global_step)
+
+     # Always log epoch metrics
+     writer.add_scalar('Loss/train_epoch', epoch_loss, epoch)
+
+ # ❌ Bad: Every batch (creates huge logs)
+ for batch in train_loader:
+     writer.add_scalar('Loss', loss, step)
+ ```
+
+ ### 3. Visualize Sample Predictions
+
+ ```python
+ # Log predictions periodically
+ if epoch % 5 == 0:
+     model.eval()
+     with torch.no_grad():
+         sample_images, sample_labels = next(iter(val_loader))
+         predictions = model(sample_images)
+
+     # Visualize
+     img_grid = make_grid(sample_images[:16], nrow=4)
+     writer.add_image('Samples/inputs', img_grid, epoch)
+
+     # Add predictions as text
+     pred_text = '\n'.join([f'{i}: {pred.argmax()}' for i, pred in enumerate(predictions[:16])])
+     writer.add_text('Samples/predictions', pred_text, epoch)
+ ```
+
+ ## Resources
+
+ - **TensorBoard Documentation**: https://www.tensorflow.org/tensorboard
+ - **PyTorch TensorBoard**: https://pytorch.org/docs/stable/tensorboard.html
+ - **Projector Guide**: https://www.tensorflow.org/tensorboard/tensorboard_projector_plugin