cifar10-tools 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,10 @@
-'''Data download function for CIFAR-10 dataset. Use to pre-download data
-during devcontainer creation'''
+'''Data loading and preprocessing functions for CIFAR-10 dataset.'''
 
 from pathlib import Path
-from torchvision import datasets
+import torch
+from torchvision import datasets, transforms
+from torch.utils.data import DataLoader
+
 
 def download_cifar10_data(data_dir: str='data/pytorch/cifar10'):
     '''Download CIFAR-10 dataset using torchvision.datasets.'''
@@ -22,6 +24,112 @@ def download_cifar10_data(data_dir: str='data/pytorch/cifar10'):
         download=True
     )
 
+
+def make_data_loaders(
+    data_dir: Path,
+    batch_size: int,
+    train_transform: transforms.Compose,
+    eval_transform: transforms.Compose,
+    device: torch.device | None = None,
+    download: bool = False,
+):
+    """
+    Loads CIFAR-10, applies preprocessing with separate train/eval transforms,
+    and returns DataLoaders.
+
+    Args:
+        data_dir: Path to CIFAR-10 data directory
+        batch_size: Batch size for DataLoaders
+        train_transform: Transform to apply to training data
+        eval_transform: Transform to apply to validation and test data
+        device: Device to preload tensors onto. If None, data stays on CPU
+            and transforms are applied on-the-fly during iteration.
+        download: Whether to download the dataset if not present
+
+    Returns:
+        Tuple of (train_loader, val_loader, test_loader)
+    """
+
+    # Load datasets with respective transforms
+    train_dataset_full = datasets.CIFAR10(
+        root=data_dir,
+        train=True,
+        download=download,
+        transform=train_transform,
+    )
+
+    val_test_dataset_full = datasets.CIFAR10(
+        root=data_dir,
+        train=True,
+        download=download,
+        transform=eval_transform,
+    )
+
+    test_dataset = datasets.CIFAR10(
+        root=data_dir,
+        train=False,
+        download=download,
+        transform=eval_transform,
+    )
+
+    if device is not None:
+        # Preload entire dataset to device for faster training
+        X_train_full = torch.stack([img for img, _ in train_dataset_full]).to(device)
+        y_train_full = torch.tensor([label for _, label in train_dataset_full]).to(device)
+
+        X_val_test_full = torch.stack([img for img, _ in val_test_dataset_full]).to(device)
+        y_val_test_full = torch.tensor([label for _, label in val_test_dataset_full]).to(device)
+
+        X_test = torch.stack([img for img, _ in test_dataset]).to(device)
+        y_test = torch.tensor([label for _, label in test_dataset]).to(device)
+
+        # Train/val split (80/20)
+        n_train = int(0.8 * len(X_train_full))
+        indices = torch.randperm(len(X_train_full))
+
+        X_train = X_train_full[indices[:n_train]]
+        y_train = y_train_full[indices[:n_train]]
+        X_val = X_val_test_full[indices[n_train:]]
+        y_val = y_val_test_full[indices[n_train:]]
+
+        # TensorDatasets
+        train_tensor_dataset = torch.utils.data.TensorDataset(X_train, y_train)
+        val_tensor_dataset = torch.utils.data.TensorDataset(X_val, y_val)
+        test_tensor_dataset = torch.utils.data.TensorDataset(X_test, y_test)
+
+    else:
+        # Don't preload - use datasets directly for on-the-fly transforms
+        # Train/val split (80/20) using Subset
+        n_train = int(0.8 * len(train_dataset_full))
+        indices = torch.randperm(len(train_dataset_full)).tolist()
+
+        train_indices = indices[:n_train]
+        val_indices = indices[n_train:]
+
+        train_tensor_dataset = torch.utils.data.Subset(train_dataset_full, train_indices)
+        val_tensor_dataset = torch.utils.data.Subset(val_test_dataset_full, val_indices)
+        test_tensor_dataset = test_dataset
+
+    # DataLoaders
+    train_loader = DataLoader(
+        train_tensor_dataset,
+        batch_size=batch_size,
+        shuffle=True,
+    )
+    val_loader = DataLoader(
+        val_tensor_dataset,
+        batch_size=batch_size,
+        shuffle=False,
+    )
+    test_loader = DataLoader(
+        test_tensor_dataset,
+        batch_size=batch_size,
+        shuffle=False,
+    )
+
+    return train_loader, val_loader, test_loader
+
+
 if __name__ == '__main__':
 
     download_cifar10_data()
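The new `make_data_loaders` function is the main addition in this release. A minimal usage sketch follows, assuming only the signature and import path shown above; the transform pipelines and path values are illustrative, not values shipped with the package:

```python
# Illustrative usage of make_data_loaders; transforms and paths are example values.
from pathlib import Path

import torch
from torchvision import transforms

from cifar10_tools.pytorch.data import make_data_loaders

# Augmentation for training, plain tensor conversion for validation/test.
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
eval_transform = transforms.Compose([transforms.ToTensor()])

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

train_loader, val_loader, test_loader = make_data_loaders(
    data_dir=Path('data/pytorch/cifar10'),
    batch_size=128,
    train_transform=train_transform,
    eval_transform=eval_transform,
    device=device,  # pass None to keep data on CPU with on-the-fly transforms
    download=True,
)
```

Note the tradeoff encoded in the `device` branch: preloading applies each transform exactly once, so random augmentations are frozen into the cached tensors, while the `device=None` path re-samples them every epoch at the cost of per-batch transform work.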
@@ -12,13 +12,17 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.utils.data import DataLoader
 
+from cifar10_tools.pytorch.data import make_data_loaders
+
 
 def create_cnn(
     n_conv_blocks: int,
     initial_filters: int,
-    fc_units_1: int,
-    fc_units_2: int,
-    dropout_rate: float,
+    n_fc_layers: int,
+    base_kernel_size: int,
+    conv_dropout_rate: float,
+    fc_dropout_rate: float,
+    pooling_strategy: str,
     use_batch_norm: bool,
     num_classes: int = 10,
     in_channels: int = 3,
@@ -29,9 +33,11 @@ def create_cnn(
     Args:
         n_conv_blocks: Number of convolutional blocks (1-5)
         initial_filters: Number of filters in first conv layer (doubles each block)
-        fc_units_1: Number of units in first fully connected layer
-        fc_units_2: Number of units in second fully connected layer
-        dropout_rate: Dropout probability
+        n_fc_layers: Number of fully connected layers (1-8)
+        base_kernel_size: Base kernel size (decreases by 2 per block, min 3)
+        conv_dropout_rate: Dropout probability after convolutional blocks
+        fc_dropout_rate: Dropout probability in fully connected layers
+        pooling_strategy: Pooling type ('max' or 'avg')
         use_batch_norm: Whether to use batch normalization
         num_classes: Number of output classes (default: 10 for CIFAR-10)
         in_channels: Number of input channels (default: 3 for RGB)
@@ -40,15 +46,21 @@ def create_cnn(
     Returns:
         nn.Sequential model
     '''
+
     layers = []
     current_channels = in_channels
     current_size = input_size
 
+    # Convolutional blocks
     for block_idx in range(n_conv_blocks):
         out_channels = initial_filters * (2 ** block_idx)
+        kernel_size = max(3, base_kernel_size - 2 * block_idx)
+        padding = kernel_size // 2
 
         # First conv in block
-        layers.append(nn.Conv2d(current_channels, out_channels, kernel_size=3, padding=1))
+        layers.append(nn.Conv2d(current_channels, out_channels, kernel_size=kernel_size, padding=padding))
+        # Update size after conv: output_size = (input_size + 2*padding - kernel_size) + 1
+        current_size = (current_size + 2 * padding - kernel_size) + 1
 
         if use_batch_norm:
             layers.append(nn.BatchNorm2d(out_channels))
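The per-block kernel schedule introduced in this hunk is easiest to see numerically. A standalone sketch of the arithmetic (no package code involved):

```python
# Kernel size shrinks by 2 per block and is floored at 3.
for base_kernel_size in (7, 5, 3):
    sizes = [max(3, base_kernel_size - 2 * block_idx) for block_idx in range(4)]
    print(base_kernel_size, '->', sizes)
# 7 -> [7, 5, 3, 3]
# 5 -> [5, 3, 3, 3]
# 3 -> [3, 3, 3, 3]

# With padding = kernel_size // 2, an odd-kernel conv preserves spatial size:
#   out = in + 2 * (k // 2) - k + 1 = in   (for odd k)
# For even k the map grows by one pixel per conv, which is why the code
# now tracks current_size explicitly instead of assuming it is unchanged.
```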
@@ -56,33 +68,48 @@ def create_cnn(
             layers.append(nn.ReLU())
 
         # Second conv in block
-        layers.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))
+        layers.append(nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding))
+        current_size = (current_size + 2 * padding - kernel_size) + 1
 
         if use_batch_norm:
             layers.append(nn.BatchNorm2d(out_channels))
 
         layers.append(nn.ReLU())
 
-        # Pooling and dropout
-        layers.append(nn.MaxPool2d(2, 2))
-        layers.append(nn.Dropout(dropout_rate))
+        # Pooling
+        if pooling_strategy == 'max':
+            layers.append(nn.MaxPool2d(2, 2))
+        else: # avg
+            layers.append(nn.AvgPool2d(2, 2))
+
+        layers.append(nn.Dropout(conv_dropout_rate))
 
         current_channels = out_channels
-        current_size //= 2
+        current_size //= 2 # Pooling halves the size
 
-    # Calculate flattened size
-    final_channels = initial_filters * (2 ** (n_conv_blocks - 1))
-    flattened_size = final_channels * current_size * current_size
+    # Calculate flattened size using actual current_size
+    flattened_size = current_channels * current_size * current_size
 
-    # Classifier (3 fully connected layers)
+    # Classifier - dynamic FC layers with halving pattern
     layers.append(nn.Flatten())
-    layers.append(nn.Linear(flattened_size, fc_units_1))
-    layers.append(nn.ReLU())
-    layers.append(nn.Dropout(dropout_rate))
-    layers.append(nn.Linear(fc_units_1, fc_units_2))
-    layers.append(nn.ReLU())
-    layers.append(nn.Dropout(dropout_rate))
-    layers.append(nn.Linear(fc_units_2, num_classes))
+
+    # Generate FC layer sizes by halving from flattened_size
+    fc_sizes = []
+    current_fc_size = flattened_size // 2
+    for _ in range(n_fc_layers):
+        fc_sizes.append(max(10, current_fc_size)) # Minimum 10 units
+        current_fc_size //= 2
+
+    # Add FC layers
+    in_features = flattened_size
+    for fc_size in fc_sizes:
+        layers.append(nn.Linear(in_features, fc_size))
+        layers.append(nn.ReLU())
+        layers.append(nn.Dropout(fc_dropout_rate))
+        in_features = fc_size
+
+    # Output layer
+    layers.append(nn.Linear(in_features, num_classes))
 
     return nn.Sequential(*layers)
 
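The flattened size is now derived from the tracked `current_channels` and `current_size`, fixing the old assumption that every conv was size-preserving, and the classifier head halves its width per layer. A worked example with illustrative hyperparameters, assuming odd kernels so each block exactly halves the spatial size:

```python
# Illustrative: 3 conv blocks, initial_filters=32, 32x32 RGB input, odd kernels.
# Channels double per block: 32 -> 64 -> 128; pooling halves size: 32 -> 16 -> 8 -> 4.
flattened_size = 128 * 4 * 4  # 2048

# n_fc_layers=3: widths halve from flattened_size // 2, floored at 10 units.
fc_sizes, size = [], flattened_size // 2
for _ in range(3):
    fc_sizes.append(max(10, size))
    size //= 2

print(fc_sizes)  # [1024, 512, 256]; full head: 2048 -> 1024 -> 512 -> 256 -> 10
```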
@@ -113,6 +140,7 @@ def train_trial(
     best_val_accuracy = 0.0
 
     for epoch in range(n_epochs):
+
         # Training phase
         model.train()
 
@@ -149,55 +177,96 @@ def train_trial(
 
 
 def create_objective(
-    train_loader: DataLoader,
-    val_loader: DataLoader,
+    data_dir,
+    train_transform,
+    eval_transform,
     n_epochs: int,
     device: torch.device,
     num_classes: int = 10,
-    in_channels: int = 3
+    in_channels: int = 3,
+    search_space: dict = None
 ) -> Callable[[optuna.Trial], float]:
     '''Create an Optuna objective function for CNN hyperparameter optimization.
 
-    This factory function creates a closure that captures the data loaders and
-    training configuration, returning an objective function suitable for Optuna.
+    This factory function creates a closure that captures the data loading parameters
+    and training configuration, returning an objective function suitable for Optuna.
 
     Args:
-        train_loader: DataLoader for training data
-        val_loader: DataLoader for validation data
+        data_dir: Directory containing CIFAR-10 data
+        train_transform: Transform to apply to training data
+        eval_transform: Transform to apply to validation data
         n_epochs: Number of epochs per trial
        device: Device to train on (cuda or cpu)
         num_classes: Number of output classes (default: 10)
         in_channels: Number of input channels (default: 3 for RGB)
+        search_space: Dictionary defining hyperparameter search space (default: None)
 
     Returns:
         Objective function for optuna.Study.optimize()
 
     Example:
-        >>> objective = create_objective(train_loader, val_loader, n_epochs=50, device=device)
+        >>> objective = create_objective(data_dir, transform, transform, n_epochs=50, device=device)
        >>> study = optuna.create_study(direction='maximize')
        >>> study.optimize(objective, n_trials=100)
     '''
 
+    # Default search space if none provided
+    if search_space is None:
+        search_space = {
+            'batch_size': [64, 128, 256, 512, 1024],
+            'n_conv_blocks': (1, 5),
+            'initial_filters': [8, 16, 32, 64, 128],
+            'n_fc_layers': (1, 8),
+            'base_kernel_size': (3, 7),
+            'conv_dropout_rate': (0.0, 0.5),
+            'fc_dropout_rate': (0.2, 0.75),
+            'pooling_strategy': ['max', 'avg'],
+            'use_batch_norm': [True, False],
+            'learning_rate': (1e-5, 1e-1, 'log'),
+            'optimizer': ['Adam', 'SGD', 'RMSprop'],
+            'sgd_momentum': (0.8, 0.99)
+        }
+
     def objective(trial: optuna.Trial) -> float:
         '''Optuna objective function for CNN hyperparameter optimization.'''
 
-        # Suggest hyperparameters
-        n_conv_blocks = trial.suggest_int('n_conv_blocks', 1, 5)
-        initial_filters = trial.suggest_categorical('initial_filters', [8, 16, 32, 64, 128])
-        fc_units_1 = trial.suggest_categorical('fc_units_1', [128, 256, 512, 1024, 2048])
-        fc_units_2 = trial.suggest_categorical('fc_units_2', [32, 64, 128, 256, 512])
-        dropout_rate = trial.suggest_float('dropout_rate', 0.2, 0.75)
-        use_batch_norm = trial.suggest_categorical('use_batch_norm', [True, False])
-        learning_rate = trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True)
-        optimizer_name = trial.suggest_categorical('optimizer', ['Adam', 'SGD', 'RMSprop'])
+        # Suggest hyperparameters from search space
+        batch_size = trial.suggest_categorical('batch_size', search_space['batch_size'])
+        n_conv_blocks = trial.suggest_int('n_conv_blocks', *search_space['n_conv_blocks'])
+        initial_filters = trial.suggest_categorical('initial_filters', search_space['initial_filters'])
+        n_fc_layers = trial.suggest_int('n_fc_layers', *search_space['n_fc_layers'])
+        base_kernel_size = trial.suggest_int('base_kernel_size', *search_space['base_kernel_size'])
+        conv_dropout_rate = trial.suggest_float('conv_dropout_rate', *search_space['conv_dropout_rate'])
+        fc_dropout_rate = trial.suggest_float('fc_dropout_rate', *search_space['fc_dropout_rate'])
+        pooling_strategy = trial.suggest_categorical('pooling_strategy', search_space['pooling_strategy'])
+        use_batch_norm = trial.suggest_categorical('use_batch_norm', search_space['use_batch_norm'])
+
+        # Handle learning rate with optional log scale
+        lr_params = search_space['learning_rate']
+        learning_rate = trial.suggest_float('learning_rate', lr_params[0], lr_params[1],
+                                            log=(lr_params[2] == 'log' if len(lr_params) > 2 else False))
+
+        optimizer_name = trial.suggest_categorical('optimizer', search_space['optimizer'])
+
+        # Create data loaders with suggested batch size
+        train_loader, val_loader, _ = make_data_loaders(
+            data_dir=data_dir,
+            batch_size=batch_size,
+            train_transform=train_transform,
+            eval_transform=eval_transform,
+            device=device,
+            download=False
+        )
 
         # Create model
         model = create_cnn(
             n_conv_blocks=n_conv_blocks,
             initial_filters=initial_filters,
-            fc_units_1=fc_units_1,
-            fc_units_2=fc_units_2,
-            dropout_rate=dropout_rate,
+            n_fc_layers=n_fc_layers,
+            base_kernel_size=base_kernel_size,
+            conv_dropout_rate=conv_dropout_rate,
+            fc_dropout_rate=fc_dropout_rate,
+            pooling_strategy=pooling_strategy,
             use_batch_norm=use_batch_norm,
             num_classes=num_classes,
             in_channels=in_channels
@@ -208,7 +277,7 @@ def create_objective(
             optimizer = optim.Adam(model.parameters(), lr=learning_rate)
 
         elif optimizer_name == 'SGD':
-            momentum = trial.suggest_float('sgd_momentum', 0.8, 0.99)
+            momentum = trial.suggest_float('sgd_momentum', *search_space['sgd_momentum'])
             optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)
 
         else: # RMSprop
@@ -228,6 +297,10 @@ def create_objective(
                 trial=trial
             )
 
+        except RuntimeError as e:
+            # Catch architecture errors (e.g., dimension mismatches)
+            raise optuna.TrialPruned(f'RuntimeError with params: {trial.params} - {str(e)}')
+
         except torch.cuda.OutOfMemoryError:
             # Clear CUDA cache and skip this trial
             torch.cuda.empty_cache()
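Taken together, `create_objective` now owns data loading (so batch size can be searched) and accepts an optional `search_space` override. A hedged end-to-end sketch, assuming `create_objective` is importable from `cifar10_tools.pytorch.hyperparameter_optimization` as the RECORD below indicates; all concrete values are illustrative:

```python
# Illustrative driver; the narrowed search space reuses the default conventions:
# lists -> categorical, 2-tuples -> ranges, a trailing 'log' -> log-scale float.
import optuna
import torch
from torchvision import transforms

from cifar10_tools.pytorch.hyperparameter_optimization import create_objective

search_space = {
    'batch_size': [128, 256],
    'n_conv_blocks': (2, 4),
    'initial_filters': [32, 64],
    'n_fc_layers': (1, 3),
    'base_kernel_size': (3, 5),
    'conv_dropout_rate': (0.0, 0.3),
    'fc_dropout_rate': (0.3, 0.6),
    'pooling_strategy': ['max'],
    'use_batch_norm': [True],
    'learning_rate': (1e-4, 1e-2, 'log'),
    'optimizer': ['Adam', 'SGD'],
    'sgd_momentum': (0.9, 0.99),
}

transform = transforms.Compose([transforms.ToTensor()])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

objective = create_objective(
    data_dir='data/pytorch/cifar10',
    train_transform=transform,
    eval_transform=transform,
    n_epochs=20,
    device=device,
    search_space=search_space,
)

study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=25)
print(study.best_params)
```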
@@ -222,7 +222,7 @@ def plot_evaluation_curves(
         roc_auc = auc(fpr, tpr)
         ax1.plot(fpr, tpr, label=class_name)
 
-    ax1.plot([0, 1], [0, 1], 'k--', label='Random classifier')
+    ax1.plot([0, 1], [0, 1], 'k--', label='random classifier')
     ax1.set_xlabel('False positive rate')
     ax1.set_ylabel('True positive rate')
     ax1.legend(loc='lower right', fontsize=12)
@@ -115,6 +115,4 @@ def train_model(
             f'val_accuracy: {val_accuracy:.2f}%'
         )
 
-    print('\nTraining complete.')
-
     return history
@@ -0,0 +1,78 @@
+Metadata-Version: 2.4
+Name: cifar10_tools
+Version: 0.5.0
+Summary: Tools for training neural networks on the CIFAR-10 task with PyTorch and TensorFlow
+License: GPLv3
+License-File: LICENSE
+Keywords: Python,Machine learning,Deep learning,CNNs,Computer vision,Image classification,CIFAR-10
+Author: gperdrizet
+Author-email: george@perdrizet.org
+Requires-Python: >=3.10,<3.13
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: License :: Other/Proprietary License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Scientific/Engineering :: Image Recognition
+Provides-Extra: tensorflow
+Requires-Dist: numpy (>=1.24)
+Requires-Dist: torch (>=2.0)
+Requires-Dist: torchvision (>=0.15)
+Project-URL: Documentation, https://gperdrizet.github.io/CIFAR10/README.md
+Project-URL: Homepage, https://github.com/gperdrizet/CIFAR10
+Project-URL: Issues, https://github.com/gperdrizet/CIFAR10/issues
+Project-URL: PyPI, https://pypi.org/project/cifar10_tools
+Project-URL: Repository, https://github.com/gperdrizet/CIFAR10
+Description-Content-Type: text/markdown
+
+# PyTorch: CIFAR-10 Demonstration
+
+A progressive deep learning tutorial for image classification on the CIFAR-10 dataset using PyTorch. This project demonstrates the evolution from basic deep neural networks to optimized convolutional neural networks with data augmentation. It also provides a set of utility functions as a PyPI package for use in other projects.
+
+[View on PyPI](https://pypi.org/project/cifar10_tools) | [Documentation](https://gperdrizet.github.io/CIFAR10/)
+
+## Installation
+
+Install the helper tools package locally in editable mode to use in this repository:
+
+```bash
+pip install -e .
+```
+
+Or install from PyPI to use in other projects:
+
+```bash
+pip install cifar10_tools
+```
+
+## Project overview
+
+This repository contains a series of Jupyter notebooks that progressively build more sophisticated neural network architectures for the CIFAR-10 image classification task. Each notebook builds upon concepts from the previous one, demonstrating key deep learning techniques.
+
+## Notebooks
+
+| Notebook | Description |
+|----------|-------------|
+| [01-DNN.ipynb](notebooks/01-DNN.ipynb) | **Deep Neural Network** - Baseline fully-connected DNN classifier using `nn.Sequential`. Establishes a performance baseline with a simple architecture. |
+| [02-CNN.ipynb](notebooks/02-CNN.ipynb) | **Convolutional Neural Network** - Introduction to CNNs with convolutional and pooling layers using `nn.Sequential`. Demonstrates the advantage of CNNs over DNNs for image tasks. |
+| [03-RGB-CNN.ipynb](notebooks/03-RGB-CNN.ipynb) | **RGB CNN** - CNN classifier that utilizes full RGB color information instead of grayscale, improving feature extraction from color images. |
+| [04-optimized-CNN.ipynb](notebooks/04-optimized-CNN.ipynb) | **Hyperparameter Optimization** - Uses Optuna for automated hyperparameter tuning to find optimal network architecture and training parameters. |
+| [05-augmented-CNN.ipynb](notebooks/05-augmented-CNN.ipynb) | **Data Augmentation** - Trains the optimized CNN architecture with image augmentation techniques for improved generalization and robustness. |
+
+## Requirements
+
+- Python >=3.10, <3.13
+- PyTorch >=2.0
+- torchvision >=0.15
+- numpy >=1.24
+
+## License
+
+This project is licensed under the GPLv3 License - see the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,12 @@
+cifar10_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cifar10_tools/pytorch/__init__.py,sha256=4er-aMGK-MZTlkH3Owz3x-Pz_Gl_NjplKwOBYdBA1p0,909
+cifar10_tools/pytorch/data.py,sha256=ZJb_EYxHPh6wsnAtzRcDFVVZaa3ChAbnC5IHaWaf0Ls,4272
+cifar10_tools/pytorch/evaluation.py,sha256=i4tRYOqWATVqQVkWT_fATWRbzo9ziX2DDkXKPaiQlFE,923
+cifar10_tools/pytorch/hyperparameter_optimization.py,sha256=VVZbeDRis_pviMSQFTQTgcFLp0-jtjz8hRrzDsuYA3g,11317
+cifar10_tools/pytorch/plotting.py,sha256=SB50bwY4qhvYu_cVNT7EAE2vwOI8-0pxwu7jwGTJRas,9550
+cifar10_tools/pytorch/training.py,sha256=spam_Q1G1ZAoheMMKY26RHl6YhIam8pW6A7Df7oS1to,3824
+cifar10_tools/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cifar10_tools-0.5.0.dist-info/METADATA,sha256=bOqXkMS7dQihCQ2rBwbf9mwwIJN-kofROEeg5bO2cQw,3840
+cifar10_tools-0.5.0.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+cifar10_tools-0.5.0.dist-info/licenses/LICENSE,sha256=wtHfRwmCF5-_XUmYwrBKwJkGipvHVmh7GXJOKKeOe2U,1073
+cifar10_tools-0.5.0.dist-info/RECORD,,
@@ -1,35 +0,0 @@
-Metadata-Version: 2.4
-Name: cifar10_tools
-Version: 0.3.0
-Summary: Tools for training neural networks on the CIFAR-10 task with PyTorch and TensorFlow
-License: GPLv3
-License-File: LICENSE
-Keywords: Python,Machine learning,Deep learning,CNNs,Computer vision,Image classification,CIFAR-10
-Author: gperdrizet
-Author-email: george@perdrizet.org
-Requires-Python: >=3.10,<3.13
-Classifier: Development Status :: 3 - Alpha
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: Education
-Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
-Classifier: License :: Other/Proprietary License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Classifier: Topic :: Scientific/Engineering :: Image Recognition
-Provides-Extra: tensorflow
-Requires-Dist: numpy (>=1.24)
-Requires-Dist: torch (>=2.0)
-Requires-Dist: torchvision (>=0.15)
-Project-URL: Documentation, https://gperdrizet.github.io/CIFAR10/README.md
-Project-URL: Homepage, https://github.com/gperdrizet/CIFAR10
-Project-URL: Issues, https://github.com/gperdrizet/CIFAR10/issues
-Project-URL: PyPI, https://pypi.org/project/cifar10_tools
-Project-URL: Repository, https://github.com/gperdrizet/CIFAR10
-Description-Content-Type: text/markdown
-
-# PyTorch: CIFAR10 demonstration
@@ -1,12 +0,0 @@
-cifar10_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cifar10_tools/pytorch/__init__.py,sha256=4er-aMGK-MZTlkH3Owz3x-Pz_Gl_NjplKwOBYdBA1p0,909
-cifar10_tools/pytorch/data.py,sha256=09zodpjto0xLq95tDAyq57CFh6MSYRuUBPcMmQcyKZM,626
-cifar10_tools/pytorch/evaluation.py,sha256=i4tRYOqWATVqQVkWT_fATWRbzo9ziX2DDkXKPaiQlFE,923
-cifar10_tools/pytorch/hyperparameter_optimization.py,sha256=92MwDp6CarFp6O-tkJqeVqDyn0Az15gu3pluAvnO2mw,8056
-cifar10_tools/pytorch/plotting.py,sha256=9kRDt9ZEX0uOUlt-9wzJHrx4WELuFYMeeQiJrmwyXNs,9550
-cifar10_tools/pytorch/training.py,sha256=KNaH-Q9u61o3DIcTfBhjnOvOD7yExZeXwBm6qvMGL9I,3859
-cifar10_tools/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cifar10_tools-0.3.0.dist-info/METADATA,sha256=Ll6YMa77t9ubJLaiFF8BsMmDuj_pzTLejL6Wlje2Qwo,1580
-cifar10_tools-0.3.0.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
-cifar10_tools-0.3.0.dist-info/licenses/LICENSE,sha256=wtHfRwmCF5-_XUmYwrBKwJkGipvHVmh7GXJOKKeOe2U,1073
-cifar10_tools-0.3.0.dist-info/RECORD,,