openarchx-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. openarchx/__init__.py +11 -0
  2. openarchx/core/tensor.py +179 -0
  3. openarchx/cuda/__init__.py +27 -0
  4. openarchx/cuda/cuda_ops.py +296 -0
  5. openarchx/layers/activations.py +63 -0
  6. openarchx/layers/base.py +40 -0
  7. openarchx/layers/cnn.py +145 -0
  8. openarchx/layers/transformer.py +131 -0
  9. openarchx/nn/__init__.py +26 -0
  10. openarchx/nn/activations.py +127 -0
  11. openarchx/nn/containers.py +174 -0
  12. openarchx/nn/dropout.py +121 -0
  13. openarchx/nn/layers.py +338 -0
  14. openarchx/nn/losses.py +156 -0
  15. openarchx/nn/module.py +18 -0
  16. openarchx/nn/padding.py +120 -0
  17. openarchx/nn/pooling.py +318 -0
  18. openarchx/nn/rnn.py +226 -0
  19. openarchx/nn/transformers.py +187 -0
  20. openarchx/optimizers/adam.py +49 -0
  21. openarchx/optimizers/adaptive.py +63 -0
  22. openarchx/optimizers/base.py +24 -0
  23. openarchx/optimizers/modern.py +98 -0
  24. openarchx/optimizers/optx.py +91 -0
  25. openarchx/optimizers/sgd.py +63 -0
  26. openarchx/quantum/circuit.py +92 -0
  27. openarchx/quantum/gates.py +126 -0
  28. openarchx/utils/__init__.py +50 -0
  29. openarchx/utils/data.py +229 -0
  30. openarchx/utils/huggingface.py +288 -0
  31. openarchx/utils/losses.py +21 -0
  32. openarchx/utils/model_io.py +553 -0
  33. openarchx/utils/pytorch.py +420 -0
  34. openarchx/utils/tensorflow.py +467 -0
  35. openarchx/utils/transforms.py +259 -0
  36. openarchx-0.1.0.dist-info/METADATA +180 -0
  37. openarchx-0.1.0.dist-info/RECORD +43 -0
  38. openarchx-0.1.0.dist-info/WHEEL +5 -0
  39. openarchx-0.1.0.dist-info/licenses/LICENSE +21 -0
  40. openarchx-0.1.0.dist-info/top_level.txt +2 -0
  41. tests/__init__.py +1 -0
  42. tests/test_cuda_ops.py +205 -0
  43. tests/test_integrations.py +236 -0
tests/test_cuda_ops.py ADDED
@@ -0,0 +1,205 @@
+ import unittest
+ import numpy as np
+ import torch
+ import cupy as cp
+ from openarchx.cuda import cuda_ops
+ import time
+
+ class TestCUDAOps(unittest.TestCase):
+     @classmethod
+     def setUpClass(cls):
+         # Initialize random seed for reproducibility
+         np.random.seed(42)
+         torch.manual_seed(42)
+         cp.random.seed(42)
+
+     def setUp(self):
+         cuda_ops.clear_gpu_memory()
+
+     def tearDown(self):
+         cuda_ops.clear_gpu_memory()
+
+     def test_matmul(self):
+         # Test various matrix sizes
+         sizes = [(32, 32, 32), (128, 64, 256), (512, 512, 512)]
+
+         for m, k, n in sizes:
+             a = np.random.randn(m, k).astype(np.float32)
+             b = np.random.randn(k, n).astype(np.float32)
+
+             # NumPy result
+             expected = np.matmul(a, b)
+
+             # CUDA result
+             result, time_taken = cuda_ops.benchmark_operation(
+                 cuda_ops.matmul, a, b
+             )
+
+             np.testing.assert_allclose(result, expected, rtol=1e-5)
+             print(f"MatMul {m}x{k} @ {k}x{n} - Time: {time_taken:.4f}s")
+
+     def test_conv2d(self):
+         batch_size = 32
+         in_channels = 3
+         out_channels = 16
+         input_size = 32
+         kernel_size = 3
+
+         input_data = np.random.randn(
+             batch_size, in_channels, input_size, input_size
+         ).astype(np.float32)
+
+         weights = np.random.randn(
+             out_channels, in_channels, kernel_size, kernel_size
+         ).astype(np.float32)
+
+         # PyTorch result for validation
+         torch_input = torch.from_numpy(input_data)
+         torch_weights = torch.from_numpy(weights)
+         expected = torch.nn.functional.conv2d(
+             torch_input, torch_weights, padding=1
+         ).numpy()
+
+         # CUDA result
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.conv2d, input_data, weights, padding=1
+         )
+
+         np.testing.assert_allclose(result, expected, rtol=1e-4)
+         print(f"Conv2D - Time: {time_taken:.4f}s")
+
+     def test_batch_norm(self):
+         batch_size = 32
+         channels = 64
+         height = 32
+         width = 32
+
+         input_data = np.random.randn(
+             batch_size, channels, height, width
+         ).astype(np.float32)
+
+         gamma = np.random.randn(channels).astype(np.float32)
+         beta = np.random.randn(channels).astype(np.float32)
+         running_mean = np.zeros(channels, dtype=np.float32)
+         running_var = np.ones(channels, dtype=np.float32)
+
+         # PyTorch result for validation
+         torch_input = torch.from_numpy(input_data)
+         torch_gamma = torch.from_numpy(gamma)
+         torch_beta = torch.from_numpy(beta)
+
+         expected = torch.nn.functional.batch_norm(
+             torch_input,
+             torch.from_numpy(running_mean),
+             torch.from_numpy(running_var),
+             torch_gamma,
+             torch_beta,
+             training=True
+         ).numpy()
+
+         # CUDA result
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.batch_norm,
+             input_data, gamma, beta,
+             running_mean, running_var
+         )
+
+         np.testing.assert_allclose(result, expected, rtol=1e-4)
+         print(f"BatchNorm - Time: {time_taken:.4f}s")
+
+     def test_dropout(self):
+         shape = (32, 1024)
+         input_data = np.random.randn(*shape).astype(np.float32)
+         p = 0.5
+
+         # Test training mode
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.dropout, input_data, p, True
+         )
+
+         # Verify dropout mask properties
+         mask = (result != 0).astype(np.float32)
+         dropout_ratio = 1 - (mask.sum() / mask.size)
+         self.assertAlmostEqual(dropout_ratio, p, delta=0.1)
+
+         # Test eval mode (should return input unchanged)
+         result = cuda_ops.dropout(input_data, p, training=False)
+         np.testing.assert_array_equal(result, input_data)
+
+         print(f"Dropout - Time: {time_taken:.4f}s")
+
+     def test_elementwise_ops(self):
+         shape = (1024, 1024)
+         a = np.random.randn(*shape).astype(np.float32)
+         b = np.random.randn(*shape).astype(np.float32)
+
+         # Test ReLU
+         expected = np.maximum(a, 0)
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.elementwise_op, a, op_type='relu'
+         )
+         np.testing.assert_allclose(result, expected)
+         print(f"ReLU - Time: {time_taken:.4f}s")
+
+         # Test Add
+         expected = a + b
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.elementwise_op, a, b, op_type='add'
+         )
+         np.testing.assert_allclose(result, expected)
+         print(f"Add - Time: {time_taken:.4f}s")
+
+     def test_maxpool2d(self):
+         batch_size = 32
+         channels = 16
+         height = 32
+         width = 32
+         kernel_size = 2
+         stride = 2
+
+         input_data = np.random.randn(
+             batch_size, channels, height, width
+         ).astype(np.float32)
+
+         # PyTorch result for validation
+         torch_input = torch.from_numpy(input_data)
+         expected, indices = torch.nn.functional.max_pool2d(
+             torch_input,
+             kernel_size,
+             stride=stride,
+             return_indices=True
+         )
+         expected = expected.numpy()
+
+         # CUDA result
+         result, time_taken = cuda_ops.benchmark_operation(
+             cuda_ops.maxpool2d,
+             input_data,
+             kernel_size,
+             stride
+         )
+
+         np.testing.assert_allclose(result[0], expected, rtol=1e-5)
+         print(f"MaxPool2D - Time: {time_taken:.4f}s")
+
+     def test_memory_management(self):
+         # Test memory info
+         initial_mem = cuda_ops.get_memory_info()
+
+         # Allocate some memory
+         shape = (1024, 1024)
+         data = np.random.randn(*shape).astype(np.float32)
+         _ = cuda_ops.to_gpu(data)
+
+         mid_mem = cuda_ops.get_memory_info()
+         self.assertGreater(mid_mem['used'], initial_mem['used'])
+
+         # Clear memory
+         cuda_ops.clear_gpu_memory()
+         final_mem = cuda_ops.get_memory_info()
+
+         # Memory should be freed
+         self.assertLessEqual(final_mem['used'], initial_mem['used'])
+
+ if __name__ == '__main__':
+     unittest.main()
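Taken together, the assertions above pin down the small cuda_ops surface this wheel ships: matmul, conv2d, batch_norm, dropout, elementwise_op, maxpool2d, to_gpu, get_memory_info, clear_gpu_memory, and a benchmark_operation helper that returns a (result, elapsed_seconds) pair. A minimal usage sketch, assuming a CUDA-capable device and the cupy dependency the tests import; it uses only calls exercised above, and the unit of the 'used' field is an assumption:

    import numpy as np
    from openarchx.cuda import cuda_ops

    a = np.random.randn(64, 32).astype(np.float32)
    b = np.random.randn(32, 16).astype(np.float32)

    # benchmark_operation runs the op and also reports wall-clock time
    out, seconds = cuda_ops.benchmark_operation(cuda_ops.matmul, a, b)
    print(out.shape, f"{seconds:.4f}s")

    # Memory helpers, as exercised by test_memory_management
    print(cuda_ops.get_memory_info()['used'])  # assumed to be bytes in use
    cuda_ops.clear_gpu_memory()                # frees cached GPU allocations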
tests/test_integrations.py ADDED
@@ -0,0 +1,236 @@
+ import unittest
+ import sys
+ import os
+ import numpy as np
+
+ # Add parent directory to path
+ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+ from openarchx.core.tensor import Tensor
+ from openarchx.utils.data import Dataset, DataLoader, DatasetFactory
+ from openarchx.utils.transforms import Compose, ToTensor, Normalize, RandomCrop
+
+
+ class TestDataIntegration(unittest.TestCase):
+     """Test the dataset integration features."""
+
+     def setUp(self):
+         # Create a simple dataset for testing
+         self.X = np.random.randn(100, 3, 32, 32).astype(np.float32)
+         self.y = np.random.randint(0, 10, size=100).astype(np.int64)
+         self.dataset = Dataset(self.X, self.y)
+
+     def test_dataset_creation(self):
+         """Test creating a dataset."""
+         self.assertEqual(len(self.dataset), 100)
+         x, y = self.dataset[0]
+         self.assertEqual(x.shape, (3, 32, 32))
+         self.assertIsInstance(y, np.int64)
+
+     def test_dataloader(self):
+         """Test the dataloader."""
+         dataloader = DataLoader(self.dataset, batch_size=16, shuffle=True)
+         self.assertEqual(len(dataloader), 7)  # 100/16 = 6.25 -> 7 batches
+
+         for x_batch, y_batch in dataloader:
+             self.assertIsInstance(x_batch, Tensor)
+             self.assertIsInstance(y_batch, Tensor)
+             self.assertEqual(x_batch.data.shape[0], 16)  # Batch size
+             self.assertEqual(y_batch.data.shape[0], 16)  # Batch size
+             break
+
+     def test_dataset_factory(self):
+         """Test the dataset factory."""
+         dataset = DatasetFactory.from_numpy(self.X, self.y)
+         self.assertEqual(len(dataset), 100)
+         x, y = dataset[0]
+         self.assertEqual(x.shape, (3, 32, 32))
+         self.assertIsInstance(y, np.int64)
+
+
+ class TestTransforms(unittest.TestCase):
+     """Test the transforms functionality."""
+
+     def setUp(self):
+         # Uniform samples in [0, 1) so Normalize(0.5, 0.5) lands in (-1, 1)
+         self.x = np.random.rand(3, 32, 32).astype(np.float32)
+
+     def test_to_tensor(self):
+         """Test converting to tensor."""
+         transform = ToTensor()
+         output = transform(self.x)
+         self.assertIsInstance(output, Tensor)
+         self.assertEqual(output.data.shape, (3, 32, 32))
+
+     def test_normalize(self):
+         """Test normalizing data."""
+         transform = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+         output = transform(self.x)
+         self.assertTrue(np.all(output <= 1.0))
+         self.assertTrue(np.all(output >= -1.0))
+
+     def test_random_crop(self):
+         """Test random cropping."""
+         transform = RandomCrop(24)
+         output = transform(self.x)
+         self.assertEqual(output.shape, (3, 24, 24))
+
+     def test_compose(self):
+         """Test composing transforms."""
+         transforms = Compose([
+             ToTensor(),
+             Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+         ])
+         output = transforms(self.x)
+         self.assertIsInstance(output, Tensor)
+         self.assertTrue(np.all(output.data <= 1.0))
+         self.assertTrue(np.all(output.data >= -1.0))
+
+
+ class TestPyTorchIntegration(unittest.TestCase):
+     """Test PyTorch integration features (if available)."""
+
+     @classmethod
+     def setUpClass(cls):
+         # Skip tests if PyTorch is not available
+         try:
+             import torch
+             cls.torch_available = True
+         except ImportError:
+             cls.torch_available = False
+
+     def setUp(self):
+         if not self.torch_available:
+             self.skipTest("PyTorch not available")
+
+         import torch
+         from torch.utils.data import TensorDataset
+
+         # Create a simple PyTorch dataset
+         self.x_torch = torch.randn(100, 3, 32, 32)
+         self.y_torch = torch.randint(0, 10, (100,))
+         self.torch_dataset = TensorDataset(self.x_torch, self.y_torch)
+
+     def test_torch_adapter(self):
+         """Test PyTorch dataset adapter."""
+         from openarchx.utils.data import TorchDatasetAdapter
+
+         adapter = TorchDatasetAdapter(self.torch_dataset)
+         self.assertEqual(len(adapter), 100)
+
+         x, y = adapter[0]
+         self.assertEqual(x.shape, (3, 32, 32))
+
+     def test_torch_factory(self):
+         """Test creating dataset from PyTorch."""
+         dataset = DatasetFactory.from_torch(self.torch_dataset)
+         self.assertEqual(len(dataset), 100)
+
+         x, y = dataset[0]
+         self.assertEqual(x.shape, (3, 32, 32))
+
+         # Test with DataLoader
+         dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
+         for x_batch, y_batch in dataloader:
+             self.assertIsInstance(x_batch, Tensor)
+             self.assertEqual(x_batch.data.shape[0], 16)  # Batch size
+             break
+
+
+ class TestTensorFlowIntegration(unittest.TestCase):
+     """Test TensorFlow integration features (if available)."""
+
+     @classmethod
+     def setUpClass(cls):
+         # Skip tests if TensorFlow is not available
+         try:
+             import tensorflow as tf
+             cls.tf_available = True
+         except ImportError:
+             cls.tf_available = False
+
+     def setUp(self):
+         if not self.tf_available:
+             self.skipTest("TensorFlow not available")
+
+         import tensorflow as tf
+
+         # Create a simple TensorFlow dataset
+         self.x_tf = np.random.randn(100, 32, 32, 3).astype(np.float32)  # TF uses channels last
+         self.y_tf = np.random.randint(0, 10, size=100).astype(np.int64)
+         self.tf_dataset = tf.data.Dataset.from_tensor_slices((self.x_tf, self.y_tf)).batch(10)
+
+     def test_tf_adapter(self):
+         """Test TensorFlow dataset adapter."""
+         from openarchx.utils.tensorflow import convert_from_tf_dataset
+
+         adapter = convert_from_tf_dataset(self.tf_dataset)
+
+         # Test with DataLoader
+         dataloader = DataLoader(adapter, batch_size=5)
+         for x_batch, y_batch in dataloader:
+             self.assertIsInstance(x_batch, Tensor)
+             break
+
+     def test_tf_model_adapter(self):
+         """Test TensorFlow model adapter."""
+         if not self.tf_available:
+             self.skipTest("TensorFlow not available")
+
+         import tensorflow as tf
+         from openarchx.utils.tensorflow import get_tf_model_adapter
+
+         # Create a simple Keras model
+         model = tf.keras.Sequential([
+             tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
+             tf.keras.layers.Dense(128, activation='relu'),
+             tf.keras.layers.Dense(10, activation='softmax')
+         ])
+
+         # Create adapter
+         adapter = get_tf_model_adapter(model)
+
+         # Test inference
+         test_input = np.random.randn(1, 32, 32, 3).astype(np.float32)
+         output = adapter(test_input)
+
+         self.assertIsInstance(output, Tensor)
+         self.assertEqual(output.data.shape, (1, 10))
+
+
+ class TestHuggingFaceIntegration(unittest.TestCase):
+     """Test Hugging Face integration features (if available)."""
+
+     @classmethod
+     def setUpClass(cls):
+         # Skip tests if transformers is not available
+         try:
+             import transformers
+             cls.hf_available = True
+         except ImportError:
+             cls.hf_available = False
+
+     def test_tokenizer_adapter(self):
+         """Test Hugging Face tokenizer adapter."""
+         if not self.hf_available:
+             self.skipTest("Hugging Face transformers not available")
+
+         try:
+             from openarchx.utils.huggingface import get_huggingface_tokenizer
+
+             # Create tokenizer adapter
+             tokenizer = get_huggingface_tokenizer("bert-base-uncased")
+
+             # Test tokenization
+             text = "Hello, world!"
+             tokens = tokenizer(text, return_tensors="np")
+
+             self.assertIsInstance(tokens, dict)
+             self.assertIn("input_ids", tokens)
+             self.assertIsInstance(tokens["input_ids"], Tensor)
+         except Exception as e:
+             self.skipTest(f"Hugging Face tokenizer test failed: {e}")
+
+
+ if __name__ == "__main__":
+     unittest.main()
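For orientation, the integration tests above imply a torchvision-style data pipeline. A minimal end-to-end sketch using only the names the tests import (Dataset, DataLoader, Compose, ToTensor, Normalize); batch shapes and Tensor wrapping follow the assertions above, and everything beyond those assertions is an assumption:

    import numpy as np
    from openarchx.core.tensor import Tensor
    from openarchx.utils.data import Dataset, DataLoader
    from openarchx.utils.transforms import Compose, ToTensor, Normalize

    X = np.random.rand(100, 3, 32, 32).astype(np.float32)  # inputs in [0, 1)
    y = np.random.randint(0, 10, size=100).astype(np.int64)

    # Per the tests, DataLoader yields Tensor batches; ceil(100 / 16) = 7 per epoch
    loader = DataLoader(Dataset(X, y), batch_size=16, shuffle=True)
    for xb, yb in loader:
        assert isinstance(xb, Tensor) and xb.data.shape[0] <= 16

    # Transforms compose as in test_compose; output lands in (-1, 1) for [0, 1) input
    pipeline = Compose([ToTensor(), Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
    sample = pipeline(X[0])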