pyerualjetwork 4.0.1__py3-none-any.whl → 4.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,426 @@
+ from tqdm import tqdm
+ import cupy as cp
+ from colorama import Fore, Style
+ import sys
+ import math
+ import numpy as np
+
+ def encode_one_hot(y_train, y_test=None, summary=False):
+     """
+     Performs one-hot encoding on y_train and y_test data.
+
+     Args:
+         y_train (cupy.ndarray): Train label data.
+         y_test (cupy.ndarray): Test label data (optional).
+         summary (bool): If True, prints the class-to-index mapping. Default: False
+
+     Returns:
+         tuple: One-hot encoded y_train and (if provided) y_test data.
+     """
+
+     y_train = y_train.get()
+     if y_test is not None:
+         y_test = y_test.get()
+
+     classes = np.unique(y_train)
+     class_count = len(classes)
+
+     class_to_index = {cls: idx for idx, cls in enumerate(classes)}
+
+     if summary:
+         print("Class-to-index mapping:")
+         for cls, idx in class_to_index.items():
+             print(f"  {idx}: {cls}")
+
+     y_train_encoded = np.zeros((y_train.shape[0], class_count))
+     for i, label in enumerate(y_train):
+         y_train_encoded[i, class_to_index[label]] = 1
+
+     if y_test is not None:
+         y_test_encoded = np.zeros((y_test.shape[0], class_count))
+         for i, label in enumerate(y_test):
+             y_test_encoded[i, class_to_index[label]] = 1
+         return cp.array(y_train_encoded), cp.array(y_test_encoded)
+
+     return cp.array(y_train_encoded)
+
+
+ def decode_one_hot(encoded_data):
+     """
+     Decodes one-hot encoded data to original categorical labels.
+
+     Args:
+         encoded_data (cupy.ndarray): One-hot encoded data with shape (n_samples, n_classes).
+
+     Returns:
+         cupy.ndarray: Decoded categorical labels with shape (n_samples,).
+     """
+
+     decoded_labels = cp.argmax(encoded_data, axis=1)
+
+     return decoded_labels
+
+
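A minimal round-trip sketch for the two helpers above (the label values are illustrative; encode_one_hot expects CuPy label arrays, since it calls .get() on its inputs):

    labels = cp.array([0, 2, 1, 2])
    one_hot = encode_one_hot(labels, summary=True)  # shape (4, 3), one column per class
    restored = decode_one_hot(one_hot)              # array([0, 2, 1, 2])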
+ def split(X, y, test_size, random_state):
+     """
+     Splits the given X (features) and y (labels) data into training and testing subsets.
+
+     Args:
+         X (array-like): Features data.
+         y (array-like): Labels data.
+         test_size (float or int): Proportion or number of samples for the test subset.
+         random_state (int or None): Seed for random state.
+
+     Returns:
+         tuple: x_train, x_test, y_train, y_test as ordered training and testing data subsets.
+     """
+     X = cp.array(X, copy=False)
+     y = cp.array(y, copy=False)
+
+     num_samples = X.shape[0]
+
+     if isinstance(test_size, float):
+         test_size = int(test_size * num_samples)
+     elif isinstance(test_size, int):
+         if test_size > num_samples:
+             raise ValueError(
+                 "test_size cannot be larger than the number of samples.")
+     else:
+         raise ValueError("test_size should be float or int.")
+
+     if random_state is not None:
+         cp.random.seed(random_state)
+
+     indices = cp.arange(num_samples)
+     cp.random.shuffle(indices)
+
+     test_indices = indices[:test_size]
+     train_indices = indices[test_size:]
+
+     x_train, x_test = X[train_indices], X[test_indices]
+     y_train, y_test = y[train_indices], y[test_indices]
+
+     return x_train, x_test, y_train, y_test
+
+
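Illustrative usage of split, assuming X and y are array-likes of equal length:

    x_train, x_test, y_train, y_test = split(X, y, test_size=0.2, random_state=42)
    # 20% of the samples go to the test subset; pass an int for an absolute count:
    x_train, x_test, y_train, y_test = split(X, y, test_size=100, random_state=42)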
+ def manuel_balancer(x_train, y_train, target_samples_per_class):
+     """
+     Generates synthetic examples to balance classes to the specified number of examples per class.
+
+     Arguments:
+     x_train -- Input dataset (examples) - CuPy array format
+     y_train -- Class labels (one-hot encoded) - CuPy array format
+     target_samples_per_class -- Desired number of samples per class
+
+     Returns:
+     x_balanced -- Balanced input dataset (CuPy array format)
+     y_balanced -- Balanced class labels (one-hot encoded, CuPy array format)
+     """
+     from .ui import loading_bars
+
+     bar_format = loading_bars()[0]
+
+     try:
+         x_train = cp.array(x_train)
+         y_train = cp.array(y_train)
+     except Exception:
+         pass
+
+     classes = cp.arange(y_train.shape[1])
+     class_count = len(classes)
+
+     x_balanced = []
+     y_balanced = []
+
+     for class_label in tqdm(range(class_count), leave=False, ascii="▱▰",
+                             bar_format=bar_format, desc='Augmenting Data', ncols=52):
+         class_indices = cp.where(cp.argmax(y_train, axis=1) == class_label)[0]
+         num_samples = len(class_indices)
+
+         if num_samples > target_samples_per_class:
+             # Undersample: keep a random subset of the class.
+             selected_indices = cp.random.choice(class_indices, target_samples_per_class, replace=False)
+             x_balanced.append(x_train[selected_indices])
+             y_balanced.append(y_train[selected_indices])
+
+         else:
+             x_balanced.append(x_train[class_indices])
+             y_balanced.append(y_train[class_indices])
+
+             if num_samples < target_samples_per_class:
+                 # Oversample: synthesize the missing examples by linear
+                 # interpolation between two random samples of the class.
+                 samples_to_add = target_samples_per_class - num_samples
+                 additional_samples = cp.zeros((samples_to_add, x_train.shape[1]))
+                 additional_labels = cp.zeros((samples_to_add, y_train.shape[1]))
+
+                 for i in range(samples_to_add):
+                     random_indices = cp.random.choice(class_indices, 2, replace=False)
+                     sample1 = x_train[random_indices[0]]
+                     sample2 = x_train[random_indices[1]]
+
+                     synthetic_sample = sample1 + (sample2 - sample1) * cp.random.rand()
+
+                     additional_samples[i] = synthetic_sample
+                     additional_labels[i] = y_train[class_indices[0]]
+
+                 x_balanced.append(additional_samples)
+                 y_balanced.append(additional_labels)
+
+     x_balanced = cp.vstack(x_balanced)
+     y_balanced = cp.vstack(y_balanced)
+
+     return x_balanced, y_balanced
+
+
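A sketch of manuel_balancer on hypothetical one-hot data: classes larger than the target are randomly undersampled, smaller ones are padded with interpolated synthetic samples:

    x = cp.random.rand(300, 10)                       # 300 samples, 10 features
    y = encode_one_hot(cp.random.randint(0, 3, 300))  # 3 classes, uneven counts
    x_bal, y_bal = manuel_balancer(x, y, target_samples_per_class=150)
    # x_bal.shape == (450, 10): exactly 150 samples per class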
+ def auto_balancer(x_train, y_train):
+
+     """
+     Function to balance the training data across different classes.
+
+     Arguments:
+     x_train (list): Input data for training.
+     y_train (list): Labels corresponding to the input data.
+
+     Returns:
+     tuple: A tuple containing balanced input data and labels.
+     """
+     from .ui import loading_bars
+
+     bar_format = loading_bars()[0]
+
+     classes = cp.arange(y_train.shape[1])
+     class_count = len(classes)
+
+     try:
+         ClassIndices = {i: cp.where(cp.array(y_train)[:, i] == 1)[
+             0] for i in range(class_count)}
+         classes = [len(ClassIndices[i]) for i in range(class_count)]
+
+         if len(set(classes)) == 1:
+             print(Fore.WHITE + "INFO: Data is already balanced. from: auto_balancer" + Style.RESET_ALL)
+             return x_train, y_train
+
+         MinCount = min(classes)
+
+         BalancedIndices = []
+         for i in tqdm(range(class_count), leave=False, ascii="▱▰",
+                       bar_format=bar_format, desc='Balancing Data', ncols=70):
+             if len(ClassIndices[i]) > MinCount:
+                 # Undersample every class down to the size of the smallest one.
+                 SelectedIndices = cp.random.choice(
+                     ClassIndices[i], MinCount, replace=False)
+             else:
+                 SelectedIndices = ClassIndices[i]
+             BalancedIndices.extend(SelectedIndices)
+
+         BalancedInputs = [x_train[idx] for idx in BalancedIndices]
+         BalancedLabels = [y_train[idx] for idx in BalancedIndices]
+
+         permutation = cp.random.permutation(len(BalancedInputs))
+         BalancedInputs = cp.array(BalancedInputs)[permutation]
+         BalancedLabels = cp.array(BalancedLabels)[permutation]
+
+         print(Fore.GREEN + "Data successfully balanced from: " + str(len(x_train)) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
+     except Exception:
+         print(Fore.RED + "ERROR: Inputs and labels must be the same length. Check parameters.")
+         sys.exit()
+
+     return cp.array(BalancedInputs), cp.array(BalancedLabels)
+
+
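Illustrative call: auto_balancer undersamples every class to the size of the smallest one, so for hypothetical class counts of (50, 80, 120) it keeps 50 samples per class:

    x_bal, y_bal = auto_balancer(x_train, y_train)   # y_train one-hot encoded
    # len(x_bal) == class_count * min(per-class sample counts)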
+ def synthetic_augmentation(x_train, y_train):
+     """
+     Generates synthetic examples to balance classes with fewer examples.
+
+     Arguments:
+     x_train -- Input dataset (examples) - array format
+     y_train -- Class labels (one-hot encoded) - array format
+
+     Returns:
+     x_balanced -- Balanced input dataset (array format)
+     y_balanced -- Balanced class labels (one-hot encoded, array format)
+     """
+     from .ui import loading_bars
+
+     bar_format = loading_bars()[0]
+
+     x = x_train
+     y = y_train
+     classes = np.arange(y_train.shape[1])
+     class_count = len(classes)
+
+     # Count how many samples each class currently has.
+     class_distribution = {i: 0 for i in range(class_count)}
+     for label in y.get():
+         class_distribution[np.argmax(label)] += 1
+
+     max_class_count = max(class_distribution.values())
+
+     x_balanced = list(x)
+     y_balanced = list(y)
+
+     for class_label in tqdm(range(class_count), leave=False, ascii="▱▰",
+                             bar_format=bar_format, desc='Augmenting Data', ncols=52):
+         class_indices = [i for i, label in enumerate(
+             y) if cp.argmax(label) == class_label]
+         num_samples = len(class_indices)
+
+         if num_samples < max_class_count:
+             while num_samples < max_class_count:
+                 # Interpolate between two random samples of the minority class.
+                 random_indices = cp.random.choice(
+                     class_indices, 2, replace=False)
+                 sample1 = x[random_indices[0]]
+                 sample2 = x[random_indices[1]]
+
+                 synthetic_sample = sample1 + \
+                     (cp.array(sample2) - cp.array(sample1)) * cp.random.rand()
+
+                 x_balanced.append(synthetic_sample.tolist())
+                 y_balanced.append(y[class_indices[0]])
+
+                 num_samples += 1
+
+     for i in range(len(x_balanced)):
+         x_balanced[i] = cp.array(x_balanced[i])
+         y_balanced[i] = cp.array(y_balanced[i])
+
+     return cp.array(x_balanced), cp.array(y_balanced)
+
+
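In contrast to auto_balancer, synthetic_augmentation oversamples: every minority class is grown to the size of the largest class by interpolation, and no original sample is dropped. A sketch:

    x_bal, y_bal = synthetic_augmentation(x_train, y_train)
    # every class now has max(per-class sample counts) samples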
+ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
+     """
+     Standardizes training and test datasets. x_test may be None.
+
+     Args:
+         x_train: cupy.ndarray (optional)
+         x_test: cupy.ndarray (optional)
+         scaler_params (optional): [mean, std] from a previously fitted scaler (for using a saved model)
+
+     Returns:
+         list:
+             Scaler parameters: mean and std
+         tuple
+             Standardized training and test datasets
+     """
+
+     if x_train is not None and hasattr(x_train, 'tolist'):
+         x_train = x_train.tolist()
+     if x_test is not None and hasattr(x_test, 'tolist'):
+         x_test = x_test.tolist()
+
+     if x_train is not None and scaler_params is None and x_test is not None:
+         # Fit on the training data and apply the same parameters to both sets.
+         x_train = cp.array(x_train, copy=False)
+         x_test = cp.array(x_test, copy=False)
+
+         mean = cp.mean(x_train, axis=0)
+         std = cp.std(x_train, axis=0)
+
+         train_data_scaled = (x_train - mean) / std
+         test_data_scaled = (x_test - mean) / std
+
+         train_data_scaled = cp.nan_to_num(train_data_scaled, nan=0)
+         test_data_scaled = cp.nan_to_num(test_data_scaled, nan=0)
+
+         scaler_params = [mean, std]
+
+         return scaler_params, train_data_scaled, test_data_scaled
+
+     if scaler_params is None and x_train is None and x_test is not None:
+         # No training data and no saved parameters: this model is not scaled,
+         # so return the test data unchanged.
+         return cp.array(x_test, copy=False)
+
+     if scaler_params is not None:
+         # Apply previously fitted parameters to the test data.
+         x_test = cp.array(x_test, copy=False)
+
+         test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+         test_data_scaled = cp.nan_to_num(test_data_scaled, nan=0)
+
+         return test_data_scaled
+
+
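The three call patterns of standard_scaler, sketched (scaler_params is the [mean, std] list returned by the fitting call):

    # fit on train, transform both:
    params, x_tr_s, x_te_s = standard_scaler(x_train, x_test)
    # reuse saved parameters on new data (e.g. when loading a saved model):
    x_new_s = standard_scaler(x_test=x_new, scaler_params=params)
    # no params and no train data: the data passes through unscaled:
    x_raw = standard_scaler(x_test=x_new)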
+ def normalization(
+     Input  # num: Input data to be normalized.
+ ):
+     """
+     Normalizes the input data using maximum absolute scaling.
+
+     Args:
+         Input (num): Input data to be normalized.
+
+     Returns:
+         (num) Scaled input data after normalization.
+     """
+
+     MaxAbs = cp.max(cp.abs(Input))
+     return (Input / MaxAbs)
+
+
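For example, max-abs scaling preserves signs and relative magnitudes while mapping the largest absolute value to 1:

    normalization(cp.array([-4.0, 2.0, 1.0]))  # -> array([-1.  ,  0.5 ,  0.25])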
+ def find_closest_factors(a):
+     """
+     Returns the factor pair (i, j) with i * j == a whose values are closest
+     to sqrt(a), i.e. the most square-like grid shape for a items.
+     """
+     root = int(math.sqrt(a))
+
+     for i in range(root, 0, -1):
+         if a % i == 0:
+             j = a // i
+             return i, j
+
+
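For instance, find_closest_factors(12) returns (3, 4) and find_closest_factors(9) returns (3, 3), which is handy for arranging n plots into a near-square grid.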
+ def batcher(x_test, y_test, batch_size=1):
+     """
+     Draws a class-stratified random sample from x_test / y_test.
+     batch_size is a fraction in (0, 1]: the share of each class to keep.
+     """
+     y_labels = cp.argmax(y_test, axis=1)
+
+     unique_labels = cp.unique(y_labels)
+     total_samples = sum(
+         int(cp.sum(y_labels == class_label) * batch_size) for class_label in unique_labels
+     )
+
+     sampled_x = cp.empty((total_samples, x_test.shape[1]), dtype=x_test.dtype)
+     sampled_y = cp.empty((total_samples, y_test.shape[1]), dtype=y_test.dtype)
+
+     offset = 0
+     for class_label in unique_labels:
+         class_indices = cp.where(y_labels == class_label)[0]
+
+         # Keep the same fraction of every class.
+         num_samples = int(len(class_indices) * batch_size)
+
+         sampled_indices = cp.random.choice(class_indices, num_samples, replace=False)
+
+         sampled_x[offset:offset + num_samples] = x_test[sampled_indices]
+         sampled_y[offset:offset + num_samples] = y_test[sampled_indices]
+
+         offset += num_samples
+
+     return sampled_x, sampled_y
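A sketch: with batch_size=0.5, batcher keeps a random half of every class, preserving the class proportions of the input:

    x_half, y_half = batcher(x_test, y_test, batch_size=0.5)
    # x_half has roughly half the rows of x_test, stratified by class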
pyerualjetwork/help.py CHANGED
@@ -12,5 +12,5 @@ def docs_and_examples():
 
      print('PLAN document: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PLAN\n')
      print('PLAN examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes\n')
-     print('PLANEAT examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_Anaplan/ExampleCodes/PLANEAT\n')
+     print('PLANEAT examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes/PLANEAT\n')
      print('PyerualJetwork document and examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork')
@@ -0,0 +1,21 @@
+
+ import cupy as cp
+
+ def categorical_crossentropy(y_true_batch, y_pred_batch):
+     """Mean categorical cross-entropy over a batch of one-hot targets."""
+     epsilon = 1e-7
+     # Clip predictions away from 0 and 1 to avoid log(0).
+     y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
+
+     losses = -cp.sum(y_true_batch * cp.log(y_pred_batch), axis=1)
+
+     mean_loss = cp.mean(losses)
+     return mean_loss
+
+
+ def binary_crossentropy(y_true_batch, y_pred_batch):
+     """Mean binary cross-entropy over a batch."""
+     epsilon = 1e-7
+     y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
+
+     losses = -cp.mean(y_true_batch * cp.log(y_pred_batch) + (1 - y_true_batch) * cp.log(1 - y_pred_batch), axis=1)
+
+     mean_loss = cp.mean(losses)
+     return mean_loss
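A quick sanity check of categorical_crossentropy on a perfectly confident, correct prediction (the loss is near zero, bounded below by the clipping epsilon):

    y_true = cp.array([[0., 1.], [1., 0.]])
    y_pred = cp.array([[0., 1.], [1., 0.]])
    categorical_crossentropy(y_true, y_pred)  # ~1e-7, not exactly 0 due to clipping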
@@ -0,0 +1,190 @@
+ import cupy as cp
+
+ def metrics(y_ts, test_preds, average='weighted'):
+     """
+     Calculates precision, recall and F1 score for a classification task.
+
+     Args:
+         y_ts (list or cupy.ndarray): True labels (one-hot encoded).
+         test_preds (list or cupy.ndarray): Predicted labels.
+         average (str): Type of averaging ('micro', 'macro', 'weighted').
+
+     Returns:
+         tuple: Precision, recall, F1 score.
+     """
+
+     from .data_operations_cuda import decode_one_hot
+
+     y_test_d = decode_one_hot(y_ts)
+     y_test_d = cp.array(y_test_d)
+     y_pred = cp.array(test_preds)
+
+     if y_test_d.ndim > 1:
+         y_test_d = y_test_d.reshape(-1)
+     if y_pred.ndim > 1:
+         y_pred = y_pred.reshape(-1)
+
+     tp = {}
+     fp = {}
+     fn = {}
+
+     # Move class ids to the host so they can be used as dict keys.
+     classes = cp.unique(cp.concatenate((y_test_d, y_pred))).get().tolist()
+
+     for c in classes:
+         tp[c] = 0
+         fp[c] = 0
+         fn[c] = 0
+
+     for true, pred in zip(y_test_d.get().tolist(), y_pred.get().tolist()):
+         for c in classes:
+             if true == c and pred == c:
+                 tp[c] += 1
+             elif true != c and pred == c:
+                 fp[c] += 1
+             elif true == c and pred != c:
+                 fn[c] += 1
+
+     precision = {}
+     recall = {}
+     f1 = {}
+
+     for c in classes:
+         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
+         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
+         f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+
+     if average == 'micro':
+         tp_sum, fp_sum, fn_sum = sum(tp.values()), sum(fp.values()), sum(fn.values())
+         precision_val = tp_sum / (tp_sum + fp_sum) if (tp_sum + fp_sum) > 0 else 0
+         recall_val = tp_sum / (tp_sum + fn_sum) if (tp_sum + fn_sum) > 0 else 0
+         f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+
+     elif average == 'macro':
+         precision_val = cp.mean(cp.array(list(precision.values())))
+         recall_val = cp.mean(cp.array(list(recall.values())))
+         f1_val = cp.mean(cp.array(list(f1.values())))
+
+     elif average == 'weighted':
+         weights = cp.array([cp.sum(y_test_d == c) for c in classes])
+         weights = weights / cp.sum(weights)
+         precision_val = cp.sum(weights * cp.array([precision[c] for c in classes]))
+         recall_val = cp.sum(weights * cp.array([recall[c] for c in classes]))
+         f1_val = cp.sum(weights * cp.array([f1[c] for c in classes]))
+
+     else:
+         raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
+
+     return precision_val, recall_val, f1_val
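Illustrative call, assuming y_test is one-hot encoded and test_preds are integer class predictions (e.g. decode_one_hot applied to model outputs):

    precision, recall, f1 = metrics(y_test, test_preds, average='weighted')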
+
+
+ def roc_curve(y_true, y_score):
+     """
+     Compute Receiver Operating Characteristic (ROC) curve.
+
+     Parameters:
+     y_true : array, shape = [n_samples]
+         True binary labels in range {0, 1} or {-1, 1}.
+     y_score : array, shape = [n_samples]
+         Target scores, can either be probability estimates of the positive class,
+         confidence values, or non-thresholded measure of decisions (as returned
+         by decision_function on some classifiers).
+
+     Returns:
+     fpr : array, shape = [n]
+         Increasing false positive rates such that element i is the false positive rate
+         of predictions with score >= thresholds[i].
+     tpr : array, shape = [n]
+         Increasing true positive rates such that element i is the true positive rate
+         of predictions with score >= thresholds[i].
+     thresholds : array, shape = [n]
+         Decreasing thresholds on the decision function used to compute fpr and tpr.
+     """
+
+     y_true = cp.asarray(y_true)
+     y_score = cp.asarray(y_score)
+
+     if len(cp.unique(y_true)) != 2:
+         raise ValueError("Only binary classification is supported.")
+
+     # Sort scores in descending order (cupy.argsort is always stable,
+     # so no 'kind' argument is needed or accepted).
+     desc_score_indices = cp.argsort(y_score)[::-1]
+     y_score = y_score[desc_score_indices]
+     y_true = y_true[desc_score_indices]
+
+     fpr = []
+     tpr = []
+     thresholds = []
+     n_pos = cp.sum(y_true)
+     n_neg = len(y_true) - n_pos
+
+     tp = 0
+     fp = 0
+     prev_score = None
+
+     # Sweep the threshold over the sorted scores, emitting one
+     # (fpr, tpr) point whenever the score value changes.
+     for i, score in enumerate(y_score):
+         if score != prev_score:
+             fpr.append(fp / n_neg)
+             tpr.append(tp / n_pos)
+             thresholds.append(score)
+             prev_score = score
+
+         if y_true[i] == 1:
+             tp += 1
+         else:
+             fp += 1
+
+     fpr.append(fp / n_neg)
+     tpr.append(tp / n_pos)
+     thresholds.append(score)
+
+     return cp.array(fpr), cp.array(tpr), cp.array(thresholds)
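A sketch on four hand-made samples; the scores separate the classes perfectly, so the curve passes through (0, 1):

    y_true = cp.array([0, 0, 1, 1])
    y_score = cp.array([0.1, 0.4, 0.6, 0.9])
    fpr, tpr, thr = roc_curve(y_true, y_score)
    # fpr: [0, 0, 0, 0.5, 1], tpr: [0, 0.5, 1, 1, 1] (one point per distinct score)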
142
+
143
+
144
+ def confusion_matrix(y_true, y_pred, class_count):
145
+ """
146
+ Computes confusion matrix.
147
+
148
+ Args:
149
+ y_true (numpy.ndarray): True class labels (1D array).
150
+ y_pred (numpy.ndarray): Predicted class labels (1D array).
151
+ num_classes (int): Number of classes.
152
+
153
+ Returns:
154
+ numpy.ndarray: Confusion matrix of shape (num_classes, num_classes).
155
+ """
156
+ confusion = cp.zeros((class_count, class_count), dtype=int)
157
+
158
+ for i in range(len(y_true)):
159
+ true_label = y_true[i]
160
+ pred_label = y_pred[i]
161
+ confusion[true_label, pred_label] += 1
162
+
163
+ return confusion
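For example, with three classes and one misclassification (row = true class, column = predicted class):

    y_true = cp.array([0, 1, 2, 2])
    y_pred = cp.array([0, 1, 2, 1])
    confusion_matrix(y_true, y_pred, class_count=3)
    # [[1, 0, 0],
    #  [0, 1, 0],
    #  [0, 1, 1]]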
+
+
+ def pca(X, n_components):
+     """
+     Principal component analysis via eigendecomposition of the covariance matrix.
+
+     Parameters:
+         X (cupy.ndarray): (n_samples, n_features)
+         n_components (int): Number of principal components to keep.
+
+     Returns:
+         X_reduced (cupy.ndarray): (n_samples, n_components)
+     """
+
+     X_meaned = X - cp.mean(X, axis=0)
+
+     covariance_matrix = cp.cov(X_meaned, rowvar=False)
+
+     # eigh returns eigenvalues of the symmetric covariance matrix in ascending order.
+     eigenvalues, eigenvectors = cp.linalg.eigh(covariance_matrix)
+
+     sorted_index = cp.argsort(eigenvalues)[::-1]
+     sorted_eigenvectors = eigenvectors[:, sorted_index]
+
+     eigenvectors_subset = sorted_eigenvectors[:, :n_components]
+
+     X_reduced = cp.dot(X_meaned, eigenvectors_subset)
+
+     return X_reduced
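Illustrative usage: project feature vectors onto their first two principal components:

    X = cp.random.rand(100, 10)
    X_2d = pca(X, n_components=2)   # shape (100, 2)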