ilovetools 0.1.3__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {ilovetools-0.1.3/ilovetools.egg-info → ilovetools-0.1.4}/PKG-INFO +1 -1
  2. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/__init__.py +3 -1
  3. ilovetools-0.1.4/ilovetools/ml/__init__.py +31 -0
  4. ilovetools-0.1.4/ilovetools/ml/metrics.py +589 -0
  5. {ilovetools-0.1.3 → ilovetools-0.1.4/ilovetools.egg-info}/PKG-INFO +1 -1
  6. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools.egg-info/SOURCES.txt +2 -0
  7. {ilovetools-0.1.3 → ilovetools-0.1.4}/pyproject.toml +1 -1
  8. {ilovetools-0.1.3 → ilovetools-0.1.4}/setup.py +1 -1
  9. {ilovetools-0.1.3 → ilovetools-0.1.4}/LICENSE +0 -0
  10. {ilovetools-0.1.3 → ilovetools-0.1.4}/MANIFEST.in +0 -0
  11. {ilovetools-0.1.3 → ilovetools-0.1.4}/README.md +0 -0
  12. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/ai/__init__.py +0 -0
  13. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/ai/embeddings.py +0 -0
  14. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/ai/inference.py +0 -0
  15. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/ai/llm_helpers.py +0 -0
  16. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/audio/__init__.py +0 -0
  17. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/automation/__init__.py +0 -0
  18. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/conversion/__init__.py +0 -0
  19. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/data/__init__.py +0 -0
  20. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/data/feature_engineering.py +0 -0
  21. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/data/preprocessing.py +0 -0
  22. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/database/__init__.py +0 -0
  23. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/datetime/__init__.py +0 -0
  24. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/files/__init__.py +0 -0
  25. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/image/__init__.py +0 -0
  26. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/security/__init__.py +0 -0
  27. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/text/__init__.py +0 -0
  28. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/utils/__init__.py +0 -0
  29. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/validation/__init__.py +0 -0
  30. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/web/__init__.py +0 -0
  31. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools.egg-info/dependency_links.txt +0 -0
  32. {ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools.egg-info/top_level.txt +0 -0
  33. {ilovetools-0.1.3 → ilovetools-0.1.4}/requirements.txt +0 -0
  34. {ilovetools-0.1.3 → ilovetools-0.1.4}/setup.cfg +0 -0
  35. {ilovetools-0.1.3 → ilovetools-0.1.4}/tests/__init__.py +0 -0
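The headline change in 0.1.4 is the new ilovetools.ml subpackage (pure-Python evaluation metrics in ilovetools/ml/metrics.py); the remaining edits are version-number bookkeeping. A minimal sketch of how the new import surface might be checked after upgrading (assumes the 0.1.4 release is installed; not part of the diff itself):

    import ilovetools
    print(ilovetools.__version__)                     # "0.1.4" in this release
    from ilovetools import ml                         # subpackage added in this diff
    print(ml.accuracy_score([1, 0, 1], [1, 0, 0]))    # 0.6666666666666666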
{ilovetools-0.1.3/ilovetools.egg-info → ilovetools-0.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ilovetools
-Version: 0.1.3
+Version: 0.1.4
 Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
 Home-page: https://github.com/AliMehdi512/ilovetools
 Author: Ali Mehdi
{ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools/__init__.py
@@ -2,13 +2,14 @@
 ilovetools - A comprehensive Python utility library
 """
 
-__version__ = "0.1.3"
+__version__ = "0.1.4"
 __author__ = "Ali Mehdi"
 __email__ = "ali.mehdi.dev579@gmail.com"
 
 # Import all modules for easy access
 from . import ai
 from . import data
+from . import ml
 from . import files
 from . import text
 from . import image
@@ -25,6 +26,7 @@ from . import utils
 __all__ = [
     "ai",
     "data",
+    "ml",
     "files",
     "text",
     "image",
ilovetools-0.1.4/ilovetools/ml/__init__.py
@@ -0,0 +1,31 @@
+"""
+Machine Learning utilities module
+"""
+
+from .metrics import (
+    accuracy_score,
+    precision_score,
+    recall_score,
+    f1_score,
+    confusion_matrix,
+    classification_report,
+    mean_squared_error,
+    mean_absolute_error,
+    root_mean_squared_error,
+    r2_score,
+    roc_auc_score
+)
+
+__all__ = [
+    'accuracy_score',
+    'precision_score',
+    'recall_score',
+    'f1_score',
+    'confusion_matrix',
+    'classification_report',
+    'mean_squared_error',
+    'mean_absolute_error',
+    'root_mean_squared_error',
+    'r2_score',
+    'roc_auc_score',
+]
ilovetools-0.1.4/ilovetools/ml/metrics.py
@@ -0,0 +1,589 @@
+"""
+Model evaluation metrics for ML workflows
+"""
+
+from typing import List, Tuple, Dict, Union, Optional
+import numpy as np
+
+__all__ = [
+    'accuracy_score',
+    'precision_score',
+    'recall_score',
+    'f1_score',
+    'confusion_matrix',
+    'classification_report',
+    'mean_squared_error',
+    'mean_absolute_error',
+    'root_mean_squared_error',
+    'r2_score',
+    'roc_auc_score'
+]
+
+
+def accuracy_score(y_true: List, y_pred: List) -> float:
+    """
+    Calculate accuracy score for classification.
+
+    Accuracy = (Correct Predictions) / (Total Predictions)
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+
+    Returns:
+        float: Accuracy score (0.0 to 1.0)
+
+    Examples:
+        >>> from ilovetools.ml import accuracy_score
+
+        # Perfect predictions
+        >>> y_true = [1, 0, 1, 1, 0]
+        >>> y_pred = [1, 0, 1, 1, 0]
+        >>> accuracy_score(y_true, y_pred)
+        1.0
+
+        # 80% accuracy
+        >>> y_true = [1, 0, 1, 1, 0]
+        >>> y_pred = [1, 0, 1, 0, 0]
+        >>> accuracy_score(y_true, y_pred)
+        0.8
+
+        # Real-world: Email spam detection
+        >>> actual = [1, 1, 0, 0, 1, 0, 1, 0]
+        >>> predicted = [1, 0, 0, 0, 1, 0, 1, 1]
+        >>> acc = accuracy_score(actual, predicted)
+        >>> print(f"Model accuracy: {acc:.2%}")
+        Model accuracy: 75.00%
+
+    Notes:
+        - Use for balanced datasets
+        - Don't use for imbalanced datasets
+        - Range: 0.0 (worst) to 1.0 (best)
+        - Simple and intuitive metric
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    correct = sum(1 for true, pred in zip(y_true, y_pred) if true == pred)
+    return correct / len(y_true)
+
+
+def precision_score(y_true: List, y_pred: List, positive_label: int = 1) -> float:
+    """
+    Calculate precision score for binary classification.
+
+    Precision = TP / (TP + FP)
+    "Of all positive predictions, how many were correct?"
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+        positive_label: Label considered as positive class. Default: 1
+
+    Returns:
+        float: Precision score (0.0 to 1.0)
+
+    Examples:
+        >>> from ilovetools.ml import precision_score
+
+        # High precision (few false positives)
+        >>> y_true = [1, 0, 1, 1, 0, 0, 1, 0]
+        >>> y_pred = [1, 0, 1, 1, 0, 0, 0, 0]
+        >>> precision_score(y_true, y_pred)
+        1.0
+
+        # Lower precision (some false positives)
+        >>> y_true = [1, 0, 1, 1, 0]
+        >>> y_pred = [1, 1, 1, 1, 0]
+        >>> precision_score(y_true, y_pred)
+        0.75
+
+        # Spam detection (don't mark important emails as spam)
+        >>> actual_spam = [1, 1, 0, 0, 1, 0, 1, 0]
+        >>> predicted_spam = [1, 1, 1, 0, 1, 0, 1, 0]
+        >>> prec = precision_score(actual_spam, predicted_spam)
+        >>> print(f"Precision: {prec:.2%}")
+        Precision: 80.00%
+
+    Notes:
+        - Use when false positives are costly
+        - High precision = Few false alarms
+        - Example: Spam detection, fraud detection
+        - Returns 0.0 if no positive predictions
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    tp = sum(1 for true, pred in zip(y_true, y_pred)
+             if true == positive_label and pred == positive_label)
+    fp = sum(1 for true, pred in zip(y_true, y_pred)
+             if true != positive_label and pred == positive_label)
+
+    if tp + fp == 0:
+        return 0.0
+
+    return tp / (tp + fp)
+
+
+def recall_score(y_true: List, y_pred: List, positive_label: int = 1) -> float:
+    """
+    Calculate recall score (sensitivity) for binary classification.
+
+    Recall = TP / (TP + FN)
+    "Of all actual positives, how many did we catch?"
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+        positive_label: Label considered as positive class. Default: 1
+
+    Returns:
+        float: Recall score (0.0 to 1.0)
+
+    Examples:
+        >>> from ilovetools.ml import recall_score
+
+        # High recall (caught most positives)
+        >>> y_true = [1, 0, 1, 1, 0, 0, 1, 0]
+        >>> y_pred = [1, 1, 1, 1, 0, 0, 1, 0]
+        >>> recall_score(y_true, y_pred)
+        1.0
+
+        # Lower recall (missed some positives)
+        >>> y_true = [1, 0, 1, 1, 0]
+        >>> y_pred = [1, 0, 0, 1, 0]
+        >>> recall_score(y_true, y_pred)
+        0.6666666666666666
+
+        # Cancer detection (don't miss any cases)
+        >>> actual_cancer = [1, 1, 0, 0, 1, 0, 1, 0]
+        >>> predicted_cancer = [1, 1, 0, 1, 1, 0, 0, 0]
+        >>> rec = recall_score(actual_cancer, predicted_cancer)
+        >>> print(f"Recall: {rec:.2%}")
+        Recall: 75.00%
+
+    Notes:
+        - Use when false negatives are costly
+        - High recall = Few missed cases
+        - Example: Disease detection, fraud detection
+        - Returns 0.0 if no actual positives
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    tp = sum(1 for true, pred in zip(y_true, y_pred)
+             if true == positive_label and pred == positive_label)
+    fn = sum(1 for true, pred in zip(y_true, y_pred)
+             if true == positive_label and pred != positive_label)
+
+    if tp + fn == 0:
+        return 0.0
+
+    return tp / (tp + fn)
+
+
+def f1_score(y_true: List, y_pred: List, positive_label: int = 1) -> float:
+    """
+    Calculate F1 score (harmonic mean of precision and recall).
+
+    F1 = 2 * (Precision * Recall) / (Precision + Recall)
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+        positive_label: Label considered as positive class. Default: 1
+
+    Returns:
+        float: F1 score (0.0 to 1.0)
+
+    Examples:
+        >>> from ilovetools.ml import f1_score
+
+        # Balanced precision and recall
+        >>> y_true = [1, 0, 1, 1, 0, 0, 1, 0]
+        >>> y_pred = [1, 0, 1, 1, 0, 0, 1, 1]
+        >>> f1_score(y_true, y_pred)
+        0.8888888888888888
+
+        # Imbalanced dataset
+        >>> y_true = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+        >>> y_pred = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+        >>> f1 = f1_score(y_true, y_pred)
+        >>> print(f"F1 Score: {f1:.2%}")
+        F1 Score: 100.00%
+
+    Notes:
+        - Best metric for imbalanced datasets
+        - Balances precision and recall
+        - Range: 0.0 (worst) to 1.0 (best)
+        - Use when both false positives and negatives matter
+    """
+    precision = precision_score(y_true, y_pred, positive_label)
+    recall = recall_score(y_true, y_pred, positive_label)
+
+    if precision + recall == 0:
+        return 0.0
+
+    return 2 * (precision * recall) / (precision + recall)
+
+
+def confusion_matrix(y_true: List, y_pred: List) -> List[List[int]]:
+    """
+    Calculate confusion matrix for binary classification.
+
+    Returns 2x2 matrix:
+    [[TN, FP],
+     [FN, TP]]
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+
+    Returns:
+        list: 2x2 confusion matrix
+
+    Examples:
+        >>> from ilovetools.ml import confusion_matrix
+
+        # Perfect predictions
+        >>> y_true = [1, 0, 1, 1, 0]
+        >>> y_pred = [1, 0, 1, 1, 0]
+        >>> cm = confusion_matrix(y_true, y_pred)
+        >>> print(cm)
+        [[2, 0], [0, 3]]
+
+        # With errors
+        >>> y_true = [1, 0, 1, 1, 0, 0, 1, 0]
+        >>> y_pred = [1, 0, 1, 0, 0, 1, 1, 0]
+        >>> cm = confusion_matrix(y_true, y_pred)
+        >>> print(cm)
+        [[3, 1], [1, 3]]
+
+        # Interpret results
+        >>> tn, fp, fn, tp = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
+        >>> print(f"True Negatives: {tn}")
+        >>> print(f"False Positives: {fp}")
+        >>> print(f"False Negatives: {fn}")
+        >>> print(f"True Positives: {tp}")
+
+    Notes:
+        - Foundation of classification metrics
+        - Shows all types of errors
+        - Format: [[TN, FP], [FN, TP]]
+        - Use to understand model behavior
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    tn = sum(1 for true, pred in zip(y_true, y_pred) if true == 0 and pred == 0)
+    fp = sum(1 for true, pred in zip(y_true, y_pred) if true == 0 and pred == 1)
+    fn = sum(1 for true, pred in zip(y_true, y_pred) if true == 1 and pred == 0)
+    tp = sum(1 for true, pred in zip(y_true, y_pred) if true == 1 and pred == 1)
+
+    return [[tn, fp], [fn, tp]]
+
+
+def classification_report(y_true: List, y_pred: List) -> Dict[str, float]:
+    """
+    Generate comprehensive classification report.
+
+    Returns accuracy, precision, recall, and F1 score.
+
+    Args:
+        y_true: True labels
+        y_pred: Predicted labels
+
+    Returns:
+        dict: Dictionary with all metrics
+
+    Examples:
+        >>> from ilovetools.ml import classification_report
+
+        >>> y_true = [1, 0, 1, 1, 0, 0, 1, 0]
+        >>> y_pred = [1, 0, 1, 0, 0, 1, 1, 0]
+        >>> report = classification_report(y_true, y_pred)
+        >>> print(report)
+        {'accuracy': 0.75, 'precision': 0.75, 'recall': 0.75, 'f1_score': 0.75}
+
+        # Pretty print
+        >>> for metric, value in report.items():
+        ...     print(f"{metric}: {value:.2%}")
+        accuracy: 75.00%
+        precision: 75.00%
+        recall: 75.00%
+        f1_score: 75.00%
+
+    Notes:
+        - Comprehensive overview of model performance
+        - All metrics in one call
+        - Easy to compare models
+        - Returns dictionary for flexibility
+    """
+    return {
+        'accuracy': accuracy_score(y_true, y_pred),
+        'precision': precision_score(y_true, y_pred),
+        'recall': recall_score(y_true, y_pred),
+        'f1_score': f1_score(y_true, y_pred)
+    }
+
+
+def mean_squared_error(y_true: List[float], y_pred: List[float]) -> float:
+    """
+    Calculate Mean Squared Error for regression.
+
+    MSE = Average of (actual - predicted)^2
+
+    Args:
+        y_true: True values
+        y_pred: Predicted values
+
+    Returns:
+        float: MSE value
+
+    Examples:
+        >>> from ilovetools.ml import mean_squared_error
+
+        # Perfect predictions
+        >>> y_true = [1.0, 2.0, 3.0, 4.0]
+        >>> y_pred = [1.0, 2.0, 3.0, 4.0]
+        >>> mean_squared_error(y_true, y_pred)
+        0.0
+
+        # With errors
+        >>> y_true = [100, 200, 300, 400]
+        >>> y_pred = [110, 190, 310, 390]
+        >>> mse = mean_squared_error(y_true, y_pred)
+        >>> print(f"MSE: {mse:.2f}")
+        MSE: 100.00
+
+        # House price prediction
+        >>> actual_prices = [250000, 300000, 350000]
+        >>> predicted_prices = [245000, 310000, 340000]
+        >>> mse = mean_squared_error(actual_prices, predicted_prices)
+
+    Notes:
+        - Penalizes large errors heavily
+        - Not in original units (squared)
+        - Sensitive to outliers
+        - Lower is better
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    squared_errors = [(true - pred) ** 2 for true, pred in zip(y_true, y_pred)]
+    return sum(squared_errors) / len(squared_errors)
+
+
+def mean_absolute_error(y_true: List[float], y_pred: List[float]) -> float:
+    """
+    Calculate Mean Absolute Error for regression.
+
+    MAE = Average of |actual - predicted|
+
+    Args:
+        y_true: True values
+        y_pred: Predicted values
+
+    Returns:
+        float: MAE value
+
+    Examples:
+        >>> from ilovetools.ml import mean_absolute_error
+
+        # Perfect predictions
+        >>> y_true = [1.0, 2.0, 3.0, 4.0]
+        >>> y_pred = [1.0, 2.0, 3.0, 4.0]
+        >>> mean_absolute_error(y_true, y_pred)
+        0.0
+
+        # With errors
+        >>> y_true = [100, 200, 300, 400]
+        >>> y_pred = [110, 190, 310, 390]
+        >>> mae = mean_absolute_error(y_true, y_pred)
+        >>> print(f"MAE: ${mae:.2f}")
+        MAE: $10.00
+
+        # House price prediction
+        >>> actual_prices = [250000, 300000, 350000]
+        >>> predicted_prices = [245000, 310000, 340000]
+        >>> mae = mean_absolute_error(actual_prices, predicted_prices)
+        >>> print(f"Average error: ${mae:,.0f}")
+        Average error: $8,333
+
+    Notes:
+        - Easy to interpret
+        - Same units as target variable
+        - Less sensitive to outliers than MSE
+        - Lower is better
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    absolute_errors = [abs(true - pred) for true, pred in zip(y_true, y_pred)]
+    return sum(absolute_errors) / len(absolute_errors)
+
+
+def root_mean_squared_error(y_true: List[float], y_pred: List[float]) -> float:
+    """
+    Calculate Root Mean Squared Error for regression.
+
+    RMSE = sqrt(MSE)
+
+    Args:
+        y_true: True values
+        y_pred: Predicted values
+
+    Returns:
+        float: RMSE value
+
+    Examples:
+        >>> from ilovetools.ml import root_mean_squared_error
+
+        # Perfect predictions
+        >>> y_true = [1.0, 2.0, 3.0, 4.0]
+        >>> y_pred = [1.0, 2.0, 3.0, 4.0]
+        >>> root_mean_squared_error(y_true, y_pred)
+        0.0
+
+        # With errors
+        >>> y_true = [100, 200, 300, 400]
+        >>> y_pred = [110, 190, 310, 390]
+        >>> rmse = root_mean_squared_error(y_true, y_pred)
+        >>> print(f"RMSE: {rmse:.2f}")
+        RMSE: 10.00
+
+        # House price prediction
+        >>> actual_prices = [250000, 300000, 350000]
+        >>> predicted_prices = [245000, 310000, 340000]
+        >>> rmse = root_mean_squared_error(actual_prices, predicted_prices)
+        >>> print(f"RMSE: ${rmse:,.0f}")
+        RMSE: $8,660
+
+    Notes:
+        - Most common regression metric
+        - Same units as target variable
+        - Penalizes large errors
+        - Lower is better
+    """
+    mse = mean_squared_error(y_true, y_pred)
+    return mse ** 0.5
+
+
+def r2_score(y_true: List[float], y_pred: List[float]) -> float:
+    """
+    Calculate R-squared (coefficient of determination) for regression.
+
+    R² = 1 - (SS_res / SS_tot)
+    Proportion of variance explained by the model.
+
+    Args:
+        y_true: True values
+        y_pred: Predicted values
+
+    Returns:
+        float: R² value (-inf to 1.0, higher is better)
+
+    Examples:
+        >>> from ilovetools.ml import r2_score
+
+        # Perfect predictions
+        >>> y_true = [1.0, 2.0, 3.0, 4.0]
+        >>> y_pred = [1.0, 2.0, 3.0, 4.0]
+        >>> r2_score(y_true, y_pred)
+        1.0
+
+        # Good predictions
+        >>> y_true = [100, 200, 300, 400, 500]
+        >>> y_pred = [110, 190, 310, 390, 510]
+        >>> r2 = r2_score(y_true, y_pred)
+        >>> print(f"R²: {r2:.2%}")
+        R²: 99.50%
+
+        # Interpretation
+        >>> r2 = 0.85
+        >>> print(f"Model explains {r2:.0%} of variance")
+        Model explains 85% of variance
+
+    Notes:
+        - Range: -inf to 1.0 (1.0 is perfect)
+        - 0.0 = Model as good as mean baseline
+        - Negative = Model worse than mean
+        - Easy to interpret as percentage
+    """
+    if len(y_true) != len(y_pred):
+        raise ValueError("y_true and y_pred must have same length")
+
+    mean_true = sum(y_true) / len(y_true)
+
+    ss_tot = sum((true - mean_true) ** 2 for true in y_true)
+    ss_res = sum((true - pred) ** 2 for true, pred in zip(y_true, y_pred))
+
+    if ss_tot == 0:
+        return 0.0
+
+    return 1 - (ss_res / ss_tot)
+
+
+def roc_auc_score(y_true: List[int], y_scores: List[float]) -> float:
+    """
+    Calculate ROC AUC score for binary classification.
+
+    AUC = Area Under the ROC Curve
+    Measures model's ability to distinguish between classes.
+
+    Args:
+        y_true: True binary labels (0 or 1)
+        y_scores: Predicted probabilities or scores
+
+    Returns:
+        float: AUC score (0.0 to 1.0)
+
+    Examples:
+        >>> from ilovetools.ml import roc_auc_score
+
+        # Perfect separation
+        >>> y_true = [0, 0, 1, 1]
+        >>> y_scores = [0.1, 0.2, 0.8, 0.9]
+        >>> roc_auc_score(y_true, y_scores)
+        1.0
+
+        # Good separation
+        >>> y_true = [0, 0, 1, 1, 0, 1]
+        >>> y_scores = [0.2, 0.3, 0.7, 0.8, 0.75, 0.9]
+        >>> auc = roc_auc_score(y_true, y_scores)
+        >>> print(f"AUC: {auc:.2%}")
+        AUC: 88.89%
+
+    Notes:
+        - 1.0 = Perfect classifier
+        - 0.5 = Random guessing
+        - < 0.5 = Worse than random
+        - Threshold-independent metric
+    """
+    if len(y_true) != len(y_scores):
+        raise ValueError("y_true and y_scores must have same length")
+
+    # Sort by scores
+    pairs = sorted(zip(y_scores, y_true), reverse=True)
+
+    # Count positive and negative samples
+    n_pos = sum(y_true)
+    n_neg = len(y_true) - n_pos
+
+    if n_pos == 0 or n_neg == 0:
+        return 0.5
+
+    # Rank-based AUC: for each negative, count the positives scored above it
+    tp = 0
+    fp = 0
+    auc = 0.0
+    prev_score = None
+
+    for score, label in pairs:
+        if label == 1:
+            tp += 1
+        else:
+            fp += 1
+            auc += tp
+
+    return auc / (n_pos * n_neg)
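For reference, a short usage sketch of the metrics added above; the outputs follow directly from the pure-Python implementations in this diff (assumes ilovetools 0.1.4 is installed):

    from ilovetools.ml import classification_report, root_mean_squared_error, roc_auc_score

    # Classification: the four headline metrics in one call
    y_true = [1, 0, 1, 1, 0, 0, 1, 0]
    y_pred = [1, 0, 1, 0, 0, 1, 1, 0]
    print(classification_report(y_true, y_pred))
    # {'accuracy': 0.75, 'precision': 0.75, 'recall': 0.75, 'f1_score': 0.75}

    # Regression: RMSE in the same units as the target
    print(root_mean_squared_error([100, 200, 300, 400], [110, 190, 310, 390]))
    # 10.0

    # Ranking quality: 1.0 means every positive is scored above every negative
    print(roc_auc_score([0, 0, 1, 1], [0.1, 0.2, 0.8, 0.9]))
    # 1.0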
{ilovetools-0.1.3 → ilovetools-0.1.4/ilovetools.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ilovetools
-Version: 0.1.3
+Version: 0.1.4
 Summary: A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs
 Home-page: https://github.com/AliMehdi512/ilovetools
 Author: Ali Mehdi
{ilovetools-0.1.3 → ilovetools-0.1.4}/ilovetools.egg-info/SOURCES.txt
@@ -23,6 +23,8 @@ ilovetools/database/__init__.py
 ilovetools/datetime/__init__.py
 ilovetools/files/__init__.py
 ilovetools/image/__init__.py
+ilovetools/ml/__init__.py
+ilovetools/ml/metrics.py
 ilovetools/security/__init__.py
 ilovetools/text/__init__.py
 ilovetools/utils/__init__.py
{ilovetools-0.1.3 → ilovetools-0.1.4}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ilovetools"
-version = "0.1.3"
+version = "0.1.4"
 description = "A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs"
 readme = "README.md"
 requires-python = ">=3.8"
{ilovetools-0.1.3 → ilovetools-0.1.4}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
 
 setup(
     name="ilovetools",
-    version="0.1.2",
+    version="0.1.3",
     author="Ali Mehdi",
     author_email="ali.mehdi.dev579@gmail.com",
     description="A comprehensive Python utility library with modular tools for AI/ML, data processing, and daily programming needs",